seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
35658567505 | #!/usr/bin/python3
import sys
import socket
from random import randint
# Require a "<start_ip>-<stop_ip>" range argument; print usage and exit otherwise.
if len(sys.argv) < 2:
    print(sys.argv[0] + ": <start_ip>-<stop_ip>")
    sys.exit(1)
def get_ips(start_ip, stop_ip):
    """Return the list of dotted-quad IPv4 addresses from start_ip to
    stop_ip, inclusive.

    Both arguments are dotted-quad strings (e.g. "10.0.0.1").  If
    start_ip is numerically greater than stop_ip the list is empty,
    matching the original while-loop behaviour.

    The original implementation round-tripped through hexadecimal
    strings and shadowed the ``bytes`` builtin; plain bit arithmetic is
    both clearer and faster.
    """
    def _to_int(ip):
        # "a.b.c.d" -> 32-bit integer a<<24 | b<<16 | c<<8 | d
        a, b, c, d = (int(octet) for octet in ip.split('.'))
        return (a << 24) | (b << 16) | (c << 8) | d

    def _to_str(value):
        # 32-bit integer -> "a.b.c.d"
        return '.'.join(str((value >> shift) & 0xFF) for shift in (24, 16, 8, 0))

    return [_to_str(n) for n in range(_to_int(start_ip), _to_int(stop_ip) + 1)]
def dns_reverse_lookup(start_ip, stop_ip):
    """Reverse-resolve every address in the range in random order,
    printing "ip:<TAB>hostname" for each address that has a PTR record.
    Unknown hosts are skipped silently; other socket errors are printed.
    """
    pending = get_ips(start_ip, stop_ip)
    while pending:
        index = randint(0, len(pending) - 1)
        address = str(pending.pop(index))
        hostname = None
        try:
            hostname = socket.gethostbyaddr(address)[0]
        except socket.herror:
            # No reverse DNS entry for this address - ignore it.
            pass
        except socket.error as err:
            print(str(err))
        if hostname:
            print(address + ":\t" + hostname)
# Parse the "<start_ip>-<stop_ip>" command-line argument and run the scan.
start_ip, stop_ip = sys.argv[1].split('-')
dns_reverse_lookup(start_ip, stop_ip)
| balle/python-network-hacks | reverse-dns-scanner.py | reverse-dns-scanner.py | py | 1,470 | python | en | code | 135 | github-code | 13 |
# Largest rectangle under a histogram, O(n) monotonic-stack algorithm.
# Input format: the first number is the bar count, the rest are bar heights.
# The original shadowed the ``len`` builtin, kept an unused ``hi_dict`` and
# duplicated the area computation; this is the standard formulation.
str2 = '8 0 0 0 0 0 0 0 1000000000'
rectangles_list = list(map(int, str2.split()))
heights = rectangles_list[1:]  # drop the leading count

ans = 0
stack = []  # pairs (bar height, leftmost index that height extends back to)
position = 0
for bar in heights:
    start = position
    # Bars taller than the current one cannot extend further right:
    # close their rectangles now.
    while stack and stack[-1][0] > bar:
        height, left = stack.pop()
        if height * (position - left) > ans:
            ans = height * (position - left)
        # The current (shorter) bar extends back to where the popped one began.
        start = left
    stack.append((bar, start))
    position += 1
# Flush bars that reach the right edge of the histogram.
for height, left in stack:
    if height * (position - left) > ans:
        ans = height * (position - left)
print(ans) | ougordeev/Yandex | 3_A_14_gistogramm_rectangle.py | 3_A_14_gistogramm_rectangle.py | py | 955 | python | en | code | 0 | github-code | 13 |
2213068410 |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask_bcrypt import Bcrypt
from flask_login import (
UserMixin,
login_user,
LoginManager,
current_user,
logout_user,
login_required,
)
import os
# flask is instantiated as an app.
app = Flask(__name__)

# Configuration must be in place BEFORE the extensions are constructed:
# Flask-SQLAlchemy reads SQLALCHEMY_DATABASE_URI when SQLAlchemy(app) runs,
# so (as in the original) setting it afterwards has no effect.

# Base Directory of the package, used to locate the SQLite file.
basedir = os.path.abspath(os.path.dirname(__file__))

# SECRET KEY: prefer the environment; fall back to the historical default so
# existing deployments keep working.  Never ship the fallback to production.
app.config["SECRET_KEY"] = os.environ.get("SECRET_KEY", "6798ff58efe3cd907bfa5233")

# setting up sqlite database.
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///" + os.path.join(basedir, 'employeeData.db')

# sqlalchemy instance for database related stuffs.
db = SQLAlchemy(app)
# Initialising Marshmallow.
ma = Marshmallow(app)
# Initialising Bcrypt to generate hash of password.
bcrypt = Bcrypt(app)
# Initialising LOGIN MANAGER.
login_manager = LoginManager(app)
login_manager.session_protection = "strong"
login_manager.login_view = "login"
login_manager.login_message_category = "info"
| 1809mayur/EmployeeManagement | employee/__init__.py | __init__.py | py | 999 | python | en | code | 0 | github-code | 13 |
15496713434 | ####The Hamming distance between two integers is the number of positions at which the corresponding bits are different.
##Given two integers x and y, calculate the Hamming distance.
def hammingDistance(x, y):
    """Return the number of bit positions at which x and y differ.

    :type x: int
    :type y: int
    :rtype: int
    """
    diff = x ^ y          # set bits mark positions where x and y disagree
    distance = 0
    while diff:
        diff &= diff - 1  # Kernighan's trick: clear the lowest set bit
        distance += 1
    return distance
##binary tree merge
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
def mergeTrees(t1, t2):
    """Merge two binary trees by summing overlapping node values.

    When only one of the two nodes exists, that subtree is reused as-is.

    :type t1: TreeNode
    :type t2: TreeNode
    :rtype: TreeNode

    Bug fix: the original recursed via ``self.mergeTrees`` although this is
    a free function (no ``self``), which raised NameError on any tree with
    children present in both inputs.
    """
    if not t1 or not t2:
        # At most one side exists; return it (or None when both are None).
        return t2 or t1
    merged = TreeNode(t1.val + t2.val)
    merged.left = mergeTrees(t1.left, t2.left)
    merged.right = mergeTrees(t1.right, t2.right)
    return merged
###find the single value [set() function can make a dictionary for unique values]
def singleNumber(nums):
    """Return the element of nums that appears an odd number of times while
    every other element appears an even number of times (classic LeetCode
    "Single Number": all others appear exactly twice).

    :type nums: List[int]
    :rtype: int

    Uses an XOR fold: x ^ x == 0 and x ^ 0 == x, so paired values cancel
    and only the single value survives.  This is O(n) time / O(1) space;
    the original remove/append approach was O(n^2).
    """
    result = 0
    for value in nums:
        result ^= value
    return result
a_string=")(1,2,3,4(,5))"
def del_quo(a_string):
    """Remove every *matched* pair of parentheses from a_string, keeping
    their contents and any unmatched '(' or ')' characters.

    Returns None when a_string is None.

    Example: ")(1,2,3,4(,5))" -> ")1,2,3,4,5"

    Improvements over the original: the deleted indices are kept in a set
    (O(1) membership instead of O(n) list scans) and the result is built
    with a single join instead of repeated string concatenation.
    """
    if a_string is None:
        return None
    open_stack = []      # indices of '(' awaiting a matching ')'
    to_delete = set()    # indices of characters belonging to matched pairs
    for i, ch in enumerate(a_string):
        if ch == '(':
            open_stack.append(i)
        elif ch == ')' and open_stack:
            to_delete.add(open_stack.pop())
            to_delete.add(i)
    return str(''.join(ch for i, ch in enumerate(a_string) if i not in to_delete))
70239048018 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from .models import Profile
# Register your models here.
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
    """Admin configuration for managing Profile records on their own."""
    # Columns shown on the change list; 'user' is the link to the detail page.
    list_display = ['user', 'photo', 'company', 'position', 'phone_number']
    list_display_links = ['user']
    # Fields editable directly from the list view.
    list_editable = ['phone_number', 'photo']
    # Search spans the related User record as well as the phone number.
    search_fields = ['user__email', 'user__username', 'user__first_name',
                     'user__last_name', 'phone_number']
    list_filter = ['user__is_active', 'user__is_staff']
    # Detail form split into two sections.
    fieldsets = (
        ('Profile', {
            'fields': (
                (
                    'user',
                    'photo',
                ),
            ),
        }),
        ('Extra info', {
            'fields': (
                ('company', 'position'),
                'phone_number',
            ),
        })
    )
class ProfileInline(admin.StackedInline):
    """Inline editor so the Profile can be edited on the User admin page."""
    model = Profile
    can_delete = False
    verbose_name_plural = 'Profiles'
class UserAdmin(BaseUserAdmin):
    """Stock User admin extended with the inline Profile editor."""
    inlines = (ProfileInline,)
    list_display = (
        'username',
        'email',
        'first_name',
        'last_name',
        'is_active',
        'is_staff'
    )
admin.site.unregister(User)
admin.site.register(User, UserAdmin) | dawipo-project/dawipo | account/admin.py | admin.py | py | 1,375 | python | en | code | 0 | github-code | 13 |
34527131880 | from senate_stock_trades import util as u
def count_diff(first_senator_name, second_senator_name):
    """Return how many more transactions the first senator has than the
    second (may be negative).

    :param first_senator_name: senator name exactly as it appears in the data
    :param second_senator_name: senator name exactly as it appears in the data
    :return: first senator's transaction count minus the second's

    Performance fix: the original called ``u.get_data()`` several times per
    row; the data is now fetched exactly once.
    """
    data = u.get_data()
    first_count = sum(1 for row in data if row['senator'] == first_senator_name)
    second_count = sum(1 for row in data if row['senator'] == second_senator_name)
    return first_count - second_count
def amount_diff(first_senator_name, second_senator_name):
    """Return the difference of the two senators' accumulated amount ranges,
    as computed by ``u.sub_amount_ranges``.

    Raises KeyError (as the original did) when either senator has no
    transactions in the data.

    Fixes vs. the original: ``u.get_data()`` is fetched once instead of
    several times per row, and the two identical accumulation branches are
    merged into one loop.  (When both arguments are the same name the
    original double-counted each row; that degenerate case is now counted
    once.)
    """
    data = u.get_data()
    totals = {}  # senator name -> accumulated (low, high) amount range
    for row in data:
        name = row['senator']
        if name != first_senator_name and name != second_senator_name:
            continue
        rng = row['amount_range']
        if name not in totals:
            # First transaction for this senator: seed with its range.
            totals[name] = (rng[0], rng[1])
        else:
            totals[name] = u.add_amount_ranges(totals[name], rng)
    return u.sub_amount_ranges(totals[first_senator_name], totals[second_senator_name])
34049966232 | import time
import colors
original_deck = ['\u2665 A', '\u2665 2', '\u2665 3', '\u2665 4', '\u2665 5', '\u2665 6', '\u2665 7', '\u2665 8', '\u2665 9', '\u2665 10', '\u2665 J', '\u2665 Q', '\u2665 K',
'\u2666 A', '\u2666 2', '\u2666 3', '\u2666 4', '\u2666 5', '\u2666 6', '\u2666 7', '\u2666 8', '\u2666 9', '\u2666 10', '\u2666 J', '\u2666 Q', '\u2666 K',
'\u2663 A', '\u2663 2', '\u2663 3', '\u2663 4', '\u2663 5', '\u2663 6', '\u2663 7', '\u2663 8', '\u2663 9', '\u2663 10', '\u2663 J', '\u2663 Q', '\u2663 K',
'\u2660 A', '\u2660 2', '\u2660 3', '\u2660 4', '\u2660 5', '\u2660 6', '\u2660 7', '\u2660 8', '\u2660 9', '\u2660 10', '\u2660 J', '\u2660 Q', '\u2660 K']
# KINGS: 12, 25, 38, 51
power_deck = ['\u2665 7', '\u2665 8', '\u2665 9', '\u2665 10', '\u2665 J', '\u2665 Q', '\u2665 K',
'\u2666 7', '\u2666 8', '\u2666 9', '\u2666 10', '\u2666 J', '\u2666 Q', '\u2666 K',
'\u2663 7', '\u2663 8', '\u2663 9', '\u2663 10', '\u2663 J', '\u2663 Q', '\u2663 K',
'\u2660 7', '\u2660 8', '\u2660 9', '\u2660 10', '\u2660 J', '\u2660 Q', '\u2660 K']
power_dictionary = {'A': 1, 'J': 11, 'Q': 12, 'K': 13}
class playa:
    """A card-game player: a hand of cards plus per-turn display state.

    ``cards`` holds indices into the module-level ``original_deck``; ``'_'``
    marks a slot whose card has been removed.  The WHAT/WHOSE/WHICH fields
    describe the last action so show_cards() can colour-code the hand.
    """

    def __init__(self, name: str, cards: list):
        self.name = name
        self.cards = cards
        # 1-based positions of the cards in hand.
        # NOTE(review): uses list.index(), which maps duplicates to the first
        # occurrence — effectively [1..n] only while cards are unique.
        self.cards_index = [self.cards.index(x)+1 for x in self.cards]
        self.CHANGED = False   # True when the last action affected this hand
        self.WHAT = None       # description of the last action (e.g. "swap")
        self.WHOSE = None      # whose action it was
        self.WHICH = None      # 1-based card position the action touched

    def swap(self, card, index: int):
        """Replace the card at `index` with `card`; return the old card."""
        original = self.cards[index]
        self.cards[index] = card
        return original

    def look(self, index: int):
        """Reveal the card at `index` for 4 seconds, then blank it out."""
        print(f"\n{colors.purple}-> {self.name.upper()}'s CARD {index+1} : {original_deck[int(self.cards[index])]}{colors.ENDC}", end='')
        time.sleep(4)
        # Backspace over the revealed text so it disappears from the terminal.
        for i in range(30):
            print('\b', end='')
        print(f"{colors.purple}That's all the time you get, kid.{colors.ENDC}")

    def update_cards(self, cards: list):
        """Replace the hand and rebuild cards_index, skipping '_' gaps."""
        self.cards = cards
        new_indexes = []
        for index, card in enumerate(cards):
            if card != '_':
                new_indexes.append(index+1)
        self.cards_index = new_indexes

    def results(self):
        """Return the hand as a comma-separated string of card names."""
        return ''.join(f'{original_deck[int(x)]}, ' for x in self.cards)[:-2]

    def show_cards(self):
        """Return the card positions as a string, colour-coded by the
        last action recorded in CHANGED / WHAT / WHICH."""
        to_print = ''
        if self.CHANGED:
            if 'burn' in self.WHAT:
                # Whole hand highlighted after a burn.
                to_print = to_print.join(f'{str(x)} ' for x in self.cards_index)
                to_print = f'{colors.yellow}{to_print}{colors.ENDC}'
            elif 'Cameo' in self.WHAT:
                to_print = to_print.join(f'{str(x)} ' for x in self.cards_index)
                to_print = f'{colors.yellow}{colors.BOLD}{to_print}{colors.ENDC}'
            else:
                # Highlight only the affected position (WHICH).
                for index in self.cards_index:
                    if self.WHICH is not None and index == int(self.WHICH):
                        if "looked" in self.WHAT and "swapped" in self.WHAT:
                            to_print += f'{colors.UNDERLINE}{colors.red}{str(index)}{colors.ENDC} '
                        elif "swap" in self.WHAT:
                            to_print += f'{colors.red}{str(index)}{colors.ENDC} '
                        elif "looked" in self.WHAT:
                            to_print += f'{colors.UNDERLINE}{str(index)}{colors.ENDC} '
                    else:
                        to_print += f"{index} "
        else:
            # No recent action: plain listing.
            to_print = to_print.join(f'{str(x)} ' for x in self.cards_index)
        return to_print
| simplysudhanshu/Cameo | playa.py | playa.py | py | 3,561 | python | en | code | 0 | github-code | 13 |
41872454140 | import nltk
from nltk import sent_tokenize
from nltk import word_tokenize
# Link to tutorial:
# https://medium.com/towards-artificial-intelligence/natural-language-processing-nlp-with-python-tutorial-for-beginners-1f54e610a1a0#7ec0
print("Please type one sentence.")
sentence = input()
tokenized_words = word_tokenize(sentence)
tagged_words = nltk.pos_tag(tokenized_words)
print(tagged_words)
# How to extract a Noun Phrase from text
# Regex reminders
# ? - optional character
# * - 0 or more repetitions
grammar = "NP : {<DT>?<JJ>*<NN>}"
# Create a parser:
parser = nltk.RegexpParser(grammar)
output = parser.parse(tagged_words)
print(output)
output.draw() | strinh418/quiz-maker | question_generator.py | question_generator.py | py | 660 | python | en | code | 0 | github-code | 13 |
72301112338 | import pyMeow as pm
from configparser import ConfigParser
class Aimbot:
    """Colour-based screen aimbot overlay built on pyMeow.

    Scans a square field of view (FOV) centred on the screen for pixels
    matching a configured colour, draws the FOV/bounding box/aim point on
    an overlay, and optionally moves the mouse toward the detected target.
    """

    def __init__(self):
        self.config = dict()          # populated from config.ini by read_config()
        self.region = dict()          # top-left (x, y) of the FOV square; set in run()
        self.enemy_in_fov = bool()    # True while enough matching pixels are visible
        self.paused = bool()          # toggled by the configured pause key
        # Fixed UI colours (target marker colour comes from the config).
        self.colors = {
            "blue": pm.get_color("skyblue"),
            "red": pm.get_color("red"),
            "orange": pm.get_color("orange"),
        }

    def read_config(self):
        """Load config.ini into self.config; exit with a message if missing/invalid."""
        c = ConfigParser()
        c.read("config.ini")
        try:
            self.config = {
                "fps": c.getint("Main", "fps"),
                "draw_fps": c.getboolean("Main", "draw_fps"),
                "color": pm.get_color(c.get("Main", "color")),
                "similarity": c.getint("Main", "similarity"),
                "fov": c.getint("Main", "fov"),
                "pause_btn": c.getint("Main", "pause_btn"),
                "autoaim": c.getboolean("Aimbot", "autoaim"),
                "aimkey": c["Aimbot"]["aimkey"],
                "mark_color": pm.get_color(c.get("Aimbot", "mark_color")),
                "smooth": c.getint("Aimbot", "smooth"),
            }
        except Exception as e:
            # Nothing works without valid configuration: abort the process.
            quit(f"config.ini missing or invalid ({e})")

    def run(self):
        """Initialise the overlay, centre the FOV on screen, enter the main loop."""
        pm.overlay_init(fps=self.config["fps"])
        self.region = {
            "x": pm.get_screen_width() // 2 - self.config["fov"] // 2,
            "y": pm.get_screen_height() // 2 - self.config["fov"] // 2,
        }
        self.main_loop()

    def main_loop(self):
        """Per-frame loop: scan pixels, draw overlay, and (unless paused) aim."""
        while pm.overlay_loop():
            pixel = self.scan_pixel()
            # More than 10 matching pixels counts as a detection.
            self.enemy_in_fov = len(pixel) > 10
            pm.begin_drawing()
            if self.config["draw_fps"]:
                pm.draw_fps(0, 0)
            self.draw_fov()
            self.check_pause()
            if not self.paused:
                if self.enemy_in_fov:
                    bounds = self.calc_bounds(pixel)
                    self.draw_bounds(bounds)
                    aim_point = self.calc_aim_point(bounds)
                    # Aim automatically, or only while the aim key is held.
                    if self.config["autoaim"]:
                        self.aim(aim_point, self.config["smooth"])
                    elif pm.mouse_pressed(self.config["aimkey"]):
                        self.aim(aim_point, self.config["smooth"])
            else:
                # Centred "Pause" banner while the bot is disabled.
                pm.draw_text(
                    text="Pause",
                    posX=pm.get_screen_width() // 2 - pm.measure_text("Pause", 20) // 2,
                    posY=(pm.get_screen_height() // 2) - 10,
                    fontSize=20,
                    color=self.colors["orange"]
                )
            pm.end_drawing()

    def draw_fov(self):
        """Draw the FOV outline: red while a target is detected, blue otherwise."""
        pm.draw_rectangle_rounded_lines(
            posX=self.region["x"],
            posY=self.region["y"],
            width=self.config["fov"],
            height=self.config["fov"],
            roundness=0.1,
            segments=5,
            color=self.colors["red"] if self.enemy_in_fov else self.colors["blue"],
            lineThick=1.2
        )

    def scan_pixel(self):
        """Return the list of FOV pixels matching the configured colour."""
        return list(pm.pixel_search_colors(
            x=self.region["x"],
            y=self.region["y"],
            width=self.config["fov"],
            height=self.config["fov"],
            colors=[self.config["color"]],
            similarity=self.config["similarity"]
        ))

    def calc_bounds(self, pixel):
        """Compute the bounding box of the matched pixels (region-relative)."""
        minX, minY = float("inf"), float("inf")
        maxX, maxY = float("-inf"), float("-inf")
        for p in pixel:
            minX = min(minX, p["x"])
            minY = min(minY, p["y"])
            maxX = max(maxX, p["x"])
            maxY = max(maxY, p["y"])
        return {"x": minX, "y": minY, "width": maxX - minX, "height": maxY - minY}

    def draw_bounds(self, bounds):
        """Outline the detected target's bounding box on the overlay."""
        pm.draw_rectangle_lines(
            posX=self.region["x"] + bounds["x"],
            posY=self.region["y"] + bounds["y"],
            width=bounds["width"],
            height=bounds["height"],
            color=self.config["mark_color"],
            lineThick=1.2,
        )

    def calc_aim_point(self, bounds):
        """Return the screen-space centre of the bounding box and mark it."""
        point = {
            "x": self.region["x"] + bounds["x"] + bounds["width"] // 2,
            "y": self.region["y"] + bounds["y"] + bounds["height"] // 2
        }
        pm.draw_circle(
            centerX=point["x"],
            centerY=point["y"],
            radius=5,
            color=self.config["mark_color"]
        )
        return point

    def aim(self, point, smooth):
        """
        This might require a mouse driver depending on the game
        """
        # Relative move toward the point; a larger `smooth` means smaller steps.
        pm.mouse_move(
            x=(point["x"] - pm.get_screen_width() // 2) // smooth,
            y=(point["y"] - pm.get_screen_height() // 2) // smooth,
            relative=True
        )

    def check_pause(self):
        """Toggle the paused flag when the configured pause key is pressed."""
        if pm.key_pressed(self.config["pause_btn"]):
            self.paused = not self.paused
if __name__ == "__main__":
aimbot = Aimbot()
aimbot.read_config()
aimbot.run() | qb-0/pyMeow-PixelBot | main.py | main.py | py | 5,099 | python | en | code | 2 | github-code | 13 |
23052595250 | from vector import Vector
# Exercise the Vector API on two sample 3-D vectors.
a = Vector(1, 1, 1)
b = Vector(6, 6, 6)
k1 = a.addition(b)        # vector sum (a Vector)
k2 = a.subtraction(b)     # vector difference (a Vector)
k3 = a.length()           # magnitude of a
k4 = a.multiplication(b)  # presumably the dot product — confirm in vector.py
k5 = a.angle(b)           # angle between a and b
k1.get()                  # get() presumably prints the components — confirm
k2.get()
print(k3)
print(k4)
print(k5) | nvovk/python | OOP/4 - Vectors/index.py | index.py | py | 214 | python | en | code | 0 | github-code | 13 |
42508905456 | from question_model import Question
from data import question_data
from quiz_brain import QuizBrain
# Build Question objects from the raw question_data dicts.
question_bank = []  # Initialize the list
for question in question_data:  # loop through the question_data list
    question_text = question["text"]  # text is the key-value pair at text
    question_answer = question["answer"]  # answer is the key-value pair at answer
    new_question = Question(question_text, question_answer)  # Pass in the variables into question constructor
    question_bank.append(new_question)  # Append it to the question_bank list

# Run the quiz until the bank is exhausted, then report the final score.
quiz = QuizBrain(question_bank)  # Takes the QuizBrain class and initalizes the question_bank list
while quiz.still_has_questions():
    quiz.next_question()
print("You've completed the quiz.")
print(f"Your final score was {quiz.score}/{quiz.question_number}") | Chachenski/100-Days-of-Python | quiz-game-start/main.py | main.py | py | 816 | python | en | code | 0 | github-code | 13 |
# A small glossary used to demonstrate basic dictionary operations.
programming_dictionary = {
    "Bug": "An error in a program that prevents the program from running as expected.",
    "Function": "A piece of code that you can easily call over and over again.",
}

# Look up a definition by its key.
print(programming_dictionary["Bug"])

# Insert a brand-new key/value pair.
programming_dictionary["Loop"] = "The action of doing something over and over again."

# A dictionary that starts with no entries.
empty_dictionary = {}

# To wipe an existing dictionary you would reassign it to {}:
# programming_dictionary = {}

# Reassigning an existing key overwrites its old definition.
programming_dictionary["Bug"] = "A moth in your computer."

# Iterate the entries and print each term followed by its definition.
for term, definition in programming_dictionary.items():
    print(term)
    print(definition)
# Exercise #1
print("\nExercise #1")
student_scores = {
    "Harry": 81,
    "Ron": 78,
    "Hermione": 99,
    "Draco": 74,
    "Neville": 62,
}
# 🚨 Don't change the code above 👆


def _score_to_grade(score):
    """Map a numeric score onto its grading band."""
    if score > 90:
        return "Outstanding"
    if score > 80:
        return "Exceeds Expectations"
    if score > 70:
        return "Acceptable"
    return "Fail"


# Convert every score to its grade in a single comprehension.
student_grades = {name: _score_to_grade(points)
                  for name, points in student_scores.items()}

# 🚨 Don't change the code below 👇
print(student_grades)
# Nesting: dictionaries and lists can contain each other arbitrarily.
capitals = {
    "France": "Paris",
    "Germany": "Berlin",
}
# Nesting a List in a Dictionary: country -> list of cities.
travel_log = {
    "France": ["Paris", "Lille", "Dijon"],
    "Germany": ["Berlin", "Hamburg", "Stuttgart"],
}
# Nesting Dictionary in a Dictionary: country -> details record.
enhanced_travel_log = {
    "France": {
        "cities_visited": ["Paris", "Lille", "Dijon"],
        "total_visits": 12,
    },
    "Germany": {
        "cities_visited": ["Berlin", "Hamburg", "Stuttgart"],
        "total_visits": 4,
    },
    "Jamaica": {
        "cities_visited": ["Montego Bay"],
        "total_visits": 1,
    },
}
# Nesting Dictionary in a List: one record dict per country.
another_travel_log = [
    {
        "country": "France",
        "cities_visited": ["Paris", "Lille", "Dijon"],
        "total_visits": 12,
    },
    {
        "country": "Germany",
        "cities_visited": ["Berlin", "Hamburg", "Stuttgart"],
        "total_visits": 4,
    },
    {
        "country": "Jamaica",
        "cities_visited": ["Montego Bay"],
        "total_visits": 1,
    },
]
# Exercise 2
print("\nExercise #2")
new_travel_log = [
    {
        "country": "France",
        "visits": 12,
        "cities": ["Paris", "Lille", "Dijon"]
    },
    {
        "country": "Germany",
        "visits": 5,
        "cities": ["Berlin", "Hamburg", "Stuttgart"]
    },
]
#🚨 Do NOT change the code above


def add_new_country(country, visits, cities):
    """Append a visited-country record to new_travel_log."""
    record = {
        "country": country,
        "visits": visits,
        "cities": cities,
    }
    new_travel_log.append(record)


#🚨 Do not change the code below
add_new_country("Italy", 2, ["Rome", "Milan", "Florence"])
print(new_travel_log)
| TylerJEShelton/100_days_of_code_python | day_009/exercises.py | exercises.py | py | 3,102 | python | en | code | 0 | github-code | 13 |
37992809518 | import AthenaPoolCnvSvc.ReadAthenaPool
# Read events from the input AOD via Athena's POOL event selector.
svcMgr.EventSelector.InputCollections= ["AOD.09897018._000001.pool.root.1"]
# Process only the first 100 events.
theApp.EvtMax=100
algseq = CfgMgr.AthSequencer("AthAlgSeq") #gets a handle to the main athsequencer, for adding things to later!
# Schedule the GEANT-truth thinning algorithm in the main sequence.
algseq += CfgMgr.ThinGeantTruthAlg("ThinGeantTruthAlg")
from OutputStreamAthenaPool.MultipleStreamManager import MSMgr
xaodStream = MSMgr.NewPoolRootStream( "StreamXAOD", "xAOD.out.root" ) #suppose this is what your stream definition looked like
from AthenaServices.Configurables import ThinningSvc
svcMgr += ThinningSvc("ThinningSvc",Streams = ["StreamXAOD"]) #connects your stream to the thinningSvc
# Keep all truth particle/vertex containers (and their aux stores) in the output.
xaodStream.AddItem( ["xAOD::TruthParticleContainer#*","xAOD::TruthParticleAuxContainer#*","xAOD::TruthVertexContainer#*","xAOD::TruthVertexAuxContainer#*"] )
| rushioda/PIXELVALID_athena | athena/PhysicsAnalysis/AnalysisCommon/ThinningUtils/share/jobOptions.py | jobOptions.py | py | 805 | python | en | code | 1 | github-code | 13 |
38009207758 | from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
from AthenaMonitoring.AthenaMonitoringConf import AthenaMonManager
def JetMonGetAxisRange():
    """Return a dict of histogram axis definitions for jet monitoring.

    Each entry maps an axis name to [low edge, high edge, bin width],
    chosen according to the beam type (cosmics/collisions), the beam
    energy, and the data-quality data type (heavy-ion overrides last).
    Names suffixed "HD" are the coarser binnings used for 2-dim plots.

    NOTE(review): GeV and TeV are unit globals expected to be injected by
    the Athena job-options environment — confirm they are in scope here.
    """
    import math
    from AthenaCommon.BeamFlags import jobproperties
    axisranges = {}
    # mlog.info ("Beam type and energy: %s , %s" %(jobproperties.Beam.beamType,jobproperties.Beam.energy))
    # Binning shared by every run configuration.
    axisranges["radBins"] = [ 0.0, 1.0, 0.025 ] # radial bins
    axisranges["etaBins"] = [ -5.0, 5.0, 0.2 ] # eta bins
    axisranges["phiBins"] = [ -math.pi, math.pi, 0.2 ] # phi bins
    axisranges["massBins"] = [ 0.0*GeV, 30.0*GeV, 0.2* GeV ] # mass bins
    axisranges["radBinsHD"] = [ 0.0, 1.0, 0.1 ]
    axisranges["etaBinsHD"] = [ -5.0, 5.0, 0.1 ]
    axisranges["phiBinsHD"] = [ -math.pi, math.pi, 0.1 ]
    axisranges["massBinsHD"] = [ 0.0*GeV, 30.0*GeV, 0.2*GeV ]
    axisranges["njBins"] = [ 0., 50.0, 1. ]
    if jobproperties.Beam.beamType == 'cosmics' :
        # -- 1-dim binning
        axisranges["eBins"] = [ 0.0*GeV, 1500.0*GeV, 10.0*GeV ] # energy bins
        axisranges["etBins"] = [ 0.0*GeV, 1500.0*GeV, 10.0*GeV ] # energy bins
        axisranges["econBins"] = [ -2.0*GeV, 80.0*GeV, 0.5*GeV ] # energy bin
        # -- 2-dim binning
        axisranges["eBinsHD"] = [ 0.0*GeV, 1500.0*GeV, 20.0*GeV ]
        axisranges["etBinsHD"] = [ 0.0*GeV, 1500.0*GeV, 20.0*GeV ]
    elif jobproperties.Beam.beamType == 'collisions' :
        if jobproperties.Beam.energy == 450.*GeV:
            # -- 1-dim binning
            axisranges["eBins"] = [ 0.0*GeV, 500.0*GeV, 5.0*GeV ] # energy bin
            axisranges["etBins"] = [ 0.0*GeV, 200.0*GeV, 2.0*GeV ] # energy bin
            axisranges["econBins"] = [ -2.0*GeV, 80.0*GeV, 0.4*GeV ] # energy bin
            # -- 2-dim binning
            axisranges["eBinsHD"] = [ 0.0*GeV, 500.0*GeV, 5.0*GeV ]
            axisranges["etBinsHD"] = [ 0.0*GeV, 200.0*GeV, 2.0*GeV ]
        elif jobproperties.Beam.energy == 3.5*TeV:
            # -- 1-dim binning
            axisranges["eBins"] = [ 0.0*GeV, 3500.0*GeV, 5.0*GeV ] # energy bin
            axisranges["etBins"] = [ 0.0*GeV, 1000.0*GeV, 2.0*GeV ] # energy bin
            axisranges["econBins"] = [ -2.0*GeV, 200.0*GeV, 0.6*GeV ] # energy bin
            # -- 2-dim binning
            axisranges["eBinsHD"] = [ 0.0*GeV, 1000.0*GeV, 5.0*GeV ]
            axisranges["etBinsHD"] = [ 0.0*GeV, 500.0*GeV, 2.0*GeV ] # energy bin
        else:
            # -- 1-dim binning
            axisranges["eBins"] = [ 0.0*GeV, 2000.0*GeV, 5.0*GeV ] # energy bin ! tuned for the p+Pb run (it was 5000.0*GeV)
            axisranges["etBins"] = [ 0.0*GeV, 500.0*GeV, 2.0*GeV ] # energy bin ! tuned for the p+Pb run (it was 1000.0*GeV)
            axisranges["econBins"] = [ -2.0*GeV, 120.0*GeV, 0.6*GeV ] # energy bin
            # -- 2-dim binning
            axisranges["eBinsHD"] = [ 0.0*GeV, 1000.0*GeV, 5.0*GeV ]
            axisranges["etBinsHD"] = [ 0.0*GeV, 500.0*GeV, 2.0*GeV ] # energy bin
    else:
        # Any other beam type (e.g. single beam).
        # -- 1-dim binning
        axisranges["eBins"] = [ 0.0*GeV, 1000.0*GeV, 5.0*GeV ] # energy bins
        axisranges["etBins"] = [ 0.0*GeV, 200.0*GeV, 2.0*GeV ] # energy bins
        axisranges["econBins"] = [ -2.0*GeV, 200.0*GeV, 0.5*GeV ] # energy bin
        # -- 2-dim binning
        axisranges["eBinsHD"] = [ 0.0*GeV, 1000.0*GeV, 5.0*GeV ]
        axisranges["etBinsHD"] = [ 0.0*GeV, 200.0*GeV, 2.0*GeV ]
    # Heavy-ion data overrides the collision binnings chosen above.
    from AthenaMonitoring.DQMonFlags import DQMonFlags
    if DQMonFlags.monManDataType == 'heavyioncollisions':
        axisranges["njBins"] = [ 0., 100.0, 1. ]
        # -- 1-dim binning
        axisranges["eBins"] = [ 0.0*GeV, 5000.0*GeV, 5.0*GeV ] # energy bin
        axisranges["etBins"] = [ 0.0*GeV, 1000.0*GeV, 2.0*GeV ] # energy bin
        axisranges["econBins"] = [ -2.0*GeV, 120.0*GeV, 0.6*GeV ] # energy bin
        # -- 2-dim binning
        axisranges["eBinsHD"] = [ 0.0*GeV, 1000.0*GeV, 5.0*GeV ]
        axisranges["etBinsHD"] = [ 0.0*GeV, 500.0*GeV, 2.0*GeV ] # energy bin
    return axisranges
| rushioda/PIXELVALID_athena | athena/Reconstruction/Jet/JetMonitoring/share/JetMonitoring_histoaxis.py | JetMonitoring_histoaxis.py | py | 4,164 | python | en | code | 1 | github-code | 13 |
23275254809 | import numpy as np
import cv2
import socket, threading, json
from cv2_utils import find_vidcapture
COMMAND_PORT = 9396
DEBUG = False
INSTRUCTIONS = \
"""
Type Q to quit
Type R to report the coordinates
Double click on image to set target
"""
def process_image(image):
    """
    Segment the red object in an HSV image and locate its centroid.

    :param image: HSV image array (H, W, 3), e.g. from cv2.cvtColor(..., COLOR_BGR2HSV)
    :return: mask_red, mask_red_morphed, centroid_x, centroid_y
             (centroid is (-1, -1) when no red component is found)

    Bug fix: the original thresholded the module-level ``image_hsv`` global
    instead of the ``image`` parameter; the function now uses its argument
    (the sole caller passes image_hsv, so observable behaviour is unchanged).
    """
    # Threshold: high saturation and high hue channel mark the red object.
    mask_red = (image[:, :, 1] > 120) & (image[:, :, 0] > 150)
    # Create a structuring element for OpenCV morphological operations
    element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    # perform dilations and erosions on the mask obtain a nice object without gaps, and without background noise
    mask_red = mask_red.astype(np.uint8)  # convert type
    mask_red_morphed = cv2.dilate(mask_red, element)
    mask_red_morphed = cv2.erode(mask_red_morphed, element, iterations=2)
    mask_red_morphed = cv2.dilate(mask_red_morphed, element)
    mask_red_morphed = cv2.erode(mask_red_morphed, element, iterations=2)
    mask_red_morphed = cv2.dilate(mask_red_morphed, element)
    # find centroids of connected components (label 0 is the background)
    retval, labels, stats, centroids = cv2.connectedComponentsWithStats(mask_red_morphed)
    if retval <= 1:
        # No centroid found
        cx, cy = -1, -1
    else:
        # find largest component; column 4 of stats is the area
        max_i = np.argmax(stats[1:, 4]) + 1
        if DEBUG:
            max_area = stats[max_i, 4]
            print("The largest component is", max_i, "of area", max_area, "and has centroid", centroids[max_i])
        cx, cy = centroids[max_i]
    return mask_red, mask_red_morphed, cx, cy
def _draw_bar(image, cx, cy, half_w, half_h, color):
    """Fill an axis-aligned bar centred on (cx, cy) in place, clamped to
    the image bounds (upper bounds clamp to height-1/width-1, exclusive,
    exactly as the original slice arithmetic did)."""
    height, width, _ = image.shape
    row_lo = max(int(cy - half_h), 0)
    row_hi = min(int(cy + half_h), height - 1)
    col_lo = max(int(cx - half_w), 0)
    col_hi = min(int(cx + half_w), width - 1)
    image[row_lo:row_hi, col_lo:col_hi, :] = color


def add_markers_to_image(image_bgr, head=(-1, -1), target=(-1, -1)):
    """
    Add cross markers for head and target to a copy of the image. Original image is not changed.

    :param image_bgr: 3D np array (H, W, 3) in BGR order
    :param head: (x, y) of head; drawn as a thick white cross
    :param target: (x, y) of target; drawn as a thinner blue (BGR 255,0,0) cross
    :return: new 3D np array with the markers drawn

    Refactored: the four near-identical clamped slice assignments are now a
    single helper, drawn in the same order (head first, then target) so any
    overlap is painted identically.
    """
    image_markers = np.copy(image_bgr)
    # Head cross: half-length 25, half-thickness 3, white.
    _draw_bar(image_markers, head[0], head[1], 3, 25, (255, 255, 255))   # vertical arm
    _draw_bar(image_markers, head[0], head[1], 25, 3, (255, 255, 255))   # horizontal arm
    # Target cross: half-length 20, half-thickness 2, blue in BGR.
    _draw_bar(image_markers, target[0], target[1], 2, 20, (255, 0, 0))   # vertical arm
    _draw_bar(image_markers, target[0], target[1], 20, 2, (255, 0, 0))   # horizontal arm
    return image_markers
def mouse_callback(event, x, y, flags, param):
    """OpenCV mouse callback: a double left-click sets the shared target point."""
    # mouse callback function
    if event == cv2.EVENT_LBUTTONDBLCLK:
        print(f"Double clicked on ({x},{y})")
        global target_x, target_y
        target_x = x
        target_y = y
# Thread to read incoming cmds over a socket
class ClientThread(threading.Thread):
    """Serves one connected client: reads newline-delimited JSON commands
    of the form {"start": [funcname, p1, p2, p3, p4]} and answers
    get_current / get_target / set_target requests against the shared
    module-level coordinates."""

    def __init__(self, socket):
        # NOTE(review): the parameter name shadows the `socket` module here;
        # it works only because the module is not referenced in __init__.
        super().__init__()
        self.socket = socket

    def run(self):
        DEBUG = True  # NOTE(review): shadows the module-level DEBUG flag — confirm intended
        global target_x, target_y
        while True:
            # Read line from socket, one byte at a time, until newline.
            line = ''
            while True:
                c = self.socket.recv(1, socket.MSG_WAITALL).decode("ascii")
                if c == '\n':
                    break
                else:
                    line += c
            # Parse JSON
            if DEBUG:
                print("SERVER GOT:", line)
            data = json.loads(line)
            # Unwrap the "start" envelope if present.
            if 'start' in data:
                data = data['start']
            if len(data) == 5:
                funcname, p1, p2, p3, p4 = data
                if funcname == 'get_current':
                    # Reply with the most recently detected head position.
                    if DEBUG:
                        print("Processing 'get_current'")
                        print(f"x = {head_x}, y = {head_y}")
                    outdata = dict(x=head_x, y=head_y)
                    outdata_json_str = json.dumps(outdata) + '\n'
                    outdata_bytes = bytes(outdata_json_str, "utf8")
                    print(f"outdata_bytes = {outdata_bytes}")
                    self.socket.sendall(outdata_bytes)
                if funcname == 'get_target':
                    # Reply with the current target position.
                    if DEBUG:
                        print("Processing 'get_target'")
                        print(f"current target is: x = {target_x}, y = {target_y}")
                    outdata = dict(x=target_x, y=target_y)
                    outdata_json_str = json.dumps(outdata) + '\n'
                    outdata_bytes = bytes(outdata_json_str, "utf8")
                    print(f"outdata_bytes = {outdata_bytes}")
                    self.socket.sendall(outdata_bytes)
                if funcname == 'set_target':
                    # Update the shared target coordinates (no reply is sent).
                    if DEBUG:
                        print(f"Processing 'set_target {p1}, {p2}'")
                    target_x = p1
                    target_y = p2
# ServerThread listens to incoming connections and then starts ClientThread to process commands
class ServerThread(threading.Thread):
    """Accept loop on COMMAND_PORT: spawns a ClientThread per connection."""

    def __init__(self):
        super().__init__()
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow quick restarts on the same port.
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.s.bind(('0.0.0.0', COMMAND_PORT))
        self.s.listen(10)
        print(f"Listening for incoming connection on port {COMMAND_PORT}")
        # Keep references so the client threads stay reachable.
        self.threads = []

    def run(self):
        while True:
            # Accept a client
            conn, addr = self.s.accept()
            print(f"Incoming connection from {addr}; starting command-processing thread")
            # Create and start the command processing thread
            t = ClientThread(conn)
            t.start()
            self.threads.append(t)
# globals shared between the GUI loop and the command-server threads;
# (-1, -1) means "not set / not detected yet".
target_x = -1
target_y = -1
head_x = -1
head_y = -1
if __name__ == "__main__":
print("Starting...")
# Find the right camera
#cv2.VideoCapture(0, cv2.CAP_DSHOW)
# find cam w specific resolution (only way to identify cam in set of multiple cams)
videocap, _ = find_vidcapture(1280, 960)
if videocap:
print("Found camera")
else:
print("Error! Camera not found... exiting")
# Set actual resolution to use
videocap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
videocap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
# Start listening for control connections
server = ServerThread()
server.start()
print(f"Instructions:{INSTRUCTIONS}")
print("Processing images...")
iteration_counter = 0
cv2.namedWindow('Camera Image')
cv2.setMouseCallback('Camera Image', mouse_callback)
while True:
_, frame = videocap.read()
if DEBUG:
cv2.imshow('Original Camera Image', frame)
# wait 1 ms, if arg is 0, we wait forever :-(
keycode = cv2.waitKey(1)
if keycode & 0xFF == ord('q'):
break
if keycode & 0xFF == ord('r') or iteration_counter % 100 == 0:
print(f"Iteration {iteration_counter}: center = ({head_x}, {head_y}), target = ({target_x}, {target_y})")
image_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
image_red, image_red_morphed, head_x, head_y = process_image(image_hsv)
image_red = image_red * 128
image_red_morphed = image_red_morphed * 128
if DEBUG:
cv2.imshow('Red', image_red)
cv2.imshow('Red Morphed', image_red_morphed)
frame_markers = add_markers_to_image(frame, head=(head_x, head_y), target=(target_x, target_y))
# show camera frame with markers
cv2.imshow('Camera Image', frame_markers)
iteration_counter += 1
# closing all open windows
cv2.destroyAllWindows()
print("done")
| yoeriapts/ehb_robotics_delivery | cam_ctrlr.py | cam_ctrlr.py | py | 8,319 | python | en | code | 0 | github-code | 13 |
72952739218 | from transformers import AutoProcessor, AutoModel
import scipy
import numpy as np

# NOTE(review): `max_lenght` (sic) is defined but never used below -- the
# processor call that would consume it is commented out. Confirm before removing.
max_lenght = 128

# Load the Bark text-to-speech processor and model, caching the downloads
# locally under checkpoints/bark/.
processor = AutoProcessor.from_pretrained(
    "suno/bark",
    cache_dir="checkpoints/bark/processor",
)

model = AutoModel.from_pretrained(
    "suno/bark",
    cache_dir="checkpoints/bark/model",
    # torch_dtype=torch.float16,
)
# Move to GPU and enable BetterTransformer fast attention kernels.
model.to("cuda")
model = model.to_bettertransformer()
# model.enable_cpu_offload()

# Output sample rate is dictated by the model's generation config.
sampling_rate = model.generation_config.sample_rate

# Synthesize each sentence independently and collect the raw waveforms.
wave_collector = []
for sentence in [
    "This paper presents fairseq S^2, a fairseq extension for speech synthesis.",
    "We implement a number of autoregressive (AR) and non-AR text-to-speech models, and their multi-speaker variants.",
    "To enable training speech synthesis models with less curated data, a number of preprocessing tools are built and their importance is shown empirically.",
    "To facilitate faster iteration of development and analysis, a suite of automatic metrics is included.",
    "Apart from the features added specifically for this extension, fairseq S^2 also benefits from the scalability offered by fairseq and can be easily integrated with other state-of-the-art systems provided in this framework.",
    "The code, documentation, and pre-trained models are available at this https URL.",
]:
    inputs = processor(
        text=sentence,
        voice_preset="en_speaker_1",
        return_tensors="pt",
        # max_length=max_lenght,
    )
    # Move every input tensor onto the GPU alongside the model.
    for key, value in inputs.items():
        inputs[key] = value.to("cuda")

    speech_values = model.generate(**inputs,)

    # Back to CPU numpy so the chunks can be concatenated and written out.
    wave_collector.append(
        speech_values.cpu().numpy().flatten()
    )

# Concatenate all sentence waveforms into a single WAV file.
scipy.io.wavfile.write(
    "bark_out.wav",
    rate=sampling_rate,
    data=np.concatenate(wave_collector),
)
| andompesta/pytorch-text-to-speech | bark.py | bark.py | py | 1,766 | python | en | code | 0 | github-code | 13 |
7296081656 | # -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
from django_plotly_dash import DjangoDash
import pandas as pd
from dash.dependencies import Input, Output
from django.conf import settings
# Module-level Dash app setup: stylesheet, data load, and static layout.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']

app = DjangoDash('DashPlot1', external_stylesheets=external_stylesheets)
#app = dash.Dash(__name__)

# Space-separated thermistor log; columns are assigned explicitly because
# the file has no header row.
file_name = settings.MEDIA_ROOT + "/thermistorK.txt"
data = pd.read_csv(file_name, 
    sep = " ", header=None, names=["time","thermistor", "value"])
#print(data.info())

# Distinct thermistor ids populate the dropdown choices.
available_indicators = data['thermistor'].unique()

app.layout = html.Div(children=[
    #html.H1(children='IFE Dashboard'),

    #html.Div(children='''
    #    Created using Dash: A web application framework for Python.
    #'''),
    dcc.Graph(id='example-graph'),
    html.Div([
        dcc.Dropdown(
            id='thermistor-num',
            options=[{'label': i, 'value': i} for i in available_indicators],
            # NOTE(review): the initial value is a placeholder *string*; the
            # callback iterates it character by character until the user picks
            # real entries -- confirm this is intentional.
            value="thermistor number",
            multi=True,
            placeholder="Select a thermistor number"
        )
    ]),
])
@app.callback(
    Output('example-graph', 'figure'),
    [Input('thermistor-num', 'value')])
def update_figure(thermistor_value):
    """Rebuild the scatter figure for every thermistor selected in the dropdown."""
    def scatter_for(num):
        # All rows for one thermistor become a single marker trace.
        subset = data[data['thermistor'] == num]
        return dict(
            x=subset['time'],
            y=subset['value'],
            text=subset['thermistor'],
            mode='markers',
            opacity=0.7,
            marker={
                'size': 5,
                'line': {'width': 0.5, 'color': 'white'}
            },
            name=num,
        )

    traces = [scatter_for(num) for num in thermistor_value]
    layout = dict(
        xaxis={'title': 'Time(s)'},
        yaxis={'title': 'ADC Bits'},
    )
    return {'data': traces, 'layout': layout}
if __name__ == '__main__':
app.run_server(debug=True) | JoeLopez333/django_tut2 | blog/dash_app.py | dash_app.py | py | 2,017 | python | en | code | 0 | github-code | 13 |
73282859859 | import os
import re
import logging
import pathlib
from multiprocessing.dummy import Pool
from PyQt5 import QtCore, QtGui
from lector import sorter
from lector import database
# The following have to be separate
try:
from lector.parsers.pdf import render_pdf_page
except ImportError:
pass
try:
from lector.parsers.djvu import render_djvu_page
except ImportError:
pass
logger = logging.getLogger(__name__)
class BackGroundTabUpdate(QtCore.QThread):
    """Background thread that flushes per-book tab metadata to the database."""

    # Database column name -> key in each per-book metadata dict.
    _COLUMN_KEYS = (
        ('Position', 'position'),
        ('LastAccessed', 'last_accessed'),
        ('Bookmarks', 'bookmarks'),
        ('Annotations', 'annotations'))

    def __init__(self, database_path, all_metadata, parent=None):
        super(BackGroundTabUpdate, self).__init__(parent)
        self.database_path = database_path
        self.all_metadata = all_metadata

    def run(self):
        # Persist every open tab's state, keyed by the book's hash.
        for metadata in self.all_metadata:
            update_values = {
                column: metadata[key] for column, key in self._COLUMN_KEYS}
            database.DatabaseFunctions(self.database_path).modify_metadata(
                update_values, metadata['hash'])
class BackGroundBookAddition(QtCore.QThread):
    """Background thread that parses new books and adds them to the library.

    Parsing errors are collected on ``self.errors`` for the caller to
    inspect after the thread finishes.
    """

    def __init__(self, file_list, database_path, addition_mode, main_window, parent=None):
        super(BackGroundBookAddition, self).__init__(parent)
        self.file_list = file_list
        self.database_path = database_path
        self.addition_mode = addition_mode
        self.main_window = main_window
        self.errors = []

        # Automatic scans prune missing files from the models; manual
        # additions leave existing entries untouched.
        self.prune_required = True
        if self.addition_mode == 'manual':
            self.prune_required = False

    def run(self):
        # Parse the files (BookSorter handles its own threading).
        books = sorter.BookSorter(
            self.file_list,
            ('addition', self.addition_mode),
            self.database_path,
            self.main_window.settings,
            self.main_window.temp_dir.path())

        parsed_books, self.errors = books.initiate_threads()
        # Update the in-memory library model first, then persist.
        self.main_window.lib_ref.generate_model('addition', parsed_books, False)
        database.DatabaseFunctions(self.database_path).add_to_database(parsed_books)

        if self.prune_required:
            self.main_window.lib_ref.prune_models(self.file_list)
class BackGroundBookDeletion(QtCore.QThread):
    """Background thread that deletes a batch of books (by hash) from the database."""

    def __init__(self, hash_list, database_path, parent=None):
        super(BackGroundBookDeletion, self).__init__(parent)
        self.hash_list = hash_list
        self.database_path = database_path

    def run(self):
        # 'Hash' is the database column matched against every entry of hash_list.
        db_functions = database.DatabaseFunctions(self.database_path)
        db_functions.delete_from_database('Hash', self.hash_list)
class BackGroundBookSearch(QtCore.QThread):
    """Background thread that scans checked directories for parseable books.

    ``data_list`` rows are ``[path, ..., ..., check_state]``; checked rows are
    scanned, unchecked rows (and everything beneath them) are excluded.
    Results accumulate in ``self.valid_files``.
    """

    def __init__(self, data_list, parent=None):
        super(BackGroundBookSearch, self).__init__(parent)
        self.valid_files = []

        # Filter for checked directories
        self.valid_directories = [
            [i[0], i[1], i[2]] for i in data_list if i[
                3] == QtCore.Qt.Checked and os.path.exists(i[0])]
        self.unwanted_directories = [
            pathlib.Path(i[0]) for i in data_list if i[3] == QtCore.Qt.Unchecked]

    def run(self):
        def is_wanted(directory):
            # A directory is excluded if any unchecked directory is one of
            # its ancestors.
            directory_parents = pathlib.Path(directory).parents
            for i in self.unwanted_directories:
                if i in directory_parents:
                    return False
            return True

        def traverse_directory(incoming_data):
            root_directory = incoming_data[0]
            for directory, subdirs, files in os.walk(root_directory, topdown=True):
                # In-place pruning of subdirs: because topdown=True, os.walk
                # honors this and never descends into unwanted subtrees.
                subdirs[:] = [d for d in subdirs if is_wanted(os.path.join(directory, d))]
                for filename in files:
                    # Keep files whose extension (sans dot) has a parser.
                    if os.path.splitext(filename)[1][1:] in sorter.available_parsers:
                        self.valid_files.append(os.path.join(directory, filename))

        def initiate_threads():
            # Thread pool (multiprocessing.dummy), so list appends share memory.
            _pool = Pool(5)
            _pool.map(traverse_directory, self.valid_directories)
            _pool.close()
            _pool.join()

        if self.valid_directories:
            initiate_threads()
            if self.valid_files:
                info_string = str(len(self.valid_files)) + ' books found'
                logger.info(info_string)
            else:
                logger.error('No books found on scan')
        else:
            logger.error('No valid directories')
class BackGroundCacheRefill(QtCore.QThread):
    """Background thread that slides the page-image cache one step.

    ``image_cache`` appears to be a fixed-size window of (page, pixmap)
    tuples over ``all_pages``; depending on which entry is being evicted
    the window is shifted backward or forward by one page.
    NOTE(review): the code assumes a 4-slot cache (see ``pop(3)``) --
    confirm against the caller.
    """

    def __init__(self, image_cache, remove_value, filetype, book, all_pages, parent=None):
        super(BackGroundCacheRefill, self).__init__(parent)

        # TODO
        # Return with only the first image in case of a cache miss
        # Rebuilding the entire n image cache takes considerably longer

        self.image_cache = image_cache
        self.remove_value = remove_value
        self.filetype = filetype
        self.book = book
        self.all_pages = all_pages

    def run(self):
        def load_page(current_page):
            # Render one page to a QPixmap; the access pattern differs per
            # container format.
            pixmap = QtGui.QPixmap()

            if self.filetype in ('cbz', 'cbr'):
                page_data = self.book.read(current_page)
                pixmap.loadFromData(page_data)
            elif self.filetype == 'pdf':
                page_data = self.book.loadPage(current_page)
                pixmap = render_pdf_page(page_data)
            elif self.filetype == 'djvu':
                page_data = self.book.pages[current_page]
                pixmap = render_djvu_page(page_data)
            return pixmap

        remove_index = self.image_cache.index(self.remove_value)
        if remove_index == 1:
            # Evicting slot 1: slide the window backward -- drop the last
            # slot and prepend the page before the current first one.
            first_path = self.image_cache[0][0]
            self.image_cache.pop(3)

            previous_page = self.all_pages[self.all_pages.index(first_path) - 1]
            refill_pixmap = load_page(previous_page)
            self.image_cache.insert(0, (previous_page, refill_pixmap))

        else:
            # Otherwise slide forward: drop the first slot and append the
            # page after the current last one (None past the end of book).
            self.image_cache[0] = self.image_cache[1]
            self.image_cache.pop(1)

            try:
                last_page = self.image_cache[2][0]
                next_page = self.all_pages[self.all_pages.index(last_page) + 1]
                refill_pixmap = load_page(next_page)
                self.image_cache.append((next_page, refill_pixmap))
            except (IndexError, TypeError):
                # IndexError: already at the end; TypeError: slot was None.
                self.image_cache.append(None)
class BackGroundTextSearch(QtCore.QThread):
    """Background thread that searches chapter HTML for a text string.

    Results land in ``self.search_results`` as
    ``{chapter_title: [(position, context_html, needle, chapter_number), ...]}``.
    Call :meth:`set_search_options` before starting the thread.
    """

    def __init__(self):
        super(BackGroundTextSearch, self).__init__(None)
        self.search_content = None   # iterable of (title, html, chapter_number)
        self.search_text = None      # needle; searches shorter than 3 chars are ignored
        self.case_sensitive = False
        self.match_words = False
        self.search_results = []

    def set_search_options(
            self, search_content, search_text,
            case_sensitive, match_words):
        # Stash the options; run() reads them when the thread starts.
        self.search_content = search_content
        self.search_text = search_text
        self.case_sensitive = case_sensitive
        self.match_words = match_words

    def run(self):
        # Very short needles would flood the results; ignore them.
        if not self.search_text or len(self.search_text) < 3:
            return

        def get_surrounding_text(textCursor, words_before):
            # Select ~words_before words on each side of the cursor and
            # return the selection as a single line of plain text.
            textCursor.movePosition(
                QtGui.QTextCursor.WordLeft,
                QtGui.QTextCursor.MoveAnchor,
                words_before)
            textCursor.movePosition(
                QtGui.QTextCursor.NextWord,
                QtGui.QTextCursor.KeepAnchor,
                words_before * 2)
            cursor_selection = textCursor.selection().toPlainText()
            return cursor_selection.replace('\n', '')

        self.search_results = {}

        # Create a new QTextDocument of each chapter and iterate
        # through it looking for hits
        for i in self.search_content:
            chapter_title = i[0]
            chapterDocument = QtGui.QTextDocument()
            chapterDocument.setHtml(i[1])
            chapter_number = i[2]

            # Translate the stored options into Qt find flags.
            findFlags = QtGui.QTextDocument.FindFlags(0)
            if self.match_words:
                findFlags = findFlags | QtGui.QTextDocument.FindWholeWords
            if self.case_sensitive:
                findFlags = findFlags | QtGui.QTextDocument.FindCaseSensitively

            findResultCursor = chapterDocument.find(self.search_text, 0, findFlags)
            while not findResultCursor.isNull():
                result_position = findResultCursor.position()

                # Widen the context window until it no longer starts
                # mid-sentence (i.e. with '. ' or ', ').
                words_before = 3
                while True:
                    surroundingTextCursor = QtGui.QTextCursor(chapterDocument)
                    surroundingTextCursor.setPosition(
                        result_position, QtGui.QTextCursor.MoveAnchor)
                    surrounding_text = get_surrounding_text(
                        surroundingTextCursor, words_before)
                    words_before += 1
                    if surrounding_text[:2] not in ('. ', ', '):
                        break

                # Case insensitive replace for find results
                replace_pattern = re.compile(re.escape(self.search_text), re.IGNORECASE)
                surrounding_text = replace_pattern.sub(
                    f'<b>{self.search_text}</b>', surrounding_text)

                result_tuple = (
                    result_position, surrounding_text, self.search_text, chapter_number)
                try:
                    self.search_results[chapter_title].append(result_tuple)
                except KeyError:
                    self.search_results[chapter_title] = [result_tuple]

                # Resume the search just past the current hit.
                new_position = result_position + len(self.search_text)
                findResultCursor = chapterDocument.find(
                    self.search_text, new_position, findFlags)
| BasioMeusPuga/Lector | lector/threaded.py | threaded.py | py | 9,711 | python | en | code | 1,479 | github-code | 13 |
45466278006 | import pygame as pg
from parameters import PARAMS
class Widget(pg.sprite.Sprite):
    ''' General class handling widgets.

    Holds a filled pygame Surface plus its rect, a display name and the
    fill color. Subclasses position themselves relative to a parent
    widget before delegating here.
    '''
    def __init__(self, x, y, width, height, color, parent=None, name=""):
        super().__init__()
        # NOTE(review): this stores the super() proxy, not the ``parent``
        # argument -- the parameter is effectively ignored. Looks like a
        # bug, but subclasses do the same; confirm before changing.
        self.parent = super()
        self.image = pg.Surface([width, height])
        self.image.fill(color)
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        self.rect.width = width
        self.rect.height = height
        self.name = name
        self.color = color

    def belongs(self, pos):
        ''' Check if pos is on the widget.

        Returns True when (x, y) lies within the widget's rect
        (inclusive of both edges).
        '''
        x_, y_ = pos
        xcond = self.rect.x <= x_ <= self.rect.x + self.rect.width
        ycond = self.rect.y <= y_ <= self.rect.y + self.rect.height
        return xcond and ycond
class ToolBar(Widget):
    """Plain rectangular widget used as the application's tool bar."""

    def __init__(self, x, y, width, height, color, parent=None, name=""):
        # The base class already builds and fills the surface; repaint
        # afterwards to keep the original behavior.
        super().__init__(x, y, width, height, color)
        self.name = name
        self.parent = super()
        self.image.fill(color)
class Label(Widget):
    '''Text widget rendered with a system font.

    When a ``parent`` is given, (x, y) is interpreted as an offset from
    the parent's centre; otherwise it is an absolute screen position.
    '''
    def __init__(self, x, y, text, font='Verdana', fontsize=14, color=PARAMS["color"]["w"], parent=None, name=""):
        if parent is not None:
            # Re-anchor relative to the parent's centre point.
            # (A leftover debug print of the computed position was removed here.)
            x += parent.rect.x + parent.rect.width//2
            y += parent.rect.y + parent.rect.height//2
        # NOTE(review): the rect width is approximated by len(text) characters,
        # not the rendered pixel width -- confirm intended.
        super().__init__(x, y, len(text), fontsize, color, name=name)
        self.font = pg.font.SysFont(font, fontsize)
        # Antialiased render (flag 1) of the label text.
        self.image = self.font.render(text, 1, color)
class Button(Widget):
    ''' Button widget - contains a label.

    (x, y) is parent-centre-relative when ``parent`` is given. The label
    text is horizontally centred on the button using the font's measured
    pixel width.
    '''
    def __init__(self, x, y, width, height, color, 
                text="", font='Verdana', fontsize=14, text_color=PARAMS["color"]["k"],
                parent=None, name=""):
        if parent is not None:
            x += parent.rect.x + parent.rect.width//2
            y += parent.rect.y + parent.rect.height//2
        # NOTE(review): ``parent=self`` is passed before __init__ finishes,
        # and Widget ignores the parameter anyway -- confirm before relying
        # on it.
        super().__init__(x, y, width, height, color, parent=self, name=name)
        # Centre the label: shift left by half the rendered text width.
        x_text = x + width//2 - pg.font.SysFont(font, fontsize).size(text)[0]//2
        self.text = Label(x_text, y, text, font=font, fontsize=fontsize, color=text_color)
class ColorPalette(Button):
    '''A single-row palette of selectable color cells (black/white/red/green).

    Each cell is a small Widget stored in ``self.color_surfaces``; the
    palette itself is a Button sized to contain all cells plus margins.
    '''
    def __init__(self, x, y, cell_size=20, margin=5, parent=None, name=""):
        if parent is not None:
            x += parent.rect.x + parent.rect.width//2
            y += parent.rect.y + parent.rect.height//2

        # for now, hard code the palette in this class.
        # in the future, possibility to add colors as args and
        # deduce the palette shape and behavior.
        self.n_cols = 4
        self.n_rows = 1
        self.cell_size = cell_size
        self.margin = margin

        self.colors = {c:PARAMS["color"][c] for c in ["k","w","r","g"]}# hard-coded
        self.color_surfaces = []
        for i, (name_, c) in enumerate(self.colors.items()):
            # Cells are laid out left-to-right with a margin between them.
            xc = x + i*(cell_size+margin) + margin
            # for y, I consider i=0 systematically for one row (for now).
            yc = y + 0*(cell_size+margin) + margin
            surf = Widget(xc, yc, cell_size, cell_size, c, name=name+"c="+name_)
            self.color_surfaces.append(surf)

        # build layout
        width = self.n_cols * cell_size + (self.n_cols+1)*margin
        height = self.n_rows * cell_size + (self.n_rows+1)*margin
        # Background color of the palette frame is hard-coded purple.
        super().__init__(x, y, width, height, (90,0,150), name=name)
class Block(Widget):
    """Draggable handle displayed on top of a Slider."""

    def __init__(self, x, y, width, height, color, parent=None, name=""):
        # Coordinates are parent-centre-relative whenever a parent is supplied.
        anchored = parent is not None
        if anchored:
            x += parent.rect.x + parent.rect.width//2
            y += parent.rect.y + parent.rect.height//2
        super().__init__(x, y, width, height, color)
class Slider(Widget):
    ''' Slider widget.
    Includes a slider block.

    A horizontal track whose draggable Block maps its x position linearly
    onto [min_value, max_value]. A Label with the slider's name is shown
    above the track.
    '''
    def __init__(self, x, y, width, height, color, block_color, init_value, min_value, max_value, parent=None, name=""):
        if parent is not None:
            x += parent.rect.x + parent.rect.width//2
            y += parent.rect.y + parent.rect.height//2
        super().__init__(x, y, width, height, color)
        self.parent = super()
        self.image.fill(color)

        self.block_size = height * 4 # TODO: careful with the '4'. changing it changes the centering
        # Block is positioned relative to this slider (parent=self).
        self.slider_block = Block(-self.block_size//4, -self.block_size//4-height, 
                                  self.block_size, self.block_size, 
                                  block_color, parent=self, name=name+"_block")
        self.block_color = block_color
        self.slider_block.image.fill(block_color) # change color to parameter later
        # set block pos according to init_value
        self.slider_block.rect.x = init_value / (max_value-min_value) * width + x
        
        self.width = width
        self.height = height
        self.x = x
        self.y = y
        self.rect.x = x
        self.rect.y = y
        self.min_value = min_value
        self.max_value = max_value
        # NOTE(review): the stored value starts at min_value even when
        # init_value differs -- it is only refreshed on the first drag.
        self.value = min_value
        self.name = name
        self.label = Label(-self.width, -PARAMS["slider"]["thickness"] - 4, name, color=PARAMS["color"]["w"], 
                            parent=self, name=name)
    
    def update_block_pos(self, pos):
        ''' Sets the position of the slider block along the slider.
            Sets the corresponding value accordingly.
            The current implementation is only for horizontal sliders.

            Returns (block, value) when pos falls on the track; returns
            None implicitly otherwise.
        '''
        x, y = pos
        if self.rect.x <= x <= self.rect.x + self.rect.width:
            # A brand-new Widget is created for the block each time rather
            # than moving the existing one.
            self.slider_block = Widget(x-self.block_size//4, self.y-self.block_size//4-self.height//2, 
                                        self.block_size, self.block_size, 
                                        (145,140,200), self.parent)
            self.block_color = PARAMS["slider"]["block_color"]
            self.slider_block.image.fill(self.block_color)
            # Linear map from pixel offset to [min_value, max_value].
            self.value = x - self.x
            self.value = self.value / self.width * (self.max_value - self.min_value) + self.min_value
            self.value = int(self.value)
            return self.slider_block, self.value

    def belongs(self, pos):
        ''' Checks if pos is in the slider.
            Essentially useful only for the block.
            Returns 2 when on the block.

            Returns 2 when pos is on the block, 1 when on the track,
            0 otherwise.
        '''
        x_, y_ = pos
        xcond = self.slider_block.rect.x <= x_ <= self.slider_block.rect.x + self.slider_block.rect.width
        ycond = self.slider_block.rect.y <= y_ <= self.slider_block.rect.y + self.slider_block.rect.height
        if xcond and ycond:
            return 2
        elif self.parent.belongs(pos):
            return 1
        else:
            return 0
| MaGnaFlo/BlackBoard | widgets.py | widgets.py | py | 5,851 | python | en | code | 0 | github-code | 13 |
349888525 | name = str(input("please enter your name: "))
# Read the score and print the grade band.
# Bands: A 70-100, B 60-69, C 50-59, D 45-49, E 40-44; above 100 is
# invalid; anything else (below 40, including negatives) is a fail.
score = int(input("Please enter your score: "))
if 69 < score < 101:
    print("You got an 'A', keep it up")
elif 59 < score < 70:
    print("You got a 'B', keep it up")
elif 49 < score < 60:
    print("You got a 'C', you can be better")
elif 44 < score < 50:
    print("You got a 'D', Try to improve your rating")
elif 39 < score < 45:
    # Fixed typo in the original message ("got and 'E'").
    print("You got an 'E', your rating is poor, you definitely have to work on it")
elif score > 100:
    # Fixed garbled message ("invalid, scorePlease input").
    print("The score you entered is invalid, please input the right score")
else:
    # Fixed typo in the original message ("work n yourself").
    print("you failed, go work on yourself and try again another time")
| ReroSantos/Code-Lagos | Grade system test.py | Grade system test.py | py | 965 | python | en | code | 0 | github-code | 13 |
9098015916 | import pandas as pd
import datetime
import smtplib
import os
from pandas.tseries.offsets import BDay
current_path = os.getcwd()
print(current_path)
os.chdir(current_path)
GMAIL_ID = input("Enter your Gmail ID: ")
GMAIL_PSWD = input("Enter you Gmail Password: ")
def sendEmail(to, sub, msg):
    """Send a plain-text mail through Gmail's SMTP server and log it to stdout."""
    print(f"Email to {to} sent: \nSubject: {sub}, \nMessage: {msg}")
    # STARTTLS on port 587, authenticated with the module-level credentials.
    smtp_server = smtplib.SMTP('smtp.gmail.com', 587)
    smtp_server.starttls()
    smtp_server.login(GMAIL_ID, GMAIL_PSWD)
    body = f"Subject: {sub} \n\n {msg}"
    smtp_server.sendmail(GMAIL_ID, to, body)
    smtp_server.quit()
if __name__ == "__main__":
    # Load the friends spreadsheet, wish everyone whose birthday (day-month)
    # is today and who has not been wished this year, then record the wish.
    df = pd.read_excel("friends_data.xlsx")
    today = datetime.datetime.now().strftime("%d-%m")
    yearNow = datetime.datetime.now().strftime("%Y")
    writeInd = []  # row indices that received a wish and need updating
    for index, item in df.iterrows():
        # Birthday is stored as "dd-mm-yyyy"; compare only day-month.
        bday = item['Birthday']
        bday = datetime.datetime.strptime(bday, "%d-%m-%Y")
        bday = bday.strftime("%d-%m")
        if (today == bday) and yearNow not in str(item['LastWishedYear']):
            sendEmail(item['Email'], "Happy Birthday", item['Dialogue'])
            writeInd.append(index)
    # Only rewrite the spreadsheet when something changed (the original
    # `writeInd != None` check was always true and rewrote the file daily).
    if writeInd:
        for i in writeInd:
            # Append this year to the comma-separated LastWishedYear history.
            oldYear = df.loc[i, 'LastWishedYear']
            df.loc[i, 'LastWishedYear'] = str(oldYear) +", " + str(yearNow)
        df.to_excel('friends_data.xlsx', index = False)
| MuhammedMusharaf007/Automatic_Birthday_wisher_MM007 | wisher.py | wisher.py | py | 1,316 | python | en | code | 2 | github-code | 13 |
41024001051 | import os
import torch
import numpy as np
import pandas as pd
from typing import List
from torch_geometric.data import Batch, Data
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from polymerlearn.utils import GraphDataset
def get_vector(
        data: pd.DataFrame, 
        prop: str = 'Mw (PS)', 
        fill_value: float = None,
        use_log: bool = True):
    '''
    Get one property column as a vector to be added as a sample-wide
    feature in the model. Missing entries are imputed, and the result
    is optionally log-transformed.

    Args:
        data (pd.DataFrame): Base dataframe from which to extract the data.
        prop (str, optional): Name of column (property) for which to get the
            vector. (:default: :obj:`Mw (PS)`)
        fill_value (float, optional): Value with which to fill missing values
            in the column. If `None`, missing values are filled with the
            upper median of the observed values (the element at index
            ``count // 2`` of the sorted non-missing values).
            (:default: :obj:`None`)
        use_log (bool): Log transforms the values if true. The '%TMP'
            column is never log-transformed, regardless of this flag.

    :rtype: pd.Series
    '''
    if fill_value is None:
        # Upper-median imputation; mask computed once instead of three times.
        observed = data[prop].notna()
        to_fill = sorted(data[prop][observed])[int(observed.sum() / 2)]
    else:
        to_fill = fill_value

    filled = data[prop].fillna(to_fill)
    if prop != '%TMP' and use_log:
        return np.log(filled)
    # Don't log transform TMP
    return filled
def get_IV_add(data):
    '''
    Return the standard IV additional data (i.e. resin properties) used
    in the paper: log-transformed Mw, AN and OHN plus raw %TMP, stacked
    column-wise into a (num_samples, 4) array.

    Args:
        data (pd.DataFrame): Dataframe holding the resin property columns.
    '''
    columns = [
        get_vector(data, prop='Mw (PS)').to_numpy(),
        get_vector(data, prop='AN').to_numpy(),
        get_vector(data, prop='OHN').to_numpy(),
        get_vector(data, prop='%TMP', fill_value=0).to_numpy(),
    ]
    return np.stack(columns).T
def get_IV_add_nolog(data):
    '''Same resin-property matrix as :func:`get_IV_add`, but with no log
    transform applied to any column. Returns a (num_samples, 4) array.'''
    columns = [
        get_vector(data, prop='Mw (PS)', use_log=False).to_numpy(),
        get_vector(data, prop='AN', use_log=False).to_numpy(),
        get_vector(data, prop='OHN', use_log=False).to_numpy(),
        get_vector(data, prop='%TMP', fill_value=0).to_numpy(),
    ]
    return np.stack(columns).T
def get_Tg_add(data):
    '''Return the Tg additional data: log-transformed Mw as a
    (num_samples, 1) array.'''
    mw_column = get_vector(data, prop='Mw (PS)', use_log=True).to_numpy()
    return np.stack([mw_column]).T
def get_Tg_add_nolog(data):
    '''Return the Tg additional data without log transform: raw Mw as a
    (num_samples, 1) array.'''
    mw_column = get_vector(data, prop='Mw (PS)', use_log=False).to_numpy()
    return np.stack([mw_column]).T
def get_add_properties(data: pd.DataFrame, prop_names: List[str], use_log: List[bool] = None):
    '''
    Gets properties to add to the model given data, names of properties,
    and whether to log transform each one. Defaults to log-transforming
    every listed property; missing '%TMP' values are filled with 0.
    '''
    log_flags = [True] * len(prop_names) if use_log is None else use_log
    add_vectors = [
        get_vector(data, prop=name, use_log=flag,
                   fill_value=0 if name == '%TMP' else None)
        for name, flag in zip(prop_names, log_flags)
    ]
    return np.stack(add_vectors).T
def make_like_batch(batch: tuple):
    '''
    Turns an (acid, glycol) pair of data lists into two PyG Batch objects
    ready to be fed into the model.

    Args:
        batch (tuple): Must be of length 2 and contain (Acid_data, Glycol_data).
            :type: tuple[`torch.geometric.data.Batch`, `torch.geometric.data.Batch`]
    '''
    acid_data, glycol_data = batch
    return Batch().from_data_list(acid_data), Batch().from_data_list(glycol_data)
def check_early_stop(loss_list, delay = 100):
    '''
    Checks the early-stopping criterion for the training procedure.

    Finds the epoch with the *minimum* loss (the original comment said
    "max", which was misleading) and triggers when more than `delay`
    epochs have passed since it, provided the loss curve is smooth around
    that minimum: the mean of up to 5 neighbors on each side must lie
    within 25% of the minimum itself, so a single noisy dip does not
    trigger a stop.

    Args:
        loss_list (sequence of float): Per-epoch loss values so far.
        delay (int): Number of epochs to wait after the best epoch.

    Returns:
        bool: True when training should stop early.
    '''
    best = np.argmin(loss_list)

    # Smoothness window: up to 5 entries on each side of the minimum.
    low = max(best - 5, 0)
    up = best + 6

    minloss = loss_list[best]
    around_min = np.concatenate([loss_list[low:best], loss_list[(best + 1):up]])
    if around_min.size == 0:
        # Single-point history: nothing to compare against (also avoids a
        # nan-mean RuntimeWarning from the empty slice).
        return False
    # The neighborhood mean must sit within 25% of the minimum itself.
    smooth = np.abs(np.mean(around_min) - minloss) < np.abs(minloss * 0.25)

    return ((len(loss_list) - best) > delay) and smooth
def train(
        model: torch.nn.Module,
        optimizer,
        criterion, 
        dataset: GraphDataset,
        batch_size: int = 64,
        epochs: int = 1000
    ):
    '''
    Train a single model on bootstrap batches, printing train/test metrics
    every 10 epochs. Optimizes per-sample (one backward/step per sample).

    Args:
        model: Neural network to train
        optimizer: Optimizer to use when training the model.
        criterion: Loss function.
        dataset: Dataset class.
        batch_size: Number of samples on which to optimize at each iteration. See
            the description in CV_Eval
        epochs: Number of iterations to train on the data.
    '''
    for e in range(epochs):
        # Batch:
        batch, Y, add_features = dataset.get_train_batch(size = batch_size)
        test_batch, Ytest, add_test = dataset.get_test()

        train_predictions = []
        cum_loss = 0
        for i in range(batch_size):
            # Predictions:
            #predictions = torch.tensor([model(*make_like_batch(batch[i])) for i in range(batch_size)], requires_grad = True).float()
            # Per-sample optional additional (resin property) features.
            af = None if add_features is None else torch.tensor(add_features[i]).float()
            train_prediction = model(*make_like_batch(batch[i]), af)
            train_predictions.append(train_prediction.clone().detach().item())
            #print(predictions)

            # Compute and backprop loss
            loss = criterion(train_prediction, torch.tensor([Y[i]]))
            optimizer.zero_grad()
            loss.backward()
            cum_loss += loss.item()
            optimizer.step()

        # Test:
        test_preds = []
        with torch.no_grad():
            for i in range(Ytest.shape[0]):
                at = None if add_test is None else add_test[i].clone().detach()
                test_preds.append(model(*make_like_batch(test_batch[i]), at).clone().detach().item())

        r2_test = r2_score(Ytest.numpy(), test_preds)
        mse_test = mean_squared_error(Ytest.numpy(), test_preds)

        if e % 10 == 0:
            print(f'Epoch: {e}, \t Train r2: {r2_score(Y, train_predictions):.4f} \t Train Loss: {cum_loss:.4f} \t Test r2: {r2_test:.4f} \t Test Loss {mse_test:.4f}')
def CV_eval(
        dataset: GraphDataset,
        model_generator: torch.nn.Module,
        optimizer_generator,
        criterion,
        model_generator_kwargs: dict = {},
        optimizer_kwargs: dict = {},
        batch_size = 64,
        verbose = 1,
        epochs = 1000,
        use_val = False,
        val_size = 0.1,
        stop_option = 0,
        early_stop_delay = 100,
        save_state_dicts = False,
        get_scores = False,
        device = None):
    '''
    5-fold cross-validated training/evaluation of a freshly instantiated
    model per fold.

    Args:
        dataset (GraphDataset): Preprocessed dataset matching the GraphDataset
            API.
        model_generator (torch.nn.Module): Class of the neural network/model that
            can be instantiated multiple times within the function.
        optimizer_generator: Optimizer that can be instantiated multiple times within
            the function.
        criterion: Loss function that can be instantiated multiple times within
            the function.
        model_generator_kwargs (dict): Dictionary of keyword arguments to be passed
            to the model for every instantiation.
        optimizer_kwargs (dict): Dictionary of keyword arguments to be passed
            to the optimizer for every instantiation.
        batch_size (int): Number of samples to be optimized on for each step. Note
            this works differently than batch size in stochastic gradient descent.
            Here, the higher value for the argument denotes more samples to be
            trained on per epoch (usually vice versa is standard).
        verbose (int): Level at which to print. Should be 0 or 1.
        epochs (int): Number of training iterations on the dataset.
        use_val (bool): If true, uses the validation set in the Dataset class.
        val_size (float): Size of the validation set to use
        stop_option (int): Option that specifies which method to use for early
            stopping/validation saving. 0 simply performs all epochs for each fold.
            1 performs all epochs but uses model with highest validation score for
            evaluation on test set. 2 stops early if the validation loss was at least
            `early_stop_delay` epochs ago; it loads that trial's model and evaluates
            on it.
        early_stop_delay (int): Number of epochs to wait after an early stopping condition
            is met.
        save_state_dicts (bool): If True, returns state dictionaries for the model at
            each fold. Useful for explainability.
        get_scores (bool, optional): If True, return only the average values of metrics
            across the folds
        device (str): Device name at which to run torch calculations on. Supports GPU.
    '''
    num_folds = 5
    fold_count = 0
    # Per-fold aggregate metrics.
    r2_test_per_fold = []
    mse_test_per_fold = []
    mae_test_per_fold = []
    # Pooled per-sample results across all folds.
    all_predictions = []
    all_y = []
    all_reference_inds = []
    model_state_dicts = []
    for test_batch, Ytest, add_test, test_inds in \
            dataset.Kfold_CV(folds = num_folds, val = True, val_size = val_size):

        # Instantiate fold-level model and optimizer:
        model = model_generator(**model_generator_kwargs).to(device)
        # Move model to GPU before setting optimizer
        optimizer = optimizer_generator(model.parameters(), **optimizer_kwargs)

        fold_count += 1

        loss_list = []
        if stop_option >= 1:
            # Track the best (lowest) validation loss seen this fold.
            min_val_loss = 1e10
            min_val_state_dict = None

        for e in range(epochs):
            # Bootstrap batches:
            batch, Y, add_features = dataset.get_train_batch(size = batch_size)

            train_predictions = []
            cum_loss = 0
            for i in range(batch_size):
                # Predictions:
                af = None if add_features is None else torch.tensor(add_features[i]).float()
                if verbose > 1:
                    print('Additional it={}'.format(i), af)
                train_prediction = model(*make_like_batch(batch[i]), af)
                if verbose > 1:
                    print('pred', train_prediction.item())
                train_predictions.append(train_prediction.clone().detach().item())

                # Compute and backprop loss
                loss = criterion(train_prediction, torch.tensor([Y[i]]))
                optimizer.zero_grad()
                loss.backward()
                cum_loss += loss.item()
                optimizer.step()

            if verbose > 1:
                print('Train predictions', train_predictions)

            # Test on validation:
            if use_val:
                model.eval()
                val_batch, Yval, add_feat_val = dataset.get_validation()
                cum_val_loss = 0
                val_preds = []
                with torch.no_grad():
                    for i in range(Yval.shape[0]):
                        pred = model(*make_like_batch(val_batch[i]), add_feat_val[i])
                        val_preds.append(pred.item())
                        cum_val_loss += criterion(pred, Yval[i]).item()
                loss_list.append(cum_val_loss)
                model.train() # Must switch back to train after eval

            if e % 50 == 0 and (verbose >= 1):
                print_str = f'Fold: {fold_count} \t Epoch: {e}, \
                \t Train r2: {r2_score(Y, train_predictions):.4f} \t Train Loss: {cum_loss:.4f}'
                if use_val:
                    print_str += f'Val r2: {r2_score(Yval, val_preds):.4f} \t Val Loss: {cum_val_loss:.4f}'
                print(print_str)

            # NOTE(review): stop_option >= 1 reads cum_val_loss, which only
            # exists when use_val is True -- confirm callers always pair them.
            if stop_option >= 1:
                if cum_val_loss < min_val_loss:
                    # If min val loss, store state dict
                    min_val_loss = cum_val_loss
                    min_val_state_dict = model.state_dict()

                # Check early stop if needed:
                if stop_option == 2:
                    # Check criteria:
                    if check_early_stop(loss_list, early_stop_delay) and e > early_stop_delay:
                        break

        if stop_option >= 1: # Loads the min val loss state dict even if we didn't break
            # Load in the model with min val loss
            model = model_generator(**model_generator_kwargs)
            model.load_state_dict(min_val_state_dict)

        # Test:
        test_preds = []
        with torch.no_grad():
            for i in range(Ytest.shape[0]):
                at = None if add_test is None else torch.tensor(add_test[i]).float()
                pred = model(*make_like_batch(test_batch[i]), at).clone().detach().item()
                test_preds.append(pred)
                all_predictions.append(pred)
                all_y.append(Ytest[i].item())
                all_reference_inds.append(test_inds[i])

        r2_test = r2_score(Ytest.numpy(), test_preds)
        mse_test = mean_squared_error(Ytest.numpy(), test_preds)
        mae_test = mean_absolute_error(Ytest.numpy(), test_preds)

        print(f'Fold: {fold_count} \t Test r2: {r2_test:.4f} \t Test Loss: {mse_test:.4f} \t Test MAE: {mae_test:.4f}')

        r2_test_per_fold.append(r2_test)
        mse_test_per_fold.append(mse_test)
        mae_test_per_fold.append(mae_test)

        if save_state_dicts:
            model_state_dicts.append(model.state_dict())

    print('Final avg. r2: ', np.mean(r2_test_per_fold))
    print('Final avg. MSE:', np.mean(mse_test_per_fold))
    print('Final avg. MAE:', np.mean(mae_test_per_fold))

    r2_avg = np.mean(r2_test_per_fold)
    mae_avg = np.mean(mae_test_per_fold)

    big_ret_dict = {
        'r2': r2_avg,
        'mae': mae_avg,
        'all_predictions': all_predictions,
        'all_y': all_y,
        'all_reference_inds': all_reference_inds,
        'model_state_dicts': model_state_dicts
    }

    # Return shape depends on the flags; get_scores wins when both are set.
    if save_state_dicts:
        if get_scores:
            return big_ret_dict
        else:
            return all_predictions, all_y, all_reference_inds, model_state_dicts

    if get_scores: # Return scores:
        return big_ret_dict

    return all_predictions, all_y, all_reference_inds
def train_joint(
    model,
    optimizer,
    criterion,
    dataset,
    batch_size = 64,
    epochs = 100,
    gamma = 1e4
    ):
    '''
    Train a joint IV/Tg model. The model is expected to return a dict
    with 'IV' and 'Tg' predictions; the combined loss is
    ``gamma * loss_IV + loss_Tg``, with gamma balancing the two scales.
    Y rows are assumed to be (IV, Tg) pairs.
    '''
    for e in range(epochs):
        # Batch:
        batch, Y, add_features = dataset.get_train_batch(size = batch_size)
        test_batch, Ytest, add_test = dataset.get_test()

        #Y = np.log(Y)
        #Ytest = np.log(Ytest)

        # Y[:, 0] = np.log(Y[:, 0])
        # Ytest[:, 0] = np.log(Ytest[:, 0])

        train_predictions = []
        cum_loss = 0
        model.train()
        for i in range(batch_size):
            # Predictions:
            #predictions = torch.tensor([model(*make_like_batch(batch[i])) for i in range(batch_size)], requires_grad = True).float()
            # Per-sample optional additional (resin property) features.
            af = None if add_features is None else torch.tensor(add_features[i]).float()
            train_prediction = model(*make_like_batch(batch[i]), af)
            #train_predictions.append(train_prediction.clone().detach().item())
            # Record (IV, Tg) as plain floats for the epoch r2 printout.
            train_predictions.append([train_prediction[i].clone().detach().item() for i in ['IV', 'Tg']])
            #print(predictions)

            # Compute and backprop loss
            #loss = criterion(train_prediction, torch.tensor([Y[i]]))
            loss_IV = criterion(train_prediction['IV'], torch.tensor([Y[i][0]]))
            loss_Tg = criterion(train_prediction['Tg'], torch.tensor([Y[i][1]]))
            # gamma re-weights IV so its (smaller-scale) loss contributes.
            loss = gamma * loss_IV + loss_Tg
            optimizer.zero_grad()
            loss.backward()
            cum_loss += loss.item()
            optimizer.step()

        # Test:
        # model.eval()
        # test_predIV = []
        # test_predTg = []
        # with torch.no_grad():
        #     for i in range(Ytest.shape[0]):
        #         pred = model(*make_like_batch(test_batch[i]), torch.tensor(add_test[i]).float())
        #         test_predIV.append(pred['IV'].clone().detach().item())
        #         test_predTg.append(pred['Tg'].clone().detach().item())

        # r2_testIV = r2_score(Ytest[:,0].numpy(), test_predIV)
        # r2_testTg = r2_score(Ytest[:,1].numpy(), test_predTg)

        if e % 10 == 0:
            print(f'Epoch: {e}, \t Train r2: {r2_score(Y, train_predictions):.4f} \t Train Loss: {cum_loss:.4f}') #\t Test r2: {r2_testIV:.4f} \t Test r2 (Tg): {r2_testTg}')
def CV_eval_joint(
    dataset,
    model_generator: torch.nn.Module,
    optimizer_generator,
    criterion,
    model_generator_kwargs: dict = {},
    optimizer_kwargs: dict = {},
    batch_size = 64,
    verbose = 1,
    gamma = 1e4,
    epochs = 1000,
    get_scores = False,
    device = None,
    save_state_dicts = False,
    check_r2_thresh = True):
    '''
    Cross validation of the joint Tg/IV model.

    Runs 5-fold CV: for each fold a fresh model is trained on random batches
    until the stopping rule is met, then evaluated on the held-out fold with
    R^2 / MSE / MAE reported overall and per target (IV, Tg).

    Args:
        gamma (float): Weighting factor applied to IV loss. Used
            to balance the losses between IV and Tg during the joint
            training process.
        epochs (int): Minimum number of training iterations per fold.
        check_r2_thresh (bool): When True, keep training past ``epochs``
            until the *training* R^2 of both targets exceeds 0.9; when
            False, stop once ``epochs`` iterations have run.
        verbose: Unused; kept for interface compatibility.

    Returns:
        (all_predictions, all_y, all_reference_inds) by default; a score
        dict when ``get_scores``; state dicts appended when
        ``save_state_dicts``.
    '''
    num_folds = 5
    fold_count = 0
    r2_test_per_fold = []
    r2_test_per_fold_IV = []
    r2_test_per_fold_Tg = []
    mse_test_per_fold = []
    mse_test_per_fold_IV = []
    mse_test_per_fold_Tg = []
    mae_test_per_fold = []
    mae_test_per_fold_IV = []
    mae_test_per_fold_Tg = []
    all_predictions = []
    all_y = []
    all_reference_inds = []
    model_state_dicts = []
    for test_batch, Ytest, add_test, test_inds in dataset.Kfold_CV(folds = num_folds):
        # Fresh model/optimizer for every fold.
        model = model_generator(**model_generator_kwargs).to(device)
        optimizer = optimizer_generator(model.parameters(), **optimizer_kwargs)
        fold_count += 1
        model.train()
        e = 0
        while True:
            # Batch:
            batch, Y, add_features = dataset.get_train_batch(size = batch_size)
            if add_features is not None:
                add_features = torch.tensor(add_features).float().to(device)
            train_predictions = []
            cum_loss = 0
            for i in range(batch_size):
                af = None if add_features is None else add_features[i]
                A, G = make_like_batch(batch[i])
                A, G = A.to(device), G.to(device)
                train_prediction = model(A, G, af)
                # Record [IV, Tg] as plain floats for the epoch-level r2.
                train_predictions.append(
                    [train_prediction[key].clone().detach().item() for key in ['IV', 'Tg']])
                # Joint loss: gamma re-scales the IV term (loss is additive).
                loss_IV = criterion(train_prediction['IV'], torch.tensor([Y[i][0]]).to(device))
                loss_Tg = criterion(train_prediction['Tg'], torch.tensor([Y[i][1]]).to(device))
                loss = gamma * loss_IV + loss_Tg
                optimizer.zero_grad()
                loss.backward()
                cum_loss += loss.item()
                optimizer.step()
            # Per-target training R^2 across the whole batch.
            # BUG FIX: this used to be r2_score(Y[:][0], train_predictions[0][:]),
            # i.e. first *sample* vs first *prediction* (Y[:][0] is just Y[0]),
            # instead of the IV / Tg columns across the batch.
            try:
                y_arr = np.asarray(Y)
                pred_arr = np.asarray(train_predictions)
                r2IV = r2_score(y_arr[:, 0], pred_arr[:, 0])
            except Exception:
                r2IV = -1
            try:
                r2Tg = r2_score(y_arr[:, 1], pred_arr[:, 1])
            except Exception:
                r2Tg = -1
            if e % 50 == 0:
                print(f'Fold: {fold_count} : {e}, Train r2 IV, Tg: {r2IV:.4f}, {r2Tg:.4f} \t Train Loss: {cum_loss:.4f}')
            if check_r2_thresh:
                # Check for stable learning on both IV and Tg.
                # Checks training value, not validation.
                if (e > epochs) and (r2IV > 0.9) and (r2Tg > 0.9):
                    break
            elif e > epochs:
                # BUG FIX: without this branch the loop never terminated
                # when check_r2_thresh=False.
                break
            e += 1
        # Test:
        model.eval()
        test_preds = []
        with torch.no_grad():
            for i in range(Ytest.shape[0]):
                at = None if add_test is None else torch.tensor(add_test[i]).float().to(device)
                A, G = make_like_batch(test_batch[i])
                A, G = A.to(device), G.to(device)
                test_pred = model(A, G, at)
                pred = [test_pred[key].clone().detach().item() for key in ['IV', 'Tg']]
                test_preds.append(pred)
                all_predictions.append(pred)
                all_y.append(Ytest[i,:].detach().clone().tolist())
                all_reference_inds.append(test_inds[i])
        # Held-out metrics, overall and per target.
        r2_test = r2_score(Ytest.cpu().numpy(), test_preds)
        r2_test_IV = r2_score(Ytest.cpu().numpy()[:, 0], np.array(test_preds)[:, 0])
        r2_test_Tg = r2_score(Ytest.cpu().numpy()[:, 1], np.array(test_preds)[:, 1])
        mse_test = mean_squared_error(Ytest.cpu().numpy(), test_preds)
        mse_test_IV = mean_squared_error(Ytest.cpu().numpy()[:, 0], np.array(test_preds)[:, 0])
        mse_test_Tg = mean_squared_error(Ytest.cpu().numpy()[:, 1], np.array(test_preds)[:, 1])
        mae_test = mean_absolute_error(Ytest.cpu().numpy(), test_preds)
        mae_test_IV = mean_absolute_error(Ytest.cpu().numpy()[:, 0], np.array(test_preds)[:, 0])
        mae_test_Tg = mean_absolute_error(Ytest.cpu().numpy()[:, 1], np.array(test_preds)[:, 1])
        print(f'Fold: {fold_count} \t Test r2: {r2_test:.4f} \t r2_IV: {r2_test_IV:.4f} \t r2_Tg: {r2_test_Tg:.4f} \t MSE: {mse_test:.4f} \t MSE_IV: {mse_test_IV:.4f} \t MSE_Tg: {mse_test_Tg:.4f} \t MAE: {mae_test:.4f} \t MAE_IV: {mae_test_IV:.4f} \t MAE_Tg: {mae_test_Tg:.4f}')
        r2_test_per_fold.append(r2_test)
        r2_test_per_fold_IV.append(r2_test_IV)
        r2_test_per_fold_Tg.append(r2_test_Tg)
        mse_test_per_fold.append(mse_test)
        mse_test_per_fold_IV.append(mse_test_IV)
        mse_test_per_fold_Tg.append(mse_test_Tg)
        mae_test_per_fold.append(mae_test)
        mae_test_per_fold_IV.append(mae_test_IV)
        mae_test_per_fold_Tg.append(mae_test_Tg)
        if save_state_dicts:
            model_state_dicts.append(model.state_dict())
    print('Final avg. r2: ', np.mean(r2_test_per_fold))
    print('Final avg. r2 IV: ', np.mean(r2_test_per_fold_IV))
    print('Final avg. r2 Tg: ', np.mean(r2_test_per_fold_Tg))
    print('Final avg. MSE:', np.mean(mse_test_per_fold))
    print('Final avg. MSE IV: ', np.mean(mse_test_per_fold_IV))
    print('Final avg. MSE Tg: ', np.mean(mse_test_per_fold_Tg))
    print('Final avg. MAE:', np.mean(mae_test_per_fold))
    print('Final avg. MAE IV: ', np.mean(mae_test_per_fold_IV))
    print('Final avg. MAE Tg: ', np.mean(mae_test_per_fold_Tg))
    d = {
        'IV':(np.mean(r2_test_per_fold_IV), np.mean(mae_test_per_fold_IV)),
        'Tg':(np.mean(r2_test_per_fold_Tg), np.mean(mae_test_per_fold_Tg)),
        'all_predictions': all_predictions,
        'all_y': all_y,
        'all_reference_inds': all_reference_inds,
        'model_state_dicts': model_state_dicts
    }
    if save_state_dicts:
        if get_scores:
            return d
        else:
            return all_predictions, all_y, all_reference_inds, model_state_dicts
    if get_scores:
        # Return in a dictionary
        return d
return all_predictions, all_y, all_reference_inds | owencqueen/PolymerGNN | polymerlearn/utils/train_graphs.py | train_graphs.py | py | 24,177 | python | en | code | 7 | github-code | 13 |
463785625 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
from gettext import gettext as _
from gi.repository import Gtk
from gi.repository import WebKit
from sugar3.activity import activity
from sugar3.activity.widgets import ActivityButton
from sugar3.graphics.toolbarbox import ToolbarBox
from sugar3.graphics.toolbarbox import ToolbarButton
from sugar3.graphics.toolbutton import ToolButton
from sugar3.activity.widgets import StopButton
from viewtoolbar import ViewToolbar
# Entry page of the bundled help content; the commented URL below shows how
# to point the viewer at a remote page instead.
HOME = "file://"+os.path.join(activity.get_bundle_path(), 'sugarizehelp/Introduction')
#HOME = "http://website.com/something.html"
class SugarizeHelp(activity.Activity):
    """Sugar activity that shows the bundled Sugarize help pages in a WebKit view."""

    def __init__(self, handle):
        """Build the toolbar (back/forward/home/stop) and the web view, then load HOME."""
        activity.Activity.__init__(self, handle)
        # Help viewing is single-user.
        self.props.max_participants = 1
        self._web_view = WebKit.WebView()
        toolbox = ToolbarBox()
        self.set_toolbar_box(toolbox)
        toolbox.show()
        toolbar = toolbox.toolbar
        toolbar.show()
        activity_button = ActivityButton(self)
        toolbar.insert(activity_button, -1)
        viewtoolbar = ViewToolbar(self)
        viewtoolbar_button = ToolbarButton(
            page=viewtoolbar, icon_name='toolbar-view')
        toolbar.insert(viewtoolbar_button, -1)
        toolbar.show_all()
        # History navigation; sensitivity is kept in sync by
        # update_navigation_buttons() on every page load.
        self._back = ToolButton('go-previous-paired')
        self._back.set_tooltip(_('Back'))
        self._back.props.sensitive = False
        self._back.connect('clicked', self._go_back_cb)
        toolbar.insert(self._back, -1)
        self._back.show()
        self._forward = ToolButton('go-next-paired')
        self._forward.set_tooltip(_('Forward'))
        self._forward.props.sensitive = False
        self._forward.connect('clicked', self._go_forward_cb)
        toolbar.insert(self._forward, -1)
        self._forward.show()
        home = ToolButton('go-home')
        home.set_tooltip(_('Home'))
        home.connect('clicked', self._go_home_cb)
        toolbar.insert(home, -1)
        home.show()
        # Expanding separator pushes the Stop button to the right edge.
        separator = Gtk.SeparatorToolItem()
        separator.props.draw = False
        separator.set_expand(True)
        toolbar.insert(separator, -1)
        stopbtn = StopButton(self)
        toolbar.insert(stopbtn, -1)
        toolbar.show_all()
        self._web_view.connect('load-finished', self.update_navigation_buttons)
        self.set_canvas(self._web_view)
        self._web_view.show()
        self._web_view.load_uri(HOME)

    def _go_back_cb(self, button):
        """Toolbar callback: navigate one step back in history."""
        self._web_view.go_back()

    def _go_forward_cb(self, button):
        """Toolbar callback: navigate one step forward in history."""
        self._web_view.go_forward()

    def _go_home_cb(self, button):
        """Toolbar callback: reload the bundled start page."""
        self._web_view.load_uri(HOME)

    def update_navigation_buttons(self, *args):
        """Enable/disable back and forward buttons to match the view's history."""
        can_go_back = self._web_view.can_go_back()
        self._back.props.sensitive = can_go_back
        can_go_forward = self._web_view.can_go_forward()
        self._forward.props.sensitive = can_go_forward
| sugarlabs/Sugarizehelp | sugarizehelp.py | sugarizehelp.py | py | 3,641 | python | en | code | 0 | github-code | 13 |
2302317339 | #i/p=[1,2,3,4,5,6]
#o/p=[1,2,3,6,5,4]
# Keep the first half of the list as-is and reverse the second half.
# i/p = [1, 2, 3, 4, 5, 6]  ->  o/p = [1, 2, 3, 6, 5, 4]
l1 = [1, 2, 3, 4, 5, 6]
mid = len(l1) // 2
# l1[:mid-1:-1] walks from the last element down to index mid (inclusive);
# for mid == 0 (empty input) the slice is l1[:-1:-1] == [].
# FIX: the original named its output `list`, shadowing the builtin, and
# rebuilt the halves with manual index bookkeeping.
result = l1[:mid] + l1[:mid - 1:-1]
print(result)
| sneha1sneha/pgms | pROGRAMS/imp2.py | imp2.py | py | 281 | python | en | code | 0 | github-code | 13 |
20995689443 | from itertools import repeat, product
from functools import partial
from array import array
def mandel_for(re, im, max_dist=2**6, max_iter=255):
    """Count Mandelbrot iterations for the point c = re + im*j.

    Iterates z -> z**2 + c starting from z = c and returns the index of the
    first iteration whose squared modulus reaches max_dist, or max_iter if
    the point never escapes.
    """
    x, y = re, im
    step = 0
    while step < max_iter:
        xx = x * x
        yy = y * y
        if xx + yy >= max_dist:
            return step
        x, y = xx - yy + re, 2 * x * y + im
        step += 1
    return max_iter
def mandel_classic(re1, im1, re2, im2, width, height):
    """Render Mandelbrot iteration counts into a row-major byte array.

    Pure-Python reference implementation.
    re1, im1 -- upper left corner of the complex window
    re2, im2 -- lower right corner of the complex window
    width, height -- size of the output image in pixels
    Returns an array('B') of length width*height.
    """
    step_re = (re2 - re1) / width
    step_im = (im2 - im1) / height
    out = array('B', repeat(255, width * height))
    pos = 0
    for y in range(height):
        row_im = im1 + y * step_im
        for x in range(width):
            out[pos] = mandel_for(re1 + x * step_re, row_im)
            pos += 1
    return out
def f(re_im):
    """Unpack an (re, im) pair and return its Mandelbrot iteration count.

    Adapter for map()/pool.map-style callers that supply one tuple argument.
    """
    real_part, imag_part = re_im
    return mandel_for(real_part, imag_part)
| rotaliator/profract | profract/mandel/pure_python.py | pure_python.py | py | 1,109 | python | en | code | 1 | github-code | 13 |
12877212948 | """Кофнфиг серверного логгера"""
from logger import GetLogger
# Module-level logger for the server side; GetLogger is a project helper that
# configures and returns a named logger instance.
LOGGER = GetLogger(logger_name='server_logger').get_logger()
# Debug demo: emit one message per severity level when run as a script.
# (The Russian literals are runtime log messages and are left unchanged.)
if __name__ == '__main__':
    LOGGER.critical('Критическая ошибка')      # "Critical error"
    LOGGER.error('Ошибка')                     # "Error"
    LOGGER.debug('Отладочная информация')      # "Debug information"
    LOGGER.info('Информационное сообщение')    # "Informational message"
| Roman-R2/telemetron_telegram_bot | services/logging_config.py | logging_config.py | py | 414 | python | ru | code | 0 | github-code | 13 |
8048020145 | import numpy as np
import argparse
import associate
def align(model, data):
    """Align two trajectories using the method of Horn (closed-form).

    Input:
    model -- first trajectory (3xn)
    data -- second trajectory (3xn)

    Output:
    rot -- rotation matrix (3x3)
    trans -- translation vector (3x1)
    trans_error -- translational error per point (1xn)
    """
    centered_model = model - model.mean(1)
    centered_data = data - data.mean(1)
    # Cross-covariance of the zero-centered point sets.
    W = np.zeros((3, 3))
    for col in range(model.shape[1]):
        W += np.outer(centered_model[:, col], centered_data[:, col])
    U, d, Vh = np.linalg.svd(W.transpose())
    # Reflection guard: force a proper rotation (det = +1).
    S = np.matrix(np.identity(3))
    if np.linalg.det(U) * np.linalg.det(Vh) < 0:
        S[2, 2] = -1
    rot = U * S * Vh
    trans = data.mean(1) - rot * model.mean(1)
    residual = rot * model + trans - data
    trans_error = np.sqrt(np.sum(np.multiply(residual, residual), 0)).A[0]
    return rot, trans, trans_error
def compute_ATE(gt, est):
    """Compute the Absolute Trajectory Error (ATE) between ground truth and estimated trajectories.

    Input:
    gt -- ground truth trajectory (3xn)
    est -- estimated trajectory (3xn)

    Output:
    ate -- absolute trajectory error (scalar, mean per-point error after alignment)
    """
    _, _, per_point_error = align(gt, est)
    return np.mean(per_point_error)
def main():
    """CLI entry point: load, synchronize and compare two trajectory files.

    Prints ATE, mean/median error and RMSE; optionally plots the error curve.
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("groundtruth_file", help="ground truth file")
    parser.add_argument("estimated_file", help="estimated file")
    parser.add_argument("--plot", help="plot resulting ATE", action='store_true')
    args = parser.parse_args()
    # Load data (associate is a project module; file format defined there).
    gt_traj = associate.read_file(args.groundtruth_file)
    est_traj = associate.read_file(args.estimated_file)
    # Synchronize data
    matched_traj = associate.associate(gt_traj, est_traj)
    # NOTE(review): gt_times and est_traj both take the *second* element of
    # each matched pair while gt_traj takes the first, so gt_times == est_traj
    # as written — verify against what associate.associate() actually returns.
    gt_times = np.array([t for (_, t) in matched_traj])
    gt_traj = np.array([x for (x, _) in matched_traj])
    est_traj = np.array([x for (_, x) in matched_traj])
    # Compute Absolute Trajectory Error (ATE)
    ate = compute_ATE(gt_traj, est_traj)
    print(f"ATE: {ate:.3f} m")
    # Compute mean, median, and RMSE of error
    error = np.abs(gt_traj - est_traj)
    mean_error = np.mean(error)
    median_error = np.median(error)
    rmse = np.sqrt(np.mean(np.square(error)))
    print(f"Mean error: {mean_error:.3f} m")
    print(f"Median error: {median_error:.3f} m")
    print(f"RMSE: {rmse:.3f} m")
    # Plot ATE (if requested)
    if args.plot:
        # matplotlib is imported lazily so the metrics work without a display.
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            print("Failed to import matplotlib.pyplot, plotting is not available.")
            return
        plt.plot(gt_times, error, 'b', label="Absolute Trajectory Error")
        plt.plot(gt_times, np.ones(len(gt_times)) * ate, 'r', label="ATE Mean")
        plt.plot(gt_times, np.ones(len(gt_times)) * median_error, 'g', label="ATE Median")
        plt.plot(gt_times, np.ones(len(gt_times)) * rmse, 'k', label="ATE RMSE")
        plt.title("Absolute Trajectory Error")
        plt.legend(loc=0, prop={'size':10})
        plt.xlabel("Time (s)")
        plt.ylabel("ATE (m)")
        plt.show()
if __name__ == "__main__":
main() | Johnemad96/masters | orbslam3_docker/orbslam_modifiedFork/Datasets/evaluate_using_rgbd_paper/associate.py | associate.py | py | 3,375 | python | en | code | 1 | github-code | 13 |
3745983738 | #!/usr/bin/env python3
#
# Check AppStore/GooglePlay metadata
#
import os
import sys
import glob
import shutil
from urllib.parse import urlparse
os.chdir(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", ".."))
# https://support.google.com/googleplay/android-developer/answer/9844778?visit_id=637740303439369859-3116807078&rd=1#zippy=%2Cview-list-of-available-languages
GPLAY_LOCALES = [
"af", # Afrikaans
"sq", # Albanian
"am", # Amharic
"ar", # Arabic
"hy-AM", # Armenian
"az-AZ", # Azerbaijani
"bn-BD", # Bangla
"eu-ES", # Basque
"be", # Belarusian
"bg", # Bulgarian
"my-MM", # Burmese
"ca", # Catalan
"zh-HK", # Chinese (Hong Kong)
"zh-CN", # Chinese (Simplified)
"zh-TW", # Chinese (Traditional)
"hr", # Croatian
"cs-CZ", # Czech
"da-DK", # Danish
"nl-NL", # Dutch
"en-IN", # English
"en-SG", # English
"en-ZA", # English
"en-AU", # English (Australia)
"en-CA", # English (Canada)
"en-GB", # English (United Kingdom)
"en-US", # English (United States)
"et", # Estonian
"fil", # Filipino
"fi-FI", # Finnish
"fr-CA", # French (Canada)
"fr-FR", # French (France)
"gl-ES", # Galician
"ka-GE", # Georgian
"de-DE", # German
"el-GR", # Greek
"gu", # Gujarati
"iw-IL", # Hebrew
"hi-IN", # Hindi
"hu-HU", # Hungarian
"is-IS", # Icelandic
"id", # Indonesian
"it-IT", # Italian
"ja-JP", # Japanese
"kn-IN", # Kannada
"kk", # Kazakh
"km-KH", # Khmer
"ko-KR", # Korean
"ky-KG", # Kyrgyz
"lo-LA", # Lao
"lv", # Latvian
"lt", # Lithuanian
"mk-MK", # Macedonian
"ms", # Malay
"ms-MY", # Malay (Malaysia)
"ml-IN", # Malayalam
"mr-IN", # Marathi
"mn-MN", # Mongolian
"ne-NP", # Nepali
"no-NO", # Norwegian
"fa", # Persian
"fa-AE", # Persian
"fa-AF", # Persian
"fa-IR", # Persian
"pl-PL", # Polish
"pt-BR", # Portuguese (Brazil)
"pt-PT", # Portuguese (Portugal)
"pa", # Punjabi
"ro", # Romanian
"rm", # Romansh
"ru-RU", # Russian
"sr", # Serbian
"si-LK", # Sinhala
"sk", # Slovak
"sl", # Slovenian
"es-419", # Spanish (Latin America)
"es-ES", # Spanish (Spain)
"es-US", # Spanish (United States)
"sw", # Swahili
"sv-SE", # Swedish
"ta-IN", # Tamil
"te-IN", # Telugu
"th", # Thai
"tr-TR", # Turkish
"uk", # Ukrainian
"ur", # Urdu
"vi", # Vietnamese
"zu", # Zulu
]
# From a Fastline error message and https://help.apple.com/app-store-connect/#/dev997f9cf7c
APPSTORE_LOCALES = [
"ar-SA", "ca", "cs", "da", "de-DE", "el", "en-AU", "en-CA",
"en-GB", "en-US", "es-ES", "es-MX", "fi", "fr-CA", "fr-FR",
"he", "hi", "hr", "hu", "id", "it", "ja", "ko", "ms", "nl-NL",
"no", "pl", "pt-BR", "pt-PT", "ro", "ru", "sk", "sv", "th", "tr",
"uk", "vi", "zh-Hans", "zh-Hant"
]
def error(path, message, *args, **kwargs):
    """Report a validation failure for *path* on stderr and return False."""
    detail = message.format(*args, **kwargs)
    print("❌", path + ":", detail, file=sys.stderr)
    return False
def done(path, ok):
    """Print a success marker for *path* when *ok* is truthy; pass *ok* through."""
    if not ok:
        return ok
    print("✅", path)
    return ok
def check_raw(path, max_length):
    """Read *path*, strip its single trailing newline, and length-check it.

    Returns (ok, text): ok is False when the trailing newline is missing or
    when the stripped text exceeds *max_length* characters.
    """
    with open(path, 'r') as f:
        contents = f.read()
    ok = True
    if contents[-1] != os.linesep:
        ok = error(path, "missing new line")
    else:
        contents = contents[:-1]
    length = len(contents)
    if length > max_length:
        ok = error(path, "too long: got={}, expected={}", length, max_length)
    return ok, contents
def check_text(path, max, optional=False):
    """Validate a plain-text metadata file at *path* against a length limit.

    Returns True when the file passes (or is missing but *optional*),
    False otherwise.
    """
    try:
        return done(path, check_raw(path, max)[0])
    except FileNotFoundError:
        if optional:
            return True
        print("🚫", path)
        # BUG FIX: this used to be ``return False,`` (and ``return True,``
        # above) — one-element tuples, which are always truthy, so a missing
        # mandatory file never failed the ``check_text(...) and ok`` chains
        # in check_android()/check_ios().
        return False
def check_url(path,):
    """Check that *path* holds a single http(s) URL at most 500 chars long."""
    ok, raw = check_raw(path, 500)
    parsed = urlparse(raw)
    if parsed.scheme not in ('https', 'http'):
        ok = error(path, "invalid URL: {}", parsed)
    return done(path, ok)
def check_email(path):
    """Check that *path* holds something that loosely looks like an e-mail."""
    ok, address = check_raw(path, 500)
    has_at = address.find('@') != -1
    has_dot = address.find('.') != -1
    return done(path, ok and has_at and has_dot)
def check_exact(path, expected):
    """Check that *path* contains exactly *expected* (plus a trailing newline)."""
    ok, actual = check_raw(path, len(expected))
    if actual != expected:
        ok = error(path, "invalid value: got={}, expected={}", actual, expected)
    return done(path, ok)
def check_android():
    """Validate the F-Droid / Google Play store metadata for every locale.

    Returns True when all checks pass; individual failures are printed by
    error() and folded into the return value.
    """
    ok = True
    flavor = 'android/app/src/fdroid/play/'
    ok = check_url(flavor + 'contact-website.txt') and ok
    ok = check_email(flavor + 'contact-email.txt') and ok
    ok = check_exact(flavor + 'default-language.txt', 'en-US') and ok
    # Per-locale listing texts; the limits are the Play Console limits.
    for locale in glob.glob(flavor + 'listings/*/'):
        if locale.split('/')[-2] not in GPLAY_LOCALES:
            ok = error(locale, 'unsupported locale') and ok
            continue
        ok = check_text(locale + 'title.txt', 50) and ok
        ok = check_text(locale + 'title-google.txt', 30) and ok
        ok = check_text(locale + 'short-description.txt', 80) and ok
        ok = check_text(locale + 'short-description-google.txt', 80, True) and ok
        ok = check_text(locale + 'full-description.txt', 4000) and ok
        ok = check_text(locale + 'full-description-google.txt', 4000, True) and ok
        ok = check_text(locale + 'release-notes.txt', 499) and ok
    # Stand-alone release-notes directory, validated with the same limit.
    for locale in glob.glob(flavor + 'release-notes/*/'):
        if locale.split('/')[-2] not in GPLAY_LOCALES:
            ok = error(locale, 'unsupported locale') and ok
            continue
        ok = check_text(locale + 'default.txt', 499) and ok
    return ok
def check_ios():
    """Backfill and validate the AppStore metadata for every supported locale.

    First pass: for each locale, copy each metadata file from the
    keywords/ios overlay when present, otherwise fall back to the en-US
    version (without overwriting an existing translation). Second pass:
    validate every locale directory against AppStore length limits.
    Returns True when all checks pass.
    """
    ok = True
    for locale in APPSTORE_LOCALES:
        locale_dir = os.path.join('iphone', 'metadata', locale)
        english_dir = os.path.join('iphone', 'metadata', 'en-US')
        overlay_dir = os.path.join('keywords', 'ios', locale)
        if not os.path.isdir(locale_dir):
            os.mkdir(locale_dir)
        for name in ["name.txt", "subtitle.txt", "promotional_text.txt",
                     "description.txt", "release_notes.txt", "keywords.txt",
                     "support_url.txt", "marketing_url.txt", "privacy_url.txt"]:
            overlay_path = os.path.join(overlay_dir, name)
            english_path = os.path.join(english_dir, name)
            target_path = os.path.join(locale_dir, name)
            # Overlay wins; otherwise seed missing files from English.
            if os.path.exists(overlay_path):
                shutil.copy(overlay_path, target_path)
            elif os.path.exists(english_path) and not os.path.exists(target_path):
                shutil.copy(english_path, target_path)
    for locale in glob.glob('iphone/metadata/*/'):
        if locale.split('/')[-2] not in APPSTORE_LOCALES:
            ok = error(locale, "unsupported locale") and ok
            continue
        ok = check_text(locale + "name.txt", 30) and ok
        ok = check_text(locale + "subtitle.txt", 30) and ok
        ok = check_text(locale + "promotional_text.txt", 170) and ok
        ok = check_text(locale + "description.txt", 4000) and ok
        ok = check_text(locale + "release_notes.txt", 4000) and ok
        ok = check_text(locale + "keywords.txt", 100, True) and ok
        ok = check_url(locale + "support_url.txt") and ok
        ok = check_url(locale + "marketing_url.txt") and ok
        ok = check_url(locale + "privacy_url.txt") and ok
    # Overlay directories are only checked for locale validity.
    for locale in glob.glob('keywords/ios/*/'):
        if locale.split('/')[-2] not in APPSTORE_LOCALES:
            ok = error(locale, "unsupported locale") and ok
            continue
    return ok
# CLI dispatcher: exit code 0 on success, 2 on validation failure, 1 on usage.
if __name__ == "__main__":
    ok = True  # NOTE(review): never read — every branch exits via sys.exit().
    if len(sys.argv) == 2 and sys.argv[1] == 'android':
        if check_android():
            sys.exit(0)
        sys.exit(2)
    elif len(sys.argv) == 2 and sys.argv[1] == "ios":
        if check_ios():
            sys.exit(0)
        sys.exit(2)
    else:
        print("Usage:", sys.argv[0], "android|ios", file=sys.stderr)
        sys.exit(1)
| organicmaps/organicmaps | tools/python/check_store_metadata.py | check_store_metadata.py | py | 7,989 | python | en | code | 7,565 | github-code | 13 |
33536119847 | import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage as ndi
import pandas as pd
import random
import segyio
np.random.seed(1234)
'''
# model
tmax = 0.2
tmin = 0
xt = np.arange(0, 200)
# impedance range
max_guess, min_guess = 4500, 1000
max_imp, min_imp = 3000, 1500
# wavelet parameters
f = 50
length = 0.1
dt = 0.001 # 1ms, 1,000Hz
# Input seismogram
lenSeis = 200 + 1
# Optimisation criterion
L1 = True
###########################
nsamp = int((tmax - tmin) / dt) + 1
t = np.linspace(tmin, tmax, nsamp)
np.random.seed(12)
def createModel(t):
imp_LF = 2250 + 200 * np.sin(2*np.pi*20 * t + 10) + 100 * np.sin(2*np.pi*80 * t - 2) # Determines the range of synthetic impedance model
return imp_LF
imp_syn = createModel(t)
plt.plot(imp_syn)
plt.show()
def power(timeseries):
fourier_transform = np.fft.rfft(timeseries)
abs_fourier_transform = np.abs(fourier_transform)
power_spectrum = np.square(abs_fourier_transform)
frequency = np.linspace(0, 1/(2*dt), len(power_spectrum))
return frequency, power_spectrum
def calcuRc(imp):
Rc = []
#nsamp = np.shape(imp)[0]
for i in range(0, nsamp - 1):
Rc.append((imp[i + 1] - imp[i]) / (imp[i + 1] + imp[i]))
return Rc
rc = calcuRc(imp_syn)
# define function of ricker wavelet
def ricker(f, length, dt):
t0 = np.arange(-length / 2, (length - dt) / 2, dt)
y = (1.0 - 2.0 * (np.pi ** 2) * (f ** 2) * (t0 ** 2)) * np.exp(-(np.pi ** 2) * (f ** 2) * (t0 ** 2))
return y
wavelet = ricker(f, length, dt)
#imp = createModel(nsamp)
#print(imp)
#Rc = calcuRc(imp)
for sample in range(np.shape(rc)[0]):
if abs(rc[sample]) < 0.005:
rc[sample] = 0
def SyntheticTrace(Rc, wavelet, nsamp):
noise = np.random.normal(0, 0.3, nsamp-1)
noise_smoothed = ndi.uniform_filter1d(noise, size=7) # Controls the noise level
synthetic_trace = np.convolve(Rc, wavelet, mode='same')
synthetic_norm = synthetic_trace / max(synthetic_trace)
synthetic_ctm = synthetic_norm + noise_smoothed
return synthetic_trace, synthetic_norm, synthetic_ctm
Synthetic_raw, synthetic, synthetic_contaminated = SyntheticTrace(rc, wavelet, nsamp)
def power(timeseries):
fourier_transform = np.fft.rfft(timeseries)
abs_fourier_transform = np.abs(fourier_transform)
power_spectrum = np.square(abs_fourier_transform)
frequency = np.linspace(0, 1/(2*dt), len(power_spectrum))
return frequency, power_spectrum
f_rc, p_rc = power(rc)
f_imp, p_imp = power(imp_syn)
plt.plot(f_imp, p_imp, 'r')
plt.ylabel('power of imp')
plt.show()
plt.plot(f_rc, p_rc, 'b')
plt.ylabel('power of rc')
plt.show()
'''
'''
f_trace, p_trace = power(synthetic_contaminated)
f_imp, p_imp = power(imp)
f_wav, p_wav = power(wavelet)
#plt.plot(f_trace, p_trace, 'r')
#plt.show()
#plt.plot(f_imp, p_imp, 'b')
#plt.show()
#plt.plot(f_wav, p_wav, 'g')
#plt.show()
#plt.plot(synthetic, 'r')
#plt.plot(synthetic_contaminated, 'b')
#plt.show()
'''
'''
# number of spikes
no_spikes = np.random.poisson(10, 1)
# location of spikes
lo_spikes = np.random.randint(0, 200, no_spikes)
# magnitude and direction of spikes
spikes = np.random.normal(0, 0.1, no_spikes)
for a, b in zip(spikes, lo_spikes):
rc = (b, a)
fig, ax = plt.subplots(1, 1)
x = np.arange(0, 200)
ax.plot(x, rc, 'bo', ms=8)
ax.vlines(x, 0, spikes, colors='b', lw=5, alpha=0.5)
tt = [1,4,6,7,9,3,4,6,8,6,3,7]
x = np.arange(0, len(tt))
plt.step(x, tt, label='pre (default)')
plt.plot(x, tt, 'o--', color='grey', alpha=0.3)
plt.show()
plt.plot(tt)
plt.show()
'''
# define function of ricker wavelet
def ricker(f, length, dt):
    """Ricker (Mexican-hat) wavelet with peak frequency *f* Hz.

    Sampled every *dt* seconds on the interval [-length/2, (length-dt)/2);
    the peak amplitude 1.0 occurs at t = 0.
    """
    times = np.arange(-length / 2, (length - dt) / 2, dt)
    arg = (np.pi ** 2) * (f ** 2) * (times ** 2)
    return (1.0 - 2.0 * arg) * np.exp(-arg)
# wavelet parameters
f = 50          # peak frequency, Hz
length = 0.1    # wavelet duration, s
dt = 1e-3  # 1ms, 1,000Hz
wavelet = ricker(f, length, dt)
# Sparse reflectivity series: one positive and one negative spike.
rc = np.zeros(200)
rc[75] = 0.5
rc[150] = -0.25
# Synthetic trace = reflectivity convolved with the wavelet.
syn = np.convolve(wavelet, rc, mode='same')
plt.plot(syn)
plt.show()
'''
wavelet = []
for f in [50, 100, 150]:
wavelet.append(ricker(f, length, dt))
plt.subplot(1, 3, 1)
plt.plot(wavelet[0])
plt.subplot(1, 3, 2)
plt.plot(wavelet[1])
plt.subplot(1, 3, 3)
plt.plot(wavelet[2])
plt.show()
for length in [0.05, 0.1, 0.2]:
wavelet.append(ricker(f, length, dt))
plt.subplot(1, 3, 1)
plt.plot(wavelet[3])
plt.subplot(1, 3, 2)
plt.plot(wavelet[4])
plt.subplot(1, 3, 3)
plt.plot(wavelet[5])
plt.show()
for dt in [0.5e-3, 1e-3, 1.5e-3]:
wavelet.append(ricker(f, length, dt))
plt.subplot(1, 3, 1)
plt.plot(wavelet[6], 'r+')
plt.subplot(1, 3, 2)
plt.plot(wavelet[7], 'r+')
plt.subplot(1, 3, 3)
plt.plot(wavelet[8], 'r+')
plt.show()
'''
| Sheng154/impedance_inversion | Synthetic_case_3.py | Synthetic_case_3.py | py | 4,662 | python | en | code | 0 | github-code | 13 |
12852349387 | import re
import threading
import pandas as pd
from time import sleep
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.remote.webelement import WebElement
from src.utils import get_full_path
from src.model import aliexpress
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from src.user_agents import user_agents
import random
# Product links scraped earlier; stored as a single JSON column of URLs.
pro_links_list = pd.read_json(get_full_path('../data/links_pd.json'))
pro_links_list = pro_links_list[0].tolist()
print(f"Length of products link ==> {len(pro_links_list)}")
class ProductDescriptionGetter:
products_list = []
    def __init__(self, link):
        """Prepare a headless Chrome driver pointed at one product page.

        Args:
            link: URL of the product page to scrape.
        """
        self.pro_url = link
        # Fresh copy of the product template dict (src.model.aliexpress).
        self.product = aliexpress.copy()
        self.product['source'] = self.pro_url
        options = Options()
        options.add_argument("start-maximized")
        options.add_argument("--disable-extensions")
        options.add_argument('--headless')
        options.add_argument('window-size=1920x1080')
        options.add_argument('--no-sandbox')
        options.add_argument("--hide-scrollbars")
        options.add_argument("disable-infobars")
        options.add_argument('--disable-dev-shm-usage')
        # Ask Chrome to auto-translate these languages to English.
        prefs = {
            "translate_whitelists": {"fr": "en", "de": "en", 'it': 'en', 'no': 'en', 'es': 'en', 'sv': 'en', 'nl': 'en',
                                     'da': 'en', 'pl': 'en', 'fi': 'en', 'cs': 'en'},
            "translate": {"enabled": "true"}
        }
        options.add_experimental_option("prefs", prefs)
        # Randomize the user agent per instance to reduce bot detection.
        useragent = random.choice(user_agents)
        print(f'User Agent ==> {useragent}')
        options.add_argument(f'user-agent={useragent}')
        self.driver = webdriver.Chrome(options=options)
    def __del__(self):
        # Destructor only logs. NOTE(review): the webdriver is not quit here —
        # confirm cleanup happens elsewhere (e.g. driver.quit()).
        print("Delete")
    def start(self):
        """Entry point: log and begin scraping.

        NOTE(review): delegates to self.get_pro_desc(), which is defined
        further down in this class (outside this excerpt).
        """
        print('Start')
        self.get_pro_desc()
    def save_csv_file(self):
        """Append the current product to the shared list and rewrite the CSV dump.

        NOTE(review): products_list is a class attribute, shared by every
        instance (and thread) of ProductDescriptionGetter — confirm intended.
        """
        self.products_list.append(self.product.copy())
        dataframe = pd.DataFrame(self.products_list)
        # Rewrites the full dataset on every call (not an append).
        dataframe.to_csv(get_full_path("../data/dataset.csv"), index=False)
        # dataframe.to_json(get_full_path("../data/dataset.json"), orient='records')
        print(f'File saved! Records ==> {len(self.products_list)}')
def get_pro_title(self):
try:
title_pro: WebElement = self.driver.find_element_by_css_selector('div[itemprop="name"]').text
title_pro_str = str(str(title_pro).strip())
self.product['product_name'] = title_pro_str
print(f"Product title ==> {title_pro_str}")
except Exception as error:
print(f"Error! in getting pro Title ==> {error}")
def get_pro_price(self):
try:
price: WebElement = self.driver.find_element_by_css_selector('span[itemprop="price"]').text
print(f"Product price ==> {price}")
# self.product['price'] = price
return price
except Exception as error:
print(f"Error! in getting Price and currency {error}")
    def get_pro_description(self):
        """Extract and normalize the product description into product['description'].

        Pipeline: grab the description element's HTML, flatten it to text with
        BeautifulSoup, drop non-ASCII, collapse whitespace, then split on URL
        punctuation/encodings and re-join with single spaces.
        """
        try:
            description: WebElement = self.driver.find_element_by_css_selector('div#product-description')
            description_source_code = description.get_attribute("innerHTML")
            description_soup: BeautifulSoup = BeautifulSoup(description_source_code, 'html.parser')
            # encode/decode round-trip strips any non-ASCII characters.
            description_f = str(description_soup.get_text(' ')).encode('ascii', 'ignore').decode('utf-8')
            description_f = str(description_f.strip().replace('\n', ' ').replace(' ', ' ').replace(' ', ' '))
            # Split on separators and URL-encoded tokens, then re-join.
            check = re.split(r'=|%2F|%20|&|,|\t| {3}| {4}|;|:|/|\?', str(description_f).strip().lower())
            joined = ' '.join(check)
            self.product['description'] = joined.strip()
            print(f"Product description ==> {joined}")
        except Exception as error:
            print(f"Error! in getting Description {error}")
    def get_pro_photos(self):
        """Collect image URLs from the gallery and the description block.

        De-duplicated URLs are stored as product['IM1'], product['IM2'], ...
        """
        other_pic_list = []
        try:
            # Main image gallery thumbnails.
            product_gallery = self.driver.find_element_by_css_selector('ul.images-view-list')
            product_gallery_source_code = product_gallery.get_attribute("innerHTML")
            product_gallery_soup: BeautifulSoup = BeautifulSoup(product_gallery_source_code, 'html.parser')
            for i, pro_link_for in enumerate(product_gallery_soup.findAll('img')):
                try:
                    other_pic = pro_link_for['src']
                    # Strip any size-suffix after ".jpg" to get the base image.
                    single_pic = f"{other_pic.split('.jpg')[0]}.jpg"
                    other_pic_list.append(single_pic)
                except KeyError as error:
                    print(error)
            # Additional images embedded in the description block.
            try:
                product_descp = self.driver.find_element_by_css_selector('div#product-description')
                product_descp_source_code = product_descp.get_attribute("innerHTML")
                product_descp_soup: BeautifulSoup = BeautifulSoup(product_descp_source_code, 'html.parser')
                for pro_link_for in product_descp_soup.findAll('img'):
                    try:
                        single_pic = pro_link_for["src"]
                        other_pic_list.append(single_pic)
                    except KeyError as error:
                        print(error)
            except Exception as error:
                print(f"Error! in getting Photos in Description {error}")
            # dict.fromkeys de-duplicates while preserving order.
            rd_other_pic_list = list(dict.fromkeys(other_pic_list))
            for i, photo in enumerate(rd_other_pic_list):
                print(f"Product Photo ==> {photo}")
                self.product[f'IM{i + 1}'] = None
                self.product[f'IM{i + 1}'] = photo
        except Exception as error:
            print(f"Error! in getting Photos list {error}")
    def get_pro_quantity(self):
        """Return the first word of the quantity hint (as text), or None on failure."""
        try:
            quantity: WebElement = self.driver.find_element_by_css_selector('div.product-quantity-tip span').text
            print(f"Product Quantity ==> {str(quantity).split()[0]}")
            return str(quantity).split()[0]
        except Exception as error:
            print(f"Error! in getting brand ==> {error}")
            # NOTE(review): quitting the whole driver because the quantity
            # element is missing looks aggressive — confirm this is intended.
            self.driver.quit()
def get_pro_sku(self):
try:
sku = str(self.pro_url).split('.html')[0].split('/')[-1]
self.product['sku'] = sku
print(f'Product SKU ==> {sku}')
except Exception as e:
print(f'Product SKU not found => {e}')
    def remove_popup_banner(self):
        """Dismiss the site's popup dialog (if any) and nudge the page down."""
        try:
            self.driver.implicitly_wait(10)
            # Wait up to 20s for the dialog close button, then click it.
            WebDriverWait(self.driver, 20).until(EC.element_to_be_clickable(
                (By.CSS_SELECTOR, 'a.next-dialog-close'))).click()
            print('==> Clicked and remove popup banner')
            self.driver.implicitly_wait(5)
            self.driver.execute_script("window.scrollTo(0, 200)")
            sleep(1)
        except Exception as e:
            print('Error in clicking popup banner BTN : ' + str(e))
    def infinatescroll(self):
        """Scroll the page down in 1000px steps so lazy-loaded content appears."""
        try:
            sleep(2)
            self.driver.execute_script("window.scrollTo(0, 1000)")
            pre = 1000
            for i in range(0, 6):
                sleep(1)
                nextscr = pre + 1000
                pre = pre + 1000
                # NOTE(review): window.scrollTo(x, y) takes a horizontal first
                # argument; here pre == nextscr after the update, so this
                # scrolls to (y, y) — confirm the first argument should be 0.
                self.driver.execute_script(f"window.scrollTo({pre}, {nextscr});")
                print('Scrolling Down...')
                # Wait to load page
                sleep(1)
        except Exception as e:
            print('Error in scrolling : ' + str(e))
    def get_pro_reviews(self):
        """Open the review tab, switch into the reviews iframe, and store up
        to 10 review texts on self.product as review1..review10.

        On failure self.product['Review'] is set to a placeholder string.
        """
        try:
            self.driver.execute_script("window.scrollTo(0, 200)")
            self.driver.implicitly_wait(10)
            # NOTE(review): hard-coded absolute XPath to the review tab —
            # brittle against any page layout change.
            WebDriverWait(self.driver, 20).until(EC.element_to_be_clickable(
                (By.XPATH, '/html/body/div[5]/div/div[3]/div[2]/div[2]/div[1]/div/div[1]/ul/li[2]'))).click()
            print('==> Review tab Clicked')
            self.driver.implicitly_wait(5)
            self.driver.execute_script("window.scrollTo(200, 1200)")
        except Exception as error:
            print(f'Error in Clicking review tab ==>{error}')
        try:
            sleep(5)
            # Reviews live inside the #product-evaluation iframe.
            che = WebDriverWait(self.driver, 10).until(EC.frame_to_be_available_and_switch_to_it(
                self.driver.find_element_by_xpath("//*[@id='product-evaluation']")))
            print(f'==> Swithched to iframe {che}')
            print()
            sleep(3)
            review_elem_source_code = self.driver.execute_script("return document.body.innerHTML;")
            review_elem_soup: BeautifulSoup = BeautifulSoup(review_elem_source_code, 'html.parser')
            for i, review_raw in enumerate(review_elem_soup.select('div.feedback-item')):
                print('*' * 100)
                # Strip non-ASCII characters from the review text.
                review = str(review_raw.select_one('dt.buyer-feedback > span').get_text()).encode('ascii',
                                                                                                  'ignore').decode(
                    'utf-8').strip()
                print(f'Review {i + 1} ==> {review}')
                self.product[f'review{i + 1}'] = None
                self.product[f'review{i + 1}'] = review
                # Keep at most the first 10 reviews.
                if i >= 9:
                    break
            print('*' * 100)
            self.driver.switch_to.default_content()
        except Exception as error:
            self.product['Review'] = 'Reviews Not available'
            print(f'Error in getting Reviews ==>{error}')
    def get_varients(self):
        """Collect per-variant rows (price, quantity, photo) for the product.

        If the first two SKU property groups are "Ship…" + "Color" (in either
        order) the paired m1() walk is used; otherwise every option of the
        first property group is clicked in turn and snapshotted.
        """
        varients_list = self.driver.find_elements_by_css_selector('div.sku-property')
        if len(varients_list) > 1:
            check1 = str(varients_list[0].find_element_by_css_selector('div.sku-title').text).find('Ship')
            check2 = str(varients_list[1].find_element_by_css_selector('div.sku-title').text).find('Color')
            if check1 != -1 and check2 != -1:
                print('Method 1: Shipping and Color')
                self.m1(index1=0, index2=1)
                return None
            check1 = str(varients_list[0].find_element_by_css_selector('div.sku-title').text).find('Color')
            check2 = str(varients_list[1].find_element_by_css_selector('div.sku-title').text).find('Ship')
            if check1 != -1 and check2 != -1:
                print('Method 2: Color and Shipping')
                self.m1(index1=1, index2=0)
                return None
        print('Other Method')
        try:
            li_list = self.driver.find_elements_by_css_selector('ul.sku-property-list')[0]
            sleep(1)
            if len(li_list.find_elements_by_css_selector('li.sku-property-item')) > 1:
                # for i, li_click in enumerate(li_list.find_elements_by_css_selector('li.sku-property-item')[1:]):
                for i, li_click in enumerate(li_list.find_elements_by_css_selector('li.sku-property-item')):
                    try:
                        li_click.click()
                    except:
                        continue
                    try:
                        self.varent_product = self.product.copy()
                        self.varent_product['price'] = None
                        self.varent_product['variation'] = f'variation{i + 1}'
                        try:
                            variation_photo_source_code = li_click.get_attribute("innerHTML")
                            product_gallery_soup: BeautifulSoup = BeautifulSoup(variation_photo_source_code,
                                                                                'html.parser')
                            # NOTE(review): this enumerate reuses `i`, clobbering the
                            # outer variant index for later iterations — confirm intended.
                            for i, pro_link_for in enumerate(product_gallery_soup.findAll('img')):
                                try:
                                    self.varent_product['variation_photo'] = None
                                    other_pic = pro_link_for['src']
                                    single_pic = f"{other_pic.split('.jpg')[0]}.jpg"
                                    self.varent_product['variation_photo'] = single_pic
                                except KeyError as error:
                                    print(error)
                        except Exception as error:
                            print(f"Error! in variation photo ==> {error}")
                        try:
                            self.varent_product['quantity'] = None
                            self.varent_product['quantity'] = self.get_pro_quantity()
                        except Exception as error:
                            print(f"Error! in getting brand ==> {error}")
                        self.varent_product['price'] = self.get_pro_price()
                        self.products_list.append(self.varent_product.copy())
                    except Exception as error:
                        print(f"Error! in getting Price and currency {error}")
        except Exception as e:
            print('Error in clicking popup banner BTN : ' + str(e))
    def m1(self, index1, index2):
        """Record one variant row per (ship-from, color/type) combination.

        :param index1: index of the SKU property group holding ship-from options
        :param index2: index of the SKU property group holding color/type options
        """
        shipping_list = self.driver.find_elements_by_css_selector('ul.sku-property-list')[
            int(index1)].find_elements_by_css_selector('li.sku-property-item')
        for ship in shipping_list:
            try:
                ship.click()
            except:
                continue
            ship_from = str(ship.text).strip()
            print(ship_from)
            # The color/type option list is looked up fresh for every ship-from option.
            color_type_list = self.driver.find_elements_by_css_selector('ul.sku-property-list')[
                int(index2)].find_elements_by_css_selector('li.sku-property-item')
            for i, li_click in enumerate(color_type_list):
                try:
                    li_click.click()
                except:
                    continue
                try:
                    pro_single = self.product.copy()
                    pro_single['price'] = None
                    pro_single['price'] = self.get_pro_price()
                    pro_single['quantity'] = None
                    pro_single['quantity'] = self.get_pro_quantity()
                    pro_single['Ship_From'] = ship_from
                    pro_single['variation'] = f'variation{i + 1}'
                    try:
                        # product_gallery = self.driver.find_element_by_css_selector('ul.images-view-list')
                        variation_photo_source_code = li_click.get_attribute("innerHTML")
                        product_gallery_soup: BeautifulSoup = BeautifulSoup(variation_photo_source_code,
                                                                            'html.parser')
                        # NOTE(review): this enumerate reuses `i`, clobbering the
                        # outer variation index — confirm intended.
                        for i, pro_link_for in enumerate(product_gallery_soup.findAll('img')):
                            try:
                                pro_single['variation_photo'] = None
                                other_pic = pro_link_for['src']
                                single_pic = f"{other_pic.split('.jpg')[0]}.jpg"
                                pro_single['variation_photo'] = single_pic
                            except KeyError as error:
                                print(error)
                    except Exception as error:
                        print(f"Error! in variation photo ==> {error}")
                    self.products_list.append(pro_single.copy())
                    # color_type_list[0].click()
                except Exception as error:
                    print(f"Error! in getting Price and currency {error}")
            sleep(1)
    def get_pro_desc(self):
        """Drive the full scrape of one product page: load the URL, collect
        price/quantity/title/SKU, dismiss the popup, scroll to trigger lazy
        loading, gather description/reviews/photos/variants, persist to CSV,
        and finally quit the driver.
        """
        try:
            print(f"Getting Desc for URL ==> {self.pro_url}")
            self.driver.get(self.pro_url)
            sleep(5)
            self.product['price'] = self.get_pro_price()
            self.product['quantity'] = self.get_pro_quantity()
            self.get_pro_title()
            self.get_pro_sku()
            self.remove_popup_banner()
            self.infinatescroll()
            self.get_pro_description()
            self.get_pro_reviews()
            self.get_pro_photos()
            self.get_varients()
            self.save_csv_file()
            print(self.product)
            print('*' * 150)
            self.driver.quit()
        except Exception as error:
            print(f"Error in getting desctiption page ==> {error}")
thread = 1


def main():
    """Scrape all collected product links, `thread` links at a time, each in
    its own worker thread, joining the whole batch before starting the next."""
    for start in range(0, len(pro_links_list), thread):
        batch = pro_links_list[start:start + thread]
        workers = []
        for link in batch:
            try:
                scraper = ProductDescriptionGetter(link)
                worker = threading.Thread(target=scraper.start)
                worker.start()
                workers.append(worker)
            except Exception as error:
                print(f"Error in starting thread ==> {error}")
        for count, worker in enumerate(workers):
            print(f" joining Thread no ==> {count}")
            worker.join()


if __name__ == "__main__":
    main()
| mobinalhassan/Aliexpress | src/get_product_description.py | get_product_description.py | py | 16,931 | python | en | code | 0 | github-code | 13 |
def sum_all(lst):
    """Return the sum of every element in a 2-D list (list of rows).

    Fixes: the old version shadowed the built-in `sum` and carried a dead
    `result` variable plus an `if sum == 0` branch that never affected the
    returned value.
    """
    total = 0
    for row in lst:
        for value in row:
            total += value
    return total
if __name__ == "__main__":
    # Smoke test: the 1..9 grid should sum to 45.
    lst = [
        [1, 2, 3],
        [4, 5, 6],
        [7, 8, 9]
    ]
    print(sum_all(lst))
16268993604 | #zscore_conflict_analyzer
'''
By Collin A. O'Leary, Moss Lab, Iowa State University
This script will compare the Zavg values from a final partners file of one ScanFold structure model to the conflict list generated from the ct_compare.py script
The conflict list is any nt that had an alternitive structure from two compared structural models (i.e. an unconstrained and reactivity constrained model comparison)
Zavg values are binned from -2 to +2 at increments of 1 and then further divided based on their conflicts
The output will be a text file containing the percent agreement for each zscore bin
To use: python zscore_conflict_analyzer.py final_partners.txt ct_compare_output.txt output_name
'''
import sys
import os
filename1 = sys.argv[1] #This needs to be final partners file from ScanFold output
filename2 = sys.argv[2] #This is the output file from the ct_compare.py script... verify the slicing starts at the right index below
output_name = sys.argv[3] #Need to specify an output name here, no spaces allowed
with open(filename1 , 'r') as fp: #Open final partners file and seperate it by lines
fp_lines = fp.readlines()[1:]
with open(filename2 , 'r') as cp: #Open ct_compare.py output file and seperate it by lines starting at the confliclt list
cp_lines = cp.readlines()[19:]
conflicts = [] #A list to hold the conflicting nt position
for cp_line in cp_lines: #A loop to write the conflicting position to the list
data = cp_line.split()
conflicts.append(int(data[0]))
ntzs = [] #A list to hold the nt position and associate Zavg value from the final partners file
x = 1 #Used to keep track of nt position
for fp_line in fp_lines: #This loop writes the positin and Zavg values to the list
data = fp_line.split()
ntzs.append((x,float(data[4])))
x += 1
zsn2_sim = [] #A list to hold the position of nts with Zavg <-2 that are in agreement
zsn2_con = [] #A list to hold the position of nts with Zavg <-2 that are in conflict
zsn1_sim = [] #A list to hold the position of nts with Zavg >-2, <-1 that are in agreement
zsn1_con = [] #A list to hold the position of nts with Zavg >-2, <-1 that are in conclict
zsn0_sim = [] #A list to hold the position of nts with Zavg >-1, <0 that are in agreement
zsn0_con = [] #A list to hold the position of nts with Zavg >-1, <0 that are in conclict
zsp2_sim = [] #A list to hold the position of nts with Zavg >2 that are in agreement
zsp2_con = [] #A list to hold the position of nts with Zavg >2 that are in conflict
zsp1_sim = [] #A list to hold the position of nts with Zavg <2, >1 that are in agreement
zsp1_con = [] #A list to hold the position of nts with Zavg <2, >1 that are in conflict
zsp0_sim = [] #A list to hold the position of nts with Zavg <1, >0 that are in agreement
zsp0_con = [] #A list to hold the position of nts with Zavg <1, >0 that are in conflict
for x in ntzs: #A loop that iterates through the ntzs list, and first determines what the Zavg value is, then checks whether its corresponding position is in conflict. It then adds the position to the appropriate bin list
if x[1] <= -2:
if x[0] in conflicts:
zsn2_con.append(x[0])
else:
zsn2_sim.append(x[0])
elif x[1] <= -1:
if x[0] in conflicts:
zsn1_con.append(x[0])
else:
zsn1_sim.append(x[0])
elif x[1] <= 0:
if x[0] in conflicts:
zsn0_con.append(x[0])
else:
zsn0_sim.append(x[0])
elif x[1] >= 2:
if x[0] in conflicts:
zsp2_con.append(x[0])
else:
zsp2_sim.append(x[0])
elif x[1] >= 1:
if x[0] in conflicts:
zsp1_con.append(x[0])
else:
zsp1_sim.append(x[0])
elif x[1] >= 0:
if x[0] in conflicts:
zsp0_con.append(x[0])
else:
zsp0_sim.append(x[0])
else:
print('error!!')
if (len(zsn2_sim)+len(zsn2_con)) == 0: #This checks whether there are any values in either bin list, if there are no values then N/A, else it calculates the percent agreement of that bin
n2percent = "N/A"
else:
n2percent = ((len(zsn2_sim)/(len(zsn2_sim)+len(zsn2_con)))*100)
if (len(zsn1_sim)+len(zsn1_con)) == 0: #Same as above
n2n1percent = "N/A"
else:
n2n1percent = ((len(zsn1_sim)/(len(zsn1_sim)+len(zsn1_con)))*100)
if (len(zsn0_sim)+len(zsn0_con)) == 0: #Same as above
n1n0percent = "N/A"
else:
n1n0percent = ((len(zsn0_sim)/(len(zsn0_sim)+len(zsn0_con)))*100)
if (len(zsp0_sim)+len(zsp0_con)) == 0: #Same as above
p0p1percent = "N/A"
else:
p0p1percent = ((len(zsp0_sim)/(len(zsp0_sim)+len(zsp0_con)))*100)
if (len(zsp1_sim)+len(zsp1_con)) == 0: #Same as above
p1p2percent = "N/A"
else:
p1p2percent = ((len(zsp1_sim)/(len(zsp1_sim)+len(zsp1_con)))*100)
if (len(zsp2_sim)+len(zsp2_con)) == 0: #Same as above
p2percent = "N/A"
else:
p2percent = ((len(zsp2_sim)/(len(zsp2_sim)+len(zsp2_con)))*100)
with open(f"{output_name}.txt", "w") as file: #Creates and opens an outputfile with the specified output name, then writes the calculated metrics along with input file names
file.write(f'input final partner file:\t{filename1}\n')
file.write(f'input conflict list:\t{filename2}\n\n')
file.write(f'<=-2 nt avg. z-score similarity:\t{n2percent}%\n# of similar nt <= -2 zs:\t{len(zsn2_sim)}\n# of conflicting nt <= -2 zs:\t{len(zsn2_con)}\n\n')
file.write(f'>=-2, <=-1 nt avg. z-score similarity:\t{n2n1percent}%\n# of similar nt >=-2 and <=-1 zs:\t{len(zsn1_sim)}\n# of conflicting nt >=-2 and <=-1 zs:\t{len(zsn1_con)}\n\n')
file.write(f'>=-1, <=0 nt avg. z-score similarity:\t{n1n0percent}%\n# of similar nt >=-1 and <=0 zs:\t{len(zsn0_sim)}\n# of conflicting nt >=-1 and <=0 zs:\t{len(zsn0_con)}\n\n')
file.write(f'>=0, <=1 nt avg. z-score similarity:\t{p0p1percent}%\n# of similar nt >=0 and <=1 zs:\t{len(zsp0_sim)}\n# of conflicting nt >=0 and <=1 zs:\t{len(zsp0_con)}\n\n')
file.write(f'>=1, <=2 nt avg. z-score similarity:\t{p1p2percent}%\n# of similar nt >=1 and <=2 zs:\t{len(zsp1_sim)}\n# of conflicting nt >=1 and <=2 zs:\t{len(zsp1_con)}\n\n')
file.write(f'>=2 nt avg. z-score similarity:\t{p2percent}%\n# of similar nt >= 2 zs:\t{len(zsp2_sim)}\n# of conflicting nt >= 2 zs:\t{len(zsp2_con)}\n\n')
file.close() | moss-lab/SARS-CoV-2 | zscore_conflict_analyzer.py | zscore_conflict_analyzer.py | py | 6,935 | python | en | code | 0 | github-code | 13 |
34757124192 | import random, functools
import numpy as np
@functools.cache
def targs(q):
    """Return each player's optimal target in the n-player duel with hit
    probabilities q (a tuple, so results can be memoized).

    Player i picks the target j whose elimination maximizes i's winning
    probability in the resulting (n-1)-player game; ties go to the
    highest-indexed j because of the >= comparison.
    """
    p = list(q)
    n = len(p)
    targets = []
    # Loop through players
    for i in range(n):
        maxx = 0
        target = 0
        # Loop through potential targets
        for j in list(range(i)) + list(range(i+1,n)):
            # Hypothetical game after player j is removed from the lineup.
            newP = tuple(p[:j]+p[j+1:])
            newStart, newWho = getInputs(n, i, i, j)
            newMaxx = f(newP)[newWho][newStart]
            if newMaxx >= maxx:
                maxx = newMaxx
                target = j
        targets.append(target)
    return targets
@functools.cache
def f(q):
    """Winning-probability matrix for hit probabilities q (a tuple, for
    memoization): entry [who][start] is the probability that player `who`
    wins when player `start` shoots first, everyone aiming at the targets
    chosen by targs() (see main(), which reads x[i][start]).
    """
    p = list(q)
    n = len(p)
    # Base case (2 people)
    if n == 2:
        answer = np.diag(p)@ np.array([[1,1-p[1]],[1-p[0],1]])
        answer /= (1 - (1-p[0])*(1-p[1]))
    else:
        # Set the targets
        targets = targs(q)
        # Construct matrix V
        V = np.zeros(shape = (n,n))
        for j in range(n):
            t = targets[j]
            newP = tuple(p[:t]+p[t+1:])
            temp = f(newP)
            for i in range(targets[j]):
                newStart, newWho = getInputs(n, j, i, t)
                V[i][j] = temp[newWho][newStart]
            for i in range(targets[j]+1,n):
                newStart, newWho = getInputs(n, j, i, t)
                V[i][j] = temp[newWho][newStart]
        # Construct matrix P
        P = np.diag(p)
        # Construct matrix Q
        Q = np.zeros(shape = (n,n))
        mult = 1
        # Loop over who is first player
        for j in range(n):
            if j == 0:
                # Loop over who is first to hit their target
                for i in range(n):
                    Q[i][0] = mult
                    mult *= 1-p[i]
            else:
                for i in range(n):
                    Q[i][j] = Q[i][j-1] / (1 - p[j-1])
                Q[j-1][j] = mult / (1 - p[j-1])
        answer = V@P@Q
        answer /= (1 - mult)
    return answer
def getInputs(n, start, who, kill):
    """Re-index players after player kill+1 is shot by player start+1.

    Returns (newStart, newWho): the index of the next shooter and the new
    index of the player we are tracking, both within the (n-1)-player game
    that results from the elimination.
    """
    newStart = (start + 1) % (n - 1) if kill > start else start % (n - 1)
    newWho = who if who < kill else who - 1
    return newStart, newWho
def main(p, start = 0):
    """Print each player's winning probability and initial target at
    equilibrium, given hit probabilities p and first shooter index `start`."""
    win_probs = f(tuple(p))
    targets = targs(tuple(p))
    for idx in range(len(p)):
        print('Probability of player', idx + 1, 'winning:', win_probs[idx][start])
    for idx in range(len(p)):
        print('Player', idx + 1, 'targets player', targets[idx] + 1)
def getProbs():
    """Prompt the user for the player count and each player's hit probability."""
    count = int(input('How many players are there? '))
    probs = []
    for player in range(1, count + 1):
        shot = input('Enter the probability of player %s hitting their shot as a decimal: ' % player)
        probs.append(float(shot))
    return probs
if __name__ == "__main__":
    main(getProbs())
    # Pause so the console window stays open until Enter is pressed.
    input()
| DeclanStacy/nPersonDuel | truelGeneralization3.py | truelGeneralization3.py | py | 3,117 | python | en | code | 0 | github-code | 13 |
70458223377 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from sklearn.datasets import make_classification
def get_synthetic(n_samples, n_features, n_classes, n_informative=None, n_clusters_per_class=None, flip_y=None, class_sep=None):
    """Generate a synthetic classification dataset via sklearn's make_classification.

    Defaults when an argument is None: n_informative=n_features,
    n_clusters_per_class=1, flip_y=0.1, class_sep=1.0.

    :return: (data_x, data_y) as float32 features and int64 labels.
    """
    # Fix: use `is None` checks instead of `x or default`, so explicit zeros
    # (e.g. flip_y=0.0 or class_sep=0.0) are honored rather than silently
    # replaced by the default.
    if n_informative is None:
        n_informative = n_features
    if n_clusters_per_class is None:
        n_clusters_per_class = 1
    if flip_y is None:
        flip_y = 0.1
    if class_sep is None:
        class_sep = 1.0
    params = {
        "n_samples": n_samples,
        "n_features": n_features,
        "n_informative": n_informative,
        "n_redundant": 0,
        "n_repeated": 0,
        "n_classes": n_classes,
        "n_clusters_per_class": n_clusters_per_class,
        "weights": None,
        "flip_y": flip_y,
        "class_sep": class_sep,
    }
    data_x, data_y = make_classification(**params)
    return data_x.astype("float32"), data_y.astype("int64")
| codeislife99/learning_to_optimize | l2o/dataset.py | dataset.py | py | 1,067 | python | en | code | 1 | github-code | 13 |
34165786977 | import jinja2
import PyRSS2Gen
from aiohttp import web
from markupsafe import Markup
from datetime import datetime
import dateutil.parser
import json
import settings
def RunServ(serve_static = False, serve_storage = False, serve_js = False):
    """Build and return the aiohttp application with all routes registered."""
    app = App()
    # Page routes, registered in this order; the catch-all 404 route is added last.
    pages = [
        ('/', page_index),
        ('/projects', page_projects),
        ('/places', page_places),
        ('/downloads', page_downloads),
        ('/downloads/software', page_downloads_software),
        ('/downloads/cursors', page_downloads_cursors),
        ('/about', page_about),
        ('/about/contact', page_contact),
        ('/computers', page_my_computers),
        ('/mcsrv', page_mc_srv),
        ('/mcsrv/rules', page_mc_srv_rules),
        ('/mcsrv/plugins', page_mc_srv_plugins),
        ('/blog', page_blog),
        ('/blog/rss', blog_rss),
        ('/discord', page_discord_server_redir),
        ('/projects/pubsite', page_pubsite_details),
        ('/projects/pubsite/ssgallery', page_pubsite_ssgallery),
    ]
    if settings.TESTING:
        pages.append(('/testing', page_testing))
        pages.append(('/testing/too', page_testing_too))
    for path, handler in pages:
        app.router.add_get(path, handler)
    # Optionally serve on-disk assets directly from this process.
    if serve_static:
        app.router.add_static('/static', 'static')
    if serve_storage:
        app.router.add_static('/storage', 'storage')
    if serve_js:
        app.router.add_static('/js', 'javascript')
    # Catch-all 404 handler must come after every other route.
    app.router.add_route('*', '/{path:.*}', handle_404)
    app.jinja_env = jinja2.Environment(
        loader = jinja2.FileSystemLoader('tmpl'),
        autoescape = jinja2.select_autoescape(default = True),
    )
    return app
class App(web.Application):
    """Thin aiohttp Application subclass; app-wide attributes (e.g. jinja_env)
    are attached to instances by RunServ()."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
async def page_index(req):
    """Serve the home page at /."""
    return render(req, 'index.html')
async def page_projects(req):
    """Serve the projects overview page."""
    return render(req, 'projects.html', {
        'title': 'Projects'
    })
async def page_places(req):
    """Serve the places page."""
    return render(req, 'places.html', {
        'title': 'Places'
    })
async def page_downloads(req):
    """Serve the downloads landing page."""
    return render(req, 'downloads.html', {
        'title': 'Downloads'
    })
async def page_downloads_software(req):
    """Serve the software downloads page."""
    return render(req, 'downloads.software.html', {
        'title': 'Downloads | Software'
    })
async def page_downloads_cursors(req):
    """Serve the cursor downloads page."""
    return render(req, 'downloads.cursors.html', {
        'title': 'Downloads | Cursors'
    })
async def page_my_computers(req):
    """Serve the "my computers" page."""
    return render(req, 'computers.html', {
        'title': 'My computers'
    })
async def page_about(req):
    """Serve the about page."""
    return render(req, 'about.html', {
        'title': 'About me'
    })
async def page_contact(req):
    """Serve the contact info page."""
    return render(req, 'about.contact.html', {
        'title': 'Contact info | About me'
    })
async def page_mc_srv(req):
    """Serve the Minecraft server page."""
    return render(req, 'minecraft.srv.html', {
        'title': 'MC server'
    })
async def page_mc_srv_rules(req):
    """Serve the Minecraft server rules page."""
    return render(req, 'minecraft.rules.html', {
        'title': 'MC rules',
    })
async def page_mc_srv_plugins(req):
    """Serve the Minecraft server plugins page."""
    return render(req, 'minecraft.plugins.html', {
        'title': 'MC plugins',
    })
async def page_discord_server_redir(req):
    """Serve the Discord server page."""
    return render(req, 'discord.html', {
        'title': 'Discord server',
    })
async def page_pubsite_details(req):
    """Serve the Pubsite project details page."""
    return render(req, 'projects.pubsite.html', {
        'title': 'Pubsite | Projects'
    })
async def page_pubsite_ssgallery(req):
    """Serve the Pubsite screenshot gallery page."""
    return render(req, 'projects.pubsite.ssgallery.html', {
        'title': 'Screenshot Gallery | Pubsite | Projects'
    })
async def page_testing(req):
    """Serve test page 1 (route registered only when settings.TESTING is set)."""
    return render(req, 'testing.html', {
        'title': 'Testing | Page 1'
    })
async def page_testing_too(req):
    """Serve test page 2 (route registered only when settings.TESTING is set)."""
    return render(req, 'testing.too.html', {
        'title': 'Testing | Page 2'
    })
async def page_blog(req):
    """Render the blog page from json/posts.json (ISO date -> list of HTML items)."""
    # The `with` block closes the file; the old explicit close() was redundant.
    with open('json/posts.json', 'rb') as bp:
        bp_json = json.loads(bp.read())
    # Fetch both templates once instead of re-fetching them for every post.
    item_tmpl = req.app.jinja_env.get_template('blog.post.item.html')
    post_tmpl = req.app.jinja_env.get_template('blog.post.html')
    entries = []
    for date, items in bp_json.items():
        items_markup = [item_tmpl.render(item = Markup(item)) for item in items]
        entries.append(post_tmpl.render(date = dateutil.parser.isoparse(date).strftime('%Y-%m-%d'),
                                        items = Markup('\n'.join(items_markup))))
    return render(req, 'blog.html', {
        'title': 'Blog',
        'entries': Markup('\n'.join(entries))
    })
async def blog_rss(req):
    """Serve the blog as an RSS 2.0 feed built from json/posts.json."""
    # The `with` block closes the file; the old explicit close() was redundant.
    with open('json/posts.json', 'rb') as bp:
        bp_json = json.loads(bp.read())
    # One RSS item per dated post; each description concatenates the post's
    # HTML snippets, one per line.
    rss = PyRSS2Gen.RSS2(
        title = "HIDEN's Blog",
        link = "https://hiden.pw/blog",
        description = "My blog, where I post about... well... things.",
        generator = "PyRSS2Gen",
        docs = "https://validator.w3.org/feed/docs/rss2.html",
        language = "en-us",
        webMaster = "hiden64@protonmail.com",
        lastBuildDate = datetime.utcnow(),
        items = [
            PyRSS2Gen.RSSItem(
                title = dateutil.parser.isoparse(date).strftime('%Y-%m-%d'),
                description = ''.join(['{}\n'.format(entry) for entry in entries]),
                pubDate = dateutil.parser.isoparse(date),
            ) for date, entries in bp_json.items()
        ]
    )
    return web.Response(status = 200, content_type = 'text/xml', text = rss.to_xml(encoding = 'utf-8'))
async def handle_404(req):
    """Catch-all handler: render the 404 page with HTTP status 404."""
    context = {'title': 'Page not found'}
    return render(req, '404.html', context, status = 404)
def render(req, tmpl, ctxt = None, status = 200):
    """Render a Jinja template to an HTML response.

    :param req: incoming request (used to reach the app's jinja_env)
    :param tmpl: template file name under tmpl/
    :param ctxt: template context dict (treated as empty when None)
    :param status: HTTP status code for the response
    """
    template = req.app.jinja_env.get_template(tmpl)
    context = {} if ctxt is None else ctxt
    body = template.render(**context)
    return web.Response(status = status, content_type = 'text/html', text = body)
| dkay0670/hidens-website | site_ctrl.py | site_ctrl.py | py | 5,640 | python | en | code | 0 | github-code | 13 |
72096934097 | """
The experiment_wrapper module creates a level of abstraction between the control of actual instruments and control of
the entire experiment as a whole. For example, instead of initializing each instrument on its own and then setting
settings like the lock-in reference input, the initialize_instruments() function does all of this automatically.
"""
import numpy as np
from instruments import SR830, Agilent33220A, PasternackPE11S390, Agilent34401A
from inst_io import Instrument, Prologix
# Module-level instrument handles and settings, populated by initialize().
freq_synth = None
lock_in = None
func_gen = None
multimeter = None
gpib_manager = None
# Product of the external frequency multipliers in the signal chain.
freq_multiple = None
def get_freq_synth_enable():
    """Return True when the frequency synthesizer RF output is on, False otherwise."""
    return freq_synth.get_output_state() == PasternackPE11S390.OUTPUT_STATE_ON
def set_freq_synth_enable(enable=False):
    """Switch the frequency synthesizer RF output on or off.

    :param enable: True to enable the output, False to disable it.
    """
    state = PasternackPE11S390.OUTPUT_STATE_ON if enable else PasternackPE11S390.OUTPUT_STATE_OFF
    freq_synth.set_output_state(state)
def get_freq_synth_freq():
    """
    Returns the effective output frequency in GHz: the synthesizer's frequency
    multiplied by the external multiplier factor (freq_multiple).
    """
    return freq_synth.get_frequency() * freq_multiple
def set_freq_synth_frequency(freq=200):
    """
    Sets the frequency synthesizer to continuous wave mode at the specified frequency where units are in GHz. This will
    automatically divide the ranges by freq_multiple (as the source is attached to a set of frequency multipliers).

    :param freq: The desired effective frequency in GHz (after multiplication)
    """
    freq_synth.set_frequency(freq / freq_multiple)
def get_freq_multiplier():
    """
    Returns the frequency multiplier, which can be changed depending on the experimental setup.

    :return: The multiplication factor used by get/set_freq_synth_frequency
    """
    return freq_multiple
def set_freq_multiplier(multiple = 18):
    """
    Sets the frequency multiplier, which should be equal to the product of the frequency multipliers present in the
    experimental setup. Affects get_freq_synth_freq() and set_freq_synth_frequency().

    :param multiple: The overall multiplication factor of the multiplier chain
    """
    global freq_multiple
    freq_multiple = multiple
def get_freq_synth_power():
    """
    Gets the power level of the frequency synthesizer in dBm.

    :return: The power level in dBm
    """
    return freq_synth.get_power()
def set_freq_synth_power(power_level=0.0):
    """
    Sets the power level of the frequency synthesizer in dBm.

    :param power_level: The power level in dBm
    """
    freq_synth.set_power(power_level)
def get_chopper_frequency():
    """
    Gets the chopper frequency in kHz (the function generator reports Hz).

    :return: The chopper frequency in kHz
    """
    return func_gen.get_wave_frequency() / 1000.0
def set_chopper_frequency(freq=10):
    """
    Sets the chopper frequency in kHz (converted to Hz for the function generator).

    :param freq: The frequency in kHz
    """
    func_gen.set_wave_frequency(freq * 1000.0)
def get_chopper_amplitude():
    """
    Returns the chopper drive amplitude in volts, as reported by the function generator.
    """
    return func_gen.get_wave_amplitude()
def set_chopper_amplitude(amplitude=0.5):
    """
    Sets the chopper drive amplitude in volts on the function generator.

    :param amplitude: The amplitude in volts
    """
    func_gen.set_wave_amplitude(amplitude)
def set_chopper_on(turn_on=False):
    """Enable the function generator output when turn_on is True, disable it otherwise.

    :param turn_on: True to turn the chopper output on
    """
    state = Agilent33220A.STATE_ON if turn_on else Agilent33220A.STATE_OFF
    func_gen.set_output_state(state)
# SR830 sensitivity setting code -> full-scale sensitivity in mV.
_SENSITIVITY_DICT = {0: 0.000002,
                     1: 0.000005,
                     2: 0.00001,
                     3: 0.00002,
                     4: 0.00005,
                     5: 0.0001,
                     6: 0.0002,
                     7: 0.0005,
                     8: 0.001,
                     9: 0.002,
                     10: 0.005,
                     11: 0.01,
                     12: 0.02,
                     13: 0.05,
                     14: 0.1,
                     15: 0.2,
                     16: 0.5,
                     17: 1.0,
                     18: 2.0,
                     19: 5.0,
                     20: 10.0,
                     21: 20.0,
                     22: 50.0,
                     23: 100.0,
                     24: 200.0,
                     25: 500.0,
                     26: 1000.0}
def get_sensitivity():
    """
    Returns the current sensitivity of the lock-in in mV.

    :return: The sensitivity in mV, or None if the lock-in reports an unknown code
    """
    return _SENSITIVITY_DICT.get(lock_in.get_sensitivity())
def set_sensitivity(sensitivity=1000.0):
    """
    Sets the sensitivity of the lock-in in mV. The lock-in has a set of allowed sensitivities; the first allowed
    sensitivity that is >= the requested one is chosen (the largest if none is).

    :param sensitivity: The preferred sensitivity in mV
    :return: The chosen sensitivity in mV
    """
    # Fix: dict.iteritems() is Python-2-only and dict iteration order is not a
    # contract; iterate the allowed values in explicitly ascending order.
    sens_key = 26
    for key, value in sorted(_SENSITIVITY_DICT.items(), key=lambda kv: kv[1]):
        if sensitivity <= value:
            sens_key = key
            break
    lock_in.set_sensitivity(sens_key)
    return _SENSITIVITY_DICT.get(sens_key)
# SR830 time-constant setting code -> time constant in ms.
_TIME_CONSTANT_DICT = {0: 0.01,
                       1: 0.03,
                       2: 0.1,
                       3: 0.3,
                       4: 1,
                       5: 3,
                       6: 10,
                       7: 30,
                       8: 100,
                       9: 300,
                       10: 1 * (10 ** 3),
                       11: 3 * (10 ** 3),
                       12: 10 * (10 ** 3),
                       13: 30 * (10 ** 3),
                       14: 100 * (10 ** 3),
                       15: 300 * (10 ** 3),
                       16: 1 * (10 ** 6),
                       17: 3 * (10 ** 6),
                       18: 10 * (10 ** 6),
                       19: 30 * (10 ** 6)}
def get_sync_enabled():
    """
    Returns true if the lock-in synchronous filter is enabled.

    :return: True if the synchronous filter is enabled.
    """
    return SR830.SYNC_FILTER_ON == lock_in.get_synchronous_filter_status()
def set_sync_enabled(enable):
    """Turn the lock-in synchronous filter on (True) or off (False).

    :param enable: True to enable the synchronous filter
    """
    status = SR830.SYNC_FILTER_ON if enable else SR830.SYNC_FILTER_OFF
    lock_in.set_synchronous_filter_status(on_off=status)
def get_time_constant():
    """
    Returns the current time constant of the lock-in in ms.

    :return: The time constant in ms, or None if the lock-in reports an unknown code
    """
    return _TIME_CONSTANT_DICT.get(lock_in.get_time_constant())
def set_time_constant(time_constant=1000):
    """
    Sets the time constant of the lock-in in ms. The lock-in has a set of allowed time constants; the first allowed
    time constant that is >= the requested one is chosen (the largest if none is).

    :param time_constant: The preferred time constant in ms
    :return: The chosen time constant in ms
    """
    # Fix: dict.iteritems() is Python-2-only and dict iteration order is not a
    # contract; iterate the allowed values in explicitly ascending order.
    time_const_key = 19
    for key, value in sorted(_TIME_CONSTANT_DICT.items(), key=lambda kv: kv[1]):
        if time_constant <= value:
            time_const_key = key
            break
    lock_in.set_time_constant(time_const_key)
    return _TIME_CONSTANT_DICT.get(time_const_key)
# SR830 low-pass filter slope constant -> slope in dB per octave.
_LOW_PASS_SLOPE = {SR830.LOW_PASS_FILTER_SLOPE_6dB_PER_OCT: 6,
                   SR830.LOW_PASS_FILTER_SLOPE_12dB_PER_OCT: 12,
                   SR830.LOW_PASS_FILTER_SLOPE_18dB_PER_OCT: 18,
                   SR830.LOW_PASS_FILTER_SLOPE_24dB_PER_OCT: 24}
def get_low_pass_slope():
    """
    Returns the current low pass filter slope of the lock-in in dB per octave.

    :return: The slope in dB per octave, or None if the lock-in reports an unknown code
    """
    return _LOW_PASS_SLOPE.get(lock_in.get_low_pass_filter_slope())
def set_low_pass_slope(slope=18):
    """
    Sets the low pass filter slope of the lock-in in dB per octave. The lock-in has a set of allowed slopes; the
    largest allowed slope that is <= the requested one is chosen (6 dB per octave if none is).

    :param slope: The preferred slope in dB per octave
    :return: The chosen slope in dB per octave
    """
    # Fix: dict.iteritems() is Python-2-only and dict iteration order is not a
    # contract; iterate the allowed slopes in explicitly ascending value order.
    slope_key = SR830.LOW_PASS_FILTER_SLOPE_6dB_PER_OCT
    for key, value in sorted(_LOW_PASS_SLOPE.items(), key=lambda kv: kv[1]):
        if slope >= value:
            slope_key = key
        else:
            break
    lock_in.set_low_pass_filter_slope(slope_key)
    return _LOW_PASS_SLOPE.get(slope_key)
# SR830 sample-rate setting code -> sample rate in Hz.
_SAMPLE_RATE_DICT = {0: 0.0625,
                     1: 0.125,
                     2: 0.25,
                     3: 0.5,
                     4: 1,
                     5: 2,
                     6: 4,
                     7: 8,
                     8: 16,
                     9: 32,
                     10: 64,
                     11: 128,
                     12: 256,
                     13: 512}
def get_sample_rate():
    """
    Returns the current sample rate of the lock-in in Hz.

    :return: The sample rate in Hz, or None if the lock-in reports an unknown code
    """
    return _SAMPLE_RATE_DICT.get(lock_in.get_sample_rate())
def set_sample_rate(sample_rate=512):
    """
    Sets the sample rate of the lock-in in Hz. The lock-in has a set of allowed sample rates; the first allowed rate
    that is >= the requested one is chosen (the largest if none is).

    :param sample_rate: The preferred sample rate in Hz
    :return: The chosen sample rate in Hz
    """
    # Fix: dict.iteritems() is Python-2-only and dict iteration order is not a
    # contract; iterate the allowed values in explicitly ascending order.
    sample_rate_key = 13
    for key, value in sorted(_SAMPLE_RATE_DICT.items(), key=lambda kv: kv[1]):
        if sample_rate <= value:
            sample_rate_key = key
            break
    lock_in.set_sample_rate(sample_rate_key)
    return _SAMPLE_RATE_DICT.get(sample_rate_key)
def get_time_to_fill():
    """
    Returns the time needed to fill the lock-in's storage buffer in seconds.

    :return: The time needed to fill storage in seconds
    """
    return lock_in.get_storage_time()
def get_multimeter_dc_measurement():
    """
    Returns the DC voltage currently measured by the multimeter.

    :return: The DC voltage.
    """
    return multimeter.get_dc_voltage_measurement()
def snap_data():
    """Simultaneously read the X and Y outputs of the lock-in amplifier.

    :return: A tuple (x, y) in volts.
    """
    snapshot = lock_in.snap_values(['X', 'Y'])
    return snapshot.get('X'), snapshot.get('Y')
def start_scan():
    """Reset the lock-in data buffer and begin collecting a new scan.

    :return: The time needed to fill the storage buffer, in seconds.
    """
    lock_in.reset_scan()
    fill_time = lock_in.get_storage_time()
    lock_in.start_scan()
    return fill_time
def stop_scan():
    """
    Pauses the lock-in's current scan.
    """
    lock_in.pause_scan()
def get_data():
    """Fetch the full recorded buffer from the lock-in.

    :return: A 2-row numpy array: channel 1 scanned data on top of channel 2
             scanned data.
    """
    length = lock_in.get_scanned_data_length()
    ch1 = lock_in.get_channel1_scanned_data(0, length)
    ch2 = lock_in.get_channel2_scanned_data(0, length)
    return np.array([ch1, ch2])
def _convert_raw_sweep_data_to_frequency(raw_data):
"""
Converts DC voltage data (where the voltage is proportional to the current frequency of the sweep oscillator) to
frequency data in Hz.
:param raw_data: A list of 'raw' data, in other words a list of DC voltages
:return: A list of frequency data
"""
frequency_data = []
for raw_data_point in raw_data:
# For each data point multiply by 1/10 * 20.40GHz (i.e. 20.40 * 10^9)
frequency_data.append(float(raw_data_point) * (1.0 / 10.0) * (20.40 * (10 ** 9)))
return frequency_data
def set_data(col1='X', col2='Y'):
    """
    Sets the quantities recorded in columns 1 and 2 of the lock-in display.
    Unrecognised names leave the corresponding channel unchanged, as before.

    :param col1: Either 'X', 'R', 'X noise', 'Aux1', or 'Aux2'
    :param col2: Either 'Y', 'Theta', 'Y noise', 'Aux3', or 'Aux4'
    """
    channel1_options = {
        'X': SR830.DISPLAY_CHANNEL1_X,
        'R': SR830.DISPLAY_CHANNEL1_R,
        'X noise': SR830.DISPLAY_CHANNEL1_X_NOISE,
        'Aux1': SR830.DISPLAY_CHANNEL1_AUX1,
        'Aux2': SR830.DISPLAY_CHANNEL1_AUX2,
    }
    channel2_options = {
        'Y': SR830.DISPLAY_CHANNEL2_Y,
        'Theta': SR830.DISPLAY_CHANNEL2_THETA,
        'Y noise': SR830.DISPLAY_CHANNEL2_Y_NOISE,
        'Aux3': SR830.DISPLAY_CHANNEL2_AUX3,
        'Aux4': SR830.DISPLAY_CHANNEL2_AUX4,
    }
    if col1 in channel1_options:
        lock_in.set_channel1_display(channel1_options[col1])
    if col2 in channel2_options:
        lock_in.set_channel2_display(channel2_options[col2])
def initialize():
    """
    Initializes the instruments and prepares the relevant settings.

    Creates the module-level instrument handles (freq_synth, lock_in,
    func_gen, multimeter), opens them, and configures the lock-in and
    function generator for the measurement.  Call this before any other
    function in this module.  The sequence is order-dependent: instruments
    must be opened before initialize_instrument() is called on them.
    """
    # Use global variables
    global freq_synth
    global lock_in
    global func_gen
    global multimeter
    global gpib_manager
    global freq_multiple
    # Create new ConnectionManagers to deal with all of the instruments being used.
    gpib_manager = Prologix(port='/dev/ttyUSB0')
    # Instantiate each instrument
    # NOTE(review): GPIB addresses 8/10/28 and the device paths are
    # hard-coded for this bench setup — confirm before reusing elsewhere.
    freq_synth = PasternackPE11S390('/dev/usbtmc0', Instrument.CONNECTION_TYPE_USB, gpib_manager)
    lock_in = SR830(8, Instrument.CONNECTION_TYPE_PROLOGIX_GPIB, gpib_manager)
    func_gen = Agilent33220A(10, Instrument.CONNECTION_TYPE_PROLOGIX_GPIB, gpib_manager)
    multimeter = Agilent34401A(28, Instrument.CONNECTION_TYPE_PROLOGIX_GPIB, gpib_manager)
    # Name each instrument
    freq_synth.set_name('Frequency Synthesizer')
    lock_in.set_name('Lock-In')
    func_gen.set_name('Function Generator')
    multimeter.set_name('Multimeter')
    # Open each instrument
    freq_synth.open()
    lock_in.open()
    func_gen.open()
    multimeter.open()
    # Initialize the multimeter
    multimeter.initialize_instrument()
    # Initialize the frequency synthesizer
    freq_synth.initialize_instrument()
    # Initialize the lock-in, reset, set the reference source and trigger, set what happens when the data buffer is full, and set the display and data recording settings.
    lock_in.initialize_instrument()
    lock_in.reset()
    lock_in.set_input_shield_grounding(SR830.INPUT_SHIELD_GROUNDING_GROUND)
    lock_in.set_input_coupling(SR830.INPUT_COUPLING_AC)
    lock_in.set_input_configuration(SR830.INPUT_CONFIGURATION_A)
    lock_in.set_input_notch_line_filter(SR830.INPUT_NOTCH_OUT_OR_NO)
    lock_in.set_reserve_mode(SR830.RESERVE_MODE_LOW_NOISE)
    lock_in.set_reference_source(SR830.REFERENCE_SOURCE_EXTERNAL)
    lock_in.set_reference_trigger_mode(SR830.REFERENCE_TRIGGER_MODE_TTL_RISING_EDGE)
    lock_in.set_trigger_mode(SR830.TRIGGER_START_MODE_OFF)
    lock_in.set_end_of_buffer_mode(SR830.END_OF_BUFFER_SHOT)
    lock_in.set_channel1_output(SR830.CHANNEL1_OUTPUT_DISPLAY)
    lock_in.set_channel2_output(SR830.CHANNEL2_OUTPUT_DISPLAY)
    # Initialize the function generator and set the trigger source to software
    func_gen.set_wave_type(Agilent33220A.WAVE_TYPE_SQUARE)
    func_gen.set_output_state(Agilent33220A.STATE_OFF)
    func_gen.set_sweep_state(Agilent33220A.STATE_OFF)
    # Set freq_multiple to 18, as is standard with this experiment
    freq_multiple = 18.0
def close():
    """
    Closes every instrument opened by ``initialize()``.

    Fix: the multimeter is opened in ``initialize()`` but was never closed
    here, leaking its connection; it is now closed as well.
    """
    freq_synth.close()
    lock_in.close()
    func_gen.close()
    multimeter.close()
def _command_line(address, connection_manager):
    """Interactive prompt for sending raw commands to a single instrument."""
    inst = Instrument(connection_manager, address)
    inst.open()
    while True:
        entry = input("Type 'EXIT' to stop, 'QUERY [command]' to query, 'WRITE [command]' to write, and 'READ' to read.\n(Note the prompt is not case sensitive.)\n")
        # Split off the first word; payload keeps its original case.
        verb, sep, payload = entry.partition(' ')
        if entry.lower() == 'exit':
            break
        if entry.lower() == 'read':
            print(inst.read())
        elif sep and verb.lower() == 'query':
            print(inst.query(payload))
        elif sep and verb.lower() == 'write':
            print(inst.write(payload))
        else:
            print("Please use one of the commands specified")
    inst.close()
| jc-roth/Microwave-Transmission-Experiment | setup_control/setup_control/experiment_wrapper.py | experiment_wrapper.py | py | 16,232 | python | en | code | 0 | github-code | 13 |
40640205301 | #!/usr/bin/env python3
"""
Provide a list of interferograms to compute the timeseries using
the short baseline subset (SBAS) approach, with or without
regularization.
"""
### IMPORT MODULES ---
import os
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from osgeo import gdal
from viewingFunctions import mapPlot, imagettes
### PARSER ---
def createParser():
    """Build the command-line argument parser for the SBAS timeseries script."""
    import argparse
    p = argparse.ArgumentParser(description='Compute the SBAS timeseries.')
    # Data sets
    p.add_argument('-f', '--files', dest='files', type=str, required=True,
                   help='Files to be analyzed')
    p.add_argument('-no-reg', '--no-regularization', dest='regularization',
                   action='store_false', help='Regularization ([True]/False)')
    # Reference point
    p.add_argument('-refLaLo', dest='refLaLo', type=float, nargs=2,
                   help='Reference lat/lon, e.g., 37.5 90.0')
    p.add_argument('-refYX', dest='refYX', type=int, nargs=2,
                   help='Reference Y/X, e.g., 45 119')
    p.add_argument('--no-ref', dest='noRef', action='store_true',
                   help='Do not use a reference point. Must be explicit.')
    # Plotting specs
    p.add_argument('-pctmin', dest='pctmin', type=float, default=0,
                   help='Min percent clip')
    p.add_argument('-pctmax', dest='pctmax', type=float, default=100,
                   help='Max percent clip')
    p.add_argument('-bg', '--background', dest='background', default=None,
                   help='Background value')
    # Outputs
    p.add_argument('-v', '--verbose', dest='verbose', action='store_true',
                   help='Verbose mode')
    p.add_argument('--plot-inputs', dest='plotInputs', action='store_true',
                   help='Plot input interferograms')
    p.add_argument('-o', '--outName', dest='outName', type=str, default=None,
                   help='Output name, for difference map and analysis plots')
    return p
def cmdParser(iargs = None):
    """Parse command-line arguments (or the explicit list *iargs*)."""
    return createParser().parse_args(args=iargs)
### OBJECTS ---
## Object for passing parameters
class TSparams:
    """Empty parameter container.

    NOTE(review): unused in this chunk; apparently intended as a plain
    namespace whose attributes are attached ad hoc by callers.
    """
    def __init__(self):
        # Intentionally empty — no fixed attributes are defined here.
        pass
### LOAD DATA ---
## Load data from geotiff files
def loadARIAdata(inpt):
    """Load interferogram GeoTIFFs matched by ``inpt.files`` into a 3D stack.

    Side effects on *inpt*: sets fnames, pairNames, pairs, and the spatial
    parameters R, S, Proj, Tnsf, T taken from the last dataset read.

    :param inpt: parsed-argument/parameter object (mutated in place)
    :return: (num_pairs, rows, cols) numpy array of interferogram maps
    """
    from glob import glob
    from geoFormatting import GDALtransform
    # Detect file names
    inpt.fnames=glob(inpt.files)
    # Empty lists
    stack=[] # data cube
    inpt.pairNames=[] # pair names
    inpt.pairs=[] # pairs as lists
    # Loop through files to load data
    for fname in inpt.fnames:
        # Print filename if requested
        if inpt.verbose is True:
            print('Loading: {}'.format(fname))
        # Add pair name to list
        # First 17 chars of the basename — presumably "YYYYMMDD_YYYYMMDD";
        # TODO confirm against the actual file naming convention.
        pairName=os.path.basename(fname).split('.')[0]
        pairName=pairName[:17]
        inpt.pairNames.append(pairName)
        inpt.pairs.append(pairName.split('_'))
        # Load gdal data set
        DS=gdal.Open(fname,gdal.GA_ReadOnly)
        img=DS.GetRasterBand(1).ReadAsArray()
        # Add image to stack
        stack.append(img)
    # Grab spatial parameters from final map data set
    # NOTE(review): if the glob matched no files, DS is undefined here and
    # this raises NameError — consider failing earlier with a clear message.
    inpt.R=DS.RasterYSize; inpt.S=DS.RasterXSize
    inpt.Proj=DS.GetProjection()
    inpt.Tnsf=DS.GetGeoTransform()
    inpt.T=GDALtransform(DS)
    del DS
    # Convert stack to 3D array
    stack=np.array(stack)
    return stack
### REFERENCE POINT ---
## Format and apply reference point
def refPoint(inpt,stack):
    """
    Resolve the reference point and subtract its value from every epoch map.

    If ``inpt.noRef`` is True the stack is returned unchanged.  (Previously
    the function fell through to the reference computation and crashed
    indexing ``refYX`` when it was None.)  Otherwise exactly one of
    ``inpt.refLaLo`` / ``inpt.refYX`` must be given; the missing
    representation is derived from the map transform in ``inpt.T`` and both
    attributes are filled in on *inpt*.

    :param inpt: parameter object with noRef, refLaLo, refYX, T attributes
    :param stack: (epochs, rows, cols) array of maps, modified in place
    :return: the stack with the reference pixel value removed from each map
    """
    # No reference point requested: nothing to subtract.
    if inpt.noRef is True:
        print('No reference point specified')
        return stack

    assert inpt.refLaLo is not None or inpt.refYX is not None, 'Reference point must be specified'

    ## Find reference
    # If reference point is given in lat lon, find equivalent pixels
    if inpt.refLaLo is not None:
        # Y pixel
        deltaLat = (inpt.refLaLo[0] - inpt.T.ystart)
        yref = int(0 + deltaLat / inpt.T.ystep)
        # X pixel
        deltaLon = (inpt.refLaLo[1] - inpt.T.xstart)
        xref = int(0 + deltaLon / inpt.T.xstep)
        # Ref Y X
        inpt.refYX = [yref, xref]
    else:
        # Reference given in pixels: derive the equivalent lat/lon
        refLat = inpt.T.ystart + inpt.T.ystep * inpt.refYX[0]
        refLon = inpt.T.xstart + inpt.T.xstep * inpt.refYX[1]
        # Ref La Lo
        inpt.refLaLo = [refLat, refLon]

    # Remove reference value from each map
    for k in range(stack.shape[0]):
        stack[k, :, :] = stack[k, :, :] - stack[k, inpt.refYX[0], inpt.refYX[1]]
    return stack
### SBAS COMPUTATION ---
## SBAS
class SBAS:
    """Short Baseline Subset (SBAS) timeseries inversion.

    Builds the interferogram incidence matrix, optionally augments it with
    a linear-model regularization, and solves per-pixel for cumulative
    phase (PHS), linear velocity (V), and constant offset (C).
    """
    def __init__(self,inpt,stack):
        # Basic parameters
        self.verbose=inpt.verbose
        # Spatial parameters
        if inpt.noRef is False:
            self.refY=inpt.refYX[0]
            self.refX=inpt.refYX[1]
            self.refLat=inpt.refLaLo[0]
            self.refLon=inpt.refLaLo[1]
        # List of epochs to solve for
        self.epochList(inpt)
        # Design matrix
        self.constructIncidenceMatrix(inpt)
        if inpt.regularization is True: self.regularizeMatrix()
        # Solve for displacements
        self.constructDisplacements(inpt,stack)
    # Epochs
    def epochList(self,inpt):
        """Derive the sorted unique acquisition dates and decimal-year times."""
        # Number of interferograms
        self.M=len(inpt.pairs)
        # List of all dates, including redundant
        allDates=[]
        [allDates.extend(pair) for pair in inpt.pairs]
        # List of unique dates
        self.dates=[]
        [self.dates.append(date) for date in allDates if date not in self.dates]
        self.dates.sort() # sort oldest to youngest
        self.N=len(self.dates)
        # Reference date
        self.referenceDate=self.dates[0]
        # Epochs to solve for
        self.epochs=[datetime.strptime(date,'%Y%m%d') for date in self.dates]
        # Times since reference date
        # 365.2422 days/year converts day counts to decimal years
        self.times=[(epoch-self.epochs[0]).days/365.2422 for epoch in self.epochs]
        # Report if requested
        if self.verbose==True:
            print(self.dates)
            print('{} dates'.format(len(self.dates)))
            print('Reference date: {}'.format(self.referenceDate))
    # Incidence matrix
    def constructIncidenceMatrix(self,inpt):
        """
        The incidence matrix A is an M x (N-1) matrix in which
        the master date is represented by +1, and the slave
        date is -1 if it is not the reference date.

        Column j corresponds to date j+1 (the reference date has no column).
        """
        # Empty matrix of all zeros
        self.A=np.zeros((self.M,self.N-1))
        # Loop through pairs
        for i,pair in enumerate(inpt.pairs):
            masterDate=pair[0]
            slaveDate=pair[1]
            # Master date
            masterNdx=self.dates.index(masterDate) # index within date list
            self.A[i,masterNdx-1]=1 # Index-1 to ignore reference date
            # Slave date
            if slaveDate!=self.referenceDate:
                slaveNdx=self.dates.index(slaveDate) # index within date list
                self.A[i,slaveNdx-1]=-1 # Index-1 to ignore reference date
    # Regularization functions
    def regularizeMatrix(self):
        """
        Regularization based on the linear model phs - v(tj-t1) - c = 0

        Appends two unknowns (velocity v and constant c) and N-1 constraint
        rows to A so the regularized system is solved simultaneously.
        """
        # Expand A matrix
        self.A=np.concatenate([self.A,np.zeros((self.M,2))],axis=1)
        self.A=np.concatenate([self.A,np.zeros((self.N-1,self.N-1+2))],axis=0)
        for i in range(self.N-1):
            self.A[self.M+i,i]=1
            self.A[self.M+i,-2]=-(self.epochs[i+1]-self.epochs[0]).days/365.2422
            self.A[self.M+i,-1]=-1
        # Report if requested
        if self.verbose==True: print('Enforcing regularization')
    # Solve for displacements
    def constructDisplacements(self,inpt,stack):
        """Least-squares solve every pixel for PHS (and V, C)."""
        ## Setup
        # Invert design matrix (normal-equations pseudo-inverse)
        Ainv=np.linalg.inv(np.dot(self.A.T,self.A)).dot(self.A.T)
        # Empty maps of solution values
        self.PHS=np.zeros((self.N,inpt.R,inpt.S)) # empty phase cube
        self.V=np.zeros((inpt.R,inpt.S))
        self.C=np.zeros((inpt.R,inpt.S))
        ## Without regularization, use a linear fit to the data
        if inpt.regularization==False:
            for i in range(inpt.R):
                for j in range(inpt.S):
                    # Interferogram values of pixel
                    series=stack[:,i,j]
                    series=series.reshape(self.M,1)
                    # Solve for displacements
                    self.PHS[1:,i,j]=Ainv.dot(series).flatten()
                    # Solve for linear velocity and constant using polyfit
                    fit=np.polyfit(self.times,self.PHS[:,i,j],1)
                    self.V[i,j]=fit[0]
                    self.C[i,j]=fit[1]
        ## Slow method - pixel-by-pixel
        elif inpt.regularization==True:
            # Simultaneously solve for phase and velocity on a
            # pixel-by-pixel basis
            for i in range(inpt.R):
                for j in range(inpt.S):
                    # Interferogram values of pixel
                    series=stack[:,i,j]
                    series=series.reshape(self.M,1)
                    # Add zeros for regularization
                    series=np.concatenate([series,np.zeros((self.N-1,1))],axis=0)
                    # Solve for displacement, velocity, and constant
                    sln=Ainv.dot(series)
                    # Add results to arrays
                    self.PHS[1:,i,j]=sln[:-2].flatten()
                    self.V[i,j]=sln[-2]
                    self.C[i,j]=sln[-1]
    ## Plot results
    def plotResults(self):
        """Plot the LOS velocity map; returns (figure, axes)."""
        ## Plot velocity map
        # NOTE(review): this reads the module-level ``inpt`` rather than a
        # stored attribute, so it only works when run as the script's main.
        velFig,velAx=mapPlot(self.V,cmap='jet',pctmin=inpt.pctmin,pctmax=inpt.pctmax,background=inpt.background,
            extent=None,showExtent=False,cbar_orientation='horizontal',title='LOS velocity')
        # Plot reference point
        if inpt.noRef is False: velAx.plot(self.refX,self.refY,'ks')
        return velFig, velAx
### PHASE ANALYSIS ---
def phaseAnalysis(event):
    """Matplotlib click callback: plot the clicked pixel's phase history.

    Reads the module-level SBAS result ``S`` and redraws the module-level
    ``phsFig``/``phsAx`` figure with the reconstructed phase and its linear
    velocity fit; reports the RMS of the residuals.
    """
    print('Phase analysis')
    # Location (clicked data coordinates, rounded to the nearest pixel)
    px=event.xdata; py=event.ydata
    px=int(round(px)); py=int(round(py))
    # Report position and cumulative values
    print('px {} py {}'.format(px,py)) # report position
    print('long-term velocity {}'.format(S.V[py,px]))
    # Extract phase values
    phsValues=S.PHS[:,py,px]
    print('Phase values: {}'.format(phsValues))
    velocityFit=np.poly1d([S.V[py,px],S.C[py,px]])
    resids=phsValues-velocityFit(S.times)
    print('RMS {}'.format(np.sqrt(np.mean(resids**2))))
    # Plot phase over time
    phsAx.cla()
    phsAx.plot(S.epochs,velocityFit(S.times),'g',label='linear fit')
    phsAx.plot(S.epochs,phsValues,'k.',label='reconstructed phase')
    phsAx.set_xticks(S.epochs)
    labels=[datetime.strftime(epoch,'%Y%m%d') for epoch in S.epochs]
    phsAx.set_xticklabels(labels,rotation=80)
    phsFig.tight_layout()
    # Draw
    phsFig.canvas.draw()
### MAIN ---
if __name__=='__main__':
    # Gather inputs
    inpt=cmdParser()
    ## Load data
    stack=loadARIAdata(inpt)
    # Plot data if requested
    if inpt.plotInputs:
        imagettes(stack,3,4,cmap='viridis',pctmin=inpt.pctmin,pctmax=inpt.pctmax,
            colorbarOrientation='horizontal',background=inpt.background,
            extent=inpt.T.extent,showExtent=False,titleList=inpt.pairNames,supTitle='Inputs')
    ## Reference point
    stack=refPoint(inpt,stack)
    ## SBAS
    # Compute SBAS
    S=SBAS(inpt,stack)
    # Plot displacement timeseries
    velFig,velAx=S.plotResults()
    ## Analyze displacement timeseries
    # Plot phase over time (empty axes; filled by the click callback below)
    phsFig=plt.figure(figsize=(7,6))
    phsAx=phsFig.add_subplot(111)
    phsAx.set_title('Phase over time')
    phsAx.set_xlabel('time'); phsAx.set_ylabel('phase')
    # Interact with velocity figure: clicking a pixel runs phaseAnalysis()
    velFig.canvas.mpl_connect('button_press_event',phaseAnalysis)
    plt.show()
| EJFielding/InsarToolkit | SBAS/SBASrz.py | SBASrz.py | py | 10,332 | python | en | code | 4 | github-code | 13 |
35784935638 | import numpy as np
from datetime import datetime as dt
from backports.datetime_fromisoformat import MonkeyPatch
MonkeyPatch.patch_fromisoformat()
MAX_PACKET_SIZE = 4096
BYTES_IN_PACKET = 1456
np.set_printoptions(threshold=np.inf,linewidth=325)
class Organizer:
    """Reassembles raw UDP radar packets into complex ADC frames.

    Takes the capture tuple (data, packet numbers, byte counts, start/end
    ISO timestamps), detects dropped/out-of-order packets from the packet
    number sequence, and reconstructs frames of shape
    (num_chirps, num_rx, num_samples) of interleaved I/Q samples.
    """
    def __init__(self, all_data, num_chirp_loops, num_rx, num_tx, num_samples, verbose=False):
        # all_data layout: [data, packet_num, byte_count, start_time, end_time]
        self.data = all_data[0]
        self.packet_num = all_data[1]
        self.byte_count = all_data[2]
        self.num_packets = len(self.byte_count)
        self.num_chirps = num_chirp_loops*num_tx
        self.num_rx = num_rx
        self.num_samples = num_samples
        # * 2 * 2: presumably 2 bytes per sample and I+Q pairs — TODO confirm
        self.BYTES_IN_FRAME = self.num_chirps * self.num_rx * self.num_samples * 2 * 2
        self.BYTES_IN_FRAME_CLIPPED = (self.BYTES_IN_FRAME // BYTES_IN_PACKET) * BYTES_IN_PACKET
        self.UINT16_IN_FRAME = self.BYTES_IN_FRAME // 2
        self.NUM_PACKETS_PER_FRAME = self.BYTES_IN_FRAME // BYTES_IN_PACKET
        self.start_time = all_data[3]
        self.end_time = all_data[4]
        self.verbose = verbose
    def iq(self, raw_frame):
        """Reorganizes raw ADC data into a full frame
        Args:
            raw_frame (ndarray): Data to format
            num_chirps: Number of chirps included in the frame
            num_rx: Number of receivers used in the frame
            num_samples: Number of ADC samples included in each chirp
        Returns:
            ndarray: Reformatted frame of raw data of shape (num_chirps, num_rx, num_samples)
        """
        ret = np.zeros(len(raw_frame) // 2, dtype=np.csingle)
        # Separate IQ data (samples arrive as I0 I1 Q0 Q1 groups of four)
        ret[0::2] = raw_frame[0::4] + 1j * raw_frame[2::4]
        ret[1::2] = raw_frame[1::4] + 1j * raw_frame[3::4]
        return ret.reshape((self.num_chirps, self.num_rx, self.num_samples))
    def get_frames(self, start_chunk, end_chunk, bc):
        """Extract whole frames from the packet range [start_chunk, end_chunk].

        ``bc`` is the cumulative byte-count array; the first partial frame
        after start_chunk is skipped so every returned frame is complete.
        """
        # if first packet received is not the first byte transmitted
        if bc[start_chunk] == 0:
            bytes_left_in_curr_frame = 0
            start = start_chunk*(BYTES_IN_PACKET // 2)
        else:
            frames_so_far = bc[start_chunk] // self.BYTES_IN_FRAME
            bytes_so_far = frames_so_far * self.BYTES_IN_FRAME
            # bytes_left_in_curr_frame = bc[start_chunk] - bytes_so_far
            bytes_left_in_curr_frame = (frames_so_far+1)*self.BYTES_IN_FRAME - bc[start_chunk]
            start = (bytes_left_in_curr_frame // 2) + start_chunk*(BYTES_IN_PACKET // 2)
        # if self.verbose: print(start_chunk, start)
        # find num of frames
        total_bytes = bc[end_chunk] - (bc[start_chunk] + bytes_left_in_curr_frame)
        num_frames = total_bytes // (self.BYTES_IN_FRAME)
        # if self.verbose: print(bc[end_chunk])
        # if self.verbose: print(num_frames, start_chunk, end_chunk, self.BYTES_IN_FRAME)
        frames = np.zeros((num_frames, self.UINT16_IN_FRAME), dtype=np.int16)
        ret_frames = np.zeros((num_frames, self.num_chirps, self.num_rx, self.num_samples), dtype=complex)
        # compress all received data into one byte stream
        all_uint16 = np.array(self.data).reshape(-1)
        # only choose uint16 starting from a new frame
        all_uint16 = all_uint16[start:]
        # organizing into frames
        for i in range(num_frames):
            frame_start_idx = i*self.UINT16_IN_FRAME
            frame_end_idx = (i+1)*self.UINT16_IN_FRAME
            frame = all_uint16[frame_start_idx:frame_end_idx]
            frames[i][:len(frame)] = frame.astype(np.int16)
            ret_frames[i] = self.iq(frames[i])
        return ret_frames
    def organize(self):
        """Detect packet loss/reordering and return all recoverable frames."""
        # NOTE(review): the unix timestamps below are computed but unused here.
        radar_unix_start_time = dt.timestamp(dt.fromisoformat(self.start_time[:-1]))*1e6
        radar_unix_end_time = dt.timestamp(dt.fromisoformat(self.end_time[:-1]))*1e6
        if self.verbose: print('Start time: ', self.start_time)
        if self.verbose: print('End time: ', self.end_time)
        self.byte_count = np.array(self.byte_count)
        self.data = np.array(self.data)
        self.packet_num = np.array(self.packet_num)
        # Reordering packets
        # sorted_idx = np.argsort(self.packet_num)
        # if self.verbose: print(sorted_idx.dtype)
        # if self.verbose: print(len(self.packet_num), len(self.byte_count), len(self.data), sorted_idx.shape)
        # self.packet_num = self.packet_num[sorted_idx]
        # self.data = self.data[sorted_idx]
        # self.byte_count = self.byte_count[sorted_idx]
        # self.packet_num = self.packet_num.tolist()
        # self.byte_count = self.byte_count.tolist()
        # self.data = self.data.tolist()
        bc = np.array(self.byte_count)
        # Indices where the packet number sequence breaks (loss or reorder)
        packets_ooo = np.where(np.array(self.packet_num[1:])-np.array(self.packet_num[0:-1]) != 1)[0]
        is_not_monotonic = np.where(np.array(self.packet_num[1:])-np.array(self.packet_num[0:-1]) < 0)[0]
        if self.verbose: print('Non monotonic packets: ', is_not_monotonic)
        if len(packets_ooo) == 0:
            if self.verbose: print('packets in order')
            start_chunk = 0
            ret_frames = self.get_frames(start_chunk, -1, bc)
        elif len(packets_ooo) == 1:
            if self.verbose: print('1 packet not in order')
            start_chunk = packets_ooo[0]+1
            ret_frames = self.get_frames(start_chunk, -1, bc)
            # start_chunk = 0
        else:
            if self.verbose: print('Packet num not in order')
            packets_ooo = np.append(packets_ooo, len(self.packet_num)-1)
            # if self.verbose: print('Packets ooo', packets_ooo)
            # if self.verbose: print(self.NUM_PACKETS_PER_FRAME)
            # diff = [44]
            # for i in range(len(packets_ooo)-1):
            #     # if self.verbose: print(i, len(packets_ooo))
            #     diff.append(self.packet_num[packets_ooo[i+1]]-self.packet_num[packets_ooo[i]+1])
            # if self.verbose: print('Packets received before atleast 1 loss ', diff)
            # if self.verbose: print('Total packets received ', np.sum(np.array(diff)))
            diff = []
            for i in range(len(packets_ooo)-1):
                diff.append(self.packet_num[packets_ooo[i]+1]-self.packet_num[packets_ooo[i]])
            # if self.verbose: print('Packets lost before atleast 1 reception ', diff)
            packets_lost = np.sum(np.array(diff))
            packets_expected = self.packet_num[-1]-self.packet_num[0]+1
            if self.verbose: print('Total packets lost ', packets_lost)
            if self.verbose: print('Total packets expected ', packets_expected)
            if self.verbose: print('Fraction lost ', packets_lost/packets_expected)
            # Keep only contiguous runs spanning at least two frames' worth
            # of packets; shorter runs cannot yield a complete frame.
            new_packets_ooo = []
            start_new_packets_ooo = []
            end_new_packets_ooo = []
            for i in range(1, len(packets_ooo)):
                if (packets_ooo[i] - packets_ooo[i-1]) > self.NUM_PACKETS_PER_FRAME*2:
                    new_packets_ooo.append(packets_ooo[i-1])
                    start_new_packets_ooo.append(packets_ooo[i-1])
                    end_new_packets_ooo.append(packets_ooo[i])
            new_packets_ooo = np.append(new_packets_ooo, -1)
            # if self.verbose: print('New packets ooo', new_packets_ooo)
            # if self.verbose: print('Start new packets ooo', start_new_packets_ooo)
            # if self.verbose: print('End new packets ooo', end_new_packets_ooo)
            # exit()
            for i in range(len(start_new_packets_ooo)):
                # for i in range(len(new_packets_ooo)-1):
                # for i in [len(new_packets_ooo)-2]:
                # start_chunk = new_packets_ooo[i]+1
                # end_chunk = new_packets_ooo[i+1]
                start_chunk = start_new_packets_ooo[i]+1
                end_chunk = end_new_packets_ooo[i]
                # if self.verbose: print(self.packet_num[start_chunk],self.packet_num[start_chunk-1])
                # if self.verbose: print(self.byte_count[start_chunk],self.byte_count[start_chunk-1])
                curr_frames = self.get_frames(start_chunk, end_chunk, bc)
                if i == 0:
                    ret_frames = curr_frames
                else:
                    ret_frames = np.concatenate((ret_frames, curr_frames), axis=0)
        return ret_frames
# Old approach
# frame_start_idx = np.where((bc % self.BYTES_IN_FRAME_CLIPPED == 0) & (bc != 0))[0]
# num_frames = len(frame_start_idx)-1
# frames = np.zeros((num_frames, self.UINT16_IN_FRAME), dtype=np.int16)
# ret_frames = np.zeros((num_frames, self.num_chirps, self.num_rx, self.num_samples), dtype=complex)
# for i in range(num_frames):
# d = np.array(self.data[frame_start_idx[i]:frame_start_idx[i+1]])
# frame = d.reshape(-1)
# frames[i][:len(frame)] = frame.astype(np.int16)
# ret_frames[i] = self.iq(frames[i])
# return ret_frames | UCLA-VMG/EquiPleth | nndl/rf/organizer.py | organizer.py | py | 7,749 | python | en | code | 6 | github-code | 13 |
17045013064 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenPublicCrowdInnerQueryModel(object):
    """Request model for the alipay.open.public.crowd.inner.query API."""

    # Field names driving the generic (de)serialization helpers below.
    _FIELDS = ('channel', 'crowd_id', 'group_id')

    def __init__(self):
        self._channel = None
        self._crowd_id = None
        self._group_id = None

    @property
    def channel(self):
        return self._channel

    @channel.setter
    def channel(self, value):
        self._channel = value

    @property
    def crowd_id(self):
        return self._crowd_id

    @crowd_id.setter
    def crowd_id(self, value):
        self._crowd_id = value

    @property
    def group_id(self):
        return self._group_id

    @group_id.setter
    def group_id(self, value):
        self._group_id = value

    def to_alipay_dict(self):
        """Serialize the populated (truthy) fields into a plain dict."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                # Falsy values (None, '', ...) are omitted, as before.
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = AlipayOpenPublicCrowdInnerQueryModel()
        for name in AlipayOpenPublicCrowdInnerQueryModel._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayOpenPublicCrowdInnerQueryModel.py | AlipayOpenPublicCrowdInnerQueryModel.py | py | 1,822 | python | en | code | 241 | github-code | 13 |
73091828498 | #-- class란?
# ● 데이터와 데이터를 변형하는 함수를 같은 공간으로 작성
# - 메서드(Method)
# - 인스턴스(Instance)
# - 정보 은닉(Information Hiding)
# - 추상화(Abstraction)
#-- 클래스와 인스턴스
class Person: # class definition
    Name = 'Default Name' # class-level member variable (shared default)
    def Print(self): # member method
        print('My Name is {0}'.format(self.Name))
p1 = Person() # create an instance object
p1.Print() # print the member variable's value
| gymcoding/learning-python | docs/#5_class/#1_basic.py | #1_basic.py | py | 531 | python | ko | code | 1 | github-code | 13 |
23530842470 | import random
import json
genres = ['rock', 'rap', 'metal', 'jazz', 'pop', 'country']
artists = [
'The Beatles',
'Led Zeppelin',
'Pink Floyd',
'The Rolling Stones',
'Queen',
'AC/DC',
'Black Sabbath',
'The Who',
'Guns N\' Roses',
'Nirvana',
'Metallica',
'U2',
'The Doors',
'Jimi Hendrix',
'Bruce Springsteen',
'David Bowie',
'The Eagles',
'Tom Petty',
'Fleetwood Mac',
'Van Halen',
'Rush',
'Notorious B.I.G.', 'Tupac', 'Nas', 'Jay-Z', 'Eminem', 'Kendrick Lamar', 'Drake', 'Kanye West', 'Outkast', 'Wu-Tang Clan', 'Public Enemy', 'Run-DMC', 'Beastie Boys', 'LL Cool J', 'Ice Cube', 'Snoop Dogg', '50 Cent', 'Lil Wayne', 'Missy Elliott', 'Busta Rhymes', 'A Tribe Called Quest', 'Mos Def', 'Common', 'Chance the Rapper', 'Cardi B', 'J. Cole', 'Travis Scott', 'Migos', 'Post Malone', 'Tyler, The Creator', 'Childish Gambino', 'Lauryn Hill', 'The Roots', 'Gang Starr', 'Rakim', 'KRS-One', 'Big Daddy Kane', 'Grandmaster Flash and the Furious Five', 'De La Soul', 'Fugees', 'N.W.A.', 'Scarface', 'DMX', 'Ludacris', 'Big L', 'Ghostface Killah', 'Raekwon', 'GZA', 'MF DOOM', 'Black Star', 'Run the Jewels', 'Danny Brown',
'Louis Armstrong', 'Duke Ellington', 'Miles Davis', 'Charlie Parker', 'John Coltrane', 'Ella Fitzgerald', 'Billie Holiday', 'Thelonious Monk', 'Dave Brubeck', 'Art Blakey', 'Chet Baker', 'Herbie Hancock', 'Sonny Rollins', 'Ornette Coleman', 'Dizzy Gillespie', 'Stan Getz', 'Cannonball Adderley', 'Charles Mingus', 'Oscar Peterson', 'Wes Montgomery', 'Django Reinhardt', 'Pat Metheny', 'Sarah Vaughan', 'Coleman Hawkins', 'Lester Young', 'Keith Jarrett', 'Wynton Marsalis', 'Count Basie', 'Benny Goodman', 'Lionel Hampton', 'Chick Corea', 'Joe Pass', 'George Benson', 'Gil Evans', 'Bud Powell', 'Clifford Brown', 'Max Roach', 'Jimmy Smith', 'Jimmy Heath', 'Johnny Hartman', 'Tony Williams', 'Pharoah Sanders', 'Bill Evans', 'Eddie Palmieri', 'Lee Morgan', 'Horace Silver', 'Cecil Taylor', 'Roy Ayers', 'Gary Burton', 'Joe Henderson',
]
song_titles = [
"Stairway to Heaven",
"Bohemian Rhapsody",
"going to the grocery store",
"riding a bicycle through times square",
"going to coachella with your friends",
"working at red robin slinging burgers",
"teaching at the university of michigan",
"swimming in the ocean in hawaii",
"going on vacation",
"being a college student",
"teeth like god's shoeshine",
"the predatory wasp of the palisades is out to get us",
"take me in your arms",
"like minded people",
"windows futures like minds country roads"
]
# [
# "Stronger",
# "Baby Love",
# "Acid Tracks",
# "Pride is the Devil",
# "Passionfruit",
# "Africa",
# "After the Gold Rush",
# "Single Ladies",
# "Umbrella",
# "Shake it Off",
# "Toxic",
# "Rolling in the Deep",
# "Firework",
# "Dynamite",
# "Rehab",
# "Blinding Lights",
# "Dancing on my own",
# "Hey Ya!",
# "Old Town Road",
# "Hips Don't Lie",
# "SexyBack",
# "Call Me Maybe",
# "Uptown Funk",
# "Poker Face",
# "Hollaback Girl",
# "Starships",
# "Royals",
# "Party in the U.S.A.",
# "Bad Guy",
# "Good as Hell",
# "Get Lucky",
# "Adore You",
# "No Tears Left to Cry",
# "Drivers License",
# "Sorry",
# "Titanium",
# "Happy",
# "Hotline Bling",
# "Levitating",
# "Milkshake",
# "Hung Up",
# "Despacito",
# "Bootylicious",
# "Work It",
# "Can’t Get You Out of My Head",
# "American Boy",
# "Since U Been Gone",
# "Let Me Blow Ya Mind",
# "Gangnam Style",
# "Crazy",
# "Clint Eastwood",
# "Paper Planes",
# "I Want You",
# "What's Goin' On",
# "Bridge Over Troubled Water",
# "Family Affair",
# "Dancing Queen",
# "No Woman No Cry" ]
def generate_examples():
    """
    Build NER training/validation examples for every (artist, genre, title)
    combination from the module-level lists.

    Each example is a ``(text, {'entities': [(start, end, label), ...]})``
    pair with ARTIST/GENRE/TITLE character spans located in the text (an
    entity is skipped when its value does not appear in that template).
    The first 225000 examples go to the training split, the rest to
    validation.

    :return: (training, validation) lists of annotated examples
    """
    training = []
    validation = []
    cnt = 0
    for artist in artists:
        for genre in genres:
            for title in song_titles:
                examples = [
                    f'I want {genre} lyrics by {artist} for my song titled {title}',
                    f'I want {genre} lyrics by {artist} about {title} NOW!',
                    f'Please generate a {genre} song about {title} in the style of {artist}',
                    f'Please write a {genre} song with the title {title} in the style of {artist}',
                    f'write a {genre} song titled {title} in the style of {artist}',
                    f'make a {genre} song by {artist} titled {title}',
                    f'create {genre} song lyrics in the style of {artist} about {title}',
                    f'generate {genre} lyrics about {title} from the perspective of {artist}',
                    f'generate {artist} lyrics titled {title} in a {genre} genre',
                    f'write {artist} lyrics in a {genre} genre and the title is {title} please',
                    f'write lyrics by {artist} please about {title}',
                    f'please generate a song about {title} but the lyrics are by {artist} pretty please',
                    f'write lyrics about {title} by {artist} in the style of {genre} for me please',
                    f'write a {genre} song about {title}',
                    f'write a song about {title} and it is a {genre} song by {artist} um yes',
                    f'would you make a {genre} song by {artist} please and the title is something like {title}',
                    f'generate lyrics to a song about {title}',
                    f'write a song with the title {title}',
                    f'write a song titled {title} in a {genre} style',
                    f'would you make a song about {title} by {artist} please',
                    f'generate lyrics to a song about {title} please in the style of {artist}',
                    f'write lyrics in the style of {artist} for me with the title {title}',
                    f'I want lyrics by about {title} for my song'
                ]
                for example in examples:
                    entities_list = []
                    # Locate each entity's first occurrence.  str.find returns
                    # -1 when absent, replacing the old bare try/except blocks
                    # that silently swallowed every exception.
                    for value, label in ((artist, 'ARTIST'), (genre, 'GENRE'), (title, 'TITLE')):
                        start = example.find(value)
                        if start != -1:
                            entities_list.append((start, start + len(value), label))
                    if cnt < 225000:  # TODO: make the train/validation split size configurable
                        training.append((example, {'entities': entities_list}))
                    else:
                        validation.append((example, {'entities': entities_list}))
                    cnt = cnt + 1
    print("NUM EXAMPLES\n")
    print(cnt)
    return training, validation
# artists = [x.lower() for x in artists]
song_titles = [x.lower() for x in song_titles]

training, validation = generate_examples()

# The annotations contain TITLE spans as well, so TITLE must be listed in
# the label classes alongside GENRE and ARTIST (it was previously missing).
training_output = {'classes': ['GENRE', 'ARTIST', 'TITLE'], 'annotations': training}
validation_output = {'classes': ['GENRE', 'ARTIST', 'TITLE'], 'annotations': validation}

with open('val_dataset.json', 'w') as f:
    json.dump(validation_output, f)

with open('train_dataset.json', 'w') as f:
    json.dump(training_output, f)
# consider making all user input lowercase for consistency | mumichians/ner | generateExamples.py | generateExamples.py | py | 8,392 | python | en | code | 0 | github-code | 13 |
39191998179 | import time
# Entering "bunny" is a hidden easter egg that skips the repeat loop.
repeatInput = input("Type something for me to repeat: ")
loopy = True
if repeatInput == "bunny":
    # Typo fix: the message previously read "alot ofr times".
    print("You found the easter egg! Therefore, I will not print 'bunny' a lot of times.")
else:
    # Seconds between repeats; float() raises ValueError on non-numeric input.
    delay = input("Now, enter the time gap between repeats: ")
    delay = float(delay)
    # loopy is never set to False, so this repeats until interrupted (Ctrl+C).
    while loopy:
        print(repeatInput)
time.sleep(delay) | hazlenuts/LiMega | limega.py | limega.py | py | 366 | python | en | code | 0 | github-code | 13 |
21096672047 | from setuptools import setup, find_packages
# Optional dependency groups exposed via extras_require.
req_tests = ["pytest"]
req_lint = ["flake8", "flake8-docstrings"]
req_etc = ["black", "isort"]
req_dev = req_tests + req_lint + req_etc

# Collect runtime requirements, dropping inline comments and blank lines.
install_requires = []
with open('requirements.txt', 'r') as f:
    for line in f:
        requirement = line.split('#', 1)[0].strip(' \t\n')
        if requirement != '':
            install_requires.append(requirement)

setup(
    name="Url Counter",
    version="0.1",
    description="Simple Url Counter from Json files or folder.",
    packages=find_packages(),
    python_requires=">=3.11.0",
    install_requires=install_requires,
    extras_require={
        "tests": req_tests,
        "lint": req_lint,
        "dev": req_dev,
    },
    package_dir={"": "."},
    entry_points={
        "console_scripts": [
            "counter=url_counter.main:main",
        ],
    },
)
| windies21/simple_url_counter | setup.py | setup.py | py | 883 | python | en | code | 0 | github-code | 13 |
1398663032 | import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import pylab
import seaborn as sns
import pmdarima as pm
from pmdarima.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import io
from PIL import Image
from sklearn.preprocessing import MinMaxScaler
from scipy import stats
def draw(column):
    """Plot truth (rows 0-123) vs. prediction (rows 122+) for *column*.

    Reads the module-level DataFrame ``df`` (must have 'date' and *column*
    columns); 'cp' is special-cased as vegetation coverage labels.
    NOTE(review): the two index ranges overlap at rows 122-123 so the
    curves join visually — confirm that is intentional.
    """
    plt.rcParams['font.sans-serif'] = ['SimHei'] # render Chinese labels correctly
    plt.rcParams['axes.unicode_minus'] = False # render the minus sign correctly
    train = df.loc[:123,column]
    pre = df.loc[122:, column]
    plt.plot(train, label='真实值', color='deepskyblue')
    plt.plot(pre, label='预测值', color='orange',linestyle='--')
    if column == 'cp':
        plt.ylabel('植被覆盖率', fontsize=16)
    else:
        plt.ylabel(column, fontsize=16)
    plt.legend(prop={'size': 10})
    plt.grid(True,axis='y')
    if column == 'cp':
        plt.title('植被覆盖率预测',fontsize=16)
    else:
        plt.title(column+'预测', fontsize=16)
    # Tick every 4th row, labelled with its date, rotated for readability
    plt.xticks(range(0, df.shape[0], 4), df.loc[range(0, df.shape[0], 4), 'date'],
               rotation=45)
def draw_shidu():
    """Plot actual vs. predicted 10 cm soil moisture from a dedicated CSV.

    Reads '湿度画图所需数据.csv' (columns 'train', 'pred', 'date'), keeps
    the first 141 rows, and draws the observed series (rows 0-123) against
    the forecast (rows 122 onward) with a one-point overlap.
    """
    df = pd.read_csv('湿度画图所需数据.csv')
    plt.rcParams['font.sans-serif'] = ['SimHei'] # render CJK labels correctly
    plt.rcParams['axes.unicode_minus'] = False # render the minus sign correctly
    df = df.iloc[:141]
    train = df.loc[:123, 'train']
    pred = df.loc[122:, 'pred']
    plt.plot(train, label='真实值', color='deepskyblue')
    plt.plot(pred, label='预测值', color='orange',linestyle='--')
    plt.ylabel('10cm湿度(kg/m2)', fontsize=16)
    plt.title('10cm湿度(kgm2)预测', fontsize=16)
    plt.legend()
    plt.grid(True,axis='y')
    # Label every 4th row with its date string, rotated for readability.
    plt.xticks(range(0, df.shape[0], 4), df.loc[range(0, df.shape[0], 4), 'date'],
               rotation=45)
if __name__ == '__main__':
    # Subplot row counter for the 4-row composite figure built below.
    pindex = 1
    df = pd.read_csv('../第二问/所有特征整合数据.csv')
    df = df.iloc[:141,:]
    columns = ['降水量(mm)', '土壤蒸发量(mm)', '植被指数(NDVI)']
    # columns = ['icstore']
    # columns = ['40cm湿度(kgm2)', '100cm湿度(kgm2)', '200cm湿度(kgm2)']
    columns=['cp']
    # One standalone figure per selected column (currently only 'cp').
    for column in columns:
        draw(column)
        plt.show()
    # 4-row composite: 10 cm moisture on top, deeper layers below it.
    plt.subplot(4, 1, 1)
    draw_shidu()
    pindex = 2
    columns = ['40cm湿度(kgm2)', '100cm湿度(kgm2)', '200cm湿度(kgm2)']
    for column in columns:
        plt.subplot(4, 1, pindex)
        draw(column)
        pindex +=1
    plt.show()
    # statistic, pvalue = stats.mstats.ttest_ind(df.loc[122:143, column], df.loc[1:122, column])
    # print(statistic, pvalue)
    # draw_final()
25568167682 | # augmenter.py
# Created by abdularis on 26/03/18
import scipy.misc
import numpy as np
import os
import argparse
from tqdm import tqdm
from keras.preprocessing.image import ImageDataGenerator
# augmentasi data citra pada direktori 'image_dir' output ke 'output_dir'
# dengan jumlah augmentasi percitra 'augment_per_image'
def augment_images(image_dir, output_dir, augment_per_image):
    """Augment every image in *image_dir* and write results to *output_dir*.

    Each source image is copied through unchanged and additionally saved
    *augment_per_image* times with random rotation/shift/zoom/flip applied
    by Keras' ImageDataGenerator.

    NOTE(review): scipy.misc.imread/imsave were removed in SciPy >= 1.2,
    so this requires an old SciPy (or a port to imageio/PIL) — confirm.
    """
    if not os.path.exists(image_dir):
        print('Direktori %s tidak ada' % image_dir)
        return

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    data_gen = ImageDataGenerator(rotation_range=25, width_shift_range=0.1, zoom_range=0.2, horizontal_flip=True)
    for file_name in tqdm(os.listdir(image_dir)):
        file_path = os.path.join(image_dir, file_name)
        if not os.path.isfile(file_path):
            print('[!] "%s" adalah direktori, abaikan' % file_path)
            continue

        # ndmin=4 yields the (batch, height, width, channels) layout Keras expects.
        img = np.array(scipy.misc.imread(file_path, mode='RGB'), ndmin=4)
        data_gen.fit(img)

        # Save the untouched original alongside its augmented variants.
        scipy.misc.imsave(os.path.join(output_dir, file_name), img[0])

        count = 0
        # flow() yields endlessly, so stop after augment_per_image batches.
        for imgs in data_gen.flow(img, None, batch_size=1):
            scipy.misc.imsave(os.path.join(output_dir, 'aug_%d_%s' % (count, file_name)), imgs[0])
            count += 1
            if count >= augment_per_image:
                break
if __name__ == '__main__':
    # CLI entry point: augment every image in --image-dir into --output-dir.
    parser = argparse.ArgumentParser(description='Resize image')
    parser.add_argument('--image-dir', type=str, help='Direktori tempat image yang akan diaugmentasi', required=True)
    parser.add_argument('--output-dir', type=str, help='Direktori output image', required=True)
    parser.add_argument('--augment-per-image', type=int, help='Jumlah augmentasi per image', required=True)

    args = parser.parse_args()
    augment_images(args.image_dir, args.output_dir, args.augment_per_image)
| deepdumbo/DeepLearningCNN | preprocess/augmenter.py | augmenter.py | py | 1,880 | python | ta | code | 0 | github-code | 13 |
36945608659 | """
Imagine, you are developing a vending machine. You need to keep your vending machine state: which items are presented
on which shelves, how much money inside machine to give change, how much money user inserted in current time, which
purchases users made etc. You need to create a data structure for that using known data types.
"""
from typing import Dict, List
class VMachine:
    """Vending machine class.

    Tracks shelf stock, item prices, the machine's cash reserve
    (``money_inside``) and the current user's deposit (``user_balance``).
    """

    def __init__(self, money_inside=0, item_prices=None, shelve_items=None):
        """
        Instantiate a vending machine.

        :param money_inside: how much money inside.
        :param item_prices: prices of items.
        :param shelve_items: what items are there on the shelves.
        """
        if shelve_items is None:
            shelve_items = dict()
        if item_prices is None:
            item_prices = dict()
        self.item_prices = item_prices  # dict of str keys (item name) and values (cost)
        self.shelve_items = shelve_items  # dict of str keys (shelve id/name) and list values (item name)
        self.money_inside = money_inside
        self.user_balance = 0  # assume that our currency is counted in integer numbers
        self.purchases = dict()  # dict of str keys (item name) and values (number)
        self.purchases_total_cost = dict()  # dict of str keys (item name) and values (number)

    def add_money(self, amount: int) -> None:
        """
        User method to add more money to the current deposit.

        :param amount: amount of money to add.
        :raises Exception: if *amount* is negative.
        """
        if amount < 0:
            raise Exception('Validation error.')
        # BUGFIX: credit the user's deposit, not the machine's cash reserve.
        self.user_balance += amount
        print(f'Balance: {self.user_balance}')

    def purchase(self, item: str) -> None:
        """
        User method to perform a purchase.

        :param item: name of an item.
        :raises Exception: if the item is unknown, the deposit is too small,
            or the item is out of stock.
        """
        if item not in self.item_prices.keys():
            raise Exception(f'There is no item {item} in the price list.')
        price = self.item_prices[item]
        # BUGFIX: a purchase is limited by the user's deposit, not by the
        # machine's own cash reserve.
        if self.user_balance < price:
            raise Exception('Not enough money.')
        for shelve, shelve_items in self.shelve_items.items():
            if item in shelve_items:
                shelve_items.remove(item)
                self.user_balance -= price
                # The paid amount moves into the machine's cash reserve.
                self.money_inside += price
                # Keep the purchase statistics declared in __init__ up to date.
                self.purchases[item] = self.purchases.get(item, 0) + 1
                self.purchases_total_cost[item] = self.purchases_total_cost.get(item, 0) + price
                print(f'Enjoy your {item}!')
                return
        raise Exception(f'No {item} left in the machine!')

    def get_money_back(self) -> int:
        """
        User method. Returns the remaining deposit and resets it.

        :return: deposit.
        """
        change = self.user_balance
        self.user_balance = 0
        return change

    def set_item_prices(self, new_item_prices: Dict[str, int]) -> None:
        """
        Admin method. Set new item prices.

        :param new_item_prices: guess what! new item prices!
        """
        self.item_prices = new_item_prices

    def add_shelve_items(self, new_shelve_items: Dict[str, List[str]]) -> None:
        """
        Admin method. Add new shelve items.

        :param new_shelve_items: mapping of an existing shelve name to items;
            every item must already be in the price list.
        :raises Exception: for an unknown shelve or an unpriced item.
        """
        for shelve, items in new_shelve_items.items():
            if shelve not in self.shelve_items.keys():
                raise Exception(f'There is no shelve {shelve} in the machine.')
            for item in items:
                if item not in self.item_prices.keys():
                    raise Exception(f'There is no item {item} in the price list.')
            self.shelve_items[shelve] += items

    def add_money_inside(self, amount: int) -> None:
        """
        Admin method. Add more money to the machine.

        :param amount: amount of money.
        :raises Exception: if *amount* is negative.
        """
        if amount < 0:
            raise Exception('Validation error.')
        self.money_inside += amount

    def get_money(self, amount: int) -> int:
        """
        Admin method. Withdraw money from the machine's reserve.

        :param amount: amount of money.
        :return: the withdrawn amount.
        :raises Exception: if *amount* is negative or exceeds the reserve.
        """
        if amount < 0:
            raise Exception('Validation error.')
        if self.money_inside < amount:
            raise Exception(f'Not enough money. Current amount: {self.money_inside}.')
        self.money_inside -= amount
        return amount
| iaramer/algorithms | python/mipt/mipt_python course/homework/hw1/vending_machine.py | vending_machine.py | py | 4,205 | python | en | code | 0 | github-code | 13 |
73282801299 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# flake8: noqa
from __future__ import unicode_literals, division, absolute_import, print_function
from .compatibility_utils import PY2, text_type, bchr, bord
import binascii
if PY2:
range = xrange
from itertools import cycle
def getLanguage(langID, sublangID):
mobilangdict = {
54 : {0 : 'af'}, # Afrikaans
28 : {0 : 'sq'}, # Albanian
1 : {0 : 'ar' , 5 : 'ar-dz' , 15 : 'ar-bh' , 3 : 'ar-eg' , 2 : 'ar-iq', 11 : 'ar-jo' , 13 : 'ar-kw' , 12 : 'ar-lb' , 4: 'ar-ly',
6 : 'ar-ma' , 8 : 'ar-om' , 16 : 'ar-qa' , 1 : 'ar-sa' , 10 : 'ar-sy' , 7 : 'ar-tn' , 14 : 'ar-ae' , 9 : 'ar-ye'},
# Arabic, Arabic (Algeria), Arabic (Bahrain), Arabic (Egypt), Arabic
# (Iraq), Arabic (Jordan), Arabic (Kuwait), Arabic (Lebanon), Arabic
# (Libya), Arabic (Morocco), Arabic (Oman), Arabic (Qatar), Arabic
# (Saudi Arabia), Arabic (Syria), Arabic (Tunisia), Arabic (United Arab
# Emirates), Arabic (Yemen)
43 : {0 : 'hy'}, # Armenian
77 : {0 : 'as'}, # Assamese
44 : {0 : 'az'}, # "Azeri (IANA: Azerbaijani)
45 : {0 : 'eu'}, # Basque
35 : {0 : 'be'}, # Belarusian
69 : {0 : 'bn'}, # Bengali
2 : {0 : 'bg'}, # Bulgarian
3 : {0 : 'ca'}, # Catalan
4 : {0 : 'zh' , 3 : 'zh-hk' , 2 : 'zh-cn' , 4 : 'zh-sg' , 1 : 'zh-tw'},
# Chinese, Chinese (Hong Kong), Chinese (PRC), Chinese (Singapore), Chinese (Taiwan)
26 : {0 : 'hr', 3 : 'sr'}, # Croatian, Serbian
5 : {0 : 'cs'}, # Czech
6 : {0 : 'da'}, # Danish
19 : {0: 'nl', 1 : 'nl' , 2 : 'nl-be'}, # Dutch / Flemish, Dutch (Belgium)
9 : {0: 'en', 1 : 'en' , 3 : 'en-au' , 40 : 'en-bz' , 4 : 'en-ca' , 6 : 'en-ie' , 8 : 'en-jm' , 5 : 'en-nz' , 13 : 'en-ph' ,
7 : 'en-za' , 11 : 'en-tt' , 2 : 'en-gb', 1 : 'en-us' , 12 : 'en-zw'},
# English, English (Australia), English (Belize), English (Canada),
# English (Ireland), English (Jamaica), English (New Zealand), English
# (Philippines), English (South Africa), English (Trinidad), English
# (United Kingdom), English (United States), English (Zimbabwe)
37 : {0 : 'et'}, # Estonian
56 : {0 : 'fo'}, # Faroese
41 : {0 : 'fa'}, # Farsi / Persian
11 : {0 : 'fi'}, # Finnish
12 : {0 : 'fr', 1 : 'fr' , 2 : 'fr-be' , 3 : 'fr-ca' , 5 : 'fr-lu' , 6 : 'fr-mc' , 4 : 'fr-ch'},
# French, French (Belgium), French (Canada), French (Luxembourg), French (Monaco), French (Switzerland)
55 : {0 : 'ka'}, # Georgian
7 : {0 : 'de', 1 : 'de' , 3 : 'de-at' , 5 : 'de-li' , 4 : 'de-lu' , 2 : 'de-ch'},
# German, German (Austria), German (Liechtenstein), German (Luxembourg), German (Switzerland)
8 : {0 : 'el'}, # Greek, Modern (1453-)
71 : {0 : 'gu'}, # Gujarati
13 : {0 : 'he'}, # Hebrew (also code 'iw'?)
57 : {0 : 'hi'}, # Hindi
14 : {0 : 'hu'}, # Hungarian
15 : {0 : 'is'}, # Icelandic
33 : {0 : 'id'}, # Indonesian
16 : {0 : 'it', 1 : 'it' , 2 : 'it-ch'}, # Italian, Italian (Switzerland)
17 : {0 : 'ja'}, # Japanese
75 : {0 : 'kn'}, # Kannada
63 : {0 : 'kk'}, # Kazakh
87 : {0 : 'x-kok'}, # Konkani (real language code is 'kok'?)
18 : {0 : 'ko'}, # Korean
38 : {0 : 'lv'}, # Latvian
39 : {0 : 'lt'}, # Lithuanian
47 : {0 : 'mk'}, # Macedonian
62 : {0 : 'ms'}, # Malay
76 : {0 : 'ml'}, # Malayalam
58 : {0 : 'mt'}, # Maltese
78 : {0 : 'mr'}, # Marathi
97 : {0 : 'ne'}, # Nepali
20 : {0 : 'no'}, # Norwegian
72 : {0 : 'or'}, # Oriya
21 : {0 : 'pl'}, # Polish
22 : {0 : 'pt', 2 : 'pt' , 1 : 'pt-br'}, # Portuguese, Portuguese (Brazil)
70 : {0 : 'pa'}, # Punjabi
23 : {0 : 'rm'}, # "Rhaeto-Romanic" (IANA: Romansh)
24 : {0 : 'ro'}, # Romanian
25 : {0 : 'ru'}, # Russian
59 : {0 : 'sz'}, # "Sami (Lappish)" (not an IANA language code)
# IANA code for "Northern Sami" is 'se'
# 'SZ' is the IANA region code for Swaziland
79 : {0 : 'sa'}, # Sanskrit
27 : {0 : 'sk'}, # Slovak
36 : {0 : 'sl'}, # Slovenian
46 : {0 : 'sb'}, # "Sorbian" (not an IANA language code)
# 'SB' is IANA region code for 'Solomon Islands'
# Lower Sorbian = 'dsb'
# Upper Sorbian = 'hsb'
# Sorbian Languages = 'wen'
10 : {0 : 'es' , 4 : 'es' , 44 : 'es-ar' , 64 : 'es-bo' , 52 : 'es-cl' , 36 : 'es-co' , 20 : 'es-cr' , 28 : 'es-do' ,
48 : 'es-ec' , 68 : 'es-sv' , 16 : 'es-gt' , 72 : 'es-hn' , 8 : 'es-mx' , 76 : 'es-ni' , 24 : 'es-pa' ,
60 : 'es-py' , 40 : 'es-pe' , 80 : 'es-pr' , 56 : 'es-uy' , 32 : 'es-ve'},
# Spanish, Spanish (Mobipocket bug?), Spanish (Argentina), Spanish
# (Bolivia), Spanish (Chile), Spanish (Colombia), Spanish (Costa Rica),
# Spanish (Dominican Republic), Spanish (Ecuador), Spanish (El
# Salvador), Spanish (Guatemala), Spanish (Honduras), Spanish (Mexico),
# Spanish (Nicaragua), Spanish (Panama), Spanish (Paraguay), Spanish
# (Peru), Spanish (Puerto Rico), Spanish (Uruguay), Spanish (Venezuela)
48 : {0 : 'sx'}, # "Sutu" (not an IANA language code)
# "Sutu" is another name for "Southern Sotho"?
# IANA code for "Southern Sotho" is 'st'
65 : {0 : 'sw'}, # Swahili
29 : {0 : 'sv' , 1 : 'sv' , 8 : 'sv-fi'}, # Swedish, Swedish (Finland)
73 : {0 : 'ta'}, # Tamil
68 : {0 : 'tt'}, # Tatar
74 : {0 : 'te'}, # Telugu
30 : {0 : 'th'}, # Thai
49 : {0 : 'ts'}, # Tsonga
50 : {0 : 'tn'}, # Tswana
31 : {0 : 'tr'}, # Turkish
34 : {0 : 'uk'}, # Ukrainian
32 : {0 : 'ur'}, # Urdu
67 : {0 : 'uz', 2 : 'uz'}, # Uzbek
42 : {0 : 'vi'}, # Vietnamese
52 : {0 : 'xh'}, # Xhosa
53 : {0 : 'zu'}, # Zulu
}
lang = "en"
if langID in mobilangdict:
subdict = mobilangdict[langID]
lang = subdict[0]
if sublangID in subdict:
lang = subdict[sublangID]
return lang
def toHex(byteList):
    """Return the hexadecimal encoding of *byteList* as a bytestring."""
    encoded = binascii.hexlify(byteList)
    return encoded
# returns base32 bytestring
def toBase32(value, npad=4):
    """Encode a non-negative integer as a base-32 bytestring.

    Digits run 0-9 then A-V; the result is left-padded with b'0' to at
    least *npad* characters.
    """
    alphabet = b'0123456789ABCDEFGHIJKLMNOPQRSTUV'
    digits = []
    remaining = value
    while remaining != 0:
        remaining, rem = divmod(remaining, 32)
        digits.append(alphabet[rem:rem + 1])
    if not digits:
        digits.append(b'0')
    encoded = b''.join(reversed(digits))
    if len(encoded) < npad:
        encoded = b'0' * (npad - len(encoded)) + encoded
    return encoded
# converts base32 string to value
def fromBase32(str_num):
    """Decode a base-32 string/bytestring (digits 0-9, A-V) to an integer."""
    if isinstance(str_num, text_type):
        str_num = str_num.encode('latin-1')
    value = 0
    # Horner's scheme, most-significant digit first.
    for pos in range(len(str_num)):
        digit = str_num[pos:pos + 1]
        if digit in b'0123456789':
            digit_value = ord(digit) - ord(b'0')
        else:
            digit_value = ord(digit) - ord(b'A') + 10
        value = value * 32 + digit_value
    return value
# note: if decode a bytestring using 'latin-1' (or any other 0-255 encoding)
# in place of ascii you will get a byte to half-word or integer
# one to one mapping of values from 0 - 255
def mangle_fonts(encryption_key, data):
    """XOR-obfuscate the first 1024 bytes of *data* with *encryption_key*.

    XOR is its own inverse, so applying the same key twice restores the
    original bytes; everything past byte 1024 passes through unchanged.
    """
    if isinstance(encryption_key, text_type):
        encryption_key = encryption_key.encode('latin-1')
    header = data[:1024]
    key_stream = cycle(iter(map(bord, encryption_key)))
    mangled = b''.join(bchr(bord(byte) ^ next(key_stream)) for byte in header)
    return mangled + data[1024:]
| BasioMeusPuga/Lector | lector/KindleUnpack/mobi_utils.py | mobi_utils.py | py | 8,654 | python | en | code | 1,479 | github-code | 13 |
9649659596 | from fastapi import HTTPException
from httpx import AsyncClient
class HTTPXDependency:
    """FastAPI dependency that probes outbound connectivity via httpx.

    Calling the instance performs a GET against google.com with the
    injected AsyncClient; any failure is translated into an HTTP 500.
    """

    __slots__ = "_client"

    def __init__(self, *, client: AsyncClient):
        # Keyword-only: the shared AsyncClient is injected once at startup.
        self._client = client

    async def __call__(self):
        try:
            await self._client.get("https://google.com")
        except Exception as e:
            # NOTE(review): broad catch maps any transport error to a 500;
            # consider logging instead of print.
            print(f"Error: {type(e)} {e}")
            raise HTTPException(status_code=500)
| victoraugustolls/httpx-timeout | app/dependencies/httpx/dependency.py | dependency.py | py | 418 | python | en | code | 0 | github-code | 13 |
74288237456 | import numpy as np
import matplotlib.pyplot as plt
def canicas(coefbooleanos, estinicial):
    """Propagate marbles through a boolean adjacency matrix.

    Output slot j holds a marble iff some input slot i holds one and
    coefbooleanos[i][j] is True (a boolean matrix-vector product).

    :param coefbooleanos: square boolean matrix (n x n).
    :param estinicial: boolean initial state of length n.
    :return: boolean final state of length n.
    :raises ValueError: if the matrix is not square or the initial state
        length does not match it.
    """
    # Matrix dimension.
    n = len(coefbooleanos)

    # BUGFIX: the original only printed a message on invalid input and
    # then kept computing; fail fast instead so callers never receive a
    # result derived from inconsistent data.
    for fila in coefbooleanos:
        if len(fila) != n:
            raise ValueError("La matriz no es cuadrada.")
    if len(estinicial) != n:
        raise ValueError("El estado inicial no tiene el mismo tamaño que la matriz.")

    # Boolean matrix-vector product: OR over AND terms.
    estfinal = [False] * n
    for j in range(n):
        for i in range(n):
            if coefbooleanos[i][j]:
                estfinal[j] = estfinal[j] or estinicial[i]
    return estfinal
def multrendijasclasico(nrendijas, ndetectores, probrendijas):
    """Classical multi-slit experiment: probability at each detector.

    Builds the two-step transition matrix from the per-slit probability
    rows, applies it to a particle starting at position 0, and returns
    the squared magnitudes of the resulting column vector.
    """
    transicion = np.zeros((ndetectores, ndetectores))
    # Accumulate detector-to-detector transition weights across all slits.
    for destino in range(ndetectores):
        for rendija in range(nrendijas):
            for origen in range(ndetectores):
                transicion[destino][origen] += probrendijas[rendija][destino] * probrendijas[rendija][origen]

    # The particle starts at the first position.
    estado = np.zeros((ndetectores, 1))
    estado[0] = 1

    final = np.dot(transicion, estado)
    return np.abs(final) ** 2
def multrendijascuantico(nrendijas, nubicaciones, matriztrans, estinicial):
    """Quantum multi-slit experiment: project the evolved state.

    Expands *matriztrans* over the slits with a Kronecker product,
    evolves *estinicial*, then projects onto the last component of each
    slit sub-block to obtain the amplitudes at the target locations.
    """
    dim = matriztrans.shape[0]
    ampliada = np.kron(np.eye(nrendijas), matriztrans)

    # Each projector row selects the last entry of one slit sub-block.
    proyeccion = np.zeros((nubicaciones, nrendijas * dim))
    for ubicacion in range(nubicaciones):
        proyeccion[ubicacion, (ubicacion + 1) * dim - 1] = 1

    return proyeccion @ ampliada @ estinicial
def diagramabarras(probabilidades, vectorestados, archivo):
    """Draw a bar chart of state probabilities, save it and show it.

    :param probabilidades: sequence of probabilities, one bar per index.
    :param vectorestados: state labels (NOTE(review): currently unused).
    :param archivo: file path the figure is saved to before display.
    """
    # Bar chart, one bar per state index.
    plt.bar(range(len(probabilidades)), probabilidades)

    # Axis labels.
    plt.xlabel('Estado')
    plt.ylabel('Probabilidad')

    # Chart title.
    plt.title('Diagrama de barras de las probabilidades:')

    # Persist the figure, then display it.
    plt.savefig(archivo)
    plt.show()
38862319740 | import numpy as np
import torch
import sys
import pandas as pd
import os
from sklearn import preprocessing
# from keras_preprocessing.text import Tokenizer
import gc
gene_map = {
'A': [1, 0, 0, 0],
'C': [0, 1, 0, 0],
'G': [0, 0, 1, 0],
'T': [0, 0, 0, 1],
'N': [0, 0, 0, 0],
}
f = pd.read_csv('./weight_all_16_.csv')
f = f.to_dict('list')
def split_str(s, length, num):
    """Return *num* overlapping windows of *s*, each *length* chars long."""
    return [s[start:start + length] for start in range(num)]
def Get_Conservation_Score(TF_name):
    """Load per-position conservation scores for one TF's pos/neg sets.

    Positives get their row mean written into the last column before
    concatenation with the negatives; NaNs become 0 and the combined
    matrix is shuffled with a fixed seed so rows stay aligned with the
    other feature loaders that use the same seed.
    """
    a = pd.read_table('./data/conservation_score/'+TF_name+"_pos_cs.fasta", sep=' ', header=None)
    # NOTE(review): the row mean overwrites the last column of the
    # positives only; confirm the asymmetry vs. the negative set is intended.
    a.iloc[:,-1] = a.mean(1)
    b = pd.read_table('./data/conservation_score/' +TF_name+"_neg_1x_cs.fasta", sep=' ', header=None)

    train = pd.concat([a,b]).iloc[:,1:-1].fillna(0) # NaN -> 0
    X_train = np.array(train,dtype="float32")
    # Fixed seed: every feature loader shuffles rows in the same order.
    np.random.seed(1)
    shuffle_ix = np.random.permutation(np.arange(len(X_train)))
    data = X_train[shuffle_ix]
    # data = np.array(np.zeros_like(data))
    return data
def Get_DNase_Score(TF_name):
    """Load DNase accessibility scores for one TF's pos/neg sets.

    Columns 3..-2 are kept, NaNs become 0, and rows are shuffled with
    the same fixed seed as the other feature loaders so samples align.
    """
    a = pd.read_table('./data/dnase/'+TF_name+"_Dnase.fasta", sep=' ', header=None)
    b = pd.read_table('./data/dnase/' +TF_name+"_neg_1x_Dnase.fasta", sep=' ', header=None)
    train = pd.concat([a,b]).iloc[:,3:-1].fillna(0) # NaN -> 0
    X_train = np.array(train,dtype="float32")
    # Fixed seed: every feature loader shuffles rows in the same order.
    np.random.seed(1)
    shuffle_ix = np.random.permutation(np.arange(len(X_train)))
    data = X_train[shuffle_ix]
    return data
def Get_Histone(TF_name):
    """Load and stack histone-mark signals (H3K9me3, H3K27me3) for a TF.

    Each mark yields a (rows, 1, cols) slab; the two marks are
    concatenated on axis 1, producing (rows, 2, cols).
    NOTE(review): both ``a`` and ``b`` read the same file here (no
    ``_neg`` suffix), unlike the other loaders — confirm this is intended.
    """
    file = ["H3K9me3","H3K27me3"]
    # data = []
    # f = os.listdir('./data/histone')
    for name in file:
        a = pd.read_table('./data/histone/'+TF_name+"_"+name+".fasta", sep=' ', header=None)
        b = pd.read_table('./data/histone/' +TF_name+"_"+name+".fasta", sep=' ', header=None)
        train = pd.concat([a,b]).iloc[:,3:-1].fillna(0) # NaN -> 0
        X_train = np.array(train,dtype="float32")
        # Fixed seed: every feature loader shuffles rows in the same order.
        np.random.seed(1)
        shuffle_ix = np.random.permutation(np.arange(len(X_train)))
        data1 = X_train[shuffle_ix]
        data1 = data1.reshape(data1.shape[0],1,data1.shape[1])
        # Start the stack with the first mark, concatenate later marks.
        if name == "H3K9me3":
            data = data1
        else:
            data = np.concatenate([data,data1],1)
    return data
def read_seq():
    """Return column index 2 of ./Train.csv as a one-column DataFrame."""
    label = pd.read_csv('./Train.csv',header=None).iloc[:,2]
    return label.to_frame()
def read_shape(TF_Name,Shape):
with open("./data/shape/" + TF_Name + "_pos.data" ,'r') as file:
train_len = len(file.readlines())
with open("./data/shape/" + TF_Name + "_neg.data" ,'r') as file:
test_len = len(file.readlines())
num = len(Shape)
k = 0
shape_train = np.random.randn(num, train_len, 101)
shape_test = np.random.randn(num, test_len, 101)
for name in Shape:
train_shape = open("./data/se/" + TF_Name + "_pos_shape.data." + name,'r')
test_shape = open("./data/se/" + TF_Name + "_neg_shape.data." + name,'r')
n = 1
i = 0
j = 0
row = np.random.randn(101)
for line in train_shape:
line = line.strip('\n\r')
if line[0] == '>':
if n != 1:
i = 0
shape_train[k][j] = row
row = np.random.randn(101)
if name == "HelT" or name == "Roll"or name == "Rise"or name == "Shift"or name == "Slide"or name == "Roll"or name == "Tilt":
row[i] = 0
i += 1
j += 1
else:
n = 0
if name == "HelT" or name == "Roll"or name == "Rise"or name == "Shift"or name == "Slide"or name == "Roll"or name == "Tilt":
row[i] = 0
i += 1
continue
line = line.split(',')
for s in line:
if s == 'NA':
row[i] = 0
i += 1
else:
row[i] = float(s)
i += 1
shape_train[k][j] = row
n = 1
i = 0
j = 0
row = np.random.randn(101)
for line in test_shape:
line = line.strip('\n\r')
if line[0] == '>':
if n != 1:
i = 0
shape_test[k][j] = row
row = np.random.randn(101)
if name == "HelT" or name == "Roll"or name == "Rise"or name == "Shift"or name == "Slide"or name == "Roll"or name == "Tilt":
row[i] = 0
i += 1
j += 1
else:
n = 0
if name == "HelT" or name == "Roll"or name == "Rise"or name == "Shift"or name == "Slide"or name == "Roll"or name == "Tilt":
row[i] = 0
i += 1
continue
line = line.split(',')
for s in line:
if s == 'NA':
row[i] = 0
i += 1
else:
row[i] = float(s)
i += 1
shape_test[k][j] = row
k += 1
train_shape.close()
test_shape.close()
shape_train = np.append(shape_train, shape_test, 1)
shape_train = np.transpose(shape_train,(1,0,2))
np.random.seed(1)
shuffle_ix = np.random.permutation(np.arange(len(shape_train)))
data1 = shape_train[shuffle_ix]
return data1
def Get_DNA_Sequence(TF_name):
X_train = []
y_train = []
z_train = []
zero_vector = [0., 0., 0., 0., 0., 0., 0., 0.]
pos_file = open("./data/sequence/"+TF_name+"_pos.fasta",'r')
neg_file = open("./data/sequence/"+TF_name+"_neg_1x.fasta",'r')#TF_RXRA_Tissue_Liver_pos
sample = []
pos_num = 0
neg_num = 0
length = 101
number = 0
# print("READ DNA for %s" % (cell))
for line in pos_file:
size = 0
line = line.strip('\n\r')
if len(line) < 5:
break
if line[0] == ">":
i = 0
continue
else:
line = line.upper()
if i == 0:
content = line
i = i + 1
continue
else:
content = content+line
# if number < 511:
# number = number + 1
# continue
if len(content) > length:
size = length
else:
size = len(content)
# row = np.random.randn(101, 4)
content = 'N' + content + 'N'
content1 = split_str(content,3 ,101)
row = np.random.randn(101, 16)
for location, base in enumerate(range(0, size), start=0):
row[location] = f[content1[base]]
X_train.append(row)
pos_num = pos_num + 1
y_train.append(1)
for line in neg_file:
size = 0
line = line.strip('\n\r')
if len(line) < 5:
break
if line[0] == ">":
i = 0
continue
else:
if i == 0:
content = line
i = i + 1
continue
else:
content = content + line
if len(content) > length:
size = length
else:
size = len(content)
content = 'N' + content + 'N'
content1 = split_str(content,3 ,101)
row = np.random.randn(101, 16)
for location, base in enumerate(range(0, size), start=0):
row[location] = f[content1[base]]
X_train.append(row)
neg_num = neg_num + 1
y_train.append(0)
print("the number of positive train sample: %d" % pos_num)
print("the number of negative train sample: %d" % neg_num)
X_train = np.array(X_train,dtype="float32")
y_train = np.array(y_train,dtype="float32")
np.random.seed(1)
shuffle_ix = np.random.permutation(np.arange(len(X_train)))
data = X_train[ shuffle_ix ]
label = y_train[ shuffle_ix ]
return data, label
| ZhangLab312/GHTNet | read_data.py | read_data.py | py | 8,296 | python | en | code | 0 | github-code | 13 |
10341083646 | import pandas as pd
import requests
import datetime
# base data
now = datetime.datetime.now()
base_url = "https://en.wikipedia.org/wiki/Comparison_of_smartphones"
data = pd.DataFrame(columns=["Model", "Brand", "SoC/Processor", "CPU Spec", "GPU", "Storage", "Removable storage", "RAM", "OS", "Custom Launcher", "Dimensions", "Weight", "Battery",
"Charging", "Display", "Rear Camera", "Front Camera", "Video", "Fingerprint Sensor", "Facial recognition", "Networks", "Type", "Form Factor", "Data Inputs", "Connectivity", "Release Date"])
response = requests.get(base_url)
content = response.text
content = content.replace("“", "\"")
content = content.replace("”", "\"")
content = content.replace("</li><li>", "</li>,<>")
content = content.replace("<p>", ",<p>")
content = content.replace("<br />", "<br />,")
content = content.replace("\xa0", " ")
# Extract all tables from the wikipage
dfs = pd.read_html(content)
for i in range(len(dfs)):
year = now.year - (i - 1)
if year > now.year:
continue
if year >= 2017:
current_year_of_phones = dfs[i]
current_year_of_phones.replace(u'\xa0', u' ', regex=True, inplace=True)
for index, row in current_year_of_phones.iterrows():
phone = dict()
name = row["Model"]
brand, model = name.split(" ", 1)
phone["Model"] = model
phone["Brand"] = brand
try:
phone["SoC/Processor"] = [c.strip()
for c in row["SoC"].split(",")]
except:
phone["SoC/Processor"] = [c.strip()
for c in row["CPU"].split(",")]
print(phone)
# The last row is just the date of the last update
# df = df.iloc[:-1]
# print(df)
| tschaefermedia/SmartphoneDataWikipedia | src/main.py | main.py | py | 1,842 | python | en | code | 0 | github-code | 13 |
14812856358 | from flask_app import app
from flask import render_template, redirect, request, session, flash
from flask_app.models import order, user
# Once they pay they are taken to this screen, it's their receipt.
# Need to feed to the front a list of all the order items.
# Takes in guest email if still not logged in
@app.route('/orders/confirmation', methods=['POST'])
def confirmation():
    """Receipt page shown after payment.

    Logged-in users are identified by the ``uuid`` stored in the session;
    guests are identified by the e-mail submitted with the form.
    """
    if 'uuid' in session:
        orders = order.Order.new_order()
        data = {
            'id': session['uuid']
        }
        users = user.User.select(data)
        return render_template('order_confirmation.html', order=orders, user=users)
    # Guest checkout: identify the order by the submitted e-mail address.
    data = {
        'email': request.form['guest_email']
    }
    orders = order.Order.new_guest_order(data)
    # BUGFIX: redirect() takes a URL, not a template name plus template
    # context — render the receipt template here instead.
    return render_template('order_confirmation.html', order=orders)
| Sal-Nunez/marketplace_schema | flask_app/controllers/orders.py | orders.py | py | 841 | python | en | code | 1 | github-code | 13 |
3047194371 | import hashlib
import random
import core.tree as tree
import os
import core.users as users
import logging
import core.acl as acl
from utils.utils import getMimeType, get_user_id, log_func_entry, dec_entry_log
from utils.fileutils import importFile, getImportDir, importFileIntoDir
from contenttypes.image import makeThumbNail, makePresentationFormat
from core.transition import httpstatus
log = logging.getLogger('editor')
from core.translation import lang, t
import core.db.mysqlconnector as mysqlconnector
def getContent(req, ids):
ret = ""
user = users.getUserFromRequest(req)
node = tree.getNode(ids[0])
update_error = False
access = acl.AccessData(req)
msg = "%s|web.edit.modules.files.getContend|req.fullpath=%r|req.path=%r|req.params=%r|ids=%r" % (get_user_id(req), req.fullpath, req.path, req.params, ids)
log.debug(msg)
if not access.hasWriteAccess(node) or "files" in users.getHideMenusForUser(user):
req.setStatus(httpstatus.HTTP_FORBIDDEN)
return req.getTAL("web/edit/edit.html", {}, macro="access_error")
if 'data' in req.params:
if req.params.get('data') == 'children': # get formated list of childnodes of selected directory
req.writeTAL("web/edit/modules/files.html", {'children': node.getChildren()}, macro="edit_files_popup_children")
if req.params.get('data') == 'additems': # add selected node as children
for childid in req.params.get('items').split(";"):
if childid.strip() != "":
childnode = tree.getNode(childid.strip())
for p in childnode.getParents():
p.removeChild(childnode)
node.addChild(childnode)
req.writeTAL("web/edit/modules/files.html", {'children': node.getChildren(), 'node': node}, macro="edit_files_children_list")
if req.params.get('data') == 'removeitem': # remove selected childnode node
try:
remnode = tree.getNode(req.params.get('remove'))
if len(remnode.getParents()) == 1:
users.getUploadDir(user).addChild(remnode)
node.removeChild(remnode)
except: # node not found
pass
req.writeTAL("web/edit/modules/files.html", {'children': node.getChildren(), 'node': node}, macro="edit_files_children_list")
if req.params.get('data') == 'reorder':
i = 0
for id in req.params.get('order').split(","):
if id != "":
n = tree.getNode(id)
n.setOrderPos(i)
i += 1
if req.params.get('data') == 'translate':
req.writeTALstr('<tal:block i18n:translate="" tal:content="msgstr"/>', {'msgstr': req.params.get('msgstr')})
return ""
if req.params.get("style") == "popup":
v = {"basedirs": [tree.getRoot('home'), tree.getRoot('collections')]}
id = req.params.get("id", tree.getRoot().id)
v["script"] = "var currentitem = '%s';\nvar currentfolder = '%s';\nvar node = %s;" %(id, req.params.get('parent'), id)
v["idstr"] = ",".join(ids)
v["node"] = node
req.writeTAL("web/edit/modules/files.html", v, macro="edit_files_popup_selection")
return ""
if "operation" in req.params:
op = req.params.get("operation")
if op == "delete":
for key in req.params.keys(): # delete file
if key.startswith("del|"):
filename = key[4:-2].split("|")
for file in node.getFiles():
if file.getName() == filename[1] and file.type == filename[0]:
# remove all files in directory
if file.getMimeType() == "inode/directory":
for root, dirs, files in os.walk(file.retrieveFile()):
for name in files:
try:
os.remove(root + "/" + name)
except:
pass
os.removedirs(file.retrieveFile()+"/")
if len([f for f in node.getFiles() if f.getName()==filename[1] and f.type==filename[0]]) > 1:
# remove single file from database if there are duplicates
node.removeFile(file, single=True)
else:
# remove single file
node.removeFile(file)
try:
os.remove(file.retrieveFile())
except:
pass
break
break
elif key.startswith("delatt|"):
for file in node.getFiles():
if file.getMimeType() == "inode/directory":
try:
os.remove(file.retrieveFile() + "/" + key.split("|")[2][:-2])
except:
pass
break
break
elif op=="change":
uploadfile = req.params.get("updatefile")
if uploadfile:
create_version_error = False
# Create new version when change file
if (req.params.get('generate_new_version') and not hasattr(node, "metaFields")):
if (req.params.get('version_comment', '').strip()==''
or req.params.get('version_comment', '').strip()==' '):
create_version_error = True
req.setStatus(httpstatus.HTTP_INTERNAL_SERVER_ERROR)
ret += req.getTAL("web/edit/modules/files.html", {}, macro="version_error")
else:
current = node
node = node.createNewVersion(user)
for attr, value in current.items():
if node.get(attr)!="": # do not overwrite attributes
pass
else:
node.set(attr, value)
req.setStatus(httpstatus.HTTP_MOVED_TEMPORARILY)
ret += req.getTAL("web/edit/modules/metadata.html", {'url':'?id='+node.id+'&tab=files', 'pid':None}, macro="redirect")
if req.params.get("change_file")=="yes" and not create_version_error: # remove old files
for f in node.getFiles():
if f.getType() in node.getSysFiles():
node.removeFile(f)
node.set("system.version.comment", '('+t(req, "edit_files_new_version_exchanging_comment")+')\n'+req.params.get('version_comment', ''))
if req.params.get("change_file")=="no" and not create_version_error:
node.set("system.version.comment", '('+t(req, "edit_files_new_version_adding_comment")+')\n'+req.params.get('version_comment', ''))
if req.params.get("change_file") in ["yes", "no"] and not create_version_error:
file = importFile(uploadfile.filename, uploadfile.tempname) # add new file
node.addFile(file)
logging.getLogger('usertracing').info(user.name+" changed file of node "+node.id+" to "+uploadfile.filename+" ("+uploadfile.tempname+")")
attpath = ""
for f in node.getFiles():
if f.getMimeType()=="inode/directory":
attpath = f.getName()
break
if req.params.get("change_file")=="attdir" and not create_version_error: # add attachmentdir
dirname = req.params.get("inputname")
if attpath=="": # add attachment directory
attpath = req.params.get("inputname")
if not os.path.exists(getImportDir() + "/" + attpath):
os.mkdir(getImportDir() + "/" + attpath)
node.addFile(tree.FileNode(name=getImportDir() + "/" + attpath, mimetype="inode/directory", type="attachment"))
file = importFileIntoDir(getImportDir() + "/" + attpath, uploadfile.tempname) # add new file
node.set("system.version.comment", '('+t(req, "edit_files_new_version_attachment_directory_comment")+')\n'+req.params.get('version_comment', ''))
pass
if req.params.get("change_file")=="attfile" and not create_version_error: # add file as attachment
if attpath=="":
# no attachment directory existing
file = importFile(uploadfile.filename, uploadfile.tempname) # add new file
file.mimetype = "inode/file"
file.type = "attachment"
node.addFile(file)
else:
# import attachment file into existing attachment directory
file = importFileIntoDir(getImportDir() + "/" + attpath, uploadfile.tempname) # add new file
node.set("system.version.comment", '('+t(req, "edit_files_new_version_attachment_comment")+')\n'+req.params.get('version_comment', ''))
pass
elif op == "addthumb": # create new thumbanil from uploaded file
uploadfile = req.params.get("updatefile")
if uploadfile:
thumbname = os.path.join(getImportDir(), hashlib.md5(str(random.random())).hexdigest()[0:8]) + ".thumb"
file = importFile(thumbname, uploadfile.tempname) # add new file
makeThumbNail(file.retrieveFile(), thumbname)
makePresentationFormat(file.retrieveFile(), thumbname + "2")
if os.path.exists(file.retrieveFile()): # remove uploaded original
os.remove(file.retrieveFile())
for f in node.getFiles():
if f.type in ["thumb", "presentation", "presentati"]:
if os.path.exists(f.retrieveFile()):
os.remove(f.retrieveFile())
node.removeFile(f)
node.addFile(tree.FileNode(name=thumbname, type="thumb", mimetype="image/jpeg"))
node.addFile(tree.FileNode(name=thumbname + "2", type="presentation", mimetype="image/jpeg"))
logging.getLogger('usertracing').info(user.name + " changed thumbnail of node " + node.id)
elif op == "postprocess":
if hasattr(node, "event_files_changed"):
try:
node.event_files_changed()
logging.getLogger('usertracing').info(user.name + " postprocesses node " + node.id)
except:
update_error = True
v = {"id": req.params.get("id", "0"), "tab": req.params.get("tab", ""), "node": node, "update_error": update_error,
"user": user, "files": filter(lambda x: x.type != 'statistic', node.getFiles()),
"statfiles": filter(lambda x: x.type == 'statistic', node.getFiles()),
"attfiles": filter(lambda x: x.type == 'attachment', node.getFiles()), "att": [], "nodes": [node], "access": access}
for f in v["attfiles"]: # collect all files in attachment directory
if f.getMimeType() == "inode/directory":
for root, dirs, files in os.walk(f.retrieveFile()):
for name in files:
af = tree.FileNode(root + "/" + name, "attachmentfile", getMimeType(name)[0])
v["att"].append(af)
return req.getTAL("web/edit/modules/files.html", v, macro="edit_files_file")
| hibozzy/mediatum | web/edit/modules/files.py | files.py | py | 12,075 | python | en | code | null | github-code | 13 |
22798976861 | # -*- coding: UTF-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from core.metrics import l2_norm
class BasicBlock(nn.Module):
    """Residual building block used by ResNet-18/34.

    Two 3x3 convolutions with batch norm form the residual branch; a
    strided 1x1 projection is added on the skip path whenever the
    spatial resolution or the channel count changes.
    """

    # Output channels = expansion * out_channels (1 for basic blocks).
    expansion = 1

    def __init__(self, in_channels, out_channels, stride=1):
        super(BasicBlock, self).__init__()
        # Main (residual) branch: conv-BN-ReLU-conv-BN.
        self.residual_function = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3,
                      stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels * BasicBlock.expansion,
                      kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels * BasicBlock.expansion))
        # Skip branch: identity unless shapes differ, in which case a
        # 1x1 convolution matches resolution and channel count.
        needs_projection = (stride != 1
                            or in_channels != BasicBlock.expansion * out_channels)
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels * BasicBlock.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels * BasicBlock.expansion))
        else:
            self.shortcut = nn.Sequential()
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Sum the two branches, then apply the final non-linearity.
        identity = self.shortcut(x)
        residual = self.residual_function(x)
        return self.relu(residual + identity)
class ResNet(nn.Module):
    """ResNet backbone mapping an image batch to L2-normalised embeddings.

    `block` is the residual block class (e.g. BasicBlock) and `num_block`
    gives the number of blocks for each of the four stages.
    """

    def __init__(self, block, num_block, embedding_size=256):
        super(ResNet, self).__init__()
        self.in_channels = 64
        # Stem: a single non-downsampling 3x3 conv followed by BN/ReLU.
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True))
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; every stage after the first halves the
        # spatial resolution via its leading stride-2 block.
        self.conv2_x = self._make_layer(block, 64, num_block[0], 1)
        self.conv3_x = self._make_layer(block, 128, num_block[1], 2)
        self.conv4_x = self._make_layer(block, 256, num_block[2], 2)
        self.conv5_x = self._make_layer(block, 512, num_block[3], 2)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, embedding_size)

    def _make_layer(self, block, out_channels, num_blocks, stride=1):
        """Stack `num_blocks` residual blocks; only the first may downsample."""
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_channels, out_channels, s))
            self.in_channels = out_channels * block.expansion
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = self.maxpool(self.conv1(x))
        for stage in (self.conv2_x, self.conv3_x, self.conv4_x, self.conv5_x):
            out = stage(out)
        out = self.avg_pool(out)
        out = self.fc(out.view(out.size(0), -1))
        # Project the embeddings onto the unit hypersphere.
        return l2_norm(out)
def resnet18(embedding_size=256):
    """Build a ResNet-18 (two BasicBlocks per stage) with the given embedding size."""
    stage_sizes = [2, 2, 2, 2]
    return ResNet(BasicBlock, stage_sizes, embedding_size)
| factzero/pytorch_jaguarface_examples | recognitionFace/core/resnet.py | resnet.py | py | 3,323 | python | en | code | 2 | github-code | 13 |
4998367230 | # Build the top-open-subtitles-sentences repository
import os
import shutil
import time
import zipfile
import gzip
import re
import itertools
from collections import Counter
import pandas as pd
import requests
###############################################################################
# Settings
# languages (see valid_langcodes)
langcodes = ["af", "ar", "bg", "bn", "br", "bs", "ca", "cs", "da", "de",
"el", "en", "eo", "es", "et", "eu", "fa", "fi", "fr", "gl",
"he", "hi", "hr", "hu", "hy", "id", "is", "it", "ja", "ka",
"kk", "ko", "lt", "lv", "mk", "ml", "ms", "nl", "no", "pl",
"pt", "pt_br", "ro", "ru", "si", "sk", "sl", "sq", "sr",
"sv", "ta", "te", "th", "tl", "tr", "uk", "ur", "vi",
"ze_en", "ze_zh", "zh_cn", "zh_tw"]
# type of corpus data to use as source
source_data_type = "raw" # "raw", "text", "tokenized"
# parts to run
get_source_data = True
redownload_source_data = False
get_parsed_text = True
get_sentences = True
get_words = True
get_words_using_tokenized = False
get_summary_table = False
delete_tmpfile = True
delete_source_data = True
always_keep_raw_data = True
# parsing settings (only in effect when source_data_type = "raw")
year_min = 0 # lowest: 0
year_max = 2018 # largest: 2018
# performance
download_chunk_size = 1000000
min_count = 5
lines_per_chunk = 10000000
# finetuning
original_language_only = False
one_subtitle_per_movie = False
use_regex_tokenizer = False
regex_tokenizer_pattern = "\w+|[^\w\s]+"
linestrip_pattern = " /-–\n\t\""
lowcase_cutoff = 0.08 # set to 0.5 to get words faster
md_summary_table = True
# output settings
n_top_sentences = 10000
n_top_words = 30000
###############################################################################
# Info
# Valid langcodes:
# See the variable 'valid_langcodes' below. 'languages' contains a key.
# Note that 'ze' signifies files containing dual Chinese and English subtitles.
# Storage requirements:
# With 'delete_source_data', source data of type "text" and "tokenized" is
# deleted after extracting top sentences/words. These source files are smaller
# than those for 'source_data_type' = "raw". If additionally
# 'always_keep_raw_data' is 'False', "raw" data is also deleted.
# Size of the extracted 'raw' corpus data:
# "all 62 languages": 427.6GB, "en": 54.2GB, "pt_br": 32.0GB, "pl": 29.5GB,
# "es": 27.1GB, "ro": 24.4GB, "tr": 21.8Gb
# Memory requirements:
# With raw data, langcode = "en", year_min = 0, and year_max = 2018, the corpus
# is parsed into a file of 13GB. This file is then loaded 'lines_per_chunk'
# lines at a time into a Counter (dict subclass) object which at its peak takes
# 26GB of memory. By setting 'min_count', entries with count less than that can
# be omitted to save memory, but this only happens after the whole tempfile has
# been loaded (otherwise the final counts would not be correct).
# Download time:
# Variable 'download_chunk_size' influences the download time. The default
# works well with a bandwidth of 50MB/s, bringing the download speed close to
# that. The zipped raw corpus for a large language like "en" is around 13GB
# and hence takes around 4 minutes to download at that rate.
# Runtime:
# Runtime excluding data download (on M1 MBP) with the default settings:
# "all 62 languages": 18h, "pl": 1h11min, "ar": 29min, "fr": 30min.
# Runtime is substantially faster without 'get_words', or when using another
# datatype than 'raw' via 'source_data_type' or 'get_words_using_tokenized'.
# The drawback is that this allows no control over the years and subtitle
# files to include, or the tokenization. With 'use_regex_tokenizer' a faster
# bare-bones tokenizer is always used instead of spaCy (normally it is only
# used as fallback for 'langs_not_in_spacy').
###############################################################################
# Constants etc.

# every language code the OPUS OpenSubtitles v2018 corpus provides
valid_langcodes = ["af", "ar", "bg", "bn", "br", "bs", "ca", "cs", "da", "de",
                   "el", "en", "eo", "es", "et", "eu", "fa", "fi", "fr", "gl",
                   "he", "hi", "hr", "hu", "hy", "id", "is", "it", "ja", "ka",
                   "kk", "ko", "lt", "lv", "mk", "ml", "ms", "nl", "no", "pl",
                   "pt", "pt_br", "ro", "ru", "si", "sk", "sl", "sq", "sr",
                   "sv", "ta", "te", "th", "tl", "tr", "uk", "ur", "vi",
                   "ze_en", "ze_zh", "zh_cn", "zh_tw"]

# human-readable name per code ("ze_*" files hold dual Chinese/English subs)
languages = {"af": "Afrikaans", "ar": "Arabic", "bg": "Bulgarian", "bn":
             "Bengali", "br": "Breton", "bs": "Bosnian", "ca": "Catalan",
             "cs": "Czech", "da": "Danish", "de": "German", "el": "Greek",
             "en": "English", "eo": "Esperanto", "es": "Spanish", "et":
             "Estonian", "eu": "Basque", "fa": "Persian", "fi": "Finnish",
             "fr": "French", "gl": "Galician", "he": "Hebrew", "hi": "Hindi",
             "hr": "Croatian", "hu": "Hungarian", "hy": "Armenian", "id":
             "Indonesian", "is": "Icelandic", "it": "Italian", "ja":
             "Japanese", "ka": "Georgian", "kk": "Kazakh", "ko": "Korean",
             "lt": "Lithuanian", "lv": "Latvian", "mk": "Macedonian", "ml":
             "Malayalam", "ms": "Malay", "nl": "Dutch", "no": "Norwegian",
             "pl": "Polish", "pt": "Portuguese", "pt_br": "Portuguese, Brazil",
             "ro": "Romanian", "ru": "Russian", "si": "Sinhala", "sk":
             "Slovak", "sl": "Slovenian", "sq": "Albanian", "sr": "Serbian",
             "sv": "Swedish", "ta": "Tamil", "te": "Telugu", "th": "Thai",
             "tl": "Tagalog", "tr": "Turkish", "uk": "Ukrainian", "ur": "Urdu",
             "vi": "Vietnamese", "ze_en": "English, ze", "ze_zh":
             "Chinese, ze", "zh_cn": "Chinese", "zh_tw": "Chinese, Taiwan"}

# languages for which the regex tokenizer is used instead of spaCy
langs_not_in_spacy = ['br', 'bs', 'eo', 'gl', 'ka', 'kk', 'ms', 'no', 'ko']
#TODO 'ko' should work but dependency not installing and config buggy

# languages whose entries containing latin characters get filtered out
non_latin_langs = ['ar', 'bg', 'bn', 'el', 'fa', 'he', 'hi', 'hy', 'ja', 'ka',
                   'kk', 'ko', 'mk', 'ml', 'ru', 'si', 'ta', 'te', 'th', 'uk',
                   'ur', 'ze_zh', 'zh_cn', 'zh_tw']
def source_zipfile(langcode, source_data_type):
    """Return the OPUS download URL for one language's corpus file.

    :param langcode: OpenSubtitles language code, e.g. "en" or "pt_br"
    :param source_data_type: one of "raw", "text", "tokenized"
    :raises Exception: for any other source_data_type value
    """
    base = "https://opus.nlpl.eu/download.php?f="
    paths = {
        "raw": f"OpenSubtitles/v2018/raw/{langcode}.zip",
        "text": f"OpenSubtitles/v2018/mono/OpenSubtitles.raw.{langcode}.gz",
        "tokenized": f"OpenSubtitles/v2018/mono/OpenSubtitles.{langcode}.gz",
    }
    if source_data_type not in paths:
        raise Exception(f"Error: {source_data_type} not a valid "
                        + "source_data_type.")
    return base + paths[source_data_type]
basedatadir = "src/data"
def rawdatadir(langcode):
return f"{basedatadir}/{langcode}/raw"
def parsedfile(langcode, source_data_type):
if source_data_type == "raw":
return f"bld/tmp/{langcode}_raw.txt"
if source_data_type == "text":
return f"src/data/{langcode}/{langcode}_text.txt"
if source_data_type == "tokenized":
return f"src/data/{langcode}/{langcode}_tokenized.txt"
def tmpfile(langcode):
return f"bld/tmp/{langcode}_raw.txt"
def sentence_outfile(langcode):
return f"bld/top_sentences/{langcode}_top_sentences.csv"
def word_outfile(langcode):
return f"bld/top_words/{langcode}_top_words.csv"
def extra_sentences_to_exclude():
return (pd.read_csv(f"src/extra_settings/extra_sentences_to_exclude.csv")
.to_dict('list'))
total_counts_sentences_file = "bld/total_counts_sentences.csv"
total_counts_words_file = "bld/total_counts_words.csv"
###############################################################################
# Functions
def download_data_and_extract(basedatadir, langcode, source_data_type):
    """Download one language's corpus archive and unpack it.

    "text"/"tokenized" sources arrive gzipped and are decompressed
    straight to parsedfile(); "raw" sources arrive as a zip and are
    extracted under <basedatadir>/<langcode>/raw. The downloaded
    archive is removed afterwards.
    """
    print("Downloading data:")
    if not os.path.exists(basedatadir):
        os.makedirs(basedatadir)
    f = download_data_file(source_zipfile(langcode, source_data_type),
                           basedatadir, langcode)
    # (the previously computed, unused `extension` local was removed)
    if source_data_type in ["text", "tokenized"]:
        with gzip.open(f, 'rb') as f_in, \
                open(parsedfile(langcode, source_data_type), 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
    else:
        with zipfile.ZipFile(f, 'r') as zip_ref:
            zip_ref.extractall(os.path.join(basedatadir, f"{langcode}/raw"))
    os.remove(f)
def download_data_file(url, basedatadir, langcode):
    """Stream *url* into <basedatadir>/<langcode><ext> and return the path.

    Prints rough progress every ~10% of the download. A missing
    Content-Length header (chunked responses) no longer crashes the
    function — the percentage display is simply skipped.
    """
    extension = os.path.splitext(url)[1]
    local_filename = os.path.join(basedatadir, f"{langcode}{extension}")
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        # Content-Length may be absent; int(None) raised TypeError before.
        total_length = int(r.headers.get('content-length') or 0)
        if total_length:
            print(f"  downloading {total_length/1e6:.1f} MB...")
        print_period = max(round(total_length/download_chunk_size/10), 1)
        download_length = 0
        i = 1
        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=download_chunk_size):
                download_length += len(chunk)
                f.write(chunk)
                # Guard against division by zero when the size is unknown.
                if total_length and i % print_period == 0:
                    print(f"    {download_length/total_length*100:.0f}% done")
                i += 1
    return local_filename
def parse_rawdatadir_to_tmpfile(langcode, rawdatadir, tmpfile,
                                year_min, year_max):
    """Walk the extracted raw corpus tree and append all subtitle text
    (one line per subtitle line) to *tmpfile*.

    Layout walked: <rawdatadir>/OpenSubtitles/raw/<langcode>/<year>/<movie>/.
    Behaviour is steered by the module-level settings
    one_subtitle_per_movie and original_language_only. The globals
    n_subfiles / n_original_info / n_matching_original collect run stats.
    """
    print("Parsing data:")
    if os.path.exists(tmpfile):
        os.remove(tmpfile)
    if not os.path.exists("bld/tmp"):
        os.makedirs("bld/tmp")
    global n_subfiles, n_original_info, n_matching_original
    n_subfiles = 0
    n_original_info = 0
    n_matching_original = 0
    yeardatadir = os.path.join(rawdatadir, f"OpenSubtitles/raw/{langcode}")
    fout = open(tmpfile, 'a')
    for ydir in os.listdir(yeardatadir):
        try:
            # Non-numeric directory names raise ValueError and are skipped
            # by the except below.
            if int(ydir) >= year_min and int(ydir) <= year_max:
                print(f"  {ydir}")
                outtext = ""
                for mdir in os.listdir(os.path.join(yeardatadir, ydir)):
                    mdirfull = os.path.join(yeardatadir, ydir, mdir)
                    if os.path.isdir(mdirfull):
                        if one_subtitle_per_movie:
                            # sort to make deterministic and take last
                            fname = sorted([f for f in os.listdir(mdirfull)
                                            if not f.startswith('.')])[-1]
                            fpathfull = os.path.join(yeardatadir, ydir,
                                                     mdir, fname)
                            n_subfiles += 1
                            outtext += parse_xmlfile(fpathfull)
                        else:
                            for fname in os.listdir(mdirfull):
                                if not fname.startswith('.'):
                                    fpathfull = os.path.join(yeardatadir, ydir,
                                                             mdir, fname)
                                    n_subfiles += 1
                                    if original_language_only:
                                        if check_if_original(fpathfull,
                                                             langcode):
                                            n_matching_original += 1
                                            outtext += parse_xmlfile(fpathfull)
                                    else:
                                        outtext += parse_xmlfile(fpathfull)
                # One write per year keeps the number of disk writes low.
                fout.write(outtext)
        except ValueError:
            pass
    fout.close()
    print(f"  files parsed: {n_subfiles}")
    if original_language_only:
        print(f"  {n_original_info/n_subfiles:.0%} with original info")
        print(f"  {n_matching_original/n_subfiles:.0%} "
              + "match original language")
def check_if_original(infile, langcode):
    """Return True when the subtitle file declares the movie's original
    language and it matches *langcode*.

    Side effect: increments the module-global n_original_info counter
    whenever the file carries an <original> tag at all.
    """
    global n_original_info
    # Context manager fixes the handle leak the old open()/read()/close()
    # sequence had when read() raised.
    with open(infile, 'r') as fin:
        intext = fin.read()
    m = re.search("<original>(.*?)</original>", intext)
    if not m:
        return False
    n_original_info += 1
    # languages maps e.g. "pt_br" -> "Portuguese, Brazil"; only the part
    # before the comma is compared against the declared original language.
    return languages[langcode].split(",")[0] in m.group(1)
def parse_xmlfile(infile):
    """Extract plain subtitle text from one raw OpenSubtitles XML file.

    Lines beginning with '<' (tags) or a space (indented markup) are
    dropped; the remaining lines are stripped of framing punctuation
    (linestrip_pattern) and newline-terminated.
    """
    text = ""
    # Context manager ensures the handle is closed even if reading fails;
    # iterating the file directly avoids materialising readlines().
    with open(infile, 'r') as fin:
        for line in fin:
            if not line.startswith('<') and not line.startswith(' '):
                text += line.strip(linestrip_pattern) + "\n"
    return text
def parsedfile_to_top_sentences(parsedfile, outfile,
                                langcode, source_data_type):
    """Count sentence frequencies in *parsedfile* and write the
    n_top_sentences most common ones (after cleaning) to *outfile*.

    Also appends this language's total sentence count to
    total_counts_sentences_file.
    """
    print("Getting top sentences:")
    # Chunking is faster once the tmpfile is too large to fit in RAM
    # and only slightly slower when it fits in RAM.
    # The below section takes around 5min with 'es' and all years.
    if not os.path.exists("bld/top_sentences"):
        os.makedirs("bld/top_sentences")
    with open(parsedfile, 'br') as f:
        nlines = sum(1 for i in f)
    if nlines == 0:
        print(f"  No lines to process.")
        return
    else:
        print(f"  processing {nlines} lines...")
    d = Counter()
    chunks_done = 0
    with open(parsedfile, 'r') as f:
        # zip_longest over N copies of the file iterator yields tuples of
        # lines_per_chunk lines, padding the final chunk with "".
        for lines in itertools.zip_longest(*[f] * min(nlines, lines_per_chunk),
                                           fillvalue=""):
            if source_data_type != "raw":
                lines = [l.strip(linestrip_pattern) for l in lines]
            d += Counter(lines)
            chunks_done += 1
            print(f"    {chunks_done * min(nlines, lines_per_chunk)} " +
                  "lines done")
    # remove empty entries
    d.pop(None, None)
    d.pop("", None)
    # remove punctuation and numbers
    # remove 'sentences' starting with parenthesis
    # remove 'sentences' ending with colon
    # remove entries with latin characters
    punctuation_and_numbers_regex = r"^[ _\W0-9]+$"
    parenthesis_start_regex = r"^[(\[{]"
    colon_end_regex = r":$"
    pattern = "|".join([punctuation_and_numbers_regex,
                        parenthesis_start_regex, colon_end_regex])
    if langcode in non_latin_langs:
        latin_regex = "[a-zA-Zà-üÀ-Ü]"
        pattern = "|".join([pattern, latin_regex])
    d = Counter({k:v for (k, v) in d.items()
                 if (not re.search(pattern, k))})
    # save total counts (before min_count pruning, so totals stay exact)
    if os.path.exists(total_counts_sentences_file):
        total_counts = (pd.read_csv(total_counts_sentences_file)
                        .to_dict('records'))[0]
    else:
        total_counts = dict()
    total_counts[langcode] = sum(d.values())
    (pd.DataFrame(total_counts, index=[0])
     .to_csv(total_counts_sentences_file, index=False))
    # remove less common items to save memory
    if not (min_count == None or min_count == 0):
        d = Counter({key:value for key, value in d.items()
                     if value >= min_count})
    # "/n" is still at the end of every sentence
    if source_data_type == "raw":
        d = Counter({k.strip(): v for (k, v) in d.items()})
        # this takes the same time as instead running below:
        # d['sentence'] = d['sentence'].str.strip()
    # TODO try doing everything in a dictionary instead of using pandas
    d = pd.DataFrame(d.most_common(), columns=['sentence', 'count'])
    d['count'] = pd.to_numeric(d['count'],
                               downcast="unsigned") # saves ~50% memory
    d = d.astype({"sentence":"string[pyarrow]"}) # saves ~66% memory
    d = collapse_if_only_ending_differently(d, "sentence", "count")
    try:
        d = d[~d['sentence'].isin(extra_sentences_to_exclude()[langcode])]
    except KeyError as e:
        # No exclusion column exists for this language.
        print("  no extra sentences to exclude")
    (d
     .head(n_top_sentences)
     .to_csv(outfile, index=False))
def collapse_if_only_ending_differently(df, sentence, count):
    """Merge sentences that differ only in trailing punctuation.

    Counts are summed per group, the most frequent variant's spelling
    is kept, and the result is returned sorted by descending count.
    """
    ordered = df.sort_values(by=[count], ascending=False)
    keyed = ordered.assign(Sm1=ordered[sentence].str.strip(" .?!¿¡"))
    merged = keyed.groupby('Sm1', as_index=False).agg(
        {sentence: 'first', count: 'sum'})
    return (merged
            .drop(columns=['Sm1'])
            .sort_values(by=[count], ascending=False)
            .reset_index(drop=True))
def parsedfile_to_top_words(parsedfile, outfile, langcode, source_data_type):
    """Tokenize *parsedfile*, count word frequencies and write the
    n_top_words most common ones (after cleaning) to *outfile*.

    Also appends this language's total word count to
    total_counts_words_file.
    """
    print("Getting top words:")
    if not os.path.exists("bld/top_words"):
        os.makedirs("bld/top_words")
    with open(parsedfile, 'br') as f:
        nlines = sum(1 for i in f)
    if nlines == 0:
        print(f"  No lines to process.")
        return
    else:
        print(f"  processing {nlines} lines...")
    d = Counter()
    chunks_done = 0
    with open(parsedfile, 'r') as f:
        # Process lines_per_chunk lines at a time; the last chunk is
        # padded with "" by zip_longest.
        for lines in itertools.zip_longest(*[f] * min(nlines, lines_per_chunk),
                                           fillvalue=""):
            d += tokenize_lines_and_count(lines, langcode)
            chunks_done += 1
            print(f"    {chunks_done * min(nlines, lines_per_chunk)} "
                  + "lines done")
            # 6 min per 10,000,000 lines ("nl" has 107,000,000 lines)
            # TODO parallelize
    # remove empty entries
    d.pop("", None)
    d.pop(None, None)
    # remove punctuation and numbers
    # remove entries with latin characters
    punctuation_and_numbers_regex = r"^[ _\W0-9]+$"
    pattern = punctuation_and_numbers_regex
    if langcode in non_latin_langs:
        latin_regex = "[a-zA-Zà-üÀ-Ü]"
        pattern = "|".join([pattern, latin_regex])
    d = Counter({k:v for (k, v) in d.items() if not
                 re.search(pattern, k)})
    # save total counts (before min_count pruning, so totals stay exact)
    if os.path.exists(total_counts_words_file):
        total_counts = (pd.read_csv(total_counts_words_file)
                        .to_dict('records'))[0]
    else:
        total_counts = dict()
    total_counts[langcode] = sum(d.values())
    (pd.DataFrame(total_counts, index=[0])
     .to_csv(total_counts_words_file, index=False))
    # remove less common items to save memory
    if not (min_count == None or min_count == 0):
        d = Counter({key:value for key, value in d.items()
                     if value >= min_count})
    d = pd.DataFrame(d.most_common(), columns=['word', 'count'])
    d['count'] = pd.to_numeric(d['count'],
                               downcast="unsigned") # saves ~50% memory
    d = d.astype({"word":"string[pyarrow]"}) # saves ~66% memory
    d = collapse_case(d, "word", "count", "wordlow", lowcase_cutoff)
    #TODO add more cleaning steps from google-books-ngram-frequency repo
    (d
     .head(n_top_words)
     .to_csv(outfile, index=False))
def tokenize_lines_and_count(lines, langcode):
    """Tokenize an iterable of text lines and return a Counter of tokens.

    The tokenizer is chosen from module-level settings: pre-tokenized
    input is just split on spaces; the regex tokenizer is used when
    configured or when spaCy has no model for the language; otherwise a
    blank spaCy pipeline does the tokenization.
    """
    if source_data_type == "tokenized":
        # no tokenizer needed
        dt = map(lambda l: l.strip(linestrip_pattern).split(" "), lines)
    elif (use_regex_tokenizer
          or normalized_langcode(langcode) in langs_not_in_spacy):
        # use regex tokenizer
        if source_data_type == "raw":
            dt = map(lambda l: re.findall(regex_tokenizer_pattern, l), lines)
        elif source_data_type == "text":
            dt = map(lambda l: re.findall(regex_tokenizer_pattern,
                                          l.strip(linestrip_pattern)), lines)
    else:
        # use spacy tokenizer (imported lazily so the dependency is only
        # needed when this branch is actually taken)
        import spacy
        nlp = spacy.blank(normalized_langcode(langcode))
        if source_data_type == "raw":
            dt = map(lambda l: [w.text.strip("-") for w in nlp(l)], lines)
        elif source_data_type == "text":
            dt = map(lambda l: [w.text.strip("-") for w in
                                nlp(l.strip(linestrip_pattern))], lines)
    return Counter(itertools.chain.from_iterable(dt))
def normalized_langcode(langcode):
    """Map a corpus language code to its base (e.g. spaCy) code.

    The 'ze_*' files hold dual Chinese/English subtitles, so their real
    language is the part after the underscore; every other code keeps
    the part before it (e.g. 'pt_br' -> 'pt').
    """
    head, _, tail = langcode.partition("_")
    return tail if langcode in ("ze_en", "ze_zh") else head
def collapse_case(df, word, count, wordlow, cutoff=0.5):
    """Merge rows whose words differ only by letter case.

    With the default cutoff of 0.5 the most frequent spelling wins;
    otherwise wordcase_by_cutoff() decides per group whether the
    lowercase form is common enough to represent the group.
    """
    if cutoff == 0.5:
        ordered = df.sort_values(by=[count], ascending=False)
        ordered = ordered.assign(wordlow=ordered[word].str.lower())
        grouped = ordered.groupby(wordlow, as_index=False).agg(
            {word: 'first', count: 'sum'})
        return (grouped
                .drop(columns=[wordlow])
                .sort_values(by=[count], ascending=False)
                .reset_index(drop=True))
    tagged = df.assign(wordlow=df["word"].str.lower())
    picked = tagged.groupby(wordlow, as_index=False).apply(
        wordcase_by_cutoff, word, count, wordlow, cutoff)
    return (picked
            .drop(columns=[wordlow])
            .sort_values(by=[count], ascending=False)
            .reset_index(drop=True))
def wordcase_by_cutoff(df, word, count, wordlow, cutoff):
    """Pick one spelling for a group of case variants of the same word.

    The lowercase version is chosen when it exists and its share of the
    group's total count exceeds *cutoff*; otherwise the most frequent
    version wins. Returns a Series with the chosen spelling and the
    group's summed count, indexed by the *word* and *count* names.
    """
    group_count = sum(df[count])
    share = df[count] / group_count
    lowercase_rows = df[word] == df[wordlow]
    dominant_low = lowercase_rows & (share > cutoff)
    if dominant_low.any():
        chosen = df.loc[dominant_low.idxmax(), wordlow]
    else:
        chosen = df.loc[share.idxmax(), word]
    return pd.Series([chosen, group_count], index=[word, count])
def run_one_langcode(langcode, source_data_type):
    """Run the whole pipeline (download, parse, count, cleanup) for one
    language, steered entirely by the module-level settings.
    """
    t0 = time.time()
    check_cwd()
    print("\nLanguage:", langcode)
    if get_source_data:
        # Download only when the data is missing or a re-download is forced.
        if ((source_data_type == "raw" and
             (not os.path.exists(rawdatadir(langcode))
              or redownload_source_data)) or
            (source_data_type != "raw" and
             (not os.path.exists(parsedfile(langcode, source_data_type))
              or redownload_source_data))):
            download_data_and_extract(basedatadir, langcode, source_data_type)
        if (get_words_using_tokenized and
            source_data_type != "tokenized" and
            (not os.path.exists(parsedfile(langcode, "tokenized"))
             or redownload_source_data)):
            download_data_and_extract(basedatadir, langcode, "tokenized")
    if get_parsed_text and source_data_type == "raw":
        parse_rawdatadir_to_tmpfile(langcode, rawdatadir(langcode),
                                    tmpfile(langcode),
                                    year_min, year_max)
    if get_sentences:
        parsedfile_to_top_sentences(parsedfile(langcode, source_data_type),
                                    sentence_outfile(langcode),
                                    langcode, source_data_type)
    if get_words:
        if not get_words_using_tokenized:
            parsedfile_to_top_words(parsedfile(langcode, source_data_type),
                                    word_outfile(langcode),
                                    langcode,
                                    source_data_type)
        else:
            parsedfile_to_top_words(parsedfile(langcode, "tokenized"),
                                    word_outfile(langcode),
                                    langcode,
                                    "tokenized")
    if delete_tmpfile:
        if os.path.exists(tmpfile(langcode)):
            os.remove(tmpfile(langcode))
        if not os.listdir("bld/tmp"):
            os.rmdir(f"bld/tmp")
    if delete_source_data:
        if source_data_type == "raw" and not always_keep_raw_data:
            if os.path.exists(rawdatadir(langcode)):
                shutil.rmtree(rawdatadir(langcode))
        if source_data_type == "text" or source_data_type == "tokenized":
            if os.path.exists(parsedfile(langcode, source_data_type)):
                os.remove(parsedfile(langcode, source_data_type))
        if get_words_using_tokenized:
            if os.path.exists(parsedfile(langcode, "tokenized")):
                os.remove(parsedfile(langcode, "tokenized"))
        # NOTE(review): "basedatadir" below is a literal path component,
        # not the basedatadir variable — likely meant to be
        # f"{basedatadir}/{langcode}"; as written this branch never runs.
        if os.path.exists(f"basedatadir/{langcode}"):
            if not os.listdir(f"basedatadir/{langcode}"):
                os.rmdir(f"basedatadir/{langcode}")
    t1 = time.time()
    print(f"Total time (s): {t1-t0:.1f}\n")
def check_cwd():
    """Ensure the working directory is the repository root.

    If the marker script is not found, tries the parent directory once
    and raises when that still does not look like the repository root.
    """
    if not os.path.isfile('src/top_open_subtitles_sentences.py'):
        print("Warning: 'src/top_open_subtitles_sentences.py' not found "
              + "where expected. Trying to switch to parent directory.")
        os.chdir("..")
        if not os.path.isfile('src/top_open_subtitles_sentences.py'):
            raise Exception("Error: Working directory is not the repository "
                            + "base directory. "
                            + "'src/top_open_subtitles_sentences.py' "
                            + "not found.")
def check_langcodes():
    """Raise for the first configured langcode not present in valid_langcodes."""
    bad = next((code for code in langcodes if code not in valid_langcodes),
               None)
    if bad is not None:
        raise Exception(f"Error: Not a valid langcode: {bad}")
def summary_table():
    """Write a per-language summary of total sentence and word counts.

    Reads the two running total-count CSVs and emits either
    bld/summary_table.md (markdown with links to the per-language output
    files) or bld/summary_table.csv, depending on md_summary_table.
    """
    sc = pd.read_csv(total_counts_sentences_file)
    wc = pd.read_csv(total_counts_words_file)
    # Bug fix: the frame must be bound to a name before its 'code' column
    # can be used inside the assign list comprehensions; the original
    # referenced an undefined variable `st` and raised NameError.
    st = pd.DataFrame({"code": langcodes})
    st = st.assign(language=[languages[l] for l in st['code']])
    if md_summary_table:
        (st
         .assign(sentences=[f"[{sc[l][0]:,}]({sentence_outfile(l)})"
                            for l in st['code']])
         .assign(words=[f"[{wc[l][0]:,}]({word_outfile(l)})"
                        for l in st['code']])
         .to_markdown("bld/summary_table.md", index=False,
                      colalign=["left", "left", "right", "right"]))
    else:
        (st
         .assign(sentences=[sc[l][0] for l in st['code']])
         .assign(words=[wc[l][0] for l in st['code']])
         .to_csv("bld/summary_table.csv", index=False))
def main():
    """Entry point: validate settings, reset the total-count files and
    run the pipeline for every configured language.
    """
    check_langcodes()
    check_cwd()
    # Start fresh: the total-count CSVs are appended to per language.
    if os.path.exists(total_counts_sentences_file):
        os.remove(total_counts_sentences_file)
    if os.path.exists(total_counts_words_file):
        os.remove(total_counts_words_file)
    for langcode in langcodes:
        run_one_langcode(langcode, source_data_type)
    if get_summary_table:
        summary_table()


###############################################################################
# Run

if __name__ == "__main__":
    main()
| orgtre/top-open-subtitles-sentences | src/top_open_subtitles_sentences.py | top_open_subtitles_sentences.py | py | 26,644 | python | en | code | 10 | github-code | 13 |
19905709517 | class Solution(object):
def sortColors(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
#
nums.sort()
#O(n)time O(1)space
#trace index
i = j = 0
for k in xrange(len(nums)):
v = nums[k]
nums[k] = 2
if v < 2:
nums[j] = 1
j += 1
if v == 0:
nums[i] = 0
i += 1
# Demo: the original called s.uniquePathsWithObstacles(...), a method that
# does not exist on Solution and raised AttributeError; exercise sortColors
# (the method this file defines) instead.
s = Solution()
nums = [2, 0, 2, 1, 1, 0]
s.sortColors(nums)
print(nums)
40698551405 | #!/usr/bin/python
# -*- coding:utf-8 -*-
# python3环境
# 界面演示示例实现 安装rancher基础环境
import os
import json
import time
import shutil
import re
# import io
import sys
# reload(sys)
# sys.setdefaultencoding("utf-8")
# 使用linux系统交互输入信息时,
# 会出现backspace无法删除乱码的情况;
# 导入readline模块可以消除这种乱码情况。
# 需要取消注释即可
# import readline
# Menu text shown to the operator (Chinese UI strings are runtime data
# printed by Menu.display_menu(); do not edit them for translation).
xuanze = '''对应编号:
------------部署准备------------
1. 机器性能调优(谨慎使用,勿重复使用)
2. 老集群删除
------------部署集群------------
3. docker安装
4. rancher服务端部署
5. 待定
6. 待定
7. 安装harbor
--------------其它--------------
h. 显示帮助help
q. quit/退出选项'''

# sysctl settings appended verbatim to /etc/sysctl.conf by
# Things.KernelPerformanceTuning(); the string body is shell payload,
# including its embedded (Chinese) comment lines.
KernelEdit = '''
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
net.ipv4.ip_forward=1
net.ipv4.conf.all.forwarding=1
net.ipv4.neigh.default.gc_thresh1=4096
net.ipv4.neigh.default.gc_thresh2=6144
net.ipv4.neigh.default.gc_thresh3=8192
net.ipv4.neigh.default.gc_interval=60
net.ipv4.neigh.default.gc_stale_time=120
# 参考 https://github.com/prometheus/node_exporter#disabled-by-default
kernel.perf_event_paranoid=-1
#sysctls for k8s node config
net.ipv4.tcp_slow_start_after_idle=0
net.core.rmem_max=16777216
fs.inotify.max_user_watches=524288
kernel.softlockup_all_cpu_backtrace=1
kernel.softlockup_panic=1
fs.file-max=2097152
fs.inotify.max_user_instances=8192
fs.inotify.max_queued_events=16384
vm.max_map_count=262144
fs.may_detach_mounts=1
net.core.netdev_max_backlog=16384
net.ipv4.tcp_wmem=4096 12582912 16777216
net.core.wmem_max=16777216
net.core.somaxconn=32768
net.ipv4.ip_forward=1
net.ipv4.tcp_max_syn_backlog=8096
net.ipv4.tcp_rmem=4096 12582912 16777216
net.ipv6.conf.all.disable_ipv6=1
net.ipv6.conf.default.disable_ipv6=1
net.ipv6.conf.lo.disable_ipv6=1
kernel.yama.ptrace_scope=0
vm.swappiness=0
# 可以控制core文件的文件名中是否添加pid作为扩展。
kernel.core_uses_pid=1
# Do not accept source routing
net.ipv4.conf.default.accept_source_route=0
net.ipv4.conf.all.accept_source_route=0
# Promote secondary addresses when the primary address is removed
net.ipv4.conf.default.promote_secondaries=1
net.ipv4.conf.all.promote_secondaries=1
# Enable hard and soft link protection
fs.protected_hardlinks=1
fs.protected_symlinks=1
# 源路由验证
# see details in https://help.aliyun.com/knowledge_detail/39428.html
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce=2
net.ipv4.conf.all.arp_announce=2
# see details in https://help.aliyun.com/knowledge_detail/41334.html
net.ipv4.tcp_max_tw_buckets=5000
net.ipv4.tcp_syncookies=1
net.ipv4.tcp_fin_timeout=30
net.ipv4.tcp_synack_retries=2
kernel.sysrq=1'''
# COMMON: collection of general-purpose helper functions
class CommonFun():
    """Grab-bag of generic helper methods shared by the installer."""

    def __init__(self):
        pass

    def getfilelist(self, filepath):
        """Return the paths of all regular files under *filepath*,
        recursing into subdirectories.
        """
        # os.walk is iterative, so arbitrarily deep trees cannot hit the
        # interpreter recursion limit like the old self-recursive
        # listdir-based version could; it also skips the manual '%s/%s'
        # path assembly.
        files = []
        for root, _dirs, names in os.walk(filepath):
            for name in names:
                files.append(os.path.join(root, name))
        return files

    def alter(self, file, old_str, new_str):
        """sed-like in-place substitution: replace every occurrence of
        *old_str* with *new_str* in *file*.

        :param file: path of the file to rewrite
        :param old_str: substring to search for
        :param new_str: replacement text
        """
        file_data = ""
        with open(file, "r") as f:
            for line in f:
                if old_str in line:
                    line = line.replace(old_str, new_str)
                file_data += line
        with open(file, "w") as f:
            f.write(file_data)
# Class implementing the actions behind the option menu entries
class Things():
def __init__(self, username='none'):
self.username = username
self.commonfun = CommonFun()
# 内核性能调优
def KernelPerformanceTuning(self):
print("KernelPerformanceTuning Begin ...")
print("内核参数调优,请不要重复执行!!!!")
time.sleep(1)
print("1、>>>>>>>>>>> Begin sysctl.conf...")
cmd_sysctl = "echo " + '''"{}"'''.format(KernelEdit) + " >> /etc/sysctl.conf"
cmd_sysctl_ok = "sysctl -p"
print(cmd_sysctl.decode("utf-8"))
print(cmd_sysctl_ok)
try:
# os.popen(cmd_sysctl).read()
# os.popen(cmd_sysctl_ok).read()
print("sysctl.conf is OK")
except Exception as e:
print("sysctl error: {}".format(e))
print("2、>>>>>>>>>>> Begin limits.conf...")
cmd_limits = '''cat >> /etc/security/limits.conf <<EOF
* soft nofile 65535
* hard nofile 65536
EOF'''
print(cmd_limits)
try:
# os.popen(cmd_limits).read()
print("limits.conf is OK")
except Exception as e:
print(e)
print("3、>>>>>>>>>>> Begin selinux disable...")
try:
with open(r"/etc/selinux/config", "r") as f:
readfile = f.read()
if "SELINUX=disabled" in readfile:
print("selinux is disabled before.")
else:
# self.commonfun.alter("/etc/selinux/config","SELINUX=enforcing","SELINUX=disabled")
# os.popen("setenforce 0").read()
print("selinux disable success")
except Exception as e:
print("error: {}".format(e))
print("4、>>>>>>>>>>> Begin firewalld stop...")
cmd_firewalld_stop = "systemctl stop firewalld && systemctl disable firewalld"
try:
# os.popen(cmd_firewalld_stop).read()
print("firewalld stop success")
except Exception as e:
print(e)
print("5、>>>>>>>>>>> Begin NetworkManager stop...")
cmd_networkmanager_stop = "systemctl stop NetworkManager && systemctl disable NetworkManager"
try:
# os.popen(cmd_networkmanager_stop).read()
print("NetworkManager stop success")
except Exception as e:
print(e)
print(">>>>>>>>>>> Begin base yum install...")
def YumInstallBase():
actions = input("是否yum安装基础组件(Y/N):>>> ");
if actions == "N" or actions == "n":
print("exit to Menu ing...")
time.sleep(2)
Menu().run()
elif actions == "Y" or actions == "y":
cmd_yum_install = "yum install -y ntp ntpdate nfs-utils lrzsz zip unzip sshpass dnsmasq vim gcc c++ net-tools cmake lsof"
print(cmd_yum_install)
try:
# os.popen(cmd_yum_install).read()
print("yum base-tools success")
except Exception as e:
print(e)
else:
print("Y/N IN again")
YumInstallBase()
YumInstallBase()
print("KernelPerformanceTuning success, To Menu ing...")
time.sleep(1)
Menu().run()
print("reboot??")
# 老集群删除操作
def OldKubernetesClusterDel(self):
try:
actions = input("请输入操作: ");
if actions == "h":
Menu().run()
except Exception as e:
print("请确保有项目目录; Error: {}".format(e))
# DockerInstallOK
def DockerInstallOK(self):
print(">>>>>>>>>>> Begin install Docker...")
cmd_docker_conf = '''cat <<EOF > /etc/docker/daemon.json
{
"registry-mirrors": ["https://7h0c2lco.mirror.aliyuncs.com"],
"graph": "/data/docker",
"insecure-registries": ["xxxx"],
"selinux-enabled": false,
"log-driver": "json-file",
"log-opts": {
"max-size": "30m",
"max-file": "10"
}
}
EOF
'''
try:
print("输入 h 返回菜单Menu\n 输入 q 退出程序")
actions = input("安装Docker版本(default: 18.06.1):>>> ");
if actions == "h":
Menu().run()
elif actions == "q":
Menu().quit()
elif actions == "":
commands = ["yum install -y yum-utils device-mapper-persistent-data lvm2",
"yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo",
"yum install -y docker-ce-18.06.1 --skip-broken",
"systemctl enable docker",
"systemctl start docker",
"{}".format(cmd_docker_conf),
"systemctl restart docker"]
for i in commands:
print("begin command: {}".format(i))
# os.popen(i).read()
elif re.findall("\d.\d",actions):
commands = ["yum install -y yum-utils device-mapper-persistent-data lvm2",
"yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo",
"yum install -y docker-ce-{} --skip-broken".format(actions),
"systemctl enable docker",
"systemctl start docker",
"{}".format(cmd_docker_conf),
"systemctl restart docker"]
for i in commands:
print("begin command: {}".format(i))
# os.popen(i).read()
else:
print("input err. please input again...")
self.DockerInstallOK()
print("Docker Install success, To Menu ing...")
time.sleep(1)
Menu().run()
except Exception as e:
print("Output Error: {}".format(e))
# print e
    # RancherInstallOK
    def RancherInstallOK(self):
        """Interactively build (and preview) the `docker run` command that
        starts a Rancher 2.4.0 server.

        'h' returns to the menu, 'q' quits, 'Y'/'y' proceeds and asks for the
        host path to mount as /var/lib/rancher/.  The command is only printed;
        the os.popen call is commented out on purpose.
        """
        print(">>>>>>>>>>> Begin install Rancher...")
        print("请在部署Rancher主机上运行安装..")
        try:
            print("输入 h 返回菜单Menu\n 输入 q 退出程序")
            actions = input("是否安装Rancher服务端(Y/N):>>> ");
            if actions == "h":
                Menu().run()
            elif actions == "q":
                Menu().quit()
            elif actions == "Y" or actions == "y":
                print("请确认映射路径正确!!!")
                PathDir = input("Rancher映射路径(/data/rancher/):>>> ")
                # Host path is substituted into the -v bind-mount below.
                command = '''docker run -d --restart=unless-stopped \
                            -p 80:80 -p 443:443 \
                            -v {}:/var/lib/rancher/ \
                            -v /root/var/log/auditlog:/var/log/auditlog \
                            -e CATTLE_SYSTEM_CATALOG=bundled \
                            -e AUDIT_LEVEL=3 \
                            rancher/rancher:2.4.0'''.format(PathDir)
                print("############################################")
                print("{}".format(command))
                print("############################################")
                # Pressing ENTER confirms the path; anything else restarts the dialog.
                if input("请确认映射路径正确!!??正确请按ENTER回车>>>") == "":
                    print("command: {}".format(command))
                    # os.popen(command)
                else:
                    print("input err. please input again...")
                    self.RancherInstallOK()
            elif actions == "N" or actions == "n":
                print("exit to Menu ing...")
                time.sleep(1)
                Menu().run()
            else:
                print("input err. please input again...")
                self.RancherInstallOK()
            print("Rancher Install success, To Menu ing...")
            time.sleep(1)
            Menu().run()
        except Exception as e:
            print("Error: {}".format(e))
    # Rancher Api create resource
    def RancherToAPIApplyResource(self):
        """Placeholder: create resources through the Rancher API (not implemented)."""
        pass
# Terminal menu front-end for the Things operations above.
class Menu():
    """Simple numeric menu that dispatches to the Things() operations."""
    def __init__(self):
        # Map of menu keys to bound action methods; 'q' exits the program.
        self.thing = Things()
        self.choices = {
            "1": self.thing.KernelPerformanceTuning,
            "2": self.thing.OldKubernetesClusterDel,
            "3": self.thing.DockerInstallOK,
            "4": self.thing.RancherInstallOK,
            "q": self.quit
        }
    def display_menu(self):
        """Clear the screen and print the menu text (global `xuanze`)."""
        # Linux command: clear
        # For debugging in a Windows CMD shell use cls instead.
        os.system('clear')
        # os.system('cls')
        # On Windows/python2 the text may need decoding:
        # python2 >>> .decode("utf-8")
        print(xuanze)
    def run(self):
        """Main loop: show the menu, read a choice and dispatch it."""
        while True:
            self.display_menu()
            try:
                choice = input("Enter an option >>> ")
            except Exception as e:
                print("Please input a valid option!");
                continue
            choice = str(choice).strip()
            action = self.choices.get(choice)
            if action:
                action()
                # After an action, keep reading lines until the user types the
                # stopword "h" (back to menu); "q" quits immediately.
                stopword = "h"
                print(">>> ",end="")
                for line in iter(input, stopword): # input of "h" ends the inner loop
                    if line == "q":
                        self.quit()
                    print(">>> ",end="")
            else:
                print("{0} is not a valid choice".format(choice))
                #time.sleep(1)
    def quit(self):
        """Print a goodbye banner and terminate the process."""
        print("--------------\nThank you for using this script!\n--------------")
        sys.exit(0)
if __name__ == '__main__':
Menu().run() | sheldon-lu/Python_all | small_tools/terminal_GUI/baseEnv.py | baseEnv.py | py | 13,641 | python | en | code | 1 | github-code | 13 |
38197941046 | #!/usr/bin/env python
# coding: utf-8
# # shivani tiwari 03
# In[ ]:
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# In[5]:
# Load the placement dataset (columns: cgpa, package).
df=pd.read_csv('D:/shivani tiwari/bml/placement.csv')
# In[6]:
df.head()
# Visualise the raw relationship between CGPA and package.
plt.scatter(df['cgpa'],df['package'])
# In[17]:
import seaborn as sns
corr=df.corr()
# In[18]:
# Heatmap of the feature correlation matrix.
sns.heatmap(corr,annot=True)
# In[19]:
x=df['cgpa']
y=df['package']
# In[24]:
# Re-select: x as a 2D feature frame (first column), y as the target series.
x=df.iloc[:,0:1]
y=df.iloc[:,-1]
# In[25]:
from sklearn.model_selection import train_test_split
# 80/20 split with a fixed seed for reproducibility.
x_train,x_test,y_train,y_test =train_test_split(x,y,test_size=0.2,random_state=2)
# In[26]:
from sklearn.linear_model import LinearRegression
# In[27]:
lr= LinearRegression()
# In[29]:
lr.fit(x_train,y_train)
# NOTE(review): reshape(1,1) only works because there is a single feature;
# reshape(1,-1) would be the general form for one sample.
lr.predict(x_test.iloc[0].values.reshape(1,1))
# In[30]:
plt.scatter(df['cgpa'],df['package'])
plt.xlabel('cgpa')
plt.ylabel('package')
# In[31]:
# Fitted slope (m) and intercept (b) of y = m*x + b.
m=lr.coef_
m
# In[32]:
b=lr.intercept_
b
# In[33]:
#y=mx+b
m*8.58+b
# In[34]:
m*9.5+b
# In[35]:
# Extrapolation far outside the observed CGPA range -- illustrative only.
m*100+b
# In[ ]:
| shivani926/ML | practical2.py | practical2.py | py | 1,033 | python | en | code | 0 | github-code | 13 |
41642418795 | # Method: Use Dynamic Programming (2D) with 1 extra row and col
# TC: O(n * m)
# SC: O(n * m)
from typing import List
class Solution:
    def maxDotProduct(self, nums1: List[int], nums2: List[int]) -> int:
        """Maximum dot product between non-empty subsequences of equal length.

        Classic 2D DP with one sentinel row/column: table[r][c] holds the best
        dot product using the first r items of nums1 and c items of nums2,
        with -inf marking "no pair chosen yet" (both subsequences must be
        non-empty).  O(n*m) time, O(n*m) space.
        """
        rows, cols = len(nums1), len(nums2)
        NEG_INF = float('-inf')
        table = [[NEG_INF] * (cols + 1) for _ in range(rows + 1)]
        for r, a in enumerate(nums1, start=1):
            for c, b in enumerate(nums2, start=1):
                prod = a * b
                # Either start fresh with this pair or extend the best
                # diagonal prefix; a negative prefix is never worth keeping.
                best_here = prod + max(table[r - 1][c - 1], 0)
                # Alternatively skip one element of either sequence.
                table[r][c] = max(best_here, table[r - 1][c], table[r][c - 1])
        return table[rows][cols]
| ibatulanandjp/Leetcode | #1458_MaxDotProductOfTwoSubsequences/solution1.py | solution1.py | py | 696 | python | en | code | 1 | github-code | 13 |
21278163650 | import os
import sys
import glob
import shutil
import numpy as np
import scipy as sp
from scipy import *
import scipy.spatial
import numpy.linalg as LA
from numpy import cross, eye, dot
from scipy.linalg import expm, norm
import pandas as pd
import itertools
import re
import time
import argparse
from Bio.PDB import *
pdb_parser = PDBParser()

# Command-line interface -------------------------------------------------------
parser = argparse.ArgumentParser(description='Program')
parser.add_argument('-a', '--Analysis', action='store', type=str, required=False,
                    default='PolymerDssp',
                    help='Name of the analysis you want to perform on the data, can specify multiple options in camelcase: Polymer / Dssp')
parser.add_argument('-i', '--InputStructures', action='store', type=str, required=False,
                    help='Structure name or search name for import of structures')
parser.add_argument('-i2', '--InputStructures2', action='store', type=str, required=False,
                    help='Structure name or search name for comparison structures, such as wild-type')
parser.add_argument('-o', '--OutputName', action='store', type=str, required=False,
                    help='Prefix of outputs')
parser.add_argument('-contact', '--ContactDistance', action='store', type=float, required=False,
                    default=10.0, help='Distance cutoff for assigning and interresidue contact')
parser.add_argument('-residue', '--FocalResidue', action='store', type=int, required=False,
                    help='Specific residue of interest for performing additional analyses')
parser.add_argument('-domain', '--FocalDomain', action='store', type=str, required=False,
                    help='Specific domain of interest for performing additional analyses identified as start_residue,end_residue')
# Fixed: FoldRef is used as a PDB file path (see contactFoldMatching), so it
# must be parsed as a string; the previous type=int rejected any path.
parser.add_argument('-fold', '--FoldRef', action='store', type=str, required=False,
                    help='Single reference structure for counting number of similar contacts')
args = parser.parse_args()
# Maximum solvent accessibility per amino-acid type, used to convert DSSP's
# relative accessibility (RASA) into absolute TASA values.
# NOTE(review): values are presumably theoretical max ASA in A^2 -- confirm source.
aa_acc_max = { \
    'A': 129.0, 'R': 274.0, 'N': 195.0, 'D': 193.0,\
    'C': 167.0, 'Q': 225.0, 'E': 223.0, 'G': 104.0,\
    'H': 224.0, 'I': 197.0, 'L': 201.0, 'K': 236.0,\
    'M': 224.0, 'F': 240.0, 'P': 159.0, 'S': 155.0,\
    'T': 172.0, 'W': 285.0, 'Y': 263.0, 'V': 174.0}
def collectPDBCoords(pdb_file):
    """Parse a PDB file and return its CA-atom coordinates as [[x, y, z], ...].

    Only lines containing a ' CA ' atom-name field are considered.  ATOM
    records are parsed by fixed columns per the PDB format (x = columns
    31-38, y = 39-46, z = 47-54, i.e. 0-based slices [30:38], [38:46],
    [46:54]); HETATM records are parsed by whitespace splitting.
    """
    pdb_coords = []
    with open(pdb_file) as fh:  # fixed: file handle was never closed
        pdb_lines = filter(lambda s: ' CA ' in s, fh.readlines())
        for line in pdb_lines:
            if 'HETATM' in line:  # fixed: the PDB record name is 'HETATM', not 'HETATOM'
                line_info = line.split()
                pdb_coords.append([float(line_info[6]), float(line_info[7]), float(line_info[8])])
            elif 'ATOM' in line:
                # Fixed column slices: the previous [39:46] / [47:55] dropped the
                # leading character of the y and z fields, losing a sign or digit
                # for wide coordinates such as -123.456.
                pdb_coords.append([float(line[30:38]), float(line[38:46]), float(line[46:54])])
    return pdb_coords
''' open_files = open(pdb_file)
pdb_lines = open_file.readlines()
pdb_coords = []
for line in pdb_lines:
if 'ATOM' in line or 'HET' in line:
line_info = line.split()
if line_info[2] == 'CA':
pdb_coords.append([float(line_info[6]), float(line_info[7]), float(line_info[8])])'''
def computeDistanceContactInfo(pdb_coords):
    """Per-structure polymer descriptors from one set of CA coordinates.

    Returns the full pairwise distance matrix, the binary contact map
    (1.0 where the distance is below the args.ContactDistance cutoff),
    the total contact count, the radius of gyration derived from the
    distance matrix, and the residue count.
    """
    n_residues = len(pdb_coords)
    pairwise = scipy.spatial.distance.pdist(pdb_coords)
    distance_map = scipy.spatial.distance.squareform(pairwise)
    # 1.0 for pairs closer than the cutoff, 0.0 elsewhere (same dtype as distances).
    contact_map = (distance_map < args.ContactDistance).astype(distance_map.dtype)
    contact_number = contact_map.sum()
    # Rg from the distance matrix: Rg^2 = sum(d_ij^2) / (2 N^2).
    rad_gyr = np.sqrt((distance_map ** 2).sum() / (2 * n_residues ** 2))
    return distance_map, contact_map, contact_number, rad_gyr, n_residues
''' distance_map = np.zeros([len(pdb_coords),len(pdb_coords)])
contact_map = np.zeros([len(pdb_coords),len(pdb_coords)])
contact_number = 0
for pdb_coord_idx_1, pdb_coord_item_1 in enumerate(pdb_coords):
for pdb_coord_idx_2, pdb_coord_item_2 in enumerate(pdb_coords):
interresidue_distance = np.linalg.norm(np.array(pdb_coord_item_1) - np.array(pdb_coord_item_2))
distance_map[pdb_coord_idx_1][pdb_coord_idx_2] = interresidue_distance
if interresidue_distance < contact_distance:
contact_map[pdb_coord_idx_1][pdb_coord_idx_2] = 1.0
contact_number += 1'''
def computePolymerInfo(input_structure_list):
    """Ensemble-averaged polymer descriptors over a list of PDB files.

    Returns the mean/std distance maps, the mean distance/contact scaling
    profiles (value vs. sequence separation, averaged over the map
    diagonals), the mean contact map, per-structure contact counts, radii
    of gyration and end-to-end distances.
    """
    distance_map_set = []
    contact_map_set = []
    contact_number_set = []
    radii_gyr = []
    end_to_end_dist = []
    for structure in input_structure_list:
        structure_coords = collectPDBCoords(structure)
        distance_map,contact_map,contact_number,rad_gyr,n_residues = computeDistanceContactInfo(structure_coords)
        # Fixed: the end-to-end distance was taken from distance_map[0][0]
        # (a self-distance, always 0); it is the first-to-last residue entry.
        distance_map_set.append(distance_map); contact_map_set.append(contact_map); contact_number_set.append(contact_number); radii_gyr.append(rad_gyr); end_to_end_dist.append(distance_map[0][-1])
    avg_distance_map = np.average(distance_map_set, axis=0)
    std_distance_map = np.std(distance_map_set, axis=0)
    avg_contact_map = np.average(contact_map_set, axis=0)
    avg_distance_scaling = []
    std_distance_scaling = []
    avg_contact_scaling = []
    # Each offset's diagonal collects all residue pairs separated by that many
    # positions in sequence; std values are combined as an RMS.
    for residue_offset in range(n_residues):
        avg_distance_scaling.append([residue_offset,np.average(diagonal(avg_distance_map, residue_offset))])
        std_distance_scaling.append([residue_offset,np.sqrt(np.average(diagonal(np.square(std_distance_map), residue_offset)))])
        avg_contact_scaling.append([residue_offset,np.average(diagonal(avg_contact_map, residue_offset))])
    return avg_distance_map, std_distance_map, avg_distance_scaling, std_distance_scaling, avg_contact_scaling, avg_contact_map, contact_number_set, radii_gyr, end_to_end_dist
def residuePolymerInfo(avg_distance_map, std_distance_map, avg_contact_map):
    """Distance/contact scaling profiles restricted to the regions up- and
    downstream of the focal residue (args.FocalResidue).

    The "up" profiles use the sub-maps before the focal residue, the "down"
    profiles the sub-maps after it; each profile lists [offset, value] pairs
    where offset is the sequence separation within that region.
    """
    up_avg_distance_scaling = []; down_avg_distance_scaling = []; up_std_distance_scaling = []; down_std_distance_scaling = []
    up_contact_scaling = []; down_contact_scaling = []
    for residue_offset in range(args.FocalResidue):
        up_avg_distance_scaling.append([residue_offset,np.average(diagonal(avg_distance_map[:args.FocalResidue,:args.FocalResidue], residue_offset))])
        up_std_distance_scaling.append([residue_offset,np.sqrt(np.average(diagonal(np.square(std_distance_map[:args.FocalResidue,:args.FocalResidue]), residue_offset)))])
        up_contact_scaling.append([residue_offset,np.average(diagonal(avg_contact_map[:args.FocalResidue,:args.FocalResidue], residue_offset))])
    for residue_offset in range(args.FocalResidue,len(avg_distance_map),1):
        down_avg_distance_scaling.append([residue_offset-args.FocalResidue,np.average(diagonal(avg_distance_map[args.FocalResidue:,args.FocalResidue:], residue_offset-args.FocalResidue))])
        # Fixed: this used np.sum, inconsistent with the np.average used by every
        # other std-scaling profile here and in domainPolymerInfo (RMS of stds).
        down_std_distance_scaling.append([residue_offset-args.FocalResidue,np.sqrt(np.average(diagonal(np.square(std_distance_map[args.FocalResidue:,args.FocalResidue:]), residue_offset-args.FocalResidue)))])
        down_contact_scaling.append([residue_offset-args.FocalResidue,np.average(diagonal(avg_contact_map[args.FocalResidue:,args.FocalResidue:], residue_offset-args.FocalResidue))])
    return up_avg_distance_scaling, down_avg_distance_scaling, up_std_distance_scaling, down_std_distance_scaling, up_contact_scaling, down_contact_scaling
def domainPolymerInfo(avg_distance_map, std_distance_map, avg_contact_map):
    """Distance/contact scaling profiles restricted to the regions up- and
    downstream of the focal domain (args.FocalDomain = "start,end").

    The "up" profiles use the sub-maps before domain_start, the "down"
    profiles the sub-maps after domain_end; each profile lists
    [offset, value] pairs where offset is the separation within the region.
    """
    domain_start, domain_end = args.FocalDomain.split(',')
    domain_start = int(domain_start); domain_end = int(domain_end)
    up_avg_distance_scaling = []; down_avg_distance_scaling = []; up_std_distance_scaling = []; down_std_distance_scaling = []
    up_contact_scaling = []; down_contact_scaling = []
    for residue_offset in range(domain_start):
        up_avg_distance_scaling.append([residue_offset,np.average(diagonal(avg_distance_map[:domain_start,:domain_start], residue_offset))])
        # std values are combined as an RMS over the diagonal.
        up_std_distance_scaling.append([residue_offset,np.sqrt(np.average(diagonal(np.square(std_distance_map[:domain_start,:domain_start]), residue_offset)))])
        up_contact_scaling.append([residue_offset,np.average(diagonal(avg_contact_map[:domain_start,:domain_start], residue_offset))])
    for residue_offset in range(domain_end,len(avg_distance_map),1):
        down_avg_distance_scaling.append([residue_offset-domain_end,np.average(diagonal(avg_distance_map[domain_end:,domain_end:], residue_offset-domain_end))])
        down_std_distance_scaling.append([residue_offset-domain_end,np.sqrt(np.average(diagonal(np.square(std_distance_map[domain_end:,domain_end:]), residue_offset-domain_end)))])
        down_contact_scaling.append([residue_offset-domain_end,np.average(diagonal(avg_contact_map[domain_end:,domain_end:], residue_offset-domain_end))])
    return up_avg_distance_scaling, down_avg_distance_scaling, up_std_distance_scaling, down_std_distance_scaling, up_contact_scaling, down_contact_scaling
def contactFoldMatching(input_structure_list, avg_contact_map):
    """Score how well the ensemble's contacts match a reference structure.

    Multiplies the reference contact map (from the PDB file named by
    args.FoldRef) element-wise with the ensemble-average contact
    probabilities, and converts the summed probability into an expected
    matching-contact count across the whole ensemble.

    Fixed: the original always raised NameError -- the coordinate variable
    names disagreed ('ref_struct_coods' vs 'ref_structure_coords') and the
    helper name was misspelled ('conputeDistanceContactInfo').
    """
    ref_structure_coords = collectPDBCoords(args.FoldRef)
    ref_distance_map, ref_contact_map, ref_contact_number, ref_rad_gyr, ref_n_residues = computeDistanceContactInfo(ref_structure_coords)
    matching_contact_prob = ref_contact_map * avg_contact_map
    # Divide by 2 because each contact appears twice in the symmetric map.
    matching_contact_number = (np.sum(matching_contact_prob) / 2) * len(input_structure_list)
    return matching_contact_prob, matching_contact_number
def computeDSSPInfo(input_structure_list):
    """Run DSSP over every structure and aggregate secondary-structure data.

    Returns:
      percent_ss2   -- (n_residues, 3) per-residue fractions of
                       [helix, strand, coil] across the ensemble
      phi_psi_hist  -- normalised 2D Ramachandran histogram (121x121 bins)
      TASA_list     -- total absolute solvent-accessible surface area per
                       structure (RASA * aa_acc_max per residue)
      n_residues    -- residue count taken from the first structure
    """
    struct = pdb_parser.get_structure('test', input_structure_list[0])
    n_residues = 0
    for residue in struct.get_residues():
        n_residues += 1
    percent_ss2 = np.zeros([n_residues, 3])
    phi_list = []
    psi_list = []
    TASA_list = []
    for structure in input_structure_list:
        struct = pdb_parser.get_structure('test', structure)
        model = struct[0]
        struct_TASA_list = []
        dssp = DSSP(model, structure)
        for resi_dssp_idx, resi_dssp in enumerate(dssp):
            dssp_idx = resi_dssp[0]
            AA = resi_dssp[1]
            SS = resi_dssp[2]
            # Column 1 = strand (E/B), column 0 = helix (H/G/I), column 2 = coil.
            if SS == 'E' or SS == 'B':
                percent_ss2[resi_dssp_idx][1] += 1
            elif SS == 'H' or SS == 'G' or SS == 'I':
                percent_ss2[resi_dssp_idx][0] += 1
            else:
                percent_ss2[resi_dssp_idx][2] += 1
            # Convert relative accessibility to an absolute area.
            RASA = resi_dssp[3]
            TASA = RASA*aa_acc_max[AA]
            struct_TASA_list.append(TASA)
            Phi = resi_dssp[4]
            phi_list.append(Phi)
            Psi = resi_dssp[5]
            psi_list.append(Psi)
        TASA_list.append(sum(struct_TASA_list))
    percent_ss2 /= len(input_structure_list)
    phi_psi_hist, phi_psi_hist_x, phi_psi_hist_y = np.histogram2d(phi_list, psi_list, bins=121, range=[[-180,180], [-180,180]])
    phi_psi_hist /= (n_residues*len(input_structure_list))
    return percent_ss2, phi_psi_hist, TASA_list, n_residues
def computeSizeLandscape(input_structure_list, radii_gyr, TASA_list):
    """2D size landscape: radius of gyration vs. total accessible surface area.

    Builds a 101x101 histogram over [min-5, max+5] (Rg) and [min-100, max+100]
    (TASA), normalised by the number of structures so it sums to 1.
    """
    rg_range = [min(radii_gyr) - 5, max(radii_gyr) + 5]
    tasa_range = [min(TASA_list) - 100, max(TASA_list) + 100]
    counts, rg_edges, tasa_edges = np.histogram2d(
        radii_gyr, TASA_list, bins=101, range=[rg_range, tasa_range])
    counts /= float(len(input_structure_list))
    return counts, rg_edges, tasa_edges
##### Running
## Importing and Analyzing a Single Ensemble
# Glob all PDB files matching the -i prefix into the primary ensemble.
ins = str(args.InputStructures) + '*.pdb'
input_structure_list = glob.glob(ins)
if 'Polymer' in args.Analysis:
    avg_distance_map, std_distance_map, average_distance_scaling, std_distance_scaling, avg_contact_scaling, avg_contact_map, contact_number_set, radii_gyr, end_to_end_dist = computePolymerInfo(input_structure_list)
if 'Dssp' in args.Analysis:
    percent_ss2, phi_psi_hist, TASA_list, n_residues = computeDSSPInfo(input_structure_list)
# The Rg-vs-TASA landscape needs results from BOTH analyses.
if 'Dssp' in args.Analysis and 'Polymer' in args.Analysis:
    size_landscape, size_landscape_x, size_landscape_y = computeSizeLandscape(input_structure_list, radii_gyr, TASA_list)
## Performing Analysis against a Comparative Ensemble
# Optional second ensemble (-i2, e.g. wild-type) and difference maps.
if args.InputStructures2:
    ins_2 = str(args.InputStructures2) + '*.pdb'
    input_structure_list_2 = glob.glob(ins_2)
    if 'Polymer' in args.Analysis:
        avg_distance_map_2, std_distance_map_2, average_distance_scaling_2, std_distance_scaling_2, avg_contact_scaling_2, avg_contact_map_2, contact_number_set_2, radii_gyr_2, end_to_end_dist_2 = computePolymerInfo(input_structure_list_2)
        diff_distance_map = avg_distance_map - avg_distance_map_2
        diff_contact_map = avg_contact_map - avg_contact_map_2
    if 'Dssp' in args.Analysis:
        percent_ss2_2, phi_psi_hist_2, TASA_list_2, n_residues_2 = computeDSSPInfo(input_structure_list_2)
        diff_phi_psi_hist = phi_psi_hist - phi_psi_hist_2
        diff_ss2 = percent_ss2 - percent_ss2_2
    if 'Dssp' in args.Analysis and 'Polymer' in args.Analysis:
        size_landscape_2, size_landscape_x_2, size_landscape_y_2 = computeSizeLandscape(input_structure_list_2, radii_gyr_2, TASA_list_2)
## Performing Analysis for a specific focal residue
if args.FocalResidue:
    if 'Polymer' in args.Analysis:
        up_avg_distance_scaling, down_avg_distance_scaling, up_std_distance_scaling, down_std_distance_scaling, up_contact_scaling, down_contact_scaling = residuePolymerInfo(avg_distance_map, std_distance_map, avg_contact_map)
        if args.InputStructures2:
            up_avg_distance_scaling_2, down_avg_distance_scaling_2, up_std_distance_scaling_2, down_std_distance_scaling_2, up_contact_scaling_2, down_contact_scaling_2 = residuePolymerInfo(avg_distance_map_2, std_distance_map_2, avg_contact_map_2)
## Performing Analysis for a specific focal domain
if args.FocalDomain:
    if 'Polymer' in args.Analysis:
        up_avg_distance_scaling, down_avg_distance_scaling, up_std_distance_scaling, down_std_distance_scaling, up_contact_scaling, down_contact_scaling = domainPolymerInfo(avg_distance_map, std_distance_map, avg_contact_map)
        if args.InputStructures2:
            up_avg_distance_scaling_2, down_avg_distance_scaling_2, up_std_distance_scaling_2, down_std_distance_scaling_2, up_contact_scaling_2, down_contact_scaling_2 = domainPolymerInfo(avg_distance_map_2, std_distance_map_2, avg_contact_map_2)
## Performing Analysis for a specific contact set based on a single structure
if args.FoldRef:
    if 'Polymer' in args.Analysis:
        matching_contact_prob, matching_contact_number = contactFoldMatching(input_structure_list, avg_contact_map)
        if args.InputStructures2:
            matching_contact_prob_2, matching_contact_number_2 = contactFoldMatching(input_structure_list_2, avg_contact_map_2)
## Final analyses
# Histograms of Rg (fixed 0-100 range) and total SASA (data-driven range).
if 'Polymer' in args.Analysis:
    rg_hist, rg_hist_axis = np.histogram(radii_gyr, bins=50, range=[0,100])
    if args.InputStructures2:
        rg_hist_2, rg_hist_axis_2 = np.histogram(radii_gyr_2, bins=50, range=[0,100])
if 'Dssp' in args.Analysis:
    TASA_hist, TASA_hist_axis = np.histogram(TASA_list, bins=50, range=[min(TASA_list),max(TASA_list)])
    if args.InputStructures2:
        TASA_hist_2, TASA_hist_axis_2 = np.histogram(TASA_list_2, bins=50, range=[min(TASA_list_2),max(TASA_list_2)])
## Writing the Outputs
def _save_output(basename, data):
    """np.savetxt wrapper that prepends 'args.OutputName_' when -o was given.

    File names and formatting are identical to the previous duplicated
    if/else branches; this helper only removes the repetition.
    """
    name = args.OutputName + '_' + basename if args.OutputName else basename
    np.savetxt(name, data, fmt='%s', delimiter=' ', newline='\n')

if 'Polymer' in args.Analysis:
    _save_output('Polymer_Avg_Distance_Map.txt', avg_distance_map)
    _save_output('Polymer_Std_Distance_Map.txt', std_distance_map)
    _save_output('Polymer_Avg_Distance_Scaling.txt', average_distance_scaling)
    _save_output('Polymer_Std_Distance_Scaling.txt', std_distance_scaling)
    _save_output('Polymer_Avg_Contact_Scaling.txt', avg_contact_scaling)
    _save_output('Polymer_Avg_Contact_Map.txt', avg_contact_map)
    _save_output('Polymer_Contacts_Per_Structure.txt', contact_number_set)
    _save_output('Polymer_Rg_Histogram.txt', rg_hist)
    _save_output('Polymer_Rg_Histogram_Axis.txt', rg_hist_axis)
if 'Dssp' in args.Analysis:
    _save_output('Dssp_Percent_SS2.txt', percent_ss2)
    _save_output('Dssp_Phi_Psi_Histogram.txt', phi_psi_hist)
    _save_output('Dssp_TASA_Histogram.txt', TASA_hist)
    _save_output('Dssp_TASA_Histogram_Axis.txt', TASA_hist_axis)
# Fixed: this guard previously read "'Dssp' ... and 'Dssp' ..."; the size
# landscape only exists when BOTH the Polymer and Dssp analyses ran (see the
# matching computation guard above).
if 'Polymer' in args.Analysis and 'Dssp' in args.Analysis:
    _save_output('Size_Landscape.txt', size_landscape)
    _save_output('Size_Landscape_X.txt', size_landscape_x)
    _save_output('Size_Landscape_Y.txt', size_landscape_y)
if args.FocalDomain:
    _save_output('Domain_Up_Distance_Avg_Scaling.txt', up_avg_distance_scaling)
    _save_output('Domain_Down_Distance_Avg_Scaling.txt', down_avg_distance_scaling)
    _save_output('Domain_Up_Distance_Std_Scaling.txt', up_std_distance_scaling)
    _save_output('Domain_Down_Distance_Std_Scaling.txt', down_std_distance_scaling)
    _save_output('Domain_Up_Contact_Scaling.txt', up_contact_scaling)
    _save_output('Domain_Down_Contact_Scaling.txt', down_contact_scaling)
if args.FocalResidue:
    _save_output('Residue_Up_Distance_Avg_Scaling.txt', up_avg_distance_scaling)
    _save_output('Residue_Down_Distance_Avg_Scaling.txt', down_avg_distance_scaling)
    _save_output('Residue_Up_Distance_Std_Scaling.txt', up_std_distance_scaling)
    _save_output('Residue_Down_Distance_Std_Scaling.txt', down_std_distance_scaling)
    _save_output('Residue_Up_Contact_Scaling.txt', up_contact_scaling)
    _save_output('Residue_Down_Contact_Scaling.txt', down_contact_scaling)
if args.InputStructures2:
    _save_output('Difference_Distance_Map.txt', diff_distance_map)
    _save_output('Difference_Contact_Map.txt', diff_contact_map)
    _save_output('Difference_SS2.txt', diff_ss2)
if args.FoldRef:
    _save_output('Matched_Contact_Probability_Map.txt', matching_contact_prob)
''' size_landscape_x, size_landscape_y
np.savetxt('Polymer_Landscape.txt', end_to_end_dist, fmt='%s', delimiter=' ', newline='\n')
radii_gyr
end_to_end_dist
np.savetxt(rewrite_file, rmsd_array, fmt='%s', delimiter=' ', newline='\n')
if args.InputStructures2:
elif args.FocalResidue:
elif args.FocalDomain:
elif args.FoldRef:
if 'Dssp' in args.Analysis:
if args.InputStructures2:''' | ejp-lab/EJPLab_Computational_Projects | AbInitioVO-and-FastFloppyTail/Demo_fast/PolymerDsspAnalysis.py | PolymerDsspAnalysis.py | py | 22,239 | python | en | code | 9 | github-code | 13 |
12061449123 | import math
def merge_sort(srcList, sl_idx, sr_idx):
    """Recursively sort srcList[sl_idx..sr_idx] (inclusive bounds) in place.

    Splits the range at the midpoint, sorts both halves, then delegates the
    combine step to merge().
    """
    if sr_idx > sl_idx:
        middle = (sl_idx + sr_idx) // 2
        merge_sort(srcList, sl_idx, middle)
        merge_sort(srcList, middle + 1, sr_idx)
        merge(srcList, sl_idx, middle, sr_idx)
def merge(srcList, l_idx, middle, r_idx):
r_len = (middle - l_idx) + 1
l_len = (r_idx - middle)
left = [None] * (l_len + 1)
right = [None] * (r_len + 1)
for idx in range(l_len):
left[idx] = srcList[l_idx + 1]
for idx in range(r_len):
right[idx] = srcList[middle + idx + 1]
left[-1] = math.inf
right[-1] = math.inf
i = 0
j = 0
| MurylloEx/Data-Structures-and-Algorithms | Week_3/merge_sort.py | merge_sort.py | py | 648 | python | en | code | 0 | github-code | 13 |
23988844369 | """Este programa reliza la resta algebraica de dos imagenes, cuidando de que no haya saturación.
Luego la compara con la resta obtenida por openCV."""
# Import libraries.
import numpy as np
import cv2
# Read and store the two input images.
img1 = cv2.imread("imagen1.jpg")
img2 = cv2.imread("imagen2.jpg")
# Cap pixel values at 200 so the subtraction cannot saturate the result.
img1 = np.clip(img1, 0, 200)
img2 = np.clip(img2, 0, 200)
# Normalise the images to the [0, 1] range.
img1_norm = img1 / 255.0
img2_norm = img2 / 255.0
# Direct algebraic subtraction, in both orders.
img_res1 = img1_norm - img2_norm
img_res2 = img2_norm - img1_norm
# Scale back to [0, 255], clip negatives, and convert to uint8 so OpenCV
# can display the result.
img_res1 = np.clip(img_res1 * 255, 0, 255).astype(np.uint8)
img_res2 = np.clip(img_res2 * 255, 0, 255).astype(np.uint8)
# Same subtraction using OpenCV's saturating cv2.subtract for comparison.
restaOCV1 = cv2.subtract(img1,img2)
restaOCV2 = cv2.subtract(img2,img1)
# Display all images.
cv2.imshow("Imagen 1", img1)
cv2.imshow("Imagen 2", img2)
cv2.imshow("Resta img1 - img2", img_res1)
cv2.imshow("Resta img2 - img1", img_res2)
cv2.imshow("resta con cv2.substract() img1 - img2", restaOCV1)
cv2.imshow("resta con cv2.substract() img2 - img1", restaOCV2)
#cv2.imwrite("resta1.jpg",img_res1)
#cv2.imwrite("resta2.jpg",img_res2)
#cv2.imwrite("restaocv1.jpg",restaOCV1)
#cv2.imwrite("restaocv2.jpg",restaOCV2)
# Wait for a key press, then destroy the windows.
cv2.waitKey(0)
cv2.destroyAllWindows() | Atrabilis/UACH | Vision artificial/Tarea 2/P3b.py | P3b.py | py | 1,571 | python | es | code | 1 | github-code | 13 |
11008776610 | # Задание 3
# Improves on task 2.
# Add the ability to launch the "guessing game" function from the module via
# the terminal command line.
# The command line must accept 1 to 3 arguments: the function call parameters.
# A generator expression is used to convert the string CLI arguments to numbers.
from modules.guess import func_ugadai as guess_game
import sys
# Convert all CLI arguments to integers.
args = list (map (int, sys.argv[1:]))
# NOTE(review): the names min/max shadow the builtins; renaming would require
# touching the final guess_game(min, max, count) call as well.
if len(args) == 3:
    # min, max and attempt count all supplied.
    min = args[0]
    max = args[1]
    count = args[2]
elif len(args) == 2:
    # min and max supplied; default attempt count.
    min = args[0]
    max = args[1]
    count = 6
elif len(args) == 1:
    # only max supplied.
    min = 0
    max = args[0]
    count = 6
else:
    # no arguments: full defaults.
    min = 0
    max = 100
    count = 6
guess_game (min, max, count) | Vladimirs77nt/Python_diving | Task_06/Task_06_3.py | Task_06_3.py | py | 961 | python | ru | code | 0 | github-code | 13 |
41977756792 | import sys
from typing import *
import collections
# Fast line-based reads for the command stream.
input = sys.stdin.readline

# BOJ 10845/18258-style queue command processor: read N commands and print
# the result of each query command.
dq: Deque = collections.deque()
for _ in range(int(input())):
    tokens = input().split()
    op = tokens[0]
    if op == 'push':
        dq.append(int(tokens[1]))
    elif op == 'pop':
        print(dq.popleft() if dq else -1)
    elif op == 'size':
        print(len(dq))
    elif op == 'empty':
        print(0 if dq else 1)
    elif op == 'front':
        print(dq[0] if dq else -1)
    elif op == 'back':
        print(dq[-1] if dq else -1)
| honghyeong/python-problem-solving | BOJ/step18_queue&deque/18258.py | 18258.py | py | 744 | python | en | code | 0 | github-code | 13 |
71497023059 | # 언어 : Python
# 날짜 : 2021.08.24
# 문제 : BOJ > A→B(https://www.acmicpc.net/problem/16953)
# 티어 : 실버 1
# ======================================================================
import heapq
def solution():
    """BOJ 16953 (A -> B): print the minimum number of operations (counting
    the start as 1) to reach B from A using x*2 or append-digit-1, or -1 if
    B is unreachable.
    """
    # Min-heap of [value, operation_count]; the smallest value is expanded first.
    queue = [[A, 1]]
    count = 0
    while queue:
        node = heapq.heappop(queue)
        cur_num = node[0]
        cur_cnt = node[1]
        if cur_num == B:
            count = cur_cnt
            break
        elif cur_num > B:
            # Heap order guarantees every remaining value also exceeds B.
            break
        # Both operations strictly increase the value.
        heapq.heappush(queue, [cur_num * 2, cur_cnt + 1])
        heapq.heappush(queue, [int(str(cur_num) + "1"), cur_cnt + 1])
    # count stays 0 only when B was never reached.
    print(count) if count != 0 else print(-1)
A, B = map(int, input().split())
solution()
| eunseo-kim/Algorithm | BOJ/최고빈출 DFS, BFS 기본문제/06_A→B.py | 06_A→B.py | py | 703 | python | en | code | 1 | github-code | 13 |
3720558050 | # 내 풀이
# Iterative Fibonacci: prints the N-th Fibonacci number (1-indexed, F1=F2=1)
# using two rolling values in O(N) time and O(1) space.
N = int(input())
a,b = 1,1
for i in range(1,N):
    a,b = b,a+b
print(a)
''' for문 방법 1
N = int(input())
li = [
0 for _ in range(N)
]
li[0],li[1] = 1,1
for i in range(2,N):
li[i] = li[i-1] + li[i-2]
print(li[N-1])
'''
''' 재귀적 방법 - 메모이제이션 추가 활용
N = int(input())
memo = [
-1 for _ in range(N+1)
]
def fibo(n):
if memo[n] != -1:
return memo[n]
if n == 1 or n == 2:
memo[n] = 1
else :
memo[n] = fibo(n-1) + fibo(n-2)
return memo[n]
print(fibo(N))
''' | JaeEon-Ryu/Coding_test | LeeBrosCode/DP/1_ subproblem을 그대로 합치면 되는 DP/1) 피보나치 수.py | 1) 피보나치 수.py | py | 556 | python | en | code | 1 | github-code | 13 |
37914603148 | import AthenaCommon.Constants as Lvl
from AthenaCommon.Configurable import *
from AthenaCommon import CfgMgr
###
class PyComponents(object):
    """@c PyComponents is a placeholder where all factories for the python
    components will be collected and stored for easy look-up and call from
    the C++ side.
    The @a PyComponents.instances dictionary will store the instance
    e.g.:
     PyComponents.instances[ 'alg1' ] = <PyAthena::Alg/alg1 instance>
    All this boilerplate code will of course be done automatically for the user
    as soon as she uses the (python) @PyConfigurable classes.
    """
    # Registry mapping component job-option name -> python component instance.
    instances = {}
    pass
### helper methods ------------------------------------------------------------
def _get_prop_value(pycomp, propname):
    # Return the value of property `propname` on configurable `pycomp`,
    # falling back to a sensible default when the user did not set one.
    v = pycomp.properties()[propname]
    if v == pycomp.propertyNoValue:
        from AthenaCommon.AppMgr import ServiceMgr as svcMgr
        if propname == 'OutputLevel':
            # special case of OutputLevel: inherit from the MessageSvc
            v = getattr(svcMgr.MessageSvc, propname)
        else:
            v = pycomp.getDefaultProperty(propname)
            pass
    return v
def declare_property (o, kw, property_name,
                      dflt_value=None, attr_name=None,
                      doc=None):
    """mimic the C++ declareProperty method.
    @param `kw` is a dictionary of properties where properties' values are
    looked for
    @param `property_name` is the property to look for
    @param `dflt_value` is the default value to set in case the property is
    not in the dictionary of properties
    @param `attr_name` is the name of the property inside the python class.
    if None, then `property_name` will be used
    example:
    >>> declare_property (self, kw, 'TopAlg', dflt_value=list(),
    ...                   attr_name='_top_alg')
    >>> assert self._top_alg == list()
    >>> self.TopAlg = [1]
    >>> assert self._top_alg == [1]
    >>> assert self.TopAlg == [1]
    """
    if doc is None:
        doc=''
    val = kw.get (property_name, dflt_value)
    n = attr_name if attr_name is not None else property_name
    setattr (o, n, val)
    if attr_name is not None:
        # NOTE(review): redundant — when attr_name is not None, `n` above
        # already equals attr_name, so this repeats the previous setattr.
        setattr (o, attr_name, val)
    # install a property dispatch too
    # NOTE(review): fget/fset close over attr_name; when attr_name is None
    # they would call getattr(self, None). Callers appear to always pass
    # attr_name — TODO confirm.
    def fget (self): return getattr(self, attr_name)
    def fset (self,v): return setattr(self, attr_name, v)
    _p_obj = property(fget=fget,
                      fset=fset,
                      doc=doc)
    setattr(o.__class__, property_name, _p_obj)
    return
### Configurable base class for PyAlgorithms ----------------------------------
class CfgPyAlgorithm( ConfigurableAlgorithm ):
    """Configurable class for python algorithms (PyAthena::Alg).

    At setup() time the instance registers itself with the PyComponents
    registry (and PyAthena.algs) so the C++ PyComponentMgr can retrieve
    the python implementation by job-option name.
    """

    def __init__( self, name, **kw ):
        super( CfgPyAlgorithm, self ).__init__(name)
        # apply properties handed in as keyword arguments
        # (dict.items() instead of py2-only dict.iteritems())
        for n,v in kw.items():
            setattr(self,n,v)

    # pickling support
    def __getstate__( self ):
        dic = super(CfgPyAlgorithm, self).__getstate__()
        dic.update(self.__dict__)
        if 'msg' in dic:
            del dic['msg'] # logger cannot be pickled
        return dic

    @property
    def msg(self):
        # lazily fetch a logger named after the job-option name
        import AthenaCommon.Logging as _L
        return _L.logging.getLogger( self.getJobOptName() )

    def getGaudiType( self ): return 'Algorithm'
    def getType(self): return 'PyAthena::Alg'
    def getDlls(self): return 'AthenaPython'
    def getHandle(self): return None

    def setup(self):
        from AthenaCommon import CfgMgr
        from AthenaCommon.AppMgr import ServiceMgr as svcMgr
        # the PyComponentMgr service instantiates the python components
        if not hasattr( svcMgr, 'PyComponentMgr' ):
            svcMgr += CfgMgr.PyAthena__PyComponentMgr('PyComponentMgr')
        import PyAthena
        ## populate the PyComponents instances repository
        name = self.getJobOptName()
        o = PyComponents.instances.get(name, None)
        if not (o is None) and not (o is self):
            err = "A python component [%r] has already been "\
                  "registered with the PyComponents registry !" % o
            raise RuntimeError(err)
        PyComponents.instances[name] = self
        setattr(PyAthena.algs, name, self)
        ## special case of the OutputLevel: take the value from the
        ## svcMgr.MessageSvc if none already set by user
        setattr(self, 'OutputLevel', _get_prop_value (self, 'OutputLevel') )
        return super(CfgPyAlgorithm, self).setup()

    pass # class CfgPyAlgorithm
### Configurable base class for PyServices ------------------------------------
class CfgPyService( ConfigurableService ):
    """Configurable class for python services (PyAthena::Svc).

    At setup() time the instance registers itself with the PyComponents
    registry (and PyAthena.services) so the C++ PyComponentMgr can
    retrieve the python implementation by job-option name.
    """

    def __init__( self, name, **kw ):
        super( CfgPyService, self ).__init__(name)
        # apply properties handed in as keyword arguments
        # (dict.items() instead of py2-only dict.iteritems())
        for n,v in kw.items():
            setattr(self,n,v)

    # pickling support
    def __getstate__( self ):
        dic = super(CfgPyService, self).__getstate__()
        dic.update(self.__dict__)
        if 'msg' in dic:
            del dic['msg'] # logger cannot be pickled
        return dic

    @property
    def msg(self):
        # lazily fetch a logger named after the job-option name
        import AthenaCommon.Logging as _L
        return _L.logging.getLogger( self.getJobOptName() )

    def getGaudiType( self ): return 'Service'
    def getType(self): return 'PyAthena::Svc'
    def getDlls(self): return 'AthenaPython'
    def getHandle(self): return None

    def setup(self):
        from AthenaCommon import CfgMgr
        from AthenaCommon.AppMgr import ServiceMgr as svcMgr
        # the PyComponentMgr service instantiates the python components
        if not hasattr( svcMgr, 'PyComponentMgr' ):
            svcMgr += CfgMgr.PyAthena__PyComponentMgr('PyComponentMgr')
        import PyAthena
        ## populate the PyComponents instances repository
        name = self.getJobOptName()
        o = PyComponents.instances.get(name, None)
        if not (o is None) and not (o is self):
            err = "A python component [%r] has already been "\
                  "registered with the PyComponents registry !" % o
            raise RuntimeError(err)
        PyComponents.instances[name] = self
        setattr(PyAthena.services,name,self)
        ## special case of the OutputLevel: take the value from the
        ## svcMgr.MessageSvc if none already set by user
        setattr(self, 'OutputLevel', _get_prop_value (self, 'OutputLevel') )
        return super(CfgPyService, self).setup()

    pass # class CfgPyService
### Configurable base class for PyAlgTools ------------------------------------
class CfgPyAlgTool( ConfigurableAlgTool ):
    """Configurable class for python tools (PyAthena::Tool).

    At setup() time the instance registers itself with the PyComponents
    registry so the C++ PyComponentMgr can retrieve the python
    implementation by job-option name.
    """

    def __init__( self, name, **kw ):
        super( CfgPyAlgTool, self ).__init__(name)
        # apply properties handed in as keyword arguments
        # (dict.items() instead of py2-only dict.iteritems())
        for n,v in kw.items():
            setattr(self,n,v)

    # pickling support
    def __getstate__( self ):
        dic = super(CfgPyAlgTool, self).__getstate__()
        dic.update(self.__dict__)
        if 'msg' in dic:
            del dic['msg'] # logger cannot be pickled
        return dic

    @property
    def msg(self):
        # lazily fetch a logger named after the job-option name
        import AthenaCommon.Logging as _L
        return _L.logging.getLogger( self.getJobOptName() )

    def getGaudiType( self ): return 'AlgTool'
    def getType(self): return 'PyAthena::Tool'
    def getDlls(self): return 'AthenaPython'
    def getHandle(self): return None

    def setup(self):
        from AthenaCommon import CfgMgr
        from AthenaCommon.AppMgr import ServiceMgr as svcMgr
        # the PyComponentMgr service instantiates the python components
        if not hasattr( svcMgr, 'PyComponentMgr' ):
            svcMgr += CfgMgr.PyAthena__PyComponentMgr('PyComponentMgr')
        ## populate the PyComponents instances repository
        name = self.getJobOptName()
        o = PyComponents.instances.get(name, None)
        if not (o is None) and not (o is self):
            err = "A python component [%r] has already been "\
                  "registered with the PyComponents registry !" % o
            raise RuntimeError(err)
        PyComponents.instances[name] = self
        ## special case of the OutputLevel: take the value from the
        ## svcMgr.MessageSvc if none already set by user
        setattr(self, 'OutputLevel', _get_prop_value (self, 'OutputLevel') )
        return super(CfgPyAlgTool, self).setup()

    pass # class CfgPyAlgTool
### Configurable base class for PyAud -----------------------------------------
class CfgPyAud( ConfigurableAuditor ):
    """Configurable class for python auditors (PyAthena::Aud).

    At setup() time the instance registers itself with the PyComponents
    registry so the C++ PyComponentMgr can retrieve the python
    implementation by job-option name.
    """

    def __init__( self, name, **kw ):
        super( CfgPyAud, self ).__init__(name)
        # apply properties handed in as keyword arguments
        # (dict.items() instead of py2-only dict.iteritems())
        for n,v in kw.items():
            setattr(self,n,v)

    # pickling support
    def __getstate__( self ):
        dic = super(CfgPyAud, self).__getstate__()
        dic.update(self.__dict__)
        if 'msg' in dic:
            del dic['msg'] # logger cannot be pickled
        return dic

    @property
    def msg(self):
        # lazily fetch a logger named after the job-option name
        import AthenaCommon.Logging as _L
        return _L.logging.getLogger( self.getJobOptName() )

    def getGaudiType( self ): return 'Auditor'
    def getType(self): return 'PyAthena::Aud'
    def getDlls(self): return 'AthenaPython'
    def getHandle(self): return None

    def setup(self):
        from AthenaCommon import CfgMgr
        from AthenaCommon.AppMgr import ServiceMgr as svcMgr
        # the PyComponentMgr service instantiates the python components
        if not hasattr( svcMgr, 'PyComponentMgr' ):
            svcMgr += CfgMgr.PyAthena__PyComponentMgr('PyComponentMgr')
        ## populate the PyComponents instances repository
        name = self.getJobOptName()
        o = PyComponents.instances.get(name, None)
        if not (o is None) and not (o is self):
            err = "A python component [%r] has already been "\
                  "registered with the PyComponents registry !" % o
            raise RuntimeError(err)
        PyComponents.instances[name] = self
        ## special case of the OutputLevel: take the value from the
        ## svcMgr.MessageSvc if none already set by user
        setattr(self, 'OutputLevel', _get_prop_value (self, 'OutputLevel') )
        return super(CfgPyAud, self).setup()

    pass # class CfgPyAud
## add the declare_property method...
# Attach declare_property to every configurable class above, then drop
# the module-level name so only the method form remains.
for klass in (CfgPyAlgorithm, CfgPyService, CfgPyAlgTool, CfgPyAud):
    setattr(klass, 'declare_property', declare_property)
del declare_property
##
### -----
class _PyCompHandle(object):
    """a class to mimic the gaudi C++ {Tool,Svc}Handle classes: automatic call
    to `initialize` when __getattr__ is called on the instance.
    """
    def __init__(self, parent, attr_name):
        msg = parent.msg.verbose
        msg('installing py-comp-handle for [%s.%s]...',
            parent.name(), attr_name)
        # state is stored directly in __dict__ to bypass our own
        # __setattr__ interception below
        self.__dict__.update({
            '_parent': parent,
            '_name': attr_name,
            '_attr': getattr(parent, attr_name),
            '_init_called': False,
            })
        msg('installing py-comp-handle for [%s.%s]... [done]',
            parent.name(), attr_name)
        return
    def __getattribute__(self, n):
        # underscore names are the handle's own state: no proxying
        if n.startswith('_'):
            return super(_PyCompHandle, self).__getattribute__(n)
        obj = self.__dict__['_attr']
        if not self.__dict__['_init_called']:
            parent = self.__dict__['_parent']
            # FIXME: should we raise something in case initialize failed ?
            obj.initialize()
            self.__dict__['_init_called'] = True
            # replace the handle with the proxied object
            setattr(parent, self.__dict__['_name'], obj)
        return getattr(obj, n)
    def __setattr__(self, n, v):
        # underscore names are the handle's own state: no proxying
        if n.startswith('_'):
            return super(_PyCompHandle, self).__setattr__(n,v)
        obj = self.__dict__['_attr']
        if not self.__dict__['_init_called']:
            parent = self.__dict__['_parent']
            # FIXME: should we raise something in case initialize failed ?
            obj.initialize()
            self.__dict__['_init_called'] = True
            # replace the handle with the proxied object
            setattr(parent, self.__dict__['_name'], obj)
        return setattr(obj, n, v)
    pass # _PyCompHandle
def _is_pycomp(obj):
    # True when `obj` is one of this module's python configurable classes.
    return isinstance(
        obj,
        (CfgPyAlgorithm, CfgPyService, CfgPyAlgTool, CfgPyAud)
        )
def _install_fancy_attrs():
    """loop over all pycomponents, inspect their attributes and install
    a handle in place of (sub) pycomponents to trigger the auto-initialize
    behaviour of (C++) XyzHandles.
    """
    import PyUtils.Logging as L
    msg = L.logging.getLogger('PyComponentMgr').verbose
    # snapshot registry and instance dicts: we replace attribute values
    # while walking them, and py2-only iteritems() is gone in py3
    comps = list(PyComponents.instances.items())
    ncomps = len(comps)
    msg('installing fancy getattrs... (%i)', ncomps)
    for k,comp in comps:
        msg('handling [%s]...', k)
        for attr_name, attr in list(comp.__dict__.items()):
            if _is_pycomp(attr):
                msg(' ==> [%s]...', attr_name)
                # swap the sub-component for a lazy-initializing handle
                setattr(comp, attr_name,
                        _PyCompHandle(parent=comp, attr_name=attr_name))
                msg(' ==> [%s]... [done]', attr_name)
    msg('installing fancy getattrs... (%i) [done]',ncomps)
    return
| rushioda/PIXELVALID_athena | athena/Control/AthenaPython/python/Configurables.py | Configurables.py | py | 12,878 | python | en | code | 1 | github-code | 13 |
74844271056 | import math
t = int(input())  # number of test cases
#t = 1
def reverse(n):
    """Return the decimal digits of non-negative n in reversed order."""
    result = 0
    while n > 0:
        n, digit = divmod(n, 10)
        result = result * 10 + digit
    return result
def isPrime(n):
    """Return True when n is a prime number (trial division)."""
    if n < 2:
        return False
    # only divisors up to floor(sqrt(n)) need checking
    for d in range(2, math.isqrt(n) + 1):
        if n % d == 0:
            return False
    return True
def sumDigit(n):
    """Return the sum of the decimal digits of non-negative n."""
    total = 0
    while n > 0:
        n, digit = divmod(n, 10)
        total += digit
    return total
def mulDigit(n):
    """Return the product of the non-zero decimal digits of n
    (1 when n has no non-zero digits)."""
    product = 1
    while n > 0:
        n, digit = divmod(n, 10)
        if digit != 0:  # zero digits are skipped, not multiplied in
            product *= digit
    return product
def solve() :
    """Read one digit string; print YES when, for every index, the
    primality of the index matches the primality of the digit there,
    otherwise print NO."""
    digits = input()
    for pos in range(len(digits)):
        # position and digit must agree on primality
        if isPrime(pos) != isPrime(int(digits[pos])):
            print("NO")
            return
    print("YES")
# Run every test case.
for i in range(1,t+1):
    solve()
| DucAnhNg2002/SourceCode | Codeptit - Python/PY01057 - VỊ TRÍ NGUYÊN TỐ.py | PY01057 - VỊ TRÍ NGUYÊN TỐ.py | py | 847 | python | en | code | 1 | github-code | 13 |
19975348424 | from time import sleep
from kivy.lib import osc
from kivy.logger import Logger
from service.cacher import Cacher
from service.androidwrap import AndroidWrap
from service.comms import Comms
from jnius import autoclass
from jnius import cast
import os
import copy
# Activity related classes.
JPythonActivity = autoclass('org.kivy.android.PythonActivity')
# Service related classes.
JCachingService = autoclass('{}.Service{}'.format('com.youmacro.browser', 'Downloader'))
JPythonService = autoclass('org.kivy.android.PythonService')
j_python_service = JPythonService.mService
# Intent related classes.
JContext = autoclass('android.content.Context')
JIntent = autoclass('android.content.Intent')
JPendingIntent = autoclass('android.app.PendingIntent')
# Notification classes.
JNotificationBuilder = autoclass('android.app.Notification$Builder')
JAction = autoclass('android.app.Notification$Action')
# Basic classes.
JFile = autoclass('java.io.File')
JString = autoclass('java.lang.String')
JArrayList = autoclass('java.util.ArrayList')
JUri = autoclass('android.net.Uri')
JBundle = autoclass('android.os.Bundle')
# Icon related classes.
JDimen = autoclass("android.R$dimen")
JBitmap = autoclass("android.graphics.Bitmap")
JDrawable = autoclass("{}.R$drawable".format(j_python_service.getPackageName()))
class CachingService(object):
    """Android background service: extracts media from a web page through
    Cacher (youtube_dl based) and reports download progress via Android
    notifications."""
    def __init__(self):
        # Cacher writes into the shared Download dir and invokes our
        # progress hook with youtube_dl-style progress dicts.
        self._cacher = Cacher(AndroidWrap.get_download_dir(), [self.on_progress])
        self._current_notification_id = 0
        self._current_pending_intent_id = 0
        self._large_icon = CachingService.get_scaled_icon('icon')
    def on_progress(self, info):
        """Progress hook: post or update a notification for the download
        described by `info` (a youtube_dl progress dict)."""
        print('on_progress: self: <%s> obj: <%s>' % (self, str(info)))
        status = info['status']
        # Form the title.
        filepath = info['filename']
        filename = os.path.basename(filepath)
        title = filename
        print('FFFFFFFFFFFFFFFFFFFFFFFFFFFFFf file path: %s' % filepath)
        # Form the message.
        message = ''
        if status == 'downloading':
            percent_string = info['_percent_str']
            total_bytes_str = info['_total_bytes_str']
            speed_str = info['_speed_str']
            message = percent_string + ' of ' + total_bytes_str + ' at ' + speed_str
            self.post_progress(title, message)
        elif status == 'finished':
            elapsed_str = info['_elapsed_str']
            total_bytes_str = info['_total_bytes_str']
            message = 'downloaded ' + total_bytes_str + ' in ' + elapsed_str
            self.post_finished(title, message, filepath)
    def extract_and_download_old(self, message, *args):
        """This method first extracts the url for the video on the page. Then it downloads it using Android's download
        manager app. Note that this only works when the extracted url points to a single video file such as mp4. When
        the url points to streaming formats like dash, Android's download manager app will not be able to downloading
        this as the data comes in through multiple files which then need to be concatenated. So do not use this method.
        It is only kept for reference purposes."""
        try:
            # Get the current page's url.
            page_url = message[2]
            # Extract the download url.
            url, filename = self._cacher.get_media_url(page_url)
            if url == '':
                return
            print('download_url: ' + url)
            # Create a name for the downloaded file.
            path = AndroidWrap.get_download_dir() + filename
            # Start the download.
            AndroidWrap.download_from_service(url, path)
        except Exception as e:
            print("Exception: %s" % e)
    def extract_and_download(self, message, *args):
        # NOTE(review): `message` is an OSC message; index 2 is assumed to
        # carry the page url — confirm against the UI-side sender.
        self.post_starting()
        print ("EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE")
        print("message: %s" % message)
        """This method uses youtube_dl to download the video, given the webpage's url."""
        try:
            # Get the current page's url.
            page_url = message[2]
            self._cacher.cache_media([page_url])
        except Exception as e:
            print("EEEEEEEEEEEEEEEEEEEEEEEEEEEee error occurred during extract and download")
            print("Exception: %s" % e)
    def post_starting(self):
        # Initial notification shown while the page is being analyzed.
        intent = self.create_self_intent()
        self.post(u'Processing Web Page', u'Looking for links in the web page.', intent)
    def post_progress(self, title, message):
        # Progress notifications re-open our own activity when tapped.
        intent = self.create_self_intent()
        self.post(title, message, intent)
    def post_finished(self, title, message, filepath):
        # Completion notification opens the downloaded file for playback.
        intent = self.create_playback_intent(filepath)
        self.post(title, message, intent)
        # Update our current notification id.
        self._current_notification_id += 1
    @staticmethod
    def create_self_intent():
        """Create an intent which launches our main activity."""
        context = j_python_service.getApplication().getApplicationContext()
        intent = JIntent(context, JPythonActivity)
        intent.setFlags(
            JIntent.FLAG_ACTIVITY_CLEAR_TOP | JIntent.FLAG_ACTIVITY_SINGLE_TOP | JIntent.FLAG_ACTIVITY_NEW_TASK)
        intent.setAction(JIntent.ACTION_MAIN)
        intent.addCategory(JIntent.CATEGORY_LAUNCHER)
        return intent
    @staticmethod
    def create_playback_intent(filepath):
        """Create an intent which uses the user's default app to play video."""
        # Convert some python str types into java strings.
        filepath = u'file://' + filepath
        filepath = JString(filepath.encode('utf-16'), 'utf-16')
        intent = JIntent(JIntent.ACTION_VIEW)
        mime_type = JString(u'video/*'.encode('utf-16'), 'utf-16')
        intent.setDataAndType(JUri.parse(filepath), mime_type)
        return intent
    def post(self, title, message, intent = None):
        """Build and post an Android notification; the optional `intent`
        is wrapped in a PendingIntent fired when the user taps it."""
        # Convert some python str types into java strings.
        title = JString(title.encode('utf-16'), 'utf-16')
        message = JString(message.encode('utf-16'), 'utf-16')
        # Setup our notification builder.
        builder = JNotificationBuilder(j_python_service)
        builder.setContentTitle(title)
        builder.setContentText(message)
        builder.setSmallIcon(JDrawable.icon)
        builder.setLargeIcon(self._large_icon)
        if intent:
            # Wrap the intent into a pending intent.
            pending_intent = JPendingIntent.getActivity(j_python_service, self._current_pending_intent_id, intent, 0)
            # Increment the pending intent id counter.
            self._current_pending_intent_id += 1
            # Configure the builder to use the pending intent.
            builder.setContentIntent(pending_intent)
        # Build our notification.
        notification = builder.getNotification()
        # Use the notification service to post it.
        notification_service = j_python_service.getSystemService(JContext.NOTIFICATION_SERVICE)
        notification_service.notify(self._current_notification_id, notification)
    @staticmethod
    def get_scaled_icon(icon):
        """Return a Bitmap of drawable `icon` (png name without extension)
        scaled to the platform's large-notification-icon dimensions."""
        scaled_icon = getattr(JDrawable, icon)
        scaled_icon = cast("android.graphics.drawable.BitmapDrawable",
                           j_python_service.getResources().getDrawable(scaled_icon))
        scaled_icon = scaled_icon.getBitmap()
        res = j_python_service.getResources()
        height = res.getDimension(JDimen.notification_large_icon_height)
        width = res.getDimension(JDimen.notification_large_icon_width)
        return JBitmap.createScaledBitmap(scaled_icon, width, height, False)
    @staticmethod
    def intent_callback(context, intent, *args):
        ''' Notification Button Callback
        If everything was working correctly, this function would be called
        when the user press a notification button.
        '''
        # context, intent
        Logger.warning("captured intent")
        Logger.warning("%s" % context)
        Logger.warning("%s" % intent)
        Logger.warning("%s" % args)
if __name__ == '__main__':
    caching_service = CachingService()
    # Listen for OSC messages from the UI process and dispatch
    # '/extract_and_download' requests to the service instance.
    osc.init()
    oscid = osc.listen(ipAddr='127.0.0.1', port=Comms.service_port)
    osc.bind(oscid, caching_service.extract_and_download, '/extract_and_download')
    while True:
        osc.readQueue(oscid)
        print('BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBbb background service')
        sleep(.1)
| nodegraph/youmacro | service/main.py | main.py | py | 8,559 | python | en | code | 0 | github-code | 13 |
7318551416 | import swapper
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from openwisp_users.api.mixins import FilterSerializerByOrgManaged
from openwisp_utils.api.serializers import ValidatedModelSerializer
from ..swapper import load_model
# Resolve the swappable models once at import time.
BatchUpgradeOperation = load_model('BatchUpgradeOperation')
Build = load_model('Build')
Category = load_model('Category')
FirmwareImage = load_model('FirmwareImage')
UpgradeOperation = load_model('UpgradeOperation')
DeviceFirmware = load_model('DeviceFirmware')
Device = swapper.load_model('config', 'Device')
class BaseMeta:
    # Shared Meta options: the timestamp fields are model-managed.
    read_only_fields = ['created', 'modified']
class BaseSerializer(FilterSerializerByOrgManaged, ValidatedModelSerializer):
    # Base serializer: model full_clean validation + org-managed filtering.
    pass
class CategorySerializer(BaseSerializer):
    # Full read/write representation of a firmware Category.
    class Meta(BaseMeta):
        model = Category
        fields = '__all__'
class CategoryRelationSerializer(BaseSerializer):
    # Compact category representation used when nested in other payloads.
    class Meta:
        model = Category
        fields = ['name', 'organization']
class FirmwareImageSerializer(BaseSerializer):
    def validate(self, data):
        # The parent build comes from the nested URL, not from the payload.
        data['build'] = self.context['view'].get_parent_queryset().get()
        return super().validate(data)
    class Meta(BaseMeta):
        model = FirmwareImage
        fields = '__all__'
        read_only_fields = BaseMeta.read_only_fields + ['build']
class BuildSerializer(BaseSerializer):
    # Read-only nested representation of the build's category.
    category_relation = CategoryRelationSerializer(read_only=True, source='category')
    class Meta(BaseMeta):
        model = Build
        fields = '__all__'
class UpgradeOperationSerializer(serializers.ModelSerializer):
    # Full read model of a single upgrade operation (includes 'created').
    class Meta:
        model = UpgradeOperation
        fields = ('id', 'device', 'image', 'status', 'log', 'modified', 'created')
class DeviceUpgradeOperationSerializer(serializers.ModelSerializer):
    # Same as UpgradeOperationSerializer but without the 'created' field.
    class Meta:
        model = UpgradeOperation
        fields = ('id', 'device', 'image', 'status', 'log', 'modified')
class BatchUpgradeOperationListSerializer(BaseSerializer):
    # List view: nested build (with its category relation), read-only.
    build = BuildSerializer(read_only=True)
    class Meta:
        model = BatchUpgradeOperation
        fields = '__all__'
class BatchUpgradeOperationSerializer(BatchUpgradeOperationListSerializer):
    # Detail view: exposes computed model properties and the nested
    # list of individual upgrade operations belonging to the batch.
    progress_report = serializers.CharField(max_length=200)
    success_rate = serializers.IntegerField(read_only=True)
    failed_rate = serializers.IntegerField(read_only=True)
    aborted_rate = serializers.IntegerField(read_only=True)
    upgradeoperations = UpgradeOperationSerializer(
        read_only=True, source='upgradeoperation_set', many=True
    )
    class Meta:
        model = BatchUpgradeOperation
        fields = '__all__'
class DeviceFirmwareSerializer(ValidatedModelSerializer):
    class Meta:
        model = DeviceFirmware
        fields = ('id', 'image', 'installed', 'modified')
        read_only_fields = ('installed', 'modified')
    def validate(self, data):
        # Device is taken from the view context when absent in the payload.
        if not data.get('device'):
            device_id = self.context.get('device_id')
            device = self._get_device_object(device_id)
            data.update({'device': device})
        image = data.get('image')
        device = data.get('device')
        # Reject cross-organization assignments of firmware images.
        if (
            image
            and device
            and image.build.category.organization != device.organization
        ):
            raise ValidationError(
                {
                    'image': _(
                        'The organization of the image doesn\'t '
                        'match the organization of the device'
                    )
                }
            )
        return super().validate(data)
    def _get_device_object(self, device_id):
        # Return the Device with this id, or None when it does not exist.
        try:
            device = Device.objects.get(id=device_id)
            return device
        except Device.DoesNotExist:
            return None
| openwisp/openwisp-firmware-upgrader | openwisp_firmware_upgrader/api/serializers.py | serializers.py | py | 3,843 | python | en | code | 40 | github-code | 13 |
38352618546 | from django.shortcuts import render
from rest_framework import generics
from rest_framework.views import APIView
from .serializers import *
from .models import *
from rest_framework.decorators import api_view
from django.http.response import JsonResponse
# Create your views here.
class CampaignView:
    """Function-based API views for the Campaigns model.

    NOTE: the methods are plain functions grouped in a class namespace
    (they take ``request``, not ``self``) and are routed directly, e.g.
    ``CampaignView.createCampaign``.
    """

    @api_view(['GET'])
    def getCampaignsOfSpecificUser(request, user_id):
        """Return all campaigns created by the given user."""
        campaign = Campaigns.objects.filter(user_id=user_id)
        campaign_serializer = CampaignSerailizer(campaign, many=True)
        return JsonResponse(campaign_serializer.data, safe=False)

    @api_view(['GET'])
    def getCampaignsForSpecificSegment(request, segment_id):
        """Return all campaigns targeting the given segment."""
        campaign = Campaigns.objects.filter(segment_id=segment_id)
        campaign_serializer = CampaignSerailizer(campaign, many=True)
        return JsonResponse(campaign_serializer.data, safe=False)

    @api_view(['POST'])
    def createCampaign(request):
        """Validate and persist a new campaign from the request body."""
        serializer = CampaignSerailizer(data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return JsonResponse(serializer.data, safe=False)

    # MS Teams incoming-webhook ids keyed by segment id; the previous
    # if/elif chain duplicated the full webhook URL seven times.
    _WEBHOOK_BASE = (
        'https://deloitte.webhook.office.com/webhookb2/'
        'd0bc1cb2-9bea-470f-a46e-21e7fdfe1114@36da45f1-dd2c-4d1f-af13-5abe46b99921/'
        'IncomingWebhook/{}/50d6bf1b-a7bb-48e3-b631-b8e06b58ad3a'
    )
    _SEGMENT_WEBHOOK_IDS = {
        1: '748f5d45ae93459585208ecf3be9f063',
        2: 'a435e70c8d1743a4a9899ad8b58422e0',
        3: 'b5e86d587f35438e806c6c0dcb76cf75',
        4: '09e1f432044340ffb162fbe274ae9801',
        5: '76b67d060dde4316968d4d7e70e2ae94',
        10: 'afe7af902cf444159b050da39f82531c',
    }
    _DEFAULT_WEBHOOK_ID = 'da947cbbae1e4918887baf53bd047529'

    @api_view(['POST'])
    def test(request):
        """Forward the request payload to the Teams webhook matching its
        ``segment`` field, falling back to a default channel."""
        import requests
        print(request.data)
        hook_id = CampaignView._SEGMENT_WEBHOOK_IDS.get(
            request.data['segment'], CampaignView._DEFAULT_WEBHOOK_ID
        )
        res = requests.post(
            CampaignView._WEBHOOK_BASE.format(hook_id), json=request.data
        )
        return JsonResponse({"msg": res.text})

    @api_view(['DELETE'])
    def removeCampaign(request, campaign_id):
        """Delete the campaign with the given primary key."""
        campaign = Campaigns.objects.filter(id=campaign_id)
        campaign.delete()
        return JsonResponse({"msg": "Successfully deleted "})
| GoyalAnuj973/Target-Marketing-Tool | TargetMarketingTools-backend/Tmarket/campaigns/views.py | views.py | py | 3,865 | python | en | code | 0 | github-code | 13 |
3128766035 | import sys
from PyQt6.QtWidgets import QApplication, QMainWindow, QWidget, QLabel, QLineEdit, QPushButton, QTableWidget, QTableWidgetItem, QVBoxLayout, QVBoxLayout, QHBoxLayout, QTextEdit, QInputDialog, QMessageBox, QDialog, QDialogButtonBox, QFileDialog,QAbstractItemView, QScrollArea, QComboBox
from PyQt6.QtGui import QFont
from PyQt6.QtCore import Qt, QRect
from TreeOfLife import *
class LifeHeirarchi(QWidget):
default_font = QFont("B Nazanin", 12)
class Dialog(QDialog):
def __init__(self, windowTitle : str ,parent=None):
super().__init__(parent)
self.setWindowTitle(windowTitle)
self.setModal(True)
self.adjustSize()
def text_dialog(self, input_txt):
vbox = QVBoxLayout()
label = QLabel(input_txt)
label.setFont(LifeHeirarchi.default_font)
vbox.addWidget(label)
self.setLayout(vbox)
def searchDialog(self):
self.vbox = QVBoxLayout()
superSet_hbox = QHBoxLayout()
superSet_text = QLabel("Enter Super-Group (optional):")
superSet_text.setFont(LifeHeirarchi.default_font)
superSet = QLineEdit()
superSet.setFont(LifeHeirarchi.default_font)
superSet_hbox.addWidget(superSet_text)
superSet_hbox.addWidget(superSet)
self.vbox.addLayout(superSet_hbox)
query_hbox = QHBoxLayout()
query_text = QLabel("Search Query (optional): ")
query_text.setFont(LifeHeirarchi.default_font)
query = QLineEdit()
query.setFont(LifeHeirarchi.default_font)
query_hbox.addWidget(query_text)
query_hbox.addWidget(query)
combo_hbox = QHBoxLayout()
combo_txt = QLabel("Choose Matching Type : ")
combo_txt.setFont(LifeHeirarchi.default_font)
combo_hbox.addWidget(combo_txt)
combo_box = QComboBox()
combo_box.setFont(LifeHeirarchi.default_font)
combo_box.addItem("inclusive")
combo_box.addItem("exact")
combo_box.addItem("regular expression")
combo_box.setCurrentIndex(0)
combo_hbox.addWidget(combo_box)
self.vbox.addLayout(combo_hbox)
self.vbox.addLayout(query_hbox)
self.filter_box = QVBoxLayout()
self.vbox.addLayout(self.filter_box)
self.filterNum = 0
self.filters = dict()
def addfilter():
self.filterNum
self.filterNum += 1
vbox = QVBoxLayout()
filterlabel = QLabel(f"-Filter{self.filterNum}----------------")
filterlabel.setFont(LifeHeirarchi.default_font)
vbox.addWidget(filterlabel)
hbox1 = QHBoxLayout()
label1 = QLabel("Attribute Name : ")
label1.setFont(LifeHeirarchi.default_font)
attr = QLineEdit()
attr.setFont(LifeHeirarchi.default_font)
hbox1.addWidget(label1)
hbox1.addWidget(attr)
vbox.addLayout(hbox1)
hbox2 = QHBoxLayout()
label2 = QLabel("Operation : ")
label2.setFont(LifeHeirarchi.default_font)
combo_box = QComboBox()
combo_box.setFont(LifeHeirarchi.default_font)
combo_box.addItem("exact")
combo_box.addItem("range")
combo_box.addItem("lt")
combo_box.addItem("lte")
combo_box.addItem("gt")
combo_box.addItem("gte")
combo_box.setCurrentIndex(0)
hbox2.addWidget(combo_box)
vbox.addLayout(hbox2)
hbox3 = QHBoxLayout()
label3 = QLabel("Value : ")
label3.setFont(LifeHeirarchi.default_font)
val = QLineEdit()
val.setFont(LifeHeirarchi.default_font)
hbox3.addWidget(label3)
hbox3.addWidget(val)
vbox.addLayout(hbox3)
self.filters[self.filterNum] = [attr, combo_box, val]
self.filter_box.addLayout(vbox)
self.adjustSize()
add_filter_btn = QPushButton("Add New Filter")
add_filter_btn.setFont(LifeHeirarchi.default_font)
add_filter_btn.clicked.connect(addfilter)
self.vbox.addWidget(add_filter_btn)
search_btn = QPushButton("Search")
search_btn.setFont(LifeHeirarchi.default_font)
self.vbox.addWidget(search_btn)
self.table = QTableWidget()
self.table.setFont(LifeHeirarchi.default_font)
self.search_results = dict()
def search():
filters = dict()
for val in self.filters.values():
filters[val[0]] = (val[1], val[2])
self.search_results = Group.advancedSearch(superSet.text(), query.text(), combo_box.currentText(), filters)
row = 0
self.table.setColumnCount(2)
self.table.setRowCount(len(self.search_results.keys()))
self.table.setHorizontalHeaderLabels(["Type", "Name"])
for type, name in self.search_results.keys():
self.table.setItem(row, 0, QTableWidgetItem(type))
self.table.setItem(row, 1, QTableWidgetItem(name))
row += 1
self.adjustSize()
search_btn.clicked.connect(search)
def clicked_on_item(item):
row = item.row()
txt = LifeHeirarchi.Dialog("Item Info", self)
key = (self.table.item(row, 0).text(), self.table.item(row, 1).text())
instance = self.search_results[key]
txt.text_dialog(instance.info)
txt.exec()
self.table.setSelectionBehavior(QAbstractItemView.SelectionBehavior.SelectRows)
self.table.setSelectionMode(QAbstractItemView.SelectionMode.SingleSelection)
self.table.itemClicked.connect(clicked_on_item)
scroll_area = QScrollArea()
scroll_area.setWidget(self.table)
layout = QVBoxLayout()
layout.addWidget(scroll_area)
self.vbox.addLayout(layout)
self.setLayout(self.vbox)
self.adjustSize()
self.setMaximumHeight(460)
def addDialog(self):
self.vbox = QVBoxLayout()
input_text = QTextEdit()
input_text.setFont(LifeHeirarchi.default_font)
input_text.setPlaceholderText('Enter the new group:\nformat : (type="...", name="...", superSet=("type", "name"), age=..., weight=..., size=..., info="...", extraAttr=(key="value")')
self.vbox.addWidget(input_text)
def clicked_on():
Group.createNew(input_text.toPlainText())
self.close()
btn = QPushButton("Add")
btn.setFont(LifeHeirarchi.default_font)
btn.clicked.connect(clicked_on)
self.vbox.addWidget(btn)
self.setLayout(self.vbox)
def editDialog(self):
self.vbox = QVBoxLayout()
self.setLayout(self.vbox)
group_combo = QComboBox()
group_combo.setFont(LifeHeirarchi.default_font)
attr_combo = QComboBox()
attr_combo.setFont(LifeHeirarchi.default_font)
for instance in Group._instances.keys():
group_combo.addItem(str(instance))
group_combo.setCurrentIndex(0)
group_btn = QPushButton("Choose Group")
group_btn.setFont(LifeHeirarchi.default_font)
def group_clicked():
attr_combo.clear()
instance = Group._instances[list(Group._instances.keys())[group_combo.currentIndex()]]
try:
for attr in instance.attributes.keys():
attr_combo.addItem(attr)
except:
pass
group_btn.clicked.connect(group_clicked)
h1 = QHBoxLayout()
h1.addWidget(group_combo)
h1.addWidget(group_btn)
replace = QLineEdit()
replace.setPlaceholderText("Enter the new value for attribute:")
replace.setFont(LifeHeirarchi.default_font)
btn = QPushButton("Edit")
btn.setFont(LifeHeirarchi.default_font)
def edit():
instance = Group._instances[list(Group._instances.keys())[group_combo.currentIndex()]]
newData= (attr_combo.currentText(), replace.text())
instance.info = newData
self.close()
btn.clicked.connect(edit)
self.vbox.addLayout(h1)
self.vbox.addWidget(attr_combo)
self.vbox.addWidget(replace)
self.vbox.addWidget(btn)
self.adjustSize()
self.setFixedWidth(530)
def deleteDialog(self):
self.vbox = QVBoxLayout()
self.setLayout(self.vbox)
group_combo = QComboBox()
group_combo.setFont(LifeHeirarchi.default_font)
for instance in Group._instances.keys():
group_combo.addItem(str(instance))
group_combo.setCurrentIndex(0)
del_btn = QPushButton("Choose Group")
del_btn.setFont(LifeHeirarchi.default_font)
def delete_group():
ins = list(Group._instances.keys())[group_combo.currentIndex()]
Group.delete(ins[0], ins[1])
self.close()
del_btn.clicked.connect(delete_group)
self.vbox.addWidget(group_combo)
self.vbox.addWidget(del_btn)
self.adjustSize()
self.setFixedWidth(350)
def __init__(self):
super().__init__()
self.mainPage()
def mainPage(self):
self.setWindowTitle("Life Heirarchi")
tree_font = QFont("Times New Roman", 18)
self.tree_label = QLabel('Life\n└── Domain\n └── Kingdom\n └── Phylum\n └── Class\n └── Order\n └── Family\n └── Genus\n └── Species\n')
self.tree_label.setFont(tree_font)
input_layout = QHBoxLayout()
file_button = QPushButton("Read From Text File")
file_button.setFont(self.default_font)
file_button.clicked.connect(self.fileDialog)
input_layout.addWidget(file_button)
edit_layout = QHBoxLayout()
add_group_btn = QPushButton("Add Group")
add_group_btn.setFont(self.default_font)
add_group_btn.clicked.connect(self.addGroup)
edit_layout.addWidget(add_group_btn)
edit_group_btn = QPushButton("Edit Group")
edit_group_btn.setFont(self.default_font)
edit_group_btn.clicked.connect(self.editGroup)
edit_layout.addWidget(edit_group_btn)
del_group_btn = QPushButton("Delete Group")
del_group_btn.setFont(self.default_font)
del_group_btn.clicked.connect(self.removeGroup)
edit_layout.addWidget(del_group_btn)
search_btn = QPushButton("Search")
search_btn.setFont(self.default_font)
search_btn.clicked.connect(self.search)
self.main_layout = QVBoxLayout()
self.main_layout.addWidget(self.tree_label)
self.main_layout.addLayout(input_layout)
self.main_layout.addLayout(edit_layout)
self.main_layout.addWidget(search_btn)
self.main_layout.setAlignment(Qt.AlignmentFlag.AlignCenter)
# Create a central widget and set the main layout
self.setLayout(self.main_layout)
self.adjustSize()
self.setFixedSize(self.size().width(), self.size().height())
def fileDialog(self):
file_dialog = QFileDialog()
file_dialog.setNameFilter("Text Files (*.txt)")
if file_dialog.exec() == QFileDialog.DialogCode.Accepted:
selected_file = file_dialog.selectedFiles()[0]
file = selected_file
Group.readFromFile(file)
if Group._instances:
self.tree_label.setText(str(Group.fullTreeView()))
else:
self.tree_label.setText('Life\n└── Domain\n └── Kingdom\n └── Phylum\n └── Class\n └── Order\n └── Family\n └── Genus\n └── Species\n')
def addGroup(self):
dialog = self.Dialog("Add Group", self)
dialog.addDialog()
dialog.exec()
if Group._instances:
self.tree_label.setText(str(Group.fullTreeView()))
else:
self.tree_label.setText('Life\n└── Domain\n └── Kingdom\n └── Phylum\n └── Class\n └── Order\n └── Family\n └── Genus\n └── Species\n')
def editGroup(self):
dialog = self.Dialog("Delete", self)
dialog.editDialog()
dialog.exec()
if Group._instances:
self.tree_label.setText(str(Group.fullTreeView()))
else:
self.tree_label.setText('Life\n└── Domain\n └── Kingdom\n └── Phylum\n └── Class\n └── Order\n └── Family\n └── Genus\n └── Species\n')
def removeGroup(self):
dialog = self.Dialog("Delete", self)
dialog.deleteDialog()
dialog.exec()
if Group._instances:
self.tree_label.setText(str(Group.fullTreeView()))
else:
self.tree_label.setText('Life\n└── Domain\n └── Kingdom\n └── Phylum\n └── Class\n └── Order\n └── Family\n └── Genus\n └── Species\n')
def search(self):
dialog = self.Dialog("Search", self)
dialog.searchDialog()
dialog.exec()
if Group._instances:
self.tree_label.setText(str(Group.fullTreeView()))
else:
self.tree_label.setText('Life\n└── Domain\n └── Kingdom\n └── Phylum\n └── Class\n └── Order\n └── Family\n └── Genus\n └── Species\n')
if __name__ == "__main__":
app = QApplication(sys.argv)
window = LifeHeirarchi()
window.show()
sys.exit(app.exec()) | mahdi-mahmoudkhani/Encyclopedia-of-Animal-Species | GUI.py | GUI.py | py | 15,559 | python | en | code | 1 | github-code | 13 |
35296154458 | from collections import defaultdict
# ABC071 C: choose sticks to form the largest possible rectangle.
# A rectangle needs two pairs of equal-length sticks (four equal sticks
# make a square), so only lengths with multiplicity >= 2 matter.
n = int(input())
a = list(map(int, input().split()))
# Count how many sticks exist for each length.
d = defaultdict(int)
for i in a:
    d[i] += 1
# Scan lengths from longest to shortest so the first candidates found
# are the largest (greedy is optimal here).
d2 = sorted(d.items(), reverse=True)
h = []   # up to two lengths that are available in pairs (count >= 2), largest first
h2 = []  # one length available four times (count >= 4) -> candidate square side
for i in d2:
    # Bug fix: the original condition was `len(h)==2 and len(h)==1`, which is
    # always False, so the loop never exited early. The intended early exit is
    # "both buckets are full". Harmless for correctness, but fixed for intent.
    if (len(h)==2) and (len(h2)==1):
        break
    if (len(h2)<1) and (i[1]>=4):
        h2.append(i[0])
    if (len(h)<2) and (i[1]>=2):
        h.append(i[0])
# Best rectangle from the two largest paired lengths.
if len(h)<2:
    ans1 = 0
else:
    ans1 = h[0] * h[1]
# Best square from a length with at least four sticks.
if len(h2)==0:
    ans2 = 0
else:
    ans2 = h2[0] ** 2
print(max(ans1, ans2, 0)) | nozomuorita/atcoder-workspace-python | abc/abc071/c.py | c.py | py | 653 | python | en | code | 0 | github-code | 13 |
20097369424 | #AULA 1: CONDICIONAIS IF, IDENTAÇÃO E COMO FUNCIONA IF DENTRO DE IF
meta = 50000
qtde_vendas = 150000
rendimento = 0.5
preco = 1500
custo = qtde_vendas*rendimento*preco
faturamento = qtde_vendas*preco
if(qtde_vendas > 5*meta) | (rendimento < 0.7):
print('A meta foi batida, quantidade vendida {} {}' .format(qtde_vendas, meta))
if(qtde_vendas < 100000):
print('A quantidade vendida foi {}, logo, nao chegamos no valor de 100.000' .format(qtde_vendas))
else:
print('Alem da meta ser batida, conseguimos bater o record de 100.000 vendidos')
else:
print('Nao batemos a meta')
#AULA 2: ELIF
meta = 20000
vendas = 10000
if vendas > 3*meta:
print('Bateu record')
elif vendas > 2*meta:
print('Dobrou a meta {}' .format(2*meta))
elif vendas > 1.5*meta:
print('Superou a meta em 50% mas nao dobrou')
elif vendas > meta:
print('Bateu a meta')
else:
print('Nao bateu a meta')
#AULA 3: COMPARADORES
faturamento1 = 1500
faturamento2 = 1500
email = 'vitor.yagocpgmail.com'
if faturamento1 == faturamento1:
print('Faturamentos iguais')
else:
print('Faturamentos diferentes')
if not '@' in email:
print('Email invalido')
else:
print('Email valido')
#AULA 4: EXERCÍCIOS
vendas_funcionario1 = int(input('Qual foi as vendas do funcionario 1?'))
vendas_funcionario2 = int(input('Qual foi as vendas do funcionario 2?'))
vendas_funcionario3 = int(input('Qual foi as vendas do funcionario 3?'))
if vendas_funcionario1 >= 1000 or vendas_funcionario3 >= 5000:
if vendas_funcionario1 >= 2000:
print('O funcionario 1 ganhou {} de bonus.' .format(0.15*vendas_funcionario1))
else:
print('O funcionario 1 ganhou {} de bonus. E o funcionario 3 ficou rico' .format(0.1*vendas_funcionario1))
else:
print('O funcionario 1 nao ganhou bonus.')
if vendas_funcionario2 >= 1000:
if vendas_funcionario2 >= 2000:
print('O funcionario 2 ganhou {} de bonus.' .format(0.15*vendas_funcionario2))
else:
print('O funcionario 2 ganhou {} de bonus.' .format(0.1*vendas_funcionario2))
else:
print('O funcionario 2 nao ganhou bonus.')
if vendas_funcionario3 >= 1000:
if vendas_funcionario3 >= 2000:
print('O funcionario 3 ganhou {} de bonus.' .format(0.15*vendas_funcionario3))
else:
print('O funcionario 3 ganhou {} de bonus.' .format(0.1*vendas_funcionario3))
else:
print('O funcionario 3 nao ganhou bonus.')
nome_produto = input('Digite o nome do produto:')
categoria = input('Digite a categoria do produto:')
quantidade = int(input('Digite a quantidade em estoque:'))
if categoria and nome_produto and quantidade:
if categoria == 'alimentos' and quantidade <= 50:
print('Solicitar {} à equipe de compras, temos apenas {} unidades em estoque.' .format(nome_produto, quantidade))
elif categoria == 'bebidas' and quantidade <= 75:
print('Solicitar {} à equipe de compras, temos apenas {} unidades em estoque.' .format(nome_produto, quantidade))
elif categoria == 'limpeza' and quantidade <= 30:
print('Solicitar {} à equipe de compras, temos apenas {} unidades em estoque.' .format(nome_produto, quantidade))
else:
print('Preencha corretamente.') | vitoryago/Python_Studies | Aula_01_condicionais.py | Aula_01_condicionais.py | py | 3,308 | python | pt | code | 0 | github-code | 13 |
74718340816 | import inspect
from dataclasses import dataclass, field
from typing import Optional, Type
from boto3.dynamodb.types import TypeDeserializer
from marshy import ExternalType, get_default_context
from marshy.marshaller.marshaller_abc import MarshallerABC
from marshy.marshaller_context import MarshallerContext
from marshy.types import ExternalItemType
from servey.action.action import Action
from servey.servey_aws.event_handler.event_handler_abc import (
EventHandlerABC,
EventHandlerFactoryABC,
)
from persisty.trigger.after_create_trigger import AfterCreateTrigger
from persisty.trigger.after_delete_trigger import AfterDeleteTrigger
from persisty.trigger.after_update_trigger import AfterUpdateTrigger
@dataclass
class DynamodbPostProcessEventHandler(EventHandlerABC):
    """
    Event handler for events in DynamoDB stream format.

    Fires the action's create / update / delete trigger depending on which
    images (old / new) are present in each stream record.
    """

    action: Action
    item_marshaller: MarshallerABC

    def is_usable(self, event: ExternalItemType, context) -> bool:
        """Return True if the event looks like a DynamoDB Streams event."""
        # noinspection PyBroadException
        try:
            # noinspection PyTypeChecker
            result = bool(event["Records"][0]["dynamodb"])
            return result
        except Exception:
            return False

    def handle(self, event: ExternalItemType, context) -> ExternalType:
        """Process each stream record, dispatching the matching trigger."""
        deserializer = TypeDeserializer()
        for record in event["Records"]:
            # Bug fix: in the DynamoDB Streams record format the images live
            # under record["dynamodb"], not directly on the record (is_usable
            # above already relies on that nesting). Either image may also be
            # absent (INSERT has no OldImage, REMOVE has no NewImage), so use
            # .get() instead of indexing.
            stream_data = record["dynamodb"]
            new_image = self._load_image(stream_data.get("NewImage"), deserializer)
            old_image = self._load_image(stream_data.get("OldImage"), deserializer)
            if old_image and new_image:
                if _has_trigger(self.action, AfterUpdateTrigger):
                    self.action.fn(old_image, new_image)
            elif new_image:
                if _has_trigger(self.action, AfterCreateTrigger):
                    self.action.fn(new_image)
            elif _has_trigger(self.action, AfterDeleteTrigger):
                self.action.fn(old_image)

    def _load_image(self, image, deserializer: TypeDeserializer):
        """Deserialize a raw DynamoDB image into a domain item (or None).

        Bug fix: TypeDeserializer.deserialize expects a single typed value
        like {"S": "..."}; a whole image is a mapping of attribute name to
        typed value, so each attribute must be deserialized individually.
        """
        if not image:
            return None
        plain = {key: deserializer.deserialize(value) for key, value in image.items()}
        return self.item_marshaller.load(plain)
@dataclass
class DynamodbPostProcessEventHandlerFactory(EventHandlerFactoryABC):
    """Builds a DynamodbPostProcessEventHandler for actions carrying storage triggers."""

    marshaller_context: MarshallerContext = field(default_factory=get_default_context)

    def create(self, action: Action) -> Optional[EventHandlerABC]:
        """Return a handler when the action has any after-create/update/delete trigger, else None."""
        trigger_types = (AfterCreateTrigger, AfterUpdateTrigger, AfterDeleteTrigger)
        if not any(_has_trigger(action, trigger_type) for trigger_type in trigger_types):
            return None
        # The item type is taken from the annotation of the action's first parameter.
        first_param = next(iter(inspect.signature(action.fn).parameters.values()))
        marshaller = self.marshaller_context.get_marshaller(first_param.annotation)
        return DynamodbPostProcessEventHandler(action, marshaller)
def _has_trigger(action_: Action, trigger_type: Type):
    """Return True if any of the action's triggers is an instance of trigger_type."""
    return any(isinstance(trigger, trigger_type) for trigger in action_.triggers)
| tofarr/persisty | persisty/trigger/dynamodb_post_process_event_handler.py | dynamodb_post_process_event_handler.py | py | 3,122 | python | en | code | 1 | github-code | 13 |
6609284409 | from training import get_data
import networkx as nx
from node2vec import Node2Vec
import numpy as np
import os
data_dict = get_data('ml-100k')
n_items = data_dict['n_items']
train_data = data_dict['train_data']
vad_data_tr = data_dict['vad_data_tr']
vad_data_te = data_dict['vad_data_te']
user_info = data_dict['user_info']
userId_map = data_dict['userId_map']
vad_userId_map = data_dict['vad_userId_map']
n2v_vectors = data_dict['n2v_vectors']
data_tr = train_data.astype(np.float32)
data_tr_dense = data_tr.todense()
my_graph1 = nx.Graph()
my_graph1.add_nodes_from(userId_map.values())
for i in range(data_tr_dense.shape[0]):
for j in range(i+1,data_tr_dense.shape[0]):
weightCK = np.inner(data_tr_dense[i],data_tr_dense[j]).item()
if weightCK >0:
my_graph1.add_edge(userId_map[i],userId_map[j],weight = weightCK)
node2vec1 = Node2Vec(graph=my_graph1, # target graph
dimensions=50, # embedding dimension
walk_length=10, # number of nodes in each walks
p = 1, # return hyper parameter
q = 0.0001, # inout parameter, q값을 작게 하면 structural equivalence를 강조하는 형태로 학습됩니다.
weight_key='weight', # if weight_key in attrdict
num_walks=10,
workers=2,
temp_folder = '/content/gdrive/My Drive/n2vtmp'
)
model1 = node2vec1.fit(window=2)
model1.wv.save(os.path.join('.','data','ml-100k','node2vec_model_vectors'))
user_info_cut = user_info[user_info['userId'].isin(userId_map.values())]
my_graph2 = nx.Graph()
my_graph2.add_nodes_from(userId_map.values())
for i in range(user_info_cut.values.shape[0]):
for j in range(i+1,user_info_cut.values.shape[0]):
weightCK = np.inner(user_info_cut.values[i],user_info_cut.values[j]).item()
if weightCK >0:
my_graph2.add_edge(userId_map[i],userId_map[j],weight = weightCK)
node2vec2 = Node2Vec(graph=my_graph2, # target graph
dimensions=50, # embedding dimension
walk_length=10, # number of nodes in each walks
p = 1, # return hyper parameter
q = 0.0001, # inout parameter, q값을 작게 하면 structural equivalence를 강조하는 형태로 학습됩니다.
weight_key='weight', # if weight_key in attrdict
num_walks=10,
workers=2,
temp_folder = '/content/gdrive/My Drive/n2vtmp'
)
model2 = node2vec2.fit(window=2)
model2.wv.save(os.path.join('.','data','ml-100k','node2vec_userInfo_vectors'))
| e406hsy/ConditionalRaCT | setup_side_data.py | setup_side_data.py | py | 2,796 | python | en | code | 0 | github-code | 13 |
39852694349 | #!/usr/bin/env python3
"""
celcius_conversion version 1.4
Python 3.7
"""
def to_fahrenheit():
    """Prompt for a Fahrenheit temperature and print its Celsius equivalent.

    Note: despite the function name, the conversion performed is
    Fahrenheit -> Celsius (the original docstring had it backwards).
    Re-prompts until the user enters a valid integer.
    """
    degree_sign = "\N{DEGREE SIGN}"
    # Loop instead of the original recursive retry so that repeated bad
    # input can no longer grow the call stack.
    while True:
        try:
            fahrenheit_convert = int(input("Enter temperature in Fahrenheit. "))
        except ValueError:
            print("Please enter a number.")
        else:
            break
    conversion_formula = (fahrenheit_convert - 32) / 1.8
    print(
        f"{fahrenheit_convert}{degree_sign}F is {round(conversion_formula, 2)}{degree_sign}C."
    )
if __name__ == "__main__":
to_fahrenheit()
| mcmxl22/Weather | celsius_conversion.py | celsius_conversion.py | py | 618 | python | en | code | 0 | github-code | 13 |
22195268040 | # Uses Python3
import time
import numpy as np
# The fastest O(1)
def fibonacci_formula(n):
    """Closed-form (Binet) Fibonacci: O(1), exact while float precision allows."""
    golden_ratio = 0.5 * (np.sqrt(5) + 1)
    # The conjugate term |psi|^n is < 1 for n >= 0 and vanishes under
    # rounding, so a single rounded power of phi suffices.
    return int(np.round(golden_ratio ** n / np.sqrt(5)))
# Fastest O(n)
def fibonacci_iterative(n):
    """Bottom-up Fibonacci in O(n) time and O(1) space."""
    if n in (0, 1):
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def fibonacci_memoized_recursion(n,fib_array):
    # Fill fib_array[n] in place via top-down recursion with memoisation.
    # Contract: fib_array is pre-sized to at least n+1 with fib_array[1] == 1;
    # a stored 0 means "not yet computed" (safe because every Fibonacci
    # number for n >= 1 is positive). Nothing is returned; the caller reads
    # the result out of fib_array.
    if n in [0,1]: return
    if fib_array[n] == 0:
        # Ensure both predecessors are available before combining them.
        if fib_array[n-1] == 0:
            fibonacci_memoized_recursion(n-1,fib_array)
        if fib_array[n-2] == 0:
            fibonacci_memoized_recursion(n-2,fib_array)
        fib_array[n] = fib_array[n-1] + fib_array[n-2]
    return
def fibonacci_memoisation(n):
    """Fibonacci via a memo table, filled bottom-up (recursive helper inlined)."""
    if n in [0, 1]:
        return n
    fib_array = [0] * (n + 1)
    fib_array[1] = 1
    for idx in range(2, n + 1):
        fib_array[idx] = fib_array[idx - 1] + fib_array[idx - 2]
    return fib_array[n]
def fibonacci_recursive(n):
    """Naive exponential-time recursive Fibonacci (reference implementation).

    Returns None for negative n, matching the original's fall-through.
    """
    if n in [0, 1]:
        return n
    if n > 1:
        return fibonacci_recursive(n - 1) + fibonacci_recursive(n - 2)
def main():
    """Read n from stdin, validate it, and print the n-th Fibonacci number."""
    n = int(input())
    # Validate with an explicit exception rather than `assert`, which is
    # silently stripped when Python runs with -O.
    if n < 0:
        raise ValueError("n must be non-negative")
    start_time = time.time()
    #fn = fibonacci_recursive(n)
    fn = fibonacci_iterative(n)
    #fn = fibonacci_memoisation(n)
    #fn = fibonacci_formula(n)
    end_time = time.time()
    print(fn)
    #print(end_time - start_time)
if __name__ == '__main__':
main()
| sandeeppalakkal/Algorithmic_Toolbox_UCSD_Coursera | Programming_Challenges_Solutions/week2_algorithmic_warmup/1_fibonacci_number/fibonacci.py | fibonacci.py | py | 1,466 | python | en | code | 0 | github-code | 13 |
27970074153 | import datetime
import logging
from .client import LumberjackClient
_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" # Logstash/Golang parsable
class LumberjackHandler(logging.Handler):
    """
    Logging handler that pushes log events to Logstash over the
    lumberjack protocol.
    """

    def __init__(self, host, port):
        """Connect to the Logstash endpoint at host:port."""
        logging.Handler.__init__(self)
        self._client = LumberjackClient(host, port)
        self._client.connect()

    def close(self):
        """Close the client connection and tidy up the handler.

        Bug fix: the base-class close() must also be called so the handler
        is removed from logging's internal handler bookkeeping on shutdown.
        """
        self._client.close()
        logging.Handler.close(self)

    def emit(self, record):
        """Format the record and send it as a single lumberjack event.

        Failures are routed through Handler.handleError, per the logging
        module's convention, so a broken connection cannot crash the
        application that is merely logging. (The previous docstring was
        copied from the stdlib StreamHandler and did not apply here.)
        """
        try:
            event = {
                "@timestamp": datetime.datetime.utcfromtimestamp(record.created).strftime(_TIME_FORMAT),
                "message": self.format(record),
                "python.lineno": record.lineno,
                "python.level": record.levelname,
                "python.filename": record.filename,
                "python.funcName": record.funcName
            }
            self._client.send([event])
        except Exception:
            self.handleError(record)
| jackric/pylumberbeats | pylumberbeats/handlers.py | handlers.py | py | 1,089 | python | en | code | 0 | github-code | 13 |
19348427692 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 31 17:57:07 2017
@author: Julian
"""
from selenium import webdriver
import matplotlib.pyplot as plt
import re
from nltk.corpus import stopwords
import os
import pysentiment as ps
#from wordcloud import WordCloud
#path_direct = os.getcwd()
#os.chdir(path_direct + '/pyning')
p = "C:/Users/Julian/pyning"
os.chdir(p)
# Start Selenium
browser = webdriver.Firefox()
url = "http://teachingamericanhistory.org/library/document/what-to-the-slave-is-the-fourth-of-july/"
# Extract text
browser.get(url)
id = "doc-tabs-full"
text = browser.find_element_by_id(id).text
browser.close()
browser.stop_client()
# find and delete <...> combinations
# find and delete /...> combinations
# Takes only lists as input
# Returns list as output
"""
def SearchAndReplaceSeq(html, opensign, closesign):
openbool = False
nText = range(len(text))
for i in nText:
print("i = ", i)
print("outer loop")
if text[i] in opensign:
loc = opensign.index('<')
openbool = True
print("deleting ", text[i])
text[i] = "" # delete
while openbool:
print("inner loop")
if text[i] != closesign[loc]:
print(i, "deleting ", text[i])
text[i] = ""
i += 1
else:
print(i, "deleting ", text[i])
text[i] = ""
openbool = False
i += 1
#continue # switch to next mark, first one is always open
else:
print("keeping", text[i])
print("outer loop down")
continue #i += 1
return(text);
textout = SearchAndReplaceSeq(html = text, opensign = ['<', '/', '{'], closesign = ['>', '>', '}'])
s = "".join(textout)
"""
# Some expressions still left
# Differ between quotes!
expression = "[()]|(\“)|(\”)|(\“)|(\”)|(\,|\.|-|\;|\<|\>)|(\\n)|(\\t)|(\=)|(\|)|(\-)|(\')|(\’)"
cleantextCAP = re.sub(expression, '', text)
cleantext = cleantextCAP.lower()
# Count and create dictionary
# Tokenise the cleaned text and count word frequencies.
dat = list(cleantext.split())
dict1 = {}
# Fix: the original used dat.count(word) inside the loop, rescanning the
# whole token list once per token (O(n^2)); incrementing a running counter
# is O(n) and yields the identical final mapping (and the same printed
# index trace).
for i, word in enumerate(dat):
    print(i)
    dict1[word] = dict1.get(word, 0) + 1
# Filter Stopwords
keys = list(dict1)
filtered_words = [word for word in keys if word not in stopwords.words('english')]
dict2 = dict((k, dict1[k]) for k in filtered_words if k in filtered_words)
#keys in stopwords.words("english")
# Resort in list
# Reconvert to dictionary
def valueSelection(dictionary, length, startindex=0):
    """Return the `length` highest-valued entries of `dictionary` as a dict.

    Entries are ranked by descending value (ties broken by descending key),
    starting `startindex` positions from the top. If `length` exceeds the
    dictionary size, a message is printed and None is returned.
    """
    if length > len(dictionary):
        return print("length is longer than dictionary length")
    ranked = sorted(dictionary.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
    window = ranked[startindex:startindex + length]
    return {key: dictionary[key] for key, _ in window}
dictshow = valueSelection(dictionary = dict2, length = 7, startindex = 0)
# Save dictionaries for wordcloud
text_file = open("Output.txt", "w")
text_file.write(str(cleantext))
text_file.close()
# Plot
n = range(len(dictshow))
plt.bar(n, dictshow.values(), align='center')
plt.xticks(n, dictshow.keys())
plt.title("Most frequent Words")
plt.savefig("plot.png")
# Overview
overview = valueSelection(dictionary = dict2, length = 1000, startindex = 0)
nOverview = range(len(overview.keys()))
plt.bar(nOverview, overview.values(), color = "g", tick_label = "")
plt.title("Word Frequency Overview")
plt.xticks([])
plt.savefig("overview.png")
# Sentiment Analysis
hiv4 = ps.HIV4()
tokens = hiv4.tokenize(cleantext)
score = hiv4.get_score(tokens)
print(score)
# Polarity
# Formula: (Positive - Negative)/(Positive + Negative)
# Subjectivity
# Formula: (Positive + Negative)/N
"""
# Wordcloud
inputWordcloud = str(dict2.keys())
# Generate a word cloud image
wordcloud = WordCloud(path.join("symbola.ttf")).generate(inputWordcloud)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
wordcloud2 = WordCloud(max_font_size=40).generate(inputWordcloud)
plt.figure()
plt.imshow(wordcloud2, interpolation="bilinear")
plt.axis("off")
plt.show()
dict2.index("would")
dict2.get("would")
dict2.keys()
"""
| PQJHU/DEDA_2017 | DEDA_Projects/DEDA_WebScrapingAndWordFrequency/DEDA_WebScrapingAndWordFrequency.py | DEDA_WebScrapingAndWordFrequency.py | py | 4,967 | python | en | code | 3 | github-code | 13 |
33220060986 | # -*- encoding:utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation
def update_position(num, data, plts):
x1, y1, n = data
theta = np.arange(2* num *np.pi,2* (num+1)*n*np.pi,np.pi/50)
x2=x1[num]+1/n * np.cos(theta * n)
y2=y1[num]+1/n * np.sin(theta * n)
circular, scatter = plts
x3, y3 =scatter.get_data()
x3, y3 =np.asarray(x3),np.asarray(y3)
x3, y3 =np.append(x3, [x2[-num]]),np.append(y3,[y2[-num]])
scatter.set_data(x3,y3)
return circular,scatter
fig=plt.figure(figsize=(8,8))
plt.ylim([-1.5,1.5])
plt.xlim([-1.5,1.5])
plt.grid(True)
ax=plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',0))
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data',0))
n=3
theta = np.arange(0,2*np.pi, np.pi/50)
x0=(n+1)/n * np.cos(theta)
y0=(n+1)/n * np.sin(theta)
plt.plot(x0,y0)
x1= np.cos(theta)
y1= np.sin(theta)
plt.plot(x1,y1,'--')
data=[x1,y1,n]
ani = animation.FuncAnimation(fig,update_position,50,data,plt)
plt.show() | aarongis/pythondev | Python-learn/matplotlab/test-plot3.py | test-plot3.py | py | 1,184 | python | en | code | 0 | github-code | 13 |
8343887866 | fees = [180, 5000, 10, 600]
records = ["05:34 5961 IN", "06:00 0000 IN", "06:34 0000 OUT", "07:59 5961 OUT", "07:59 0148 IN", "18:59 0000 IN", "19:09 0148 OUT", "22:59 5961 IN", "23:00 5961 OUT"]
import math
parking = []
car = [[] for _ in range(10000)]
def solution(fees, records):
    """Programmers 'parking fee' problem: compute each car's total fee.

    fees = [base_minutes, base_fee, unit_minutes, unit_fee];
    records are "HH:MM CARNO IN|OUT" strings in chronological order.

    NOTE(review): relies on the module-level globals `car` (per-car stack
    of entry times) and `parking` (currently-parked car numbers), so
    repeated calls are not independent - the globals must be reset
    between runs.
    """
    time = dict()  # car number -> accumulated parked minutes
    for i in range(len(records)):
        rec = records[i].split(" ")
        carN = int(rec[1])
        if rec[2] == "IN":
            car[carN].append(rec[0])
            parking.append(carN)
        else:
            # Pair this OUT with the most recent IN for the same car.
            _in = car[carN].pop().split(":")
            _out = rec[0].split(":")
            check(carN, _in, _out, time)
    # Cars still inside at the end of the day are charged until 23:59.
    leng = len(parking)
    end = "23:59".split(":")
    if leng > 0:
        for i in range(leng):
            # check() removes parking[0] on each call, so always read the head.
            # NOTE(review): uses car[...][0] - presumably each remaining car
            # has exactly one pending entry time; confirm for re-entry cases.
            start = car[parking[0]][0].split(":")
            check(parking[0], start, end, time)
    # Bill in ascending car-number order.
    total = []
    tim = dict(sorted(time.items()))
    for key in tim.keys():
        if tim[key] <= fees[0]:
            pay = fees[1]
        else:
            # Base fee plus one unit fee per started unit beyond the base time.
            pay = fees[1] + math.ceil((tim[key]-fees[0])/fees[2])*fees[3]
        total.append(pay)
    return total
def check(carN, _in, _out, time):
    """Accumulate the minutes between _in and _out for car carN.

    _in/_out are [HH, MM] string pairs. Also removes one occurrence of carN
    from the global `parking` list - the caller treats that as "this
    interval has been billed".
    """
    global parking
    parking.remove(carN)
    inH = int(_in[0])
    inM = int(_in[1])
    outH = int(_out[0])
    outM = int(_out[1])
    # Duration in minutes; records arrive in chronological order, so this
    # is non-negative.
    total = (outH - inH)*60 + (outM - inM)
    if not carN in time:
        time[carN] = total
    else:
        time[carN] += total
solution(fees, records) | rohujin97/Algorithm_Study | test/0911/PM/3.py | 3.py | py | 1,444 | python | en | code | 0 | github-code | 13 |
71083961939 | prices_list = [ # before or on 15th december / after 15th december
[24, 28.70], # Cake
[6.66, 9.80], # Souffle
[12.60, 16.98] # Baklava
] # prices are lv./pc
sweet_type = input()        # "Cake", "Souffle" or "Baklava"
n_sweets = int(input())     # number of pieces ordered
day_number = int(input())   # day of December (1..31)
sweet_price = 0
# Pick the price row for the chosen sweet.
# NOTE(review): any other input leaves `prices` undefined and the access
# below raises NameError - acceptable given the exam task's guaranteed input.
if sweet_type == "Cake":
    prices = prices_list[0]
elif sweet_type == "Souffle":
    prices = prices_list[1]
elif sweet_type == "Baklava":
    prices = prices_list[2]
# Column 0 holds the price up to (and including) 15 December, column 1 after.
if day_number > 15:
    sweet_price = prices[1]
else:
    sweet_price = prices[0]
total_cost = sweet_price * n_sweets
# Volume discounts apply only before the 23rd: 25% off above 200 lv.,
# otherwise 15% off from 100 lv. upward.
if day_number < 23:
    if total_cost > 200:
        total_cost *= .75
    elif total_cost >= 100:
        total_cost *= .85
# Extra 10% off on or before 15 December.
if day_number < 16:
    total_cost *= .9
print(f'{total_cost:.2f}')
| bobsan42/SoftUni-Learning-42 | ProgrammingBasics/17myexam/03pastryshop.py | 03pastryshop.py | py | 765 | python | en | code | 0 | github-code | 13 |
6948676324 | from typing import *
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Max profit with unlimited transactions.

        Collecting every positive day-over-day gain is optimal, so sum the
        positive differences between consecutive prices.
        """
        return sum(
            max(today - yesterday, 0)
            for yesterday, today in zip(prices, prices[1:])
        )
if __name__ == '__main__':
sol=Solution()
nums=[7,1,5,3,6,4]
print(sol.maxProfit(nums))
| Xiaoctw/LeetCode1_python | 数组/买卖股票的最佳时机2_122.py | 买卖股票的最佳时机2_122.py | py | 422 | python | en | code | 0 | github-code | 13 |
23265043575 | import argparse
import numpy as np
import torch
from detectron2 import model_zoo
from detectron2.config import get_cfg
from datasets.register_coco import register_coco_dataset
from datasets.register_out_of_context import register_out_of_context_dataset
from tasks import task_a, task_b, task_c, task_d, task_e
def _parse_args() -> argparse.Namespace:
usage_message = """
Week3 - Challenges of Object Detection and Instance Segmentation.
"""
parser = argparse.ArgumentParser(usage=usage_message)
parser.add_argument("--mode", "-m", type=str, default="draw_dataset",
help="Mode (task_a, task_b, task_c, task_d, task_e)")
parser.add_argument("--seed", "-s", type=int, default=42,
help="Seed")
parser.add_argument("--output_dir", "-o", type=str, default="output",
help="Output directory")
# Model settings
parser.add_argument("--model", "-mo", type=str, default="mask_rcnn",
help="Model (mask_rcnn, faster_rcnn)")
parser.add_argument("--checkpoint", "-ch", type=str, default=None,
help="Model weights path")
# Dataset settings
parser.add_argument("--load_dataset", "-tr", type=str, default="coco_2017",
help="Load dataset")
# Other
parser.add_argument("--sequence", "-seq", type=str, default="0000",
help="Sequence to draw in draw_sequence mode")
return parser.parse_args()
def get_base_cfg(args):
    """Build the base detectron2 config for the requested model.

    Uses pretrained COCO weights unless an explicit checkpoint is given.
    Raises ValueError for an unknown model name.
    """
    cfg = get_cfg()
    config_paths = {
        "mask_rcnn": "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml",
        "faster_rcnn": "COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml",
    }
    if args.model not in config_paths:
        raise ValueError("Unknown model.")
    config_path = config_paths[args.model]
    cfg.merge_from_file(model_zoo.get_config_file(config_path))
    cfg.DATALOADER.NUM_WORKERS = 0
    if args.checkpoint is None:
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(config_path)
    else:
        cfg.MODEL.WEIGHTS = args.checkpoint
    cfg.MODEL.DEVICE = "cuda"
    cfg.OUTPUT_DIR = args.output_dir
    return cfg
def main(args: argparse.Namespace):
    """Seed RNGs, build the config, register the dataset, and run the task."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cfg = get_base_cfg(args)
    # Optional dataset registration; unknown names are silently ignored,
    # matching the original if/elif fall-through.
    dataset_loaders = {
        "coco": register_coco_dataset,
        "out_of_context": register_out_of_context_dataset,
    }
    loader = dataset_loaders.get(args.load_dataset)
    if loader is not None:
        loader(cfg)
    # Dispatch to the selected task module; unknown modes do nothing.
    task_modules = {
        "task_a": task_a,
        "task_b": task_b,
        "task_c": task_c,
        "task_d": task_d,
        "task_e": task_e,
    }
    task = task_modules.get(args.mode)
    if task is not None:
        task.run(cfg, args)
if __name__ == "__main__":
args = _parse_args()
main(args)
| Atenrev/M5-Visual-Recognition | week3/main.py | main.py | py | 3,112 | python | en | code | 0 | github-code | 13 |
559886064 | import pygame
from constants import *
from utils import *
from stack import Stack
from card import Card
from random import shuffle
from assetloader import AssetLoader
from difficulty import Difficulty
class Board:
instance: "Board" = None
board_top = None
board_main = None
def quality(self):
if self.cached_quality is None:
q = self.depth
q -= 2 * sum([not ((not len(stack.cards)) or stack.complete) for stack in self.top_stacks])
q += 3 * sum([((not len(stack.cards)) or stack.complete) for stack in self.stacks])
q += 3 * sum([stack.complete for stack in self.top_stacks])
q += 15 * sum([stack.complete for stack in self.bottom_stacks])
uniq = 0
for stack in self.stacks:
lastn = -1
for card in stack.cards:
if lastn != card.number:
lastn = card.number
uniq += 1
q -= uniq
self.cached_quality = q
return self.cached_quality
def __repr__(self):
return f"<Board(stacks={self.stacks})>"
    def __init__(self):
        """Create an empty board layout and register it as Board.instance."""
        self.y = 83                  # vertical screen offset of the board
        self.derived = None          # solver bookkeeping (move that led here)
        self.cached_quality = None   # lazily filled by quality()
        self.depth = 0               # search depth at which this board arose
        # Load the background images once and share them via class attributes.
        if Board.board_top is None:
            Board.board_top = load_image("backboard-top.png")
        if Board.board_main is None:
            Board.board_main = load_image("backboard-main.png")
        # Four foundation ("top") stacks; coordinates are screen pixels.
        self.top_stacks: list[Stack] = [
            Stack(302, 109, [], True, True),
            Stack(430, 109, [], True, True),
            Stack(430+128, 109, [], True, True),
            Stack(430+128+128, 109, [], True, True)
        ]
        # Eight tableau ("bottom") stacks laid out left to right.
        self.bottom_stacks = [
            Stack(46, 223+83, []),
            Stack(174, 223+83, []),
            Stack(128*1+174, 223+83, []),
            Stack(128*2+174, 223+83, []),
            Stack(128*3+174, 223+83, []),
            Stack(128*4+174, 223+83, []),
            Stack(128*5+174, 223+83, []),
            Stack(128*6+174, 223+83, [])
        ]
        # Flat view over all stacks, bottom stacks first.
        self.stacks = []
        self.stacks.extend(self.bottom_stacks)
        self.stacks.extend(self.top_stacks)
        Board.instance = self
def set_difficulty(self, difficulty: Difficulty):
for t in self.top_stacks:
t.locked = True
# noinspection PyTypeChecker
for i in range(difficulty.value):
self.top_stacks[i].locked = False
def copy(self):
b = Board()
b.top_stacks = [ts.copy() for ts in self.top_stacks]
b.bottom_stacks = [bs.copy() for bs in self.bottom_stacks]
b.stacks = []
b.stacks.extend(b.bottom_stacks)
b.stacks.extend(b.top_stacks)
b.derived = self.derived
return b
    def randomize_game(self):
        """Deal a fresh shuffled game: 40 cards (numbers 1..10, four of
        each) into the eight bottom stacks, five cards apiece."""
        for stack in self.stacks:
            stack.cards = []
        new_cards = []
        AssetLoader.play_sound(sound=AssetLoader.deal_sound, volume=1)
        groups = list(range(1, 11))
        # 40 staggered animation delays, one per card, assigned in random
        # order so the deal animation looks scattered.
        anims = [_*0.426 for _ in list(range(40))]
        shuffle(anims)
        for n in groups:
            for _ in range(4):
                i = anims[0]
                # Negative delay: the card's animation starts after a pause.
                new_cards.append(Card(0, 0, n, -i))
                anims.pop(0)
        shuffle(new_cards)
        # Deal five cards to each of the eight bottom stacks (8 * 5 = 40).
        for stack in self.bottom_stacks:
            for _ in range(5):
                if not len(new_cards):
                    continue
                stack.cards.append(new_cards[0])
                new_cards = new_cards[1:]
    def hash(self) -> str:
        # Stable fingerprint of the position: concatenate each stack's hash
        # and digest with sha256. NOTE(review): sha256 is not imported here
        # explicitly - presumably provided by the `constants`/`utils` star
        # imports; confirm.
        return sha256("".join([stack.hash() for stack in self.stacks]).encode("utf-8")).hexdigest()
def unlock_stack(self):
for top in self.top_stacks:
if top.locked:
top.locked = False
return
    def draw(self, screen: pygame.Surface):
        """Render the board: background panels, then stacks, then cards.

        Cards are sorted by y, with grabbed cards offset by +3000 so a
        dragged card always renders on top of everything else.
        """
        screen.blit(Board.board_top, (0, self.y))
        screen.blit(Board.board_main, (0, self.y+202))
        for stack in self.stacks:
            stack.draw(screen)
        cards = sorted([a for b in self.stacks for a in b], key=lambda _: _.grabbed*3000+_.y)
        for card in cards:
            card.draw(screen)
        # Track the most recently drawn board as the active singleton.
        Board.instance = self
def handle_event(self, event: pygame.event.Event):
def complete_set(cs_: list[Card], m=0): return len(cs_) == 4-m and list(set(map(lambda card23: card23.number, cs_))).__len__() == 1
animing_rn = any([any([card.anim != 1 for card in stack]) for stack in self.stacks])
if animing_rn:
return
for stack in self.stacks:
revstack = list(stack.__reversed__())
for index, card in enumerate(revstack):
# grab card
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
if card.rect.collidepoint(mp()):
# check if all cards above it are same number
if any([check2 for check2 in revstack[:index] if check2.number != card.number]):
return
for also in revstack[:index]:
also.grab()
card.grab()
AssetLoader.play_sound(AssetLoader.pickup_sound, 0.6)
return
# let go of card
if event.type == pygame.MOUSEBUTTONUP:
if event.button == 1:
if card.grabbed:
card.grabbed = False
# check if card can go ontop of other stack
overlapping_stacks = [check for check in self.stacks
if check.top_rect.collidepoint(mp())]
if len(overlapping_stacks):
check = overlapping_stacks[0]
# stack has no cards or stack top has similar number
if (not len(check.cards)) or check.cards[len(check.cards)-1].number == card.number:
# top row handling
selected = [card for card in stack if card.grabbed]
if check.show_free: # is top row
if len(check.cards): # cards already exist there
for select in selected:
select.grabbed = False
continue
if len(selected): # multiple cards dropped
if not complete_set(selected, m=1): # it wasn't a set of 4
for select in selected:
select.grabbed = False
continue
if check.locked:
for select in selected:
select.grabbed = False
continue
stack.remove_cards([card])
stack.remove_cards(selected)
check.add_cards([card])
check.add_cards(selected)
AssetLoader.play_sound(AssetLoader.place_sound, 0.6)
for select in selected:
select.grabbed = False
# if stack was completed and is in bottom row then unlock
if check.complete:
if check not in self.top_stacks:
self.unlock_stack()
| quasar098/kabufuda-solitaire | board.py | board.py | py | 7,992 | python | en | code | 2 | github-code | 13 |
20500016597 | '''
Quiz) 표준 체중을 구하는 프로그램을 작성하시오
*표준 체중 : 각 개인의 키에 적당한 체중
(성별에 따른 공식)
남자 : 키(m)^2 X 22
여자 : 키(m)^2 X 21
조건1 : 표준 체중은 별도의 함수 내에서 계산
*함수명 : std_weight
*전달값 : 키(height), 성별(gender)
조건2 : 표준 체중은 소수점 둘째자리까지 표시
(출력 예제)
키 175cm 남자의 표준 체중은 67.38kg 입니다.
'''
# 현재 BMI 지수 출력하기
# 남자와 여자 표준체중 구할 때, 각 22,21에서 23,22로 수정
def std_weight(height:int,gender:int):
    """Return a formatted standard-weight message.

    `height` is in centimetres; gender code 3 selects the male factor (23),
    any other code uses the female factor (22).
    """
    meters = height * 0.01
    if gender == 3:
        template, factor = '남자의 표준 체중은 %.2fkg 입니다.', 23
    else:
        template, factor = '여자의 표준 체중은 %.2fkg 입니다.', 22
    return template % (meters ** 2 * factor)
height,gender,weight = map(int, input('키와 성별, 체중을 입력해주세요 (남자면 3, 여자면 4 / 예. 175 3 67) : ').split())
print('키 {0}cm '.format(height) + std_weight(height,gender) )
bmi=weight/(height*0.01)**2
print('현재 체중의 BMI지수는 {:.3f}입니다.'.format(bmi)) | PKTOSE/2022_1PG | HW3/source4.py | source4.py | py | 1,163 | python | ko | code | 0 | github-code | 13 |
21553939869 | from typing import List
from ariadne import QueryType
from resolvers.directives import AuthDirective
from classes import Error
QUERY = QueryType()
@QUERY.field("checkMacaroon")
async def r_macaroon_check(_, info, caveats: List[str]):
    """Resolver for the ``checkMacaroon`` query.

    Verifies the request's bearer macaroon against *caveats*; returns the
    ``Error`` instance on failure and ``None`` on success.
    """
    def extract_macaroon(info):
        # Pull the bearer token out of the Authorization header.
        # NOTE(review): headers["Authorization"] raises KeyError when the
        # header is absent; the `if not auth` guard only covers an empty
        # value. A .get() lookup may be safer — confirm with the request type.
        auth = info.context["request"].headers["Authorization"]
        if not auth:
            return None
        return auth.replace("Bearer ", "")
    # Build a throwaway auth directive and point its macaroon lookup at the
    # request header instead of the schema-directive machinery.
    directive = AuthDirective("authchecker", {"caveats": caveats}, None, None, None)
    directive.get_macaroon = extract_macaroon
    res = directive.check_auth(info)
    if isinstance(res, Error):
        return res
    return None
| FeatherLightApp/FeatherLight-API | server/featherlight/resolvers/query/check_macaroon.py | check_macaroon.py | py | 656 | python | en | code | 3 | github-code | 13 |
16755991395 | """Brauer states."""
import numpy as np
from toqito.matrix_ops import tensor
from toqito.perms import perfect_matchings, permute_systems
from toqito.states import max_entangled
def brauer(dim: int, p_val: int) -> np.ndarray:
    r"""
    Produce all Brauer states [WikBrauer]_.
    Produce a matrix whose columns are all of the (unnormalized) "Brauer" states: states that are
    the :code:`p_val`-fold tensor product of the standard maximally-entangled pure state on
    :code:`dim` local dimensions. There are many such states, since there are many different ways to
    group the :code:`2 * p_val` parties into :code:`p_val` pairs (with each pair corresponding to
    one maximally-entangled state).
    The exact number of such states is the number of perfect matchings of
    :code:`2 * p_val` parties:
    >>> import math
    >>> math.factorial(2 * p_val) // (math.factorial(p_val) * 2**p_val)
    which is the number of columns of the returned matrix.
    This function has been adapted from QETLAB.
    Examples
    ==========
    Generate a matrix whose columns are all Brauer states on 4 qubits.
    >>> from toqito.states import brauer
    >>> brauer(2, 2)
    [[1. 1. 1.]
    [0. 0. 0.]
    [0. 0. 0.]
    [1. 0. 0.]
    [0. 0. 0.]
    [0. 1. 0.]
    [0. 0. 1.]
    [0. 0. 0.]
    [0. 0. 0.]
    [0. 0. 1.]
    [0. 1. 0.]
    [0. 0. 0.]
    [1. 0. 0.]
    [0. 0. 0.]
    [0. 0. 0.]
    [1. 1. 1.]]
    References
    ==========
    .. [WikBrauer] Wikipedia: Brauer algebra
        https://en.wikipedia.org/wiki/Brauer_algebra
    :param dim: Dimension of each local subsystem
    :param p_val: Half of the number of parties (i.e., the state that this function computes will
                  live in :math:`(\mathbb{C}^D)^{\otimes 2 P})`
    :return: Matrix whose columns are all of the unnormalized Brauer states.
    """
    # The Brauer states are computed from perfect matchings of the complete graph. So compute all
    # perfect matchings first.
    phi = tensor(max_entangled(dim, False, False), p_val)
    matchings = perfect_matchings(2 * p_val)
    num_matchings = matchings.shape[0]
    state = np.zeros((dim ** (2 * p_val), num_matchings))
    # Turn these perfect matchings into the corresponding states: one column
    # per matching, obtained by permuting the subsystems of the tensor-product
    # maximally-entangled state according to that matching.
    for i in range(num_matchings):
        state[:, i] = permute_systems(
            phi, matchings[i, :], dim * np.ones((1, 2 * p_val), dtype=int)[0]
        )
    return state
| vprusso/toqito | toqito/states/brauer.py | brauer.py | py | 2,375 | python | en | code | 118 | github-code | 13 |
72087528978 | from typing import Callable
import jax
import jax.numpy as jnp
from jax.flatten_util import ravel_pytree
from newton_smoothers.base import MVNStandard, FunctionalModel
from newton_smoothers.batch.utils import (
log_posterior_cost,
residual_vector,
block_diag_matrix,
line_search_update,
)
def _gauss_newton_step(x: jnp.ndarray, residual: Callable, weights: jnp.ndarray):
r = residual(x)
J = jax.jacobian(residual)(x)
W = weights
dx = -jnp.linalg.solve(J.T @ W @ J, jnp.dot(J.T @ W, r))
return dx
def _line_search_gauss_newton(
    x0: jnp.ndarray, fun: Callable, residual: Callable, weights: jnp.ndarray, k: int
):
    """Run *k* Gauss-Newton iterations from *x0*, each followed by a line
    search on the scalar cost *fun*; return the final iterate and the cost
    after every iteration."""
    def body(carry, _):
        x = carry
        dx = _gauss_newton_step(x, residual, weights)
        xn = line_search_update(x, dx, fun)
        return xn, fun(xn)
    # scan carries the iterate; the stacked outputs are the k cost values
    xn, fn = jax.lax.scan(body, x0, jnp.arange(k))
    return xn, fn
def line_search_iterated_batch_gauss_newton_smoother(
    init_nominal: jnp.ndarray,
    observations: jnp.ndarray,
    init_dist: MVNStandard,
    transition_model: FunctionalModel,
    observation_model: FunctionalModel,
    nb_iter: int = 10,
):
    """Batch iterated Gauss-Newton smoother with line search.

    Flattens the nominal trajectory, runs *nb_iter* line-searched
    Gauss-Newton iterations on the log-posterior cost, and returns the
    smoothed trajectory plus the cost history (initial cost prepended).
    """
    flat_init_nominal, _unflatten = ravel_pytree(init_nominal)
    # Cost and residual are wrapped so the generic Gauss-Newton routine can
    # treat the whole trajectory as one flat vector.
    def _flat_log_posterior_cost(flat_state):
        _state = _unflatten(flat_state)
        return log_posterior_cost(
            _state,
            observations,
            init_dist,
            transition_model,
            observation_model,
        )
    def _flat_residual_vector(flat_state):
        _state = _unflatten(flat_state)
        return residual_vector(
            _state,
            observations,
            init_dist,
            transition_model,
            observation_model,
        )
    # Block-diagonal weight matrix built from the model specifications.
    weight_matrix = block_diag_matrix(
        init_nominal,
        observations,
        init_dist,
        transition_model,
        observation_model,
    )
    init_cost = _flat_log_posterior_cost(flat_init_nominal)
    flat_nominal, costs = _line_search_gauss_newton(
        x0=flat_init_nominal,
        fun=_flat_log_posterior_cost,
        residual=_flat_residual_vector,
        weights=weight_matrix,
        k=nb_iter,
    )
    return _unflatten(flat_nominal), jnp.hstack((init_cost, costs))
| hanyas/second-order-smoothers | newton_smoothers/batch/ls_gauss_newton.py | ls_gauss_newton.py | py | 2,237 | python | en | code | 3 | github-code | 13 |
10136003424 | # encoding: utf-8
from PIL import ImageGrab
import os
import time
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email import Encoders
def screenGrab():
    """Grab the full screen and save it as a timestamped JPEG file.

    Returns the name of the file that was written.
    """
    filename = 'Screenshot_' + time.strftime('%Y%m%d%H%M') + '.jpg'
    snapshot = ImageGrab.grab()
    snapshot.save(filename, 'JPEG')
    return filename
def sendMail(filename):
    """Email *filename* as a base64 attachment via smtp.163.com.

    The MAILADDRESS/PASSWORD strings are placeholders that must be filled
    in before use.
    """
    # Local imports use the lowercase `email.mime` package, which exists on
    # both Python 2.5+ and Python 3 (the module-level `email.MIMEMultipart`
    # imports are Python-2-only).
    from email.mime.multipart import MIMEMultipart
    from email.mime.base import MIMEBase
    from email import encoders
    msg = MIMEMultipart()
    msg['Subject'] = filename
    msg['From'] = 'MAILADDRESS'
    msg['To'] = 'MAILADDRESS'
    part = MIMEBase('application', 'octet-stream')
    # read the attachment with `with` so the handle is not leaked
    with open(filename, 'rb') as attachment:
        part.set_payload(attachment.read())
    encoders.encode_base64(part)
    part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(filename))
    msg.attach(part)
    smail = smtplib.SMTP('smtp.163.com')
    try:
        smail.login('MAILADDRESS', 'PASSWORD')
        smail.sendmail('MAILADDRESS', ['MAILADDRESS'], msg.as_string())
    finally:
        # close the SMTP session even if login/send fails
        smail.quit()
def main():
    """Take a screenshot and mail the resulting file."""
    filename = screenGrab()
    sendMail(filename)
if __name__ == '__main__':
main() | linys1987/mycode | src/screengrab/screengrab.py | screengrab.py | py | 1,145 | python | en | code | 0 | github-code | 13 |
25713274252 | import numpy as np
class DepolarizationInducedSuppressionOfExcitation_DSE:
    """Depolarization-induced suppression of excitation (DSE) acting on an
    all-to-all connectivity matrix of ``num_neurons`` neurons.

    NOTE(review): ``excitatory_strength`` is stored but never used by any
    method, and ``apply_dse`` appears to have shape bugs (see below).
    """
    def __init__(self, num_neurons, initial_connectivity=None, excitatory_strength=0.5, depolarization_threshold=0.7, suppression_factor=0.5, min_strength=0, max_strength=1):
        self.num_neurons = num_neurons
        self.excitatory_strength = excitatory_strength
        self.depolarization_threshold = depolarization_threshold
        self.suppression_factor = suppression_factor
        # random uniform connectivity unless an explicit matrix is supplied
        if initial_connectivity is None:
            self.dse_connectivity = np.random.uniform(min_strength, max_strength, (self.num_neurons, self.num_neurons))
        else:
            self.dse_connectivity = initial_connectivity
    def compute_excitation(self, pre_synaptic_activity, post_synaptic_activity):
        """Outer product of pre/post activities scaled by the connectivity.

        NOTE(review): when called with scalar activities (as ``apply_dse``
        does), ``np.outer`` yields a (1, 1) array and the in-place ``*=``
        with the (N, N) connectivity cannot broadcast — this raises.
        """
        excitation = np.outer(pre_synaptic_activity, post_synaptic_activity)
        excitation *= self.dse_connectivity
        return excitation
    def suppress_excitation(self, activity_levels):
        """Scale activities above the depolarization threshold by the
        suppression factor; leave the rest unchanged."""
        suppressed = np.where(activity_levels > self.depolarization_threshold, self.suppression_factor * activity_levels, activity_levels)
        return suppressed
    def apply_dse(self, activity_levels):
        # Calculate the total excitation for each neuron pair
        # NOTE(review): assigning the (1, 1)/(N, N) result of
        # compute_excitation into the scalar slot total_excitation[i, j]
        # cannot work as written — the intended value is presumably
        # activity[i] * activity[j] * connectivity[i, j]; confirm and fix.
        total_excitation = np.zeros((self.num_neurons, self.num_neurons))
        for i in range(self.num_neurons):
            for j in range(self.num_neurons):
                if i != j:
                    pre_synaptic_activity = activity_levels[i]
                    post_synaptic_activity = activity_levels[j]
                    total_excitation[i, j] = self.compute_excitation(pre_synaptic_activity, post_synaptic_activity)
        # Apply depolarization-induced suppression of excitation
        suppressed_activity = self.suppress_excitation(activity_levels)
        # Modify the activity levels based on DSE
        # NOTE(review): (N,) -= (N, N) cannot broadcast in place; a reduction
        # over one axis (e.g. .sum(axis=1)) seems to be missing — confirm.
        activity_levels -= total_excitation * suppressed_activity[:, np.newaxis]
        # Ensure the activity levels remain in the valid range [0, 1]
        activity_levels = np.clip(activity_levels, 0, 1)
        return activity_levels
| RickysChocolateBox/artificial_brain | Neural Networks/Base Neuron Classes/Parent Neuron template Class/Inhibitory Synapse Functions/DepolarizationInducedSuppressionOfExcitation_DSE.py | DepolarizationInducedSuppressionOfExcitation_DSE.py | py | 2,111 | python | en | code | 0 | github-code | 13 |
16048443192 | from django.shortcuts import render, redirect
from django.http import HttpResponse
from scipy.io.wavfile import read
from deepspeech import Model
from django.conf import settings
from django.http import JsonResponse
import pandas as pd
import json
import io
import os
# paths
BASE_DIR = settings.BASE_DIR
DATA_DIR = settings.DATA_DIR
INFO_DIR = settings.INFO_DIR
# DeepSpeech Model
DSQ0 = Model(f'{BASE_DIR}/deepspeech/model_1006211730-1024-taufik-E.pb')
DSQ = Model(f'{BASE_DIR}/deepspeech/model_1006211730-1024-taufik-E.pb')
DSQ.enableExternalScorer(f'{BASE_DIR}/deepspeech/quran.scorer')
# Text Quran Utsmani
df = pd.read_csv(f'{DATA_DIR}/quran_utsmani_no_basmalah.csv')
quran_dict = {f'{i[0]}_{i[1]}': i[2].split(' ') for i in df.values}
# List bacaan Non-Quran
# with open(f'{INFO_DIR}/list_baca.json', 'r') as f:
# data = f.read()
f = open(f'{INFO_DIR}/list_baca.json', encoding="cp437")
data = f.read()
baca_arab = json.loads(data)
# search Ayat from quran_dict
def find_ayat(lookup):
    """Search ``quran_dict`` for the verses that best match *lookup*.

    *lookup* is a list of transcribed words. For each verse, the position of
    the first lookup word anchors the alignment, and the score is the number
    of verse words that match a lookup word at the same relative offset.

    Returns ``{verse_key: (score, score / verse_length)}`` restricted to the
    verses with the maximum score, or ``{}`` when nothing matches.
    """
    max_score = 0
    all_score = {}
    for key, target in quran_dict.items():
        # Anchor on the first lookup word; skip verses that do not contain
        # it (ValueError) and handle an empty lookup (IndexError) instead of
        # swallowing every exception with a bare `except:`.
        try:
            ref = target.index(lookup[0])
        except (ValueError, IndexError):
            continue
        # Words that appear at the same offset from the anchor as in lookup.
        match_words = [
            word
            for idx, word in enumerate(target)
            for idy, lookup_word in enumerate(lookup)
            if word == lookup_word and idx - ref == idy
        ]
        score = len(match_words)
        all_score[key] = (score, score / len(target))
        max_score = max(max_score, score)
    if max_score > 0:
        return {k: v for k, v in all_score.items() if v[0] == max_score}
    return {}
def cari(request):
    """'Search' view: on POST, transcribe the uploaded recitation audio with
    the scorer-enabled DeepSpeech model and return the best-matching verses
    as JSON; on GET, render the search page."""
    data = {
        'segment': 'cari',
    }
    if request.method == 'POST':
        # audio recognition
        sr, signal = read(request.FILES['file'].file)
        prediction = DSQ.stt(signal)
        result = find_ayat(prediction.split(' '))
        # sorting based on similarity (high --> low)
        result = dict(sorted(result.items(), key=lambda item: item[1], reverse=True))
        data = {
            'result': json.dumps(result),
            'prediction': prediction,
            'segment': 'cari',
        }
        return HttpResponse(json.dumps(data), content_type='application/json')
    return render(request, 'cari.html', data)
def hafalan(request):
    """'Memorization' view: on POST, transcribe the uploaded audio and return
    the word list as JSON; on GET, render the memorization page."""
    data = {
        'segment': 'hafalan',
    }
    if request.method == 'POST':
        # audio recognition
        sr, signal = read(request.FILES['file'].file)
        prediction = DSQ.stt(signal).split(' ')
        data = {
            'prediction': prediction,
            'segment': 'hafalan',
        }
        return HttpResponse(json.dumps(data), content_type='application/json')
    return render(request, 'hafalan.html', data)
def bacaan(request):
    """'Non-Quran reading' view: like ``hafalan`` but transcribes with the
    scorer-less model DSQ0; renders the non-Quran reading page on GET."""
    data = {
        'segment': 'bacaan',
    }
    if request.method == 'POST':
        # audio recognition (DSQ0: acoustic model only, no language scorer)
        sr, signal = read(request.FILES['file'].file)
        prediction = DSQ0.stt(signal).split(' ')
        data = {
            'prediction': prediction,
            'segment': 'bacaan',
        }
        return HttpResponse(json.dumps(data), content_type='application/json')
    return render(request, 'bacaan_non-quran.html', data)
def metadata_bacaNonQuran(request):
return JsonResponse(baca_arab) | taufik-adinugraha/ai-quran | ayat_recog/views.py | views.py | py | 3,476 | python | en | code | 3 | github-code | 13 |
12343818235 | # program for finding maximum number of characters between two same chars
# IDEA: logic is to maintain a temporary array (i don't know why i always name it as 'count') , and then initialize it as -1 so that we know we haven't seen thay char yet and the index are based on the ASCII values. Now we traverse and we check if the char is encountered for the first time, then we set its index and if it has already been encountered, then we simply update the result of difference between current index and last index of that particular char and update our max if required.
from sys import stdin, stdout
def max_char(s):
    """Return the maximum number of characters strictly between the first
    occurrence of a character and any later occurrence of the same character.

    Returns -1 when no character repeats (or *s* is empty) — matching the
    original implementation's sentinel.
    """
    # First index of each character seen so far. A dict replaces the
    # original ASCII-sized array (and the local that shadowed the function
    # name), and works for arbitrary characters.
    first_seen = {}
    best = -1
    for i, ch in enumerate(s):
        if ch in first_seen:
            # gap is measured from the character's FIRST occurrence
            best = max(best, i - first_seen[ch] - 1)
        else:
            first_seen[ch] = i
    return best
if __name__ == '__main__':
s = 'abbabbbckjkjka' # 12
stdout.write(str(max_char(s)))
| souravs17031999/100dayscodingchallenge | arrays/maximum_character_between_two_same_chars.py | maximum_character_between_two_same_chars.py | py | 1,609 | python | en | code | 43 | github-code | 13 |
1885490974 | from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score, f1_score
from joblib import dump, load
from pathlib import Path
import os
class Model:
    """Wraps a scikit-learn style classifier with a train/validate lifecycle,
    human-readable status tracking, and joblib-based on-disk caching of the
    fitted model."""
    # lifecycle status strings exposed to callers
    status_completed = 'Completed'
    status_pending = 'Pending'
    status_training_model = 'Training - Fitting Model'
    status_training_scores = 'Training - Calculating Training Scores'
    status_validating = 'Validating Test Set'
    status_error = 'Errored'
    status_restored = 'Completed - Restored from Cache'
    def __init__(self, classifier,
                 model_type,
                 dataset,
                 cache_file_path):
        # `dataset` must provide the four train/test split arrays by key
        self.classifier = classifier
        self.model_type = model_type
        self.cache_file_path = cache_file_path
        # internal state properties
        self.status = Model.status_pending
        self.x_train = dataset["x_train"]
        self.y_train = dataset["y_train"]
        self.x_test = dataset["x_test"]
        self.y_test = dataset["y_test"]
        self.test_accuracy = None
        self.test_f1_score = None
        self.train_accuracy = None
        self.train_f1_score = None
        self.error = None
        self.restored = False
    def train(self):
        """Fit (or restore), score on the training set, then validate.

        Any exception is captured into ``self.error`` and reflected in
        ``self.status`` rather than propagated.
        """
        try:
            self.train_model()
            self.calculate_train_scores()
            self.validate_model()
            if self.restored:
                self.status = Model.status_restored
            else:
                self.status = Model.status_completed
        except Exception as e:
            self.status = Model.status_error
            self.error = e
    def retrain(self):
        """Discard any cached model and train from scratch."""
        # check if the model file exists, then delete it
        self.delete_cached_model_if_exists()
        self.status = Model.status_pending
        self.restored = False
        self.train()
    def delete_cached_model_if_exists(self):
        """Remove the cache file when present."""
        exists = os.path.isfile(self.cache_file_path)
        if exists:
            os.remove(self.cache_file_path)
    def load_cached_model_if_exists(self):
        """Restore a previously fitted model from disk, if cached."""
        try:
            cached_model = load(self.cache_file_path)
            self.trained_model = cached_model
            self.restored = True
        except FileNotFoundError:
            self.status = Model.status_pending
    def cache_model(self):
        """Persist the fitted model, creating parent directories as needed."""
        Path(self.cache_file_path).parent.mkdir(parents=True,exist_ok=True)
        dump(self.trained_model, self.cache_file_path)
    def train_model(self):
        """Fit the classifier unless a cached model was restored."""
        self.load_cached_model_if_exists()
        if not self.restored:
            self.status = Model.status_training_model
            self.classifier.fit(self.x_train, self.y_train)
            self.trained_model = self.classifier
            self.cache_model()
    def calculate_train_scores(self):
        """Compute accuracy/F1 on the training split.

        NOTE(review): f1_score uses its default average='binary' here —
        presumably the task is binary classification; confirm.
        """
        self.status = Model.status_training_scores
        y_pred = self.trained_model.predict(self.x_train)
        self.train_accuracy = accuracy_score(self.y_train, y_pred)
        self.train_f1_score = f1_score(self.y_train, y_pred)
    def validate_model(self):
        """Compute accuracy (and, below, F1) on the held-out test split."""
        self.status = Model.status_validating
        y_pred = self.trained_model.predict(self.x_test)
        self.test_accuracy = accuracy_score(self.y_test, y_pred)
self.test_f1_score = f1_score(self.y_test, y_pred) | shinmyung0/netsec-crying-crypto | mlcode/src/model.py | model.py | py | 2,966 | python | en | code | 0 | github-code | 13 |
21616796994 | with open('demo.txt', mode='w') as f:
# f.write('Add this content!\n')
# file_content = f. readlines()
# f.close()
#user_input = input('Please enter input: ')
# print(file_content)
# for line in file_content:
# print(line[:-1])
# line = f.readline()
# while line:
# print(line)
# line = f.readline()
f.write('Testing if this closes...')
# f.close()
user_input = input('Testing: ')
print('Done!') | javendano585/PyBlockchain | files.py | files.py | py | 458 | python | en | code | 0 | github-code | 13 |
9248766207 | import asyncio
async def hello_world():
    """Print a greeting, pause one second, and return 1."""
    print("hello world!")
    await asyncio.sleep(1)
    return 1
async def hello_python():
    """Print a greeting, pause two seconds, and return 2."""
    print("hello Python!")
    await asyncio.sleep(2)
    return 2
# NOTE(review): get_event_loop()/run_until_complete is the legacy pattern;
# asyncio.run(...) is the modern entry point.
event_loop = asyncio.get_event_loop()
try:
    # gather runs both coroutines concurrently and preserves argument order,
    # so `result` is [2, 1] after ~2 seconds (the longer sleep).
    result = event_loop.run_until_complete(asyncio.gather(
        hello_python(),
        hello_world(),
    ))
    print(result)
finally:
    event_loop.close()
event_loop.close() | fuadaghazada/scaling-python | event-loops/sleep_and_gather.py | sleep_and_gather.py | py | 407 | python | en | code | 0 | github-code | 13 |
29041474060 | def heapsort(lst):
for start in range((len(lst)-2)/2, -1, -1):
siftdown(lst, start, len(lst)-1)
for end in range(len(lst)-1, 0, -1):
lst[end], lst[0] = lst[0], lst[end]
siftdown(lst, 0, end - 1)
return lst
def siftdown(lst, start, end):
    """Sift lst[start] down within lst[start:end+1] (inclusive) to restore
    the max-heap property in place."""
    parent = start
    child = 2 * parent + 1
    while child <= end:
        # step to the larger of the two children
        right = child + 1
        if right <= end and lst[right] > lst[child]:
            child = right
        if lst[parent] >= lst[child]:
            break
        lst[parent], lst[child] = lst[child], lst[parent]
        parent = child
        child = 2 * parent + 1
def heapify(array):
    """Rearrange *array* into a binary min-heap in place and return it.

    Uses the "hole" technique: the value being sifted is held aside while
    smaller children shift up, and is written once into its final slot.
    """
    size = len(array)
    # sift down every internal node, from the last parent back to the root
    for start in range(size // 2 - 1, -1, -1):
        pending = array[start]
        child = 2 * start + 1
        while child < size:
            sibling = child + 1
            # step to the smaller of the two children
            if sibling < size and array[sibling] < array[child]:
                child = sibling
            if pending <= array[child]:
                break
            # pull the smaller child up into the current hole
            array[(child - 1) // 2] = array[child]
            child = 2 * child + 1
        # the hole is the parent of `child`; drop the held value there
        array[(child - 1) // 2] = pending
    return array
| javon27/sorts_analysis | heap.py | heap.py | py | 1,163 | python | en | code | 0 | github-code | 13 |
72922406097 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
unit tests for the snp-pileup-wrapper.cwl file
"""
import os
import sys
import unittest
from pluto import (
CWLFile,
PlutoTestCase,
OFile
)
class TestConcatWithCommentsCWL(PlutoTestCase):
    """Integration tests for ``concat_with_comments.cwl``: the tool must
    prepend a ``#label: value`` comment while preserving comment lines
    already present in the inputs."""
    cwl_file = CWLFile('concat_with_comments.cwl')
    def test_concat_0(self):
        """
        Test concat when no comments are present in the original file
        """
        # make a dummy file with some lines
        input_lines = ["HEADER", "foo", "bar", "baz"]
        input_file = os.path.join(self.tmpdir, "input.txt")
        with open(input_file, "w") as fout:
            for line in input_lines:
                fout.write(line + '\n')
        self.input = {
            "input_files": [{
                "class": "File",
                "path": input_file
            }],
            "comment_label": "comment_label",
            "comment_value": "comment_value"
        }
        output_json, output_dir = self.run_cwl()
        # size + sha1 hash pin the exact bytes of the expected output file
        expected_output = {
            'output_file': OFile(name='output.txt', size=49, hash='7cef8f6de47289a55de99de77563beb3fa371deb', dir = output_dir)
        }
        self.assertCWLDictEqual(output_json, expected_output)
        # check the contents of the concatenated file; should be the same as the input
        self.assertFileLinesEqual(
            expected_output['output_file']['path'],
            ['#comment_label: comment_value', "HEADER", 'foo', 'bar', 'baz'])
    def test_concat1(self):
        """
        Test concat when original file has a comment line
        """
        # make a dummy file with some lines
        input_lines = ["# comment here", "HEADER", "foo", "bar", "baz"]
        input_file = os.path.join(self.tmpdir, "input.txt")
        with open(input_file, "w") as fout:
            for line in input_lines:
                fout.write(line + '\n')
        self.input = {
            "input_files": [{
                "class": "File",
                "path": input_file
            }],
            "comment_label": "comment_label",
            "comment_value": "comment_value"
        }
        output_json, output_dir = self.run_cwl()
        expected_output = {
            'output_file': OFile(name='output.txt', size=64, hash='14ee1247f314dba1e3c28aa8aec9ff7b137a1f41', dir = output_dir)
        }
        self.assertCWLDictEqual(output_json, expected_output)
        # check the contents of the concatenated file; should be the same as the input
        # (existing comment kept, new label comment inserted after it)
        self.assertFileLinesEqual(
            expected_output['output_file']['path'],
            ['# comment here', '#comment_label: comment_value', "HEADER", 'foo', 'bar', 'baz'])
    def test_concat2(self):
        """
        Test concat when multiple files have comments
        """
        # make a dummy file with some lines
        input_lines1 = ["# comment 1 here", "HEADER", "foo1", "bar1"]
        input_file1 = os.path.join(self.tmpdir, "input1.txt")
        with open(input_file1, "w") as fout:
            for line in input_lines1:
                fout.write(line + '\n')
        input_lines2 = ["# comment 2 here", "HEADER", "foo2", "bar2"]
        input_file2 = os.path.join(self.tmpdir, "input2.txt")
        with open(input_file2, "w") as fout:
            for line in input_lines2:
                fout.write(line + '\n')
        self.input = {
            "input_files": [
                {
                    "class": "File",
                    "path": input_file1
                },
                {
                    "class": "File",
                    "path": input_file2
                },
            ],
            "comment_label": "comment_label",
            "comment_value": "comment_value"
        }
        output_json, output_dir = self.run_cwl()
        expected_output = {
            'output_file': OFile(name='output.txt', size=91, hash='5dbce16f9bfef135d6b8288b16350351a33998f3', dir = output_dir)
        }
        self.assertCWLDictEqual(output_json, expected_output)
        # both input comments come first, then the new label comment, then the
        # bodies in order (per the expected lines, HEADER appears only once)
        self.assertFileLinesEqual(
            expected_output['output_file']['path'],
            [
                '# comment 1 here',
                '# comment 2 here',
                '#comment_label: comment_value',
                "HEADER",
                'foo1',
                'bar1',
                'foo2',
                'bar2'
            ])
| mskcc/pluto-cwl | tests/test_concat_with_comments_cwl.py | test_concat_with_comments_cwl.py | py | 4,419 | python | en | code | 1 | github-code | 13 |
71076140497 | import os
import sys
import errno
import numpy as np
import shutil
import os.path as osp
import matplotlib.pyplot as plt
import scipy.io as sio
import torch
def mkdir_if_missing(directory):
    """Create *directory* (and any missing parents) if it does not exist.

    Uses ``exist_ok`` instead of the check-then-create pattern, which is
    race-free and still raises for real failures (e.g. permissions).
    An empty string (as produced by ``os.path.dirname`` for a bare
    filename) is treated as a no-op instead of raising.
    """
    if directory:
        os.makedirs(directory, exist_ok=True)
class AverageMeter(object):
    """Tracks the latest value, sum, count, and running average of a metric.

    Adapted from the PyTorch ImageNet example
    (https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262).
    """
    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def save_checkpoint(state, is_best, fpath='checkpoint.pth.tar'):
    """Serialize *state* to *fpath*; when *is_best* is true, also copy it to
    ``best_model.pth.tar`` in the same directory."""
    mkdir_if_missing(osp.dirname(fpath))
    torch.save(state, fpath)
    if is_best:
        shutil.copy(fpath, osp.join(osp.dirname(fpath), 'best_model.pth.tar'))
class Logger(object):
    """
    Write console output to an external text file.

    Mirrors every write to ``sys.stdout`` and, when *fpath* is given, to
    that file as well. Code adapted from
    https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py.
    """
    def __init__(self, fpath=None):
        self.console = sys.stdout
        self.file = None
        if fpath is not None:
            # dirname is '' for a bare filename; only create real directories
            dirname = os.path.dirname(fpath)
            if dirname:
                os.makedirs(dirname, exist_ok=True)
            self.file = open(fpath, 'w')

    def __del__(self):
        self.close()

    def __enter__(self):
        # return self so ``with Logger(path) as log:`` yields the logger
        # (the original returned None, making the context manager unusable)
        return self

    def __exit__(self, *args):
        self.close()

    def write(self, msg):
        self.console.write(msg)
        if self.file is not None:
            self.file.write(msg)

    def flush(self):
        self.console.flush()
        if self.file is not None:
            self.file.flush()
            # force the OS to commit the log to disk
            os.fsync(self.file.fileno())

    def close(self):
        # NOTE: the console stream is process-wide sys.stdout; closing it
        # (as the original did, including from __del__) breaks all later
        # printing, so only the log file is closed. Idempotent.
        if self.file is not None:
            self.file.close()
            self.file = None
def plot_features(dataset_name, features, labels, num_classes, epoch, prefix, save_dir):
    """Plot features on 2D plane and save the figure.

    Args:
        features: (num_instances, num_features).
        labels: (num_instances).

    The three supported datasets differed only in the length of the hard-coded
    colour/legend lists (UP: 9, SA: 16, KSC: 13); the duplicated branches are
    collapsed into one parameterized path. Unknown dataset names produce an
    empty figure, matching the original behaviour.
    """
    legend_sizes = {"UP": 9, "SA": 16, "KSC": 13}
    if dataset_name in legend_sizes:
        for label_idx in range(num_classes):
            plt.scatter(
                features[labels == label_idx, 0],
                features[labels == label_idx, 1],
                c='C%d' % (label_idx + 1),
                s=1,
            )
        # legend always lists the dataset's full class set, as before
        plt.legend([str(i + 1) for i in range(legend_sizes[dataset_name])],
                   loc='upper right')
    dirname = osp.join(save_dir, prefix)
    if not osp.exists(dirname):
        os.mkdir(dirname)
    save_name = osp.join(dirname, 'epoch_' + str(epoch + 1) + dataset_name + '.png')
    plt.savefig(save_name, bbox_inches='tight')
    plt.close()
class classification_map(object):
    """Render a whole-scene classification map for a trained model.

    Runs the model over ``all_loader``, colours each predicted label with a
    dataset-specific palette, and saves the result as a PNG.
    """
    # Per-dataset colour palettes (RGB in 0-255), indexed by class id. These
    # tables replace two ~100-line duplicated if/elif chains. Labels without
    # a palette entry (e.g. positions never predicted) remain black, exactly
    # as in the original per-pixel code.
    _PALETTES = {
        "SA": [
            (140, 0, 191), (41, 177, 137), (0, 64, 255), (0, 128, 255),
            (0, 191, 255), (0, 255, 255), (64, 255, 191), (128, 255, 128),
            (191, 255, 64), (255, 255, 0), (255, 191, 0), (255, 128, 0),
            (255, 64, 0), (255, 0, 0), (191, 0, 0), (128, 0, 0),
        ],
        "KSC": [
            (255, 128, 128), (255, 90, 1), (255, 2, 251), (193, 12, 190),
            (139, 68, 46), (172, 175, 84), (255, 220, 220), (145, 142, 142),
            (242, 240, 104), (255, 128, 81), (128, 128, 255), (71, 71, 9),
            (2, 177, 255),
        ],
        "UP": [
            (255, 16, 35), (38, 214, 42), (14, 185, 228), (226, 218, 137),
            (203, 115, 206), (221, 168, 85), (142, 144, 87), (150, 120, 120),
            (51, 51, 153),
        ],
    }

    def __init__(self, model, all_loader, use_gpu, dataset_dict, dataset_name, save_directory,
                 model_name, prefix="Maps", dpi=300):
        self.model = model
        self.allloader = all_loader
        self.use_gpu = use_gpu
        self.dataset_dict = dataset_dict
        self.dataset_name = dataset_name
        self.save_directory = save_directory
        self.prefix = prefix
        self.dpi = dpi
        self.model_name = model_name

    def generate_map(self):
        """Predict a label for every sampled pixel and save the coloured map."""
        dataset_width = self.dataset_dict.dataset_width
        dataset_height = self.dataset_dict.dataset_height
        gt = self.dataset_dict.gt.flatten()
        total_indices = self.dataset_dict.total_indices
        self.model.eval()
        # Predict labels
        with torch.no_grad():
            y_hat = []
            for data, labels in self.allloader:
                if self.use_gpu:
                    data, labels = data.cuda(), labels.cuda()
                y_hat.extend(self.model(data).cpu().argmax(axis=1).detach().numpy())
        # scatter predictions back into the full image; unvisited pixels stay 0
        pred_labels = np.zeros(gt.shape)
        pred_labels[total_indices] = y_hat
        pred_labels = np.ravel(pred_labels)
        y_pred_list = self.list_color(pred_labels)
        y_pred_matrix = np.reshape(y_pred_list, (dataset_height, dataset_width, 3))
        # Generate a borderless figure sized to the dataset dimensions
        fig = plt.figure(frameon=False)
        fig.set_size_inches(dataset_height * 2.0 / self.dpi, dataset_width * 2.0 / self.dpi)
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        fig.add_axes(ax)
        dirname = osp.join(self.save_directory, self.prefix)
        if not osp.exists(dirname):
            os.mkdir(dirname)
        save_name = osp.join(dirname, self.dataset_name + "_" + self.model_name + '.png')
        ax.imshow(y_pred_matrix)
        plt.savefig(save_name, dpi=self.dpi)
        plt.close()

    def list_color(self, y_hat):
        """Map a 1-D array of class labels to RGB colours in [0, 1].

        Vectorized masking replaces the original per-element Python loop;
        unknown dataset names or labels yield black rows, as before.
        """
        y = np.zeros((y_hat.shape[0], 3))
        for class_idx, rgb in enumerate(self._PALETTES.get(self.dataset_name, ())):
            y[y_hat == class_idx] = np.array(rgb) / 255.
        return y
| majidseydgar/Res-CP | 3-Classifications/utils.py | utils.py | py | 10,219 | python | en | code | 40 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.