index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
997,900 | 98b5b340249219200457eb0d55f1c6d67775f5b1 | #! /usr/bin/env python
from tensorflow.contrib import learn
import tensorflow as tf
import numpy as np
def tokenizer(iterator):
    """Pass-through tokenizer generator.

    Args:
        iterator: Input iterator with strings (or already-split token lists).
    Yields:
        each input value unchanged, one at a time.
    """
    yield from iterator
def loadGloVe(filename):
    """Load a 100-dimensional GloVe-style embedding file.

    Each line is expected to hold one token followed by 100 components
    (101 space-separated fields); lines with any other field count are skipped.

    Args:
        filename: path to the embedding text file (UTF-8).

    Returns:
        (vocab, embd): vocab is a list of tokens split on '_' (multi-word
        tokens become word lists), embd is the list of the 100 string
        components for each kept token.
    """
    vocab = []
    embd = []
    # 'with' guarantees the handle closes even if parsing raises; iterating
    # the handle streams lines instead of loading the file via readlines().
    with open(filename, 'r', encoding='utf8') as glove_file:
        for line in glove_file:
            row = line.strip().split(' ')
            if len(row) != 101:  # token + 100 components expected
                continue
            vocab.append(row[0].split('_'))
            embd.append(row[1:])
    print('Loaded words vectors!')
    return vocab, embd
def generateData(embiddingsFile, embeddingsDim, x_text, y, max_document_length):
    """Build padded word-id sequences for x_text from a GloVe-derived vocabulary.

    Loads the embedding file, copies the embedding matrix into a non-trainable
    TF variable, fits a VocabularyProcessor on the GloVe vocabulary and
    transforms x_text with it.  Uses TF1 sessions and tf.contrib.learn
    (both removed in TF2).

    Returns:
        (x, y, vocab_processor): x is an int array of word ids, y is returned
        unchanged, vocab_processor is the fitted processor.
    """
    print("Loading words vectors!")
    embedding_dim = embeddingsDim  # NOTE(review): overwritten below from the file contents
    filename = embiddingsFile
    vocab,embd = loadGloVe(filename)
    vocab_size = len(vocab)
    embedding_dim = len(embd[0])
    #embedding = np.asarray(embd)
    embedding = np.asarray([np.asarray(xi, dtype=np.float32) for xi in embd])
    # W holds the pretrained vectors; frozen so training never updates them.
    W = tf.Variable(tf.constant(0.0, shape=[vocab_size, embedding_dim]),
                trainable=False, name="W")
    embedding_placeholder = tf.placeholder(tf.float32, [vocab_size, embedding_dim])
    embedding_init = W.assign(embedding_placeholder)
    session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    sess = tf.Session(config=session_conf)
    feedDict = {embedding_placeholder: embedding}
    # One-time copy of the pretrained embeddings into W.
    sess.run(embedding_init, feed_dict=feedDict)#[list(feedDict.keys())[0]])
    #init vocab processor
    vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length, tokenizer_fn=tokenizer)
    #fit the vocab from glove
    pretrain = vocab_processor.fit(vocab)
    #transform inputs
    x = np.array(list(vocab_processor.transform(x_text)))
    return x, y, vocab_processor
997,901 | 02ff5bb01e006b5a0e9993ec7c2a8faf138ca06a | import random
import names
from itertools import cycle, islice, chain
import time
# Game configuration.
initial_decks = 3    # number of 52-card decks shuffled together
initial_players = 4  # total seats, including the human player
initial_hand = 7     # cards dealt to each player at the start
game_suits = ["Spades", "Hearts", "Clubs", "Diamonds"]
# Points a card is worth (used both for play effects and for the final
# count of cards left in hand); ranks not listed score their face value.
special_values = {
    "A": 15,
    "2": 20,
    "8": 30,
    "J": 10,
    "Q": 10,
    "K": 10,
    "Joker": 50
}
class Card:
    """A single playing card; point value comes from special_values or the rank digits."""

    def __init__(self, rank, suit):
        self.rank = rank
        self.suit = suit
        self.card = (rank, suit)
        # Specials (faces, 2, 8, Joker) map through the table; plain numeric
        # ranks score their own value.
        self.points = special_values[rank] if rank in special_values else int(rank)

    def __repr__(self):
        if self.rank != "Joker":
            return f"|{self.rank} of {self.suit}|"
        return f"|{self.rank}|"

    def __add__(self, p):
        # Support card + card as a sum of point values.
        return self.points + p.points
class Player:
    """A participant (human or computer) holding a hand of cards."""

    def __init__(self, name):
        self.name = name
        self.hand = []
    def __repr__(self):
        return self.name
    def play_card(self, card):
        """Try to play `card`; an invalid play costs a 3-card penalty draw.

        Returns the card when it was actually placed on the table, else None.
        """
        if not is_valid(card):
            print("The play is not valid. JODETE! --> You draw 3 cards.")
            self.draw(3)
            return
        else:
            self.hand.remove(card)
            table.append(card)
            return card
    def draw(self, x):
        """Draw x cards from the global deck into this hand.

        Recycles the table back into the deck first when the deck is about
        to run out (6-card safety margin).
        """
        if len(deck) - 6 < x:
            shuffle()
        drawn = draw_from_deck(x)
        self.hand.extend(drawn)
        return drawn
    def calculate_points(self):
        """Total point value of the cards still in hand."""
        points = 0
        for card in self.hand:
            points = points + card.points
        return points
    def next_play(self):
        """ Automation of plays by CP.

        Plays the first valid card in hand; otherwise honours a pending "2"
        chain by drawing the penalty, or draws one card and plays it when
        possible.  Returns the played card, or None when the turn ends
        without a play.
        """
        for card in self.hand:
            if is_valid(card):
                self.play_card(card)
                return card
        global forced_rank
        if forced_rank == "2":
            # Absorb the accumulated draw-two penalty and clear the chain.
            global two_multiplier
            self.draw(two_multiplier)
            print(f"{self.name} draws {str(two_multiplier)} cards.")
            two_multiplier = 0
            forced_rank = False
            return None
        card = self.draw(1)[0]
        print(self.name + " draws a card.")
        if is_valid(card):
            self.play_card(card)
            return card
        print(self.name + " passes the turn.")
def shuffle():
    """Fold every played card except the current top back into the deck and reshuffle."""
    tabletop = table[-1]
    deck.extend(table[:-1])  # recycle everything under the visible top card
    random.shuffle(deck)
    table.clear()
    table.append(tabletop)  # the visible card stays on the table
    # Fixed user-facing typo ("into de the deck").
    print("--> The table has been shuffled into the deck. <--")
def choose_suit():
    """Pick a random suit on behalf of a computer player after an 8/Joker."""
    return random.choice(game_suits)
def create_players(n):
    """Create `n` computer-controlled players with randomly generated full names."""
    return [Player(names.get_full_name()) for _ in range(n)]
def create_deck(boxes, suits):
    """Build the draw pile: `boxes` full decks over `suits`, plus two Jokers.

    Note the Jokers are added once in total, not per deck — matching the
    original behaviour.
    """
    face_names = {1: "A", 11: "J", 12: "Q", 13: "K"}
    a_deck = []
    for _ in range(boxes):
        for value in range(1, 14):
            # Map ace/face values to their letter ranks; others keep digits.
            rank = face_names.get(value, value)
            for suit in suits:
                a_deck.append(Card(str(rank), suit))
    a_deck.extend([Card("Joker", "Joker")] * 2)
    return a_deck
def draw_from_deck(y):
    """Remove and return `y` randomly chosen cards from the global deck.

    Bug fix: the original used random.randrange(len(deck) - 1), which could
    never pick the last index — the final card in the list was undrawable —
    and raised ValueError when only one card remained.
    """
    cards_drawn = []
    for _ in range(y):
        cards_drawn.append(deck.pop(random.randrange(len(deck))))
    return cards_drawn
def start_game():
    """Set up a round: create players, deal hands, flip the first table card.

    Prompts the human for a name on stdin, seats everyone in random order
    and announces the turn order.  Returns (players, user_name).
    """
    players = create_players(initial_players - 1)
    user = input("What's your name? \n")
    players.append(Player(user))
    random.shuffle(players)
    for player in players:
        player.draw(initial_hand)
    # Flip the starting card onto the table.
    table.extend(draw_from_deck(1))
    print("The game is starting. Players are, in order of the round:")
    time.sleep(2)
    for player in players:
        time.sleep(1)
        print("-- " + player.name)
    time.sleep(1)
    print("The top card of the deck is revealed:")
    time.sleep(1)
    print(table[-1])
    time.sleep(2)
    print(f"{players[0]} will begin.")
    return players, user
def is_valid(card):
    """Check whether `card` may be played on the current table top.

    Rule order matters: a pending forced rank (from a played "2") blocks
    everything but that rank; 8s and Jokers are wild; a forced suit (set
    after an 8/Joker) overrides the normal match-suit-or-rank rule.
    """
    top = table[-1]
    valid_ranks = [top.rank, "8", "Joker"]
    if forced_rank:
        # A draw-two chain is active: only the forced rank can answer it.
        if card.rank != forced_rank:
            return False
    if card.rank in valid_ranks:
        return True
    if forced_suit:
        # An 8/Joker fixed the suit; normal top-card matching is suspended.
        if card.suit == forced_suit:
            return True
        else:
            return False
    elif top.suit == card.suit:
        return True
    elif top.rank == card.rank:
        return True
    return False
def user_turn(user):
    """Interactive turn for the human player via stdin/stdout.

    Handles passing (drawing), pending "2" chains, and parsing a card typed
    as '<rank> of <suit>' or the single word 'joker'.  Returns the played
    card, or None when the turn ends without a play.
    """
    print("It's your turn. what card will you play?")
    time.sleep(1)
    print("Tabletop:" + str(table[-1]))
    time.sleep(1)
    print("Your hand:")
    print(user.hand)
    global forced_rank
    global two_multiplier
    while True:
        user_play = input('Type the card as shown in your hand, or "pass" to draw a card. \n')
        if user_play.lower() == "pass":
            if forced_rank == "2":
                # A "2" chain is pending: absorb the accumulated penalty draw.
                draw = user.draw(two_multiplier)
                print(f"You drew {str(two_multiplier)} cards: {draw}.")
                forced_rank = False
                two_multiplier = 0
                return
            else:
                draw = user.draw(1)[0]
                play_last_draw = input(f"You drew a {draw}. Want to play it? Y or N. \n")
                if play_last_draw.lower() == "y":
                    print(f"You play a {draw}")
                    play = user.play_card(draw)
                    return play
                else:
                    return
        else:
            # Expect "<rank> of <suit>" (3 tokens) or just "joker".
            card_input = user_play.split(" ")
            if len(card_input) == 3 or card_input[0].lower() == "joker":
                for card in user.hand:
                    is_correct = card_input[0].lower() == "joker" and card.rank == "Joker"
                    if not is_correct:
                        is_correct = card_input[0].lower() == card.rank.lower() and card_input[2].lower() == card.suit.lower()
                    if is_correct:
                        print(f"You play a {card}")
                        play = user.play_card(card)
                        # An invalid play (play is None) during a "2" chain
                        # still forces the penalty draw.
                        if forced_rank == "2" and not play:
                            draw = user.draw(two_multiplier)
                            print(f"You drew {str(two_multiplier)} cards: {draw}.")
                            forced_rank = False
                            two_multiplier = 0
                            return
                        return play
            print("Input not valid! Try again.")
def cp_turn(player):
    """Let a computer player take its turn; announce and return the card played, if any."""
    play = player.next_play()
    if play is None:
        return None
    print(f"-- {player.name} plays a {play}.")
    return play
def end_of_game(winner, players):
    """Announce the winner, print everyone's remaining hand points, and quit."""
    print(f" {winner.name} has no more cards in hand. The game ended!")
    print("The final points are:")
    for player in players:
        print(player.name + ": " + str(player.calculate_points()))
    # exit() terminates the whole program here; acceptable for a script,
    # but it prevents reusing the loop programmatically.
    exit()
def reverse_round(current_player):
    '''Returns a reversed cycle object starting with current player.'''
    # NOTE(review): reverses the MODULE-LEVEL `players` list in place rather
    # than a parameter — relies on it being the same list object main() uses.
    players.reverse()
    reverse_players = cycle(players)
    while True:
        # Advance until the new cycle is aligned on the player whose turn it is.
        if next(reverse_players) == current_player:
            return reverse_players
def main(players_, user_):
    """Main game loop: alternate user and CP turns until someone empties a hand.

    After each play it applies the special-card effects: 2 (chained penalty
    draw), 8/Joker (choose a suit), K (reverse direction), J (skip one),
    Q (skip two), Joker (next player draws 4 and is skipped).
    """
    players_cycle = cycle(players_)
    global forced_suit
    global forced_rank
    current_player = next(players_cycle)
    while True:
        print("The length of the deck is " + str(len(deck)))
        if user_ == current_player.name:
            # Human turn: interactive input, including suit choice for wilds.
            played = user_turn(current_player)
            if len(current_player.hand) == 0:
                end_of_game(current_player, players_)
            if played:
                if played.rank == "8" or played.rank == "Joker":
                    suit = input("--> Choose a suit. \n")
                    while True:
                        lower_suits = [i.lower() for i in game_suits]
                        if suit.lower() in lower_suits:
                            forced_suit = game_suits[lower_suits.index(suit.lower())]
                            break
                        else:
                            suit = input("Suit is not valid. Type a valid suit.\n")
                else:
                    forced_suit = False
        else:
            # Computer turn: automated play, random suit choice for wilds.
            played = cp_turn(current_player)
            time.sleep(1)
            if len(current_player.hand) == 0:
                end_of_game(current_player, players_)
            if played:
                if played.rank == "8" or played.rank == "Joker":
                    forced_suit = choose_suit()
                    print(f"-- {current_player.name} chose {forced_suit}.")
                else:
                    forced_suit = False
        # Shared special-card effects, regardless of who played.
        if played:
            if played.rank == "2":
                global forced_rank
                forced_rank = "2"
                global two_multiplier
                two_multiplier += 2
            if played.rank == "K":
                players_cycle = reverse_round(current_player)
            if played.rank == "J":
                next(players_cycle)
            if played.rank == "Q":
                next(players_cycle)
                next(players_cycle)
            if played.rank == "Joker":
                # The next player draws 4 and loses the turn.
                current_player = next(players_cycle)
                current_player.draw(4)
                print(f"{current_player} draws 4 cards.")
                continue
        current_player = next(players_cycle)
# Module-level game state shared by the functions above.
deck = create_deck(initial_decks, game_suits)  # draw pile
table = []  # played cards; table[-1] is the visible top card
forced_rank = False  # set to "2" while a draw-two chain is pending
forced_suit = False  # suit imposed by the last 8/Joker, or False
two_multiplier = 0  # cards owed by the next player in a "2" chain
players, user = start_game()
main(players, user)
# todo shuffle deck
|
997,902 | 06791723f14d09a0b40ca6789b419e6a19669e82 | # Given an array nums of n integers where n > 1, return an array output such that
# output[i] is equal to the product of all the elements of nums except nums[i].
#
# Example:
#
# Input: [1,2,3,4]
# Output: [24,12,8,6]
# Constraint: It's guaranteed that the product of the elements of
# any prefix or suffix of the array (including the whole array) fits
# in a 32 bit integer.
#
# Note: Please solve it without division and in O(n).
#
# Follow up:
# Could you solve it with constant space complexity? (The output array does
# not count as extra space for the purpose of space complexity analysis.)
class Solution:
    def productExpectSelf(self, nums):
        """Return a list where entry i is the product of all nums except nums[i].

        Two sweeps and no division: the first pass writes prefix products,
        the second folds suffix products in from the right.  O(n) time,
        O(1) extra space beyond the output.  (The method name keeps the
        original spelling for caller compatibility.)
        """
        n = len(nums)
        out = [1] * n
        prefix = 1
        for i, value in enumerate(nums):
            out[i] = prefix
            prefix *= value
        suffix = 1
        for i in range(n - 1, -1, -1):
            out[i] *= suffix
            suffix *= nums[i]
        return out
|
997,903 | 927edb138e5a55ba0f1bf8a3386aceb77c6d9153 | class ContextSplitterStatus:
NotStarted = 1
Started = 2
Ended = 3
class ContextSplitter:
    """Drive a context manager's __enter__/__exit__ as two separate calls.

    Useful when entry and exit must happen in different places (e.g. across
    callbacks) rather than inside a single `with` block.
    """

    def __init__(self, context):
        self.status = ContextSplitterStatus.NotStarted
        self.context = context  # the wrapped context manager

    def start(self):
        """Enter the wrapped context; return whatever its __enter__ yields."""
        self.status = ContextSplitterStatus.Started
        return self.context.__enter__()

    def end(self, exc):
        """Exit the wrapped context.

        Args:
            exc: the exception raised inside the context, or None on success.

        Returns:
            The suppression result of the wrapped __exit__.
        """
        if exc is None:
            exc_type = None
            tb = None
        else:
            exc_type = type(exc)
            # Bug fix: forward the traceback instead of always passing None,
            # so __exit__ implementations that inspect it behave correctly.
            tb = exc.__traceback__
        r = self.context.__exit__(exc_type, exc, tb)
        self.status = ContextSplitterStatus.Ended
        return r

    def is_started(self) -> bool:
        """True once start() has been called (stays True after end())."""
        return self.status >= ContextSplitterStatus.Started

    def is_ended(self) -> bool:
        """True once end() has completed."""
        return self.status == ContextSplitterStatus.Ended
|
997,904 | e31d9ed50c35902d66be2cb39f91803ea9454963 | # -*- coding: utf-8 -*-
"""
The Epilepsy Ecosystem's data has dropout; intervals in which the sensors failed to pick up signals.
They are identified by all sensors reading 0 (which was numerically mapped to 0.034). This script
extracts uninterrupted segments which we can draw images from safely
@author: Bananin
"""
import numpy as np
import pandas as pd
import os
# an interval's minimum duration in seconds
min_duration = 5
F = 400 # Hz
CHANNELS = np.array(["ch0","ch1","ch2","ch3","ch4","ch5","ch6","ch7","ch8","ch9", "ch10","ch11",
"ch12","ch13","ch14","ch15"])
# directories containing the observations for each patient
patient_dirs = [dir_pat for dir_pat in os.listdir("dataset") if dir_pat.startswith("Pat")]
# Create the output directory; exist_ok=True tolerates an existing directory
# without masking unrelated OSErrors the way the previous bare
# `try/except: pass` did.
os.makedirs("intervals", exist_ok=True)
for patient_dir in patient_dirs:
    print("Processing intervals in directory "+patient_dir)
    # list of uninterrupted intervals for this patient
    intervals = []
    # this patient's .csv eeg files
    EEGs = [filename for filename in os.listdir("dataset/"+patient_dir)]
    for EEG in EEGs:
        # list all uninterrupted intervals for this segment (.csv file)
        segment = pd.read_csv("dataset/"+patient_dir+"/"+EEG)
        # dropout value is actually 0.034
        segment.loc[:,CHANNELS] = segment.loc[:,CHANNELS].replace(0.034,0)
        # A tick where every channel is 0 (after the replacement) marks dropout.
        sum_channels = np.sum(np.abs(segment.loc[:,CHANNELS]), axis=1)
        # look for uninterrupted intervals
        interval_start = 0
        for i in range(len(segment)):
            # NOTE(review): positional indexing into a Series — assumes the
            # default RangeIndex produced by read_csv.
            if sum_channels[i] == 0 or i == len(segment)-1:
                # interruption due to dropout or file end
                if i-interval_start >= min_duration*F:
                    # consolidate an interval (end tick not included)
                    intervals.append({"start":interval_start,"end":i,"file":patient_dir+"/"+EEG})
                interval_start = i+1
    # store all discovered intervals for this patient
    intervals = pd.DataFrame(intervals)
    intervals.to_csv("intervals/"+patient_dir+".csv", index=False)
|
997,905 | fccb6c7b9a9d4211d4e69aa1ebecbdc63e3fa5db | from django.urls import path
from .import views
# URL namespace used with reverse()/``{% url %}``, e.g. "lists:list_detail".
app_name = "lists"
# Route order matters: the slug catch-all must come after 'create/'.
urlpatterns = [
    path('', views.lists, name='lists'),
    path('create/', views.list_create, name='create'),
    path('<slug:slug>/', views.list_detail, name='list_detail'),
]
|
997,906 | 3f59271ddf5bb5ca7210debe63aae261a7f4c199 | """ You've built an inflight entertainment system with on-demand movie streaming.
Users on longer flights like to start a second movie right when their first one ends,
but they complain that the plane usually lands before they can see the ending.
So you're building a feature for choosing two movies whose total runtimes will equal
the exact flight length.
Write a function that takes an integer flight_length (in minutes) and a list of
integers movie_lengths (in minutes) and returns a boolean indicating whether there
are two numbers in movie_lengths whose sum equals flight_length.
When building your function:
Assume your users will watch exactly two movies
Don't make your users watch the same movie twice
Optimize for runtime over memory. """
# Start coding from here |
997,907 | 2f1a830e83c80bcb5e57a46890aa07638305f91d | """
class Phone:
def __init__(self, height, display, camera, version):
self.height = height
self.display = display
self.camera = camera
self.version = version
oppo = Phone(6, 5.7, 13, 7)
vivo = Phone(5.5, 5.8, 20, 8)
print(f"This is vivo display =>",vivo.display)
print(f"this is my oppo camera {oppo.camera} mp lense")
"""
"""
class ok:
name = "lol"
def __init__(self):
self.name= "GabbAr"
a = ok()
print(a.name)
print(ok.name)
"""
"""
class hero:
def __init__(self, name, age, power):
self.name = name
self.age = age
self.power = power
def compare_power(self, other): # to compare two diffrent objects
if self.power == other.power:
return True
else:
return False
ironman = hero('ironman', 35, 99)
captain = hero('captain', 28, 85)
if captain.compare_power(ironman):
print("Both have same power")
else:
print("Both have diffrent power")
"""
"""
class Student:
school = ("S.T. Xaviers High School") # class variable
def __init__(self, maths, english, gujrati): # instance variable
self.maths = maths
self.english = english
self.gujrati = gujrati
def set_marks_maths(self, marks):
self.maths = marks
def get_marks_maths(self):
print(self.maths)
def set_marks_english(self, marks):
self.english = marks
def get_marks_english(self):
print(self.english)
def set_marks_gujrati(self, marks):
self.gujrati = marks
def get_marks_gujrati(self):
print(self.gujrati)
def info(cls): # this is example of class method
print(Student.school)
def ok(): # This is example of static method
print("yes okay")
raj = Student(85, 45, 56)
raj.get_marks_gujrati()
raj.set_marks_gujrati(58)
raj.get_marks_gujrati()
raj.info() # to call class method
Student.ok() # to call static mathod
"""
'''
class A:
def f1(self):
print("f1 is working")
def f2(self):
print("f2 is working")
class B(A): # Example of inheritance
def f3(self):
print("f3 is working")
def f4(self):
print("f4 is working")
a = A()
b = B()
b.f1() # Example of inheritance
'''
'''
class A:
def f1(self):
print("f1 is working")
def f2(self):
print("f2 is working")
class B(A):
def f3(self):
print("f3 is working")
def f4(self):
print("f4 is working")
class C(B): # multi lvl inheritance bcs class c taking all features from class a and class b
def f5(self):
print("f5 is working")
a = A()
b = B()
c = C()
c.f1() # Multi lvl inheritance
'''
'''
class A:
def f1(self):
print("f1 is working")
def f2(self):
print("f2 is working")
class B:
def f3(self):
print("f3 is working")
def f4(self):
print("f4 is working")
class C(A,B): # Multiple inheritance
def f5(self):
print("f5 is working")
a = A()
b = B()
c = C()
c.f1()
'''
class A:
    """First base class for the MRO demo; announces its construction."""
    def __init__(self):
        print("A is initiated")
    def f1(self):
        print("f1 is working")
    def f2(self):
        print("f2 is working")
class B:
    """Second base class for the MRO demo; announces its construction."""
    def __init__(self):
        print("B is initiated")
    def f3(self):
        print("f3 is working")
    def f4(self):
        print("f4 is working")
class C(A,B):
    """Multiple-inheritance demo: the MRO is C -> A -> B (left to right)."""
    def __init__(self):
        # super().__init__() resolves through the MRO, so A.__init__ runs
        # here; B.__init__ is skipped because A's __init__ does not chain on.
        super().__init__()
        print("C is inititated")
# Lookup starts on C itself: C.__init__ runs (delegating to A via super())
# before printing its own message.
a = C()
|
997,908 | d6232c22f119b8c6edfcc7747f13c5999944a8cb | import datetime
import random
import uuid
from locust import HttpUser, task, between
from locust.contrib.fasthttp import FastHttpUser
class RealityTubeUser(FastHttpUser):
    """Locust load-test user that replays the PeerTube index-page API calls."""
    # Zero wait between tasks: issue requests back-to-back.
    wait_time = lambda *args, **kwargs: 0
    userId = None
    email = ''
    headers = None
    host = 'https://reality-tube.net/'
    def _index_page(self):
        """Issue the sequence of GETs a browser makes when loading the index.

        NOTE(review): custom-links and topmenu settings are each fetched
        twice — confirm the duplication mirrors real browser traffic and is
        intended.
        """
        self.client.get("/api/v1/config/")
        self.client.get("/api/v1/oauth-clients/local")
        self.client.get('/api/v1/videos/languages')
        self.client.get('/api/v1/plugins/peertube-plugin-simplelogo/public-settings')
        self.client.get('/api/v1/plugins/peertube-plugin-matomo/public-settings')
        self.client.get('/api/v1/plugins/peertube-plugin-custom-links/public-settings')
        self.client.get('/api/v1/plugins/peertube-plugin-custom-links/public-settings')
        self.client.get('/api/v1/videos/?start=0&count=25&sort=-publishedAt&skipCount=true')
        self.client.get('/api/v1/plugins/peertube-plugin-topmenu/public-settings')
        self.client.get('/api/v1/plugins/peertube-plugin-topmenu/public-settings')
    @task(1)
    def performance_test(self):
        """Single Locust task: load the index page."""
        self._index_page()
|
997,909 | 71eb2555faf009114c984c2cfc92bb407170fb42 | """Fixtures for testing mip app"""
import string
from typing import List
import pytest
from cg.apps.mip import MipAPI
@pytest.fixture
def valid_fastq_filename_pattern():
    """The pattern MIP fastq file names should match.

    e.g. 'xxx_R_1.fastq.gz' and 'xxx_R_2.fastq.gz'.  Both dots are now
    escaped (the original left the second '.' unescaped, so e.g.
    'x_R_1.fastqXgz' also matched); the redundant '{1}' quantifier was
    dropped.
    """
    return r"^.+_R_[1-2]\.fastq\.gz$"
def _full_content():
"""The content the files are made of"""
return string.ascii_letters
@pytest.fixture
def files_content(tmpdir):
    """The content the files are made of: one distinct letter per created file."""
    return _full_content()[0 : len(_simple_files(tmpdir))]
def simple(tmpdir):
    """Creates a dict with the data to use in the tests.

    Writes one fastq file per (read, flowcell, lane) combination into tmpdir,
    each holding a distinct single character, and returns
    {"files": [paths], "data": [metadata dicts]} in creation order.
    """
    flowcells = [1, 2, 3, 4, 5, 6, 7, 8]
    lanes = [1, 2, 3]
    reads = [1, 2]
    _simple = {"files": [], "data": []}
    i = 0  # index into the content pool; guarantees unique file contents
    for read in reads:
        for flowcell in flowcells:
            for lane in lanes:
                content = _full_content()[i]
                file_path = create_file(tmpdir, flowcell, lane, read, content)
                _simple["files"].append(file_path)
                data = create_file_data(file_path, flowcell, lane, read)
                _simple["data"].append(data)
                i += 1
    return _simple
def _simple_files(tmpdir):
    """Some files to test with (fixture-free helper so other fixtures can call it)."""
    return simple(tmpdir)["files"]
@pytest.fixture
def simple_files(tmpdir):
    """Some files to test with."""
    return _simple_files(tmpdir)
@pytest.fixture
def simple_files_data(tmpdir):
    """Data for link method: one metadata dict per created fastq file."""
    return simple(tmpdir)["data"]
def create_file(tmpdir, flowcell, lane, read, file_content):
    """Create an actual file on disk and return its py.path handle."""
    file_name = f"S1_FC000{flowcell}_L00{lane}_R_{read}.fastq.gz"
    file_path = tmpdir / file_name
    file_path.write(file_content)
    return file_path
def create_file_data(file_path, flowcell, lane, read):
    """Bundle metadata about a file on disk into the dict shape the tests expect."""
    return {
        "path": file_path,
        "lane": lane,
        "flowcell": flowcell,
        "read": read,
        "undetermined": False,
    }
# Trivial value fixtures used by the link tests.
@pytest.fixture
def cg_config():
    """mock relevant parts of a cg-config"""
    return {}
@pytest.fixture
def link_family():
    """mock family name"""
    return "family"
@pytest.fixture
def link_sample():
    """mock sample name"""
    return "sample"
class MockTB:
    """Trailblazer mock fixture: records whether link() was invoked."""
    def __init__(self):
        self._link_was_called = False
    def link(self, family: str, sample: str, analysis_type: str, files: List[str]):
        """Link files mock: discards its arguments and only records the call."""
        del family, sample, analysis_type, files
        self._link_was_called = True
    def link_was_called(self):
        """Check if link has been called"""
        return self._link_was_called
@pytest.fixture
def tb_api():
    """Trailblazer API fixture (the recording mock above)"""
    return MockTB()
@pytest.fixture(scope="session")
def mip_api():
    """MipAPI fixture, shared across the test session"""
    _mip_api = MipAPI(script="test/fake_mip.pl", pipeline="analyse rd_dna")
    return _mip_api
@pytest.fixture
def mip_config_path():
    """path to a mip config"""
    return "tests/fixtures/global_config.yaml"
@pytest.fixture
def case_id():
    """the name of a case"""
    return "angrybird"
|
997,910 | 1592f15714eafd518c2944b7b32abc795c47aadb | import re
import enchant
dictio = enchant.Dict("en_US")
def get_binary(mystring):
    """Return every binary string of the same length as `mystring`, ascending.

    E.g. a 2-character input yields ["00", "01", "10", "11"].  Replaces the
    original manual increment-and-pad loop with a zero-padded format over
    all 2**n values; the empty-string edge case (-> [""]) is preserved.
    """
    width = len(mystring)
    if width == 0:
        return [""]
    return [format(value, f"0{width}b") for value in range(2 ** width)]
def get_countries(path="C:/Users/cdobb/Desktop/Python_stuff/sunday_puzzle/07_14_2019/countries.csv"):
    """Read country names from a CSV whose second double-quote-delimited field is the name.

    Args:
        path: CSV file location; defaults to the original hard-coded path so
            existing call sites keep working.

    Returns:
        Lower-cased country names, one per input line.
    """
    output = []
    # 'with' guarantees the handle closes; iterating streams the lines.
    with open(path, "r") as big_file:
        for line in big_file:
            fields = re.split('"', line)
            output.append(fields[1].lower())
    return output
# Puzzle driver: find a country whose letters can be partitioned (keeping
# order) into two English words, by trying every 0/1 mask over the name.
foo = get_countries()
#ignore ones with spaces for now
bar = []
for x in foo:
    if " " not in x:
        bar.append(x)
#print(bar)
#1. get all binary numbers in strings for length of name
#print(bar[3])
#print(get_binary(bar[3]))
#2. check if 2 real words for 1's part and 0's part for each country
for x in bar:
    tempbin = get_binary(x)
    for bins in tempbin:
        i = 0
        thing1 = ""  # letters where the mask bit is 0
        thing2 = ""  # letters where the mask bit is 1
        for zerone in bins:
            if zerone == "0":
                thing1 += x[i]
                i+=1
            if zerone == "1":
                thing2 += x[i]
                i+=1
if thing1 != "" and thing2 != "" and dictio.check(thing1) and dictio.check(thing2):
print(x + ", " + thing1 + ", " + thing2) |
997,911 | 835b67ffb0534a463b18ffcc01affb6434903cd8 | #takes a list of words and returns numbers corresponding to the words' lengths
def idk(lst):
    """Return the length of each word in `lst`, preserving order.

    (Comprehension replaces the manual append loop; behaviour is unchanged.)
    """
    return [len(word) for word in lst]
# Demo: word lengths for a sample list -> [2, 8, 4, 5, 8]
i = ["hi", "westside", "just", "later", "opposite"]
print(idk(i))
|
997,912 | 599e9f50e2805cb123265cd1caadc27117096252 | APP_ENV = 'local'
# Local development settings for the NUTRITION_CHECK Flask app.
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/food'
SQLALCHEMY_TRACK_MODIFICATIONS = False
APP_NAME = 'NUTRITION_CHECK'
# NOTE(review): secret committed to source control — move it to an
# environment variable before deploying anywhere non-local.
APP_SECRET = 'fghjkl23456fcgvhbjnkmlrtyuio456723456'
|
997,913 | c2a7661f5a1888930192a1ceb916ba01cfb6a29f | from swampy.TurtleWorld import *
from polygon import *
def flower(turtle, petals, radius, angle):
    """Draw a flower of `petals` petals with the given arc radius and angle.

    Bug fix: the original ignored its `turtle` parameter and drew with the
    module-level `t`, so it could never drive any other turtle.
    """
    for i in range(petals):
        petal(turtle, radius, angle)
        lt(turtle, 360.0/petals)
def petal(turtle, radius, angle):
    """Draw one petal: two arcs of the given radius/angle joined by turns."""
    for i in range(2):
        arc(turtle, radius, angle)
        lt(turtle, 180-angle)
# Demo: draw a 5-petal flower and wait for the user to close the window.
world = TurtleWorld()
t = Turtle()
t.delay = 0.01
petals = 5
arc_radius = 120
angle = 60
flower(t, petals, arc_radius, angle)
die(t)
wait_for_user()
|
997,914 | 5451b0059d7cfb6ce31e017b3be2947cf083513d | # streamlit app demo
#
from collections import namedtuple
import streamlit as st
import pandas as pd
import numpy as np
import tensorflow as tf
from googletrans import Translator
st.title("IChiba Category Wizard")
DATA_URL = "data/ichiba_test_strict_100316_more.tsv.gz"
LABEL_CSV = "data/labels.csv"
Lab_name = namedtuple("lab_name", ["jpn", "eng"])
@st.cache
def load_data(nrows):
    """Load the first `nrows` Ichiba items plus the label-code lookup table.

    Returns (selected, lab_dict): selected has columns title/label/image_url;
    lab_dict maps a label code to its (jpn, eng) display names.
    """
    data = pd.read_csv(
        DATA_URL,
        nrows=nrows,
        compression="gzip",
        header=None,  # no header in tsv
        sep="\t",
        quotechar='"',
    )
    selected = data.iloc[:, [0, 1, 8]]  # title, label-path, img_url
    selected.columns = ["title", "label", "image_url"]
    labels = pd.read_csv(
        LABEL_CSV,
        header=None,
    )
    labels.columns = ["row", "lab", "jpn", "eng"]
    lab_dict = {
        lab: Lab_name(labels["jpn"][row], labels["eng"][row])
        for row, lab in enumerate(labels["lab"])
    }
    return selected, lab_dict
data_load_state = st.text("Loading data...")
data, labels = load_data(1000)
data_load_state.text("Done! (using st.cache)")
@st.cache
def load_model():
    """Load the classifier and the title/label match model from disk (cached)."""
    clf_model = tf.keras.models.load_model("data/100316")
    match_model = tf.keras.models.load_model("data/100316_matcher")
    return clf_model, match_model
model_load_state = st.text("Loading model...")
clf_model, match_model = load_model()
model_load_state.text("Done! (using st.cache)")
if st.checkbox("Show raw data"):
st.subheader("Raw data")
st.write(data)
st.subheader("Choose one product to show")
@st.cache
def gen_trans(t_):
    """Translate a product title to Chinese and English via Google Translate."""
    translator = Translator()
    t_zh = translator.translate(t_, dest="zh-CN").text
    t_en = translator.translate(t_, dest="en").text
    return t_zh, t_en
# Pick an item, show its translations and image, run the classifier, then
# compare the prediction against a manually chosen merchant category.
item_idx = st.slider("item_idx", 0, 999, 10)
item_data = data.iloc[item_idx, :]
t_, l_ = item_data["title"], item_data["label"]
t_zh, t_en = gen_trans(t_)
st.markdown(f'- *Title*:{t_}\n- *中文*:{t_zh}\n- *English*:{t_en}\n- *Label*:{labels[l_]}')
st.image(item_data["image_url"], width=None)
st.subheader("IC predicts category based on product title")
label_pred = clf_model(tf.constant([t_]))
st.write(labels[label_pred.numpy()[0].decode()])
st.subheader("Check merchant category selection")
# Reverse lookup: display names back to label codes for the match model.
inv_labels = {v: k for k, v in labels.items()}
label_merchant = st.selectbox("Merchant chooses a category", list(labels.values()))
st.write("Chosen:", label_merchant)
match_pred = match_model([tf.constant([t_]), tf.constant([inv_labels[label_merchant]])])
st.write(match_pred)
|
997,915 | 1ef29d08abef2ecf246cd6bfbbab0077f4a55e08 | import math
def func(n, arr):
    """Return the smallest total after the single best pair replacement found.

    NOTE(review): for each ordered pair (arr[i], arr[j]) this solves the
    quadratic arr[j]*x^2 - (arr[i]+arr[j])*x + arr[i] = 0, rounds an integer
    x near the midpoint of the two roots, and — when x divides arr[i] —
    considers replacing the pair's sum with arr[j]*x + arr[i]/x, keeping the
    largest saving.  The exact puzzle being solved is not stated in this
    file — confirm against the original problem statement.  `n` is unused.
    """
    delta = 0  # best saving found so far
    for i in range(len(arr)):
        for j in range(len(arr)):
            if i == j:
                continue
            sum_before = arr[i] + arr[j]
            # Discriminant of arr[j]*x^2 - (arr[i]+arr[j])*x + arr[i] = 0.
            d2 = (arr[i] + arr[j]) * (arr[i] + arr[j]) - 4 * arr[i] * arr[j]
            # print(f"d2 = {d2}")
            if d2 < 0:
                continue
            d = math.sqrt(d2)
            x1 = (arr[i] + arr[j] - d) / 2 / arr[j]
            x2 = (arr[i] + arr[j] + d) / 2 / arr[j]
            x = round(x1 + (x2 - x1) / 2)
            # print(f"ai = {arr[i]}; aj = {arr[j]}; x_i = {x_i}; x1 = {x1}; x2 = {x2}")
            if (arr[i] / x).is_integer():
                sum_after = arr[j] * x + arr[i] / x
                new_delta = sum_before - sum_after
                if new_delta > 0 and new_delta > delta:
                    delta = new_delta
    return int(sum(arr) - delta)
# Quick sanity checks; the value after '=' is the expected result.
print(f"{func(5, [1, 2, 3, 4, 5])} = 14")
print(f"{func(4, [4, 2, 4, 4])} = 14")
print(f"{func(5, [2, 4, 2, 3, 7])} = 18")
# n = int(input())
# arr = list(map(lambda _: int(_), input().split(' ')))
# print(func(n, arr))
|
997,916 | 16061d2153892f72e010cc99ed5c30f464ddbb59 | import random
import string
import time
def random_string_generator(size=10, chars=string.ascii_lowercase + string.digits):
    """Build a random string of `size` characters drawn (with replacement) from `chars`."""
    picked = []
    for _ in range(size):
        picked.append(random.choice(chars))
    return ''.join(picked)
current_time_milli = lambda : int(round(time.time() * 1000)) |
997,917 | b6c94c34952b4d3595d5de032a3d58b0aa3f3afd | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import operator
# Load one week of MTA turnstile data and rank stations by median traffic.
df = pd.read_csv('http://web.mta.info/developers/data/nyct/turnstile/turnstile_180630.txt')
df.columns = df.columns.str.strip()
# ENTRIES/EXITS are cumulative counters; diff() yields per-interval counts.
df['real_entries'] = df['ENTRIES'].diff()
df['real_exits'] = df['EXITS'].diff()
# NOTE(review): these assignments blank out ENTIRE rows (every column becomes
# NaN), not just the offending column — confirm that is intended rather than
# df.loc[mask, 'real_entries'] = np.nan.
df.loc[df['real_entries'] < 0] = np.nan
df.loc[df['real_exits'] < 0] = np.nan
df.loc[df['real_entries'] > 100000] = np.nan
df.loc[df['real_exits'] > 100000] = np.nan
print(df.columns)
station_1_av = df[df['STATION'] == '1 AV']
print(df.sort_values(by='real_entries', ascending=False).head(100))
# NOTE(review): groupby(...)['a', 'b'] tuple selection is deprecated in
# modern pandas; the supported spelling is [['real_entries', 'real_exits']].
print(df.groupby(['STATION'])['real_entries', 'real_exits'].median())
new_df = df.groupby(['STATION'])['real_entries', 'real_exits'].median()
# Rank stations by combined median entries+exits, busiest first.
totals = {}
for index, row in new_df.iterrows():
    totals[index] = row['real_entries'] + row['real_exits']
sorted_totals = sorted(totals.items(), key=operator.itemgetter(1), reverse=True)
print(sorted_totals)
|
997,918 | 75d421d04b665eb92406d486d214c25bd76d4834 | import re
import numpy as np
import gensim
import requests
import json
from scipy import spatial
# Corpus of function snippets to match against, one per line.
data = []
with open('./avas_list.txt') as inputfile:
    for line in inputfile:
        data.append(line)
print("Loaded function data")
# word2vec vectors capped at the 500k most frequent words to bound memory.
model = gensim.models.KeyedVectors.load_word2vec_format('./GoogleNews-vectors-negative300.bin', binary=True, limit=500000)
print("Loaded model")
index2word_set = set(model.wv.index2word)
def avg_feature_vector(sentence, model, num_features, index2word_set):
    """Average the embedding vectors of the in-vocabulary words of `sentence`.

    Words absent from `index2word_set` are ignored; when nothing matches,
    the zero vector is returned unchanged.
    """
    feature_vec = np.zeros((num_features,), dtype='float32')
    n_words = 0
    for word in sentence.split():
        if word not in index2word_set:
            continue
        n_words += 1
        feature_vec = np.add(feature_vec, model[word])
    if n_words > 0:
        feature_vec = np.divide(feature_vec, n_words)
    return feature_vec
def make_list(name):
    """Split an identifier into lower-cased words.

    snake_case is split on underscores; anything else is treated as
    camelCase and split on uppercase letters.
    NOTE(review): a leading uppercase letter (PascalCase) produces an empty
    first word, and digits also trigger a split (islower() is False for
    them) — confirm callers only pass camelCase/snake_case identifiers.
    """
    words = []
    if('_' in name): #if snake case
        name = name.lower()
        words = name.split('_')
    else: #identify if camel case
        word = ""
        for c in name:
            if(c.islower()):
                word +=c
            else:
                # A non-lowercase char starts a new word; flush the old one.
                words.append(word)
                word = ""
                word += c.lower()
        words.append(word)
    return words
def make_sentence(words):
    """Join a word list into a single space-separated sentence string.

    (str.join replaces the manual concatenate-and-trim loop; the empty-list
    result is "" in both versions.)
    """
    return " ".join(words)
def similarity_sentences(s1, s2):
    """Cosine similarity of the two sentences' average word vectors.

    Uses the module-level 300-d word2vec `model`; the result may be NaN when
    a sentence has no in-vocabulary words (zero average vector).
    """
    s1_afv = avg_feature_vector(s1, model=model, num_features=300, index2word_set=index2word_set)
    s2_afv = avg_feature_vector(s2, model=model, num_features=300, index2word_set=index2word_set)
    sim = 1 - spatial.distance.cosine(s1_afv, s2_afv)
    return sim
#s1 = make_sentence(make_list('remove'))
#s2 = make_sentence(make_list('delete'))
#print(similarity_sentences(s1,s2))
def camel_to_snake(name):
    """Convert a camelCase identifier to snake_case via the shared word splitter.

    (str.join replaces the manual append-underscore-and-trim loop.)
    """
    return "_".join(make_list(name))
def snake_to_camel(name):
    """Convert snake_case to camelCase using the shared word splitter."""
    list = make_list(name)  # NOTE(review): shadows the builtin `list`
    new_name = ""
    pp = False  # True after the first word; the first word stays lower-cased
    for w in list:
        c = w[0].upper() if pp else w[0].lower()
        pp = True
        new_name += c
        new_name += w[1:]
    return new_name
def change_case(name):
    """Flip naming convention: snake_case becomes camelCase and vice versa."""
    return snake_to_camel(name) if "_" in name else camel_to_snake(name)
def find_synonyms(word):
    """Return up to three synonyms for `word`, ranked by embedding similarity."""
    #dev
    # NOTE(review): this early return is a development stub — everything
    # below it (the Big Huge Thesaurus lookup and similarity ranking) is
    # currently unreachable.  Remove the return to re-enable it.
    return ["sum","total","append"]
    p = make_sentence(make_list(word))
    s_list = []
    #r = requests.get('https://wordsapiv1.p.mashape.com/words/'+word+'/synonyms'
    #  , headers={"x-rapidapi-host": "wordsapiv1.p.rapidapi.com",
    #    "x-rapidapi-key": ""} )
    #print(json.loads(r.content))
    #synonym_list = json.loads(r.content)['synonyms']
    # NOTE(review): API key embedded in source — move to configuration.
    r = requests.get('https://words.bighugelabs.com/api/2/3d61b2dab0e22df66fd693006de7a367/'+word+'/json')
    j = json.loads(r.content)
    synonym_list = []
    for (key,val) in j.items():
        if('syn' in val.keys()):
            synonym_list += val['syn']
    #synonym_list = j['noun']['syn'] + j['verb']['syn']
    for s in synonym_list:
        # Multi-word synonyms cannot replace a single identifier word.
        if(s.count(' ')>0):
            continue
        p1 = make_sentence(make_list(s))
        sim = similarity_sentences(p,p1)
        obj = ( s, sim)
        if(not np.isnan(sim)):
            s_list.append(obj)
    #print(s_list)
    # Ascending sort by similarity; the last three are the best matches.
    s_list.sort(key = lambda synonym: synonym[1] )
    firsts = [t[0] for t in s_list]
    return firsts[-3:]
def getReplacementsName(name):
    """Generate alternative function names from per-word synonym substitutions.

    Returns the cartesian product of each word's synonyms (snake_case joined)
    plus each candidate converted to the opposite naming convention.
    """
    #for each word in the name, get the replacements
    words = make_list(name)
    replace_dict = []
    for w in words:
        w_replacements = find_synonyms(w)
        w_replacements.append(w)
        w_replacements = list(set(w_replacements))  # dedupe; order not preserved
        replace_dict.append(w_replacements)
    # Cartesian product of per-word choices, joined with underscores.
    a = replace_dict[0]
    for b in replace_dict[1:]:
        o = []
        for ia in a:
            for ib in b:
                o.append(ia+"_"+ib)
        a = o
    # Also offer each candidate in the flipped naming convention.
    ca = a.copy()
    for poss in a:
        ca.append(change_case(poss))
    return ca
def extractName(regex):
    """Split a snippet around the identifier that follows its first 'def'.

    Returns (prefix_through_def_plus_one_char, name, suffix) where `name` is
    the run of [A-Za-z0-9_] characters starting right after that prefix.
    """
    start = regex.find("def") + 4
    before = regex[:start]
    pos = start
    name = ""
    while True:
        name += regex[pos]
        pos += 1
        if pos >= len(regex):
            break
        if not (regex[pos].isalpha() or regex[pos].isnumeric() or regex[pos] == "_"):
            break
    return before, name, regex[pos:]
def replaceFunctionNames(regex):
    """Extend a regex containing 'def <name>' with synonym-named alternatives.

    Each generated name is appended as an extra '|(...)' branch; input
    without a function definition is returned unchanged.
    """
    if(regex.find("def") == -1): #has no function def
        return regex
    before,name,after = extractName(regex)
    names = getReplacementsName(name)
    final_regex = regex
    for n in names:
        r = before + n
        r = r + after
        final_regex += '|('+r+')'
    return final_regex
def lookup(regex):
    """Broaden a 'def <name>' regex with similar corpus functions and synonyms.

    Scans the loaded corpus for definitions whose name embedding is similar
    (cosine score > 0.7) and appends them as alternation branches, then
    appends the synonym-based variants.  The regex is returned unchanged
    when it contains no 'def' or the extracted name is not a plain
    identifier.
    """
    #nothing to do
    if(regex.find("def") == -1):
        return regex
    #fast search
    fast_regex = regex
    found = False
    r_before,r_def,r_after = extractName(regex)
    if not re.match(r'^[A-Za-z0-9_]+$', r_def):
        return regex
    r = make_sentence(make_list(r_def))
    for d in data:
        d_before,d_def,d_after = extractName(d)
        d_s = make_sentence(make_list(d_def))
        score = similarity_sentences(d_s,r)
        #print(r, d_s,score,d)
        if(score > 0.7):
            fast_regex += '|('+d_before+d_def+d_after+')'
            found = True
    #if(not found):
    #    print("Nothing good and fast")
    #else:
    #    print("GOOD:",fast_regex)
    #look for the synonyms
    r = replaceFunctionNames(regex) + '|'+fast_regex
    return r
#print(extractName("def delete_selected"))
print(lookup('def removeSelected'))
#print(lookup('somestuff def base64ToInt\([a-z]*\): func'))
#print(lookup('somestuff def checkErr: func'))
#print(lookup('somestuff def add_one[a-z]*: func'))
#print(replaceFunctionNames('somestuff def addOne\(\): func'))
|
997,919 | a4fd04facdc2e0bef0d84215724b4f393d919b03 | import copy
import torch
import pytest
from transformers import AutoModel
from allennlp.common import Params
from allennlp.modules.transformer.attention_module import T5Attention
from transformers.models.t5.configuration_t5 import T5Config
from transformers.models.t5.modeling_t5 import T5Attention as HFT5Attention
from allennlp.nn.util import min_value_of_dtype
PARAMS_DICT = {
"hidden_size": 6,
"num_heads": 2,
"key_value_proj_dim": 3,
"dropout": 0.0,
"relative_attention_num_buckets": 2,
}
@pytest.fixture
def params_dict():
    # Fresh deep copy per test so in-test mutations never leak across tests.
    return copy.deepcopy(PARAMS_DICT)
@pytest.fixture
def params(params_dict):
    # Wrap the raw dict in an allennlp Params object.
    return Params(params_dict)
@pytest.fixture
def t5_attention(params):
    # Build the module under test; duplicate() keeps the fixture reusable.
    return T5Attention.from_params(params.duplicate())
def test_can_construct_from_params(t5_attention, params_dict):
    """Constructed module exposes the hyperparameters it was built from."""
    assert t5_attention.num_attention_heads == params_dict["num_heads"]
    assert t5_attention.attention_head_size == params_dict["key_value_proj_dim"]
    # Total projection width is heads * per-head dim.
    assert (
        t5_attention.all_head_size == params_dict["num_heads"] * params_dict["key_value_proj_dim"]
    )
    # All four projections consume the model hidden size.
    assert t5_attention.query.in_features == params_dict["hidden_size"]
    assert t5_attention.key.in_features == params_dict["hidden_size"]
    assert t5_attention.value.in_features == params_dict["hidden_size"]
    assert t5_attention.output.in_features == params_dict["hidden_size"]
    assert t5_attention.dropout == params_dict["dropout"]
def test_forward_against_huggingface_output(params_dict):
    """Allennlp T5Attention matches HuggingFace T5Attention numerically.

    Both modules are seeded identically so their weights coincide; the test
    then compares the forward outputs on a small random batch.
    """
    hidden_states = torch.randn(2, 3, 6)
    attention_mask = torch.tensor([[0, 1, 0], [1, 1, 0]])
    # Translate our param names into HF's T5Config vocabulary.
    hf_kwargs = {
        "d_model": params_dict["hidden_size"],
        "d_kv": params_dict["key_value_proj_dim"],
        "num_heads": params_dict["num_heads"],
        "relative_attention_num_buckets": params_dict["relative_attention_num_buckets"],
        "dropout_rate": params_dict["dropout"],
    }
    torch.manual_seed(1234)
    hf_module = HFT5Attention(T5Config(**hf_kwargs), has_relative_attention_bias=False)
    torch.manual_seed(1234)
    params = copy.deepcopy(params_dict)
    params["normalize"] = False  # only for this test, as HF does not normalize.
    t5_attention = T5Attention(**params)
    # setting to eval mode to avoid non-deterministic dropout.
    t5_attention = t5_attention.eval()
    hf_module = hf_module.eval()
    output = t5_attention.forward(hidden_states, mask=attention_mask)
    # HF expects an additive mask broadcast to (batch, heads, seq, seq):
    # masked positions carry the dtype's minimum value, kept positions 0.
    attention_mask_hf = (attention_mask == 0).view((2, 1, 1, 3)).expand(
        2, 2, 3, 3
    ) * min_value_of_dtype(hidden_states.dtype)
    hf_output = hf_module.forward(hidden_states, mask=attention_mask_hf)
    hs = output.hidden_states
    assert torch.allclose(hs, hf_output[0])
@pytest.mark.parametrize(
    "pretrained_name, relevant_module",
    [
        ("t5-small", "encoder.block.0.layer.0.SelfAttention"),
    ],
)
def test_loading_from_pretrained_weights_using_model_name(pretrained_name, relevant_module):
    """A submodule loaded by name produces the same output as the HF module
    it was copied from (requires network access to download the weights)."""
    torch.manual_seed(1234)
    module = T5Attention.from_pretrained_module(pretrained_name, relevant_module=relevant_module)
    torch.manual_seed(1234)
    # Pull the corresponding submodule directly out of the HF model.
    pretrained_module = dict(AutoModel.from_pretrained(pretrained_name).named_modules())[
        relevant_module
    ]
    batch_size = 2
    seq_len = 3
    dim = module.query.in_features
    hidden_states = torch.randn(batch_size, seq_len, dim)
    attention_mask = torch.tensor([[1, 1, 0], [1, 0, 1]])[:, None, None, :]
    # setting to eval mode to avoid non-deterministic dropout.
    module = module.eval()
    pretrained_module = pretrained_module.eval()
    torch.manual_seed(1234)
    output = module(hidden_states, mask=attention_mask.squeeze()).hidden_states
    # The attn_mask is processed outside the self attention module in HF bert models.
    attention_mask = (~(attention_mask == 1)) * min_value_of_dtype(hidden_states.dtype)
    torch.manual_seed(1234)
    hf_output = pretrained_module(hidden_states, mask=attention_mask)[0]
    assert torch.allclose(output, hf_output)
|
997,920 | 5cbb3bf73b4f83885b6c5e22369680c7e5eaa815 | import hdf_pull
import netCDF_build as netcdf
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import csv
files = hdf_pull.file_path("\Aqua")
#print files
"""plotting frp with frp intensity colour for single hdf file"""
def plot_fire(FP_data,i,date_time):
    """Plot fire radiative power (FRP) detections on a basemap and save a PNG.

    FP_data   : 2-D array whose rows are [latitude, longitude, FRP, confidence]
    i         : frame index used in the output filename
    date_time : title string for the plot
    """
    latitude = FP_data[0,:]
    longitude = FP_data[1,:]
    FRP = FP_data[2,:]
    # Transverse-Mercator map; llcrnr/urcrnr give the lower-left and
    # upper-right corners, lon_0/lat_0 the projection centre.
    # resolution='i' selects intermediate-resolution coastlines.
    m = Basemap(llcrnrlon=124,llcrnrlat=-17,urcrnrlon=129,urcrnrlat=-14,
                resolution='i',projection='tmerc',lon_0=126,lat_0=-15)
    m.drawcoastlines()
    m.drawmapboundary()
    m.fillcontinents(lake_color='aqua',zorder=0)
    # draw parallels and meridians every half degree.
    m.drawparallels(np.arange(-40,61.,0.5), labels = [True])
    m.drawmeridians(np.arange(100.,140.,0.5), labels = [True])
    x, y = m(longitude, latitude)
    # Symmetric-log colour scale so both weak and very strong fires resolve.
    m.scatter(x, y, c=FRP, s = 150, marker ='^', zorder=10,
              norm=mpl.colors.SymLogNorm(linthresh=10, vmin=0, vmax=(1000)))
    m.shadedrelief()
    cb = m.colorbar()
    cb.set_ticks([0,10,100,500,1000])
    plt.title(date_time)
    # BUG FIX: save *before* show() -- once the interactive window is closed
    # the rendered figure may be gone, so the original call wrote blank
    # images.  Also use a raw string: '\U' in a normal literal is an escape
    # sequence on Python 3.
    plt.savefig(r'C:\Users\Hannah.N\Documents\Data\Sept_Oct2016_data\images\MODIS_' + str(i) + '.png')
    plt.show()
    plt.close()
file_info = netcdf.file_info()
i = 1
# Collect every failing entry; the original re-created this list inside the
# exception handler, so only the most recent failure was ever recorded.
bad_files = []
for x in file_info:
    try:
        data = hdf_pull.read_in(x[0])
        FP_data = hdf_pull.FP_data_pull(data)
        date_time = x[2]
        plot_fire(FP_data, i, date_time)
        i = i + 1
    except ValueError:
        bad_files.append(x)
# Write all failures once, after the loop; the original reopened the file in
# 'wb' mode per error, truncating the previous contents each time.
if bad_files:
    with open('bad_files', 'wb') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        for bad in bad_files:
            wr.writerow(bad)
"""
file = files[1]
data = hdf_pull.read_in(file[0])
hdf_pull.list_sds(data)
FP_data = hdf_pull.FP_data_pull(data)
FRP_map_plot.plot_fire(FP_data)
"""
|
997,921 | e68fccf24607b81344e350c18565fe43e4f364fe | ## Randy: This run took about 4 hours, as it is set up now.
"""
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
from pyclaw import data
import numpy as np
#------------------------------
def setrun(claw_pkg='geoclaw'):
#------------------------------
    """
    Define the parameters used for running Clawpack.
    The returned object is later written out as the *.data files read by
    the Fortran code.
    INPUT:
        claw_pkg expected to be "geoclaw" for this setrun.
    OUTPUT:
        rundata - object of class ClawRunData
    """
    assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
    ndim = 2
    rundata = data.ClawRunData(claw_pkg, ndim)
    #------------------------------------------------------------------
    # Problem-specific parameters to be written to setprob.data:
    #------------------------------------------------------------------
    #probdata = rundata.new_UserData(name='probdata',fname='setprob.data')
    #------------------------------------------------------------------
    # GeoClaw specific parameters:
    #------------------------------------------------------------------
    rundata = setgeo(rundata) # Defined below
    #------------------------------------------------------------------
    # Standard Clawpack parameters to be written to claw.data:
    # (or to amr2ez.data for AMR)
    #------------------------------------------------------------------
    clawdata = rundata.clawdata # initialized when rundata instantiated
    clawdata.restart = False # Turn restart switch on or off
    # Set single grid parameters first.
    # See below for AMR parameters.
    # ---------------
    # Spatial domain:
    # ---------------
    # Number of space dimensions:
    clawdata.ndim = ndim
    # Lower and upper edge of computational domain (lon/lat, degrees):
    # clawdata.xlower = 137.57 ##
    # clawdata.xupper = 141.41 ##
    # clawdata.ylower = 39.67 ##
    # clawdata.yupper = 44.15 ##
    # For OK08 grid:
    # clawdata.xlower = 138.5015 ##
    # clawdata.xupper = 140.541 ##
    # clawdata.ylower = 40.5215 ##
    # clawdata.yupper = 43.2988 ##
    clawdata.xlower = 139.05 ##
    clawdata.xupper = 140. ##
    clawdata.ylower = 41.6 ##
    clawdata.yupper = 42.55 ##
    # # Number of grid cells:
    # clawdata.mx = 36 ## 3.84 deg/36 cells = 384 sec/cell = 16*24 sec/cell
    # clawdata.my = 42 ## 4.48 deg/42 cells = 384 sec/cell = 16*24 sec/cell
    # clawdata.mx = 576 ## 3.84 deg/576 cells = 24 sec/cell
    # clawdata.my = 672 ## 4.48 deg/672 cells = 24 sec/cell
    # clawdata.mx = 84 ## 8*24 sec/cell
    # clawdata.my = 72 ## 8*24 sec/cell
    clawdata.mx = 60
    clawdata.my = 60
    # ---------------
    # Size of system:
    # ---------------
    # Number of equations in the system:
    clawdata.meqn = 3
    # Number of auxiliary variables in the aux array (initialized in setaux)
    clawdata.maux = 3
    # Index of aux array corresponding to capacity function, if there is one:
    clawdata.mcapa = 2
    # -------------
    # Initial time:
    # -------------
    clawdata.t0 = 0.0
    # -------------
    # Output times:
    #--------------
    # Specify at what times the results should be written to fort.q files.
    # Note that the time integration stops after the final output time.
    # The solution at initial time t0 is always written in addition.
    clawdata.outstyle = 1 ##
    if clawdata.outstyle==1:
        # Output nout frames at equally spaced times up to tfinal:
        # Note: Frame time intervals = (tfinal-t0)/nout
        clawdata.nout = 10 ## Number of frames (plus the t = 0.0 frame)
        clawdata.tfinal = 6.5*60 ## End run time in Seconds
    elif clawdata.outstyle == 2:
        # Specify a list of output times.
        from numpy import arange,linspace
        #clawdata.tout = list(arange(0,3600,360)) + list(3600*arange(0,21,0.5))
        # clawdata.tout = list(linspace(0,32000,9)) + \
        # list(linspace(32500,40000,16))
        clawdata.tout = list(linspace(0,4,2))
        clawdata.nout = len(clawdata.tout)
    elif clawdata.outstyle == 3:
        # Output every iout timesteps with a total of ntot time steps:
        iout = 1
        ntot = 1
        clawdata.iout = [iout, ntot]
    # ---------------------------------------------------
    # Verbosity of messages to screen during integration:
    # ---------------------------------------------------
    # The current t, dt, and cfl will be printed every time step
    # at AMR levels <= verbosity. Set verbosity = 0 for no printing.
    # (E.g. verbosity == 2 means print only on levels 1 and 2.)
    clawdata.verbosity = 1
    # --------------
    # Time stepping:
    # --------------
    # if dt_variable==1: variable time steps used based on cfl_desired,
    # if dt_variable==0: fixed time steps dt = dt_initial will always be used.
    clawdata.dt_variable = 1
    # Initial time step for variable dt.
    # If dt_variable==0 then dt=dt_initial for all steps:
    clawdata.dt_initial = 0.016
    # Max time step to be allowed if variable dt used:
    clawdata.dt_max = 1e+99
    # Desired Courant number if variable dt used, and max to allow without
    # retaking step with a smaller dt:
    clawdata.cfl_desired = 0.75
    clawdata.cfl_max = 1.0
    # Maximum number of time steps to allow between output times:
    clawdata.max_steps = 50000
    # ------------------
    # Method to be used:
    # ------------------
    # Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
    clawdata.order = 2
    # Transverse order for 2d or 3d (not used in 1d):
    clawdata.order_trans = 2
    # Number of waves in the Riemann solution:
    clawdata.mwaves = 3
    # List of limiters to use for each wave family:
    # Required: len(mthlim) == mwaves
    clawdata.mthlim = [3,3,3]
    # Source terms splitting:
    # src_split == 0 => no source term (src routine never called)
    # src_split == 1 => Godunov (1st order) splitting used,
    # src_split == 2 => Strang (2nd order) splitting used, not recommended.
    clawdata.src_split = 1
    # --------------------
    # Boundary conditions:
    # --------------------
    # Number of ghost cells (usually 2)
    clawdata.mbc = 2
    # Choice of BCs at xlower and xupper:
    # 0 => user specified (must modify bcN.f to use this option)
    # 1 => extrapolation (non-reflecting outflow)
    # 2 => periodic (must specify this at both boundaries)
    # 3 => solid wall for systems where q(2) is normal velocity
    clawdata.mthbc_xlower = 1 # Open Left BC
    clawdata.mthbc_xupper = 1 # Open Right BC
    clawdata.mthbc_ylower = 1 # Open Bottom BC
    clawdata.mthbc_yupper = 1 # Open Top BC
    # ---------------
    # AMR parameters:
    # ---------------
    # max number of refinement levels:
    mxnest = 5 ##
    clawdata.mxnest = -mxnest # negative ==> anisotropic refinement in x,y,t
    # List of refinement ratios at each level (length at least mxnest-1)
    ## Levels 2 3 4 5
    clawdata.inratx = [2,4,4,6] ##
    clawdata.inraty = [2,4,4,6] ##
    clawdata.inratt = [2,4,4,2] ##
    # Specify type of each aux variable in clawdata.auxtype.
    # This must be a list of length maux, each element of which is one of:
    # 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
    clawdata.auxtype = ['center','capacity','yleft']
    clawdata.tol = -1.0 # negative ==> don't use Richardson estimator
    clawdata.tolsp = 0.5 # used in default flag2refine subroutine
    # (Not used in geoclaw!)
    clawdata.kcheck = 3 # how often to regrid (every kcheck steps)
    clawdata.ibuff = 2 # width of buffer zone around flagged points
    clawdata.tchk = [33000., 35000.] # when to checkpoint
    # More AMR parameters can be set -- see the defaults in pyclaw/data.py
    return rundata
# ----------------------
#-------------------
def setgeo(rundata):
#-------------------
    """
    Set GeoClaw specific runtime parameters (bathymetry/topography files,
    moving-topography source, refinement regions, gauges, fixed grids).
    Mutates and returns *rundata*.
    """
    try:
        geodata = rundata.geodata
    except:
        # NOTE(review): bare except kept for byte-compatibility; only
        # AttributeError is expected here.
        print "*** Error, this rundata has no geodata attribute"
        raise AttributeError("Missing geodata attribute")
    # == setgeo.data values ==
    geodata.variable_dt_refinement_ratios = True ## Overrides clawdata.inratt, above
    geodata.igravity = 1
    geodata.gravity = 9.81
    geodata.icoordsys = 2
    geodata.Rearth = 6367.5e3
    geodata.icoriolis = 0
    # == settsunami.data values ==
    geodata.sealevel = 0.
    geodata.drytolerance = 1.e-2
    geodata.wavetolerance = 1.e-1 ##
    geodata.depthdeep = 1.e6 ## Definition of "deep" water
    geodata.maxleveldeep = 10 ## Restriction on the number of deep water levels
    geodata.ifriction = 1 ## Friction switch. 0=off, 1=on
    # geodata.coeffmanning =0.0
    geodata.coeffmanning =.025
    geodata.frictiondepth = 10.
    #okushiri_dir = '/Users/FrankGonzalez/daily/modeling/tsunami-benchmarks/github/' \
    #+ 'FrankGonzalez/geoclaw-group/benchmarks/bp09' ##
    okushiri_dir = '..' ## this directory
    # == settopo.data values ==
    geodata.topofiles = []
    # for topography, append lines of the form
    # [topotype, minlevel, maxlevel, t1, t2, fname]
    # geodata.topofiles.append([1, 1, 1, 0, 1.e10, \
    # okushiri_dir + '/OK24.tt1']) ## 24-s, ~550-740 m Entire Domain (Dmitry's version of Kansai U.)
    geodata.topofiles.append([1, 1, 1, 0, 1.e10, \
    okushiri_dir + '/OK08.tt1']) ## 8-s, ~184-247 m Okushiri (Dmitry's version of Kansai U.)
    geodata.topofiles.append([1, 1, 1, 0, 1.e10, \
    okushiri_dir + '/OK03.tt1']) ## 2.67 s (8/3s), ~61-82 m Okushiri (Dmitry's version of Kansai U.)
    geodata.topofiles.append([1, 1, 1, 0., 1.e10, \
    okushiri_dir + '/AO15.tt1']) ## 0.53-0.89 s, ~16.5-20.4 m, Aonae (Dmitry's version of Kansai U.)
    # geodata.topofiles.append([1, 1, 1, 0, 1.e10, \
    # okushiri_dir + '/MO01.tt1']) ## 0.89 s, ~20-27 m, Monai (Dmitry's version of Kansai U.)
    # geodata.topofiles.append([1, 1, 1, 0., 1.e10, \
    # okushiri_dir + '/MB05.tt1']) ## 0.13-0.18 s, ~4 m Monai (Dmitry's version of Kansai U.)
    # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \
    # okushiri_dir + '/depth40_138.txt']) ## JODC 500 m
    # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \
    # okushiri_dir + '/depth40_140.txt']) ## JODC 500 m
    # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \
    # okushiri_dir + '/depth42_138.txt']) ## JODC 500 m
    # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \
    # okushiri_dir + '/depth42_140.txt']) ## JODC 500 m
    # == setdtopo.data values ==
    geodata.dtopofiles = []
    # for moving topography, append lines of the form: (<= 1 allowed for now!)
    # [topotype, minlevel,maxlevel,fname]
    geodata.dtopofiles.append([1,2,3, okushiri_dir + '/HNO1993.txyz']) ## Dmitry N.'s version of Kansai U.
    # == setqinit.data values ==
    geodata.iqinit = 0
    geodata.qinitfiles = []
    # for qinit perturbations, append lines of the form: (<= 1 allowed for now!)
    # [minlev, maxlev, fname]
    #geodata.qinitfiles.append([1, 1, 'hump.xyz'])
    # == setregions.data values ==
    geodata.regions = []
    # to specify regions of refinement append lines of the form
    # [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
    # Note: Level 1 = 24 s & Levels [2,3,4,5] = RF [3,3,3,8] => Res of 8 sec to 8/3 sec to 8/9 to 1/9 sec/cell
    # Grid Limits
    # Name x1 x2 y1 y2
    # OK24 137.53666670 141.53000000 39.53666670 44.26333330
    # HNO 138.50000000 140.55000000 40.51666670 43.30000000
    # OK08 138.50111110 140.55222220 40.52111110 43.29888890
    # OK03 139.38925930 139.66407410 41.99592590 42.27074070
    # AO15 139.43419750 139.49987650 42.03118520 42.07251850
    # MO01 139.41123460 139.43320990 42.07790120 42.14580250
    # MB05 139.41385190 139.42639510 42.09458550 42.10343920
    #geodata.regions.append([1, 1, 0., 1e9, 0.0, 360.0, -90.0, 90.0]) ## OK24: 24-s, ~550-740 m Entire Domain
    geodata.regions.append([1, 2, 0., 1e9, 138.5, 139.7, 41.4, 43.3]) ## OK08: 8-s, ~184-247 m Okushiri
    geodata.regions.append([1, 3, 0., 1e9, 139.39, 139.6, 42.0, 42.25]) ## OK03: 2.67 s (8/3s), ~61-82 m Okushiri
    # geodata.regions.append([1, 4, 0., 1e9, 139.42, 139.57, 42.03, 42.23]) ## AO15: 0.53-8/9 s, ~16.5-20.4 m, Aonae
    #geodata.regions.append([1, 4, 0., 1e9, 139.40, 139.46, 42.03, 42.22]) ## West coast Okushiri
    geodata.regions.append([4, 4, 90., 1e9, 139.42, 139.431, 42.07, 42.12])
    # == setgauges.data values ==
    geodata.gauges = []
    # for gauges append lines of the form [gaugeno, x, y, t1, t2]
    # geodata.gauges.append([1,139.429211710298,42.188181491811,0.0,1e9]) ## Tsuji Obs
    # geodata.gauges.append([3,139.411185686023,42.162762869034,0.0,1e9]) ## Tsuji Obs
    # geodata.gauges.append([5,139.418261206409,42.137404393442,0.0,1e9]) ## Tsuji Obs
    geodata.gauges.append([6,139.428035766149,42.093012384481,0.0,1e9]) ## Tsuji Obs
    geodata.gauges.append([7,139.426244998662,42.116554785296,0.0,1e9]) ## Tsuji Obs
    geodata.gauges.append([8,139.423714744650,42.100414145210,0.0,1e9]) ## Tsuji Obs
    geodata.gauges.append([9,139.428901803617,42.076636582137,0.0,1e9]) ## Tsuji Obs
    # geodata.gauges.append([10,139.427853421935,42.065461519438,0.0,1e9]) ## Tsuji Obs
    # geodata.gauges.append([11,139.451539852594,42.044696547058,0.0,1e9]) ## Tsuji Obs
    # geodata.gauges.append([12,139.456528443496,42.051692262353,0.0,1e9]) ## Tsuji Obs
    # geodata.gauges.append([13,139.456528443496,42.051692262353,0.0,1e9]) ## Tsuji Obs
    #
    # == setfixedgrids.data values ==
    # One small fixed output grid (and a forced level-5 refinement region)
    # centered on each gauge; gauges 8 and 9 get hand-tuned extents.
    geodata.fixedgrids = []
    for g in geodata.gauges:
        xg = g[1]
        yg = g[2]
        xg1 = xg - 0.001
        xg2 = xg + 0.002
        yg1 = yg - 0.001
        yg2 = yg + 0.002
        nx = 31
        ny = 31
        gaugeno = g[0]
        if gaugeno == 9:
            xg2 = xg + 0.003
            nx = 41
        if gaugeno == 8:
            xg1 = xg - 0.002
            xg2 = xg + 0.001
            yg1 = yg - 0.002
            yg2 = yg + 0.001
        geodata.fixedgrids.append([210.0,360.0,11,xg1,xg2,yg1,yg2,nx,ny,0,1])
        geodata.regions.append([5, 5, 180., 1e9, xg1,xg2,yg1,yg2])
    return rundata
# ----------------------
if __name__ == '__main__':
    # Set up run-time parameters and write all data files.
    import sys
    # An optional single command-line argument overrides the claw_pkg name.
    if len(sys.argv) == 2:
        rundata = setrun(sys.argv[1])
    else:
        rundata = setrun()
    rundata.write()
|
997,922 | f465bcc50bd3cb5bfc28b1d3466c52f4a13d7761 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import ConfigParser
import os
import datetime
import random
def gen_dates(base_date, days):
    """Yield *days* consecutive dates, starting at *base_date*.

    :param base_date: first date to yield
    :param days: how many dates to generate
    :return: generator of base_date, base_date+1d, ..., base_date+(days-1)d
    """
    step = datetime.timedelta(days = 1)
    offset = 0
    while offset < days:
        yield base_date + step * offset
        offset += 1
def generate_log(dir, start_date, end_date, ip_addr, http_verb_uri_collect, http_verb_uri_other, per_200, resp_time_90, resp_time_95, resp_time_99):
    """
    Generate one synthetic access-log file per day in [start_date, end_date].
    :param dir: the directory to generate logs
    :param start_date: start date
    :param end_date: end date
    :param ip_addr: ip address template ("*" is replaced by a random octet)
    :param http_verb_uri_collect: the http verb uri to collect
    :param http_verb_uri_other: the interferential http verb uri
    :param per_200: the percentage of success response
    :param resp_time_90: the response time in milliseconds to generate in 90% of requests
    :param resp_time_95: the response time in milliseconds to generate in 95% of requests
    :param resp_time_99: the response time in milliseconds to generate in 99% of requests
    :return:
    """
    for date in gen_dates(start_date, (end_date - start_date).days + 1):
        filename = date.strftime(dir + "%Y-%m-%d" + ".log")
        # BUG FIX: next_day and log_time were derived from start_date, so
        # every daily file carried timestamps from the *first* day only.
        # They must be derived from the day currently being generated.
        next_day = date + datetime.timedelta(days = 1)
        log_time = date + datetime.timedelta(milliseconds = random.randint(100, 200))
        with open(filename, "w") as log_file:
            while log_time < next_day:
                log_ip_addr = ip_addr.replace("*", str(random.randint(2, 5)))
                # ~90% of requests hit the collected URI; the rest are noise.
                if random.randint(1, 10) <= 9:
                    http_verb_uri = http_verb_uri_collect.replace("%d", str(random.randint(1, 99)))
                else:
                    http_verb_uri = http_verb_uri_other
                if random.randint(1, 100) <= per_200:
                    response_status = "200"
                else:
                    response_status = "404"
                # Response-time buckets: 90% fast, 5% medium, 4% slow, 1% very slow.
                randnum = random.randint(1, 100)
                if randnum <= 90:
                    response_time = str(random.randint(1, resp_time_90))
                elif randnum <= 95:
                    response_time = str(random.randint(resp_time_90 + 1, resp_time_95))
                elif randnum <= 99:
                    response_time = str(random.randint(resp_time_95 + 1, resp_time_99))
                else:
                    response_time = str(random.randint(resp_time_99 + 1, resp_time_99 * 2))
                #10.2.3.4 [2018-10-13 14:02:39:123] "GET /api/playeritems?playerId=3" 200 1230
                log = "{} [{}] {} {} {}\n".format(log_ip_addr, log_time.strftime('%Y-%m-%d %H:%M:%S:%f')[:-3], http_verb_uri, response_status, response_time)
                log_file.write(log)
                log_time = log_time + datetime.timedelta(milliseconds = random.randint(100, 200))
        # no explicit close(): the with-statement closes the file.
    return
def main():
    """Read generator settings from logper.cfg and produce the log files."""
    #get config from config file
    config = ConfigParser.ConfigParser()
    with open("logper.cfg", "r") as cfg_file:
        config.readfp(cfg_file)
    for section in config.sections():
        #config is in Generator section
        if section == "Generator":
            dir = config.get(section, "dir")
            # dates are expected in ISO format YYYY-MM-DD
            start_date = datetime.datetime.strptime(config.get(section, "start_date"), '%Y-%m-%d')
            end_date = datetime.datetime.strptime(config.get(section, "end_date"), '%Y-%m-%d')
            ip_addr = config.get(section, "ip_address")
            http_verb_uri_collect = config.get(section, "http_verb_uri_collect")
            http_verb_uri_other = config.get(section, "http_verb_uri_other")
            per_200 = int(config.get(section, "per_200"))
            resp_time_90 = int(config.get(section, "resp_time_90"))
            resp_time_95 = int(config.get(section, "resp_time_95"))
            resp_time_99 =int(config.get(section, "resp_time_99"))
            #create directory if not exist
            if not os.path.exists(dir):
                os.makedirs(dir)
            #generate log according to config
            generate_log(dir, start_date, end_date, ip_addr, http_verb_uri_collect, http_verb_uri_other, per_200, resp_time_90, resp_time_95, resp_time_99)
if __name__ == '__main__':
    main()
|
997,923 | b6984e396ee93d1afb8dceb9c2e7397ca1217c76 | ##################
### original author: Parashar Dhapola
### modified by Rintu Kutum
##################
import numpy as np
import sys
import os
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy import ndimage
import math
#########################
chrom_indices = {}
chrom_lens = []
bin_size = 1000
with open('./data/hg19/hg19-chrom-sizes.txt') as h:
for n, l in enumerate(h):
c = l.rstrip('\n').split('\t')
chrom_indices[c[0]] = n
chrom_lens.append(int(c[1]))
##########
## create chrom for TRF2
trf2_chrom_array_c = [np.zeros(int(x / bin_size) + 1) for x in chrom_lens]
h = open('./data/TRF2/all_common_peaks_pankaj_sorted.bed','r')
for l in h:
c = l.rstrip('\n').split('\t')
s = int(c[1]) // 1000
e = int(c[2]) // 1000
if s == e:
e+=1
trf2_chrom_array_c[chrom_indices[c[0]]][s:e] += 1
if not os.path.exists('./data/TRF2/trf2_binned_array_count/'):
os.makedirs('./data/TRF2/trf2_binned_array_count/')
for n, i in enumerate(trf2_chrom_array_c):
for k, v in chrom_indices.items():
if v == n:
fn1 = './data/TRF2/trf2_binned_array_count/%s.npy' % k
print "Saving to file %s" % fn1
sys.stdout.flush()
np.save(fn1, i)
def get_binned_array(a, b):
    """Downsample 1-D array *a* by averaging consecutive groups of *b* bins.

    A partial trailing group (len(a) % b elements) is averaged separately and
    appended, so the result has ceil(len(a) / b) entries.
    """
    tail = len(a) % b
    head = a[:-tail] if tail > 0 else a.copy()
    means = head.reshape(len(a) // b, b).mean(axis=1)
    if tail > 0:
        means = np.append(means, a[-tail:].mean())
    return means
all_chrom_subtracted = np.array([])
points = []
bin_size = 400
np.random.seed(1261)
for chrom in [x for x in range(1,23)]+['X', 'Y']:
#print '\r%s' % chrom, end='',
print '\r%s' % chrom
trf2_peaks = np.load('./data/TRF2/trf2_binned_array_count/chr%s.npy' % chrom)
trf2_peaks = get_binned_array(trf2_peaks, bin_size)
dnase = np.load('./data/DES/chr%s.npy' % chrom)
dnase = get_binned_array(dnase, bin_size)
dnase = np.log2(dnase+1)
####################
specific = dnase.copy()
specific[trf2_peaks == 0] = 0
####################
chrom_len = len(trf2_peaks)
random_peaks = []
for i in range(1):
got_nums = []
rand_p= np.zeros(chrom_len)
for j in range(len(np.nonzero(trf2_peaks)[0])):
while True:
rand_num = np.random.randint(chrom_len)
if rand_num not in got_nums:
got_nums.append(rand_num)
break
rand_p[rand_num] = 1
random_peaks.append(rand_p)
background = dnase.copy()
idx = random_peaks[0] == 0
background[idx] = 0
if len(all_chrom_subtracted) == 0:
all_chrom_subtracted = specific.copy()
all_chrom_background = background.copy()
else:
all_chrom_subtracted = np.hstack((all_chrom_subtracted, specific))
all_chrom_background = np.hstack((all_chrom_background, background))
points.append(len(dnase))
ref_subtract = all_chrom_subtracted - all_chrom_background
#############################
#############################
#########################
breaks_pos = np.cumsum([0.0] + points)
breaks_pos = np.radians((breaks_pos/breaks_pos[-1])*360)
xticks = []
for i in range(len(breaks_pos)-1):
xticks.append(((breaks_pos[i+1]-breaks_pos[i])/2)+breaks_pos[i])
polar_pos = np.radians(np.linspace(0,360,len(ref_subtract)))
##################
pos = ref_subtract.copy()
pos[pos < 0] = 0
pos[pos > 10] = 10
neg = ref_subtract.copy()
neg[neg > 0] = 0
neg[neg < -10] = -10
#########################
fig = plt.figure(figsize=(7, 7))
gs = mpl.gridspec.GridSpec(6, 6, hspace=0.5)
ax = fig.add_subplot(gs[:, :], projection='polar')
#######
smoothen_factor = 5
a = ndimage.gaussian_filter1d(pos, smoothen_factor)
b = ndimage.gaussian_filter1d(neg, smoothen_factor)
#######
track_pos = 7
ax.fill_between(polar_pos, track_pos, a + track_pos,
color='crimson', alpha=0.7, edgecolor='none', label='DES in TRF2\npeaks only')
ax.fill_between(polar_pos, track_pos, b + track_pos,
color='grey', alpha=0.7, edgecolor='none', label='DES in random\npeaks only')
for i in breaks_pos:
ax.axvline(i, ymin=0.4, ymax=0.9, ls='-.', c='k', lw=1)
for i,j in zip([x for x in range(1,23)]+['X', 'Y'], xticks):
rotation = math.degrees(j)
if 270 > rotation > 90:
rotation-=180
ax.text(j, track_pos + 3, 'chr'+str(i), fontsize=10, color='grey', alpha=0.7,
horizontalalignment='center', verticalalignment='center', rotation=rotation)
ax.set_xticks(xticks)
ax.xaxis.grid(False)
ax.yaxis.grid(False)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_ylim((0,11))
ax.spines['polar'].set_visible(False)
ax.legend(loc='center', frameon=False, fontsize=11)
plt.savefig('./figures/Figure-2B-dnase_TRF2_vs_Random.png', dpi=300)
|
997,924 | e6e73ef7f06588ab788a0e17deb9676564519012 | #!/usr/bin/env python3
import argparse, sys, numpy, os
import matplotlib.pyplot
import datetimelib
import photometrylib
import generallib
import astropy
import subprocess
if __name__ == "__main__":
    # 'import astropy' alone does not guarantee the io.fits subpackage is
    # importable as an attribute; load it explicitly.
    import astropy.io.fits
    parser = argparse.ArgumentParser(description='Loads a FITS file bins it.')
    parser.add_argument('inputfile', type=str, nargs='+', help='File(s) to rebin.')
    parser.add_argument('-b', '--bin', type=int, default=2, help="New binning factor.")
    parser.add_argument('-o', '--output', type=str, default="auto", help="Name of the output file. By default it will add an _nxn suffix to the input filename.")
    arg = parser.parse_args()
    print(arg)
    for FITSfile in arg.inputfile:
        hdul = astropy.io.fits.open(FITSfile)
        header = hdul[0].header
        # Default to 1x1 when CCDSUM is missing -- the original fell through
        # to int("unknown") and crashed on headerless files.
        binning = "1 1"
        try:
            binning = header['CCDSUM']
        except KeyError:
            pass
        imageData = hdul[0].data
        print("Image dimensions:", numpy.shape(imageData))
        binning = [int(b) for b in binning.split()]
        print("Binning of original image: ", binning)
        hdul.close()
        rebin = arg.bin
        a = numpy.array(imageData)
        width = numpy.shape(a)[1]
        # BUG FIX: the original read shape index [1] (the width) for height.
        height = numpy.shape(a)[0]
        new_height = height // rebin
        new_width = width // rebin
        # Average rebin x rebin blocks, then scale by rebin^2 so the total
        # flux of the image is conserved.
        b = a.reshape(new_height, rebin, new_width, rebin)
        c = rebin * rebin * b.mean(axis=3).mean(axis=1)
        # Clamp to the 16-bit unsigned range expected by the FITS consumers.
        d = numpy.clip(c, 0, 65535)
        print("Old shape:", numpy.shape(a), " New shape:", numpy.shape(d))
        hdu = astropy.io.fits.PrimaryHDU(d)
        header['CCDSUM'] = "%d %d" % (rebin, rebin)
        header['CCDXBIN'] = rebin
        header['CCDYBIN'] = rebin
        hdu.header = header
        if arg.output == "auto":
            filename = os.path.splitext(FITSfile)[0] + "_%dx%d" % (rebin, rebin) + os.path.splitext(FITSfile)[1]
        else:
            filename = arg.output
        astropy.io.fits.HDUList([hdu]).writeto(filename, overwrite=True)
        print("Written rebinned image to: %s" % filename)
    # BUG FIX: sys.exit() was inside the loop, so only the first of the
    # nargs='+' input files was ever processed; debug dumps of the full
    # pixel arrays (print(a)/print(d)) were dropped.
|
997,925 | 5703b52b97e054c50e5561188bf815367434807b | from curtain import Curtain, frame_length
import sys
from plugin import Plugin
import time, os, math
import midi
plugins = []
for filename in os.listdir('plugins'):
name, extension = os.path.splitext(filename)
if extension == '.py' and name != '__init__':
module = __import__('plugins.' + name).__dict__[name]
for var in module.__dict__.values():
if isinstance(var, type) and Plugin in var.__bases__:
plugins.append(var)
curtain = Curtain()
from plugins.strobe import Strobe
from plugins.rainbow import Rainbow
from plugins.snakes import Snakes
from plugins.ec import EC
from plugins.fancyrainbow import FancyRainbow
plugins = [Strobe(),Snakes(), FancyRainbow(), EC(),Strobe()]
input = midi.MidiInput(9)
class Constants():
    """Shared mutable state: index of the active plugin in `plugins`."""
    p = 3
def do_strobe(lol):
    # MIDI callback: activate the first strobe plugin.
    # (print written in the parenthesised form, identical output on Py2/Py3)
    print("strobe")
    Constants.p = 0
def do_strobe_2(lol):
    print("strobe")
    Constants.p = 4
def do_rainbow(lol):
    # BUG FIX: the original declared `global p` but then assigned
    # Constants.p, so the global statement was dead and misleading; removed
    # here and in the callbacks below.
    Constants.p = 2
def do_snakes(lol):
    Constants.p = 1
def do_ec(lol):
    Constants.p = 3
def set_period(rate):
    # Map MIDI controller value 0..127 to a strobe period (higher value ->
    # shorter period).
    rate = int((127 * 3) / (rate + 1))
    plugins[0].period = rate
def set_period_2(rate):
    rate = int((127 * 3) / (rate + 1))
    plugins[4].period = rate / 5 + 2
input.attach_callback(102,do_strobe)
input.attach_callback(106,do_strobe_2)
input.attach_callback(103,do_rainbow)
input.attach_callback(104,do_snakes)
input.attach_callback(105,do_ec)
input.attach_callback(16,set_period)
input.attach_callback(20,set_period_2)
input.start()
def weighted_average(a, b, f):
    """Blend two {key: (r, g, b)} colour dicts: f*a + (1-f)*b per channel.

    Keys present in only one dict are treated as black (0, 0, 0) in the other.
    """
    average = {}
    # Set union instead of list concatenation: dict.keys() + dict.keys()
    # raises TypeError on Python 3 (keys are views, not lists) and the old
    # form re-processed keys present in both dicts.  Identical semantics on
    # Python 2.
    for k in set(a) | set(b):
        r1, g1, b1 = a.get(k, (0, 0, 0))
        r2, g2, b2 = b.get(k, (0, 0, 0))
        average[k] = (f*r1 + (1-f)*r2, f*g1 + (1-f)*g2, f*b1 + (1-f)*b2)
    return average
frame = 0
try:
    # Fixed-rate render loop: step every plugin, draw the active one, then
    # sleep out the remainder of the frame budget.
    while True:
        start = time.clock()
        frame += 1
        for plugin in plugins:
            plugin.step()
        # The active plugin is selected via MIDI callbacks (Constants.p).
        plugin =plugins[Constants.p] # previously: plugins[(int(frame / 600) + 2) % len(plugins)]
        curtain.send_color_dict(plugin.canvas)
        end = time.clock()
        sleep_length = frame_length - (end - start)
        if sleep_length > 0:
            time.sleep(sleep_length)
except Exception:
    # Shut the MIDI listener down cleanly on any error (incl. KeyboardInterrupt
    # is NOT caught here -- only Exception subclasses).
    input.stop()
    sys.exit(0)
|
997,926 | 8a2fec0cf510431b7b6a4cb7f9916452ec64ecbc | import Tkinter
import tkMessageBox
import gridbutton
top = Tkinter.Tk()
top.withdraw()
class Run():
    """Phone-call controller driving a FONA GSM module over AT commands."""
    def __init__(self, fona):
        # BUG FIX: the constructor was spelled `_init_` (single underscores),
        # so it never ran and self.fona / self.screen_lock were never set.
        self.fona = fona
        self.screen_lock = False
        self.valid_call = False
        self.ongoing_call = False
    def make_call(self):
        # NOTE(review): Entry.get() calls the Tkinter *class*, not a widget
        # instance, and in Python 2 a str > int comparison is always True --
        # this validation needs a real Entry widget and a numeric length
        # check; confirm intended behaviour before relying on it.
        if Entry.get() > 10:  # original spelled the literal as 1+1+...+1
            self.valid_call = True
        else:
            self.valid_call = False
        if self.valid_call:
            # ATD<number>; dials a voice call.
            self.fona.transmit('ATD' + (Entry.get()) + ';')
            self.ongoing_call = True
        else:
            self.ongoing_call = False
            tkMessageBox.showinfo("Call Failed", "FAILED")
    def end_call(self):
        # ATH hangs up the current call.
        self.fona.transmit('ATH')
        self.ongoing_call = False
    def ongoing_call(self):
        # NOTE(review): this method is shadowed by the self.ongoing_call
        # boolean assigned in make_call/end_call -- rename one of the two.
        tkMessageBox.showinfo("Headset", Tkinter.Button(top, text="ON", command = self.headset_on),Tkinter.Button(top, text="OFF", command = self.headset_off))
    def headset_on(self):
        # AT+CHFA=1 routes audio to the headset channel.
        self.headset = False
        self.fona.transmit('AT+CHFA=1')
    def headset_off(self):
        self.headset = True
        self.fona.transmit('AT+CHFA=0')
top.mainloop()
|
997,927 | db9b5e89312b3b3ac8875c431046aea5d947c424 | import sys
"""
This function take the sequence input and output the complementary sequence in 5' to 3' fashion.
output[complementary] = wcc[input_sequence]
Example:
wcc('TAGCTACTCAGACGACCAGTAGATAAAAG') will give "CTTTTATCTACTGGTCGTCTGAGTAGCTA"
"""
# Watson-Crick base pairing table.
_COMPLEMENT = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}

def comp(base):
    """Return the Watson-Crick complement of a single DNA base (A/C/G/T).

    Fix: the original if-chain left ``baseC`` unassigned for any other
    character and raised a confusing UnboundLocalError; an unknown base now
    raises ValueError with the offending character in the message.
    """
    try:
        return _COMPLEMENT[base]
    except KeyError:
        raise ValueError("invalid DNA base: %r" % (base,))
def wcc(s):
    """Print the reverse complement (5' to 3') of DNA sequence ``s``.

    Surrounding whitespace is stripped.  Writes a note line, the reversed
    complementary sequence, then a newline to stdout; returns None.

    Fix: the original sized its loop from ``len(s.strip())`` but indexed
    the *unstripped* string, so any leading whitespace shifted the sequence
    and dropped trailing bases; complementing the stripped string directly
    removes that mismatch.
    """
    seq = s.strip()
    rev_comp = ''.join(comp(base) for base in reversed(seq))
    sys.stdout.write(" Note that the output sequence is from 5' to 3' \n")
    sys.stdout.write(rev_comp)
    sys.stdout.write("\n")
|
997,928 | b292ff349c00b8de5db9e8a4fa485669ff32c750 | from bs4 import BeautifulSoup
import requests
import re
from django.db import models
class ScrapeLinkdin(models.Model):
    """Scrape LinkedIn job-search results for a search term in Ontario.

    Builds a search URL in __init__, then scrapeSite() fetches each posting
    linked from the results page and collects per-posting fields into
    self.jobPosting.
    NOTE(review): the class-level attributes below are shadowed by locals /
    instance attributes in __init__ and never meaningfully used.
    """
    location = None
    fieldToSearch = None
    url = None
    jobPosting = []  # NOTE(review): mutable class attribute; __init__ rebinds it per instance

    def __init__(self,searchTerm):
        """Build the LinkedIn search URL for searchTerm in Ontario, Canada."""
        # This is just a variable I set to test it, however the website should have a drop down menu or an input field to get this
        location = "Ontario Canada"
        # This just breaks up the location to have it fit the URL format
        for char in location:
            if char in " ":
                location = location.replace(char, '%2C')
        # This is just a variable I set to test it out. In this case this is the search field
        fieldToSearch = searchTerm
        # This just adds the %20 inbetween spaces to make it a proper url
        for char in fieldToSearch:
            if char in " ":
                fieldToSearch = fieldToSearch.replace(char, '%20')
        # This adds the fields to generate the proper URL
        self.url = "https://www.linkedin.com/jobs/search/?keywords=" + fieldToSearch + "&location=" + location
        # Finds all the information for each job posting
        self.jobPosting = []

    def scrapeSite(self):
        """Fetch the search results page, follow every posting link, and
        append one info dict per posting to self.jobPosting (network I/O)."""
        # Getting the webpage
        response = requests.get(self.url)
        # Getting source code
        data = response.text
        # Pass source code to BeautifulSoup
        soup = BeautifulSoup(data, features="lxml")
        # Finds all the urls of the job postings and puts it in a list
        links=[]
        for link in soup.findAll(attrs={ 'class': "result-card__full-card-link"}):
            links.append(link['href'])
        for link in links:
            # Creates the webpages data for each individual job posting
            indivResponse = requests.get(link)
            indivData = indivResponse.text
            indivSoup = BeautifulSoup(indivData, features="lxml")
            jobInfo = {
                # The postings URL
                'URL':link,
                # The postings title
                'Title':(indivSoup.find(attrs={ 'class': "topcard__title"})).getText(),
                # The postings Company
                'Company':(indivSoup.find(attrs={ 'class': "topcard__flavor"})).getText(),
                'Location':(indivSoup.find(attrs={ 'class': "topcard__flavor topcard__flavor--bullet"})).getText()
            }
            # The postings Date posted (for some reason just using find wont work for the specific class, however findAll in a loop only looping once works)
            count = 0
            for date in indivSoup.findAll(attrs={ 'class': "topcard__flavor--metadata posted-time-ago__text"}):
                if count == 0:
                    jobInfo['Date Posted'] = date.getText()
                break
            # This gets the whole Description of the post including the requirments that may be listed
            descrip = ''
            for li in indivSoup.findAll(attrs={ 'class': "show-more-less-html__markup"}):
                descrip = descrip +" " + li.getText()
            jobInfo['Description'] = descrip
            # Criteria section have the same class, so by getting the first 2 we can get the Seniority level (ex Entry) and the Employment type (ex fulltime)
            # NOTE(review): the unconditional `break` below runs on the first
            # iteration, so `count+=1` is unreachable and 'Employment' is
            # never recorded — confirm intent.
            for critList in indivSoup.findAll(attrs={ 'class': "job-criteria__text job-criteria__text--criteria"}):
                if count == 0:
                    jobInfo['Seniority'] = critList.getText()
                elif count == 1:
                    jobInfo['Employment'] = critList.getText()
                break
                count+=1
            self.jobPosting.append(jobInfo)
|
997,929 | e754829a029427a0e65ab21e55f6dc8bce76b282 | import numpy as np
import matplotlib.pyplot as plt
from fkpp_vect1D import main_FD_Solver
# Sweep the diffusion constant D and compare the FKPP front velocity from the
# finite-difference solver (main_FD_Solver, project module) against the
# analytic prediction 2*sqrt(D*gamma); also records the CFL number per run.
N = 1000 # resolution
L = 1000 # box size units (m)
dt = .001 # time step size
gamma = 1.0 # growth rate
tend = 1000 # tending time in (s)
t_trans = 50  # transient time discarded before measuring the velocity
saves = False
verbose = False
save_freq = 1000
D_arr = np.linspace(0.01, 2.5, 25) # diffusion array
v_arr = np.zeros([D_arr.shape[0], 3]) # record velocity
for i, D in enumerate(D_arr):
    print(i, ': d = ', round(D, 4))
    vel_values = main_FD_Solver(N, L, dt, D, gamma, tend, save_freq, verbose, saves, t_trans)
    # Sanity check: the two numeric velocity estimates should agree closely.
    assert vel_values[0] - vel_values[1] < 0.010
    vel_numeric = vel_values[0]
    vel_predict = vel_values[2]
    CFL_ = vel_values[-1]
    v_arr[i, :] = vel_predict, vel_numeric, CFL_
print('Done vel iterations')
# Plot predicted vs numeric velocity, save the figure, then show the CFL curve.
fig, ax = plt.subplots()
ax.plot(D_arr, v_arr[:, 0], label=r"Predicted: $2\sqrt{D}$")
ax.plot(D_arr, v_arr[:, 1], label="Numeric FTCD")
ax.set_title('Numeric FTCD vs Analytical velocity 1D')
ax.set_xlabel('Diffusion constant')
ax.set_ylabel('Velocity')
ax.grid(alpha=0.50)
plt.legend()
plt.savefig('numeric_Vs_analytical_fkpp1D')
plt.show()
plt.plot(D_arr, v_arr[:, 2])
plt.show()
997,930 | 44c3a86c579a0637cdceb16537d535b7971b609a | import math
def solution(progresses, speeds):
    """Group features into deployment batches.

    Feature i needs ceil((100 - progresses[i]) / speeds[i]) more days.
    A feature can only ship after every feature ahead of it has shipped, so
    each batch is gated by the slowest feature at its front.  Returns the
    batch sizes in deployment order.
    """
    DONE = 100
    deployments = []
    current_batch = 1
    blocking_days = -1  # days needed by the feature gating the current batch
    for done, speed in zip(progresses, speeds):
        days_needed = math.ceil((DONE - done) / speed)
        if blocking_days == -1:
            # First feature opens the first batch.
            blocking_days = days_needed
        elif days_needed <= blocking_days:
            # Finishes no later than the gate: ships in the same batch.
            current_batch += 1
        else:
            # Takes longer than the gate: close the batch, open a new one.
            deployments.append(current_batch)
            blocking_days = days_needed
            current_batch = 1
    return deployments + [current_batch]
print(solution([93, 99, 92, 93], [1, 1, 1, 1]))
|
997,931 | 0ffb656967a1c903ca2289973f35cc0245a09368 | def main():
import csv
from itertools import islice
reptotal=0
demtotal=0
demlist=(1961,1962,1963,1964,1965,1966,1967,1968,1977,1978,1979,1980,1993,1994,1995,1996,1997,1998,1999,2000,2009,2010,2011,2012,2013)
replist=(1969,1970,1971,1972,1973,1974,1975,1976,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,2001,2002,2003,2004,2005,2006,2007,2008)
p = open('presidents.txt', 'w')
with open('BLS_private.csv') as d:
for line in islice(csv.reader(d), 6, None):
yeartotal=0
year=float(line[0])
yeartotal+=float(line[1])
yeartotal+=float(line[2])
yeartotal+=float(line[3])
yeartotal+=float(line[4])
yeartotal+=float(line[5])
yeartotal+=float(line[6])
yeartotal+=float(line[7])
yeartotal+=float(line[8])
yeartotal+=float(line[9])
yeartotal+=float(line[10])
if line[11]<str(10):
line[11]=0
line[12]=0
yeartotal+=float(line[11])
yeartotal+=float(line[12])
for i in demlist:
if i==year:
demtotal+=yeartotal
for i in replist:
if i==year:
reptotal+=yeartotal
p.write('\n%s: %s\n'%("Democrat",demtotal))
p.write('\n%s: %s\n'%("Republican",reptotal))
if __name__ == "__main__":
main() |
997,932 | 9b8328094fd054eb4f989e976d7a70d1e02d05a8 |
import random
from bottle import Bottle, template, static_file, request, redirect, HTTPError
import model
import session
app = Bottle()
# get the css file and apply to all the html
@app.route('/static/<filename:path>')
def static(filename):
    """Serve a static asset (css/js/images) from the local ./static dir."""
    return static_file(filename=filename, root='static')
# this is for index.html
@app.route('/')
def index(db):
    """Render the store front page with one entry per product."""
    # session
    session.get_or_create_session(db)
    # create a string
    lis = []
    # get the product_list dict and name it as productList
    productList = model.product_list(db,None)
    # use for loop an for productList and get the id, name, url and so on for each dict
    for row in productList:
        info = {
            'id': row['id'],
            'name': row['name'],
            'url': row['image_url'],
            'inventory': row['inventory'],
            'cost': row['unit_cost']
        }
        # append those data into the lis
        lis.append(info)
    # return the title and a lis that contain all the data for each product. and use the for loop in html
    return template('index', title='The WT Store', lis=lis)
# product/id
@app.route('/product/<id>', method='GET')
def product(db, id):
    """Render the detail page for one product, or 404 if the id is unknown."""
    # just the product data base on id
    product = model.product_get(db, id)
    # if no such product, 404 error and print 'No such Item'
    if not product:
        return HTTPError(404,'No such Item ')
    else:
        # get the data and insert to the product.html
        info = {
            'title': product['name'],
            'id': product['id'],
            'name': product['name'],
            'url': product['image_url'],
            'inventory': product['inventory'],
            'cost': product['unit_cost'],
            'description': product['description']
        }
        # return the template with the product data
        return template('product', info)
# create a Method that handle the Product Post
# if 'quantity' in request.query:
# @app.post('/product/<id>')
# def add_to_cart(db,id):
# session.get_cart_contents(db)
#
# #the quantity will be request and name as quantity
# quantity = request.forms.get['quantity']
#
# # the data use the method of session.add_to_cart with the data
# # id from the product/<id>
# session.add_to_cart(db, id, quantity=quantity)
#
# # once it works, it redirect to cart
# return redirect('/cart')
# create a cart page
@app.route('/cart')
def cart(db):
    """Render the cart page; contents come from the session helper."""
    # session get the content from the from the db
    session.get_cart_contents(db)
    return template('cart')

if __name__ == '__main__':
    from bottle.ext import sqlite
    from dbschema import DATABASE_NAME
    # install the database plugin so handlers receive the `db` argument
    app.install(sqlite.Plugin(dbfile=DATABASE_NAME))
    app.run(debug=True, port=8010)
|
997,933 | a4b2f0dd236722c61b37dde913d05573e6d635d4 | import pandas as pd
import numpy as np
import math
import copy
import QSTK.qstkutil.qsdateutil as du
import datetime as dt
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkstudy.EventProfiler as ep
import sys
import csv
def bollinger_value(price_array, ldt_timestamps, bollinger_date):
    """Compute 20-period Bollinger statistics for a price series and return
    the row matching ``bollinger_date``.

    Returns (result_array, data): result_array holds the values for the
    columns named by the module-global ``keys`` (set in __main__) at the
    requested timestamp; data is the full DataFrame.
    NOTE(review): ``keys`` is not defined in this function — it relies on
    the global assigned in the __main__ block, so importing this module and
    calling the function directly raises NameError.
    NOTE(review): pd.rolling_mean/rolling_std are the long-removed pandas
    0.x API (use Series.rolling(20).mean()/std() on modern pandas).
    """
    d= {'price': price_array, 'time': ldt_timestamps}
    data=pd.DataFrame(d)
    data['roll_mean']=pd.rolling_mean(data['price'],20)
    data['roll_std']=pd.rolling_std(data['price'],20)
    data['boll_low']=data['roll_mean']-data['roll_std']
    data['boll_high']=data['roll_mean']+data['roll_std']
    # Bollinger value: how many rolling std-devs the price sits from the mean.
    data['boll_val']= (data['price']-data['roll_mean']) / data['roll_std']
    result_array=[]
    for i in range(0, len(ldt_timestamps)):
        if data['time'][i]==bollinger_date:
            for j in keys:
                result_array.append(data[j][i])
    return result_array, data
def getdata(dt_start,dt_end,ls_symbols):
    """Fetch Yahoo close prices for ``ls_symbols`` over NYSE trading days in
    [dt_start, dt_end] via QSTK; returns (close-price ndarray, timestamps)."""
    #dt_start=dt.datetime(na_dates[0,0],na_dates[0,1],na_dates[0,2])
    #dt_end=dt.datetime(na_dates[len(na_dates)-1,0],na_dates[len(na_dates)-1,1],na_dates[len(na_dates)-1,2]+1)
    # 16:00 == NYSE close; QSTK timestamps quotes at market close.
    dt_timeofday = dt.timedelta(hours=16)
    ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
    c_dataobj = da.DataAccess('Yahoo')
    ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
    ldf_data = c_dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
    d_data = dict(zip(ls_keys, ldf_data))
    na_price = d_data['close'].values
    return na_price, ldt_timestamps
def printresults(result_array,keys):
    """Print 'key : value' for each parallel pair (Python 2 print syntax)."""
    for i in range(0,len(keys)):
        print str(keys[i]) + " : " + str(result_array[i])
if __name__ == '__main__':
    # CLI: <symbol> <lookback> <year> <month> <day>
    # Prints the Bollinger statistics for the given symbol at the given date.
    '''def get_data(symlist,filename):'''
    print 'Number of arguments:', len(sys.argv), 'arguments.'
    print 'Argument List:', str(sys.argv)
    days=252
    k=math.sqrt(days)  # NOTE(review): annualisation factor, computed but unused
    # NOTE(review): a single symbol string is passed where QSTK expects a
    # list of symbols — confirm; QSTK may iterate the string per-character.
    ls_symbols=sys.argv[1]
    lookback=int(sys.argv[2])  # NOTE(review): unused; window is hard-coded to 20
    #date1=[]
    #date1.append(sys.argv[3])
    #print date1
    year=int(sys.argv[3])
    month=int(sys.argv[4])
    day=int(sys.argv[5])
    date=dt.datetime(year,month,day)
    dt_start = dt.datetime(2008, 1, 1)
    dt_end = dt.datetime(2010, 12, 31)
    # Module-global consumed by bollinger_value() — column order of its output.
    keys= ['time', 'roll_mean', 'roll_std', 'boll_low', 'boll_high', 'boll_val']
    na_price, ldt_timestamps=getdata(dt_start,dt_end,ls_symbols)
    # Flatten the (days, 1) close-price array into a plain list.
    price_array=[]
    for i in range(0, len(na_price)):
        price_array.append(na_price[i][0])
    #d= {'price': price_array, 'time': ldt_timestamps}
    #data-pd.DataFrame(d)
    #price_data=pd.Series(price_array,index=ldt_timestamps)
    #timestamps
    # Match the 16:00 close timestamp used by getdata().
    bollinger_date=pd.tslib.Timestamp(dt.datetime(date.year, date.month, date.day, 16))
    result_array, all_data=bollinger_value(price_array, ldt_timestamps, bollinger_date)
    printresults(result_array,keys)
|
997,934 | 36719a627701d3baeedf29460088f2835acdeecc | # Generated by Django 3.1 on 2021-08-12 06:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: pin the 2021 default on the three
    year fields (delivery.year, farmer.join_year, payfarmer.year) and make
    them non-editable.  Do not hand-edit beyond comments."""

    dependencies = [
        ('dataapp', '0013_auto_20210812_0848'),
    ]

    operations = [
        migrations.AlterField(
            model_name='delivery',
            name='year',
            field=models.IntegerField(default=2021, editable=False),
        ),
        migrations.AlterField(
            model_name='farmer',
            name='join_year',
            field=models.IntegerField(default=2021, editable=False),
        ),
        migrations.AlterField(
            model_name='payfarmer',
            name='year',
            field=models.IntegerField(default=2021, editable=False),
        ),
    ]
|
997,935 | fb93cb9321c4536ab8d2cfa5a6b0f044247c4d6e | from difflib import *
class Op(object):
    """One edit operation attached to a single source position: optionally
    delete the source element there, and insert zero or more elements
    before it.
    """

    def __init__(self, delete=False, insert=None):
        # Fix: the original used a mutable default (insert=[]), so every Op
        # created without an argument shared one list object.
        self.delete = delete
        self.insert = [] if insert is None else insert

    def rep(self):
        """Hashable representation: (delete, tuple(insert))."""
        return (self.delete, tuple(self.insert))

    def __repr__(self):
        return "Op({0})".format({'delete': self.delete,
                                 'insert': self.insert})

    def __hash__(self):
        return hash(self.rep())

    def __cmp__(self, other):
        # Python 2 ordering hook; `cmp` does not exist on Python 3, so this
        # is inert there (kept for interface compatibility).
        return cmp(self.rep(), other)
def align(a_, b_):
    '''Align edit operations to each position in the source
    sequence. The sequence of edit operations transforms source
    sequence a to target sequence b.

    Returns a list of len(a)+1 Op objects: ops[i] records what to insert
    before position i and whether a[i] itself is deleted (the extra
    trailing slot carries insertions at the end of the sequence).

    Fix: the original iterated ``T.iteritems()``, which is Python-2-only;
    ``T.items()`` behaves identically there and also works on Python 3.'''
    a = list(a_)
    b = list(b_)
    codes = SequenceMatcher(a=a, b=b, autojunk=False).get_opcodes()
    T = {}
    for code in codes:
        tag, a_i, a_j, b_i, b_j = code
        if tag in ['insert', 'replace']:
            # Replacements contribute both an insertion and deletions.
            op = T.get(a_i, Op())
            op.insert = op.insert + b[b_i:b_j]
            T[a_i] = op
        if tag in ['delete', 'replace']:
            for i in range(a_i, a_j):
                op = T.get(i, Op())
                op.delete = True
                T[i] = op
    # Fill positions untouched by any opcode with a no-op.
    for i in range(0, len(a)+1):
        if not i in T:
            T[i] = Op()
    # Keys (positions) are unique ints, so sorting the pairs never compares Ops.
    return [op for _, op in sorted(T.items())]
def apply(ops, a_):
    '''Apply edit operations ops to source sequence a and return
    a generator for the resulting target sequence.

    Each op contributes its insertions first, followed by the source
    element at that position unless the op marks it deleted.'''
    source = list(a_)
    for pos, op in enumerate(ops):
        for inserted in op.insert:
            yield inserted
        # The final op (pos == len(source)) has no source element to emit.
        if not op.delete and pos < len(source):
            yield source[pos]
|
997,936 | 77e1de7e2eb393dfcb6bd14c708faab6c9086ef2 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 VMware Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.openstack.common import log
from neutron.plugins.nicira.dbexts import nicira_db
from neutron.plugins.nicira import nvplib
LOG = log.getLogger(__name__)
def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
    """Return the NSX switch and port uuids for a given neutron port.

    First, look up the Neutron database. If not found, execute
    a query on NSX platform as the mapping might be missing because
    the port was created before upgrading to grizzly.

    This routine also retrieves the identifier of the logical switch in
    the backend where the port is plugged. Prior to Icehouse this
    information was not available in the Neutron Database. For dealing
    with pre-existing records, this routine will query the backend
    for retrieving the correct switch identifier.

    As of Icehouse release it is not indeed anymore possible to assume
    the backend logical switch identifier is equal to the neutron
    network identifier.

    Returns (nvp_switch_id, nvp_port_id), or (None, None) when the port
    cannot be found on the backend either.
    """
    nvp_switch_id, nvp_port_id = nicira_db.get_nsx_switch_and_port_id(
        session, neutron_port_id)
    if not nvp_switch_id:
        # Find logical switch for port from backend
        # This is a rather expensive query, but it won't be executed
        # more than once for each port in Neutron's lifetime
        nvp_ports = nvplib.query_lswitch_lports(
            cluster, '*', relations='LogicalSwitchConfig',
            filters={'tag': neutron_port_id,
                     'tag_scope': 'q_port_id'})
        # Only one result expected
        # NOTE(salv-orlando): Not handling the case where more than one
        # port is found with the same neutron port tag
        if not nvp_ports:
            LOG.warn(_("Unable to find NVP port for Neutron port %s"),
                     neutron_port_id)
            # This method is supposed to return a tuple
            return None, None
        nvp_port = nvp_ports[0]
        nvp_switch_id = (nvp_port['_relations']
                         ['LogicalSwitchConfig']['uuid'])
        # Refresh (or create) the DB mapping so subsequent calls hit the DB.
        with session.begin(subtransactions=True):
            if nvp_port_id:
                # Mapping already exists. Delete before recreating
                nicira_db.delete_neutron_nsx_port_mapping(
                    session, neutron_port_id)
            else:
                nvp_port_id = nvp_port['uuid']
            # (re)Create DB mapping
            nicira_db.add_neutron_nsx_port_mapping(
                session, neutron_port_id,
                nvp_switch_id, nvp_port_id)
    return nvp_switch_id, nvp_port_id
|
997,937 | 504e061bcd80fcbd66d098f8966587c4d957dc51 | import re, string
import sys, math, operator
from collections import defaultdict
from decimal import Decimal
import numpy as np
import mixem
import matplotlib.pyplot as plt
import itertools
alphabet = "qwertyuiopasdfghjklzxcvbnm0. #"
all_trigram = [''.join(i) for i in itertools.product(alphabet, repeat = 3)]
#Remove non-ASCII characters
def strip_non_ascii(string):
    '''Return the string keeping only printable ASCII (codepoints 1-126),
    with newlines also removed.

    Fix: the original filtered newlines with ``c is not "\\n"`` — an
    identity comparison against a literal that only works by CPython
    string-interning accident (and warns on 3.8+); ``!=`` states the
    intent portably.
    NOTE(review): the parameter name shadows the ``string`` module imported
    at the top of this file — kept for interface compatibility.
    '''
    stripped = (c for c in string if 0 < ord(c) < 127 and c != "\n")
    return ''.join(stripped)
#Count the number of lines in a file
def file_len(fname):
    """Return the number of lines in an open file (or items in any iterable).

    Fix: the original returned ``i + 1`` from the loop variable of an
    enumerate pass, which raised UnboundLocalError for an empty file;
    counting explicitly returns 0 in that case.  Note this consumes the
    iterator — callers seek(0) afterwards.
    """
    return sum(1 for _ in fname)
#split a list in n chunks
def split(a, n):
    """Yield ``n`` contiguous chunks of sequence ``a``; the first
    ``len(a) % n`` chunks are one element longer than the rest."""
    base_size, extra = divmod(len(a), n)
    start = 0
    for chunk_idx in range(n):
        end = start + base_size + (1 if chunk_idx < extra else 0)
        yield a[start:end]
        start = end
#Converts a dictionary into a list of tuple sorted by the second element and return the list splitted in n chunks
def sort_and_chunk(dictionary, n):
    """Sort the dict's (key, value) pairs ascending by value and return them
    as a list of ``n`` chunks (see ``split``)."""
    return list(split(sorted(dictionary.items(), key=operator.itemgetter(1)), n))
#Lowecase all character, remove non ascii characters, then punctiations (except full stop), converts all digits to 0 and add the initials and final marks
def preprocess_line(line):
    """Normalize one training line for the trigram model: lowercase, drop
    non-ASCII, strip all punctuation except '.', map every digit to '0',
    then wrap with the start marker '##' and end marker '#'."""
    line = "##" + re.sub("\d", "0",strip_non_ascii(line.lower()).translate(str.maketrans('', '', string.punctuation.replace(".", "")))) + "#"
    return line
#Returns two dictionaries containing the counts of the n-grams, one for calculating the likelihood and one for train the lambdas
def n_gram_count(n, file, length):
    """Count n-grams over a file of ``length`` lines.

    Returns [train_counts, held_out_counts]: the first ~70% of lines feed
    the training dict, the rest the held-out dict used for lambda training.
    """
    counts = [defaultdict(int), defaultdict(int)]
    held_out_index = int(length*0.7)
    index_flag = 0
    line_count=0
    for line in file:
        line = preprocess_line(line)
        # 0 -> training counts, 1 -> held-out counts (last 30% of lines).
        index_flag = 1 if line_count > held_out_index else 0
        for j in range(len(line)-(n-1)):
            gram = line[j:j+n]
            counts[index_flag][gram] += 1
        line_count += 1
    return counts
#Load the training file and return 3-2-1 counts dictionaries
def generate_count_dict():
    """Read the training file named on the command line and return
    (train_dicts, held_out_dicts), each a list of [unigram, bigram,
    trigram] count dicts in that order."""
    if len(sys.argv) != 2:
        print("Usage: ", sys.argv[0], "<training_file>")
        sys.exit(1)
    infile = sys.argv[1] #get input argument: the training file
    with open(infile) as f:
        result_1 = []
        result_2 = []
        # file_len() consumes the handle; each pass below rewinds it.
        length = file_len(f)
        for i in range(1,4):
            f.seek(0) #reset the file read
            dicts =n_gram_count(i,f, length)
            result_1.append(dicts[0])
            result_2.append(dicts[1])
        return result_1, result_2
def get_likelihood(trigram, bigram, unigram, count):
    """Return the ML estimates (P(w3|w1w2), P(w2|w1), P(w1)) from raw counts."""
    tri_given_bi = trigram / bigram
    bi_given_uni = bigram / unigram
    uni_prob = unigram / count
    return (tri_given_bi, bi_given_uni, uni_prob)
#Apply EM Algorithm to calculate the optimal lambdas
def train_lambda(data):
    """Fit a 3-component Gaussian mixture to the likelihood samples via EM
    (third-party ``mixem``) and return the mixture weights, used as the
    interpolation lambdas for one bucket."""
    # plt.scatter(np.array(range(data.shape[0])), data)
    # plt.show();
    weights, distributions, ll = mixem.em(np.sort(np.array(data)), [mixem.distribution.NormalDistribution(0,1),mixem.distribution.NormalDistribution(0.3,5),mixem.distribution.NormalDistribution(1,9)])
    return weights
def J_M_interpolation(dicts):
    """Jelinek-Mercer interpolation of trigram probabilities.

    ``dicts`` is the pair returned by generate_count_dict():
    ([uni, bi, tri] training counts, [uni, bi, tri] held-out counts).
    Lambdas are trained per count-bucket on the held-out set, then used to
    interpolate tri/bi/uni ML estimates for every seen trigram; unseen
    trigrams get a fixed 0.05/0.07 backoff.  Returns a list of
    [trigram, probability] pairs.
    """
    uni_counts, bi_counts, tri_counts = dicts[0]
    uni_counts_lamda, bi_counts_lamda, tri_counts_lamda = dicts[1]
    probabilities = []
    lambdas = []
    # Total token counts for the unigram denominators of each split.
    v1 = sum(uni_counts.values())
    v2 = sum(uni_counts_lamda.values())
    #bucketing -> Train the lambda
    for chunk in sort_and_chunk(tri_counts_lamda, 10):
        data = []
        #Create the input vector for train the lambdas
        for trigram in chunk:
            key=trigram[0]
            likelihood = get_likelihood(tri_counts_lamda[key],bi_counts_lamda[key[:-1]], uni_counts_lamda[key[:-2]], v2)
            data.append(likelihood[0])
            data.append(likelihood[1])
            data.append(likelihood[2])
        lambdas.append(train_lambda(np.array(data)))
    index = 0
    # NOTE(review): `index` is never incremented, so the bucket-0 lambdas
    # are applied to every chunk below — confirm whether it should advance
    # per chunk.
    #Interpolate!
    for chunk in sort_and_chunk(tri_counts, 10):
        for trigram in chunk:
            key=trigram[0]
            likelihood = get_likelihood(tri_counts[key],bi_counts[key[:-1]], uni_counts[key[:-2]], v1)
            probabilities.append([key,lambdas[index][0]*likelihood[0] + lambdas[index][1]*likelihood[1] + lambdas[index][2]*likelihood[2] ])
    #calculate probability for all unseen trigram
    for trigram in all_trigram:
        if trigram not in tri_counts.keys():
            if bi_counts_lamda[trigram[:-1]] == 0:
                probability = [trigram, 0.05*uni_counts[trigram[:-2]]/v1]
            else:
                probability = [trigram, 0.05*uni_counts[trigram[:-2]]/v1 + 0.07*bi_counts_lamda[trigram[:-1]]/uni_counts[trigram[:-2]]]
            probabilities.append(probability)
    print (lambdas)
    #break
    return probabilities
def generate_JM_model(model_name):
    """Train the Jelinek-Mercer model and write it to ``model_name`` as
    'trigram<TAB>probability' lines (probability in %.2E notation)."""
    probabilities = J_M_interpolation(generate_count_dict())
    with open(model_name, "w") as f:
        for couple in probabilities:
            f.write(couple[0]+"\t"+ ('%.2E' % Decimal(couple[1]) +"\n"))
def generate_MLE_model():
    """Write maximum-likelihood trigram estimates P(c3|c1c2) to
    ``my_model_MLE.en`` as 'trigram<TAB>probability' lines.

    Fix: ``generate_count_dict()`` returns a 2-tuple whose first element is
    the [unigram, bigram, trigram] training-count list, but the original
    unpacked the 2-tuple directly into three names (raising ValueError) and
    in the wrong order; the held-out counts are unused here.
    """
    (uni_counts, bi_counts, tri_counts), _ = generate_count_dict()
    with open("my_model_MLE.en", "w") as f:
        for key in sorted(tri_counts.keys()):
            # Trigram probability = count(c1c2c3) / count(c1c2).
            f.write(key+"\t"+ ('%.2E' % Decimal( tri_counts[key] / bi_counts[key[:-1]]) +"\n"))
997,938 | 058e8c63290a4c4d029971b03c36422030b9ec32 | from django.contrib.auth.backends import ModelBackend
from .models import CashierProfile
from django.contrib.auth.models import User
class PasswordlessAuthBackend(ModelBackend):
    """Log in to Django without providing a password, just a cashier code/login number
    """
    def authenticate(self, request, rest_id, login):
        """Return the first CashierProfile matching (restaurant=rest_id,
        login_number=login), or None when no match exists.

        NOTE(review): QuerySet.first() returns None instead of raising, so
        the DoesNotExist handler is dead code — a missing cashier already
        comes back as None through the normal path.
        NOTE(review): Django passes authenticate() credentials as keyword
        arguments; confirm callers supply rest_id/login by keyword.
        """
        try:
            cashier = CashierProfile.objects.filter(restaurant = rest_id).filter(login_number = login).first()
            return cashier
        except CashierProfile.DoesNotExist:
            return None

    # def get_user(self, user_id):
    #     try:
    #         return User.objects.get(pk=user_id)
    #     except CashierProfile.DoesNotExist:
    #         return None
|
997,939 | b4407e499f9583f1e0a5dd118d465fe18fe7e38a | import os
import torch
import numpy as np
from scipy.spatial import distance_matrix
# Compare the learned embedding spaces across runs of one model config:
# every run directory under ../outputs/<dataset>/<model_info> that contains
# an `enc.th` checkpoint contributes one pairwise-distance matrix, and the
# list of matrices is pickled for later analysis.
dataset = 'diab'
model_info = 'lstm+tanh'
dirname = '../outputs/' + dataset + '/' + model_info
dirs = [d for d in os.listdir(dirname) if
        'enc.th' in os.listdir(os.path.join(dirname, d))]
# print(dirs)
distance_matrices = []
for dir in dirs:
    pth = os.path.join(dirname, dir, 'enc.th')
    enc = torch.load(pth)
    # Checkpoint is a state dict; take the embedding weight matrix.
    enc = enc['embedding.weight'].cpu().numpy()
    print(enc.shape)
    distance_matrices.append(
        distance_matrix(enc, enc)) # Minkowski distance with p-norm=2
import pickle
file_name = dataset + model_info + "-distance-matrices.pkl"
pkl_file = open(file_name, 'wb')
pickle.dump(distance_matrices, pkl_file)
pkl_file.close()
|
997,940 | 05943fdff745512181fdcf90ac271b5d56a6697a | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 10:50:22 2019
@author: Manuel
"""
def calcular_mochila(peso_maximo, *monton):
    """Greedy knapsack: pack objects from ``monton`` (dicts with 'nombre',
    'peso', 'valor') into a knapsack of capacity ``peso_maximo``, preferring
    the highest value/weight ratio; prints (in Spanish) what is taken and
    what is left behind.  Mutates the object dicts (adds/overwrites
    'valpes')."""
    numobj=0
    numobjquedarse=0
    valpess=[]
    mochila=[]
    quedarse=[]
    descartes=[]
    dejar=[]
    for objeto in monton:
        valpes=objeto["valor"]/objeto["peso"] # valpes is the value-to-weight ratio
        objeto["valpes"]=valpes
        valpess.append(valpes)
        numobj+=1
    valpess.sort() # so .pop() yields the most valuable-per-weight object first
    while numobj != 0:
        obj_prueba=valpess.pop()
        for objeto in monton:
            if obj_prueba==objeto["valpes"]:
                objeto["valpes"]=-1 # avoid re-matching when several objects share the same ratio
                if peso_maximo>=objeto["peso"]:
                    quedarse.append(objeto)
                    numobjquedarse+=1
                    peso_maximo-=objeto["peso"]
                else: # when nearly full, compare the value of the last packed object against the candidate
                    if numobjquedarse!=0: # guard: if the first/most valuable object doesn't fit, don't pop from an empty list
                        comparar=quedarse.pop()
                        peso_maximo+=comparar["peso"]
                        if peso_maximo >= objeto["peso"]:
                            if comparar["valor"] > objeto["valor"]:
                                quedarse.append(comparar)
                                numobjquedarse+=1
                                peso_maximo-=comparar["peso"]
                                dejar.append(objeto)
                            else:
                                quedarse.append(objeto)
                                numobjquedarse+=1
                                peso_maximo-=objeto["peso"]
                                dejar.append(comparar)
                        else:
                            quedarse.append(comparar)
                            numobjquedarse+=1
                            peso_maximo-=comparar["peso"]
                            dejar.append(objeto)
                    else:
                        dejar.append(objeto)
        numobj-=1
    # Tally the value/weight of what is carried vs. what is left behind.
    valor_llevo=0
    peso_llevo=0
    valor_dejo=0
    peso_dejo=0
    for objeto in quedarse:
        mochila.append(objeto["nombre"])
        valor_llevo+=objeto["valor"]
        peso_llevo+=objeto["peso"]
    for objeto in dejar:
        descartes.append(objeto["nombre"])
        valor_dejo+=objeto["valor"]
        peso_dejo+=objeto["peso"]
    print("En la mochila me llevo los siguientes objetos: ",mochila, ",con un valor total de",valor_llevo,"y un peso de",peso_llevo)
    print("Dejo los siguientes objetos: ",descartes, ",con un valor total de",valor_dejo,"y un peso de",peso_dejo)

# Demo with a few objects of varying weight and value.
moneda={"nombre":"moneda","peso":1,"valor":3}
piedra={"nombre":"piedra","peso":5,"valor":1}
anillo={"nombre":"anillo","peso":1,"valor":10}
talisman={"nombre":"talismán","peso":4,"valor":10}
estatua={"nombre":"estatua","peso":10,"valor":100}
corona={"nombre":"corona","peso":3,"valor":100}
amuleto={"nombre":"amuleto","peso":1,"valor":1}
calcular_mochila(5,moneda, piedra, anillo, talisman, estatua,corona,amuleto)
997,941 | cf5975dd2e67a00515c1256c2bf277ae67fd8802 | import logging
from dataclasses import dataclass, field, asdict
from datetime import datetime
from typing import Callable, Optional, Dict, Tuple, Any, Type, List, Union
import yaml
import torch
import torch.nn as nn
import torchvision
from action_recognition import models
# Initiate Logger
logger = logging.getLogger(__name__)
def transform_dict(config_dict: Dict, expand: bool = True):
    """
    Recursively convert an arbitrary config dict into a wandb/YAML-safe one.

    Scalars (None/int/float/str) pass through unchanged; dicts recurse;
    lists/tuples/sets become index-keyed dicts when ``expand`` is True
    (so entries are comparable across runs) and stay lists otherwise;
    anything else is rendered as the string "module:name".
    """
    converted: Dict[str, Any] = {}
    for key, value in config_dict.items():
        if value is None or isinstance(value, (int, float, str)):
            converted[key] = value
        elif isinstance(value, dict):
            converted[key] = transform_dict(value, expand)
        elif isinstance(value, (list, tuple, set)):
            # Recurse through an index-keyed dict so nested items are also
            # made YAML-friendly.
            as_dict = transform_dict(dict(enumerate(value)), expand)
            if expand:
                converted[key] = as_dict
            else:
                converted[key] = [as_dict[i] for i in range(len(value))]
        else:
            # Classes and callables carry __name__; object instances fall
            # back to their class name (their __repr__ may be unhelpful).
            if hasattr(value, '__name__'):
                label = value.__name__
            else:
                label = value.__class__.__name__
            converted[key] = f"{value.__module__}:{label}"
    return converted
def dfac_cur_time():
    """Timestamp string 'YYYYMMDD-HHMMSS' used as a dataclass default
    factory for run naming."""
    now = datetime.now()
    return now.strftime("%Y%m%d-%H%M%S")
def update_dict_by_key_value(argdict, key, value):
    """Set ``value`` at the dotted path ``key`` inside nested dict
    ``argdict`` (in place), creating intermediate dicts as needed and
    replacing explicit None nodes.  Returns ``argdict``."""
    path = key.split('.')
    node = argdict
    for part in path[:-1]:
        # Missing or None intermediate nodes become empty dicts.
        if node.get(part) is None:
            node[part] = {}
        node = node[part]
    node[path[-1]] = value
    return argdict
ARGS_TYPE = Dict[str, Any]
@dataclass
class WandbConfig:
    """Weights & Biases bookkeeping fields shared by all experiment configs."""
    # Logging Related
    cur_time: str = field(default_factory=dfac_cur_time)
    # WandB setting
    wandb_repo: str = "donny"
    wandb_project: str = "video_classification"
    wandb_group: str = "test"
    wandb_name: str = ''
    wandb_dir: str = "./output/wandb/"
@dataclass # pylint: disable=too-many-instance-attributes
class ExperimentConfig(WandbConfig):
# GPU Device Setting
gpu_device_id: Union[None, int, List[int]] = None
tensorboard_log_root: str = "./output/tensorboard/"
# Set random seed. Set to None to create new Seed
random_seed: Optional[int] = None
# Dataset Config
dataset_root: str = "./data/clipped_database/preprocessed"
dataset_artifact: str = ""
output_type: str = "random_frame"
split_by: str = "random"
frames_per_clip: int = 1
step_between_clips: int = 1
frame_rate: Optional[int] = None
metadata_path: Optional[str] = None
num_sample_per_clip: int = 1
# mouse dataset args
mix_clip: int = 0
no_valid: bool = False
extract_groom: bool = True
exclude_5min: bool = False
exclude_2_mouse: bool = False
exclude_fpvid: bool = True
exclude_2_mouse_valid: bool = False
# Over-write arguments for valid set and test set
valid_set_args: ARGS_TYPE = field(default_factory=dict)
test_set_args: ARGS_TYPE = field(default_factory=dict)
# sampler
train_sampler_config: ARGS_TYPE = field(default_factory=dict)
valid_sampler_config: ARGS_TYPE = field(default_factory=dict)
test_sampler_config: ARGS_TYPE = field(default_factory=dict)
# Transform Function
transform_size: int = 224
train_transform: Optional[Tuple[Callable, ...]] = None
valid_transform: Optional[Tuple[Callable, ...]] = None
test_transform: Optional[Tuple[Callable, ...]] = None
# Increase dataloader worker to increase throughput
num_worker: int = 16
# Training Related
batch_size: int = 64
# Default Don't Select Model
model: Optional[Union[Type[torch.nn.Module], Callable[..., torch.nn.Module]]] = None
model_args: ARGS_TYPE = field(default_factory=dict)
xavier_init: bool = False
# Default Cross Entropy loss
loss_function: nn.Module = field(default_factory=lambda: nn.CrossEntropyLoss(reduction='sum'))
# Default Select Adam as Optimizer
optimizer: Type[torch.optim.Optimizer] = torch.optim.Adam # type: ignore
optimizer_args: ARGS_TYPE = field(default_factory=lambda: {"lr": 1e-2})
# Default adjust learning rate
lr_scheduler: Optional[Type[torch.optim.lr_scheduler._LRScheduler]] = None # pylint: disable=protected-access
lr_scheduler_args: ARGS_TYPE = field(default_factory=dict)
# Set number of epochs to train
num_epochs: int = 20
samples_per_epoch: Optional[int] = None
valid_every_epoch: int = 1
save_path: Optional[str] = None
best_metric: str = 'Accuracy'
def to_dict(self, expand: bool = True):
return transform_dict(asdict(self), expand)
def update_value(self, key, value):
first_key, *rest = key.split('.')
if key in ["model", "model.class"]:
self.model = getattr(models, value, None) \
or getattr(torchvision.models, value, None) \
or getattr(torchvision.models.video, value)
elif key.startswith('model.'):
self.model_args = update_dict_by_key_value(self.model_args, key[len('model.'):], value)
elif key.startswith('train_sampler_config.'):
self.train_sampler_config = update_dict_by_key_value(
self.train_sampler_config, key[len('train_sampler_config.'):], value)
elif key in ["optimizer", "optimizer.class"]:
assert hasattr(torch.optim, value)
self.optimizer = getattr(torch.optim, value)
elif key.startswith('optimizer.'):
self.optimizer_args = update_dict_by_key_value(self.optimizer_args, key[len('optimizer.'):], value)
elif key in ["lr_scheduler", "lr_scheduler.class"]:
assert hasattr(torch.optim.lr_scheduler, value)
self.lr_scheduler = getattr(torch.optim.lr_scheduler, value)
elif key == "lr_scheduler.step":
self.lr_scheduler_args['milestones'] = list(range(value, self.num_epochs, value))
elif key.startswith('lr_scheduler.'):
self.lr_scheduler_args = update_dict_by_key_value(self.lr_scheduler_args, key[len('lr_scheduler.'):], value)
elif key == 'frames_per_clip':
self.frames_per_clip = value
self.batch_size = 16 * 16 // value
elif hasattr(self, key):
assert getattr(self, key) is None or isinstance(getattr(self, key), type(value)), (key, value)
setattr(self, key, value)
elif hasattr(self, first_key) and len(rest) > 0:
assert isinstance(getattr(self, first_key), dict)
setattr(
self, first_key, update_dict_by_key_value(
getattr(self, first_key), key[len(first_key) + 1:], value)
)
else:
raise NotImplementedError(f"Unknown key={key}")
def update_sweep_dict(self, wandb_config: Dict[str, Any]):
SWEEP_ARG_PREFIX = "WS_"
for k, v in wandb_config.items():
if k.startswith("WS_BASE"):
sp_value = v['value']
if isinstance(sp_value, list):
for item in sp_value:
self.merge_with_yaml(item)
elif sp_value:
self.merge_with_yaml(sp_value)
elif k.startswith(SWEEP_ARG_PREFIX):
sp_name = k[len(SWEEP_ARG_PREFIX):]
# wandb.config.as_dict() returns Dict[k, Dict[str, v]]
# https://github.com/wandb/client/blob/master/wandb/wandb_config.py#L321
sp_value = v['value']
self.update_value(sp_name, sp_value)
    def update_dict_value(self, dictionary, prefix=''):
        """Recursively flatten *dictionary* into dotted keys and apply each
        leaf through ``update_value`` (e.g. {'a': {'b': 1}} becomes
        update_value('a.b', 1))."""
        for k, v in dictionary.items():
            if isinstance(v, dict):
                self.update_dict_value(v, prefix=prefix + k + '.')
            else:
                self.update_value(prefix + k, v)
    def merge_with_yaml(self, yaml_path):
        """Load a YAML config file and merge its values into this config.

        A ``__BASE__`` entry (str or list of str) names parent file(s);
        parents are merged first, so the current file's values win on
        conflict.
        """
        with open(yaml_path, 'r') as f:
            cfg_dict = yaml.safe_load(f)
        if "__BASE__" in cfg_dict:
            # Merge parent files depth-first before this file's own values.
            if isinstance(cfg_dict['__BASE__'], list):
                for path in cfg_dict['__BASE__']:
                    self.merge_with_yaml(path)
            else:
                assert isinstance(cfg_dict['__BASE__'], str)
                self.merge_with_yaml(cfg_dict['__BASE__'])
            cfg_dict.pop("__BASE__")
        self.update_dict_value(cfg_dict)
|
997,942 | 691ceb833dd3f0d6591a9773820d261052276553 | DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
},
}
# Throwaway secret key for this settings module only; never reuse it in
# a deployed configuration.
SECRET_KEY = 'ishalltellyouonlyonce'
# Core Django contrib apps plus the `fa` app this settings file exercises.
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'fa',
)
# Bare Django template backend with no extra options.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
    },
]
|
997,943 | 25f13848f9f55c6114521dcca043d43e2fe233f7 | from datetime import datetime
from flask import render_template, redirect, url_for, flash, abort, request, jsonify
from flask_login import logout_user, login_required, login_user
from autoP.models import User, load_user, query_results
from forms import LoginForm, RegistrationForm, SearchForm
from . import app
@app.route('/')
@app.route('/home')
def home():
    """Serve the landing page."""
    context = {
        'title': 'Automation Center',
        'year': datetime.now().year,
        'message': 'Welcome to the Automation Center',
    }
    return render_template('index.html', **context)
@app.route('/contact')
def contact():
    """Serve the contact page."""
    page_args = dict(title='Contact',
                     message='Contact me if you have any questions.')
    return render_template('contact.html', **page_args)
@app.route('/about')
def about():
    """Serve the project 'about' page."""
    return render_template('about.html',
                           title='About',
                           message='Project Brief.')
@app.route('/show')
def show():
    """Serve the page listing all users in the system."""
    page_args = {'title': 'List Users',
                 'message': 'These are the users in our system'}
    return render_template('listUsers.html', **page_args)
@app.route('/dash_board')
def dash_board():
    """Serve the dashboard page (login redirects here on success)."""
    return render_template('dash_board.html',
                           title='Dash Board',
                           message='Welcome to the Automation Center')
@app.route('/search', methods=['GET', 'POST'])
@login_required
def search():
    """Render the search form; on a valid POST redirect to the results page."""
    form = SearchForm()
    if form.validate_on_submit():
        # input some search here
        search_string = form.search.data
        flash(search_string)
        app.logger.info(form.search.data)
        # POST/redirect/GET so refreshing the results does not re-submit.
        return redirect(url_for('search_result', query=search_string))
        # redirect(url_for('search'))
    return render_template('search.html', form=form)
@app.route('/search_result/<string:query>')
@login_required
def search_result(query):
    """Show the results matching *query*."""
    matches = query_results(query)
    return render_template('_results.html', query=query, results=matches)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and start a session.

    GET renders the login form; a valid POST looks the user up by e-mail,
    verifies the password, and redirects to the dashboard on success.  Any
    failure re-renders the form with a flashed error.
    """
    # Here we use a class of some kind to represent and validate our
    # client-side form data. For example, WTForms is a library that will
    # handle this for us, and we use a custom LoginForm to validate.
    form = LoginForm()
    if form.validate_on_submit():
        # Login and validate the user.
        # user should be an instance of your `User` class
        users = load_user(form.email.data)
        if users is None:
            # Unknown e-mail address.
            flash(message='We don\'t know you', category='error')
            return render_template('login.html', form=form)
        user = users
        if user.verify_password(form.password.data):
            login_user(user)
            flash(message='Logged in successfully.', category='message')
            return redirect(url_for('dash_board'))
        else:
            flash(message='wrong password', category='error')
            return render_template('login.html', form=form)
    return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def logout():
    """End the current session and return to the home page."""
    logout_user()
    flash('You have been logged out.')
    return redirect(url_for('home'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new user account.

    Rejects e-mail addresses that already exist; otherwise persists the
    new user and redirects to the login page.
    """
    form = RegistrationForm()
    if form.validate_on_submit():
        if User.get_by(email=form.email.data):
            flash(message='User already existing.', category='error')
            return redirect(url_for('register'))
        user = User(email=form.email.data, password=form.password.data)
        user.save()
        flash('User is registered!')
        return redirect(url_for('login'))
    return render_template('register.html', form=form)
@app.route('/shutdown')
def server_shutdown():
    """Stop the development server; refuses to act while testing."""
    if app.testing:
        return 'Cannot shutdown testing......'
    # Werkzeug exposes its shutdown hook through the WSGI environ; it is
    # absent when running under any other server.
    shutdown = request.environ.get('werkzeug.server.shutdown')
    if not shutdown:
        abort(500)
    shutdown()
    return 'Shutting down the server......'
# Error handlers: all delegate to handle_error so API clients receive a
# JSON payload while browsers get the rendered HTML error page.
@app.errorhandler(403)
def forbidden(error):
    return handle_error(error, 'Forbidden', 403)
@app.errorhandler(404)
def not_found(error):
    return handle_error(error, 'Page Not Found', 404)
@app.errorhandler(500)
def internal_server_error(error):
    return handle_error(error, 'Internal Server Error', 500)
def handle_error(error, server_error, error_code):
    """Build an error response carrying *error_code* as its HTTP status.

    Clients that accept JSON but not HTML (API callers) get a JSON body
    ``{'error': server_error}``; everyone else gets the rendered HTML
    error page.
    """
    if request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html:
        response = jsonify({'error': server_error})
        response.status_code = error_code
        return response
    # Bug fix: the HTML branch hard-coded 'Internal Server Error:' for every
    # status (403/404/500 all showed the same text); use the actual message.
    return render_template('error.html', message=server_error), error_code
|
997,944 | 18d25718ec8d10097b4c9d0a54a03b783d240270 | # set up relays
from gpiozero import OutputDevice
import time
import sys
# devices specified by GPIO number
# gpiozero.OutputDevice(pin, *, active_high=True, initial_value=False, pin_factory=None)
# NOTE(review): per the signature quoted above the non-pin arguments are
# keyword-only, so the positional `False` below presumably targets
# active_high/initial_value -- confirm and pass it by keyword.
air_pump = OutputDevice(18, False)
nutrient_pump = OutputDevice(23, False)
solenoid = OutputDevice(24, False)
l_board = OutputDevice(25, False)
motors = OutputDevice(4, False)
#function to toggle relay power
def TogglePower(relay):
    # Flip the relay's state (on -> off, off -> on).
    relay.toggle()
#TogglePower(air_pump)
#time.sleep(1)
#TogglePower(motors)
#time.sleep(1)
#TogglePower(l_board)
#time.sleep(1)
#TogglePower(nutrient_pump)
#TogglePower(solenoid)
#time.sleep(1)
#TogglePower(solenoid)
|
997,945 | a6bbf5eb88d39c1519d258ce16f0af99236f162f | from django.db.models.signals import post_save
from djblets.extensions.hooks import SignalHook
from djblets.integrations.integration import Integration
from djblets.integrations.manager import IntegrationManager
from djblets.integrations.tests.models import IntegrationConfig
from djblets.integrations.tests.testcases import IntegrationsTestCase
class DummyIntegration1(Integration):
    """Test integration that records its init state and registers a hook."""
    def initialize(self) -> None:
        # Flag that initialize() ran and attach a SignalHook so the tests
        # below can assert on hook bookkeeping.
        self.dummy_initted = True
        self.hook = SignalHook(self, post_save, self._on_callback)
    def shutdown(self) -> None:
        self.dummy_initted = False
    def _on_callback(self, **kwargs) -> None:
        pass
class DummyIntegration2(Integration):
    """Minimal no-op integration used as a second registered class."""
    def initialize(self) -> None:
        pass
class IntegrationTests(IntegrationsTestCase):
    """Unit tests for djblets.integrations.integration.Integration."""
    def setUp(self) -> None:
        super().setUp()
        self.manager = IntegrationManager(IntegrationConfig)
        self.integration = \
            self.manager.register_integration_class(DummyIntegration1)
    def test_init(self) -> None:
        """Testing Integration initialization"""
        # A freshly registered integration is disabled and uninitialized.
        self.assertEqual(self.integration.hooks, set())
        self.assertFalse(self.integration.enabled)
        self.assertFalse(hasattr(self.integration, 'dummy_initted'))
    def test_id(self) -> None:
        """Testing Integration.id"""
        self.assertEqual(self.integration.id, self.integration.integration_id)
    def test_enable_integration(self) -> None:
        """Testing Integration.enable_integration"""
        self.integration.enable_integration()
        self.assertTrue(self.integration.enabled)
        self.assertTrue(hasattr(self.integration, 'dummy_initted'))
        self.assertTrue(getattr(self.integration, 'dummy_initted'))
        # initialize() registered exactly one SignalHook.
        self.assertEqual(self.integration.hooks, {
            getattr(self.integration, 'hook'),
        })
    def test_disable_integration(self) -> None:
        """Testing Integration.disable_integration"""
        self.integration.enable_integration()
        self.integration.disable_integration()
        self.assertFalse(self.integration.enabled)
        self.assertTrue(hasattr(self.integration, 'dummy_initted'))
        self.assertFalse(getattr(self.integration, 'dummy_initted'))
        # Disabling tears the hooks down again.
        self.assertEqual(self.integration.hooks, set())
    def test_get_configs(self) -> None:
        """Testing Integration.get_configs"""
        config1 = self.integration.create_config(name='Config 1',
                                                 enabled=True,
                                                 save=True)
        config2 = self.integration.create_config(name='Config 2',
                                                 enabled=True,
                                                 save=True)
        # Add some configs that shouldn't be returned.
        integration2 = \
            self.manager.register_integration_class(DummyIntegration2)
        self.integration.create_config(name='Config 3', save=True)
        integration2.create_config(name='Config 4', save=True)
        self.assertEqual(self.integration.get_configs(), [config1, config2])
    def test_get_configs_with_filter(self) -> None:
        """Testing Integration.get_configs with filter"""
        config1 = self.integration.create_config(name='Config 1',
                                                 enabled=True,
                                                 save=True)
        self.integration.create_config(name='Config 2',
                                       enabled=True,
                                       save=True)
        # Add some configs that shouldn't be returned.
        integration2 = \
            self.manager.register_integration_class(DummyIntegration2)
        self.integration.create_config(name='Config 3', save=True)
        integration2.create_config(name='Config 4', save=True)
        self.assertEqual(self.integration.get_configs(name='Config 1'),
                         [config1])
    def test_create_config(self) -> None:
        """Testing Integration.create_config"""
        # Without save=True the config is built but never persisted.
        config = self.integration.create_config(name='Config 1')
        self.assertFalse(config.enabled)
        self.assertIsNone(config.pk)
    def test_create_config_with_save(self) -> None:
        """Testing Integration.create_config with save=True"""
        config = self.integration.create_config(name='Config 1', save=True)
        self.assertFalse(config.enabled)
        self.assertIsNotNone(config.pk)
|
997,946 | e2bd5a633e4a63b23968441999cf1599f754a3e0 | from chunk import make_all_sentences_chunks_list
import pydot_ng as pydot
F_NAME = "neko.txt.cabocha"
"""
びにおもちろいわと 名詞,一般,*,*,*,*,*
云う 動詞,自立,*,*,五段・ワ行促音便,基本形,云う,イウ,イウ
のようなものもちゃんと管理する
"""
def _45():
    """NLP 100-knock exercise 45: print verb case-frame patterns.

    For each chunk whose first verb (pos 動詞) morpheme exists, collect the
    particles (pos 助詞) of the chunks that depend on it, and print lines of
    the form "<verb base>\\t<space-joined sorted particles>".
    """
    contents = make_all_sentences_chunks_list(F_NAME)
    verb_case_patterns = []
    for sentence in contents:
        for chunk in sentence:
            case_pattern = []
            verb_base = None
            # Use the leftmost verb of the chunk as the predicate.
            for chunk_morph in chunk.morphs:
                if chunk_morph.pos == "動詞":
                    verb_base = chunk_morph.base
                    break
            if verb_base:
                # Gather particles from every chunk that depends on this one.
                for i in chunk.srcs:
                    for src_morph in sentence[i].morphs:
                        if src_morph.pos == "助詞":
                            case_pattern.append(src_morph.surface)
                if case_pattern:
                    verb_case_patterns.append(verb_base+"\t"+" ".join(sorted(case_pattern)))
    for verb_case_pattern in verb_case_patterns:
        print(verb_case_pattern)
if __name__ == "__main__":
_45()
"""
sort 45 | uniq -c | sort -r | head -10
565 云う と
442 する を
249 思う と
199 ある が
189 なる に
174 する に
173 見る て
127 する と
117 する が
105 する に を
"""
"""
grep -E "^する" 45 | sort | uniq -c | sort -r | head -10
442 する を
174 する に
127 する と
117 する が
105 する に を
86 する て を
59 する は
58 する て
57 する が を
48 する から
"""
"""
grep -E "^見る" 45 | sort | uniq -c | sort -r | head -10
173 見る て
94 見る を
21 見る て て
20 見る から
18 見る て を
14 見る と
12 見る から て
12 見る で
11 見る て は
8 見る に
"""
"""
grep -E "^与える" 45 | sort | uniq -c | sort -r | head -10
3 与える に を
2 与える て に は を
1 与える ば を
1 与える に に対して のみ は は も
1 与える て も を
1 与える て に を
1 与える て に に は を
1 与える だけ で に を
1 与える たり て に を
1 与える けれども に は を
""" |
997,947 | eb536f14768417698ae24195bbf1684852dc7299 | special.gdtr(a,b,x) |
997,948 | 55497f19f059c7a12b13501ceb1afe5b05f08df0 | # Isabella Gomez A15305555
# ECE143 HW5
# imports:
import re
def get_average_word_length(words):
    '''
    Compute the average length of the words in the list.

    :param words: non-empty list of words
    :type words: list of str
    :return: average length of the words in the list
    :rtype: float
    :raises ZeroDivisionError: if `words` is empty (unchanged behavior)
    '''
    # check that words is a list of strings
    assert type(words) == list
    assert all(type(word) == str for word in words)
    # sum()/len() replaces the manual accumulation loop.
    return sum(len(word) for word in words) / len(words)
def get_longest_word(words):
    '''
    Return the longest word in the list.

    Ties are broken in favor of the *last* longest word, matching the
    original implementation (its final loop kept overwriting the result
    on every length match).

    :param words: non-empty list of words
    :return: the string of the longest word
    :raises ValueError: if `words` is empty (unchanged: max() of empty)
    '''
    # check that words is a list of strings
    assert type(words) == list
    assert all(type(word) == str for word in words)
    # max() keeps the first maximum it sees; scanning in reverse makes that
    # the last longest word in the original order.
    return max(reversed(words), key=len)
def get_longest_words_startswith(words, start):
    '''
    Find the longest word that begins with the letter `start`.

    `start` is lowercased first (as in the original), then matched exactly
    against each word's first character; ties keep the last match.  Unlike
    the original, empty-string words are safely skipped instead of raising
    IndexError.

    :param words: list of words
    :param start: the single letter the result must start with
    :return: the longest word that begins with the given letter
    :raises ValueError: if no word starts with `start` (unchanged: the
        original called max() on an empty list in that case)
    '''
    # check that words is a list of strings
    assert type(words) == list
    assert all(type(word) == str for word in words)
    # check that start is a single letter and str
    assert type(start) == str
    assert len(start) == 1
    # make start lowercase
    start = start.lower()
    candidates = [word for word in words if word.startswith(start)]
    # Reverse scan preserves the original last-match tie-breaking.
    return max(reversed(candidates), key=len)
def get_most_common_start(words):
    '''
    Return the most common first letter among the words.

    :param words: non-empty list of words
    :return: string of the most common starting letter
    '''
    from collections import Counter  # local import keeps module deps unchanged
    # check that words is a list of strings
    assert type(words) == list
    assert all(type(word) == str for word in words)
    # Counter is O(n); the original max(set(...), key=list.count) was O(n^2)
    # and its tie-breaking depended on arbitrary set iteration order.  Ties
    # now deterministically favor the first letter encountered.
    counts = Counter(word[0] for word in words)
    return counts.most_common(1)[0][0]
def get_most_common_end(words):
    '''
    Return the most common last letter among the words.

    :param words: non-empty list of words
    :return: string of the most common ending letter
    '''
    from collections import Counter  # local import keeps module deps unchanged
    # check that words is a list of strings
    assert type(words) == list
    assert all(type(word) == str for word in words)
    # Counter is O(n); the original max(set(...), key=list.count) was O(n^2)
    # and its tie-breaking depended on arbitrary set iteration order.
    counts = Counter(word[-1] for word in words)
    return counts.most_common(1)[0][0]
|
997,949 | 502fe5eebd8e47ddc93500420a165e1117782882 | #!/usr/bin/python3
class MyInt(int):
    """int subclass with deliberately inverted comparisons ("rebel" int):
    ``==`` is true for *different* values and ``!=`` is true for *equal*
    values, preserving the original's intent."""
    def __eq__(self, val2):
        # Bug fix: the original used `self - val2 is not 0`, an identity
        # check against an int literal -- it only worked via CPython's
        # small-int cache and raises SyntaxWarning on modern Pythons.
        return self - val2 != 0
    def __ne__(self, val2):
        return self - val2 == 0
|
997,950 | 3ed6dc7fc0b4136e4f1aa5f8d999f11b8c85d00c | import json
import traceback
from db_utils import DB
from kafka import KafkaConsumer, TopicPartition
from random import randint
from time import time, sleep
from field_validations import apply_field_validations
from chained_rules import run_chained_rules
from producer import produce
from business_rules_api import apply_business_rules, update_table, initialize_rules, get_tables_data
try:
from app.da_bizRul_factory import DABizRulFactory
from app.ace_logger import Logging
except:
from da_bizRul_factory import DABizRulFactory
from ace_logger import Logging
DAO = DABizRulFactory.get_dao_bizRul()
logging = Logging()
import BusinessRules
import json
def get_chain_rules(data_base):
    """
    Fetch the chained-rule definitions from the `sequence_rule_data` table.

    :param data_base: name of the business-rules database to query
    :return: list of [rule_id, rule_string, next_if_sucess, next_if_failure,
             group, description, data_source] rows
    """
    business_rule_db = DB(data_base, host='business_rules_db')
    df = business_rule_db.execute(f"SELECT `id`, `rule_id`,`rule_string`, `next_if_sucess`, `next_if_failure`, `group`, `description`, `data_source` from `sequence_rule_data`")
    chained_rules = [[e['rule_id'], e['rule_string'], e['next_if_sucess'], e['next_if_failure'], e['group'],e['description'], e['data_source']] for e in df.to_dict(orient='records') ]
    return chained_rules
database = 'business_rules'
rules = get_chain_rules(database)
# print (rules[0])
# rules = [rules[0]]
rules_id_mapping = {rule.pop(0):rule for i,rule in enumerate(rules)}
data = {'id': 1, 'case_id': '2000441939', 'highlight': '{"Document Heading": {"height": 12, "width": 63, "y": 38, "x": 297, "right": 364, "word": "Tax Invoice", "page": 0}, "Vendor GSTIN": {"height": 7, "width": 101, "y": 117, "x": 102, "right": 203, "word": "37AAUFA4486C1Z5", "page": 0}, "Billed To (DRL Name)": {"height": 9, "width": 197, "y": 159, "x": 39, "right": 252, "word": "Dr. Reddys Laboratories Limited CTO-6", "page": 0}, "DRL GSTIN": {"height": 10, "width": 105, "y": 205, "x": 128, "right": 239, "word": "37AAACD7999Q1ZJ", "page": 0}, "Invoice Number": {"height": 9, "width": 24, "y": 79, "x": 364, "right": 389, "word": "6106", "page": 0}, "Vendor Name": {"height": 7, "width": 93, "y": 816, "x": 529, "right": 628, "word": "Akshaya Lab Products", "page": 0}, "Invoice Date": {"height": 9, "width": 60, "y": 79, "x": 498, "right": 559, "word": "4-May-2019", "page": 0}, "PO Number": {"height": 10, "width": 61, "y": 157, "x": 364, "right": 426, "word": "5800438872", "page": 0}, "Invoice Total": {"height": 7, "width": 163, "y": 708, "x": 325, "right": 629, "word": "50,000.00 4,500.00 4,500.00 9,000.00", "page": 0}}', 'PO Number': '5800438872', 'Invoice Number': '6106', 'Invoice Category': None, 'Invoice Date': '2019-05-04', 'Invoice Total': '59000', 'Invoice Base Amount': '50000', 'GST Percentage': '', 'IGST Amount': '0', 'DRL GSTIN': '37AAACD7999Q1ZJ', 'Vendor GSTIN': '37AAUFA4486C1Z5', 'Billed To (DRL Name)': 'Dr. 
Reddys Laboratories Limited CTO-6', 'Vendor Name': 'Akshaya Lab Products', 'Special Instructions': '', 'Digital Signature': 'Yes', 'Document Heading': 'Tax Invoice', 'HSN/SAC': '', 'DC Number': '', 'SGST/CGST Amount': '', 'GRN Number': '', 'Service Entry Number': '', 'Comments': None, 'Table': '[[[[[["<b>Product description</b>",1,1],["<b>HSN/SAC</b>",1,1],["<b>Quantity</b>",1,1],["<b>Rate</b>",1,1],["<b>Gross Amount</b>",1,1]],[[" Headspace Vials 20ml - Pk/100 Cat No: FULV21 Make: Saint-Gobain Material Code: 940003734",1,1],[" Rate 70179090 18 %",1,1],[" 50 Pack",1,1],[" 1,000.00",1,1],[" 50,000.00",1,1]],[[" SGST Output @ 9%",1,1],["",1,1],["",1,1],[" 9",1,1],[" 4,500.00",1,1]],[[" CGST Output @ 9%",1,1],["",1,1],["",1,1],[" 9",1,1],[" 4,500.00",1,1]]]]]]'}
# data['PO Number'] = "4"
record = {
'Name': ['Ankit', 'Amit', 'Aishwarya', 'Priyanka', 'Priya', 'Shaurya' ],
'Age': [21, 19, 20, 18, 17, 21],
'Stream': ['Math', 'Commerce', 'Science', 'Math', 'Math', 'Science'],
'Percentage': [88, 92, 95, 70, 65, 78]
}
def evaluate_chained_rules(start=None, fax_unique_id=None):
    """Walk the rule chain from `start` (default rule id '1').

    Each rule id maps, via the module-level `rules_id_mapping`, to a rule
    string plus successor ids for the pass/fail outcomes.  Returns
    {'flag': True} when the chain reaches "END"; returns None when control
    is handed off to a bot ("BOT").  Raises KeyError for an unknown rule id.
    """
    # this means we are just starting..
    if start == None:
        start = '1'
    rule_id = start
    # dangerous...might end in infinite loop..take care
    while rule_id != "END":
        rule_string, next_ruleid_if_success, next_ruleid_if_fail,stage, description, data_source = rules_id_mapping[rule_id]
        rule_to_evaluate = json.loads(rule_string)
        # get the data_source according to the case_id...and..the stage...
        data_source = {"ocr":data, "validation": data.copy(), "master":record}
        BR = BusinessRules.BusinessRules(fax_unique_id,[rule_to_evaluate], data_source)
        decision = BR.evaluate_rule(rule_to_evaluate)
        print ("HEREEE", decision)
        # Branch to the success/failure successor based on the decision.
        if decision:
            rule_id = next_ruleid_if_success
        else:
            rule_id = next_ruleid_if_fail
        if rule_id == "BOT":
            # send to the microservices
            # parameters- case_id, start, stage, table_name that bot should update
            return
    result = {}
    result['flag'] = True
    return result
def run_business_rule_consumer_(data, function_params):
    """Run the business rules for one consumed message.

    For the 'default' and 'validation' stages only the field validations
    are applied; for anything else the chained rules run, optionally
    resuming at `data['next_rule_id']` when the message came from a bot.

    :param data: message payload; must contain `case_id`
    :param function_params: parameters of the triggering button function
    :return: dict with `flag`, `message` and the field-validation `updates`
    """
    print ("GOT THE PARAMTERTES", data, function_params)
    print (f"\n the data got is \n {data} \n")
    # evaluate the field validation
    case_id = data['case_id']
    start_rule_id = data.get('next_rule_id', None)
    bot_message = data.get('bot_message', False)
    bot_status = data.get('bot_status', None)
    stage = None
    try:
        # The stage lives inside the parameters of the
        # 'run_business_rule' function entry, when present.
        for func in data['functions']:
            try:
                if func['route'] == 'run_business_rule':
                    stage = func['parameters']['stage'][0]
            except Exception as e:
                print (f"No stage is got {e}")
                stage = None
    except Exception as e:
        print (e)
    # stage = data.get('stage', None)
    print (f"\n GOT THE STAGE {stage} \n")
    if stage == 'default':
        db_tables = {
            "alorica_data": ['screen_shots'],
            "extraction" : ["ocr"],
        }
        # Bug fix: `updates` was returned without ever being assigned in
        # this branch (NameError at runtime); capture the validation result
        # the same way the 'validation' branch below does.
        updates = apply_field_validations(case_id, stage, db_tables)
        return {'flag': True, 'message': 'Completed runninig default business rules.', 'updates': updates}
    if stage == 'validation':
        db_tables = {
            "extraction" : ["ocr"],
            "queues":["process_queue"]
        }
        updates = apply_field_validations(case_id, stage, db_tables)
        return {'flag': True, 'message': 'Completed runninig validation business rules.', 'updates': updates}
    if not bot_message:
        # updates = apply_field_validations(case_id)
        updates = None
    else:
        print ("CALLED BY BOT")
        print (case_id, start_rule_id, bot_message)
        updates = None
    # run all the chained rules
    print ("RUNNING CHAINED RULES ", start_rule_id,bot_message )
    run_chained_rules(case_id,data, start_rule_id=start_rule_id, bot_finished=bot_message, bot_status=bot_status)
    return {'flag': True, 'message': 'Completed runninig business rules.', 'updates': updates}
def run_business_rule_consumer(data):
    """API entry point: run business-rule stages for `data['case_id']`.

    Without a 'stage' key every stage runs; 'stage' may also be a single
    stage name (str) or a list of stage names.  Returns a dict carrying
    `flag`, `message` and the table `updates` written back.
    """
    # NOTE(review): database credentials are hard-coded here; move them to
    # configuration/secrets management.
    queue_db_config = {
        'host': '172.31.45.112',
        'user': 'root',
        'password': 'AlgoTeam123',
        'port': '3306'
    }
    queue_db = DB('queues', **queue_db_config)
    print("Submit data", data)
    if 'case_id' not in data:
        message = f'`case_id` key not provided.'
        print(message)
        return {'flag': False, 'message': message}
    case_id = data['case_id']
    initialize_rules()
    tables = {
        "extraction" : ["business_rule","ocr","sap","validation"],
        "queues" : ["ocr_info", "process_queue"]
    }
    table_data = get_tables_data(tables, case_id)
    table_data['update'] = {}
    if 'stage' not in data:
        message = f'`{case_id}` key not provided. Running all stages...'
        print(message)
        stages = ['One']
        for stage in stages:
            starting_time = time()
            data = apply_business_rules(api=True, table_data= table_data,stage=stage, case_id=case_id)
            print("time taken - ",time() - starting_time)
            if not data['flag'] and stage == 'One':
                DAO.update_error_msg(data['message'], case_id)
        update_values = update_table(table_data, case_id, tables)
        if data['flag']:
            return {'flag': data['flag'], 'message': 'Applied all business rules.', 'updates': update_values}
        else:
            return {'flag': data['flag'], 'message': 'Something failed.', 'updates': update_values}
    stages = data['stage']
    starting_time = time()
    if type(stages) is str:
        # Single stage: forward the whole payload through to the rules.
        data = apply_business_rules(api=True, table_data = table_data,**data)
        time_taken = time() - starting_time
        data['time_taken'] = time_taken
        # print("time takne - ",time_taken)
        update_values = update_table(table_data, case_id, tables)
        data['updates'] = update_values
        if not data['flag'] and stages == 'One':
            DAO.update_error_msg(data['message'], case_id)
        return data
    elif type(stages) is list:
        for stage in stages:
            # starting_time = time()
            rule_response = apply_business_rules(api=True, table_data= table_data,stage=stage, case_id=case_id)
            if not rule_response['flag'] and stage == 'One':
                # Bug fix: the message was stored in a misspelled variable
                # (`messsage`) while an undefined `message` was passed to
                # update_error_msg, raising NameError on this failure path.
                try:
                    message = rule_response['message']
                except KeyError:
                    message = 'Validation Failed due to execution error in business rules.'
                DAO.update_error_msg(message, case_id)
            if not rule_response['flag']:
                message = f'Something went wrong running stage at `{stage}`. Skipping other rules.'
                print(message)
                update_values = update_table(table_data, case_id, tables)
                return {'flag': False, 'message': message, 'updates':update_values}
        update_values = update_table(table_data, case_id, tables)
        print("time takne - ",time() - starting_time)
        if rule_response['flag']:
            return {'flag': rule_response['flag'], 'message': 'Completed runninig business rules.', 'updates': update_values}
        else:
            return {'flag': rule_response['flag'], 'message': 'Something wrong happened.', 'updates': update_values}
def consume(broker_url='broker:9092'):
    """Kafka consumer loop for the `run_business_rule` topic.

    Consumes messages forever, runs the business rules for each case, and
    updates `process_queue` status/progress per the configured message
    flow, manually committing offsets after each message.
    """
    try:
        route = 'run_business_rule'
        common_db_config = {
            'host': 'common_db',
            'port': '3306',
            'user': 'root',
            'password': 'root'
        }
        kafka_db = DB('kafka', **common_db_config)
        # kafka_db = DB('kafka')
        queue_db_config = {
            'host': 'queue_db',
            'port': '3306',
            'user': 'root',
            'password': ''
        }
        queue_db = DB('queues', **queue_db_config)
        # queue_db = DB('queues')
        message_flow = kafka_db.get_all('grouped_message_flow')
        print(f'Listening to topic `{route}`...')
        consumer = KafkaConsumer(
            bootstrap_servers=broker_url,
            value_deserializer=lambda value: json.loads(value.decode()),
            auto_offset_reset='earliest',
            group_id='run_business_rule',
            api_version=(0,10,1),
            enable_auto_commit=False,
            session_timeout_ms=800001,
            request_timeout_ms=800002
        )
        print('Consumer object created.')
        parts = consumer.partitions_for_topic(route)
        if parts is None:
            # Topic does not exist yet: produce an empty message to create
            # it, then rebuild the consumer until partitions appear.
            logging.warning(f'No partitions for topic `{route}`')
            logging.debug(f'Creating Topic: {route}')
            produce(route, {})
            print(f'Listening to topic `{route}`...')
            while parts is None:
                consumer = KafkaConsumer(
                    bootstrap_servers=broker_url,
                    value_deserializer=lambda value: json.loads(value.decode()),
                    auto_offset_reset='earliest',
                    group_id='sap_portal',
                    api_version=(0,10,1),
                    enable_auto_commit=False,
                    session_timeout_ms=800001,
                    request_timeout_ms=800002
                )
                parts = consumer.partitions_for_topic(route)
                logging.warning("No partition. In while loop. Make it stop")
        partitions = [TopicPartition(route, p) for p in parts]
        consumer.assign(partitions)
        # Status strings shown in the UI for this route's button function.
        query = 'SELECT * FROM `button_functions` WHERE `route`=%s'
        function_info = queue_db.execute(query, params=[route])
        in_progress_message = list(function_info['in_progress_message'])[0]
        failure_message = list(function_info['failure_message'])[0]
        success_message = list(function_info['success_message'])[0]
        for message in consumer:
            data = message.value
            try:
                case_id = data['case_id']
                functions = data['functions']
            except Exception as e:
                # Malformed payload: acknowledge and skip it.
                print(f'Recieved unknown data. [{data}] [{e}]')
                consumer.commit()
                continue
            function_params = {}
            # coming from bot_watcher .... not button function
            print ("CHECKING FOR THE IS BUTTON", data.get('is_button', True))
            if not data.get('is_button', False):
                try:
                    result = run_business_rule_consumer_(data, function_params)
                except Exception as e:
                    print (f"Error runnign the business_rules {e}")
                    print (str(e))
                    query = 'UPDATE `process_queue` SET `queue`=%s, `status`=%s, `case_lock`=0, `failure_status`=1 WHERE `case_id`=%s'
                    queue_db.execute(query, params=['GHI678', failure_message, case_id])
                consumer.commit()
                continue
            # Get which button (group in kafka table) this function was called from
            group = data['group']
            # Get message group functions
            group_messages = message_flow.loc[message_flow['message_group'] == group]
            # If its the first function the update the progress count
            first_flow = group_messages.head(1)
            first_topic = first_flow.loc[first_flow['listen_to_topic'] == route]
            query = 'UPDATE `process_queue` SET `status`=%s, `total_processes`=%s WHERE `case_id`=%s'
            if not first_topic.empty:
                if list(first_flow['send_to_topic'])[0] is None:
                    queue_db.execute(query, params=[in_progress_message, len(group_messages), case_id])
                else:
                    queue_db.execute(query, params=[in_progress_message, len(group_messages) + 1, case_id])
            # Getting the correct data for the functions. This data will be passed through
            # rest of the chained functions.
            function_params = {}
            for function in functions:
                if function['route'] == route:
                    function_params = function['parameters']
                    break
            # Call save changes function
            try:
                print (f"\n the data got is \n {data} \n")
                result = run_business_rule_consumer_(data, function_params)
                # return {'flag': True, 'message': 'Completed runninig business rules.', 'updates': None}
            except Exception as e:
                # Unlock the case.
                logging.error(e)
                query = 'UPDATE `process_queue` SET `queue`=%s, `status`=%s, `case_lock`=0, `failure_status`=1 WHERE `case_id`=%s'
                queue_db.execute(query, params=['Maker', failure_message, case_id])
                consumer.commit()
                continue
            # Check if function was succesfully executed
            if result['flag']:
                # If there is only function for the group, unlock case.
                if not first_topic.empty:
                    if list(first_flow['send_to_topic'])[0] is None:
                        # It is the last message. So update file status to completed.
                        query = 'UPDATE `process_queue` SET `status`=%s, `case_lock`=0, `completed_processes`=`completed_processes`+1 WHERE `case_id`=%s'
                        queue_db.execute(query, params=[success_message, case_id])
                        consumer.commit()
                        continue
                last_topic = group_messages.tail(
                    1).loc[group_messages['send_to_topic'] == route]
                # If it is not the last message, then produce to next function else just unlock case.
                if last_topic.empty:
                    # Get next function name
                    print ("\n CHECKING FOR THE DATA \n")
                    print (data)
                    next_topic = list(
                        group_messages.loc[group_messages['listen_to_topic'] == route]['send_to_topic'])[0]
                    if next_topic is not None:
                        produce(next_topic, data)
                    # Update the progress count by 1
                    query = 'UPDATE `process_queue` SET `status`=%s, `completed_processes`=`completed_processes`+1 WHERE `case_id`=%s'
                    queue_db.execute(query, params=[success_message, case_id])
                    consumer.commit()
                else:
                    # It is the last message. So update file status to completed.
                    query = 'UPDATE `process_queue` SET `status`=%s, `case_lock`=0, `completed_processes`=`completed_processes`+1 WHERE `case_id`=%s'
                    queue_db.execute(query, params=[success_message, case_id])
                    consumer.commit()
            else:
                # Unlock the case.
                query = 'UPDATE `process_queue` SET `status`=%s, `case_lock`=0, `failure_status`=1 WHERE `case_id`=%s'
                queue_db.execute(query, params=[failure_message, case_id])
                consumer.commit()
    except:
        logging.exception('Something went wrong in consumer. Check trace.')
if __name__ == '__main__':
consume()
|
997,951 | 6103bfe71f3902e7aecf01904c4a375a20713a47 | #syntax:
# Conditionals tutorial: if / elif / else syntax.
num = 20
if num < 10:
    print("less than")
elif num > 10:
    print("greater than")
else:
    print("equal to")

# Truthiness rules:
# - None is falsy
# - empty objects (strings, lists, ...) are falsy
# - any non-empty object is truthy
string = ""
if string:
    print("string is non-empty")
else:
    print("string is empty")

s = []
# Bug fix: this check previously re-tested `string` instead of the list `s`,
# so the list's emptiness was never actually examined.
if s:
    print("list is non-empty")
else:
    print("list is empty")

# logical operators: and, or, not
# identity operators: is, is not
# membership operators: in, not in
997,952 | c249104133c1defa2d08e44347762ed8d011890f | import time,threading
"""
多线程
"""
def loop():
thread_name = threading.current_thread().name
print('Thread %s is running...' % thread_name)
n = 0
while n < 5:
n += 1
print('Thread %s >>> %d' % (thread_name, n))
print('Thread %s ends.' % thread_name)
thread_name = threading.current_thread().name
print('Thread %s is running...' % thread_name)
t = threading.Thread(target= loop, name= 'loopThread')
t.start()
t.join()
print('Thread %s ends.' % thread_name) |
997,953 | 48adabe462019ee5c264a8450be820aa147ca697 | # -*- coding: utf-8 -*-
import time
import pika
def callback(channel, method, properties, body):
    """Handle one delivery: print it, then sleep one second per '.' in the body."""
    print("****** received: %r" % body)
    time.sleep(body.count(b'.'))
# Connect to a local RabbitMQ broker and consume from the "hello" queue.
connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost"))
channel = connection.channel()
channel.queue_declare(queue="hello")
# NOTE(review): callback-first basic_consume matches pika < 1.0; pika >= 1.0
# expects basic_consume(queue, on_message_callback, auto_ack=...) -- confirm
# the pinned pika version.
channel.basic_consume(callback, queue="hello", no_ack=True)
print("--- Waiting for message. To exit press ctrl + c. ---")
channel.start_consuming()
|
997,954 | f5e9503aa86286a887c1d0b137b9f05edbbce624 | import player
# required to make imports work properly
import sys
sys.path.append("../")
from game import ChessGame
def main():
    """Entry point: run a single game."""
    play_game()
def play_game():
    """Play one chess game between a random mover and a basic minimax player."""
    p1 = player.RandomComputer()
    p2 = player.BasicMinimax()
    game = ChessGame(p1, p2)
    game.play()
if __name__ == "__main__":
    main()
997,955 | 319a8a297bfcaf203c0b15af46a4d3376db93f4f | import pygame
import constants
import objects
class Level():
    """Base class for a game level: owns the sprite groups, the background
    image, and helpers that populate the level from [image, x, y] specs."""
    # Class-level placeholders; instances/subclasses overwrite these.
    entities = None
    enemy_list = None
    camerafocus = ""
    background = None
    def __init__(self, player, game):
        self.game = game
        # Install fresh sprite groups on the shared game object.
        game.entities = pygame.sprite.Group()
        game.enemygroup = pygame.sprite.Group()
        game.lives = pygame.sprite.Group()
        self.heart_list = pygame.sprite.Group()
        self.player = player
    def update(self):
        """Advance all sprite groups by one frame."""
        self.game.entities.update()
        self.game.enemygroup.update()
        # NOTE(review): enemygroup2/projectilegroup are assumed to be set on
        # `game` elsewhere before update() runs -- confirm.
        self.game.enemygroup2.update(self.game.entities, self.game.projectilegroup, self.player)
        self.game.projectilegroup.update(self.game.entities, self.game.enemygroup2)
        #self.game.lives.update(self.player)
    def drawBackground(self, screen):
        # Blit the level backdrop at the top-left corner.
        screen.blit(self.background, (0, 0))
    def setLives(self):
        """Create the player's heart (life) sprites and add them to the game."""
        heart_list = [[objects.HEART, 0, 0]]
        for hearts in heart_list:
            block = objects.Lives(hearts[0])
            block.rect.x = hearts[1]
            block.rect.y = hearts[2]
            block.player = self.player
            self.game.lives.add(block)
    def setGroups(self, neutral, enemy):
        """Populate the enemy and platform groups from [image, x, y] specs."""
        for enemy in enemy:
            block = objects.Harmful(enemy[0])
            block.rect.x = enemy[1]
            block.rect.y = enemy[2]
            block.player = self.player
            self.game.enemygroup.add(block)
        for platform in neutral:
            block = objects.Platform(platform[0])
            block.rect.x = platform[1]
            block.rect.y = platform[2]
            block.player = self.player
            self.game.entities.add(block)
class Level1(Level):
    """First level: spike pits, patrolling bats, chain climbs and crate stacks.

    The original's ~60 identical "create block / set position / register"
    stanzas are folded into data tables plus two private helpers that add
    exactly the same sprites with the same attributes; two commented-out
    experimental stanzas were removed as dead code.
    """

    def __init__(self, player):
        Level.__init__(self, player, player.game)
        self.background = pygame.image.load("background1.png").convert()

        # Invisible obstacles stop the player from going out of bounds.
        self.level = [[objects.INV_OBSTACLE, -50, -400],
                      [objects.INV_OBSTACLE, -50, 0],
                      [objects.INV_OBSTACLE, -50, 408]]
        self.harmful_objects = [[objects.SPIKES, 480, 768], [objects.SPIKES, 600, 768],
                                [objects.SPIKES, 720, 768], [objects.SPIKES, 840, 768],
                                [objects.SPIKES, 450, 185], [objects.SPIKES, 578, 185],
                                [objects.SPIKES, 706, 185], [objects.SPIKES, 833, 185]]
        # Add the hearts and enemy/platform sprites to spritegroups in class Game.
        self.setLives()
        self.setGroups(self.level, self.harmful_objects)

        # Bats: (x, y, speed, boundary_left, boundary_right);
        # None leaves the Bat's own default boundary in place.
        for x, y, speed, left, right in (
                (500, 600, 2, 300, None),
                (350, 285, 2, 200, 630),
                (300, 50, 2, 150, 780),
                (1300, 600, 2, 1100, 1500),
                (1300, 400, 2, 1000, 1500),
                (1100, 300, 2, 1000, 1500),
                (1100, 200, 2, 1000, 1500)):
            bat = objects.Bat(x, y, speed)
            bat.boundary_left = left
            if right is not None:
                bat.boundary_right = right
            bat.player = self.player
            self.game.enemygroup2.add(bat)

        # Solid platforms: sprite constant -> (x, y) positions.
        # The duplicate CHAINS at (1810, 650) is kept from the original layout.
        for sprite, positions in (
                (objects.FLOOR,
                 [(0, 750), (480, 783), (960, 750), (1200, 750), (1350, 750)]),
                (objects.CHAINS,
                 [(960, 141), (960, 237), (960, 333), (960, 429),
                  (1810, 650), (1810, 650), (1810, 555), (1810, 460),
                  (1810, 365), (1810, 270), (1810, 175), (1810, 80), (1810, -15)]),
                (objects.BOX_S_STRIPPED,
                 [(960, 686), (960, 623), (960, 560), (960, 497)]),
                (objects.BOX_B_STRIPPED, [(1023, 653)])):
            for x, y in positions:
                self._add_platform(sprite, x, y)

        # Decorative, non-colliding backgrounds.
        for sprite, positions in (
                (objects.BACKGROUND_BLACK_W_WINDOWS,
                 [(270, 520), (270, 270), (843, 520), (1227, 520),
                  (-300, 270), (843, 270), (1227, 270)]),
                (objects.BACKGROUND_BLACK_BOXES,
                 [(0, 143), (764, 143), (380, 143), (0, 0), (764, 0), (380, 0)])):
            for x, y in positions:
                self._add_background(sprite, x, y)

        self._add_platform(objects.FLOOR, -125, 526)
        self._add_background(objects.BACKGROUND_W_STAIRS, 0, 526)

        for x, y in ((500, 200), (720, 200), (256, 200), (960, 113)):
            self._add_platform(objects.PLATFORM_DARK_GREY, x, y)

        # Moving platforms:
        # (sprite, x, y, bottom, top, left, right, change_x, change_y);
        # None leaves that axis' speed at the MovingPlatform default,
        # matching the original stanzas, which set only one axis each.
        for sprite, x, y, bottom, top, left, right, dx, dy in (
                (objects.SMALL_PLAT, 770, 400, 680, 350, -380, 1900, None, 1),
                (objects.SMALL_PLAT, 1300, 400, 630, 150, -380, 1900, None, 1),
                (objects.SMALL_PLAT, 0, 250, 450, 150, -380, 1900, None, 1),
                (objects.PLATFORM_GREY_SMALL, 700, 150, 450, 150, 450, 900, 1, None)):
            mover = objects.MovingPlatform(sprite)
            mover.rect.x = x
            mover.rect.y = y
            mover.boundary_bottom = bottom
            mover.boundary_top = top
            mover.boundary_left = left
            mover.boundary_right = right
            if dx is not None:
                mover.change_x = dx
            if dy is not None:
                mover.change_y = dy
            mover.player = self.player
            mover.level = self
            self.game.entities.add(mover)

        self._add_platform(objects.BOX_S_STRIPPED, 200, 463)
        self._add_platform(objects.PLATFORM_GREY_SMALL, 550, 700)
        self._add_platform(objects.PLATFORM_GREY_SMALL, 550, 400)

    def _add_platform(self, sprite, x, y):
        """Create a solid Platform at (x, y), wire it to the player and this
        level, and register it with the game's entity group."""
        block = objects.Platform(sprite)
        block.rect.x = x
        block.rect.y = y
        block.player = self.player
        block.level = self
        self.game.entities.add(block)

    def _add_background(self, sprite, x, y):
        """Create a decorative Backgrounds sprite at (x, y) and register it
        with the game's background group."""
        block = objects.Backgrounds(sprite)
        block.rect.x = x
        block.rect.y = y
        block.player = self.player
        block.level = self
        self.game.backgrounds.add(block)
class Level2(Level):
    # Level 2: mostly empty for now — a floor and one air platform over magma.
    def __init__(self, player):
        """Build level 2 geometry for *player* and register its sprites."""
        Level.__init__(self,player, player.game)
        self.background = pygame.image.load("back4.png").convert()
        # [sprite, x, y] specs: solid ground plus one floating platform.
        self.level = [[objects.FLOOR, 390,700], [objects.AIR_PLAT, 500, 400]]
        # NOTE(review): stored on the shared game object here, while Level1
        # keeps the equivalent list on the level instance
        # (self.harmful_objects) — confirm which attribute consumers read.
        self.game.harmful_objects = [[objects.HOT_MAGMA, 0, 765], [objects.HOT_MAGMA, 295, 765], [objects.HOT_MAGMA, 2 * 295, 765],
                       [objects.HOT_MAGMA, 3 * 295, 765], [objects.HOT_MAGMA, 4 * 295, 765],[objects.HOT_MAGMA, -250,765], [objects.HOT_MAGMA, -500,765]]
        self.setLives()
        self.setGroups(self.level, self.game.harmful_objects)
|
997,956 | 211b30794b165ac5728a5dbae3d1ef6838e82511 | # Generated from visualg.g4 by ANTLR 4.8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2?")
buf.write("\u0249\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\3\2\3\2\3\3\3\3\3\3\3\3")
buf.write("\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3")
buf.write("\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\5\3\u009c\n\3\3\4\3")
buf.write("\4\3\4\3\4\5\4\u00a2\n\4\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3")
buf.write("\5\5\5\u00ac\n\5\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\5")
buf.write("\6\u00b7\n\6\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\t\3")
buf.write("\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\n\3\n")
buf.write("\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\13\6\13\u00da\n\13")
buf.write("\r\13\16\13\u00db\3\13\7\13\u00df\n\13\f\13\16\13\u00e2")
buf.write("\13\13\3\f\3\f\3\f\3\f\7\f\u00e8\n\f\f\f\16\f\u00eb\13")
buf.write("\f\3\f\3\f\3\f\3\f\3\r\3\r\6\r\u00f3\n\r\r\r\16\r\u00f4")
buf.write("\3\r\3\r\3\16\3\16\3\16\3\16\3\16\3\16\3\17\3\17\3\17")
buf.write("\3\20\3\20\3\21\3\21\3\22\3\22\3\23\3\23\3\24\3\24\3\25")
buf.write("\3\25\3\25\3\26\6\26\u0110\n\26\r\26\16\26\u0111\3\27")
buf.write("\6\27\u0115\n\27\r\27\16\27\u0116\3\27\3\27\6\27\u011b")
buf.write("\n\27\r\27\16\27\u011c\5\27\u011f\n\27\3\30\3\30\3\30")
buf.write("\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30")
buf.write("\3\30\5\30\u0130\n\30\3\31\3\31\3\31\3\32\3\32\3\32\3")
buf.write("\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32")
buf.write("\3\32\5\32\u0144\n\32\3\33\3\33\3\33\3\33\3\33\3\34\3")
buf.write("\34\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3\36\3\36\3\36")
buf.write("\3\36\3\36\3\36\3\37\3\37\3\37\3\37\3\37\3\37\3 \3 \3")
buf.write(" \3 \3 \3 \3 \3 \3!\3!\3!\3!\3!\3\"\3\"\3\"\3\"\3\"\3")
buf.write("\"\3\"\3\"\3\"\3\"\3#\3#\3#\3#\3#\3#\3#\3#\3#\3#\3#\3")
buf.write("$\3$\3%\3%\3%\3%\3%\3&\3&\3&\3&\3\'\3\'\3\'\3\'\3\'\3")
buf.write("(\3(\3(\3(\3(\3(\3(\3(\3)\3)\3)\3)\3)\3)\3)\3*\3*\3*\3")
buf.write("*\3*\3*\3*\3*\3*\3+\3+\3+\3+\3+\3+\3+\3+\3+\3+\3+\3+\3")
buf.write(",\3,\3,\3,\3,\3,\3,\3,\3,\3,\3,\3-\3-\3-\3-\3-\3-\3-\3")
buf.write("-\3-\3-\3-\3-\3-\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3")
buf.write(".\3.\3.\3.\3/\3/\3/\3/\3/\3/\3/\3\60\3\60\3\60\3\60\3")
buf.write("\60\3\60\3\60\3\60\3\61\3\61\3\61\3\61\3\61\3\61\3\61")
buf.write("\3\61\3\61\3\61\3\62\3\62\3\63\3\63\3\63\3\63\3\64\3\64")
buf.write("\3\64\3\65\3\65\3\65\3\65\3\66\3\66\3\66\3\66\3\66\3\66")
buf.write("\3\66\3\66\3\67\3\67\6\67\u020e\n\67\r\67\16\67\u020f")
buf.write("\3\67\3\67\6\67\u0214\n\67\r\67\16\67\u0215\3\67\3\67")
buf.write("\38\38\38\38\38\38\39\39\59\u0222\n9\3:\3:\5:\u0226\n")
buf.write(":\3;\3;\3;\3;\3;\3;\3;\3;\3;\3;\3<\3<\3=\3=\3=\3=\3=\3")
buf.write("=\3=\3=\3=\3=\3>\3>\3>\3>\3>\3>\3?\3?\3?\3?\3?\3?\3\u00e9")
buf.write("\2@\3\2\5\3\7\4\t\5\13\6\r\7\17\b\21\t\23\n\25\13\27\f")
buf.write("\31\r\33\16\35\17\37\20!\21#\22%\23\'\24)\25+\26-\27/")
buf.write("\30\61\31\63\32\65\33\67\349\35;\36=\37? A!C\"E#G$I%K")
buf.write("&M\'O(Q)S*U+W,Y-[.]/_\60a\61c\62e\63g\64i\65k\66m\67o")
buf.write("8q9s:u;w<y={>}?\3\2\f\3\2\62;\5\2,-//\61\61\4\2>>@@\4")
buf.write("\2--//\4\2C\\c|\6\2\62;C\\aac|\3\2$$\b\2\"\"%\60\62\\")
buf.write("``c|~~\3\2\60\60\5\2\62;C\\c|\2\u0260\2\5\3\2\2\2\2\7")
buf.write("\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2")
buf.write("\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2")
buf.write("\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2")
buf.write("\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2")
buf.write("\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63")
buf.write("\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2")
buf.write("\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2")
buf.write("\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3")
buf.write("\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y")
buf.write("\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2")
buf.write("c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2\2\2")
buf.write("\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3\2\2")
buf.write("\2\2w\3\2\2\2\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2\2\3\177\3")
buf.write("\2\2\2\5\u009b\3\2\2\2\7\u00a1\3\2\2\2\t\u00ab\3\2\2\2")
buf.write("\13\u00b6\3\2\2\2\r\u00b8\3\2\2\2\17\u00ba\3\2\2\2\21")
buf.write("\u00c1\3\2\2\2\23\u00ce\3\2\2\2\25\u00d9\3\2\2\2\27\u00e3")
buf.write("\3\2\2\2\31\u00f0\3\2\2\2\33\u00f8\3\2\2\2\35\u00fe\3")
buf.write("\2\2\2\37\u0101\3\2\2\2!\u0103\3\2\2\2#\u0105\3\2\2\2")
buf.write("%\u0107\3\2\2\2\'\u0109\3\2\2\2)\u010b\3\2\2\2+\u010f")
buf.write("\3\2\2\2-\u0114\3\2\2\2/\u012f\3\2\2\2\61\u0131\3\2\2")
buf.write("\2\63\u0143\3\2\2\2\65\u0145\3\2\2\2\67\u014a\3\2\2\2")
buf.write("9\u014d\3\2\2\2;\u0153\3\2\2\2=\u0159\3\2\2\2?\u015f\3")
buf.write("\2\2\2A\u0167\3\2\2\2C\u016c\3\2\2\2E\u0176\3\2\2\2G\u0181")
buf.write("\3\2\2\2I\u0183\3\2\2\2K\u0188\3\2\2\2M\u018c\3\2\2\2")
buf.write("O\u0191\3\2\2\2Q\u0199\3\2\2\2S\u01a0\3\2\2\2U\u01a9\3")
buf.write("\2\2\2W\u01b5\3\2\2\2Y\u01c0\3\2\2\2[\u01cd\3\2\2\2]\u01dd")
buf.write("\3\2\2\2_\u01e4\3\2\2\2a\u01ec\3\2\2\2c\u01f6\3\2\2\2")
buf.write("e\u01f8\3\2\2\2g\u01fc\3\2\2\2i\u01ff\3\2\2\2k\u0203\3")
buf.write("\2\2\2m\u020b\3\2\2\2o\u0219\3\2\2\2q\u0221\3\2\2\2s\u0225")
buf.write("\3\2\2\2u\u0227\3\2\2\2w\u0231\3\2\2\2y\u0233\3\2\2\2")
buf.write("{\u023d\3\2\2\2}\u0243\3\2\2\2\177\u0080\t\2\2\2\u0080")
buf.write("\4\3\2\2\2\u0081\u0082\7k\2\2\u0082\u0083\7p\2\2\u0083")
buf.write("\u0084\7v\2\2\u0084\u0085\7g\2\2\u0085\u0086\7k\2\2\u0086")
buf.write("\u0087\7t\2\2\u0087\u009c\7q\2\2\u0088\u0089\7t\2\2\u0089")
buf.write("\u008a\7g\2\2\u008a\u008b\7c\2\2\u008b\u009c\7n\2\2\u008c")
buf.write("\u008d\7e\2\2\u008d\u008e\7c\2\2\u008e\u008f\7t\2\2\u008f")
buf.write("\u0090\7c\2\2\u0090\u0091\7e\2\2\u0091\u0092\7v\2\2\u0092")
buf.write("\u0093\7g\2\2\u0093\u0094\7t\2\2\u0094\u009c\7g\2\2\u0095")
buf.write("\u0096\7n\2\2\u0096\u0097\7q\2\2\u0097\u0098\7i\2\2\u0098")
buf.write("\u0099\7k\2\2\u0099\u009a\7e\2\2\u009a\u009c\7q\2\2\u009b")
buf.write("\u0081\3\2\2\2\u009b\u0088\3\2\2\2\u009b\u008c\3\2\2\2")
buf.write("\u009b\u0095\3\2\2\2\u009c\6\3\2\2\2\u009d\u00a2\t\3\2")
buf.write("\2\u009e\u009f\7o\2\2\u009f\u00a0\7q\2\2\u00a0\u00a2\7")
buf.write("f\2\2\u00a1\u009d\3\2\2\2\u00a1\u009e\3\2\2\2\u00a2\b")
buf.write("\3\2\2\2\u00a3\u00ac\t\4\2\2\u00a4\u00a5\7>\2\2\u00a5")
buf.write("\u00ac\7?\2\2\u00a6\u00a7\7@\2\2\u00a7\u00ac\7?\2\2\u00a8")
buf.write("\u00ac\7?\2\2\u00a9\u00aa\7>\2\2\u00aa\u00ac\7@\2\2\u00ab")
buf.write("\u00a3\3\2\2\2\u00ab\u00a4\3\2\2\2\u00ab\u00a6\3\2\2\2")
buf.write("\u00ab\u00a8\3\2\2\2\u00ab\u00a9\3\2\2\2\u00ac\n\3\2\2")
buf.write("\2\u00ad\u00ae\7p\2\2\u00ae\u00af\7c\2\2\u00af\u00b7\7")
buf.write("q\2\2\u00b0\u00b1\7q\2\2\u00b1\u00b7\7w\2\2\u00b2\u00b7")
buf.write("\7g\2\2\u00b3\u00b4\7z\2\2\u00b4\u00b5\7q\2\2\u00b5\u00b7")
buf.write("\7w\2\2\u00b6\u00ad\3\2\2\2\u00b6\u00b0\3\2\2\2\u00b6")
buf.write("\u00b2\3\2\2\2\u00b6\u00b3\3\2\2\2\u00b7\f\3\2\2\2\u00b8")
buf.write("\u00b9\t\5\2\2\u00b9\16\3\2\2\2\u00ba\u00bb\7k\2\2\u00bb")
buf.write("\u00bc\7p\2\2\u00bc\u00bd\7k\2\2\u00bd\u00be\7e\2\2\u00be")
buf.write("\u00bf\7k\2\2\u00bf\u00c0\7q\2\2\u00c0\20\3\2\2\2\u00c1")
buf.write("\u00c2\7h\2\2\u00c2\u00c3\7k\2\2\u00c3\u00c4\7o\2\2\u00c4")
buf.write("\u00c5\7C\2\2\u00c5\u00c6\7n\2\2\u00c6\u00c7\7i\2\2\u00c7")
buf.write("\u00c8\7q\2\2\u00c8\u00c9\7t\2\2\u00c9\u00ca\7k\2\2\u00ca")
buf.write("\u00cb\7v\2\2\u00cb\u00cc\7o\2\2\u00cc\u00cd\7q\2\2\u00cd")
buf.write("\22\3\2\2\2\u00ce\u00cf\7c\2\2\u00cf\u00d0\7n\2\2\u00d0")
buf.write("\u00d1\7i\2\2\u00d1\u00d2\7q\2\2\u00d2\u00d3\7t\2\2\u00d3")
buf.write("\u00d4\7k\2\2\u00d4\u00d5\7v\2\2\u00d5\u00d6\7o\2\2\u00d6")
buf.write("\u00d7\7q\2\2\u00d7\24\3\2\2\2\u00d8\u00da\t\6\2\2\u00d9")
buf.write("\u00d8\3\2\2\2\u00da\u00db\3\2\2\2\u00db\u00d9\3\2\2\2")
buf.write("\u00db\u00dc\3\2\2\2\u00dc\u00e0\3\2\2\2\u00dd\u00df\t")
buf.write("\7\2\2\u00de\u00dd\3\2\2\2\u00df\u00e2\3\2\2\2\u00e0\u00de")
buf.write("\3\2\2\2\u00e0\u00e1\3\2\2\2\u00e1\26\3\2\2\2\u00e2\u00e0")
buf.write("\3\2\2\2\u00e3\u00e4\7\61\2\2\u00e4\u00e5\7\61\2\2\u00e5")
buf.write("\u00e9\3\2\2\2\u00e6\u00e8\13\2\2\2\u00e7\u00e6\3\2\2")
buf.write("\2\u00e8\u00eb\3\2\2\2\u00e9\u00ea\3\2\2\2\u00e9\u00e7")
buf.write("\3\2\2\2\u00ea\u00ec\3\2\2\2\u00eb\u00e9\3\2\2\2\u00ec")
buf.write("\u00ed\7\f\2\2\u00ed\u00ee\3\2\2\2\u00ee\u00ef\b\f\2\2")
buf.write("\u00ef\30\3\2\2\2\u00f0\u00f2\t\b\2\2\u00f1\u00f3\t\t")
buf.write("\2\2\u00f2\u00f1\3\2\2\2\u00f3\u00f4\3\2\2\2\u00f4\u00f2")
buf.write("\3\2\2\2\u00f4\u00f5\3\2\2\2\u00f5\u00f6\3\2\2\2\u00f6")
buf.write("\u00f7\t\b\2\2\u00f7\32\3\2\2\2\u00f8\u00f9\7x\2\2\u00f9")
buf.write("\u00fa\7g\2\2\u00fa\u00fb\7v\2\2\u00fb\u00fc\7q\2\2\u00fc")
buf.write("\u00fd\7t\2\2\u00fd\34\3\2\2\2\u00fe\u00ff\7f\2\2\u00ff")
buf.write("\u0100\7g\2\2\u0100\36\3\2\2\2\u0101\u0102\7]\2\2\u0102")
buf.write(" \3\2\2\2\u0103\u0104\7_\2\2\u0104\"\3\2\2\2\u0105\u0106")
buf.write("\7*\2\2\u0106$\3\2\2\2\u0107\u0108\7+\2\2\u0108&\3\2\2")
buf.write("\2\u0109\u010a\7.\2\2\u010a(\3\2\2\2\u010b\u010c\7>\2")
buf.write("\2\u010c\u010d\7/\2\2\u010d*\3\2\2\2\u010e\u0110\5\3\2")
buf.write("\2\u010f\u010e\3\2\2\2\u0110\u0111\3\2\2\2\u0111\u010f")
buf.write("\3\2\2\2\u0111\u0112\3\2\2\2\u0112,\3\2\2\2\u0113\u0115")
buf.write("\5\3\2\2\u0114\u0113\3\2\2\2\u0115\u0116\3\2\2\2\u0116")
buf.write("\u0114\3\2\2\2\u0116\u0117\3\2\2\2\u0117\u011e\3\2\2\2")
buf.write("\u0118\u011a\t\n\2\2\u0119\u011b\5\3\2\2\u011a\u0119\3")
buf.write("\2\2\2\u011b\u011c\3\2\2\2\u011c\u011a\3\2\2\2\u011c\u011d")
buf.write("\3\2\2\2\u011d\u011f\3\2\2\2\u011e\u0118\3\2\2\2\u011e")
buf.write("\u011f\3\2\2\2\u011f.\3\2\2\2\u0120\u0121\7H\2\2\u0121")
buf.write("\u0122\7C\2\2\u0122\u0123\7N\2\2\u0123\u0124\7U\2\2\u0124")
buf.write("\u0130\7Q\2\2\u0125\u0126\7X\2\2\u0126\u0127\7G\2\2\u0127")
buf.write("\u0128\7T\2\2\u0128\u0129\7F\2\2\u0129\u012a\7C\2\2\u012a")
buf.write("\u012b\7F\2\2\u012b\u012c\7G\2\2\u012c\u012d\7K\2\2\u012d")
buf.write("\u012e\7T\2\2\u012e\u0130\7Q\2\2\u012f\u0120\3\2\2\2\u012f")
buf.write("\u0125\3\2\2\2\u0130\60\3\2\2\2\u0131\u0132\7\60\2\2\u0132")
buf.write("\u0133\7\60\2\2\u0133\62\3\2\2\2\u0134\u0135\7g\2\2\u0135")
buf.write("\u0136\7u\2\2\u0136\u0137\7e\2\2\u0137\u0138\7t\2\2\u0138")
buf.write("\u0139\7g\2\2\u0139\u013a\7x\2\2\u013a\u0144\7c\2\2\u013b")
buf.write("\u013c\7g\2\2\u013c\u013d\7u\2\2\u013d\u013e\7e\2\2\u013e")
buf.write("\u013f\7t\2\2\u013f\u0140\7g\2\2\u0140\u0141\7x\2\2\u0141")
buf.write("\u0142\7c\2\2\u0142\u0144\7n\2\2\u0143\u0134\3\2\2\2\u0143")
buf.write("\u013b\3\2\2\2\u0144\64\3\2\2\2\u0145\u0146\7n\2\2\u0146")
buf.write("\u0147\7g\2\2\u0147\u0148\7k\2\2\u0148\u0149\7c\2\2\u0149")
buf.write("\66\3\2\2\2\u014a\u014b\7u\2\2\u014b\u014c\7g\2\2\u014c")
buf.write("8\3\2\2\2\u014d\u014e\7g\2\2\u014e\u014f\7p\2\2\u014f")
buf.write("\u0150\7v\2\2\u0150\u0151\7c\2\2\u0151\u0152\7q\2\2\u0152")
buf.write(":\3\2\2\2\u0153\u0154\7u\2\2\u0154\u0155\7g\2\2\u0155")
buf.write("\u0156\7p\2\2\u0156\u0157\7c\2\2\u0157\u0158\7q\2\2\u0158")
buf.write("<\3\2\2\2\u0159\u015a\7h\2\2\u015a\u015b\7k\2\2\u015b")
buf.write("\u015c\7o\2\2\u015c\u015d\7u\2\2\u015d\u015e\7g\2\2\u015e")
buf.write(">\3\2\2\2\u015f\u0160\7g\2\2\u0160\u0161\7u\2\2\u0161")
buf.write("\u0162\7e\2\2\u0162\u0163\7q\2\2\u0163\u0164\7n\2\2\u0164")
buf.write("\u0165\7j\2\2\u0165\u0166\7c\2\2\u0166@\3\2\2\2\u0167")
buf.write("\u0168\7e\2\2\u0168\u0169\7c\2\2\u0169\u016a\7u\2\2\u016a")
buf.write("\u016b\7q\2\2\u016bB\3\2\2\2\u016c\u016d\7q\2\2\u016d")
buf.write("\u016e\7w\2\2\u016e\u016f\7v\2\2\u016f\u0170\7t\2\2\u0170")
buf.write("\u0171\7q\2\2\u0171\u0172\7e\2\2\u0172\u0173\7c\2\2\u0173")
buf.write("\u0174\7u\2\2\u0174\u0175\7q\2\2\u0175D\3\2\2\2\u0176")
buf.write("\u0177\7h\2\2\u0177\u0178\7k\2\2\u0178\u0179\7o\2\2\u0179")
buf.write("\u017a\7g\2\2\u017a\u017b\7u\2\2\u017b\u017c\7e\2\2\u017c")
buf.write("\u017d\7q\2\2\u017d\u017e\7n\2\2\u017e\u017f\7j\2\2\u017f")
buf.write("\u0180\7c\2\2\u0180F\3\2\2\2\u0181\u0182\7/\2\2\u0182")
buf.write("H\3\2\2\2\u0183\u0184\7r\2\2\u0184\u0185\7c\2\2\u0185")
buf.write("\u0186\7t\2\2\u0186\u0187\7c\2\2\u0187J\3\2\2\2\u0188")
buf.write("\u0189\7c\2\2\u0189\u018a\7v\2\2\u018a\u018b\7g\2\2\u018b")
buf.write("L\3\2\2\2\u018c\u018d\7h\2\2\u018d\u018e\7c\2\2\u018e")
buf.write("\u018f\7e\2\2\u018f\u0190\7c\2\2\u0190N\3\2\2\2\u0191")
buf.write("\u0192\7h\2\2\u0192\u0193\7k\2\2\u0193\u0194\7o\2\2\u0194")
buf.write("\u0195\7r\2\2\u0195\u0196\7c\2\2\u0196\u0197\7t\2\2\u0197")
buf.write("\u0198\7c\2\2\u0198P\3\2\2\2\u0199\u019a\7t\2\2\u019a")
buf.write("\u019b\7g\2\2\u019b\u019c\7r\2\2\u019c\u019d\7k\2\2\u019d")
buf.write("\u019e\7v\2\2\u019e\u019f\7c\2\2\u019fR\3\2\2\2\u01a0")
buf.write("\u01a1\7g\2\2\u01a1\u01a2\7p\2\2\u01a2\u01a3\7s\2\2\u01a3")
buf.write("\u01a4\7w\2\2\u01a4\u01a5\7c\2\2\u01a5\u01a6\7p\2\2\u01a6")
buf.write("\u01a7\7v\2\2\u01a7\u01a8\7q\2\2\u01a8T\3\2\2\2\u01a9")
buf.write("\u01aa\7h\2\2\u01aa\u01ab\7k\2\2\u01ab\u01ac\7o\2\2\u01ac")
buf.write("\u01ad\7g\2\2\u01ad\u01ae\7p\2\2\u01ae\u01af\7s\2\2\u01af")
buf.write("\u01b0\7w\2\2\u01b0\u01b1\7c\2\2\u01b1\u01b2\7p\2\2\u01b2")
buf.write("\u01b3\7v\2\2\u01b3\u01b4\7q\2\2\u01b4V\3\2\2\2\u01b5")
buf.write("\u01b6\7k\2\2\u01b6\u01b7\7p\2\2\u01b7\u01b8\7v\2\2\u01b8")
buf.write("\u01b9\7g\2\2\u01b9\u01ba\7t\2\2\u01ba\u01bb\7t\2\2\u01bb")
buf.write("\u01bc\7q\2\2\u01bc\u01bd\7o\2\2\u01bd\u01be\7r\2\2\u01be")
buf.write("\u01bf\7c\2\2\u01bfX\3\2\2\2\u01c0\u01c1\7r\2\2\u01c1")
buf.write("\u01c2\7t\2\2\u01c2\u01c3\7q\2\2\u01c3\u01c4\7e\2\2\u01c4")
buf.write("\u01c5\7g\2\2\u01c5\u01c6\7f\2\2\u01c6\u01c7\7k\2\2\u01c7")
buf.write("\u01c8\7o\2\2\u01c8\u01c9\7g\2\2\u01c9\u01ca\7p\2\2\u01ca")
buf.write("\u01cb\7v\2\2\u01cb\u01cc\7q\2\2\u01ccZ\3\2\2\2\u01cd")
buf.write("\u01ce\7h\2\2\u01ce\u01cf\7k\2\2\u01cf\u01d0\7o\2\2\u01d0")
buf.write("\u01d1\7r\2\2\u01d1\u01d2\7t\2\2\u01d2\u01d3\7q\2\2\u01d3")
buf.write("\u01d4\7e\2\2\u01d4\u01d5\7g\2\2\u01d5\u01d6\7f\2\2\u01d6")
buf.write("\u01d7\7k\2\2\u01d7\u01d8\7o\2\2\u01d8\u01d9\7g\2\2\u01d9")
buf.write("\u01da\7p\2\2\u01da\u01db\7v\2\2\u01db\u01dc\7q\2\2\u01dc")
buf.write("\\\3\2\2\2\u01dd\u01de\7h\2\2\u01de\u01df\7w\2\2\u01df")
buf.write("\u01e0\7p\2\2\u01e0\u01e1\7e\2\2\u01e1\u01e2\7c\2\2\u01e2")
buf.write("\u01e3\7q\2\2\u01e3^\3\2\2\2\u01e4\u01e5\7t\2\2\u01e5")
buf.write("\u01e6\7g\2\2\u01e6\u01e7\7v\2\2\u01e7\u01e8\7q\2\2\u01e8")
buf.write("\u01e9\7t\2\2\u01e9\u01ea\7p\2\2\u01ea\u01eb\7g\2\2\u01eb")
buf.write("`\3\2\2\2\u01ec\u01ed\7h\2\2\u01ed\u01ee\7k\2\2\u01ee")
buf.write("\u01ef\7o\2\2\u01ef\u01f0\7h\2\2\u01f0\u01f1\7w\2\2\u01f1")
buf.write("\u01f2\7p\2\2\u01f2\u01f3\7e\2\2\u01f3\u01f4\7c\2\2\u01f4")
buf.write("\u01f5\7q\2\2\u01f5b\3\2\2\2\u01f6\u01f7\7=\2\2\u01f7")
buf.write("d\3\2\2\2\u01f8\u01f9\7x\2\2\u01f9\u01fa\7c\2\2\u01fa")
buf.write("\u01fb\7t\2\2\u01fbf\3\2\2\2\u01fc\u01fd\7q\2\2\u01fd")
buf.write("\u01fe\7p\2\2\u01feh\3\2\2\2\u01ff\u0200\7q\2\2\u0200")
buf.write("\u0201\7h\2\2\u0201\u0202\7h\2\2\u0202j\3\2\2\2\u0203")
buf.write("\u0204\7c\2\2\u0204\u0205\7t\2\2\u0205\u0206\7s\2\2\u0206")
buf.write("\u0207\7w\2\2\u0207\u0208\7k\2\2\u0208\u0209\7x\2\2\u0209")
buf.write("\u020a\7q\2\2\u020al\3\2\2\2\u020b\u020d\t\b\2\2\u020c")
buf.write("\u020e\t\t\2\2\u020d\u020c\3\2\2\2\u020e\u020f\3\2\2\2")
buf.write("\u020f\u020d\3\2\2\2\u020f\u0210\3\2\2\2\u0210\u0211\3")
buf.write("\2\2\2\u0211\u0213\13\2\2\2\u0212\u0214\t\13\2\2\u0213")
buf.write("\u0212\3\2\2\2\u0214\u0215\3\2\2\2\u0215\u0213\3\2\2\2")
buf.write("\u0215\u0216\3\2\2\2\u0216\u0217\3\2\2\2\u0217\u0218\t")
buf.write("\b\2\2\u0218n\3\2\2\2\u0219\u021a\7r\2\2\u021a\u021b\7")
buf.write("c\2\2\u021b\u021c\7w\2\2\u021c\u021d\7u\2\2\u021d\u021e")
buf.write("\7c\2\2\u021ep\3\2\2\2\u021f\u0222\5g\64\2\u0220\u0222")
buf.write("\5i\65\2\u0221\u021f\3\2\2\2\u0221\u0220\3\2\2\2\u0222")
buf.write("r\3\2\2\2\u0223\u0226\5g\64\2\u0224\u0226\5i\65\2\u0225")
buf.write("\u0223\3\2\2\2\u0225\u0224\3\2\2\2\u0226t\3\2\2\2\u0227")
buf.write("\u0228\7n\2\2\u0228\u0229\7k\2\2\u0229\u022a\7o\2\2\u022a")
buf.write("\u022b\7r\2\2\u022b\u022c\7c\2\2\u022c\u022d\7v\2\2\u022d")
buf.write("\u022e\7g\2\2\u022e\u022f\7n\2\2\u022f\u0230\7c\2\2\u0230")
buf.write("v\3\2\2\2\u0231\u0232\7<\2\2\u0232x\3\2\2\2\u0233\u0234")
buf.write("\7c\2\2\u0234\u0235\7n\2\2\u0235\u0236\7g\2\2\u0236\u0237")
buf.write("\7c\2\2\u0237\u0238\7v\2\2\u0238\u0239\7q\2\2\u0239\u023a")
buf.write("\7t\2\2\u023a\u023b\7k\2\2\u023b\u023c\7q\2\2\u023cz\3")
buf.write("\2\2\2\u023d\u023e\7v\2\2\u023e\u023f\7k\2\2\u023f\u0240")
buf.write("\7o\2\2\u0240\u0241\7g\2\2\u0241\u0242\7t\2\2\u0242|\3")
buf.write("\2\2\2\u0243\u0244\7r\2\2\u0244\u0245\7c\2\2\u0245\u0246")
buf.write("\7u\2\2\u0246\u0247\7u\2\2\u0247\u0248\7q\2\2\u0248~\3")
buf.write("\2\2\2\25\2\u009b\u00a1\u00ab\u00b6\u00db\u00e0\u00e9")
buf.write("\u00f4\u0111\u0116\u011c\u011e\u012f\u0143\u020f\u0215")
buf.write("\u0221\u0225\3\b\2\2")
return buf.getvalue()
class visualgLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
TIPO_DE_DADO = 1
OPERADOR_BINARIO = 2
OPERADOR_RELACIONAL = 3
OPERADOR_LOGICO = 4
OPERADOR_UNARIO = 5
INICIO = 6
FIM_ALGORITMO = 7
ALGORITMO = 8
NOME_DA_VARIAVEL = 9
COMENTARIO = 10
STRING = 11
VETOR = 12
DE = 13
ABRE_COLCHETES = 14
FECHA_COLCHETES = 15
ABRE_PARENTESES = 16
FECHA_PARENTESES = 17
VIRGULA = 18
ATRIBUIR = 19
INTEIRO = 20
REAL = 21
BOOL = 22
PONTO_PONTO = 23
ESCREVA = 24
LEIA = 25
SE = 26
ENTAO = 27
SENAO = 28
FIM_SE = 29
ESCOLHA = 30
CASO = 31
OUTRO_CASO = 32
FIM_ESCOLHA = 33
MENOS = 34
PARA = 35
ATE = 36
FACA = 37
FIM_PARA = 38
REPITA = 39
ENQUANTO = 40
FIM_ENQUANTO = 41
INTERROMPA = 42
PROCEDIMENTO = 43
FIM_PROCEDIMENTO = 44
FUNCAO = 45
RETORNO = 46
FIM_FUNCAO = 47
PONTO_VIRGULA = 48
VAR = 49
ON = 50
OFF = 51
ARQUIVO = 52
NOME_ARQUIVO = 53
PAUSA = 54
ECO = 55
CRONOMETRO = 56
LIMPATELA = 57
DOIS_PONTOS = 58
ALEATORIO = 59
TIMER = 60
PASSO = 61
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'inicio'", "'fimAlgoritmo'", "'algoritmo'", "'vetor'", "'de'",
"'['", "']'", "'('", "')'", "','", "'<-'", "'..'", "'leia'",
"'se'", "'entao'", "'senao'", "'fimse'", "'escolha'", "'caso'",
"'outrocaso'", "'fimescolha'", "'-'", "'para'", "'ate'", "'faca'",
"'fimpara'", "'repita'", "'enquanto'", "'fimenquanto'", "'interrompa'",
"'procedimento'", "'fimprocedimento'", "'funcao'", "'retorne'",
"'fimfuncao'", "';'", "'var'", "'on'", "'off'", "'arquivo'",
"'pausa'", "'limpatela'", "':'", "'aleatorio'", "'timer'", "'passo'" ]
symbolicNames = [ "<INVALID>",
"TIPO_DE_DADO", "OPERADOR_BINARIO", "OPERADOR_RELACIONAL", "OPERADOR_LOGICO",
"OPERADOR_UNARIO", "INICIO", "FIM_ALGORITMO", "ALGORITMO", "NOME_DA_VARIAVEL",
"COMENTARIO", "STRING", "VETOR", "DE", "ABRE_COLCHETES", "FECHA_COLCHETES",
"ABRE_PARENTESES", "FECHA_PARENTESES", "VIRGULA", "ATRIBUIR",
"INTEIRO", "REAL", "BOOL", "PONTO_PONTO", "ESCREVA", "LEIA",
"SE", "ENTAO", "SENAO", "FIM_SE", "ESCOLHA", "CASO", "OUTRO_CASO",
"FIM_ESCOLHA", "MENOS", "PARA", "ATE", "FACA", "FIM_PARA", "REPITA",
"ENQUANTO", "FIM_ENQUANTO", "INTERROMPA", "PROCEDIMENTO", "FIM_PROCEDIMENTO",
"FUNCAO", "RETORNO", "FIM_FUNCAO", "PONTO_VIRGULA", "VAR", "ON",
"OFF", "ARQUIVO", "NOME_ARQUIVO", "PAUSA", "ECO", "CRONOMETRO",
"LIMPATELA", "DOIS_PONTOS", "ALEATORIO", "TIMER", "PASSO" ]
ruleNames = [ "DIGIT", "TIPO_DE_DADO", "OPERADOR_BINARIO", "OPERADOR_RELACIONAL",
"OPERADOR_LOGICO", "OPERADOR_UNARIO", "INICIO", "FIM_ALGORITMO",
"ALGORITMO", "NOME_DA_VARIAVEL", "COMENTARIO", "STRING",
"VETOR", "DE", "ABRE_COLCHETES", "FECHA_COLCHETES", "ABRE_PARENTESES",
"FECHA_PARENTESES", "VIRGULA", "ATRIBUIR", "INTEIRO",
"REAL", "BOOL", "PONTO_PONTO", "ESCREVA", "LEIA", "SE",
"ENTAO", "SENAO", "FIM_SE", "ESCOLHA", "CASO", "OUTRO_CASO",
"FIM_ESCOLHA", "MENOS", "PARA", "ATE", "FACA", "FIM_PARA",
"REPITA", "ENQUANTO", "FIM_ENQUANTO", "INTERROMPA", "PROCEDIMENTO",
"FIM_PROCEDIMENTO", "FUNCAO", "RETORNO", "FIM_FUNCAO",
"PONTO_VIRGULA", "VAR", "ON", "OFF", "ARQUIVO", "NOME_ARQUIVO",
"PAUSA", "ECO", "CRONOMETRO", "LIMPATELA", "DOIS_PONTOS",
"ALEATORIO", "TIMER", "PASSO" ]
grammarFileName = "visualg.g4"
    def __init__(self, input=None, output:TextIO = sys.stdout):
        """Construct the ANTLR-generated lexer and attach its ATN simulator."""
        super().__init__(input, output)
        # Generated against ANTLR 4.8; aborts early on an incompatible runtime.
        self.checkVersion("4.8")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
|
def pytest_configure(config):
    """Register the custom pytest markers used to select profile-specific tests."""
    for marker in (
        "profile_databricks_cluster",
        "profile_databricks_sql_endpoint",
        "profile_apache_spark",
    ):
        config.addinivalue_line("markers", marker)
|
997,958 | eec04b5a5ab371d5a12797031fb9bc4f37cf11b9 | '''
Created on Aug 3, 2016
@author: YLin2
'''
import sys
import os
from xml.dom import minidom
def fix_class(class_node):
    """Recompute a <class> node's 'line-rate' from its <line hits="..."> children.

    Lines without a 'hits' attribute are ignored. Returns a
    (valid_lines, covered_lines) tuple; the attribute is only written when at
    least one valid line exists.
    """
    total = 0
    hit_count = 0
    for lines_node in class_node.getElementsByTagName('lines'):
        tracked = [ln for ln in lines_node.getElementsByTagName('line') if ln.hasAttribute('hits')]
        total += len(tracked)
        hit_count += sum(1 for ln in tracked if ln.getAttribute('hits') == '1')
    if total > 0:
        class_node.setAttribute('line-rate', repr(float(hit_count)/float(total)))
    return total, hit_count
def fix_package(package_node):
    """Recompute a <package> node's 'line-rate' by aggregating all its classes.

    Delegates per-class work to fix_class and returns the aggregated
    (valid_lines, covered_lines) tuple for the whole package.
    """
    total, hit_count = 0, 0
    for classes_node in package_node.getElementsByTagName('classes'):
        for class_node in classes_node.getElementsByTagName('class'):
            cls_total, cls_hits = fix_class(class_node)
            total += cls_total
            hit_count += cls_hits
    if total > 0:
        package_node.setAttribute('line-rate', repr(float(hit_count)/float(total)))
    return total, hit_count
def fix(*args, **kargs):
    """Recompute aggregate coverage figures in a cobertura XML report.

    args[0] is expected to be sys.argv; an optional first CLI argument may name
    a directory containing 'cobertura.xml'. Writes a pristine copy to
    'coverage.original.xml' and the fixed report (same basename) to the CWD.
    """
    default_file_path = ''
    default_file_name = 'cobertura.xml'
    arg = args[0][1] if len(args[0]) > 1 else default_file_path
    if os.path.isdir(arg):
        file_name = os.path.join(arg, default_file_name)
    else:
        file_name = os.path.join(default_file_path, default_file_name)
    # BUG FIX: the original used a Python-2 print *statement*, which is a
    # SyntaxError on Python 3; the function form works on both.
    print('processing: ' + file_name)
    with open(file_name, 'r') as xml_file:
        xml_doc = minidom.parse(xml_file)
    xml_root = xml_doc.documentElement
    # Keep an untouched copy before mutating any attributes. All file handles
    # are now closed via context managers (the original leaked three of them).
    with open('coverage.original.xml', 'w') as original_copy:
        xml_root.writexml(original_copy)
    valid_lines = 0
    covered_lines = 0
    tag_valid_lines = 'lines-valid'
    tag_covered_lines = 'lines-covered'
    for package_node in xml_doc.getElementsByTagName('package'):
        current_valid_lines, current_covered_lines = fix_package(package_node)
        valid_lines += current_valid_lines
        covered_lines += current_covered_lines
    xml_root.setAttribute(tag_valid_lines, repr(valid_lines))
    xml_root.setAttribute(tag_covered_lines, repr(covered_lines))
    with open(os.path.basename(file_name), 'w') as fixed_copy:
        xml_root.writexml(fixed_copy)
if __name__ == '__main__':
    # Pass argv through so an optional report directory can be supplied.
    fix(sys.argv)
|
997,959 | 899d859db131b32ea251b6a03cf7743f57e721f1 | # -*- coding: utf-8 -*-
# ridgeRegression.py
import numpy as np
import matplotlib.pyplot as plt
def rssError(yArr, yHatArr):
    """Return the residual sum of squares between targets and predictions."""
    residuals = yArr - yHatArr
    return (residuals ** 2).sum()
def loadDataSet(fileName):
    '''
    Load a tab-separated data file.
    The last column of each row is the label; all preceding columns are features.
    Parameters:
        fileName: path to the data file
    Returns:
        dataMat: list of feature rows (list of floats)
        labelMat: list of labels (floats)
    '''
    dataMat = []
    labelMat = []
    with open(fileName) as fr:
        lines = fr.readlines()
    if not lines:
        return dataMat, labelMat
    # Feature count = total columns minus the trailing label column.
    numFeat = len(lines[0].split('\t')) - 1
    # BUG FIX: the original consumed the first line with readline() just to
    # count columns and then iterated readlines() from line 2, silently
    # dropping the first sample. Every line is now parsed.
    for line in lines:
        curLine = line.strip().split('\t')
        dataMat.append([float(curLine[i]) for i in range(numFeat)])
        labelMat.append(float(curLine[-1]))
    return dataMat, labelMat
def ridgeRegre(xMat, yMat, lam=0.2):
    '''
    Ridge regression: ws = (X^T X + lam*I)^-1 X^T y
    Parameters:
        xMat: feature matrix (np.mat)
        yMat: target column vector (np.mat)
        lam: shrinkage coefficient lambda
    Returns:
        ws: ridge-regression coefficient vector, or None when the
            regularised normal matrix is singular
    '''
    xTx = xMat.T * xMat
    denom = xTx + np.eye(np.shape(xMat)[1]) * lam
    if np.linalg.det(denom) == 0.0:
        print("This matrix is singular, cannot do inverse")
        return
    # BUG FIX: the normal-equation solution requires the INVERSE of denom
    # (which is exactly why the singularity guard above exists); the original
    # multiplied by denom itself instead of denom.I.
    ws = denom.I * (xMat.T * yMat)
    return ws
def ridgeTest(xArr, yArr):
    '''
    Evaluate ridge regression over 30 exponentially spaced lambda values.
    Parameters:
        xArr: feature data
        yArr: target data
    Returns:
        wMat: 30 x n_features matrix, one coefficient row per lambda
    '''
    xMat = np.mat(xArr)
    yMat = np.mat(yArr).T
    # Centre the targets and standardise the features (zero mean, unit variance).
    yMat = yMat - np.mean(yMat, axis=0)
    xMat = (xMat - np.mean(xMat, axis=0)) / np.var(xMat, axis=0)
    numTestPst = 30
    wMat = np.zeros((numTestPst, np.shape(xMat)[1]))
    for i in range(numTestPst):
        # Lambda grows exponentially, starting from the very small exp(-10).
        ws = ridgeRegre(xMat, yMat, lam=np.exp(i - 10))
        wMat[i, :] = ws.T
    return wMat
def plotwMat():
    '''
    Plot the ridge-regression coefficient paths for the abalone data set.
    Parameters: none
    Returns: none
    '''
    abX, abY = loadDataSet("abalone.txt")
    ridgeWeights = ridgeTest(abX, abY)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(ridgeWeights)
    # Style the title and both axis labels.
    title_artist = ax.set_title('gaun xi')
    xlabel_artist = ax.set_xlabel('log(lambada)')
    ylabel_artist = ax.set_ylabel('ws')
    plt.setp(title_artist, size=20, weight="bold", color='red')
    plt.setp(xlabel_artist, size=10, weight="bold", color='black')
    plt.setp(ylabel_artist, size=10, weight="bold", color='black')
    plt.show()
if __name__ == "__main__":
plotwMat()
|
997,960 | 0d52a5764b5f9bb3a04a48414f00c1ebc9c8fc5f | import pygame as pg
import numpy as np
import sys
from SoundManager import SoundManager
from MainMenuUI import MainMenuUI
from SettingsUI import SettingsUI
from GameUI import GameUI
from GameController import GameController
def main():
    """Run the Tetris-style game loop: menu (state 0), settings (1), play (2), game over (3)."""
    WIDTH = 1186
    HEIGHT = 964
    sm = SoundManager("sounds/")
    pg.init()
    pg.font.init()
    mui = MainMenuUI(WIDTH, HEIGHT)
    sui = SettingsUI(WIDTH, HEIGHT)
    gui = GameUI(WIDTH, HEIGHT)
    gc = GameController(60)
    clock = pg.time.Clock()
    while True:
        # The time it takes for a frame to complete. It is used to make the game frame independent.
        dt = clock.tick(gc.FPS)
        for event in pg.event.get():
            if event.type == pg.QUIT:
                sys.exit()
        # --- State 0: main menu ---
        if gc.game_state == 0:
            if gc.starting_level != 0:
                gc.starting_level = 0
                gc.set_speed(gc.starting_level)
            if pg.mouse.get_pressed()[0]:
                mouse = pg.mouse.get_pos()
                # If player clicks on play button then go to settings
                if WIDTH // 2 - 90 < mouse[0] < WIDTH // 2 + 90 and HEIGHT // 2 - 115 < mouse[1] < HEIGHT // 2 - 35:
                    gc.game_state = 1
                # If player clicks on quit button, exit
                if WIDTH // 2 - 90 < mouse[0] < WIDTH // 2 + 90 and HEIGHT // 2 + 35 < mouse[1] < HEIGHT // 2 + 115:
                    sys.exit()
            # At game over the music stops. This ensures that the music will start again.
            if not gc.theme_playing:
                gc.theme_playing = True
                gc.track = "track1.wav"
                sm.play_sound(gc.track, 0, -1)
            mui.draw_main_menu()
        # --- State 1: settings screen (track + starting level selection) ---
        elif gc.game_state == 1:
            if pg.mouse.get_pressed()[0]:
                mouse = pg.mouse.get_pos()
                # Start button
                if WIDTH // 2 - 90 < mouse[0] < WIDTH // 2 + 90 and HEIGHT // 2 + 300 < mouse[1] < HEIGHT // 2 + 380:
                    gc.game_state = 2
                    gc.level = gc.starting_level
                # Track buttons
                if WIDTH // 2 - 275 < mouse[0] < WIDTH // 2 - 125:
                    for i in range(3):
                        dy = (i+1)*70
                        if HEIGHT // 2 - 148 + dy < mouse[1] < HEIGHT // 2 - 148 + 50 + dy:
                            gc.track = "track" + str(i+1) + ".wav"
                            sm.play_sound(gc.track, 0, -1)
                # Even levels buttons (levels 0, 2, 4, 6, 8)
                if WIDTH // 2 + 240 < mouse[0] < WIDTH // 2 + 290:
                    for i in range(5):
                        dy = 50*(i+1)
                        if HEIGHT // 2 - 140 + dy < mouse[1] < HEIGHT // 2 - 140 + 50 + dy:
                            gc.starting_level = 2*i
                            gc.set_speed(gc.starting_level)
                # Odd levels buttons (levels 1, 3, 5, 7, 9)
                if WIDTH // 2 + 290 < mouse[0] < WIDTH // 2 + 340:
                    for i in range(5):
                        dy = 50*(i+1)
                        if HEIGHT // 2 - 140 + dy < mouse[1] < HEIGHT // 2 - 140 + 50 + dy:
                            gc.starting_level = 2*i+1
                            gc.set_speed(gc.starting_level)
            sui.draw_settings(gc.starting_level)
        # --- State 2: gameplay ---
        elif gc.game_state == 2:
            gc.drop_counter += dt
            gc.move_counter += dt
            # Delay between player's side movement
            if gc.move_counter >= gc.h_speed:
                gc.can_h_move = True
            if not pg.key.get_pressed()[pg.K_UP]:
                gc.can_rotate = True
            if len(gc.next_pieces) <= 2:
                gc.create_piece_sequence()
            if not gc.piece_falling:
                # No active piece: pop the next one from the queue and spawn it.
                gc.rotated = False
                gc.piece_falling = True
                pts = gc.next_pieces[:][0]
                del gc.next_pieces[0]
                next_piece = gc.next_pieces[:][0]
                index = pts[pts > 0][0]
                game_over = gc.spawn_piece(pts)
                if game_over:
                    gc.game_state = 3
                pf = pts[:]
            else:
                # Active piece: handle soft drop, horizontal moves and rotation.
                if pg.key.get_pressed()[pg.K_DOWN]:
                    gc.drop_counter *= 3
                if pg.key.get_pressed()[pg.K_LEFT] and gc.move_counter >= gc.h_speed and gc.can_h_move:
                    gc.can_h_move = False
                    gc.move_counter = 0
                    if gc.h_move(-1, index):
                        sm.play_sound("piece-move.wav", 1, 0)
                if pg.key.get_pressed()[pg.K_RIGHT] and gc.move_counter >= gc.h_speed and gc.can_h_move:
                    gc.can_h_move = False
                    gc.move_counter = 0
                    if gc.h_move(1, index):
                        sm.play_sound("piece-move.wav", 1, 0)
                if pg.key.get_pressed()[pg.K_UP] and gc.can_rotate:
                    gc.can_rotate = False
                    sm.play_sound("piece-rotate.wav", 3, 0)
                    gc.rotated = gc.r_move(pf, index, gc.rotated)
                if gc.drop_counter >= gc.speed and gc.piece_falling:
                    gc.drop_counter = 0
                    gc.piece_falling = gc.v_move(index, sm)
            gui.screen.fill(gui.BLACK)
            gui.draw_game_ui(gc, next_piece, np.array_equal(next_piece, gc.I))
        # --- State 3: game over (SPACE restarts, ESC returns to the menu) ---
        elif gc.game_state == 3:
            if not gc.game_over_sound_played:
                gc.game_over_sound_played = gc.game_over(sm)
            if pg.key.get_pressed()[pg.K_SPACE]:
                gc.state_initializer(2, gui)
                sm.play_sound(gc.track, 0, -1)
            if pg.key.get_pressed()[pg.K_ESCAPE]:
                gc.state_initializer(0, gui)
                sm.play_sound(gc.track, 0, -1)
        pg.display.update()
if __name__ == "__main__":
main()
|
997,961 | cc5aa39b0f2c6d2da5e25b08f812ce9d46ae34bd | import RPi.GPIO as GPIO
import threading
interval = 0.01
class Key:
    """Polls a push button on BCM pin 4 and reports tap / long-press events.

    The supplied eventCallback is invoked as ``eventCallback(event, shutdown)``
    with event == 1 for a tap and event == 2 for a long press.
    """
    def __init__(self, eventCallback, shutdown):
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(4,GPIO.IN,pull_up_down=GPIO.PUD_UP)
        self.eventCallback = eventCallback
        self.shutdown = shutdown
        self.pressed = False
        self.event = 0 #0->not press 1->tap, 2->longpress
        self.times = 0  # consecutive poll ticks the button has been held
        # BUG FIX: start polling only after all state above exists; the original
        # started the timer first, so the first keyScan tick could fire before
        # eventCallback/times were assigned and raise AttributeError.
        self.timerStart(interval=interval)
    def timerStart(self, interval):
        """Schedule the next keyScan poll after `interval` seconds."""
        self.scantimer = threading.Timer(interval, self.keyScan, (interval,))
        self.scantimer.start()
    def keyScan(self, interval):
        """Sample the pin once; fire the callback on tap or long press, then re-arm."""
        if (GPIO.input(4) == GPIO.LOW):
            self.pressed = True
            self.times = self.times + 1
            if self.times > 80:  # held ~80 polls (~0.8s at 10ms) -> long press
                self.times = 0
                self.event = 2 #longpress
                self.eventCallback(self.event, self.shutdown)
                self.event = 0
        else:
            self.pressed = False
            if self.times > 5:  # released after a >5-poll debounce -> tap
                self.event = 1 #tap
                self.eventCallback(self.event, self.shutdown)
                self.event = 0
            self.times = 0
        if interval != 0:
            self.timerStart(interval)
def keycallback(event, shutdown=None):
    """Default event handler: log the event code (1 = tap, 2 = long press).

    BUG FIX: Key invokes callbacks as ``callback(event, shutdown)``; the
    original one-argument signature raised TypeError on the first key event.
    The extra parameter defaults to None so direct one-argument calls keep
    working.
    """
    print(event)
if __name__ == "__main__":
key = Key(keycallback) |
997,962 | cc476955321c42bc083811469fc42858ac5cdda9 | # coding=utf-8
# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    """Sum every tensor in the state dict, skipping 'encoder.embeddings' keys.

    encoder.embeddings weights are double-copied in the original FLAVA
    checkpoint, so they are excluded from the parameter-mass parity check.
    """
    total = 0
    for key, param in state_dict.items():
        if "encoder.embeddings" in key:
            continue
        total = total + param.float().sum()
    return total
def upgrade_state_dict(state_dict, codebook_state_dict):
    """Rename original FLAVA checkpoint keys to the transformers layout.

    Embedding tensors are dropped (they are duplicated in the source
    checkpoint), all values are cast to float32, and codebook weights are
    prefixed with 'image_codebook.'.
    """
    # Ordered (old, new) substring substitutions; order matters because some
    # patterns are prefixes of others (e.g. itm_head.cls before itm_head.pooler,
    # mm_text_projection before text_projection).
    renames = (
        ("heads.cmd.mim_head.cls.predictions", "mmm_image_head"),
        ("heads.cmd.mlm_head.cls.predictions", "mmm_text_head"),
        ("heads.cmd.itm_head.cls", "itm_head"),
        ("heads.cmd.itm_head.pooler", "itm_head.pooler"),
        ("heads.cmd.clip_head.logit_scale", "flava.logit_scale"),
        ("heads.fairseq_mlm.cls.predictions", "mlm_head"),
        ("heads.imagenet.mim_head.cls.predictions", "mim_head"),
        ("mm_text_projection", "flava.text_to_mm_projection"),
        ("mm_image_projection", "flava.image_to_mm_projection"),
        ("image_encoder.module", "flava.image_model"),
        ("text_encoder.module", "flava.text_model"),
        ("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token"),
        ("mm_encoder.module", "flava.multimodal_model"),
        ("text_projection", "flava.text_projection"),
        ("image_projection", "flava.image_projection"),
    )
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        for old, new in renames:
            key = key.replace(old, new)
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    config = FlavaConfig.from_pretrained(config_path) if config_path is not None else FlavaConfig()
    hf_model = FlavaForPreTraining(config).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)
    # Load from disk when the path exists, otherwise treat it as a URL.
    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")
    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    # Sanity check: total parameter mass must match the original checkpoint
    # plus the codebook before we persist anything.
    hf_count = count_parameters(hf_model.state_dict())
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
|
997,963 | c237f3aa5fe6051647480a1a4d5af2a8bc8801e5 | from django.urls import path,re_path
from .views import *
# Account endpoint routes; the view callables come from .views via the star import.
urlpatterns = [
    re_path('^register/',register_views),
    re_path('^logout/',logout_views),
    re_path('^login/',login_views),
]
997,964 | 5d36f91fa11951340e82e4f2d063bb1d6e703526 | #This program looks at the string Repetition Operator (*)
str1="Hello "
print(str1*300)
|
997,965 | 7623b5dd5577bde6522289a45b857b13717c87cf | from numpy import *
from collections import Counter
import torch
import pickle
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def build_vocab(texts, n=None):
    """Build char-level vocabulary maps from an iterable of strings.

    Indices 0-3 are reserved: '~' pad, '^' start-of-sequence, '$'
    end-of-sequence, '#' unknown. Real characters are numbered from 4 in
    descending frequency order (top n only, when n is given). Reserved symbols
    override identical characters seen in the corpus.
    """
    freq = Counter(''.join(texts))  # char level
    char2index = {ch: i for i, (ch, _) in enumerate(freq.most_common(n), start=4)}
    for i, sym in enumerate('~^$#'):
        char2index[sym] = i
    index2char = {i: ch for ch, i in char2index.items()}
    return char2index, index2char
def indexes_from_text(text, char2index):
    """Map a string to its index list, wrapped in SOS (1) / EOS (2) markers."""
    body = [char2index[c] for c in text]
    return [1] + body + [2]
def pad_seq(seq, max_length):
    """Pad `seq` IN PLACE with zeros up to max_length and return the same list."""
    seq.extend(0 for _ in range(max_length - len(seq)))
    return seq
def random_batch(batch_size, pairs, src_c2ix, trg_c2ix):
    """Sample a random minibatch of (source, target) text pairs as padded tensors.

    Pairs are sampled with replacement, indexed char-by-char, sorted by source
    length (longest first, as pack_padded_sequence-style consumers expect),
    zero-padded, and returned as (seq_len x batch_size) LongTensors on `device`
    together with the true (unpadded) lengths.
    """
    input_seqs, target_seqs = [], []
    # NOTE(review): `random` here is numpy's random module pulled in by the
    # file's `from numpy import *` — random.choice(len, size) samples indices
    # with replacement; confirm no stdlib `random` shadows it.
    for i in random.choice(len(pairs), batch_size):
        input_seqs.append(indexes_from_text(pairs[i][0], src_c2ix))
        target_seqs.append(indexes_from_text(pairs[i][1], trg_c2ix))
    # Sort jointly by source length, descending, keeping pairs aligned.
    seq_pairs = sorted(zip(input_seqs, target_seqs), key=lambda p: len(p[0]), reverse=True)
    input_seqs, target_seqs = zip(*seq_pairs)
    input_lengths = [len(s) for s in input_seqs]
    input_padded = [pad_seq(s, max(input_lengths)) for s in input_seqs]
    target_lengths = [len(s) for s in target_seqs]
    target_padded = [pad_seq(s, max(target_lengths)) for s in target_seqs]
    input_var = torch.LongTensor(input_padded).transpose(0, 1)
    # seq_len x batch_size
    target_var = torch.LongTensor(target_padded).transpose(0, 1)
    input_var = input_var.to(device)
    target_var = target_var.to(device)
    return input_var, input_lengths, target_var, target_lengths
def save_model(model , index):
    """Pickle `model` to 'model.<index>.m' in the current working directory.

    The context manager guarantees the handle is flushed and closed even when
    pickling raises; the original open/flush/close sequence leaked the handle
    on error.
    """
    with open('model.' + str(index) + '.m' , 'wb') as fw:
        pickle.dump(model , fw)
|
997,966 | c47f5485b92ce610ed5321025dae35b2074b6a8d | from clarifai.rest import ClarifaiApp
import json
# name, grade, 3 bullet points, source
# Brand sustainability data: name -> {0: grade, 1: description, 2: source URL}.
# BUG FIX: the 'Smartwater' entry was missing a comma before its `2:` key,
# which made this whole assignment a SyntaxError.
dictionary = {'Pepsi': {0: 'D',
                        1: 'PepsiCo has managed to decrease its total climate footprint from 2014 to 2017.'
                           'PepsiCo scores poorly because the brand remains secretive in its sustainability report'
                           'and refuses to disclose information. PepsiCo mentions target to reduce carbon emissions.',
                        2: 'https://rankabrand.org/soda/Pepsi'},
              'Coke': {0: 'D',
                       1: 'Coca-Cola Company implements measures to reduce emissions, but has still increased in overall climate footprint.'
                          'The company mentions using renewable energy, but is not clear about how much.'
                          'Coca-Cola Company implements measures to purchase its other products, such as coffee, tea and fruits, from sustainable sources',
                       2: 'https://rankabrand.org/soda/Coca-Cola'},
              'Yerba': {0: 'B',
                        1: 'Guayaki harvests yerba in an organic and ecologically friendly manner.'
                           ' Guayaki actively contributes to environmental protection by working to restore 200,000 acres of rainforest. '
                           'The company creates 1,000 living wage jobs for local workers.',
                        2: 'https://magazine.wellwallet.com/gold-indios-guayakis-yerba-mate-ushering-sustainable-economy'},
              'Kettle Brand': {0: 'B',
                               1: 'After cooking chips with vegetable oil, Kettle Brand converts excess oil to biodiesel to fuel their vehicles.'
                                  'In 2019, Kettle Brand chips cut the amount of materials used in packaging by 43%,'
                                  'reducing greenhouse gas emissions from packaging by 51% and waste from packaging by 2 million pounds.',
                               2: 'https://www.kettlebrand.com/sustainability/'},
              'Fiji': {0: 'A',
                       1: 'In 2007, Fiji Water has managed to keep their total annual carbon footprint low.'
                          'The company is taking measures to continue to lower their carbon emissions and packaging materials.'
                          'Fiji Water has goals to reduce the amount of fuel used in transporting their products.',
                       2: 'https://www.csrwire.com/press_releases/15107-FIJI-Water-Becomes-First-Bottled-Water-Company-to-Release-Carbon-Footprint-of-Its-Products'},
              'Smartwater': {0: 'D',
                             1: 'Coca-Cola Company implements measures to reduce emissions, but has still increased in overall climate footprint.'
                                'The company mentions using renewable energy, but is not clear about how much.'
                                'Coca-Cola Company implements measures to purchase its other products, such as coffee, tea and fruits, from sustainable sources',
                             2: 'https://rankabrand.org/soda/Coca-Cola'}
              }
newArray = [] # Array that is returned. Stored as [name,grade,description,source]
# NOTE(review): hard-coded API key — should move to an env var / config file.
app = ClarifaiApp(api_key='a83ecce289b64338a8036f3603e8d551') # api call
def main(url):
    """Classify the brand in the image at `url` and return its sustainability info.

    Returns [name, grade, description, source] when the model matches a known
    brand with at least 50% confidence, otherwise None.
    """
    model1 = app.models.get('Brand')
    output = model1.predict_by_url(url)['outputs'][0]['data']['concepts']
    newJson = json.dumps(output[0])  # dumps json data into newJson
    completeJson = json.loads(newJson)  # loads json data into completeJson
    for key in dictionary:  # loops through dictionary
        if key == completeJson['name']:
            # Require at least 50% model confidence before trusting the match.
            if completeJson['value'] < .5:
                return None
            # BUG FIX: build the result locally — the original appended to the
            # module-level newArray, so repeated calls accumulated stale
            # entries from earlier lookups.
            result = [key]  # first element is the brand name (dictionary key)
            for size in range(0, 3):
                result.append(dictionary[key][size])  # grade, description, source
            return result
    print('None found')
    return None
if __name__ == '__main__':
    # BUG FIX: main() requires an image URL; the original called it with no
    # arguments and raised TypeError immediately. Prompt for one instead.
    main(input("URL of image: "))
|
997,967 | 01bb8d5af7d73c86604f15b33ec6f9e0eb692ce0 | #!/bin/python3
import unittest2 as unittest
from jumping_on_the_clouds import jumpingOnClouds
"""
https://www.hackerrank.com/challenges/jumping-on-the-clouds/problem
Jump 1 or 2 steps on the cloud ( marked by 0).
The thunder clouds (marked by 1) have to be avoided.
"""
class Test_jumping_on_clouds(unittest.TestCase):
    """Unit tests for jumpingOnClouds (minimum jumps while avoiding thunder clouds)."""

    def test_case_0(self):
        # BUG FIX (all cases): assertEquals is a deprecated alias removed in
        # Python 3.12 — use assertEqual.
        self.assertEqual(jumpingOnClouds([0, 0, 1, 0, 0, 1, 0]), 4)

    def test_case_1(self):
        self.assertEqual(jumpingOnClouds([0, 0, 0, 1, 0, 0]), 3)

    def test_case_contain_only_0(self):
        self.assertEqual(jumpingOnClouds([0, 0, 0, 0, 0, 0]), 3)
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
|
997,968 | 46e20fc640690c09ff6fb889ce1d43eb0bda0417 | from pathlib import Path, PurePosixPath
import fnmatch
import hashlib
from cw.directory_walk import directory_walk
from io import StringIO
import os
import re
from typing import Union
class DirectoryChecksum:
    """
    Class used to generate a file containing the sha256 hashes of all files in a
    directory and its subdirectories. The instance of the class is also used to
    validate a directory using a preexisting hash file.
    :param dir_path: String or :class:`os.PathLike` object with the path to the target directory.
    :param ignore_patterns: Iterable with unix shell-style patterns of file and directory names
        to ignore. :func:`fnmatch.fnmatch` is used to match the patterns.
    :param checksum_file_path: Path, relative to the directory, of the checksum file
        (defaults to "./checksum").
    """
    def __init__(self, dir_path: Union[os.PathLike, str], ignore_patterns=None, checksum_file_path="./checksum"):
        self.dir_path = dir_path if isinstance(dir_path, Path) else Path(dir_path)
        """
        :class:`os.PathLike` object pointing to the target directory,
        """
        self.ignore_patterns = set(ignore_patterns or [])
        """
        Set of the patterns to ignore.
        """
        self.checksum_file_path = self.dir_path / checksum_file_path
        """
        Path object to the checksum file.
        """
    @property
    def has_checksum_file(self) -> bool:
        """
        True if the directory already has a checksum file.
        """
        return self.checksum_file_path.is_file()
    def create_checksum_file(self, force=False) -> bytes:
        """
        Generate a new checksum file in the directory.
        :class:`sim_common.directory_checksum.ChecksumFileAlreadyExistsError` will be raised
        if the checksum file already exists (unless ``force`` is True).
        :param force: If True the checksum file will be (re)created whether it
            exists or not.
        :returns: A bytestring with the sha256 hash of the contents of the checksum file.
        """
        # Check if the checksum file has already been created and raise an error
        # if it has been, except if its forced.
        if self.has_checksum_file and not force:
            raise ChecksumFileAlreadyExistsError(self.dir_path)
        # Initialize dictionary containing the hashes of all files in the directory.
        dir_hashes = {}
        # Create hashing object and walk through all files in the directory and its
        # subdirectories.
        hash_generator = hashlib.sha256()
        for file_path in directory_walk(self.dir_path, self.ignore_pattern_filter):
            with open(file_path, "rb") as f:
                # Read the file chunk by chunk and update the hash generator.
                for chunk in iter(lambda: f.read(4096), b""):
                    hash_generator.update(chunk)
            # Put the hash in the dictionary with the relative path of the file
            # w.r.t. the root directory as the key. The path will be a Posix path.
            dir_hashes[str(PurePosixPath(file_path.relative_to(self.dir_path)))] = hash_generator.hexdigest()
            # Create new hash generator (one fresh generator per file).
            hash_generator = hashlib.sha256()
        # Initialize string io object used to generate the contents of the hash file.
        str_io = StringIO()
        # Write the file paths and hashes to the string io object. One
        # pair on each line separated by tabs.
        for file_path_str, file_hash in dir_hashes.items():
            str_io.write(f"{file_path_str}\t{file_hash}\n")
        # Generate the hash of the hash_file contents.
        hash_generator.update(str_io.getvalue().encode('utf-8'))
        # Save the hashes to the hash file.
        with open(self.checksum_file_path, "w") as f:
            f.write(str_io.getvalue())
        # Return the hash of the checksum file.
        return hash_generator.digest()
    def delete_checksum_file(self) -> None:
        """Remove the checksum file if it exists; a missing file is a no-op."""
        if self.checksum_file_path.exists():
            self.checksum_file_path.unlink()
    def ignore_pattern_filter(self, path) -> bool:
        """
        Checks whether a path doesn't match one of the ignore patterns.
        :param path: :class:`os.PathLike` object pointing a file or directory.
        :return: True if the path doesn't math any of the ignore patterns, otherwise False.
        """
        # Return False for the checksum file.
        if self.checksum_file_path.exists():
            if self.checksum_file_path.samefile(path):
                return False
        # Return True of no patterns where given.
        if self.ignore_patterns is None:
            return True
        # Loop through all patterns.
        for pattern in self.ignore_patterns:
            # Return False if the path matches an ignore pattern.
            if fnmatch.fnmatch(path.name, pattern):
                return False
        # If it reached this point, this means that it doesn't match any of the patterns,
        # so return True.
        return True
    def validate_directory(self, checksum_file_hash=None) -> bool:
        """
        Validates a directory by checking the hash of all the files listed in the
        checksum file.
        :param checksum_file_hash: Optional bytestring with the hash of the sha256
            checksum file.
        :return: True if the directory content is valid. Note: a missing or
            corrupted checksum file yields False rather than raising.
        """
        # If the directory doesn't have a checksum file, report invalid.
        if not self.has_checksum_file:
            return False
        # Load in all the text in the checksum file.
        with open(self.checksum_file_path, "r") as f:
            checksum_file_content = f.read()
        # If a hash was given for the checksum file, check the validity
        # of the checksum file.
        if checksum_file_hash is not None:
            hash_generator = hashlib.sha256()
            hash_generator.update(checksum_file_content.encode("utf-8"))
            if checksum_file_hash != hash_generator.digest():
                return False
        # If we have reached this point and the checksum_file_hash was given, then we know that
        # the checksum file has not been corrupted. If the checksum_file_hash was not given then we
        # are not sure, so we need to check if there are any errors in the file.
        # This is done by adding and extra matching group to the regex "|(.)". This group will
        # match anything that has not been matched by the base regex. Meaning that
        # if something is matched by this group, there are syntax errors in the file.
        checksum_file_regex_pattern = \
            r"(?:(.+)(?:\t)([a-fA-F0-9]{64}))|(.)" if checksum_file_hash is None else r"(?:(.+)(?:\t)([a-fA-F0-9]{64}))"
        # Loop through all matches found by the regex.
        for match in re.finditer(checksum_file_regex_pattern, checksum_file_content):
            # Check if syntax error was found in case no checksum_file_hash was given.
            if checksum_file_hash is None:
                if match.group(3) is not None:
                    return False
            # Get the file path and file hash and convert these to the correct type.
            file_path = self.dir_path / match.group(1)
            file_hash = bytes.fromhex(match.group(2))
            # Generate the sha256 hash of the contents of the file.
            hash_generator = hashlib.sha256()
            with open(file_path, "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    hash_generator.update(chunk)
            # Return false if the hash doesn't match.
            if file_hash != hash_generator.digest():
                return False
        # If we have reached here, then all of the files have been checked and they are valid.
        # so return True.
        return True
class ChecksumFileAlreadyExistsError(Exception):
    """
    Raised when creating a checksum file in a directory that already has one.
    Pass force=True to create_checksum_file to recreate it instead.
    """

    def __init__(self, dir_path):
        message = f"'{str(dir_path)}' already has a checksum file."
        super().__init__(message)
class NoChecksumFileError(Exception):
    """
    Raised when validating a directory that has no checksum file.
    """

    def __init__(self, dir_path):
        message = f"'{str(dir_path)}' has no checksum file."
        super().__init__(message)
class InvalidChecksumFile(Exception):
    """
    Raised when the directory's checksum file is present but invalid.
    """

    def __init__(self, dir_path):
        message = f"'{str(dir_path)}' has an invalid checksum file."
        super().__init__(message)
|
997,969 | fd1d33b3febc12d1a74421bd522b5d81483ccfa7 | from xlutils.copy import copy
import xlrd
import xlwt
import time
class Excel_dx():
    """Thin helper around ./comm/case.xls: read cells, map header names to
    column indices, and write cells back while preserving formatting."""
    def du (self,h,l):
        """Return the raw value of the cell at row h, column l of sheet 0."""
        path = './comm/case.xls'
        workbook = xlrd.open_workbook(path)
        data_sheet = workbook.sheets()[0]
        # Read the cell value at (row h, column l).
        ss =data_sheet.cell_value(h,l)
        return ss
    def bt(self,a):
        """Map a (Chinese) column header name to its zero-based column index."""
        # Header names below are runtime keys and must stay as-is:
        # ID, case name, API address, execute?, request type, request headers,
        # test type, login required?, dependent case id, request params,
        # status code, response body, run result, assertion, run time.
        name = ['ID','用例名称','API地址','是否执行','请求类型','请求头','测试类型','是否需要登陆',
        '依赖caseid','请求参数','状态码','返回结果','执行结果','断言','执行时间']
        line = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
        meter = dict(zip(name,line))
        A = a
        return meter[A]
    def xg(self,h,l,vale):
        """Write `vale` into cell (h, l) of case.xls, keeping the workbook format."""
        read_file = xlrd.open_workbook('./comm/case.xls', formatting_info=True)
        # Parameter notes:
        # file_path: full path to the workbook
        # formatting_info=True: keep the original Excel formatting
        # Copy the workbook into memory so it can be modified.
        write_data = copy(read_file)
        # Get the first sheet of the in-memory copy.
        write_save = write_data.get_sheet(0)
        # Write the value with word-wrap enabled.
        style = xlwt.easyxf('align: wrap on')
        write_save.write(h,l,vale,style)
        # Parameter notes:
        # h, l: target cell coordinates
        # vale: value to write
        # Save the modified workbook back to the original path.
        write_data.save('./comm/case.xls')
if __name__=="__main__":
a = Excel_dx()
self = "pass"
aa = a.bt(1)
print (aa) |
997,970 | 9a53f7d6601c42b0fe4216c202d5e0abeeefd993 | """
Import the datas from the json dataset file and return the big array that contains it
"""
import simplejson as json
# BUG FIX: the original path contained a shell-escaped space
# ('find\ relationships'), which Python keeps as a literal backslash + space
# and therefore never matches the directory on disk.
datasetPath = '../../find relationships/dataset/yelp_academic_dataset_business.json'
# Context managers close both files even if a line fails to parse.
with open("heatMapData.html", "w") as heatMapData, open(datasetPath) as fin:
    for business in fin:
        line_contents = json.loads(business)
        # Only plot Arizona businesses.
        if line_contents["state"] == "AZ":
            latitude = str(line_contents['latitude'])
            longitude = str(line_contents['longitude'])
            data = 'new google.maps.LatLng(' + latitude + ', ' + longitude + '), \n'
            heatMapData.write(data)
997,971 | d7557c330cf64c830e270314d5d0c21d02da9881 | import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
class SampleArchitecture2(object):
    """Three-conv-layer CNN for 32x32x3 images, built on the TF1 contrib.learn
    Estimator API (10 output classes, SGD optimiser)."""
    # Fixed input geometry and optimiser step size.
    height = 32
    width = 32
    channels = 3
    learning_rate = 0.002
    @classmethod
    def get_model(cls, features, labels, mode):
        """model_fn for tf.contrib.learn: builds the graph and returns a
        ModelFnOps carrying predictions, loss and (in TRAIN mode) the train op."""
        # Input Layer
        input_layer = tf.reshape(features, [
            -1, SampleArchitecture2.height, SampleArchitecture2.width, SampleArchitecture2.channels
        ])
        # Convolutional Layer #1
        conv1 = tf.layers.conv2d(
            inputs=input_layer, filters=32, kernel_size=[5, 5],
            padding="same", activation=tf.nn.relu
        )
        # Pooling Layer #1
        pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
        # Convolutional Layer #2 and Pooling Layer #2
        conv2 = tf.layers.conv2d(
            inputs=pool1, filters=32, kernel_size=[5, 5],
            padding="same", activation=tf.nn.relu
        )
        pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
        # Convolutional Layer #3 (No Pooling)
        conv3 = tf.layers.conv2d(
            inputs=pool2, filters=64, kernel_size=[5, 5],
            padding="same", activation=tf.nn.relu
        )
        pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)
        # Dense Layer
        # Flatten pool3: three 2x2 poolings shrink 32 -> 16 -> 8 -> 4 spatially.
        pool3_flat = tf.reshape(pool3, [-1, 4 * 4 * 64])
        dense = tf.layers.dense(inputs=pool3_flat, units=64)
        # Logits Layer
        logits = tf.layers.dense(inputs=dense, units=10)
        loss = None
        train_op = None
        # Calculate Loss (for both TRAIN and EVAL modes)
        if mode != learn.ModeKeys.INFER:
            onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
            loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
        # Configure the Training Op (for TRAIN mode)
        if mode == learn.ModeKeys.TRAIN:
            train_op = tf.contrib.layers.optimize_loss(
                loss=loss,
                global_step=tf.contrib.framework.get_global_step(),
                learning_rate=SampleArchitecture2.learning_rate,
                optimizer="SGD"
            )
        # Generate Predictions
        predictions = {
            "classes": tf.argmax(input=logits, axis=1),
            "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
        }
        # Return a ModelFnOps object
        return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions, loss=loss, train_op=train_op)
|
class Employ:
    """Simple employee record: number, name and salary."""

    def __init__(self, empno, name, sal):
        self.empno = empno
        self.name = name
        self.sal = sal

    def __str__(self):
        # The trailing space matches the original output format exactly.
        return "Empno %d, Name %s, Salary %d " % (self.empno, self.name, self.sal)
emp = Employ(1, "Satish", 88555)
print(emp) |
997,973 | 142a0b26fcc0a5a799f6b488c430c6f340060d0c | class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def leftLeavesSumRec(root, isLeft, summ):
    """Accumulate the values of all left leaves under *root* into summ[0].

    isLeft is truthy when *root* was reached as its parent's left child;
    summ is a one-element list used as a mutable accumulator.
    """
    if root is None:
        return
    is_leaf = root.left is None and root.right is None
    if is_leaf and isLeft:
        summ[0] += root.data
    # Recurse, tagging which side each child hangs off of.
    leftLeavesSumRec(root.left, 1, summ)
    leftLeavesSumRec(root.right, 0, summ)
def leftLeavesSum(root):
    """Return the sum of the values of all left leaves in the tree at *root*."""
    acc = [0]
    # The root itself is not anyone's left child, hence isLeft=0.
    leftLeavesSumRec(root, 0, acc)
    return acc[0]
root = Node(20)
root.left = Node(9)
root.right = Node(49)
root.right.left = Node(23)
root.right.right = Node(52)
root.right.right.left = Node(50)
root.left.left = Node(5)
root.left.right = Node(12)
root.left.right.right = Node(12)
print ("Sum of left leaves is")
print(leftLeavesSum(root))
|
997,974 | 0576bb7bba6319c9ce6a1e5f8c8e83802a52e694 | """
http
~~~~
A module to handle making HTTP requests.
"""
import requests
# Exceptions.
class AuthenticationError(RuntimeError):
    """Raised when the request received a 401 error from the server."""
class NotFoundError(RuntimeError):
    """Raised when the request received a 404 error from the server."""
class ServerError(RuntimeError):
    """Raised when the request received any 5xx error from the server."""
# HTTP calls.
def get(url: str, session: requests.Session = None):
    """Make an HTTP GET request and return the response body as text.

    If *session* is given, the request goes through it (so cookies and
    connection pooling are preserved); otherwise a one-off request is made.

    Raises:
        AuthenticationError: the server answered 401.
        NotFoundError: the server answered 404.
        ServerError: the server answered with any 5xx status.
    """
    if session:
        resp = session.get(url)
    else:
        resp = requests.get(url)
    if resp.status_code == 401:
        raise AuthenticationError('Authentication failed.')
    if resp.status_code == 404:
        raise NotFoundError('Page not found.')
    if 500 <= resp.status_code < 600:
        # Dump the body to aid debugging before failing.
        print(resp.text)
        raise ServerError('Encountered a server error.')
    return resp.text
# Utility functions.
def build_session():
    """Provide a session object for use in tracking session.

    Returns a fresh requests.Session; pass it to get() to reuse cookies
    and connections across calls.
    """
    return requests.Session()
997,975 | 9e5c5809380697d4fb91d89c4875185934319a7b | #Create Cluster
import sys
import os
sys.path.append(os.path.abspath(__file__ + '/../../'))
from Utils.utils import Utils
import pprint
class CreateCluster:
def __init__(self):
print('Create Clusterr')
self.utils = Utils(sys.argv)
self.hostname = sys.argv[1]
def create_cluster(self):
data = self.utils.read_input(os.path.abspath(__file__ +'/../')+'/create_cluster_spec.json')
validations_url = 'https://'+self.hostname+'/v1/clusters/validations/creations'
print ('Validating the input....')
response = self.utils.post_request(data,validations_url)
if(response['resultStatus'] != 'SUCCEEDED'):
print ('Validation Failed.')
exit(1)
create_cluster_url = 'https://'+ self.hostname + '/v1/clusters'
response = self.utils.post_request(data,create_cluster_url)
print ('Creating Cluster...')
task_url = 'https://'+self.hostname+'/v1/tasks/' + response['id']
print('Create cluster eneded with status: ' + self.utils.poll_on_id(task_url,True))
if __name__== "__main__":
CreateCluster().create_cluster()
|
997,976 | d7547c21b0af5cb982c0ee22922d8872cede91af | # Generated by Django 3.1.7 on 2021-06-28 16:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: store Index.thumbnail uploads under video/<2-digit year>."""
    dependencies = [
        ('vdapp', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='index',
            name='thumbnail',
            # %y expands to the 2-digit upload year, e.g. video/21/
            field=models.FileField(upload_to='video/%y'),
        ),
    ]
|
997,977 | 5f48880739e6f88724874f34104a445f72aca9bc | ## Create acceptance uncertainties for the ggH QCD uncertainties (i.e. the so called 9NP scheme)
## by renormalizing the default inputs by total-xsection variation:
##
## acc_variation = variation.Scale( sum_of_weights_nominal / sum_of_weights_variation )
##
## where sum_of_weights_nominal and sum_of_weights_variation are sums of events weights
## of all generated events.
##
## We use the following assumptons:
## 1. ggH for the lep-lep channel consists only from the DSID 345120
## 2. ggH for the had-had channel consists only from the DSID 345123
## 3. ggH for the lep-had channel consists only from the DSIDs 345121 and 345121
## 4. There is no difference in acceptance between positive and negative lepton channels in lep-had
try:
# command line agrument parser
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-ws', '--wsInputFile', help='Name of the combined WS input root file')
args = parser.parse_args()
# have to import root after the parser
from ROOT import TFile
from wsPostProcess import *
# open the WS inpt file
f = TFile.Open(args.wsInputFile)
print "Loading WS input folder tree..."
tree = readFolderTree(f)
print "...done"
# sample
sample = 'ggH'
# normalization factors:
ll_sum_of_weights_nominal = 53957603.57
ll_sum_of_weights = [ ('theory_sig_qcd_0_high', 56492715.88),
('theory_sig_qcd_1_high', 55185917.43),
('theory_sig_qcd_2_high', 54041298.10),
('theory_sig_qcd_3_high', 54047280.43),
('theory_sig_qcd_4_high', 54121620.68),
('theory_sig_qcd_5_high', 53956505.47),
('theory_sig_qcd_6_high', 54076640.70),
('theory_sig_qcd_7_high', 54182534.31),
('theory_sig_qcd_8_high', 54092313.60),
]
lh_sum_of_weights_nominal = 42976109.88
lh_sum_of_weights = [ ('theory_sig_qcd_0_high', 44991163.54),
('theory_sig_qcd_1_high', 43941580.52),
('theory_sig_qcd_2_high', 43010396.20),
('theory_sig_qcd_3_high', 43060393.68),
('theory_sig_qcd_4_high', 43104010.47),
('theory_sig_qcd_5_high', 42974999.18),
('theory_sig_qcd_6_high', 43069327.90),
('theory_sig_qcd_7_high', 43171852.50),
('theory_sig_qcd_8_high', 43095802.77),
]
hh_sum_of_weights_nominal = 53538670.72
hh_sum_of_weights = [ ('theory_sig_qcd_0_high', 56042603.54),
('theory_sig_qcd_1_high', 54729386.89),
('theory_sig_qcd_2_high', 53568183.13),
('theory_sig_qcd_3_high', 53620581.91),
('theory_sig_qcd_4_high', 53695584.28),
('theory_sig_qcd_5_high', 53540003.79),
('theory_sig_qcd_6_high', 53631218.96),
('theory_sig_qcd_7_high', 53751977.36),
('theory_sig_qcd_8_high', 53671009.16),
]
# loop over regions
for regionName,region in tree.iteritems():
if type(region) == dict and sample in region:
# pull out the appropriate normalzation factors for this channels
if 'llAll' in regionName:
sum_of_weights_nominal = ll_sum_of_weights_nominal
sum_of_weights = ll_sum_of_weights
elif 'ehAll' in regionName or 'mhAll' in regionName:
sum_of_weights_nominal = lh_sum_of_weights_nominal
sum_of_weights = lh_sum_of_weights
elif 'hhAll' in regionName:
sum_of_weights_nominal = hh_sum_of_weights_nominal
sum_of_weights = hh_sum_of_weights
else:
raise ValueError("Cannot determine channel for region {}".format(regionName))
print "{}/{}".format(regionName, sample)
# modify the variations by a correction factor sum_of_weights_nominal / sum_of_weights_variation
for histName, sum_of_weights_var in sum_of_weights:
print " {}".format(histName)
if histName not in region[sample] or region[sample][histName].Integral() == 0:
print " not available or has 0 intergal"
else:
# create a renormalized variation
histNameNew = histName.replace("_high", "_acceptance_high")
new_var = region[sample][histName].Clone(histNameNew)
new_var.Scale( sum_of_weights_nominal / sum_of_weights_var )
# store rescaled histograms in the tree:
region[sample][new_var.GetName()] = new_var
# debug printout
print " original overall variation {}".format(region[sample][histName].Integral() / region[sample]['nominal'].Integral() )
print " rescaled overall variation {} (was rescaled by {})".format(region[sample][histNameNew].Integral() / region[sample]['nominal'].Integral(), sum_of_weights_nominal / sum_of_weights_var )
# save the updated tree
if '.root' in args.wsInputFile:
newFileName = args.wsInputFile.replace('.root','_accGGH9NP.root')
else:
newFileName = args.wsInputFile + '_accGGH9NP'
print "Creating the processed WS input file {} ...".format(newFileName)
fOut = TFile.Open(newFileName, 'recreate')
save(tree, fOut)
print "...done"
except SystemExit as e:
pass
except Exception:
raise
# the end
|
997,978 | f72c29d04287f930ac9c5163771c7d2ff9cbf9b7 | import sys
import os
import shutil
import re
def delete_files(directory, file_type):
    """Recursively delete every file under *directory* whose name ends with
    the extension *file_type* (given without the leading dot).

    Matched paths are removed in place; returns None.
    """
    # Anchor the pattern at the end of the name (and escape it) so that
    # e.g. file_type='txt' no longer deletes 'notes.txt.bak' — the old
    # pattern searched anywhere in the filename.
    pattern = re.compile(re.escape('.' + file_type) + r'$')
    for dir_name, folders, dir_files in os.walk(directory):
        for name in dir_files:
            if pattern.search(name):
                print("directory: ", dir_name)
                print("Folder: ", folders)
                print("files: ", name)
                # os.path.join is portable across path separators.
                os.remove(os.path.join(dir_name, name))
    return None
def delete_folders(directory, gen_folder_name):
    """Recursively delete every sub-folder of *directory* whose name matches
    the regular expression *gen_folder_name*, including its contents.

    Returns None.
    """
    # Walk bottom-up so a parent is not removed while its children are
    # still pending in the walk.
    for dir_name, folders, dir_files in os.walk(directory, topdown=False):
        for folder in folders:
            if re.search(gen_folder_name, folder):
                # os.path.join replaces the hand-built '/'-concatenation,
                # which was not portable across platforms.
                target = os.path.join(dir_name, folder)
                print(target)
                shutil.rmtree(target)
    return None
|
997,979 | 8b8242e8c578d62c4cef8dc410f721064c4d9224 | import ROOT
from ROOT import TGraph,TColor
from array import array
import CMS_lumi
from operator import itemgetter
ROOT.gROOT.SetBatch()
hexcolor=["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd", "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf"]
intcolor=[TColor.GetColor(i) for i in hexcolor]
x_13,y_th_13,y_13_obs,y_th = array( 'd' ), array( 'd' ), array( 'd' ), array( 'd' )
x_13.append(1.000)
x_13.append(1.250)
x_13.append(1.500)
x_13.append(2.000)
x_13.append(2.500)
x_13.append(3.000)
x_13.append(3.500)
x_13.append(4.000)
x_13.append(4.500)
x_13.append(5.000)
#x_th_13.append(6.000)
y_th_13.append(1.3*20.05)
y_th_13.append(1.3*7.92)
y_th_13.append(1.3*3.519)
y_th_13.append(1.3*0.9528)
y_th_13.append(1.3*0.3136)
y_th_13.append(1.3*0.1289)
y_th_13.append(1.3*0.05452)
y_th_13.append(1.3*0.02807)
y_th_13.append(1.3*0.01603)
y_th_13.append(1.3*0.009095)
#y_th_13.append(0.00960796535494)
y_13_obs.append(0.76)
y_13_obs.append(0.82)
y_13_obs.append(0.28)
y_13_obs.append(0.091)
y_13_obs.append(0.044)
y_13_obs.append(0.021)
y_13_obs.append(0.018)
y_13_obs.append(0.021)
y_13_obs.append(0.019)
y_13_obs.append(0.024)
y_th.append(1.3*1.153)
y_th.append(1.3*1.556*0.1)
y_th.append(1.3*3.585*0.01)
y_th.append(1.3*1.174*0.01)
y_th.append(1.3*4.939*0.001)
masses=[2,3,4,5,6]
x=array('d',[2.000,3.000,4.000,5.000,6.000])
theory = TGraph( 5, x, y_th )
theory.SetLineWidth(3)
theory.SetLineColor(ROOT.kAzure)
theory.SetMarkerColor(ROOT.kAzure)
theory_13 = TGraph( 10, x_13, y_th_13 )
theory_13.SetLineWidth(3)
theory_13.SetLineColor(ROOT.kRed)
theory_13.SetMarkerColor(ROOT.kRed)
obs_13 = TGraph( 10, x_13, y_13_obs )
obs_13.SetLineWidth(3)
obs_13.SetLineColor(ROOT.kRed)
obs_13.SetMarkerColor(ROOT.kRed)
limits=[]
sig3=[]
sig5=[]
def rounding(numero):
    """Round *numero* to two significant figures and return it as a string."""
    two_sig = float('%.2g' % float(numero))
    return str(two_sig)
for lumi in ['300','1000','3000','36']:
#for lumi in ['36','new_36','double_36']:
c=ROOT.TCanvas('unodlimit_'+lumi,'',1200,1000)
c.SetLogy()
margine=0.15
c.SetRightMargin(0.10)
c.SetLeftMargin(margine)
c.SetTopMargin(0.10)
c.SetBottomMargin(margine)
theta_exp_result = open('plots/theta_'+lumi+'.txt','r')
theta_exp_lines=theta_exp_result.readlines()
lines_exp=[filter(None, i.split(' ')) for i in theta_exp_lines[1:] ]
y_exp=array('d',[float(lines_exp[i][1]) for i in range(len(lines_exp))])
y_err2down=array('d',[float(lines_exp[i][1])-float(lines_exp[i][2]) for i in range(len(lines_exp))])
y_err1down=array('d',[float(lines_exp[i][1])-float(lines_exp[i][4]) for i in range(len(lines_exp))])
y_err1up=array('d',[float(lines_exp[i][5])-float(lines_exp[i][1]) for i in range(len(lines_exp))])
y_err2up=array('d',[float(lines_exp[i][3])-float(lines_exp[i][1]) for i in range(len(lines_exp))])
zeros=array('d',[0]*len(lines_exp))
print lumi
for i in range(len(lines_exp)):
print masses[i],'&',rounding(lines_exp[i][2]),'&',rounding(lines_exp[i][4]),'&',rounding(lines_exp[i][1]),'&',rounding(lines_exp[i][5]),'&',rounding(lines_exp[i][3]),'\\\\'
exp1sigma=ROOT.TGraphAsymmErrors(5,x,y_exp,zeros,zeros,y_err1down,y_err1up)
exp2sigma=ROOT.TGraphAsymmErrors(5,x,y_exp,zeros,zeros,y_err2down,y_err2up)
explim=TGraph(5,x,y_exp)
explim.SetLineWidth(2)
explim.SetLineStyle(2)
explim.SetTitle('')
limits.append(explim.Clone())
exp2sigma.SetTitle('')
exp1sigma.SetTitle('')
exp1sigma.SetFillColor(ROOT.kGreen+1)
exp2sigma.SetFillColor(ROOT.kOrange)
exp2sigma.SetMaximum(10000)
exp2sigma.SetMinimum(0.0007)
exp2sigma.Draw('a3lp')
exp2sigma.GetXaxis().SetTitle("g_{KK} mass [TeV]")
exp2sigma.GetXaxis().SetRangeUser(1500,6500)
exp2sigma.GetYaxis().SetTitle("Upper cross section limit [pb]")
sizefactor=1.5
exp2sigma.GetXaxis().SetTitleSize(sizefactor*exp2sigma.GetXaxis().GetTitleSize())
exp2sigma.GetYaxis().SetTitleSize(sizefactor*exp2sigma.GetYaxis().GetTitleSize())
exp2sigma.GetXaxis().SetLabelSize(sizefactor*exp2sigma.GetXaxis().GetLabelSize())
exp2sigma.GetYaxis().SetLabelSize(sizefactor*exp2sigma.GetYaxis().GetLabelSize())
#exp2sigma.GetYaxis().SetMoreLogLabels(1)
offset=1.2
exp2sigma.GetXaxis().SetTitleOffset(offset*exp2sigma.GetXaxis().GetTitleOffset())
exp2sigma.GetYaxis().SetTitleOffset(offset*exp2sigma.GetYaxis().GetTitleOffset())
exp1sigma.Draw('3')
explim.Draw('lp')
theory.Draw('l')
legend=ROOT.TLegend(0.335,0.55,0.9,0.9)
legend.SetTextSize(0.030)
legend.SetBorderSize(0)
legend.SetTextFont(42)
legend.SetLineColor(1)
legend.SetLineStyle(1)
legend.SetLineWidth(1)
legend.SetFillColor(0)
legend.SetFillStyle(0)
legend.SetHeader('g_{KK}#rightarrow t#bar{t}')
legend.AddEntry(explim,'Expected','l')
legend.AddEntry(exp1sigma,'#pm 1 std. deviation','f')
legend.AddEntry(exp2sigma,'#pm 2 std. deviation','f')
legend.AddEntry(theory,"g_{KK}#rightarrow t#bar{t}",'l')
legend.Draw()
if '3000' in lumi:
CMS_lumi.CMS_lumi(c, 3, 11)
elif '1000' in lumi:
CMS_lumi.CMS_lumi(c, 2, 11)
elif '300' in lumi:
CMS_lumi.CMS_lumi(c, 1, 11)
elif '36' in lumi:
CMS_lumi.CMS_lumi(c, 0, 11)
c.SaveAs('pdf/unodlimit_'+lumi+'.pdf')
c2=ROOT.TCanvas('sigma_'+lumi,'',1200,1000)
c2.SetLogy()
c2.SetRightMargin(0.10)
c2.SetLeftMargin(margine)
c2.SetTopMargin(0.10)
c2.SetBottomMargin(margine)
sigma3_file = open('plots/theta_'+lumi+'_3sigmaSignif.txt','r')
sigma5_file = open('plots/theta_'+lumi+'_5sigmaSignif.txt','r')
sigma3_lines=sigma3_file.readlines()
sigma5_lines=sigma5_file.readlines()
sigma3_list=[[float(j) for j in filter(None, i.split(' '))] for i in sigma3_lines[1:] ]
sigma5_list=[[float(j) for j in filter(None, i.split(' '))] for i in sigma5_lines[1:] ]
sigma3_list=sorted(sigma3_list, key=itemgetter(0))
sigma5_list=sorted(sigma5_list, key=itemgetter(0))
y_sigma3=array('d',[sigma3_list[i][1] for i in range(len(sigma3_list))])
y_sigma5=array('d',[sigma5_list[i][1] for i in range(len(sigma5_list))])
sigma3=TGraph(5,x,y_sigma3)
sigma5=TGraph(5,x,y_sigma5)
for i in range(len(sigma3_list)):
print masses[i],'&',rounding(sigma3_list[i][1]),'&',rounding(sigma5_list[i][1]),'\\\\'
sigma3.SetLineWidth(3)
#sigma3.SetLineStyle(2)
sigma3.SetTitle('')
sigma3.SetLineColor(ROOT.kGreen+1)
sigma5.SetLineWidth(3)
#sigma5.SetLineStyle(2)
sigma5.SetTitle('')
sigma5.SetLineColor(ROOT.kRed+1)
sigma3.SetMaximum(10000)
sigma3.SetMinimum(0.0007)
sig3.append(sigma3.Clone())
sig5.append(sigma5.Clone())
sigma3.Draw('al')
sigma3.GetXaxis().SetTitle("g_{KK} mass [TeV]")
sigma3.GetXaxis().SetRangeUser(1500,6500)
sigma3.GetYaxis().SetTitle("Cross section [pb]")
sigma3.GetXaxis().SetTitleSize(sizefactor*sigma3.GetXaxis().GetTitleSize())
sigma3.GetYaxis().SetTitleSize(sizefactor*sigma3.GetYaxis().GetTitleSize())
sigma3.GetXaxis().SetLabelSize(sizefactor*sigma3.GetXaxis().GetLabelSize())
sigma3.GetYaxis().SetLabelSize(sizefactor*sigma3.GetYaxis().GetLabelSize())
sigma3.GetXaxis().SetTitleOffset(offset*sigma3.GetXaxis().GetTitleOffset())
sigma3.GetYaxis().SetTitleOffset(offset*sigma3.GetYaxis().GetTitleOffset())
sigma5.Draw('l')
theory.Draw('l')
legend2=ROOT.TLegend(0.335,0.55,0.9,0.9)
legend2.SetTextSize(0.030)
legend2.SetBorderSize(0)
legend2.SetTextFont(42)
legend2.SetLineColor(1)
legend2.SetLineStyle(1)
legend2.SetLineWidth(1)
legend2.SetFillColor(0)
legend2.SetFillStyle(0)
legend2.SetHeader('g_{KK}#rightarrow t#bar{t}')
legend2.AddEntry(sigma3,'3#sigma significance','l')
legend2.AddEntry(sigma5,'5#sigma significance','l')
legend2.AddEntry(theory,"g_{KK}#rightarrow t#bar{t}",'l')
legend2.Draw()
if '3000' in lumi:
CMS_lumi.CMS_lumi(c2, 3, 11)
elif '1000' in lumi:
CMS_lumi.CMS_lumi(c2, 2, 11)
elif '300' in lumi:
CMS_lumi.CMS_lumi(c2, 1, 11)
elif '36' in lumi:
CMS_lumi.CMS_lumi(c2, 0, 11)
c2.SaveAs('pdf/sigma_'+lumi+'.pdf')
y_comp = ["Upper cross section limit [pb]",'3#sigma significance','5#sigma significance']
legends_comp = ["300 fb^{-1} (14 TeV)","1000 fb^{-1} (14 TeV)","3000 fb^{-1} (14 TeV)","36 fb^{-1} (14 TeV)","g_{KK}#rightarrow t#bar{t} (14 TeV)","36 fb^{-1} (13 TeV) 0l+1l+2l","g_{KK}#rightarrow t#bar{t} (13 TeV)"]
#legends_comp = ["36 fb^{-1} (14 TeV)","36 fb^{-1} (14 TeV) no systematics (only stat)","36 fb^{-1} (14 TeV) double systematics","g_{KK}#rightarrow t#bar{t} (14 TeV)","36 fb^{-1} (13 TeV) 0l+1l+2l","g_{KK}#rightarrow t#bar{t} (13 TeV)"]
to_compare=[limits,sig3,sig5]
#to_compare=[limits]
comp_names=['limits','sig3','sig5']
for comparison in range(len(to_compare)):
c3=ROOT.TCanvas(comp_names[comparison]+'_comp','',1200,1000)
c3.SetLogy()
c3.SetRightMargin(0.10)
c3.SetLeftMargin(margine)
c3.SetTopMargin(0.10)
c3.SetBottomMargin(margine)
legend3=ROOT.TLegend(0.335,0.55,0.9,0.9)
legend3.SetTextSize(0.030)
legend3.SetBorderSize(0)
legend3.SetTextFont(42)
legend3.SetLineColor(1)
legend3.SetLineStyle(1)
legend3.SetLineWidth(1)
legend3.SetFillColor(0)
legend3.SetFillStyle(0)
legend3.SetHeader('g_{KK}#rightarrow t#bar{t}')
for i in range(len(to_compare[comparison])):
to_compare[comparison][i].SetLineColor(intcolor[i])
to_compare[comparison][i].SetLineWidth(3)
to_compare[comparison][i].SetLineStyle(0)
if i==0:
to_compare[comparison][i].Draw('al')
to_compare[comparison][i].GetXaxis().SetTitle("g_{KK} mass [TeV]")
to_compare[comparison][i].GetXaxis().SetRangeUser(1500,6500)
to_compare[comparison][i].GetYaxis().SetTitle(y_comp[comparison])
to_compare[comparison][i].SetMaximum(10000)
to_compare[comparison][i].SetMinimum(0.0007)
legend3.AddEntry(to_compare[comparison][i],legends_comp[i],'l')
else:
to_compare[comparison][i].Draw('l')
legend3.AddEntry(to_compare[comparison][i],legends_comp[i],'l')
theory.SetLineColor(ROOT.kBlack)
theory.SetLineWidth(3)
theory.SetLineStyle(0)
theory.Draw('l')
legend3.AddEntry(theory,legends_comp[-3],'l')
if comparison==0:
obs_13.SetLineColor(ROOT.kRed)
obs_13.SetLineWidth(3)
obs_13.SetLineStyle(2)
obs_13.Draw('l')
legend3.AddEntry(obs_13,legends_comp[-2],'l')
theory_13.SetLineColor(ROOT.kBlack)
theory_13.SetLineWidth(3)
theory_13.SetLineStyle(2)
theory_13.Draw('l')
legend3.AddEntry(theory_13,legends_comp[-1],'l')
legend3.Draw()
c3.SaveAs('pdf/'+c3.GetName()+'.pdf')
|
997,980 | 65859cb79891dc1caf4dd0ac428569c4d94300d1 | """Tool used to generate a signal according to the model chosen.
Also extracts measurements and parameters for the DCS-AMP framework to run
with.
"""
from typing import List, Tuple
import numpy
class SignalGen:
def __init__(self, **params):
"""Initialize a set of signal parameters for later generation."""
# Dimension of the true signal x
self.N = params.get('N', 1024)
# Dimension of the measurement vector y
self.M = params.get('M', 256)
# Number of timesteps
self.T = params.get('T', 4)
# Type of the random measurement matrix to generate
# (1) : normalized Gaussian matrix
self.A_type = params.get('A_type', 1)
# Active support probability
self.lambda_ = params.get('lambda_', 0.08) # high sparsity default
# Amplitude mean
self.zeta = params.get('zeta', 0)
# Amplitude variance
self.sigma2 = params.get('sigma2', 1)
# Amplitude innovation rate
self.alpha = params.get('alpha', 0.10)
# Active-to-inactive transition probability
self.p01 = params.get('p01', 0.10)
# Desired signal-to-noise ratio, in dB
self.desired_SNR = params.get('desired_SNR', 25)
@property
def rho(self):
return (2 - self.alpha) * self.sigma2 / self.alpha
@property
def p10(self):
return self.lambda_ * self.p01 / (1 - self.lambda_)
def generate_signal(self) -> Tuple[numpy.ndarray]:
s = numpy.zeros((self.N, self.T))
support = [None for _ in range(self.T)]
n_active = [None for _ in range(self.T)]
# Generate initial support
n_active[0] = numpy.random.binomial(self.N, self.lambda_)
support[0] = numpy.random.choice(range(self.N), size=n_active[0])
s[support[0], 0] = 1
# Evolve support over time
for t in range(1, self.T):
draws = numpy.random.random(self.N)
active_mask, inactive_mask = s[:, t-1] == 0, s[:, t-1] == 1
deactivated = draws[active_mask] > self.p01
activated = draws[inactive_mask] < self.p10
s[active_mask, t] = 1 - deactivated.astype(int)
s[inactive_mask, t] = activated.astype(int)
support[t] = s[t] == 1
n_active[t] = len(support[t])
# Generate amplitude process (complex-valued)
theta = numpy.zeros((self.N, self.T), dtype=numpy.complex)
theta[:, 0] = self.zeta * numpy.ones(self.N)
theta[:, 0] += numpy.sqrt(self.sigma2 / 2) * (
numpy.random.randn(self.N, 2).dot([1, 1j])
)
for t in range(1, self.T):
noise = numpy.random.randn(self.N, 2).dot([1, 1j])
theta[:, t] = (
(1 - self.alpha) * (theta[:, t - 1] - self.zeta) +
self.alpha * numpy.sqrt(self.rho / 2) * noise + self.zeta
)
# True signal
x = theta * s
return x, theta, s
def generate_measurements(
self, x: numpy.ndarray
) -> Tuple[List[numpy.ndarray]]:
"""Generate measurement matrices and vectors from a given signal."""
# Generate A matrices
signal_power = 0
A_list = []
for t in range(self.T):
if self.A_type == 1:
# IID Gausian with unit-norm colums
A = (
numpy.random.randn(self.M, self.N) +
1j * numpy.random.randn(self.M, self.N)
) / numpy.sqrt(2 * self.M)
for n in range(self.N):
A[:, n] /= numpy.linalg.norm(A[:, n])
else:
raise ValueError("Invalid A_type: {}".format(self.A_type))
A_list.append(A)
signal_power += numpy.linalg.norm(A.dot(x[:, t])) ** 2
# Extract noise variance for desired SNR
sig2e = signal_power / (self.M * self.T) * 10 ** (-self.desired_SNR/10)
# Generate noisy measurements
y_list = []
for t in range(self.T):
e = numpy.sqrt(sig2e/2) * (
numpy.random.randn(self.M, 2).dot([1, 1j]))
y_list.append(
A[t].dot(x[:, t]) + e
)
return y_list, A_list, sig2e
|
997,981 | dc009dee1a4284bf16e96572971bda5157446677 | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Remove `managed = False` lines for those models you wish to give write DB access
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'
# into your database.
from __future__ import unicode_literals
from django.db import models
from datetime import datetime
class Fixture(models.Model):
fixture_id = models.AutoField(db_column='Fixture_id', primary_key=True) # Field name made lowercase.
tournament = models.ForeignKey('Tournament', db_column='Tournament_id') # Field name made lowercase.
number = models.IntegerField(db_column='Number') # Field name made lowercase.
info = models.TextField(db_column='Info', blank=True) # Field name made lowercase.
last_updated = models.DateTimeField(db_column='Last_updated',default=datetime.now) # Field name made lowercase.
Active = models.BooleanField(db_column='Status',default='true') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Fixture'
def __unicode__(self):
return str(self.fixture_id)
class Match(models.Model):
match_id = models.AutoField(db_column='Match_id', primary_key=True) # Field name made lowercase.
fixture = models.ForeignKey(Fixture, db_column='Fixture_id') # Field name made lowercase.
home = models.ForeignKey('Team', db_column='Home_id', related_name='match_home') # Field name made lowercase.
away = models.ForeignKey('Team', db_column='Away_id') # Field name made lowercase.
date = models.DateTimeField(db_column='Date', blank=True, null=True) # Field name made lowercase.
info = models.TextField(db_column='Info', blank=True) # Field name made lowercase.
last_updated = models.DateTimeField(db_column='Last_updated',default=datetime.now) # Field name made lowercase.
active = models.BooleanField(db_column='Status', default='true') # Field name made lowercase.
score_home = models.IntegerField(db_column='Score_home') # Field name made lowercase.
score_away = models.IntegerField(db_column='Score_away') # Field name made lowercase.
played = models.BooleanField(db_column='Played', default='false') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Match'
permissions = (
("usario_digitador", "Can change a match if it is a data entry user"),
)
def __unicode__(self):
return str(self.match_id)
class Privilege(models.Model):
privilege_id = models.IntegerField(db_column='Privilege_id', primary_key=True) # Field name made lowercase.
description = models.CharField(db_column='Description', max_length=80) # Field name made lowercase.
class Meta:
managed = False
db_table = 'Privilege'
class Team(models.Model):
team_id = models.AutoField(db_column='Team_id', primary_key=True) # Field name made lowercase.
tournament = models.ForeignKey('Tournament', db_column='Tournament_id') # Field name made lowercase.
name = models.CharField(db_column='Name', max_length=60) # Field name made lowercase.
e_mail = models.CharField(db_column='E-mail', max_length=45, blank=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
info = models.TextField(db_column='Info', blank=True) # Field name made lowercase.
last_updated = models.DateTimeField(db_column='Last_updated',default=datetime.now) # Field name made lowercase.
active = models.BooleanField(db_column='Status',default='true') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Team'
permissions = (
("view_team", "Can view a team"),
)
def __unicode__(self):
return self.name
class Tournament(models.Model):
tournament_id = models.AutoField(db_column='Tournament_id', primary_key=True) # Field name made lowercase.
name = models.CharField(db_column='Name', max_length=50) # Field name made lowercase.
date_start = models.DateTimeField(db_column='Date_start') # Field name made lowercase.
date_end = models.DateTimeField(db_column='Date_end', blank=True, null=True) # Field name made lowercase.
home_and_away = models.BooleanField(db_column='Home_and_away') # Field name made lowercase.
info = models.TextField(db_column='Info', blank=True) # Field name made lowercase.
last_updated = models.DateTimeField(db_column='Last_updated',default=datetime.now) # Field name made lowercase.
active = models.BooleanField(db_column='Status',default='True') # Field name made lowercase.
public = models.BooleanField(db_column='Public',default='True') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Tournament'
permissions = (
("view_tournament", "Can view a tournament"),
)
def __unicode__(self):
return self.name
class AuthGroup(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(unique=True, max_length=80)
class Meta:
managed = False
db_table = 'auth_group'
class AuthGroupPermissions(models.Model):
id = models.IntegerField(primary_key=True)
group = models.ForeignKey(AuthGroup)
permission = models.ForeignKey('AuthPermission')
class Meta:
managed = False
db_table = 'auth_group_permissions'
class AuthPermission(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=50)
content_type = models.ForeignKey('DjangoContentType')
codename = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'auth_permission'
class AuthUser(models.Model):
id = models.IntegerField(primary_key=True)
password = models.CharField(max_length=128)
last_login = models.DateTimeField()
is_superuser = models.IntegerField()
username = models.CharField(unique=True, max_length=30)
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.CharField(max_length=75)
is_staff = models.IntegerField()
is_active = models.IntegerField()
date_joined = models.DateTimeField()
class Meta:
managed = False
db_table = 'auth_user'
permissions = (
("add_dataentry", "Can add a user for data entry"),
("add_usuario_rep", "Can add a user that represents a team"),
("is_rep", "Can view a tournament even if it is private"),
)
class AuthUserGroups(models.Model):
id = models.IntegerField(primary_key=True)
user = models.ForeignKey(AuthUser)
group = models.ForeignKey(AuthGroup)
class Meta:
managed = False
db_table = 'auth_user_groups'
class AuthUserUserPermissions(models.Model):
id = models.IntegerField(primary_key=True)
user = models.ForeignKey(AuthUser)
permission = models.ForeignKey(AuthPermission)
class Meta:
managed = False
db_table = 'auth_user_user_permissions'
class DjangoAdminLog(models.Model):
id = models.IntegerField(primary_key=True)
action_time = models.DateTimeField()
user = models.ForeignKey(AuthUser)
content_type = models.ForeignKey('DjangoContentType', blank=True, null=True)
object_id = models.TextField(blank=True)
object_repr = models.CharField(max_length=200)
action_flag = models.IntegerField()
change_message = models.TextField()
class Meta:
managed = False
db_table = 'django_admin_log'
class DjangoContentType(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=100)
app_label = models.CharField(max_length=100)
model = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'django_content_type'
class DjangoSession(models.Model):
session_key = models.CharField(primary_key=True, max_length=40)
session_data = models.TextField()
expire_date = models.DateTimeField()
class Meta:
managed = False
db_table = 'django_session'
|
997,982 | a9fb5c229e72d086f976c014a516d4de1f15d3a0 | from urllib.request import quote
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import pre_save, post_save
from django.utils.text import slugify
from .utils import get_vid_for_direction
class VideoQuerySet(models.query.QuerySet):
    """Chainable filters over Video rows."""
    def active(self):
        # Only videos flagged as active (publicly visible).
        return self.filter(active=True)
    def featured(self):
        return self.filter(featured=True)
    def free_preview(self):
        return self.filter(free_preview=True)
    def embed_code(self):
        # Only videos that actually have a non-empty embed code to render.
        return self.filter(embed_code__isnull=False).exclude(embed_code__iexact="")
    def category(self, slug):
        # NOTE(review): raises Category.DoesNotExist for an unknown slug —
        # confirm callers expect that rather than an empty queryset.
        cat = Category.objects.get(slug=slug)
        return self.filter(category=cat.id)
class VideoManager(models.Manager):
    """Default manager exposing the active/embed-ready Video querysets."""
    def get_queryset(self):
        return VideoQuerySet(self.model, using=self._db)
    def get_featured(self):
        return self.get_queryset().featured().active()
    def get_by_category(self, slug):
        return self.get_queryset().active().embed_code().category(slug=slug)
    def all(self):
        # Overrides the stock all(): hides inactive videos and those
        # without an embed code.
        return self.get_queryset().active().embed_code()
DEFAULT_MESSAGE = "Check this awesome video."
class Video(models.Model):
    """A single video lesson grouped under a Category.

    `slug` is auto-populated from the title by video_post_save_receiver,
    keeping (slug, category) unique per the Meta constraint.
    """
    title = models.CharField(max_length=120)
    # Raw embed snippet from the video host; NULL/blank means not embeddable.
    embed_code = models.CharField(max_length=500, null=True, blank=True)
    # Manual sort position within a category (see Meta.ordering).
    order = models.PositiveIntegerField(default=1)
    share_message = models.TextField(max_length=140, default=DEFAULT_MESSAGE)
    tags = GenericRelation("TaggedItem", null=True, blank=True)
    active = models.BooleanField(default=True)
    slug = models.SlugField(null=True, blank=True)
    featured = models.BooleanField(default=False)
    free_preview = models.BooleanField(default=False)
    category = models.ForeignKey("Category")
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False, null=True)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True, null=True)
    objects = VideoManager()

    class Meta:
        ordering = ['order', 'timestamp']
        unique_together = ('slug', 'category')

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        """Canonical URL of this video within its category."""
        return reverse("categories:video_detail", kwargs={"cat_slug": self.category.slug, "slug": self.slug})

    def get_share_link(self):
        """URL-quoted share text: the share message followed by the full URL."""
        full_url = "{0}{1}".format(settings.FULL_DOMAIN_NAME, self.get_absolute_url())
        return quote("{0} {1}".format(self.share_message, full_url))

    def get_next_url(self):
        """URL of the next video, or None at the end (via get_vid_for_direction)."""
        video = get_vid_for_direction(self, "next")
        if video:
            return video.get_absolute_url()
        return None

    def get_previous_url(self):
        """URL of the previous video, or None at the start."""
        video = get_vid_for_direction(self, "previous")
        if video:
            return video.get_absolute_url()
        return None

    def has_preview(self):
        """True when the video can be watched without membership."""
        if self.free_preview:
            return True
        return False
def video_post_save_receiver(sender, instance, created, *args, **kwargs):
    """Populate Video.slug from the title after the first save.

    When the slugified title is already taken, the instance id is appended
    to keep the slug unique.  Only acts on initial creation, so the save()
    below (which re-fires this signal with created=False) cannot recurse.
    """
    if not created:
        return
    candidate = slugify(instance.title)
    if Video.objects.filter(slug=candidate):
        candidate = slugify("{0} {1}".format(instance.title, instance.id))
    instance.slug = candidate
    instance.save()
# Auto-generate Video.slug right after the first save.
post_save.connect(video_post_save_receiver, sender=Video)
class CategoryQuerySet(models.query.QuerySet):
    """Chainable filters for Category listings."""

    def active(self):
        return self.filter(active=True)

    def featured(self):
        return self.filter(featured=True)
class CategoryManager(models.Manager):
    """Manager for Category; all() is narrowed to active rows."""

    def get_queryset(self):
        return CategoryQuerySet(self.model, using=self._db)

    def get_featured(self):
        """Active categories flagged as featured."""
        return self.get_queryset().featured().active()

    def all(self):
        # NOTE: overrides Manager.all() to hide inactive categories by default.
        return self.get_queryset().active()
class Category(models.Model):
    """A grouping of videos; slug is auto-filled from the title on create."""
    title = models.CharField(max_length=120)
    description = models.TextField(null=True, blank=True)
    image = models.ImageField(upload_to='images/', null=True, blank=True)
    tags = GenericRelation("TaggedItem", null=True, blank=True)
    slug = models.SlugField(unique=True, default="abc")
    active = models.BooleanField(default=True)
    featured = models.BooleanField(default=False)
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)
    objects = CategoryManager()

    class Meta:
        ordering = ['title', 'timestamp']
        verbose_name = 'Category'
        verbose_name_plural = 'Categories'

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        """Canonical URL for this category's detail page."""
        return reverse("categories:detail", kwargs={"slug": self.slug})

    def get_image_url(self):
        """MEDIA_URL-prefixed path of the category image."""
        return "{0}{1}".format(settings.MEDIA_URL, self.image)
def category_post_save_receiver(sender, instance, created, *args, **kwargs):
    """Populate Category.slug from the title after the first save.

    When the slugified title collides with an existing slug, the instance
    id is appended for uniqueness.  Runs only on creation, so the nested
    save() cannot recurse.
    """
    if not created:
        return
    candidate = slugify(instance.title)
    if Category.objects.filter(slug=candidate):
        candidate = slugify("{0} {1}".format(instance.title, instance.id))
    instance.slug = candidate
    instance.save()
# Auto-generate Category.slug right after the first save.
post_save.connect(category_post_save_receiver, sender=Category)
# Closed set of allowed tag slugs for TaggedItem: (stored value, label).
TAG_CHOICES = (
    ("python", "python"),
    ("django", "django"),
    ("css", "css"),
    ("bootstrap", "bootstrap"),
)
class TaggedItem(models.Model):
    """A tag attached to any model instance via Django's contenttypes framework."""
    tag = models.SlugField(choices=TAG_CHOICES)
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    # Generic FK assembled from content_type + object_id.
    content_object = GenericForeignKey()

    def __str__(self):
        return self.tag
|
997,983 | 7b1e2894c30e9bc9b7d74af96b404a563eb00989 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_sublayersdialog.ui'
#
# Created by: PyQt5 UI code generator 5.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SublayersDialog(object):
    """Auto-generated (pyuic5) UI for the Interlis sublayer chooser.

    NOTE: produced from ui_sublayersdialog.ui — edit the .ui file and
    regenerate rather than hand-modifying behavior here.
    """

    def setupUi(self, SublayersDialog):
        """Build the widget tree: a flat tree widget plus OK/Cancel buttons."""
        SublayersDialog.setObjectName("SublayersDialog")
        SublayersDialog.resize(400, 300)
        self.gridLayout = QtWidgets.QGridLayout(SublayersDialog)
        self.gridLayout.setObjectName("gridLayout")
        self.mSublayersTreeWidget = QtWidgets.QTreeWidget(SublayersDialog)
        self.mSublayersTreeWidget.setRootIsDecorated(False)
        self.mSublayersTreeWidget.setItemsExpandable(False)
        self.mSublayersTreeWidget.setObjectName("mSublayersTreeWidget")
        self.mSublayersTreeWidget.headerItem().setText(0, "1")
        self.gridLayout.addWidget(self.mSublayersTreeWidget, 0, 0, 1, 1)
        self.buttonBox = QtWidgets.QDialogButtonBox(SublayersDialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)
        self.retranslateUi(SublayersDialog)
        self.buttonBox.accepted.connect(SublayersDialog.accept)
        self.buttonBox.rejected.connect(SublayersDialog.reject)
        QtCore.QMetaObject.connectSlotsByName(SublayersDialog)

    def retranslateUi(self, SublayersDialog):
        """Apply translatable strings (window title)."""
        _translate = QtCore.QCoreApplication.translate
        SublayersDialog.setWindowTitle(_translate("SublayersDialog", "Interlis sublayers"))
|
997,984 | aa62987f1ef2b3e511fc5e64673eb9d63017183e | import numpy as np
from config_loader import config
class Block:
    """A falling tetromino: a list of [row, col] offsets (`shape`) relative
    to a [row, col] `anchor` on the playing field."""

    def __init__(self, spawn_point):
        self.FIELDWIDTH = config['FIELD-WIDTH']
        self.FIELDHEIGHT = config['FIELD-HEIGHT']
        p_shapes = self._possible_shapes()
        # Pick a random tetromino; keep the index around for debugging.
        self.shape_id = np.random.randint(0, len(p_shapes))
        self.shape = p_shapes[self.shape_id]
        #self.shape = p_shapes[-1]
        self.anchor = spawn_point
        # Bounding box of the shape (offsets are zero-based, hence +1).
        self.WIDTH = np.max(self.shape, axis=0)[1] + 1
        self.HEIGHT = np.max(self.shape, axis=0)[0] + 1
        self.COLOR = self._get_color()

    def get_anchor(self):
        return self.anchor

    def get_shape(self):
        return self.shape

    def set_anchor(self, x, y):
        self.anchor = [x, y]

    def _possible_shapes(self):
        """All seven tetromino layouts as [row, col] offset lists."""
        shapes = [
            [[0, 0], [0, 1], [0, 2], [0, 3]],
            [[0, 0], [0, 1], [1, 1], [1, 2]],
            [[0, 0], [1, 0], [1, 1], [2, 1]],
            [[0, 0], [0, 1], [1, 0], [1, 1]],
            [[0, 0], [0, 1], [0, 2], [1, 1]],
            [[0, 0], [0, 1], [0, 2], [1, 2]],
            [[0, 0], [0, 1], [0, 2], [1, 0]],
        ]
        return shapes

    def get_coords(self):
        """Absolute field coordinates of every cell of this block."""
        coords = [[self.anchor[0] + x, self.anchor[1] + y] for [x, y] in self.shape]
        return coords

    def remove_coord(self, coord):
        """Remove the cell at absolute field coordinate `coord` from the shape.

        BUGFIX: a cell's offset is coord - anchor (get_coords adds the anchor
        to each offset).  The previous `anchor - coord` sign was inverted and
        raised ValueError for every non-symmetric offset.
        """
        x = coord[0] - self.anchor[0]
        y = coord[1] - self.anchor[1]
        self.shape.remove([x, y])

    def _get_color(self):
        """Pick a random display color for this block."""
        pos_colors = ["red", "yellow", "green", "white", "grey", "magenta", "cyan"]
        return pos_colors[np.random.randint(0, len(pos_colors))]

    def set_shape(self, shape):
        """Replace the shape (deep-copied) and refresh the bounding box."""
        self.shape = []
        for [x, y] in shape:
            self.shape.append([x, y])
        self.WIDTH = np.max(self.shape, axis=0)[1] + 1
        self.HEIGHT = np.max(self.shape, axis=0)[0] + 1

    def rotate(self):
        """Return the shape rotated 90° clockwise (does not mutate self)."""
        # Rasterize the shape into a HEIGHT x WIDTH 0/1 grid...
        original = []
        for i in range(self.HEIGHT):
            row = []
            for j in range(self.WIDTH):
                if [i, j] in self.shape:
                    row.append(1)
                else:
                    row.append(0)
            original.append(row)
        # ...rotate the grid, then convert back to offset pairs.
        rotated = list(zip(*original[::-1]))
        new_shape = []
        for x, row in enumerate(rotated):
            for y, value in enumerate(row):
                if value == 1:
                    new_shape.append([x, y])
        return new_shape
|
997,985 | 4978e75295e4f0f545b6565ca73863ef80d259fe | #!/usr/bin/env python
import re
import sys
import time
import requests
import logging
import hookio.keys
from six.moves.html_parser import HTMLParser
from six.moves.html_entities import name2codepoint
from six import unichr
# Module-level logger; configuration is left to the embedding application.
log = logging.getLogger(__name__)
# How many times test_roles() retries request_roles() before giving up.
max_retries = 3
class RolesParser(HTMLParser):
    """HTML parser that collects role names from hook.io's /roles page.

    Roles are the text contents of the <div>s nested inside a
    <div class="availableRoles"> container; `roles` holds the last
    fully-parsed list.
    """
    # Parser state: published roles, roles being collected, div-nesting
    # stack, and the character buffer used between save_bgn()/save_end().
    roles = newroles = states = savedata = None

    def handle_starttag(self, tag, attrs):
        if tag.lower() != 'div':
            return
        # Opening the container div starts a fresh collection.
        if attrs and any(n.lower() == 'class' and 'availableRoles' in v.split() for n, v in attrs):
            log.debug('Start processing roles')
            self.newroles = []
            self.states = ['top']
            return
        # Any div inside the container wraps a single role name.
        if self.newroles is not None:
            log.debug('Pushing div for role %d', len(self.newroles))
            self.states.append('role')
            self.save_bgn()

    def handle_endtag(self, tag):
        if tag.lower() != 'div':
            return
        if self.states is None:
            return
        tp = self.states.pop()
        assert tp in ('role', 'top')
        if tp == 'role':
            role = self.save_end()
            log.debug('Found role %r', role)
            self.newroles.append(role)
            return
        if tp == 'top':
            # Container closed: publish the collected list and reset state.
            assert not self.states
            self.roles = self.newroles
            log.debug('Processed roles: %r', self.roles)
            self.newroles = None
            self.states = None

    def handle_data(self, data):
        # Accumulate character data only while a role div is open.
        if self.savedata is None:
            return
        self.savedata = self.savedata + data

    def handle_entityref(self, name):
        # Decode named entities (e.g. &amp;) into the buffer.
        if self.savedata is None:
            return
        c = unichr(name2codepoint[name])
        log.debug("Named ent: &%s; = %r", name, c)
        self.handle_data(c)

    def handle_charref(self, name):
        # Decode numeric character references (&#65; / &#x41;).
        if self.savedata is None:
            return
        if name[:1].lower() in ('x', 'u'):
            c = unichr(int(name[1:], 16))
        else:
            c = unichr(int(name))
        log.debug("Num ent: &%s; = %r", name, c)
        self.handle_data(c)

    def save_bgn(self):
        """Begins saving character data in a buffer.
        Retrieve the stored data via the save_end() method. Use of the
        save_bgn() / save_end() pair may not be nested.
        """
        self.savedata = ''

    def save_end(self):
        """Ends buffering character data and returns all data saved since
        the preceding call to the save_bgn() method.
        """
        data = self.savedata
        self.savedata = None
        return data
def test_roles(cache):
    """Assert that the live hook.io role list matches hookio.keys.roles.

    `cache` must expose get/set; scraped roles are cached under the key
    'hook.io/roles' for 24 hours to avoid re-fetching on every run.
    """
    roles = cache.get('hook.io/roles', None)
    if roles is not None:
        t, roles = roles
        # Expire cached entries older than one day.
        if time.time() - t > 86400:
            roles = None
    if roles is None:
        for i in range(max_retries):
            try:
                roles = request_roles()
            except Exception:
                # Re-raise only once the final retry has failed.
                if i == max_retries-1:
                    raise
                continue
            cache.set('hook.io/roles', [time.time(), roles])
            break
    assert roles == sorted(hookio.keys.roles)
def request_roles():
    """Fetch and return the sorted, de-duplicated role names from hook.io.

    Scrapes https://hook.io/roles for tokens of the form 'name::sub'
    appearing between tags.  Raises for HTTP errors; asserts at least one
    role was found.

    The dead `if False:` branch that held an unused RolesParser-based
    extraction path has been removed (RolesParser above remains available
    for a stricter HTML walk).
    """
    r = requests.get('https://hook.io/roles')
    r.raise_for_status()
    roles = sorted(set(re.findall('>\\s*(\\w+(?:::\\w+)+)\\s*<', r.text)))
    assert roles
    return roles
if __name__=='__main__':
    # Ad-hoc check: print the scraped roles, then whether they match
    # hookio.keys.roles (True/False on stderr).
    roles = request_roles()
    print('\n'.join(roles))
    sys.stderr.write('%s\n' % (hookio.keys.roles == roles,))
|
997,986 | 1eabe6a9b2439b90e81235744a5f50faf8d51be0 | import functools
import logging
import os
import pandas as pd
import time
from dotenv import load_dotenv
from luigi import Target
from pathlib import Path
from pymongo import MongoClient, ASCENDING, DESCENDING
from utils.constants import ENV_PATH, TRENDS_DIR
load_dotenv(dotenv_path=ENV_PATH)
# Connection settings come from the environment (.env via python-dotenv);
# missing keys raise KeyError at import time on purpose.
MONGO_SERVER = os.environ['MONGO_SERVER']
# Mongo requires a server port of type int.
MONGO_PORT = int(os.environ['MONGO_PORT'])
MONGO_DATABASE = os.environ['MONGO_DATABASE']
# quick utility functions
def connect_db(server=MONGO_SERVER, port=MONGO_PORT):
    """Open and return a MongoClient for the configured server/port."""
    return MongoClient(server, port)
def get_database(client, db=MONGO_DATABASE):
    """Return the named database handle from an open client."""
    return client[db]
def get_collection(client, col, db=MONGO_DATABASE):
    """Return collection `col` inside database `db`."""
    return client[db][col]
def post_document(data, col):
    """Insert a single document into collection `col`.

    Returns the driver's insert result.  Raises TypeError when `data` is
    not a dict — the previous `assert` disappears under `python -O`, so an
    explicit exception is used for input validation instead.
    """
    if not isinstance(data, dict):
        raise TypeError("data must be a dict, got %s" % type(data).__name__)
    result = col.insert_one(data)
    return result
def retrieve_all_data(col, limit=3500):
    """Newest-first documents from `col` (sorted on 'datestamp'), capped at `limit`."""
    cursor = col.find().sort('datestamp', DESCENDING).limit(limit)
    return list(cursor)
def find_by_id(col, idx):
    """Return the single document whose _id equals `idx`, or None."""
    return col.find_one({'_id': idx})
def find_by_luigi_at(col, dt):
    """Cursor over documents whose scope.luigi_at equals `dt`."""
    return col.find({'scope.luigi_at': dt})
def find_by_luigi_loc(col, loc):
    """Cursor over documents whose scope.luigi_loc equals `loc`."""
    return col.find({'scope.luigi_loc': loc})
|
997,987 | 0af39d5277243cb274a959f2ccd3fec7ae374600 | from nose.tools import assert_equal
from pbfalcon import gen_config
import pprint
def test_get_falcon_overrides():
    """get_falcon_overrides parses both semicolon-delimited and
    newline/ini-style override text into the same dict.
    """
    expected = "{'one': 'two', 'three': 'four'}"
    cases = [
        'three=four; one = two;',
        # It should work even if the text is not semicolon delimited,
        # just in case.
        '\n[General]\nthree=four\none = two\n',
    ]
    for text in cases:
        overrides = gen_config.get_falcon_overrides(text, 'foo')
        got = pprint.pformat(overrides)
        assert_equal(expected, got)
|
997,988 | 39b5a9b3786d9e263dccb10622850ad6165b398f | import os
import argparse
import utils.drive_utils as drive_utils
def main(params) -> None:
    """Download model checkpoint(s) from Google Drive per parsed CLI args.

    --init only builds/authorizes the Drive service; --all fetches every
    checkpoint; otherwise a single (dataset, noise_std, method) checkpoint
    is downloaded into params.out_dir.
    """
    service = drive_utils.build_service("ro")  # read-only Drive scope
    if params.init:
        return
    if params.all:
        drive_utils.download_all(service, params.out_dir, verbose=not params.silent)
        return
    print(str(params.method))
    drive_utils.download_checkpoint(service, str(params.dataset), params.noise_std,
                                    str(params.method), params.out_dir, verbose=not params.silent)
if __name__ == '__main__':
    parent_parser = argparse.ArgumentParser(add_help=False)
    parent_parser = drive_utils.add_download_args(parent=parent_parser)
    args = parent_parser.parse_args()
    # LAG checkpoints only exist for the FFHQ dataset.
    if args.method == drive_utils.PossibleMethods.lag and args.dataset != drive_utils.PossibleDatasets.ffhq:
        parent_parser.error("the LAG method is supported for the FFHQ dataset only.\nAborting")
    # Refuse --init when a cached OAuth token already exists on disk.
    if args.init and (os.path.exists('token.json') or os.path.exists('token.pickle')):
        parent_parser.error("token.{pickle, json} detected. No need for additional init.\nAborting")
    main(args)
|
997,989 | bce41a3d9031305c227db03b0a6be07943b9fa3e | #!/usr/bin/python
import os
# Prints a greeting, then shells out to `ipconfig` (Windows network
# configuration dump; a no-op/error on non-Windows systems).
print ("Hello Fuck")
os.system('ipconfig')
|
997,990 | 5732c007daae78bb0b6e3103345ccd3c60331372 | from flask import Flask, render_template, json, request, redirect
from flaskext.mysql import MySQL
from werkzeug import generate_password_hash, check_password_hash
from flask import session
app = Flask(__name__)
mysql = MySQL()
#MySql configurations
# SECURITY NOTE(review): database credentials are hard-coded; move them to
# environment variables or a config file before deploying.
app.config['MYSQL_DATABASE_USER']='root'
app.config['MYSQL_DATABASE_PASSWORD']='fafhrd123'
app.config['MYSQL_DATABASE_DB']='Iedapp1'
app.config['MYSQL_DATABASE_HOST']='localhost'
mysql.init_app(app)
@app.route("/")
def main():
    """Landing page."""
    return render_template('index.html')
@app.route('/showSignUp')
def showSignup():
    """Sign-up form page."""
    return render_template('signup.html')
@app.route('/signUp', methods = ['POST'])
def signUp():
    """Create a user from the posted sign-up form via sp_createUser.

    Returns JSON: {'message': ...} on success, {'error': ...} on failure,
    or {'html': ...} when required fields are missing.

    Fixes: `len(data) is 0` relied on CPython small-int identity — `== 0`
    is the correct comparison; `cursor`/`conn` could be unbound in the
    `finally` block whenever validation failed or the connection could not
    be opened, turning every error into a NameError.
    """
    conn = None
    cursor = None
    try:
        # read the posted values from the UI
        _userid = request.form['inputUserid']
        _name = request.form['inputName']
        _email = request.form['inputEmail']
        _password = request.form['inputPassword']
        if _name and _email and _password:
            conn = mysql.connect()
            cursor = conn.cursor()
            # SECURITY NOTE(review): the hash is computed but the *plaintext*
            # password is what gets stored (and what validateLogin compares);
            # the whole pipeline should switch to the hashed value.
            _hashed_password = generate_password_hash(_password)
            cursor.callproc('sp_createUser',(_userid,_name,_email,_password))
            data = cursor.fetchall()
            if len(data) == 0:
                conn.commit()
                return json.dumps({'message':'User created successfully !'})
            else:
                return json.dumps({'error':str(data[0])})
        else:
            return json.dumps({'html':'<span>Enter the required fields</span>'})
    except Exception as e:
        return json.dumps({'error':str(e)})
    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
@app.route('/validateLogin',methods=['POST'])
def validateLogin():
    """Check posted credentials via sp_validateLogin and start a session.

    Fixes: `cursor`/`con` could be unbound in the `finally` block when the
    connection (or form access) failed, masking the real error with a
    NameError.
    """
    con = None
    cursor = None
    try:
        _username = request.form['inputEmail']
        _password = request.form['inputPassword']
        # connect to mysql
        con = mysql.connect()
        cursor = con.cursor()
        cursor.callproc('sp_validateLogin',(_username,))
        data = cursor.fetchall()
        # SECURITY NOTE(review): plaintext password comparison — switch to
        # check_password_hash once sign-up stores hashed passwords.
        if len(data) > 0:
            if (str(data[0][3])==_password):
                session['user'] = data[0][0]
                return redirect('/userHome')
            else:
                return render_template('error.html',error = 'Wrong Email address or Password.')
        else:
            return render_template('error.html',error = 'Wrong Email address or Password.')
    except Exception as e:
        print(str(e))
        return render_template('error.html',error = str(e))
    finally:
        if cursor is not None:
            cursor.close()
        if con is not None:
            con.close()
@app.route('/showSignin')
def showSignin():
    """Sign-in form page."""
    return render_template('signin.html')
@app.route('/userHome')
def userHome():
    """Member home page; anonymous visitors get the error page."""
    if not session.get('user'):
        return render_template('error.html',error = 'Unauthorized Access')
    return render_template('userHome.html')
@app.route('/logout')
def logout():
    """Clear the session user and return to the landing page."""
    session.pop('user',None)
    return redirect('/')
@app.route('/getWish')
def getWish():
    """Return the logged-in user's rows from sp_GetWishByUser as JSON.

    NOTE(review): the key names ('nam', 'usernam', 'passwor') mirror the
    stored procedure's column order, and the password column is sent to
    the client — confirm this exposure is intended.
    """
    try:
        if session.get('user'):
            _user = session.get('user')
            print("HOOOOLAAAAAAAAAAAA")  # leftover debug output
            con = mysql.connect()
            cursor = con.cursor()
            cursor.callproc('sp_GetWishByUser',(_user,))
            wishes = cursor.fetchall()
            print(wishes)
            # Convert row tuples into dicts keyed by column meaning.
            wishes_dict = []
            for wish in wishes:
                wish_dict = {
                    'Id': wish[0],
                    'nam': wish[1],
                    'usernam': wish[2],
                    'passwor': wish[3],
                    'money':wish[4],
                    'reward':wish[5]}
                wishes_dict.append(wish_dict)
            print(json.dumps(wishes_dict))
            return json.dumps(wishes_dict)
        else:
            return render_template('error.html', error = 'Unauthorized Access')
    except Exception as e:
        return render_template('error.html', error = str(e))
@app.route('/Showadd')
def showaddup():
    """Top-up form page."""
    return render_template('sysadd.html')
@app.route('/add', methods = ['POST'])
def addUp():
    """Subtract money / add reward points for a user via sp_add.

    Fixes: `len(data) is 0` identity comparison replaced with `== 0`;
    `cursor`/`conn` guarded in `finally` (previously unbound — and raising
    NameError — whenever validation failed); dropped an unused
    generate_password_hash call and a leftover debug print.
    """
    conn = None
    cursor = None
    try:
        # read the posted values from the UI
        _userid = request.form['inputUserid']
        _password = request.form['inputPassword']
        _monsub = request.form['submoney']
        _reward = request.form['addpoints']
        if _userid and _password:
            conn = mysql.connect()
            cursor = conn.cursor()
            cursor.callproc('sp_add',(_monsub,_reward,_userid))
            data = cursor.fetchall()
            if len(data) == 0:
                conn.commit()
                return json.dumps({'message':'Transaction Succesfull!'})
            else:
                return json.dumps({'error':str(data[0])})
        else:
            return json.dumps({'html':'<span>Enter the required fields</span>'})
    except Exception as e:
        return json.dumps({'error':str(e)})
    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
@app.route('/add2', methods = ['POST'])
def addUp2():
    """Transfer money from the logged-in user to another (sp_addtoanother).

    Fixes: `len(data) is 0` identity comparison replaced with `== 0`;
    `cursor`/`conn` guarded in `finally` (previously unbound — and raising
    NameError — whenever validation failed); dropped leftover debug prints.
    """
    conn = None
    cursor = None
    try:
        # read the posted values from the UI
        _username = request.form['inputEmail']
        _monsub = request.form['submoney']
        if _username:
            _user = session.get('user')
            conn = mysql.connect()
            cursor = conn.cursor()
            cursor.callproc('sp_addtoanother',(_user,_monsub,_username))
            data = cursor.fetchall()
            if len(data) == 0:
                conn.commit()
                return json.dumps({'message':'Transfer Succesfull!'})
            else:
                return json.dumps({'error':str(data[0])})
        else:
            return json.dumps({'html':'<span>Enter the required fields</span>'})
    except Exception as e:
        return json.dumps({'error':str(e)})
    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
if __name__ == '__main__':
    # SECURITY NOTE(review): hard-coded secret key and debug=True must not
    # ship to production (debug mode enables the Werkzeug remote console).
    app.secret_key = 'why would I tell you my secret key?'
    app.run(host='0.0.0.0', threaded=True, debug=True, port=5000)
997,991 | bb5c574e9480773b787bbbd7a75416fa6cc1ba24 | import numpy as np
# Fix the global NumPy RNG so training runs are reproducible.
np.random.seed(2020)
from multiprocessing import Pool
from tqdm import tqdm
import time
from controller.Controller import *
from settings import *
from controller.ES_trainer import ES_abstract
from scipy import stats
class NES_trainer(ES_abstract):
    def __init__(self, loss_function, pop_size, learn_rate, args, theta_init=None):
        """
        Natural Evolution Strategy algorithm
        :param loss_function (function) rollout of the Neurosmash environment, function to be optimized
        :param pop_size (float) population size
        :param p_theta (function) probability density function of theta
        :param learn_rate (float) learning rate
        :param args
        :param theta_init (array, optional) packed [mu, upper-triangle of Sigma] start vector
        """
        super().__init__(loss_function, pop_size, args)
        if theta_init is None:
            self.weights = np.random.normal(0, 1, self.dim) # weights = mu
            self.cov_matrix = np.eye(self.dim)
            # theta packs the mean followed by the upper triangle of Sigma.
            sigma_values = np.triu(self.cov_matrix)[np.triu_indices(self.dim)]
            self.theta = np.append(self.weights, sigma_values)
        else:
            print(theta_init)
            self.theta = theta_init
            self.weights = self.get_mu(theta_init)
            self.cov_matrix = self.get_Sigma(theta_init)
        self.grad = self.grad_log_mvn_pdf
        self.learn_rate = learn_rate
        # Search distribution N(mu, Sigma @ Sigma.T) parameterized by theta.
        self.p_theta = lambda theta: stats.multivariate_normal(mean=self.get_mu(theta).flatten(), cov=self.get_Sigma(theta)@self.get_Sigma(theta).T)

    def train(self, n_iter, parallel=False, verbose=True):
        '''
        Natural Evolution Strategy algorithm
        :param n_iter (int) number of iterations
        :param parallel (bool) If true, run the Neurosmash environment in parallel
        :return trained controller (Controller) trained controller object with trained weights
        :return fitness (matrix) fitness score for each member for each iteration
        '''
        pop_size, learn_rate = self.pop_size, self.learn_rate
        p_theta, theta = self.p_theta, self.theta
        grad = self.grad
        reward = np.zeros((n_iter,4)) # score of i) mean weight ii) best performer iii) worst performer iv) sampled population average
        print('using NES trainer for controller')
        tic = time.perf_counter()
        for i in tqdm(range(n_iter)):
            # Reset gradient and Fisher matrix
            # (dimension = len(mu) + number of upper-triangular Sigma entries)
            grad_J = np.zeros((self.dim+np.sum(np.arange(self.dim+1)), 1))
            F = np.zeros((self.dim+np.sum(np.arange(self.dim+1)), self.dim+np.sum(np.arange(self.dim+1))))
            # Ensure covariance matrix is positive-semidefinite
            # while not self.is_pos_def(self.get_Sigma(theta)):
            #     Sigma = self.get_Sigma(theta)
            #     Sigma += np.eye(self.dim) * 0.1
            #     theta = self.get_theta_from_cov(Sigma, theta)
            #     print(f'{i} Matrix not positive semi-definite')
            # Plan B: Sample from A, where A=Sigma @ Sigma.T, therefore
            # Specify current pi( | theta) and current gradient
            cur_p_theta = p_theta(theta)
            cur_grad = grad(theta)
            # Gradient descent
            fitness = np.zeros(pop_size)
            for j in tqdm(range(pop_size)):
                # Sample a candidate weight vector and evaluate it in the env.
                x = np.reshape(cur_p_theta.rvs(), (self.dim, 1))
                controller = Controller(self.args)
                controller.set_weight_array(x)
                fitness[j] = self.loss_func(controller, r_rounds=2)
                # Monte-Carlo estimates of the score-weighted gradient and
                # the Fisher information matrix.
                log_der = np.reshape(cur_grad(x), (len(theta), 1))
                grad_J += fitness[j] * log_der / pop_size
                F += log_der @ log_der.T / pop_size
            # Natural-gradient ascent step: F^-1 @ grad_J.
            theta += learn_rate * (np.linalg.inv(F) @ grad_J).flatten()
            reward[i] = self.get_reward_stats(theta[:self.dim], fitness)
            print(f'Mean reward: {reward[i,0]}')
            print(f'Best reward: {reward[i,1]}')
            print(f'Worst reward: {reward[i,2]}')
            print(f'Sampled population reward: {reward[i,3]}')
            # Save parameters
            np.save(self.args.path_to_ctrl_params+f"NES{i}", theta)
        toc = time.perf_counter()
        print(f'Duration of training the controller: {toc - tic:0.4f} seconds')
        self.theta = theta
        self.weights = self.get_mu(theta)
        self.cov_matrix = self.get_Sigma(theta)
        trained_controller = Controller(self.args)
        trained_controller.set_weight_array(self.weights)
        return trained_controller, reward

    def grad_log_mvn_pdf(self, theta):
        '''
        theta is of the form: [mu1, mu2, sigma1 ^ 2, rho * sigma1 * sigma2, sigma2 ^ 2]
        Returns a function x -> gradient of log N(x | mu, Sigma) w.r.t. theta.
        '''
        Sigma_inv = np.linalg.inv(self.get_Sigma(theta))
        mu = self.get_mu(theta)
        mu_grad = lambda x: Sigma_inv @ (x - mu)
        Sigma_grad = lambda x: 0.5 * (Sigma_inv @ (x - mu) @ (x - mu).T @ Sigma_inv - Sigma_inv)
        # Flattened indices of the upper-triangular Sigma entries, matching
        # the packing order used in get_Sigma / get_theta_from_cov.
        idx_matrix = np.arange(1, (self.dim * self.dim) + 1).reshape((self.dim, self.dim))
        idx = np.triu(idx_matrix).flatten()
        idx = idx[idx > 0] - 1
        return lambda x: np.concatenate([mu_grad(x).flatten(), Sigma_grad(x).flatten()[idx]])

    def get_mu(self, theta):
        # First `dim` entries of theta are the mean, as a column vector.
        return np.reshape(theta[:self.dim], (self.dim, 1))

    def get_Sigma(self, theta):
        # Unpack the upper-triangular entries into a symmetric matrix.
        Sigma_values = theta[self.dim:]
        Sigma = np.zeros((self.dim,self.dim))
        ind_u = np.triu_indices(self.dim)
        Sigma[ind_u] = Sigma_values
        Sigma = Sigma + Sigma.transpose() - np.diag(np.diag(Sigma))
        return Sigma

    def get_theta_from_cov(self, Sigma, theta):
        """
        Takes a full symetrical covariance matrix and returns it in array without symmetrical weights
        :param Sigma: covariance matrix
        :return: theta: array of all trainable parameters
        """
        sigma_values = np.triu(Sigma)
        ind_u = np.triu_indices(self.dim)
        sigma_values= sigma_values[ind_u]
        return np.append(self.get_mu(theta), sigma_values)

    def is_pos_def(self, x):
        # Positive-definiteness check via eigenvalues.
        return np.all(np.linalg.eigvals(x) > 0)
|
997,992 | b9ca62d26862f952dcf88d5b507485dad74434cc | import tensorflow as tf
import pickle
import numpy as np
import datetime
from Match_LSTM import MatchLSTM
from Rnet import Rnet
import json
import os
import DataUtils
from nltk.tokenize import word_tokenize
from tqdm import *
from sklearn.metrics import accuracy_score
import random
# Command-line configuration (tf.flags); FLAGS is read throughout the file.
tf.flags.DEFINE_string("mode", "pretrained", "pretrained/tranfer")
# Training hyperparameter config
tf.flags.DEFINE_integer("batch_size", 64, "batch size")
tf.flags.DEFINE_integer("epochs", 160, "epochs")
tf.flags.DEFINE_float("learning_rate", 1e-3, "learning rate")
tf.flags.DEFINE_float("grad_clip", 5.0, "")
# LSTM config
tf.flags.DEFINE_integer("hidden_layer", 300, "")
# "pad" is the maximum context length in tokens (see load_set / vectorize).
tf.flags.DEFINE_integer("pad", 610, "")
tf.flags.DEFINE_float("dropout", 0.3, "")
tf.flags.DEFINE_string("Ddim", "2", "")
tf.flags.DEFINE_boolean("bidi", True, "")
tf.flags.DEFINE_string("rnnact", "tanh", "")
tf.flags.DEFINE_string("bidi_mode", "concatenate", "")
tf.flags.DEFINE_boolean("use_cudnn", True, "")
# word vector config
tf.flags.DEFINE_string(
    "embedding_path", "glove.6B.300d.txt", "word embedding path")
# Tensorflow config
tf.flags.DEFINE_integer("num_checkpoints", 5,
                        "Number of checkpoints to store (default: 5)")
tf.flags.DEFINE_string("out_dir", "runs/", "path to save checkpoint")
tf.flags.DEFINE_boolean("allow_soft_placement", True,
                        "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False,
                        "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
def convert_idx(text, tokens):
    """Map each token to its (start, end) character span inside `text`.

    Tokens are located left-to-right with str.find resuming after the
    previous match, so repeated tokens resolve to successive occurrences.
    NLTK emits `` and '' for quotation marks; both are searched as '"'.

    Raises ValueError when a token cannot be located (previously printed a
    message and raised a bare Exception, which callers could not target).
    """
    current = 0
    spans = []
    for token in tokens:
        if token == "``" or token == "''":
            token = '"'
        current = text.find(token, current)
        if current < 0:
            raise ValueError("Token {} cannot be found".format(token))
        spans.append((current, current + len(token)))
        current += len(token)
    return spans
def load_data_from_file(dsfile):
    """Read a tab-separated QA dataset: question \\t sentence \\t label.

    Returns (questions, sentences) as parallel lists of lower-cased token
    lists.  The previous version also accumulated per-item lengths and
    labels that were never returned; that dead work has been dropped.
    """
    q = []      # tokenized questions
    sents = []  # tokenized candidate sentences
    with open(dsfile) as f:
        for l in f:
            fields = l.strip().split("\t")
            q.append(word_tokenize(fields[0].lower()))
            sents.append(word_tokenize(fields[1].lower()))
    return q, sents
def make_model_inputs(qi, si, q, sents, y):
    """Bundle model inputs into a single dict keyed by input name."""
    return dict(qi=qi, si=si, q=q, sents=sents, y=y)
def load_set(fname, vocab=None, iseval=False):
    """Load a SQuAD json file and vectorize it against `vocab`.

    When `iseval` is False and no vocab is given, a new Vocabulary is
    seeded from the SemEval training pairs; in either non-eval case the
    vocab is extended with the tokens of every example in `fname`.

    Returns (inputs, y) for eval sets, or (inputs, y, vocab) otherwise.
    """
    examples = load_data_SQUAD(fname)
    if not iseval:
        if vocab == None:
            # Bootstrap the vocabulary from the SemEval training pairs.
            q, sents = load_data_from_file("SemEval/train.txt")
            vocab = DataUtils.Vocabulary(q + sents)
            update = []
            for e in examples:
                update += [e["context_tokens"], e["ques_tokens"]]
            vocab.update(update)
        else:
            update = []
            for e in examples:
                update += [e["context_tokens"], e["ques_tokens"]]
            vocab.update(update)
    pad = FLAGS.pad
    qis, sis, q, sents, y = [], [], [], [], []
    for e in examples:
        qi = e["ques_tokens"]
        si = e["context_tokens"]
        qis.append(qi)
        sis.append(si)
        q.append(e["ques_tokens"])
        sents.append(e["context_tokens"])
        y.append(e["y"])
    # Questions padded/truncated to 50 tokens, contexts to FLAGS.pad.
    qis = vocab.vectorize(qis, 50)
    sis = vocab.vectorize(sis, pad)
    inp = make_model_inputs(qis, sis, q, sents, y)
    if iseval:
        return (inp, y)
    else:
        return (inp, y, vocab)
def load_data_SQUAD(filename):
    """Parse a SQuAD v1.1 json file into shuffled training examples.

    Each example holds the tokenized context and question plus
    y = [start_token_idx, end_token_idx] of the first reference answer.
    """
    examples = []
    total = 0
    with open(filename, "r") as fh:
        source = json.load(fh)
        for article in tqdm(source["data"]):
            for para in article["paragraphs"]:
                # Normalize tokenizer quote artifacts before tokenizing.
                context = para["context"].replace(
                    "''", "' ").replace("``", "' ").lower()
                context_tokens = word_tokenize(context)
                spans = convert_idx(context, context_tokens)
                for qa in para["qas"]:
                    total += 1
                    ques = qa["question"].replace(
                        "''", '" ').replace("``", '" ').lower()
                    ques_tokens = word_tokenize(ques)
                    ques_chars = [list(token) for token in ques_tokens]
                    # Only the first reference answer is used.
                    answer = qa["answers"][0]
                    answer_text = answer["text"]
                    answer_start = answer['answer_start']
                    answer_end = answer_start + len(answer_text)
                    answer_span = []
                    # Token indices whose character span overlaps the answer.
                    for idx, span in enumerate(spans):
                        if not (answer_end <= span[0] or answer_start >= span[1]):
                            answer_span.append(idx)
                    y1, y2 = answer_span[0], answer_span[-1]
                    y = [y1,y2]
                    example = {"context_tokens": context_tokens, "ques_tokens": ques_tokens,
                               "y":y,}
                    # Flag answers that end beyond the model's context pad
                    # (610 matches the FLAGS.pad default above).
                    if y2 >= 610:
                        print(context)
                        print(y2)
                        print(len(context))
                    examples.append(example)
    random.shuffle(examples)
    print("{} questions in total".format(len(examples)))
    return examples
def read_data(trainf, valf):
    """Load train/dev SQuAD files into module-level globals.

    `vocab` is built while loading the training split and then reused to
    vectorize the dev split.
    """
    global vocab, inp_tr, inp_val, inp_test, y_train, y_val, y_test
    inp_tr, y_train, vocab = load_set(trainf, iseval=False)
    inp_val, y_val = load_set(valf, vocab=vocab, iseval=True)
def train_step(sess, model, data_batch):
    """Run one optimization step on a (questions, contexts, spans) batch.

    NOTE(review): this definition is immediately shadowed by an identical
    `train_step` defined right below it — one of the two should be removed.
    """
    q_batch, s_batch, y_batch = data_batch
    feed_dict = {
        model.queries : q_batch,
        #model.queries_length : ql_batch,
        model.hypothesis : s_batch,
        #model.hypothesis_length : sl_batch,
        model.dropout : FLAGS.dropout,
        model.y_SQUAD : y_batch
    }
    _, loss = sess.run([model.train_op_SQUAD, model.loss_SQUAD], feed_dict=feed_dict)
    return loss
def train_step(sess, model, data_batch):
    """Run one optimization step on a (questions, contexts, spans) batch.

    NOTE(review): byte-for-byte duplicate of the `train_step` defined just
    above; being defined second, this is the one actually bound to the name.
    """
    q_batch, s_batch, y_batch = data_batch
    feed_dict = {
        model.queries : q_batch,
        #model.queries_length : ql_batch,
        model.hypothesis : s_batch,
        #model.hypothesis_length : sl_batch,
        model.dropout : FLAGS.dropout,
        model.y_SQUAD : y_batch
    }
    _, loss = sess.run([model.train_op_SQUAD, model.loss_SQUAD], feed_dict=feed_dict)
    return loss
def test_step(sess, model, test_data):
    """Evaluate mean SQuAD loss over `test_data` in batches (dropout off).

    Despite the leftover `final_pred` list, only the loss is computed and
    the batch-mean loss is returned.
    """
    q_test, s_test, y_test = test_data
    final_pred = []
    final_loss = []
    for i in range(0, len(y_test), FLAGS.batch_size):
        feed_dict = {
            model.queries : q_test[i:i+FLAGS.batch_size],
            #model.queries_length : ql_test[i:i+FLAGS.batch_size],
            model.hypothesis : s_test[i:i+FLAGS.batch_size],
            #model.hypothesis_length : sl_test[i:i+FLAGS.batch_size],
            model.y_SQUAD : y_test[i:i+FLAGS.batch_size],
            model.dropout : 1.0  # keep-probability 1.0 == no dropout at eval
        }
        loss = sess.run([model.loss_SQUAD], feed_dict=feed_dict)
        final_loss.append(loss)
    print("loss in valid set :{}".format(np.mean(final_loss)))
    return np.mean(final_loss)
if __name__ == "__main__":
    # Train MatchLSTM on SQuAD, checkpointing whenever dev loss improves.
    trainf = os.path.join('SQUAD/train-v1.1.json')
    valf = os.path.join('SQUAD/dev-v1.1.json')
    best_map = 100   # best (lowest) dev loss so far, despite the name
    best_epoch = 0
    print("Load data")
    read_data(trainf, valf)
    pickle.dump(vocab, open("vocab.pkl","wb"))
    print("Load Glove")
    emb = DataUtils.GloVe(FLAGS.embedding_path)
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    model = MatchLSTM(FLAGS, vocab, emb)
    checkpoint_dir = os.path.abspath(os.path.join(FLAGS.out_dir, "checkpoints"))
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)
    test_data = [ inp_val['qi'],
                  inp_val['si'],
                  y_val
                ]
    sess.run(tf.global_variables_initializer())
    for e in range(FLAGS.epochs):
        t = tqdm(range(0, len(y_train), FLAGS.batch_size), desc='train loss: %.6f' %0.0, ncols=90)
        for i in t:
            data_batch = [ inp_tr['qi'][i:i+FLAGS.batch_size],
                           inp_tr['si'][i:i+FLAGS.batch_size],
                           y_train[i:i+FLAGS.batch_size]
                         ]
            loss = train_step(sess, model, data_batch)
            t.set_description("epoch %d: train loss %.6f" % (e, loss))
            t.refresh()
        # Evaluate on dev after each epoch; save only on improvement.
        curr_map = test_step(sess, model, test_data)
        print("best loss in dev: %.6f" %best_map)
        if curr_map < best_map:
            best_map = curr_map
            best_epoch = e
            save_path = saver.save(sess, os.path.join(checkpoint_dir, "checkpoint"), e)
            print("saved in %s" %save_path)
|
997,993 | afb3d765608ea9cc654f6f0bfd516c01c0a4c2c5 | #json模拟数据库
# JSON strings are stored in a text file; all data operations go through file reads/writes.
import json
# Create the text file (the "database" file)
# with open(r"user.txt","w") as f:
# users='[{"uname":"zhangsan","upwd":"123"},{"uname":"lisi","upwd":"123"},{"uname":"wangwu","upwd":"123"}]'
# f.write(users)
# Read data (query)
def readData():
    """Load and return the list of user records stored in user.txt."""
    with open(r"user.txt", "r") as f:
        return json.loads(f.read())
# Write data (update)
def writeData(usersList):
    """Serialize usersList to user.txt as JSON (non-ASCII kept readable)."""
    serialized = json.dumps(usersList, ensure_ascii=False)
    with open(r"user.txt", "w") as f:
        f.write(serialized)
    print("----数据写入成功!")
# Log in
def login():
    """Prompt for a username/password and check them against user.txt.

    Returns the status string: "成功" (success) or "失败" (failure).
    """
    name=input("请输入用户名:")
    password=input("请输入密码:")
    usersList=readData()  # load every stored user record
    msg="失败"
    for user in usersList:
        if name==user["uname"] and password==user["upwd"]:
            msg="成功"
            print("----恭喜登陆成功!")
    if msg=="失败":
        print("----登录失败!")
    return msg
# Register (add a user to the database)
def reg():
    """Prompt for a new username/password and append it to user.txt."""
    name=input("请输入新用户名:")
    password=input("请输入密码:")
    newuser={"uname":name,"upwd":password}  # the new user record
    usersList=readData()
    usersList.append(newuser)  # add the new user to the list
    writeData(usersList)
    print("-----新用户添加成功!")
#-------------------------
if __name__ == '__main__':
    # Manual smoke test: run the interactive login flow.
    login()
997,994 | a01cbbbb7c1c0df31fb7abfe04731fa0debbc234 | from math import sqrt, atan
import numpy as np
import matplotlib.pyplot as plt
def mans_atan (x):
count = 0
a = 1.
sum = a
while count < 500:
count+=1
k = 1.*((2*count-1)**2)/(2*count*(2*count+1))*(x**2)/(1+(x**2))
a = a * k
sum = sum + a
z = (x/(np.sqrt(1+(x**2))))
y = z * sum
return y
a = -3.
b = 3.
x = np.arange(a,b,0.01)
y = mans_atan(x)
plt.plot(x,y)
plt.grid()
plt.show()
delta_x = 1.e-3 # =0,001
funa = mans_atan (a)
funb = mans_atan (b)
if funa * funb > 0:
print "[%.2f,%.2f] intervala saknu nav"%(a,b)
print "vai saja intervala ir paru saknu skaits"
exit()
print "turpinajums, kad sakne ir:"
print "vertibas intervala galapunktos - ",
print "f(%.2f)=%.2f un f(%.2f)=%.2f"%(a,funa,b,funb)
k=0
while b-a > delta_x:
k = k + 1
x = (a+b)/2
funx = mans_atan(x)
print "%3d. a=%.5f f(%.5f)=%8.5f b=%.5f"%(k,a,x,funx,b)
if funa * funx > 0:
a = x
else:
b = x
print "rezultats: "
print "a=%.9f f(%.9f)=%8.9f b=%.9f"%(a,x,funx,b)
print "apreikins veikts ar %d iteraciju"%(k)
|
997,995 | 20711aee91f0832fdd080b9faa4f9c41f616fc52 | '''
Write a program that prints the numbers from 1 to 100. But for multiples of three print
“Fizz” instead of the number and for the multiples of five print “Buzz” and for the multiples
of both 5 and 3 print 'FizzBuzz'
'''
# My Solution:
for i in range(1,101):
if i % 5 == 0 and i % 3 == 0:
print('Fizzbuzz')
elif i % 5 == 0:
print('buzz')
elif i % 3 == 0:
print('Fizz')
else:
print(i) |
997,996 | 7739a3cb19d1cf3fb597b7d763e3905c04ee5d10 | import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from 机器学习.逻辑回归.LogisticRegression import LogisticRegression
# Restrict iris to the two linearly separable classes (labels 0 and 1) and
# the first two features, giving a 2-D binary classification problem.
iris = datasets.load_iris()
x = iris.data
y = iris.target
x = x[y < 2, :2]
y = y[y < 2]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=666)
# Fit the project's own LogisticRegression implementation (not sklearn's).
log_reg = LogisticRegression()
log_reg.fit(x_train, y_train)
score = log_reg.score(x_test, y_test)
def x2(x1):
    # Decision boundary: solve coef[0]*x1 + coef[1]*x2 + intercept = 0 for x2.
    # NOTE(review): relies on the project class exposing ``interception_``
    # (sic) and ``coef_`` after fit — confirm the attribute names there.
    return (-log_reg.coef_[0] * x1 - log_reg.interception_) / log_reg.coef_[1]
x1_plot = np.linspace(4, 8, 1000)
x2_plot = x2(x1_plot)
# Plot the full data set together with the learned decision boundary.
plt.scatter(x[y == 0, 0], x[y == 0, 1], color='r')
plt.scatter(x[y == 1, 0], x[y == 1, 1], color='b')
plt.plot(x1_plot, x2_plot)
plt.show()
# Plot only the held-out test split against the same boundary.
plt.scatter(x_test[y_test == 0, 0], x_test[y_test == 0, 1], color='r')
plt.scatter(x_test[y_test == 1, 0], x_test[y_test == 1, 1], color='b')
plt.plot(x1_plot, x2_plot)
plt.show()
# Decision boundary of KNN
from sklearn.neighbors import KNeighborsClassifier
knn_clf = KNeighborsClassifier()
knn_clf.fit(x_train, y_train)
|
997,997 | f02553385cce3676ff5c63051a76f91acb6ab05f | from collections import Counter
from typing import List
class Solution:
    def isNStraightHand(self, hand: List[int], groupSize: int) -> bool:
        """Return True iff ``hand`` can be split into runs of ``groupSize``
        consecutive values (LeetCode 846, "Hand of Straights").

        Greedy: after sorting, the smallest remaining card must start a run,
        so try to consume ``card .. card+groupSize-1`` from the multiset.
        NOTE: sorts ``hand`` in place, like the original.
        """
        hand.sort()
        counts = Counter(hand)
        for card in hand:
            if counts[card] <= 0:
                # This occurrence was already consumed by an earlier run.
                continue
            for step in range(groupSize):
                nxt = card + step
                if counts[nxt] <= 0:
                    return False  # run cannot be completed
                counts[nxt] -= 1
        return True
|
997,998 | ca7c59c44c1e44fb614eb60d6a7ac243430ab7b8 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: replace model ``Object`` with ``Miracle``.

    ``forwards`` drops ``core_object``, creates ``core_miracle`` with the
    same columns, and repoints ``Image``'s FK from ``object`` to ``miracle``;
    ``backwards`` reverses every step.  Auto-generated — edit with care.
    """

    def forwards(self, orm):
        """Apply: Object -> Miracle rename plus Image FK swap."""
        # Deleting model 'Object'
        db.delete_table('core_object')
        # Adding model 'Miracle'
        db.create_table('core_miracle', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=250)),
            ('description', self.gf('django.db.models.fields.TextField')()),
            ('coord_x', self.gf('django.db.models.fields.CharField')(max_length=250)),
            ('coord_y', self.gf('django.db.models.fields.CharField')(max_length=250)),
            ('instag_tags', self.gf('django.db.models.fields.CharField')(max_length=250)),
            ('google_tags', self.gf('django.db.models.fields.CharField')(max_length=250)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=250)),
        ))
        db.send_create_signal('core', ['Miracle'])
        # Deleting field 'Image.object'
        db.delete_column('core_image', 'object_id')
        # Adding field 'Image.miracle'
        # NOTE(review): default=1 assumes a Miracle with pk=1 exists when
        # backfilling existing Image rows — confirm before running on
        # populated data.
        db.add_column('core_image', 'miracle',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['core.Miracle']),
                      keep_default=False)

    def backwards(self, orm):
        """Revert: restore core_object, drop core_miracle, swap the FK back."""
        # Adding model 'Object'
        db.create_table('core_object', (
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=250)),
            ('coord_y', self.gf('django.db.models.fields.CharField')(max_length=250)),
            ('coord_x', self.gf('django.db.models.fields.CharField')(max_length=250)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=250)),
            ('instag_tags', self.gf('django.db.models.fields.CharField')(max_length=250)),
            ('google_tags', self.gf('django.db.models.fields.CharField')(max_length=250)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('description', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('core', ['Object'])
        # Deleting model 'Miracle'
        db.delete_table('core_miracle')
        # Adding field 'Image.object'
        # NOTE(review): same pk=1 default assumption as in forwards().
        db.add_column('core_image', 'object',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['core.Object']),
                      keep_default=False)
        # Deleting field 'Image.miracle'
        db.delete_column('core_image', 'miracle_id')

    # Frozen ORM snapshot used by South when running this migration.
    models = {
        'core.image': {
            'Meta': {'object_name': 'Image'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'miracle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Miracle']"}),
            'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'source': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '250'}),
            'year': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Year']"})
        },
        'core.miracle': {
            'Meta': {'object_name': 'Miracle'},
            'coord_x': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'coord_y': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'google_tags': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instag_tags': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '250'})
        },
        'core.vote': {
            'Meta': {'object_name': 'Vote'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Image']"}),
            'user_ip': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'value': ('django.db.models.fields.DecimalField', [], {'default': '1', 'max_digits': '1', 'decimal_places': '0'})
        },
        'core.year': {
            'Meta': {'object_name': 'Year'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'value': ('django.db.models.fields.IntegerField', [], {})
        }
    }

    complete_apps = ['core']
997,999 | 83a9ff3168c7f5bb2c20122a9027dd3cb161dc34 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Package metadata for the Selectel cloud storage API client.
setup(
    name='selectel_cloud_api',
    version='1.2',
    packages=find_packages(),
    # BUG FIX: install_requires previously listed 'selectel_cloud_api' — the
    # package depended on *itself*, which is circular and meaningless.  No
    # third-party runtime dependencies are visible here; add real ones to
    # this list if the package actually needs them.
    install_requires=[],
    url='https://github.com/RustoriaRu/SelectelCloudApi',
    license='MIT',
    author='vir-mir',
    keywords='selectel.ru selectel api, cloud',
    author_email='virmir49@gmail.com',
    description='api select cloud api',
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.