blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8357bfda0c42a9168dc0100350fa5fbb6a520850 | f5b3f58e51ae1c6c2a3c8e60d10b2588aeac4e2d | /Python/source/ben/Player.py | 66a2761874a9ee9b847a9676bd2ee92141df16d9 | [] | no_license | benbrock26/Video-Poker | a282e405a4e5e80e4cbb2b6759058e04974caac2 | 089fcfa676656bc3c192ac866168a1c40e6f649b | refs/heads/master | 2021-05-04T15:05:17.171879 | 2018-03-28T00:10:58 | 2018-03-28T00:10:58 | 120,219,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,637 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 19 16:20:36 2018
@author: Ben Brock and Shazia Zaman
"""
from Card import Card
from Hand import Hand
from Deck import Deck
from PokerHandUtility import PokerHandUtility
class Player(object):
    # NOTE: this module targets Python 2 (print statements appear later in the file).
    '''
    Player Constructor
    Set the players name, initializes the players hand, initializes list of hands,
    initializes players' bank roll and bet amount.
    @param: name
    @return: NONE
    '''
    def __init__(self, name):
        self.__name = name                # player's display name
        self.__current_hand = Hand()      # the hand currently being played
        self.__list_of_hands = []         # history of hands played so far
        self.__poker_hands = []
        self.__bank_roll = 0              # funds available for betting
        self.__bet_amount = 0             # amount wagered on the current hand
        # Manually tracked counter; get_current_hand_size() below derives the
        # size from the Hand itself instead of reading this attribute.
        self.__current_hand_size = 0
        # Poker Hand Utility object which evaluates any 5 card hand
        self.__poker_hand_utility = PokerHandUtility()
    '''
    Players Destructor
    clears the list of hands LIST
    @param: self
    @return: NONE
    '''
    def __del__(self):
        # Empties the history list in place when the Player is garbage-collected.
        del self.__list_of_hands[:]
    def get_poker_hand_utility(self):
        # Accessor for the hand-evaluation helper.
        return self.__poker_hand_utility
    def get_poker_hands(self):
        # Accessor for the poker-hands list (only initialized in this class).
        return self.__poker_hands
    '''
    add_card
    Add input new card object to the Players' hand
    @param: new_card - input Card object
    @return: NONE
    '''
    def add_card(self, new_card):
        self.__current_hand.add_card(new_card)
        self.__current_hand_size = self.__current_hand_size + 1
    '''
    add_funds
    Adds the specified amount to the player's funds
    @param: new_funds: The amount to be added to the player's funds
    @return: NONE
    '''
    def add_funds(self, new_funds):
        self.__bank_roll = self.__bank_roll + float(new_funds)
    '''
    remove_funds
    Removes the specified amount from the player's funds
    @param: amount_of_funds: The amount to be removed from the player's funds
    @return: NONE
    '''
    def remove_funds(self, amount_of_funds):
        # probably should check to make sure the player as enough funds in their
        # bank roll before blindly deduction cash from the account.
        # we dont want our account to go in the red or become negative
        self.__bank_roll = self.__bank_roll - float(amount_of_funds)
    '''
    get_funds
    Retrieves the player's funds
    @param: self
    @return: int: The integer amount of funds the player has
    '''
    def get_funds(self):
        return self.__bank_roll
    '''
    get_bet_amount
    Retrieves the bet amount
    @param: self
    @return: int: The amount of money the player bet
    '''
    def get_bet_amount(self):
        return self.__bet_amount
    '''
    set_bet_amount
    Sets the bet amount
    @param: bet_amount : The amount of money the player is betting
    @return: NONE
    '''
    def set_bet_amount(self, bet_amount):
        self.__bet_amount = bet_amount
    '''
    get_name
    Retrieves the player's name
    @param: self
    @return string: The player's name
    '''
    def get_name(self):
        return self.__name
    '''
    five_card_stud_hand
    get_hand
    Retrieves the player's current hand
    @param: self
    @return: LIST : The player's hand
    '''
    def get_hand(self):
        # Returns the Hand object itself (not the raw card list).
        #return self.__current_hand.get_cards()
        return self.__current_hand
    def get_current_hand_size(self):
        # Derived from the Hand; does not rely on __current_hand_size.
        return len(self.__current_hand.get_cards())
    def show_hand(self):
        # Prints each card via the Card object's own print method.
        if self.__current_hand.get_cards():
            for card in self.__current_hand.get_cards():
                card.print_card()
        else:
            print "PLAYERS HAND IS EMPTY NO CARDS ARE IN THE HAND\n"
    def show_hand_by_index(self):
        '''
        The pythonic way to do it is from the PEP 8 style guide:
        https://stackoverflow.com/questions/53513/how-do-i-check-if-a-list-is-empty
        '''
        if self.__current_hand.get_cards():
            index = 0
            for card in self.__current_hand.get_cards():
                card.print_card_by_index(index)
                index = index + 1
        else:
            print "PLAYERS HAND IS EMPTY NO CARDS ARE IN THE HAND\n"
    def show_hand_ver1(self):
        # Alternate display: indexes back into the card list; behaviorally the
        # same output as show_hand().
        if self.__current_hand.get_cards():
            idx = 0
            for card in self.__current_hand.get_cards():
                self.__current_hand.get_cards()[idx].print_card()
                idx = idx + 1
        else:
            print "PLAYERS HAND IS EMPTY NO CARDS ARE IN THE HAND\n"
    def get_card_at_index(self, position):
        # NOTE(review): this indexes the Hand object directly, while every other
        # method goes through self.__current_hand.get_cards().  Unless Hand
        # implements __getitem__, this raises TypeError — confirm against Hand.
        return self.__current_hand[position]
    def show_hand_single_card_format(self):
        if self.__current_hand.get_cards():
            for card in self.__current_hand.get_cards():
                card.print_single_card()
        else:
            print "PLAYERS HAND IS EMPTY NO CARDS ARE IN THE HAND\n"
    def draw(self, deck):
        # Draws the top card from the deck into this hand; returns self so
        # calls can be chained (see main(): bob.draw(deck).draw(deck)...).
        self.__current_hand.add_card(deck.draw_card())
        return self
    '''
    reset
    Resets the player's bet and hand
    @param: self
    @return: NONE
    '''
    def reset(self):
        # reset the bet amount
        self.__bet_amount = 0
        # clear the players' current hand
        self.__current_hand.clear()
    '''
    get_list_of_players_hands
    Retrieves the players' history of list of hands
    @param: self
    @return: LIST : the list of players previous hands while playing poker
    '''
    def get_list_of_players_hands(self):
        return self.__list_of_hands
    '''
    add_hand_to_list_of_players_hands
    Add the latest current hand to the list of players hands
    This can be used to study the players hands later
    @param: self
    @param: list : five_stud_hand
    @return: NONE
    '''
    def add_hand_to_list_of_players_hands(self, five_stud_hand):
        self.__list_of_hands.append(five_stud_hand)
    def get_list_of_players_hands_size(self):
        return len(self.__list_of_hands)
    '''
    This was done to make the Card class iterable
    '''
    # NOTE(review): the note above is misleading — this defines equality on the
    # Player's full attribute state, not iteration.
    def __eq__(self, other):
        return self.__dict__ == other.__dict__
    '''
    toString method
    @return - String respresentation in a customized card hand order
    '''
    def toString(self):
        # NOTE(review): print_card() appears to print directly (see show_hand);
        # if it returns None, these format placeholders render "None" — verify.
        return "Hand: \t\t{} Test\t| {} | {} | {} | {}".format(self.__current_hand.get_cards()[0].print_card(),
                                                               self.__current_hand.get_cards()[1].print_card(),
                                                               self.__current_hand.get_cards()[2].print_card(),
                                                               self.__current_hand.get_cards()[3].print_card(),
                                                               self.__current_hand.get_cards()[4].print_card())
## Unit Test of the Player Class ####
def main():
    # Ad-hoc smoke test (Python 2 print statements): deals five cards to a
    # player and exercises the display and hand-history helpers.
    print "...Start of Player Class Unit Testing...\n"
    from Player import Player
    bob = Player("bob")
    print "Players name:\t{}\n".format(bob.get_name())
    deck = Deck()
    deck.shuffle()
    print "Number of cards in the deck:\t{}\n" .format(deck.get_deck_size())
    # draw() returns self, so five draws can be chained.
    bob.draw(deck).draw(deck).draw(deck).draw(deck).draw(deck)
    print "Players' current hand size:\t{}\n".format(bob.get_current_hand_size())
    assert bob.get_current_hand_size() == 5, "Invalid: Must be 5 cards in a hand, not %i" % bob.get_current_hand_size()
    print "\n...Printing Bob's hand via the show_hand() method.....\n"
    bob.show_hand()
    print "\n...Printing Bob's hand via the get_hand() method.....\n"
    for card in bob.get_hand().get_cards():
        card.print_card()
    print "\nUpdated Number of cards in the deck:\t{}\n".format(deck.get_deck_size())
    print "\nONLY ADD 5 STUD CARD HANDS TO LIST OF HISTORY OF HAND COMMANDS\n"
    bob.add_hand_to_list_of_players_hands(bob.get_hand())
    print "Number of hands listed in the HISTORY of Commands:\t{}\n".format(bob.get_list_of_players_hands_size())
    #print "CARD:\t{}".format(bob.get_hand().get_cards()[0].print_card())
    #position = 2
    #print "CARD POSITION:\t{} is {}".format(position, bob.get_card_at_index(position))
    bob.show_hand_ver1()
    #print bob.toString()


if __name__ == '__main__':
    main()
| [
"benbroc@gmail.com"
] | benbroc@gmail.com |
8192c7550d3b208dff239641eabbdf984865471f | b900c9742d0a1832cdd7dc1ae979a02edd801f3d | /SI364Midterm/api_info_template.py | 440dbb69dbd8f775de03daaa4a51b85840270076 | [] | no_license | catieo/364midterm | 57d591416807e4430f6725518b38bf3b38833e55 | 1a6d5780a6501558c000fef0a36575460e1d102c | refs/heads/master | 2021-01-24T10:15:21.890605 | 2018-03-25T23:36:18 | 2018-03-25T23:36:18 | 123,046,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | #Enter the values I submitted on canvas here!
# Placeholder API credentials — fill in locally, and never commit real secrets
# to version control (prefer environment variables or an ignored config file).
client_id = ""
client_secret = ""
access_token = ""
| [
"noreply@github.com"
] | catieo.noreply@github.com |
38161679d7a7df53da17d5e1b6d803fcaf924202 | b25d8d52308d752e6bc12f8791322f43e4f70bbd | /src/s17/suggestionbox/browser.py | b1d1eccc767053b4743abb3c1dbdea795bc7c82a | [] | no_license | simplesconsultoria/s17.suggestionbox | 890797fb3c674cc6b6a2264ba8f0f83e97fecc5d | d0d4a5a7bb5dc5042ae76768cc4c9fa0d8df395f | refs/heads/master | 2021-01-19T09:43:58.917038 | 2014-02-21T12:56:36 | 2014-02-21T12:56:36 | 4,553,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | # -*- coding: utf-8 -*-
from five import grok
from plone.directives import dexterity
from Products.CMFPlone.utils import getToolByName
from s17.suggestionbox.content import ISuggestionBox, ISuggestion
grok.templatedir("templates")  # page templates for the views below live in ./templates
class SuggestionBoxView(dexterity.DisplayForm):
    """Default (grok-registered) display view for a SuggestionBox."""
    grok.context(ISuggestionBox)
    grok.name("view")
    grok.template('suggestionbox_view')
    grok.require("zope2.View")
class SuggestionView(dexterity.DisplayForm):
    """Display view for a single Suggestion, exposing its contained
    images and files to the page template."""
    grok.context(ISuggestion)
    grok.name("view")
    grok.template('suggestion_view')
    grok.require('zope2.View')
    def images(self):
        # Catalog query for Image content located inside this suggestion.
        ct = getToolByName(self.context, 'portal_catalog')
        images = ct(portal_type='Image', path='/'.join(self.context.getPhysicalPath()))
        if images:
            # Wake the catalog brains up into full content objects.
            images = [image.getObject() for image in images]
            return images
        else:
            return None
    def files(self):
        # Catalog query for File content located inside this suggestion.
        # NOTE(review): unlike images(), this returns raw catalog brains
        # (no getObject()) — confirm the template expects brains here.
        ct = getToolByName(self.context, 'portal_catalog')
        files = ct(portal_type='File', path='/'.join(self.context.getPhysicalPath()))
        if files:
            return files
        else:
            return None
| [
"lepri@simplesconsultoria.com.br"
] | lepri@simplesconsultoria.com.br |
98df76e6cd0cb4efe72554be8125b1323a68dc77 | 900b3ca238dd78a577ff94e0a2e4d1a48094f5db | /2-python高级使用/2-Celery自动搜索任务/start.py | b311f803c33110b7ec3f77f09d7cb43c330edeaa | [] | no_license | zxperson/note | 0b71e899389947d73b6ed4f577092fddddc5517f | 96b03d9ac2105a6891bd2ab02e2f8109db0c057c | refs/heads/master | 2020-08-18T11:47:32.079780 | 2019-10-17T13:00:35 | 2019-10-17T13:00:35 | 215,785,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | # 启动异步任务
from celery_tasks.task_1.tasks import my_task_1
from celery_tasks.task_2.tasks import my_task_2
# Enqueue the tasks asynchronously; .delay() returns immediately and the
# work is executed by a Celery worker process.
my_task_1.delay(1,2)
my_task_2.delay(4,5)
"""
Before starting, first copy celery_tasks over to the Ubuntu machine, then start the worker:
celery -A celery_tasks.main worker --loglevel=info
"""
"whit@163.com"
] | whit@163.com |
ca9175d20feb2612e9e3e55ea110a7ea4d89989a | 9126ac934bdfa2855f3638af866dc7a1c43c15a1 | /application.py | 77360ff0aa1a04fa8294858324311153f3afa706 | [] | no_license | stephanlascar/markme | 14e2eab990dc57e230acf15402c09d714652ed5b | 7801b388a12797c5bf05dc779feb407a4198eea9 | refs/heads/master | 2016-09-06T11:14:41.220608 | 2015-03-02T15:33:56 | 2015-03-02T15:33:56 | 28,190,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | # -*- coding: utf-8 -*-
import os
import markme
# Build the application via the markme factory, configured from the
# MONGOLAB_URI environment variable (raises KeyError if it is not set).
app = markme.create_app(os.environ['MONGOLAB_URI'])
| [
"stephan.lascar@gmail.com"
] | stephan.lascar@gmail.com |
98e2e59c2d13a455a8e3895c9e8d255c77dfd898 | d1536f1379d298d2660f4e8e62b977ac563e654c | /Dict_assignment.py | 28354e91c4991a4696db2ec96af1dcdcc39ddd23 | [] | no_license | rohitpawar4507/Zensar_Python | 9816f224ad50fe804a136c62cbd33cfc4801265a | cc56db3bf5d6bea543fbf5334b1517e9d6c14c8d | refs/heads/master | 2023-03-14T09:27:34.358446 | 2021-03-11T16:52:46 | 2021-03-11T16:52:46 | 346,773,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | ### Assignment ###
# Dictionary exercises: length, sorting, membership, tuple conversion,
# insertion, deletion, keyed lookup, and iteration over keys/values.
d1 = {1: 'Nashik', 2: "Pune", 3: "Aurangabad"}

# 1. Find the length of the dictionary
print("The length of dictionary is ..", len(d1))

# 2. Print the dictionary keys in sorted order
# print("The dictionary in sorted manners..", sorted(d1))

# 3. Check whether a value is present in the dictionary
value = "Pune"
if value in d1.values():
    print(f"The given value exists '{value}':")
else:
    print(f"The given value does not exist '{value}':")

# 4. Convert a tuple of key/value pairs into a dictionary
tup = ((1, 'a'), (3, 'b'))
print(dict(tup))

# 5. Create a dictionary `student` with student details
student = {1: "Rohit", 2: "Man", 3: "Raj", 4: "Vijay"}
print(student)

# 6. Add elements to the student dict
# (fixed: the original mutated d1 here, contradicting this step's intent)
student[5] = "Ram"
student[6] = "Sham"
print(student)

# 7. Delete one element from the student dict
'''student.pop(5)
print("Dict after deleting one element,", student)
'''

# 8. Return the element with a given key from the student dict
# (fixed: the original searched for a value here despite asking for a key)
key = 1
if key in student:
    print(f"Element with key {key}: {student[key]}")
else:
    print(f"Key {key} does not exist")

# 9. Print only the keys from dict student
print("Printing the keys using for loop")
for x in student:  # iterating a dict yields its keys
    print(x, end=' ,')

# 10. Print only the values from dict student
print("\nPrinting the Values using for loop")
for x in student.values():
    print(x, end=' ,')
"RohitEmail"
] | RohitEmail |
4ee5c7635d1d388cb4d468d7dc04515ac9df2ccd | 26d6c34df00a229dc85ad7326de6cb5672be7acc | /msgraph-cli-extensions/v1_0/personalcontacts_v1_0/azext_personalcontacts_v1_0/vendored_sdks/personalcontacts/aio/_personal_contacts.py | 0c6cf4d05bbae3c59e563ab9a028bb2e8874efa7 | [
"MIT"
] | permissive | BrianTJackett/msgraph-cli | 87f92471f68f85e44872939d876b9ff5f0ae6b2c | 78a4b1c73a23b85c070fed2fbca93758733f620e | refs/heads/main | 2023-06-23T21:31:53.306655 | 2021-07-09T07:58:56 | 2021-07-09T07:58:56 | 386,993,555 | 0 | 0 | NOASSERTION | 2021-07-17T16:56:05 | 2021-07-17T16:56:05 | null | UTF-8 | Python | false | false | 3,990 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import PersonalContactsConfiguration
from .operations import UsersOperations
from .operations import UsersContactFoldersOperations
from .operations import UsersContactFoldersContactsOperations
from .operations import UsersContactsOperations
from .. import models
class PersonalContacts(object):
    """PersonalContacts.

    :ivar users: UsersOperations operations
    :vartype users: personal_contacts.aio.operations.UsersOperations
    :ivar users_contact_folders: UsersContactFoldersOperations operations
    :vartype users_contact_folders: personal_contacts.aio.operations.UsersContactFoldersOperations
    :ivar users_contact_folders_contacts: UsersContactFoldersContactsOperations operations
    :vartype users_contact_folders_contacts: personal_contacts.aio.operations.UsersContactFoldersContactsOperations
    :ivar users_contacts: UsersContactsOperations operations
    :vartype users_contacts: personal_contacts.aio.operations.UsersContactsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param top: Show only the first n items.
    :type top: int
    :param skip: Skip the first n items.
    :type skip: int
    :param search: Search items by search phrases.
    :type search: str
    :param filter: Filter items by property values.
    :type filter: str
    :param count: Include count of items.
    :type count: bool
    :param str base_url: Service URL
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        top: Optional[int] = None,
        skip: Optional[int] = None,
        search: Optional[str] = None,
        filter: Optional[str] = None,
        count: Optional[bool] = None,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Default to the public Microsoft Graph v1.0 endpoint when no
        # base URL is supplied (None or empty string).
        if not base_url:
            base_url = 'https://graph.microsoft.com/v1.0'
        self._config = PersonalContactsConfiguration(credential, top, skip, search, filter, count, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Collect every model class exported by the generated models module
        # and wire up (de)serialization around them.
        model_classes = {
            name: cls for name, cls in models.__dict__.items() if isinstance(cls, type)
        }
        self._serialize = Serializer(model_classes)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(model_classes)

        # Operation groups, each sharing the same pipeline and serializers.
        self.users = UsersOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.users_contact_folders = UsersContactFoldersOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.users_contact_folders_contacts = UsersContactFoldersContactsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.users_contacts = UsersContactsOperations(
            self._client, self._config, self._serialize, self._deserialize)

    async def close(self) -> None:
        # Release the underlying transport/session.
        await self._client.close()

    async def __aenter__(self) -> "PersonalContacts":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
325c140017e2ea231db8de746f39d33226b103b1 | 9e16beee96c5ada941c2c573155dd577b903ae8b | /Tkinter GUI examples/gui structure samples/gui structure example2.py | 0678f5001dd14d89b518a00b0078f3140703e359 | [] | no_license | JohnRGold/Python-examples | 849d94a5cfa6206c60d3c026f8a3cbd576a7035a | 2b62211c460e3fb9207af066a4737f7381e126e7 | refs/heads/master | 2023-02-03T01:44:24.547985 | 2020-12-09T19:15:52 | 2020-12-09T19:15:52 | 317,896,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,296 | py | """Fully functional. Is capable of completely locking the parent window via wm_attributes() method,
since the main menu inherits from Tk. Second menu is defined explicitly inside the appropriate button
method. However, OOP-ing this would be relatively simple, as the Create_Toplevel() method creates a
Toplevel object anyway."""
import tkinter as tk
from tkinter import ttk
class SampleApp(tk.Tk):
    """Main window demonstrating how to fully disable the parent window
    (via wm_attributes) while a modal-style Toplevel dialog is open."""

    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        self.minsize(300, 100)
        self.button = ttk.Button(self, text="Call toplevel!", command=self.Create_Toplevel)
        self.button.pack(side="top")

    def Create_Toplevel(self):
        # THE CLUE — lock the parent window so it ignores all input
        # until the dialog re-enables it.
        self.wm_attributes("-disabled", True)
        # Creating the toplevel dialog
        self.toplevel_dialog = tk.Toplevel(self)
        self.toplevel_dialog.focus_set()
        self.toplevel_dialog.minsize(300, 100)
        # Tell the window manager, this is the child widget.
        # Interesting, if you want to let the child window
        # flash if user clicks onto parent
        self.toplevel_dialog.transient(self)
        # This is watching the window manager close button
        # and uses the same callback function as the other buttons
        # (you can use which ever you want, BUT REMEMBER TO ENABLE
        # THE PARENT WINDOW AGAIN)
        self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
        self.toplevel_dialog_label = ttk.Label(self.toplevel_dialog, text='Do you want to enable my parent window again?')
        self.toplevel_dialog_label.pack(side='top')
        self.toplevel_dialog_yes_button = ttk.Button(self.toplevel_dialog, text='Yes', command=self.Close_Toplevel)
        self.toplevel_dialog_yes_button.pack(side='left', fill='x', expand=True)
        # NOTE: the "No" button has no command bound, so clicking it does nothing.
        self.toplevel_dialog_no_button = ttk.Button(self.toplevel_dialog, text='No')
        self.toplevel_dialog_no_button.pack(side='right', fill='x', expand=True)

    def Close_Toplevel(self):
        # IMPORTANT! Re-enable the parent before destroying the dialog,
        # otherwise the application is left unusable.
        self.wm_attributes("-disabled", False)  # IMPORTANT!
        self.toplevel_dialog.destroy()
        # Possibly not needed, used to focus parent window again
        self.deiconify()


if __name__ == "__main__":
    app = SampleApp()
    app.mainloop()
"johnggold@protonmail.com"
] | johnggold@protonmail.com |
71a0194808aa59ac6b949bcd9c95ae8dd73b0377 | a8d3b441680979654f8a4bf20b72d38f4a5913aa | /scripts/list_incidents.py | c88021facf763e36249ff188bdb0afa3dbb8a0cd | [] | no_license | Cjhosu/pyProjects | 658d73b452f2731ca3b6b28adaa2fe06b202bdbd | 9a7a5d82939f2aece8440f7261ff77a0f293c8ba | refs/heads/master | 2022-05-02T08:40:17.576762 | 2022-04-04T02:51:23 | 2022-04-04T02:51:23 | 94,718,085 | 0 | 0 | null | 2021-06-10T17:33:41 | 2017-06-18T23:06:08 | Python | UTF-8 | Python | false | false | 2,304 | py | import requests
import os
import subprocess
import time
from datetime import datetime, timedelta
# PagerDuty REST API key, read from the environment (None if unset).
api_key_from_env = os.environ.get('PDAPI_DATABASE')
API_KEY = api_key_from_env
# Query-window start: two days ago (despite the variable name), ISO-8601.
last_hour = (datetime.now() - timedelta(days=2)).isoformat()
SINCE = last_hour
UNTIL = ''  # empty string = no explicit upper bound on the window
SERVICE_IDS = ['PSLDOMB']  # PagerDuty service(s) whose incidents are listed
LIMIT = '1'  # one incident per request; pagination is driven by OFFSET
TIME_ZONE = 'UTC'
class Incident:
    """Pages through PagerDuty incidents and emits statsd-style metrics.

    NOTE: this class is used as a namespace, never instantiated — the
    __main__ guard calls Incident.list_incidents(Incident), so ``self`` is
    the class itself and internal calls pass ``self`` explicitly
    (e.g. self.key_serializer(self, ...)).  State lives in class attributes.
    """
    OFFSET = 0          # pagination cursor, advanced one incident at a time
    is_more = True      # loop flag, refreshed from the API's "more" field
    host_name = ''      # last host_name parsed out of an incident key
    service_desc = ''   # last service_desc parsed out of an incident key
    def list_incidents(self):
        # Fetch incidents one page (LIMIT=1) at a time until the API reports
        # no more results, serializing each incident key into metrics.
        while self.is_more == True:
            # NOTE(review): prints the API key (a secret) to stdout — remove
            # or mask before running where logs are captured.
            print(API_KEY)
            url = 'https://api.pagerduty.com/incidents'
            headers = {
                'Accept': 'application/vnd.pagerduty+json;version=2',
                'Authorization': 'Token token={token}'.format(token=API_KEY)
            }
            payload = {
                'since': SINCE,
                'until': UNTIL,
                'service_ids[]': SERVICE_IDS,
                'time_zone': TIME_ZONE,
                'offset': self.OFFSET,
                'limit': LIMIT,
            }
            r = requests.get(url, headers=headers, params=payload)
            incident = r.json()
            try:
                incident_key = incident["incidents"][0]["incident_key"]
            except:
                # Empty page / unexpected payload: stop paging entirely.
                return
            self.key_serializer(self,incident_key)
            self.OFFSET += 1
            print(incident)
            self.is_more = incident["more"]
    def key_serializer(self, incident_key):
        # Incident keys look like "k1=v1;k2=v2;..."; only service-sourced
        # events (event_source=service) are turned into metrics.
        key_list = incident_key.split(';')
        if ('event_source=service') in key_list:
            for item in key_list:
                pair = item.split('=')
                # Keep only the value portion before the first '-'.
                head , sep, tail = pair[1].partition('-')
                dictrep = {pair[0] :head}
                self.set_metric(self, dictrep)
            # Build the statsd line from the accumulated class attributes.
            bash = 'echo "production.dba_on_call_incident.'+self.service_desc+'.'+self.host_name+':1|s" | nc -u -w1 production.statsd.service.infrastructure.consul 8125'
            print(bash)
            """subprocess.call(bash, shell=True)
            time.sleep(3)"""
    def set_metric(self, dictrep):
        # Stash the interesting key/value pairs on the class for later use.
        if 'host_name' in dictrep:
            self.host_name = dictrep['host_name']
        elif 'service_desc' in dictrep:
            self.service_desc = dictrep['service_desc']


if __name__ == '__main__':
    Incident.list_incidents(Incident)
| [
"choward@covermymeds.com"
] | choward@covermymeds.com |
0c4b93b339d5366e37f2015901d6af676884bd67 | c085d947765474637b199152948fd63a22ed2777 | /Application/migrations/0002_auto_20200719_1856.py | 1c21805a9325174d0d3672d8f035cf5e66b913cc | [] | no_license | venkatarao3139/OnlineCourseRegisreration | 992ec8cf019f766821421a331905efe78fbe1c11 | 911d8a4f18330386c6abe6c9eae7c5568787f196 | refs/heads/master | 2022-11-19T01:41:56.792820 | 2020-07-20T06:31:36 | 2020-07-20T06:31:36 | 280,671,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,460 | py | # Generated by Django 3.0.8 on 2020-07-19 13:26
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (3.0.8): renames the EnroleCoueses
    fields to their capitalized names and gives ``timing`` an explicit
    midnight default."""

    dependencies = [
        ('Application', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='enrolecoueses',
            old_name='course',
            new_name='Course_Name',
        ),
        migrations.RenameField(
            model_name='enrolecoueses',
            old_name='status',
            new_name='Enroll_status',
        ),
        migrations.RenameField(
            model_name='enrolecoueses',
            old_name='fac',
            new_name='Faculty_Name',
        ),
        migrations.RenameField(
            model_name='enrolecoueses',
            old_name='no',
            new_name='Register_No',
        ),
        migrations.RenameField(
            model_name='enrolecoueses',
            old_name='std_cont',
            new_name='Student_Contact',
        ),
        migrations.RenameField(
            model_name='enrolecoueses',
            old_name='stdname',
            new_name='Student_Name',
        ),
        migrations.RenameField(
            model_name='enrolecoueses',
            old_name='dat',
            new_name='date',
        ),
        # New default: midnight (00:00) for the enrolment timing field.
        migrations.AlterField(
            model_name='enrolecoueses',
            name='timing',
            field=models.TimeField(default=datetime.time(0, 0)),
        ),
    ]
| [
"venkatarao.dharamna59@gmail.com"
] | venkatarao.dharamna59@gmail.com |
e742907f523101322df4966977e82fafc1446f34 | 2c4ad0e41e495b1be29ac54f3552f5a4bcfb8d8b | /apps/comments/views.py | 0c004c9e007472a3dba19ad976acbe6ce31052d7 | [] | no_license | buzzzzx/blogforzly | 7de8f01e767e01f30d7dab8ffb2243484de24f4a | 163a26c7518ed13c7f3a58cd12d455748b60ab6d | refs/heads/master | 2022-03-09T14:43:00.098795 | 2019-08-06T13:13:08 | 2019-08-06T13:13:08 | 114,436,672 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | from django.shortcuts import render, get_object_or_404, redirect
from .models import Comment
from .forms import CommentForm
from blog.models import Post
from utils.send_email import send
# Create your views here.
def post_comment(request, post_pk):
    """Handle a comment submission for the post identified by ``post_pk``.

    A valid POST persists the comment, sends a notification e-mail and
    redirects back to the post.  An invalid POST re-renders the detail
    page with the bound form and the existing comments.  Any other HTTP
    method simply redirects to the post.
    """
    post = get_object_or_404(Post, pk=post_pk)

    if request.method != 'POST':
        return redirect(post)

    form = CommentForm(request.POST)
    if not form.is_valid():
        # Re-display the detail page with validation errors.
        return render(request, 'blog/detail.html', context={
            'post': post,
            'form': form,
            'comment_list': post.comment_set.all(),
        })

    new_comment = form.save(commit=False)
    new_comment.post = post
    new_comment.save()
    # send email notification about the new comment
    send(flag=1, nickname=new_comment.name, semail=new_comment.email,
         text=new_comment.text, postname=new_comment.post.title)
    return redirect(post)
| [
"buzzzzx233@gmail.com"
] | buzzzzx233@gmail.com |
1cdc2bcc69cfb9db96d5c781083c1bc817ff9f01 | 387cf5f72ed6679a4d9e04bddd16998a190c4caf | /problems/programmers/lv4/pgs-12983-wrong.py | 69760552fe4acfa3898004c7c8b095f9f458bbe3 | [] | no_license | CodyBuilder-dev/Algorithm-Coding-Test | db4ee1e7565fbcef3140192225167eff42ad5c02 | cca5c4ba8bc31679ab00aceccfd8d9d39c232f72 | refs/heads/master | 2021-07-24T00:34:41.888289 | 2021-07-21T14:29:00 | 2021-07-21T14:29:00 | 219,123,221 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | """
제목 : 단어 퍼즐
아이디어 : 거스름돈과 비슷한 dp
(1) DP 값 저장
- key는 strs의 i번재 단어까지 서서 t의 j번째를 만드는 것
- j번째 까지냐 vs j번째 만이냐
- value는 최소값
(2) 초기화
- dp[0][0] ~ dp[0][len(t)-1]는 초기화할 수 있음
(3) 점화식
- dp[i][j] 는, min(dp[i-1][j], dp[i][j-k] (k = strs에 담긴 원소들의 길이))인가?
"""
from math import inf
def solution(strs, t):
    """Word-puzzle DP: return the minimum number of pieces from ``strs``
    (each usable any number of times) whose concatenation equals ``t``,
    or -1 when ``t`` cannot be assembled.

    Replaces the previous incomplete attempt, which computed a wrong
    recurrence and returned its debug DP table instead of the answer.

    dp[j] = fewest pieces spelling the prefix t[:j]; dp[0] = 0.
    """
    pieces = set(strs)
    longest = max(len(word) for word in strs)
    n = len(t)
    dp = [0] + [inf] * n
    for end in range(1, n + 1):
        # Try every piece length that could end exactly at `end`.
        for size in range(1, min(longest, end) + 1):
            if t[end - size:end] in pieces:
                dp[end] = min(dp[end], dp[end - size] + 1)
    return dp[n] if dp[n] != inf else -1
# Test cases (the second print argument is the expected answer)
print(solution(["ba","na","n","a"],"banana"),3)
print(solution(["app","ap","p","l","e","ple","pp"],"apple"),2)
print(solution(["ba","an","nan","ban","n"],"banana"),-1)
print(solution(["bax","dxv","zxc"],"baobab"))
print(solution)  # NOTE(review): prints the function object itself — leftover debug?
"imspecial1@u.sogang.ac.kr"
] | imspecial1@u.sogang.ac.kr |
19f0b797f0e3d4ed068398e6a9c235fa8cc5c020 | 5628ec1b260b41ee427b2d00de886af4d87e9dc0 | /01_of_week_1_Single_Number.py | f82885c574037c37aa1bb1bf7ed311cbea5e787f | [] | no_license | bmanandhar/LeetCodeLockAril2020 | 464eaab08e94c4ae3828bc7a888b12cf66749d2b | 7d5c554d47bcf355aee6b788d7a82322681e5249 | refs/heads/master | 2021-05-20T15:52:56.938325 | 2020-04-12T16:50:47 | 2020-04-12T16:50:47 | 252,355,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | """
Given a non-empty array of integers, every element appears twice except for one. Find that single one.
Note:
Your algorithm should have a linear runtime complexity. Could you implement it without using extra memory?
Example 1:
Input: [2,2,1]
Output: 1
Example 2:
Input: [4,1,2,1,2]
Output: 4
"""
class Solution(object):
    """LeetCode 136 — Single Number.

    Every element appears exactly twice except one.  XOR-folding the
    list cancels all pairs (x ^ x == 0) and leaves the unique value,
    in linear time with O(1) extra space.
    """

    def singleNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        result = nums[0]
        for value in nums[1:]:
            result ^= value
        return result
"""
XOR operator
""" | [
"bijaya.manandhar@yahoo.com"
] | bijaya.manandhar@yahoo.com |
5d86db5ca4849c4a3d056fe445f5af21bcb558e8 | 4c7fc810eb442b386969bf345b4dc6ef3152c783 | /src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py | c74865617cf76d513cfa10379dcd8d43d8b825e4 | [
"Apache-2.0"
] | permissive | newcodevelop/transformers | fbcef5d703b12febf6e76e84e3f0493769fb9d37 | e8d1bd7427021d2114ec159b2c90c6b1fcddeae7 | refs/heads/main | 2023-03-15T11:45:09.906184 | 2022-08-30T07:26:17 | 2022-08-30T07:26:17 | 254,360,734 | 0 | 1 | Apache-2.0 | 2020-04-09T12:07:09 | 2020-04-09T12:07:08 | null | UTF-8 | Python | false | false | 36,192 | py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Classes to support TF Encoder-Decoder architectures"""
import tempfile
import warnings
from typing import Optional
import tensorflow as tf
from ...configuration_utils import PretrainedConfig
from ...modeling_tf_outputs import TFBaseModelOutput, TFSeq2SeqLMOutput
from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFPreTrainedModel, get_initializer, unpack_inputs
from ...tf_utils import shape_list
from ...utils import (
DUMMY_INPUTS,
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ..auto.configuration_auto import AutoConfig
from ..auto.modeling_tf_auto import TFAutoModel, TFAutoModelForCausalLM
from .configuration_encoder_decoder import EncoderDecoderConfig
# Module-level logger following the library-wide `logging` wrapper convention.
logger = logging.get_logger(__name__)
# Config class name substituted into auto-generated return docstrings (see `replace_return_docstrings`).
_CONFIG_FOR_DOC = "EncoderDecoderConfig"
# Warning emitted once per `call` when `labels` is passed: since v4.17.0 the loss is
# computed in this framework class (not inside the decoder), which may change training
# results for checkpoints fine-tuned with older versions.
DEPRECATION_WARNING = (
    "Version v4.17.0 introduces a better way to train encoder-decoder models by computing the loss inside the"
    " encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if"
    " fine-tuning a model trained with versions anterior to 4.17.0. The decoder_input_ids are now created based on the"
    " labels, no need to pass them yourself anymore."
)
ENCODER_DECODER_START_DOCSTRING = r"""
This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the
encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via
[`~TFAutoModel.from_pretrained`] function and the decoder is loaded via [`~TFAutoModelForCausalLM.from_pretrained`]
function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream
generative task, like summarization.
The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi
Zhou, Wei Li, Peter J. Liu.
After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models
(see the examples for more information).
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
Parameters:
config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
ENCODER_DECODER_INPUTS_DOCSTRING = r"""
Args:
input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
Provide for sequence to sequence training to the decoder. Indices can be obtained using
[`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
details.
decoder_attention_mask (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
encoder_outputs (`tuple(tuple(tf.Tensor)`, *optional*):
This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
`last_hidden_state` (`tf.Tensor` of shape `({0}, hidden_size)`) is a tensor of hidden-states at the output
of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`tuple(tuple(tf.Tensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `({0})`.
inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
decoder_inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
labels (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
- Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.
- With a *decoder_* prefix which will be input as `**decoder_kwargs`` for the decoder forward function.
"""
def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """Shift `input_ids` one position to the right for teacher forcing.

    The decoder start token is prepended, the last token is dropped, and any `-100`
    loss-masking sentinels in the result are replaced by `pad_token_id`.

    Args:
        input_ids: int tensor of shape `(batch_size, sequence_length)` (typically the labels).
        pad_token_id: id used to fill positions that held the `-100` sentinel.
        decoder_start_token_id: id placed at position 0 of every sequence.

    Returns:
        An int tensor with the same shape and dtype as `input_ids`.

    Raises:
        ValueError: if `pad_token_id` or `decoder_start_token_id` is not configured.
    """
    # Validate the configuration up front; both ids are required below.
    if pad_token_id is None:
        raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
    if decoder_start_token_id is None:
        raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
    pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
    decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
    batch_size = shape_list(input_ids)[0]
    start_column = tf.fill((batch_size, 1), decoder_start_token_id)
    # Prepend the start token and drop the final position of every sequence.
    shifted_input_ids = tf.concat([start_column, input_ids[:, :-1]], -1)
    # Loss-masked positions carry -100 in the labels; map them back to the pad id.
    pad_fill = tf.fill(shape_list(shifted_input_ids), pad_token_id)
    shifted_input_ids = tf.where(shifted_input_ids == -100, pad_fill, shifted_input_ids)
    if tf.executing_eagerly():
        # Sanity-check that the labels only contained non-negative ids or -100.
        assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
        # Wrap in an identity no-op so the assertion op is actually executed.
        with tf.control_dependencies([assert_gte0]):
            shifted_input_ids = tf.identity(shifted_input_ids)
    return shifted_input_ids
@add_start_docstrings(ENCODER_DECODER_START_DOCSTRING)
class TFEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLoss):
    r"""
    [`TFEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with one
    of the base model classes of the library as encoder and another one as decoder when created with the
    [`~TFAutoModel.from_pretrained`] class method for the encoder and [`~TFAutoModelForCausalLM.from_pretrained`] class
    method for the decoder.
    """
    config_class = EncoderDecoderConfig
    base_model_prefix = "encoder_decoder"
    # Fixed weight-name prefix so that sub-model variables get stable names in checkpoints;
    # `from_encoder_decoder_pretrained` passes it to both sub-model loaders.
    load_weight_prefix = "tf_encoder_decoder_model"
    def __init__(
        self,
        config: Optional[PretrainedConfig] = None,
        encoder: Optional[TFPreTrainedModel] = None,
        decoder: Optional[TFPreTrainedModel] = None,
    ):
        # Either a full `EncoderDecoderConfig` or both sub-models must be supplied; a missing
        # config is derived from the sub-models' configs.
        if config is None and (encoder is None or decoder is None):
            raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
        if config is None:
            config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
        else:
            if not isinstance(config, self.config_class):
                raise ValueError(f"config: {config} has to be of type {self.config_class}")
        # A decoder-declared cross-attention width must match the encoder's hidden size,
        # because no projection is created in that case (see `enc_to_dec_proj` below).
        if config.decoder.cross_attention_hidden_size is not None:
            if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
                raise ValueError(
                    "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
                    f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
                    f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
                    " `config.encoder.hidden_size`."
                )
        # initialize with config
        super().__init__(config)
        if encoder is None:
            encoder = TFAutoModel.from_config(config.encoder, name="encoder")
        if decoder is None:
            decoder = TFAutoModelForCausalLM.from_config(config.decoder, name="decoder")
        self.encoder = encoder
        self.decoder = decoder
        # Warn when the passed-in sub-models carry configs that differ from the shared one;
        # the shared config wins (assignment just below).
        if self.encoder.config.to_dict() != self.config.encoder.to_dict():
            logger.warning(
                f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:"
                f" {self.config.encoder}"
            )
        if self.decoder.config.to_dict() != self.config.decoder.to_dict():
            logger.warning(
                f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
                f" {self.config.decoder}"
            )
        # make sure that the individual model's config refers to the shared config
        # so that the updates to the config will be synced
        self.encoder.config = self.config.encoder
        self.decoder.config = self.config.decoder
        # encoder outputs might need to be projected to different dimension for decoder
        if (
            self.encoder.config.hidden_size != self.decoder.config.hidden_size
            and self.decoder.config.cross_attention_hidden_size is None
        ):
            self.enc_to_dec_proj = tf.keras.layers.Dense(
                units=self.decoder.config.hidden_size,
                kernel_initializer=get_initializer(config.encoder.initializer_range),
                name="enc_to_dec_proj",
            )
        # An encoder with an LM head would produce logits, not hidden states, so reject it.
        if self.encoder.get_output_embeddings() is not None:
            raise ValueError(
                f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head"
            )
    @property
    def dummy_inputs(self):
        """
        Dummy inputs to build the network.
        Returns:
            `Dict[str, tf.Tensor]`: The dummy inputs.
        """
        # Add `decoder_input_ids` because `self.decoder` requires it.
        input_ids = tf.constant(DUMMY_INPUTS)
        dummy = {"input_ids": input_ids, "decoder_input_ids": input_ids}
        return dummy
    def get_encoder(self):
        # Used by `generate()` to run the encoder once before autoregressive decoding.
        return self.encoder
    def get_decoder(self):
        return self.decoder
    def get_input_embeddings(self):
        # Input tokens are embedded by the encoder.
        return self.encoder.get_input_embeddings()
    def get_output_embeddings(self):
        # The LM head (output embeddings) lives on the decoder.
        return self.decoder.get_output_embeddings()
    def set_output_embeddings(self, new_embeddings):
        return self.decoder.set_output_embeddings(new_embeddings)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""
        Initializing *TFEncoderDecoderModel* from a pytorch checkpoint is not supported currently.
        If there are only pytorch checkpoints for a particular encoder-decoder model, a workaround is:
        ```python
        >>> # a workaround to load from pytorch checkpoint
        >>> from transformers import EncoderDecoderModel, TFEncoderDecoderModel
        >>> _model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16")
        >>> _model.encoder.save_pretrained("./encoder")
        >>> _model.decoder.save_pretrained("./decoder")
        >>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained(
        ...     "./encoder", "./decoder", encoder_from_pt=True, decoder_from_pt=True
        ... )
        >>> # This is only for copying some specific attributes of this particular model.
        >>> model.config = _model.config
        ```
        Example:
        ```python
        >>> from transformers import TFEncoderDecoderModel
        >>> model = TFEncoderDecoderModel.from_pretrained("ydshieh/bert2bert-cnn_dailymail-fp16")
        ```"""
        # Fail early with guidance rather than attempting an unsupported PT->TF conversion.
        from_pt = kwargs.pop("from_pt", False)
        if from_pt:
            raise ValueError(
                "Initializing `TFEncoderDecoderModel` from a pytorch checkpoint is not supported currently. Use a"
                " tensorflow checkpoint instead. If only the pytorch checkpoints are available, create the encoder and"
                " decoder models separately, and use them to initialize `TFEncoderDecoderModel`. Check"
                " `TFEncoderDecoderModel.from_encoder_decoder_pretrained()` for more details."
            )
        return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
    @classmethod
    def from_encoder_decoder_pretrained(
        cls,
        encoder_pretrained_model_name_or_path: str = None,
        decoder_pretrained_model_name_or_path: str = None,
        *model_args,
        **kwargs
    ) -> TFPreTrainedModel:
        r"""
        Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
        checkpoints.
        Params:
            encoder_pretrained_model_name_or_path (`str`, *optional*):
                Information necessary to initiate the encoder. Can be either:
                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                      Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
                      user or organization name, like `dbmdz/bert-base-german-cased`.
                    - A path to a *directory* containing model weights saved using
                      [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or url to a *pytorch index checkpoint file* (e.g, `./pt_model/`). In this case,
                      `encoder_from_pt` should be set to `True`.
            decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
                Information necessary to initiate the decoder. Can be either:
                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                      Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
                      user or organization name, like `dbmdz/bert-base-german-cased`.
                    - A path to a *directory* containing model weights saved using
                      [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or url to a *pytorch checkpoint file* (e.g, `./pt_model/`). In this case,
                      `decoder_from_pt` should be set to `True`.
            model_args (remaining positional arguments, *optional*):
                All remaning positional arguments will be passed to the underlying model's `__init__` method.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
                `output_attentions=True`).
                - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
                - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
                - To update the parent model configuration, do not use a prefix for each configuration parameter.
                Behaves differently depending on whether a `config` is provided or automatically loaded.
        Example:
        ```python
        >>> from transformers import TFEncoderDecoderModel
        >>> # initialize a bert2gpt2 from two pretrained BERT models. Note that the cross-attention layers will be randomly initialized
        >>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "gpt2")
        >>> # saving model after fine-tuning
        >>> model.save_pretrained("./bert2gpt2")
        >>> # load fine-tuned model
        >>> model = TFEncoderDecoderModel.from_pretrained("./bert2gpt2")
        ```"""
        # Split kwargs by prefix: `encoder_*` for the encoder loader, `decoder_*` for the decoder loader.
        kwargs_encoder = {
            argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
        }
        kwargs_decoder = {
            argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
        }
        # remove encoder, decoder kwargs from kwargs
        for key in kwargs_encoder.keys():
            del kwargs["encoder_" + key]
        for key in kwargs_decoder.keys():
            del kwargs["decoder_" + key]
        # Load and initialize the encoder and decoder
        # The distinction between encoder and decoder at the model level is made
        # by the value of the flag `is_decoder` that we need to set correctly.
        encoder = kwargs_encoder.pop("model", None)
        if encoder is None:
            if encoder_pretrained_model_name_or_path is None:
                raise ValueError(
                    "If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has "
                    "to be defined."
                )
            if "config" not in kwargs_encoder:
                encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path)
                if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
                    logger.info(
                        f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model "
                        "from a decoder model. Cross-attention and casual mask are disabled."
                    )
                    encoder_config.is_decoder = False
                    encoder_config.add_cross_attention = False
                kwargs_encoder["config"] = encoder_config
            kwargs_encoder["name"] = "encoder"
            kwargs_encoder["load_weight_prefix"] = cls.load_weight_prefix
            encoder = TFAutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
        # This is necessary to make `from_pretrained` following `save_pretrained` work correctly
        if kwargs_encoder.get("from_pt", None):
            del kwargs_encoder["from_pt"]
            # Round-trip through a TF checkpoint so variable names get the expected prefix.
            with tempfile.TemporaryDirectory() as tmp_dirname:
                encoder.save_pretrained(tmp_dirname)
                del encoder
                encoder = TFAutoModel.from_pretrained(tmp_dirname, *model_args, **kwargs_encoder)
        decoder = kwargs_decoder.pop("model", None)
        if decoder is None:
            if decoder_pretrained_model_name_or_path is None:
                raise ValueError(
                    "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
                    "to be defined."
                )
            if "config" not in kwargs_decoder:
                decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path)
                if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
                    logger.info(
                        f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
                        f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
                        f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
                    )
                    decoder_config.is_decoder = True
                    decoder_config.add_cross_attention = True
                kwargs_decoder["config"] = decoder_config
            if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
                logger.warning(
                    f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
                    f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
                    "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
                    "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
                    "`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
                )
            kwargs_decoder["name"] = "decoder"
            kwargs_decoder["load_weight_prefix"] = cls.load_weight_prefix
            decoder = TFAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
        # This is necessary to make `from_pretrained` following `save_pretrained` work correctly
        if kwargs_decoder.get("from_pt", None):
            del kwargs_decoder["from_pt"]
            with tempfile.TemporaryDirectory() as tmp_dirname:
                decoder.save_pretrained(tmp_dirname)
                del decoder
                decoder = TFAutoModelForCausalLM.from_pretrained(tmp_dirname, **kwargs_decoder)
        # Make sure these 2 `tf.keras.Model` have fixed names so `from_pretrained` could load model weights correctly.
        if encoder.name != "encoder":
            raise ValueError("encoder model must be created with the name `encoder`.")
        if decoder.name != "decoder":
            raise ValueError("decoder model must be created with the name `decoder`.")
        # instantiate config with corresponding kwargs
        config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
        return cls(encoder=encoder, decoder=decoder, config=config)
    @unpack_inputs
    @add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        encoder_outputs=None,
        past_key_values=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
        **kwargs,
    ):
        r"""
        Returns:
        Examples:
        ```python
        >>> from transformers import TFEncoderDecoderModel, BertTokenizer
        >>> # initialize a bert2gpt2 from a pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
        >>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-cased", "gpt2")
        >>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
        >>> # forward
        >>> input_ids = tokenizer.encode(
        ...     "Hello, my dog is cute", add_special_tokens=True, return_tensors="tf"
        ... ) # Batch size 1
        >>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids)
        >>> # training
        >>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=input_ids)
        >>> loss, logits = outputs.loss, outputs.logits
        >>> # save and load from pretrained
        >>> model.save_pretrained("bert2gpt2")
        >>> model = TFEncoderDecoderModel.from_pretrained("bert2gpt2")
        >>> # generation
        >>> generated = model.generate(input_ids, decoder_start_token_id=model.config.decoder.bos_token_id)
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Un-prefixed extra kwargs go to the encoder; `decoder_`-prefixed ones to the decoder.
        kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
        kwargs_decoder = {
            argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
        }
        # Let the user be responsible for the expected format.
        if encoder_outputs is not None:
            if return_dict and not isinstance(encoder_outputs, ModelOutput):
                raise ValueError(
                    "If `return_dict=True` and `encoder_outputs` is provided, it should be an instance of "
                    f"`ModelOutput`. Got an instance {type(encoder_outputs)} for `encoder_outputs`."
                )
        if encoder_outputs is None:
            encoder_inputs = {
                "input_ids": input_ids,
                "attention_mask": attention_mask,
                "inputs_embeds": inputs_embeds,
                "output_attentions": output_attentions,
                "output_hidden_states": output_hidden_states,
                "return_dict": return_dict,
                "training": training,
            }
            # Add arguments to encoder from `kwargs_encoder`
            encoder_inputs.update(kwargs_encoder)
            # Handle the case where the inputs are passed as a single dict which contains `labels`.
            # The `labels` shouldn't be passed to `self.encoder` below, because it is a based model without this
            # parameter (otherwise, an error occurs when `input_processing` is called inside `self.encoder.call()`).
            if "labels" in encoder_inputs:
                labels = encoder_inputs.pop("labels")
            # handle the init case where `dummy_inputs` returns a dict containing `decoder_input_ids`.
            if "decoder_input_ids" in encoder_inputs:
                decoder_input_ids = encoder_inputs.pop("decoder_input_ids")
            # handle the init case where `dummy_inputs` returns a dict containing `decoder_input_ids`.
            if "decoder_attention_mask" in encoder_inputs:
                decoder_attention_mask = encoder_inputs.pop("decoder_attention_mask")
            encoder_outputs = self.encoder(**encoder_inputs)
        encoder_hidden_states = encoder_outputs[0]
        # optionally project encoder_hidden_states
        if (
            self.encoder.config.hidden_size != self.decoder.config.hidden_size
            and self.decoder.config.cross_attention_hidden_size is None
        ):
            encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
        # Teacher forcing: derive decoder inputs from the labels when none were supplied.
        if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
            decoder_input_ids = shift_tokens_right(
                labels, self.config.pad_token_id, self.config.decoder_start_token_id
            )
        decoder_inputs = {
            "input_ids": decoder_input_ids,
            "attention_mask": decoder_attention_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": attention_mask,
            "inputs_embeds": decoder_inputs_embeds,
            "output_attentions": output_attentions,
            "output_hidden_states": output_hidden_states,
            "use_cache": use_cache,
            "past_key_values": past_key_values,
            "return_dict": return_dict,
            "training": training,
        }
        # Add arguments to decoder from `kwargs_decoder`
        decoder_inputs.update(kwargs_decoder)
        decoder_outputs = self.decoder(**decoder_inputs)
        logits = decoder_outputs[0]
        # Compute loss independent from decoder (as some shift the logits inside them)
        loss = None
        if labels is not None:
            warnings.warn(DEPRECATION_WARNING, FutureWarning)
            loss = self.hf_compute_loss(labels, logits)
        if not return_dict:
            past_key_values = None
            if use_cache:
                past_key_values = decoder_outputs[1]
            # The starting index of the remaining elements in `decoder_outputs`
            start_index = sum([1 if x is not None else 0 for x in (loss, logits, past_key_values)])
            if not isinstance(encoder_outputs, tuple):
                encoder_outputs = encoder_outputs.to_tuple()
            output = (loss, logits, past_key_values) + decoder_outputs[start_index:] + encoder_outputs
            output = tuple([x for x in output if x is not None])
            return output
        return TFSeq2SeqLMOutput(
            loss=loss,
            logits=decoder_outputs.logits,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
    def serving_output(self, output):
        # Convert `call` outputs into static tensors for the SavedModel serving signature,
        # including only the components enabled by the model config.
        pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
        dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
        dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
        enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
        enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
        cross_attns = (
            tf.convert_to_tensor(output.cross_attentions)
            if self.config.output_attentions and output.cross_attentions is not None
            else None
        )
        return TFSeq2SeqLMOutput(
            logits=output.logits,
            past_key_values=pkv,
            decoder_hidden_states=dec_hs,
            decoder_attentions=dec_attns,
            encoder_last_hidden_state=output.encoder_last_hidden_state,
            encoder_hidden_states=enc_hs,
            encoder_attentions=enc_attns,
            cross_attentions=cross_attns,
        )
    def prepare_inputs_for_generation(
        self, input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
    ):
        # Delegate cache-aware slicing of the decoder inputs to the wrapped decoder.
        decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past=past)
        decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None
        past_key_values = decoder_inputs.get("past_key_values")
        if past_key_values is None:
            past_key_values = decoder_inputs.get("past")  # e.g. on TF GPT2
        input_dict = {
            "input_ids": None,  # needs to be passed to make Keras.layer.__call__ happy
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "decoder_input_ids": decoder_inputs["input_ids"],
            # TODO (joao): the `TFBaseModelOutput` wrapper should not be needed after the generate refactor is complete
            "encoder_outputs": TFBaseModelOutput(last_hidden_state=encoder_outputs[0]),
            "past_key_values": past_key_values,
            "use_cache": use_cache,
        }
        return input_dict
    def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
        # Same shift applied in `call` when labels are provided without decoder inputs.
        return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
    def resize_token_embeddings(self, *args, **kwargs):
        raise NotImplementedError(
            "Resizing the embedding layers via the TFEncoderDecoderModel directly is not supported.Please use the"
            " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or"
            " model.decoder.resize_token_embeddings(...))"
        )
    def _reorder_cache(self, past, beam_idx):
        # apply decoder cache reordering here
        return self.decoder._reorder_cache(past, beam_idx)
| [
"noreply@github.com"
] | newcodevelop.noreply@github.com |
1c8b03bf7d466036c4ee88b52660d0fb375c3018 | 2eff2779f0170ecf67915b5188c902b096d27683 | /modules.py | c157bf56e2f61fae7cd295a2c20de43024aae863 | [
"MIT"
] | permissive | sjyttkl/transformer-pointer-generator | 6dce026e65a4c670f85731bf12b5ab7b5d65797f | a801002ff13da715adcaab3e215b2457efb76ea0 | refs/heads/master | 2020-05-25T10:22:48.718177 | 2020-04-08T14:46:56 | 2020-04-08T14:46:56 | 187,758,383 | 0 | 0 | MIT | 2020-04-08T01:58:44 | 2019-05-21T03:56:00 | Python | UTF-8 | Python | false | false | 11,667 | py | # -*- coding: utf-8 -*-
#!/usr/bin/python3
'''
date: 2019/5/21
mail: cally.maxiong@gmail.com
page: http://www.cnblogs.com/callyblog/
'''
import numpy as np
import tensorflow as tf
def ln(inputs, epsilon = 1e-8, scope="ln"):
    '''Applies layer normalization. See https://arxiv.org/abs/1607.06450.
    inputs: A tensor with 2 or more dimensions, where the first dimension has `batch_size`.
    epsilon: A floating number. A very small number for preventing ZeroDivision Error.
    scope: Optional scope for `variable_scope`.
    Returns:
      A tensor with the same shape and data dtype as `inputs`.
    '''
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        # Normalize over the last (feature) axis only.
        feature_shape = inputs.get_shape()[-1:]
        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
        # Learnable per-feature shift and scale, shared via AUTO_REUSE.
        beta = tf.get_variable("beta", feature_shape, initializer=tf.zeros_initializer())
        gamma = tf.get_variable("gamma", feature_shape, initializer=tf.ones_initializer())
        standardized = (inputs - mean) / ((variance + epsilon) ** 0.5)
        return gamma * standardized + beta
def get_token_embeddings(vocab_size, num_units, zero_pad=True):
    '''Constructs the shared token embedding matrix.
    vocab_size: scalar. V.
    num_units: embedding dimensionality. E.
    zero_pad: Boolean. If True, the row for id 0 is forced to all zeros so that
      padding positions contribute nothing and query/key masks are easy to apply.
    Returns
      weight variable: (V, E)
    '''
    with tf.variable_scope("shared_weight_matrix", reuse=tf.AUTO_REUSE):
        weights = tf.get_variable('weight_mat',
                                  dtype=tf.float32,
                                  shape=(vocab_size, num_units),
                                  initializer=tf.contrib.layers.xavier_initializer())
        if not zero_pad:
            return weights
        # Replace row 0 (the <pad> id) with zeros; keep the remaining rows.
        zero_row = tf.zeros(shape=[1, num_units])
        return tf.concat((zero_row, weights[1:, :]), 0)
def scaled_dot_product_attention(Q, K, V,
                                 num_heads,
                                 causality=False, dropout_rate=0.,
                                 training=True,
                                 scope="scaled_dot_product_attention"):
    '''See 3.2.1 of "Attention Is All You Need".
    Q: Packed queries. 3d tensor. [N, T_q, d_k].
    K: Packed keys. 3d tensor. [N, T_k, d_k].
    V: Packed values. 3d tensor. [N, T_k, d_v].
    num_heads: number of heads the leading axis was split into by the caller.
      NOTE(review): assumes N here is actually h*batch_size (head-major packing) — confirm with caller.
    causality: If True, applies masking for future blinding
    dropout_rate: A floating point number of [0, 1].
    training: boolean for controlling dropout
    scope: Optional scope for `variable_scope`.
    Returns:
      outputs: weighted sum of values. 3d tensor. [N, T_q, d_v].
      attn_dists: attention distribution merged across heads (softmax of the
        per-head logits summed over the head groups); presumably the copy
        distribution for the pointer-generator — verify against caller.
    '''
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        d_k = Q.get_shape().as_list()[-1]
        # dot product
        outputs = tf.matmul(Q, tf.transpose(K, [0, 2, 1])) # (N, T_q, T_k)
        # scale by sqrt(d_k) to keep logits in a softmax-friendly range
        outputs /= d_k ** 0.5
        # key masking, delete key 0 (pad keys get a large negative logit)
        outputs = mask(outputs, Q, K, type="key")
        # causality or future blinding masking
        if causality:
            outputs = mask(outputs, type="future")
        # softmax
        # Merge heads by summing the logits of each head group, then softmax.
        attn_dists = tf.nn.softmax(tf.reduce_sum(tf.split(outputs, num_heads, axis=0), axis=0))
        outputs = tf.nn.softmax(outputs)
        # Log the first example's attention map to TensorBoard (side effect only).
        attention = tf.transpose(outputs, [0, 2, 1])
        tf.summary.image("attention", tf.expand_dims(attention[:1], -1))
        # query masking, delete query <pad> (zero out rows of pad queries)
        outputs = mask(outputs, Q, K, type="query")
        # dropout
        outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=training)
        # weighted sum (context vectors)
        outputs = tf.matmul(outputs, V) # (N, T_q, d_v)
        return outputs, attn_dists
def mask(inputs, queries=None, keys=None, type=None):
    """Masks paddings on keys or queries to inputs.

    inputs: 3d tensor. (N, T_q, T_k)
    queries: 3d tensor. (N, T_q, d)
    keys: 3d tensor. (N, T_k, d)
    type: which mask to apply -- "key"/"k"/"keys", "query"/"q"/"queries"
        or "future"/"f"/"right". (The parameter shadows the builtin `type`;
        kept for interface compatibility with existing callers.)

    Raises ValueError for an unrecognized `type`.

    e.g.,
    >> queries = tf.constant([[[1.],
                    [2.],
                    [0.]]], tf.float32) # (1, 3, 1)
    >> keys = tf.constant([[[4.],
                 [0.]]], tf.float32)  # (1, 2, 1)
    >> inputs = tf.constant([[[4., 0.],
                               [8., 0.],
                               [0., 0.]]], tf.float32)
    >> mask(inputs, queries, keys, "key")
    array([[[ 4.0000000e+00, -4.2949673e+09],
        [ 8.0000000e+00, -4.2949673e+09],
        [ 0.0000000e+00, -4.2949673e+09]]], dtype=float32)
    >> inputs = tf.constant([[[1., 0.],
                             [1., 0.],
                              [1., 0.]]], tf.float32)
    >> mask(inputs, queries, keys, "query")
    array([[[1., 0.],
        [1., 0.],
        [0., 0.]]], dtype=float32)
    """
    # Large negative value so masked logits vanish after softmax.
    padding_num = -2 ** 32 + 1
    if type in ("k", "key", "keys"):
        # Generate masks: 0 where the key row is all zeros (padding), 1 otherwise.
        masks = tf.sign(tf.reduce_sum(tf.abs(keys), axis=-1))  # (N, T_k)
        masks = tf.expand_dims(masks, 1) # (N, 1, T_k)
        masks = tf.tile(masks, [1, tf.shape(queries)[1], 1])  # (N, T_q, T_k)
        # Apply masks to inputs
        paddings = tf.ones_like(inputs) * padding_num
        outputs = tf.where(tf.equal(masks, 0), paddings, inputs)  # (N, T_q, T_k)
    elif type in ("q", "query", "queries"):
        # Generate masks: zero out the rows belonging to padded queries.
        masks = tf.sign(tf.reduce_sum(tf.abs(queries), axis=-1))  # (N, T_q)
        masks = tf.expand_dims(masks, -1)  # (N, T_q, 1)
        masks = tf.tile(masks, [1, 1, tf.shape(keys)[1]])  # (N, T_q, T_k)
        # Apply masks to inputs (multiplicative: padded query rows become 0)
        outputs = inputs*masks
    elif type in ("f", "future", "right"):
        # Lower-triangular mask blinds each position to later (future) keys.
        diag_vals = tf.ones_like(inputs[0, :, :])  # (T_q, T_k)
        tril = tf.linalg.LinearOperatorLowerTriangular(diag_vals).to_dense()  # (T_q, T_k)
        masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(inputs)[0], 1, 1])  # (N, T_q, T_k)
        paddings = tf.ones_like(masks) * padding_num
        outputs = tf.where(tf.equal(masks, 0), paddings, inputs)
    else:
        # Original code printed a warning here and then crashed with a
        # NameError (`outputs` unbound); fail fast with a clear error instead.
        raise ValueError(
            "type must be one of 'key', 'query' or 'future' (got {!r})".format(type))
    return outputs
def multihead_attention(queries, keys, values,
                        num_heads=8,
                        dropout_rate=0,
                        training=True,
                        causality=False,
                        scope="multihead_attention"):
    '''Applies multihead attention. See 3.2.2
    queries: A 3d tensor with shape of [N, T_q, d_model].
    keys: A 3d tensor with shape of [N, T_k, d_model].
    values: A 3d tensor with shape of [N, T_k, d_model].
    num_heads: An int. Number of heads. NOTE: d_model must be divisible by
        num_heads for the tf.split calls below -- assumed, confirm at call sites.
    dropout_rate: A floating point number.
    training: Boolean. Controller of mechanism for dropout.
    causality: Boolean. If true, units that reference the future are masked.
    scope: Optional scope for `variable_scope`.
    Returns
      A tuple (outputs, attn_dists): outputs is a 3d tensor with shape of
      (N, T_q, d_model); attn_dists is the head-aggregated attention
      distribution from scaled_dot_product_attention.
    '''
    d_model = queries.get_shape().as_list()[-1]
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        # Linear projections
        Q = tf.layers.dense(queries, d_model, use_bias=False) # (N, T_q, d_model)
        K = tf.layers.dense(keys, d_model, use_bias=False) # (N, T_k, d_model)
        V = tf.layers.dense(values, d_model, use_bias=False) # (N, T_k, d_model)
        # Split into heads and pack them along the batch axis
        Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0) # (h*N, T_q, d_model/h)
        K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0) # (h*N, T_k, d_model/h)
        V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0) # (h*N, T_k, d_model/h)
        # Attention
        outputs, attn_dists = scaled_dot_product_attention(Q_, K_, V_, num_heads, causality, dropout_rate, training)
        # Restore shape: unpack the heads back onto the feature axis
        outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2) # (N, T_q, d_model)
        # Residual connection
        outputs = queries + outputs
        # Normalize (layer normalization helper defined elsewhere in this module)
        outputs = ln(outputs)
    return outputs, attn_dists
def ff(inputs, num_units, scope="positionwise_feedforward"):
    '''Position-wise feed-forward network. See 3.3.
    inputs: A 3d tensor with shape of [N, T, C].
    num_units: A list of two integers [hidden_size, output_size].
    scope: Optional scope for `variable_scope`.
    Returns:
      A 3d tensor with the same shape and dtype as inputs.
    '''
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        # Inner projection with ReLU, then the outer linear projection.
        hidden = tf.layers.dense(inputs, num_units[0], activation=tf.nn.relu)
        projected = tf.layers.dense(hidden, num_units[1])
        # Residual connection followed by layer normalization.
        return ln(projected + inputs)
def label_smoothing(inputs, epsilon=0.1):
    '''Applies label smoothing. See 5.4 and https://arxiv.org/abs/1512.00567.
    inputs: 3d tensor. [N, T, V], where V is the number of vocabulary.
    epsilon: Smoothing rate.

    Each target distribution is relaxed towards uniform:
        output = inputs * (1 - epsilon) + epsilon / V
    so a one-hot [0, 0, 1] becomes roughly [eps/V, eps/V, 1 - eps + eps/V],
    e.g. [0.0333, 0.0333, 0.9333] for V = 3 and epsilon = 0.1.
    '''
    num_classes = tf.cast(tf.shape(inputs)[-1], tf.float32)  # V as a float scalar
    smoothed = inputs * (1 - epsilon)
    return smoothed + epsilon / num_classes
def positional_encoding(inputs,
                        maxlen,
                        masking=True,
                        scope="positional_encoding"):
    '''Sinusoidal Positional_Encoding. See 3.5
    inputs: 3d tensor. (N, T, E)
    maxlen: scalar. Must be >= T
    masking: Boolean. If True, padding positions are set to zeros.
    scope: Optional scope for `variable_scope`.
    returns
    3d tensor that has the same shape as inputs.
    '''
    E = inputs.get_shape().as_list()[-1] # static
    N, T = tf.shape(inputs)[0], tf.shape(inputs)[1] # dynamic
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        # position indices 0..T-1, replicated for every batch element
        position_ind = tf.tile(tf.expand_dims(tf.range(T), 0), [N, 1]) # (N, T)
        # First part of the PE function: sin and cos argument
        # pos / 10000^(2i/E), computed with numpy at graph-build time
        position_enc = np.array([
            [pos / np.power(10000, (i-i%2)/E) for i in range(E)]
            for pos in range(maxlen)])
        # Second part: apply sin to even columns and cos to odd columns.
        position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])  # dim 2i
        position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])  # dim 2i+1
        position_enc = tf.convert_to_tensor(position_enc, tf.float32) # (maxlen, E)
        # lookup: pick the first T rows of the encoding table per position
        outputs = tf.nn.embedding_lookup(position_enc, position_ind)
        # masks: element-wise -- wherever inputs is exactly 0, keep the 0
        # (padding positions are assumed to be all-zero embeddings -- confirm)
        if masking:
            outputs = tf.where(tf.equal(inputs, 0), inputs, outputs)
    return tf.to_float(outputs)
def noam_scheme(d_model, global_step, warmup_steps=4000.):
'''Noam scheme learning rate decay
d_model: encoder and decoder embedding
global_step: scalar.
warmup_steps: scalar. During warmup_steps, learning rate increases
until it reaches init_lr.
'''
step = tf.cast(global_step + 1, dtype=tf.float32)
return d_model ** -0.5 * tf.minimum(step * warmup_steps ** -1.5, step ** -0.5) | [
"maxiong@juexiaotime.com"
] | maxiong@juexiaotime.com |
0e48b30a06104cba35625dfe97b6f03f276fffcb | c553f9d608c435cd7f19c9be0ef512307295a837 | /daemin/greedy/실전문제/1.모험가길드.py | cfc80dc0c3cd461720a12db2077c822dd132f7b8 | [] | no_license | Green0v0/Algorithm | 2d089e7c016997c1fb5e1094ddeeb80cd1ce0485 | ab9b387e63550ef1b5dfe0f851163b16fbd42c88 | refs/heads/main | 2023-05-24T05:37:17.125671 | 2021-06-16T05:35:52 | 2021-06-16T05:35:52 | 330,944,982 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | # n = int(input())
# k = list(map(int,input().split(" ")))
n= 5
data = [2,3,1,2,2]
data.sort() # sorts in place / reverse=True would give descending order
result = 0 # number of groups formed
count = 0 # number of adventurers in the current (open) group
"""
1. The basic idea is to form groups starting from the adventurers with the
   lowest fear level (smallest number).
2. Put one adventurer into count; if the next adventurer's fear level fits
   within the current group size, the group is completed.
3. If the fear level is larger, no group (result) is formed yet; go back to
   the loop, take the next adventurer and add 1 to count.
4. Whenever the condition is satisfied, increment the group counter.
"""
for i in data:
    count +=1
    if count >= i: # once the group size reaches the fear level i, close the group
        result +=1
        count=0
print(result)
# First attempt: failed //
# With input (4,3,2,2,2,1,1,1,1,1), once 4,3,2 are left they can no longer
# form a valid group, yet the code below still counts them.
#
#
# while True:
#     m = min(k)
#     for _ in range(m):
#         k.pop() # This part is the bug! Popping m times means that for the trailing 4,3,2 the 2 triggers two pops, wiping out the 3 as well..
#     count += 1
#     if len(k)==0:
#         break
#     print(count)
| [
"noreply@github.com"
] | Green0v0.noreply@github.com |
442049f9a967f6262fc2c1a0afc3ec8b8dbce63d | d02e279c61c111d250812946f299828330947ed6 | /easy/remove duplicates from sorted array.py | a530a8fb95464ecd776b7a09d7109cf33a851c59 | [] | no_license | wlawt/leetcode | b1599528e027bd8bfd2581f3bc56bb3680118c4b | c00fdce2f5f1ed1acc15f74f98c99b7139fedb50 | refs/heads/master | 2023-02-20T05:42:34.307169 | 2021-01-22T01:48:30 | 2021-01-22T01:48:30 | 326,090,331 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | class Solution:
def removeDuplicates(self, nums: List[int]) -> int:
i = 0
for j, num in enumerate(nums):
if nums[i] == nums[j]:
continue
i += 1
nums[i] = nums[j]
return i+1 | [
"williamlaw.wtl@gmail.com"
] | williamlaw.wtl@gmail.com |
3b603565c919ef6876ce0e0fd38176333bf18b69 | 78bb5eb64ec5f21b7beb68b6e89ea45f999c6687 | /leetoj/191.py | b2ae838d4394dabe5ae235f4febae8734fd969b8 | [
"MIT"
] | permissive | R11happy/misc | f003180e5c4b4389765f17f12d228edef7b249b3 | 1f642e9ee6bdf3e553402b522745c4f4ef2cd922 | refs/heads/master | 2021-01-23T08:20:58.980636 | 2016-03-30T15:06:29 | 2016-03-30T15:06:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | # Number of 1 Bits
# https://leetcode.com/problems/number-of-1-bits/
class Solution:
    # @param n, an integer
    # @return an integer
    def hammingWeight(self, n):
        """Return the number of set bits (Hamming weight) of n."""
        bit_count = 0
        while n:
            # Peel off the lowest bit, then shift it away.
            n, lowest = n >> 1, n & 1
            bit_count += lowest
        return bit_count
# print Solution().hammingWeight(11)
| [
"jayson.hjs@gmail.com"
] | jayson.hjs@gmail.com |
83f7bc9d5180996418febbc8a0f8a83a4fbb3d89 | b88435c95597d165d7634f9cebe83bcd787bcce6 | /task.py | 9c0909bf5e812834e1bd1f87c40dbb4ba509fbc6 | [] | no_license | 44Schwarz/exit-nodes-analysis | d40cd4e64abd89117ec876cfdb04f262975fea93 | 3d6eb9d3d96748446f7841fabf69f258d151841b | refs/heads/master | 2020-08-04T16:04:56.555620 | 2019-10-02T08:58:03 | 2019-10-02T08:58:03 | 212,196,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,468 | py | #!/usr/bin/env python3
import os
import json
import lzma
import tarfile
import urllib.request
from urllib.error import URLError
import pandas as pd
url_path = 'https://collector.torproject.org/archive/exit-lists/'
path_to_extract = '/tmp/exit-nodes'
files = ['exit-list-2019-07.tar.xz', 'exit-list-2019-08.tar.xz']
months = ('July', 'August', 'Both')
def unpack_archive(file):
    """Decompress an .tar.xz archive into the module-level path_to_extract.

    A missing archive is silently ignored (FileNotFoundError is swallowed),
    so an earlier failed download does not abort the whole run.

    NOTE(review): tar.extractall() without member sanitization is vulnerable
    to path traversal for a maliciously crafted archive; since these archives
    are downloaded over the network, consider validating member names.
    """
    try:
        with lzma.open(file) as f:
            with tarfile.open(fileobj=f) as tar:
                tar.extractall(path_to_extract)
    except FileNotFoundError:
        pass
def read_file(file):
    """Parse one Tor exit-list file and collect its ExitAddress entries.

    Returns a list of [ip_address, date_string] pairs. A missing file
    yields an empty list.
    """
    records = []
    try:
        handle = open(file)
    except FileNotFoundError:
        return records
    with handle:
        next(handle)
        next(handle)  # the first two lines are headers
        for raw_line in handle:
            label, payload = raw_line.strip().split(' ', 1)
            if label != 'ExitAddress':
                continue
            # payload is "<ip> <timestamp>"; keep both halves
            records.append(payload.split(' ', 1))
    return records
def parse_files(list_of_files):
    """Aggregate ExitAddress entries from the given files into summary stats.

    Returns a dict with keys:
      'ip'         -- the most frequently seen exit IP address,
      'first_seen' -- earliest date string recorded for that IP,
      'last_seen'  -- latest date string recorded for that IP,
      'unique_ips' -- all distinct IPs seen (used later for set differences),
    or an empty dict when no entries were found in any file.
    """
    nodes_list = list()
    res = dict()
    for file in list_of_files:
        lines = read_file(file)
        nodes_list.extend(lines)
    titles = ('ExitAddress', 'AddressDate')
    df = pd.DataFrame(nodes_list, columns=titles)
    if df.empty:
        return res
    gr = df.groupby(['ExitAddress']).size().reset_index(name='Count')
    ip = gr.sort_values(by='Count', ascending=False)['ExitAddress'].iloc[0] # most frequent IP address
    filtered_by_ip = df.loc[df['ExitAddress'] == ip].sort_values(by='AddressDate')
    date_first = filtered_by_ip['AddressDate'].iloc[0] # first seen
    date_last = filtered_by_ip['AddressDate'].iloc[-1] # last seen
    unique_ips = df['ExitAddress'].unique().tolist() # for calculating difference
    res['ip'] = ip
    res['first_seen'] = date_first
    res['last_seen'] = date_last
    res['unique_ips'] = unique_ips
    return res
def get_all_files():
    """Walk each extracted archive, compute per-archive and combined summary
    stats (via parse_files), and persist them with write_result.

    When both archives produced results, the 'unique_ips' lists are replaced
    by the IPs unique to each month, and a third combined entry is appended.
    """
    results = list()
    list_of_files = list()
    for file in files:
        list_of_files.append([])
        dest_dir = file.split('.', 1)[0] # archive name without extension
        for (dirpath, dirnames, filenames) in os.walk(os.path.join(path_to_extract, dest_dir)):
            for filename in filenames:
                # dirpath is absolute here (path_to_extract is absolute), so
                # os.path.join discards the earlier components -- harmless.
                list_of_files[-1].append(os.path.join(path_to_extract, dest_dir, dirpath, filename)) # full file path
    for files_for_month in list_of_files:
        parsing = parse_files(files_for_month) # calculate separate results for months
        if parsing:
            results.append(parsing)
    if len(results) == len(files): # if results exist for both months
        first_ips, second_ips = [results[i].get('unique_ips') for i in (0, 1)]
        results[0]['unique_ips'] = difference(first_ips, second_ips)
        results[1]['unique_ips'] = difference(second_ips, first_ips)
        if results[0]['ip'] == results[1]['ip']: # both months have the same most frequent IP
            # Therefore take first_seen from the 1st month, last_seen from the 2nd month
            results.append({'ip': results[0]['ip'], 'first_seen': results[0]['first_seen'],
                            'last_seen': results[1]['last_seen'], 'unique_ips': []})
        else:
            # Calculate results for both months together
            parsing = parse_files(sum(list_of_files, []))
            if parsing:
                results.append(parsing)
                results[-1]['unique_ips'] = []
    write_result(results)
def write_result(results):
    """Serialize the results to results.json, keyed by the month labels.

    When the number of result dicts does not match the months tuple,
    an empty JSON object is written.
    """
    payload = {}
    if len(results) == len(months):
        payload = {label: entry for label, entry in zip(months, results)}
    with open('results.json', 'w') as out_file:
        out_file.write(json.dumps(payload))
def difference(first, second):
    """Return the elements of `first` not present in `second`,
    preserving the original order of `first` (ordered set difference)."""
    excluded = set(second)
    kept = []
    for element in first:
        if element not in excluded:
            kept.append(element)
    return kept
if __name__ == '__main__':
    # Ensure the extraction directory exists; ignore it already existing.
    try:
        os.makedirs(path_to_extract)
    except FileExistsError:
        pass
    if not os.path.isdir(path_to_extract):
        # The path exists but is not a directory: cannot proceed.
        print(f"{path_to_extract} is not a directory")
        exit()
    # Download and unpack each monthly exit-list archive.
    for archive in files:
        try:
            urllib.request.urlretrieve(os.path.join(url_path, archive), os.path.join(path_to_extract, archive))
        except URLError: # proceed even if file wasn't downloaded
            print(f"Error while retrieving a file {archive}")
        unpack_archive(os.path.join(path_to_extract, archive))
    get_all_files()
| [
"sirmitya@gmail.com"
] | sirmitya@gmail.com |
b1cd7818425684f94cdc1bae824fd9ee13392995 | 1c56115ba312563fd2eba6f410c9ca8a23b796cf | /standard/bin/wheel | 4d1f9281f9fadf5c9fab3264444f728aab28cf31 | [] | no_license | nahkranoth/WebGL-Particles | caa19b76f1793f834705bac988289070f1ad738a | 01128ed04696755d6592b2aba11bf7c4bdc6ab87 | refs/heads/master | 2021-05-08T18:52:10.900788 | 2018-01-30T13:37:03 | 2018-01-30T13:37:03 | 119,540,046 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | #!/Users/joey/Documents/Misc/JavascriptTestbed/standard/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
    # Normalize argv[0]: strip the setuptools "-script.py"/".exe" wrapper
    # suffix so the tool reports its plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"joey@squla.com"
] | joey@squla.com | |
00b204dd1c59a7f8d99f85a898d26452b44fb647 | 0cbd245ba67ada0dd04e8a61471b2bc2bbacdc47 | /App09_RealEstate_DataMiner/app9.py | 8c7df6853b363cbc8fa964ed55f68f41a46db523 | [] | no_license | ptsouth97/pythonapps | 7ed0a121f35669d0bb177d88ef9aa09828bea813 | ee239a02c553fb9d2672f50a4b4c49b4ea4396f0 | refs/heads/master | 2021-01-12T04:31:31.687181 | 2017-02-05T21:07:42 | 2017-02-05T21:07:42 | 77,632,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,577 | py | import os
import csv
from data_types import Purchase
import statistics
def main():
print_header()
filename = get_data_file()
# print(filename)
data = load_file(filename)
query_data(data)
def print_header():
print('------------------------')
print(' Real Estate App')
print('------------------------')
print()
def get_data_file():
base_folder = os.path.dirname(__file__)
return os.path.join(base_folder, 'data', 'SacramentoRealEstateTransactions2008.csv')
def load_file(filename):
with open(filename, 'r', encoding='utf-8') as fin:
reader = csv.DictReader(fin)
purchases = []
for row in reader:
# print(type(row), row)
# print("Bed count: {}".format(row['beds']))
p = Purchase.create_from_dict(row)
purchases.append(p)
return purchases
# print(purchases[0].__dict__)
# header = fin.readline().strip()
# reader = csv.reader(fin, delimiter=',')
# for row in reader:
# print(row)
# beds = row[4]
# def load_file_basic(filename):
# with open(filename, 'r', encoding='utf-8') as fin:
# header = fin.readline().strip()
# print('found header: ' + header)
#
# lines = []
# for line in fin:
# line_data = line.strip().split(',')
# lines.append(line_data)
#
# print(lines[:5])
# def get_price(p):
# return p.price
def query_data(data):
    """Print summary statistics for a list of purchases.

    data: list of objects exposing price, beds, baths and sq__ft attributes.
    Note: sorts `data` in place by price.
    """
    # Guard: an empty data set would crash on data[-1] / statistics.mean below.
    if not data:
        print("No purchase data found.")
        return
    data.sort(key=lambda p: p.price)
    # most expensive house
    high_purchase = data[-1]
    print("The most expensive house is ${:,} with {} beds and {} baths".format(high_purchase.price, high_purchase.beds, high_purchase.baths))
    # least expensive house
    low_purchase = data[0]
    print("The least expensive house is ${:,} with {} beds and {} baths".format(low_purchase.price, low_purchase.beds, low_purchase.baths))
    # average home price
    prices = [p.price for p in data]
    ave_price = statistics.mean(prices)
    print("The average home price is ${:,}".format(int(ave_price)))
    # average stats of 2-bedroom homes
    two_bed_homes = [p for p in data if p.beds == 2]
    # Guard: statistics.mean raises StatisticsError on empty data.
    if not two_bed_homes:
        return
    ave_price = statistics.mean([p.price for p in two_bed_homes])
    ave_baths = statistics.mean([p.baths for p in two_bed_homes])
    ave_sqft = statistics.mean([p.sq__ft for p in two_bed_homes])
    print("The average price of a 2-bedroom home is ${:,}, baths={}, sq ft={:,}".format(int(ave_price), round(ave_baths, 1), round(ave_sqft, 1)))
if __name__ == '__main__':
    # Run the report only when executed as a script, not on import.
    main()
| [
"ptsouth97@gmail.com"
] | ptsouth97@gmail.com |
48b47a669edfd7614332e5b5562190fc0cd87264 | 1ae64c00be74b1e66f84d59e8f4ebeb7e2720808 | /Tests/test_HomePage.py | d09e519b0753cfe0945903a019555a0314cc1c35 | [] | no_license | 1234ronit/100ms | b48888001d51f903a5f19b720b8809f36dfb3e5b | 1fce60c8c94c6f17e119866f722529d5e4a0c9ca | refs/heads/main | 2023-08-21T03:00:58.802166 | 2021-10-11T12:56:53 | 2021-10-11T12:56:53 | 415,644,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,272 | py | from Config.config import TestData
from Pages.HomePage import HomePage
from Pages.LoginPage import LoginPage
from Tests.test_base import BaseTest
class Test_Home(BaseTest):
    """Selenium tests for the home page (webdriver supplied by BaseTest)."""
    def test_home_page_video_conf(self):
        """Log in with the configured credentials and start a video conference."""
        self.loginPage = LoginPage(self.driver)
        self.loginPage.do_login(TestData.USER_NAME, TestData.PASSWORD)
        self.homePage = HomePage(self.driver)
        self.homePage.do_start_video_conf()
    # The tests below are disabled (kept inside a string literal), retained
    # for reference.
    """
    def test_home_page_title(self):
        self.loginPage = LoginPage(self.driver)
        homePage = self.loginPage.do_login(TestData.USER_NAME, TestData.PASSWORD)
        title = homePage.get_home_page_title(TestData.HOME_PAGE_TITLE)
        assert title == TestData.HOME_PAGE_TITLE
    def test_home_page_account(self):
        self.loginPage = LoginPage(self.driver)
        homePage = self.loginPage.do_login(TestData.USER_NAME, TestData.PASSWORD)
        account = homePage.get_account_name_value()
        assert account == TestData.HOME_PAGE_ACCOUNT
    def test_home_page_header(self):
        self.loginPage = LoginPage(self.driver)
        homePage = self.loginPage.do_login(TestData.USER_NAME, TestData.PASSWORD)
        header = homePage.get_header_value()
        assert header == TestData.HOME_PAGE_HEADER
    """
| [
"36254743+1234ronit@users.noreply.github.com"
] | 36254743+1234ronit@users.noreply.github.com |
f71aa4a95d21c4966d26433be321c6323c914553 | 1a08f936e48693fab3148101e8457a5b767d0f68 | /mod_rgb/change_mod_rgb_address.py | d0d2c15ba36e0eb73ad699320fe93819444c2b65 | [
"MIT"
] | permissive | rob-smallshire/mod-rgb | 287741aedd9a605549322d71a3b9261a2c1cea92 | f08aaed966fddab009254a287f8c0a6097b7ae5c | refs/heads/master | 2016-09-06T10:50:21.247994 | 2015-02-24T10:12:34 | 2015-02-24T10:12:34 | 31,170,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,323 | py | #!/usr/bin/env python3
"""Change MOD-RGB address.
Usage:
change-mod-rgb-address.py <old-address> <new-address>
[--device=<dev-num>] [(-q | --quiet)] [(-n | --no-action)]
change-mod-rgb-address.py -h | --help
Options:
--device=<dev-num> Device number [default: 1].
-q --quiet Do not prompt for jumper changes.
-n --no-action Omit reprogramming command.
-h --help Show this screen.
"""
import os
import sys
from docopt import docopt, DocoptExit
from smbus import SMBus
from mod_rgb.control import ensure_mod_rgb_device, change_address
def main(args=None):
    """Reprogram a MOD-RGB board's I2C address.

    args: pre-parsed docopt argument dict; parsed from sys.argv when None.
    Returns an os.EX_* exit status code suitable for sys.exit().

    NOTE(review): the except clauses below reference CompatibilityError and
    AddressRangeError, but only ensure_mod_rgb_device and change_address are
    imported from mod_rgb.control above -- if either error path is hit, a
    NameError is raised instead of the intended handling. Confirm where these
    exception types live and import them.
    """
    if args is None:
        args = docopt(__doc__, version='Change MOD-RGB address 1.0')
    # base=0 lets int() accept decimal, hex (0x..), octal and binary literals.
    try:
        old_address = int(args['<old-address>'], base=0)
        new_address = int(args['<new-address>'], base=0)
    except ValueError:
        print("Address must be an integer", file=sys.stderr)
        print(DocoptExit.usage, file=sys.stderr)
        return os.EX_USAGE
    try:
        device_num = int(args['--device'], base=0)
    except ValueError:
        print("Device number must be an integer", file=sys.stderr)
        print(DocoptExit.usage, file=sys.stderr)
        return os.EX_USAGE
    prompt = not args['--quiet']
    act = not args['--no-action']
    try:
        bus = SMBus(device_num)
    except OverflowError as e:
        print("Could not attach to I2C/SMBus. Device number {!r} is invalid".format(device_num), file=sys.stderr)
        return os.EX_DATAERR
    except IOError as e:
        print("Could not attach to I2C/SMBus. I/O error:", e, file=sys.stderr)
        return os.EX_IOERR
    # Validate the device at the old address before changing anything.
    try:
        ensure_mod_rgb_device(bus, old_address)
    except CompatibilityError as e:
        print(e, file=sys.stderr)
        return os.EX_UNAVAILABLE
    except AddressRangeError as e:
        print(e, file=sys.stderr)
        return os.EX_USAGE
    except IOError as e:
        print("I/O error:", e, file=sys.stderr)
        print("Incorrect <old-address> {} ({})?"
              .format(old_address, hex(old_address)), file=sys.stderr)
        return os.EX_IOERR
    if prompt:
        print("The DMX_EN jumper on device {} ({}) must be closed"
              .format(old_address, hex(old_address)))
        print("Press RETURN to continue.")
        input()
    try:
        change_address(bus, old_address, new_address, act)
    except AddressRangeError as e:
        print(e, file=sys.stderr)
        return os.EX_USAGE
    except IOError as e:
        print("I/O error:", e, file=sys.stderr)
        return os.EX_IOERR
    if prompt:
        print("Now open the DMX_EN jumper on device {} ({}) to "
              "avoid inadvertently changing it."
              .format(new_address, hex(new_address)))
        print("Press RETURN to continue.")
        input()
    # Confirm the device now answers at the new address.
    try:
        ensure_mod_rgb_device(bus, new_address)
    except CompatibilityError as e:
        print("Reprogramming FAILED!", file=sys.stderr)
        print(e, file=sys.stderr)
        return os.EX_UNAVAILABLE
    except IOError as e:
        print("I/O error on device at <new-address> {} ({}): {}"
              .format(new_address, hex(new_address), e), file=sys.stderr)
        return os.EX_IOERR
    return os.EX_OK
if __name__ == '__main__':
    # Propagate main()'s os.EX_* status code to the shell.
    sys.exit(main())
| [
"robert@smallshire.org.uk"
] | robert@smallshire.org.uk |
e2223056d56f87835517a312220bed8860ed627b | 4000b76810c173e1dc9403e6e98176e676ae50a5 | /data_visualize.py | e74467515007d04cd6e6f510c1f975cfa17ca366 | [] | no_license | barrelo89/Gait-IEEE-Access | 93707d82af6b1940d6d06598a007400966169455 | 5849850f43292386962a94bd287d6235b79312b9 | refs/heads/master | 2022-04-02T02:07:34.804735 | 2020-01-08T06:06:02 | 2020-01-08T06:06:02 | 79,977,850 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,968 | py | import os
import pandas as pd
import matplotlib.pyplot as plt
# Plot each sensor file of each activity and save the figure as a PDF.
path = "processed_data/"
activity_name = ['walk']#['open', 'run', 'type', 'walk']
col = ['accx','accy','accz','gyrox','gyroy','gyroz']#, 'HR'
img_save_path = 'img'
# NOTE(review): target_path is derived from activity_name[0] only; with
# multiple activities all figures land in the first activity's folder.
type_path = activity_name[0]
target_path = os.path.join(img_save_path, type_path)
if not os.path.exists(target_path):
    os.makedirs(target_path)
for activity in activity_name:
    print(activity)
    folder_path = os.path.join(path, activity)
    file_list = os.listdir(folder_path)
    for file_name in file_list:
        print(file_name)
        file_path = os.path.join(folder_path, file_name)
        # Skip the first two rows (header), then coerce everything to float.
        data = pd.read_csv(file_path, names=col, delimiter=',').values[2:, :]
        data = data.astype(float)
        #accx = data[:, 0]
        #accy = data[:, 1]
        #accz = data[:, 2]
        #gyrox = data[:, 3]
        #gyroy = data[:, 4]
        #gyroz = data[:, 5]
        #heart_rate = data[:, 6]
        # One stacked subplot per sensor channel, sharing the x axis.
        figure, axes = plt.subplots(6, sharex = True)
        data_name = ['Acc X', 'Acc Y', 'Acc Z', 'Gyro X', 'Gyro Y', 'Gyro Z']#, 'Heart Rate'
        for idx, ax in enumerate(axes):
            ax.plot(data[500:20000, idx])#[500:5000, idx]
            ax.set_title(data_name[idx])
            ax.set_yticks([int(data[:, idx].min()), int(data[:, idx].max())])
            # Dotted vertical lines every 100 samples mark period boundaries.
            ax.axvline(100, color = 'k', linestyle = 'dotted', label = 'Period')
            ax.axvline(200, color = 'k', linestyle = 'dotted')
            ax.axvline(300, color = 'k', linestyle = 'dotted')
            ax.axvline(400, color = 'k', linestyle = 'dotted')
            ax.axvline(500, color = 'k', linestyle = 'dotted')
            ax.axvline(600, color = 'k', linestyle = 'dotted')
            ax.axvline(700, color = 'k', linestyle = 'dotted')
            ax.axvline(800, color = 'k', linestyle = 'dotted')
        plt.tight_layout()
        plt.savefig(os.path.join(target_path, file_name.split('.')[0] + '.pdf'))
        plt.close()
#end
| [
"noreply@github.com"
] | barrelo89.noreply@github.com |
c667b5bd38756b36006f7fa9499470cf8b2d0666 | 7aed4db02c3d70e2a73ebc2cd8b96f6130e1467d | /news/admin.py | ec69aa0ea85f5b9a826bb986b1b550f18e53cbd5 | [] | no_license | xuqidog/minicms | 9fde77f83719f355ff2f39b974f85a7313098a74 | 7e52466e399a23399defd541770e0fbbb3c23d57 | refs/heads/master | 2020-04-06T03:55:02.658771 | 2017-03-04T08:32:34 | 2017-03-04T08:32:34 | 83,104,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | from django.contrib import admin
from .models import Column, Article
# Register your models here.
class ColumnAdmin(admin.ModelAdmin):
    """Admin configuration for Column; fields shown in the changelist."""
    list_display = ('name', 'slug', 'intro', 'nav_display', 'home_display')
class ArticleAdmin(admin.ModelAdmin):
    """Admin configuration for Article; fields shown in the changelist."""
    list_display = ('title', 'slug', 'author', 'pub_date', 'update_time')
# Expose the Column model in the Django admin with its custom configuration.
admin.site.register(Column, ColumnAdmin)
admin.site.register(Article, ArticleAdmin) | [
"xuqidong@xuqidong.local"
] | xuqidong@xuqidong.local |
e4bf0122a1873ff6d44339010924a8840544533c | aa2103feadc4e12098aee10d98eed1881b128b00 | /Exact/linearProgram.py | 6c166f7d3cb011be17d54c167cbd166fb98fd367 | [] | no_license | NicoleKappelhof/Bachelor-thesis | 9ed384d1337477e0318ec0eafdad32c6bda419de | 23d5f6af00e0fdccab652ec91e31a5d07c0f39d0 | refs/heads/master | 2021-01-17T00:42:15.954032 | 2016-07-10T15:25:59 | 2016-07-10T15:25:59 | 63,002,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,955 | py | """
parameters:
ImproveStartTime or ImproveStartGap can be set to a specific value. If this value is reached,
the MIP will no longer continue to prove optimality but rather improve the bound
MIPFocus, set to 1 for focus on feasible solution, set to 2 for focus on optimal solution,
set to 3 for improving the bound
MIPGap, the solver will terminate when the relative gap between the lower and upper
objective bound is less than MIPGap times the upper bound. Default is 1e-4, maximum is 1.
Method, set to 0 for primal simplex, 1 for dual simplex, 2 for the parallel barrier and
3 for the concurrent (only for root node)
NodeMethod, set to 0 for primal simplex, 1 for dual simplex (default), 2 for the parallel barrier and
3 for the concurrent (only for non-root nodes)
NodefileStart, if the amount of memory in GB used to store nodes exceeds the specified
parameter value, nodes are written to disk (a setting of 0.5 is recommended)
Sifting, if you have many more variables than restrictions (100 to 1 or more). The sifting parameter
is also set by Gurobi according to this ratio, so you prob wont have to touch it
Thread, each thread in parallel MIP requires a copy of the model. Reducing the
Threads parameter can sometimes significantly reduce memory usage. By default, concurrent and barrier
will use all available cores in your machine.
Heuristics, controls the fraction of runtime spent on feasibility heuristics. Increasing the parameter can
lead to more and better feasible solutions, but it will also reduce the rate of progress in the best bound.
Default is 0.05, maximum is 1.
Cuts, (finer grain: FlowCoverCuts, MIRCuts) can be set to Aggressive (2), Conservative (1), Automatic (-1),
or None (0). The more specific parameters override the more general.
Very easy models can sometimes benefit from turning cuts off, while extremely difficult models can benefit
from turning them to their Aggressive setting.
Presolve, the aggressiveness level of presolve. Options are Aggressive (2), Conservative (1), Automatic (-1),
or None (0). More aggressive application of presolve takes more time, but can sometimes lead to a significantly
tighter model.
PreDepRow, the presolve dependent row reduction, which eliminates linearly dependent constraints from the constraint
matrix. The default setting (-1) applies the reduction to continuous models but not to MIP models. Setting 0 turns
the reduction off for all models. Setting 1 turns it on for all models.
Aggregate, controls whether presolve performs constraint aggregation, 1 is default (and maximum), 0 is minimum
SubMIPNodes, maximum number of nodes explored by the RINS heuristic. Exploring more nodes can produce
better solutions, but it generally takes longer. Range 0-MAXINT, 500 is default.
TuneTimeLimit, maximal total tuning runtime of tuning (in seconds). The default setting (-1) chooses a time limit automatically.
"""
from gurobipy import *
import itertools
from ThreeOrMore import Distances
#N=[[1,1,1],[2,7,1],[3,1,1],[4,5,1],[5,9,1],[6,9,1],[7,9,1],[8,9,1]]
#distanceObject = Distances.distances(N)
def lpK_sets(N, distanceO,D,K):
    """
    Set LP
    Set-partitioning formulation: one binary variable per feasible subset of
    nodes of size <= K, with the subset's minimum tour length as its objective
    coefficient; each node must be covered by exactly one chosen subset.
    N: list of nodes; node[0] is taken to be a 1-based node id (used to map
       variable names back to nodes below) -- confirm against callers.
    distanceO: distance helper (minimumTourDistanceWithCheck, getDistanceTour).
    D: depot pair passed through to the distance helper.
    K: maximum number of nodes per tour.
    Returns [D, tour_1, ..., tour_m, total_distance].
    """
    # Create a new model
    m = Model("lpK_sets")
    # Create variables of combinations of nodes
    combinations=[]
    removedCombinations=[]
    for i in range(1,K+1):
        combinations += list(itertools.combinations(N, i))
    variables=[]
    for i,combo in enumerate(combinations):
        d, tour=distanceO.minimumTourDistanceWithCheck(list(combo),D)
        if d is False and tour is None:
            # Infeasible subset: remember it so it is dropped below.
            removedCombinations.append(combo)
            continue
        comboints=[node[0] for node in tour]
        variablename = 'combo' + '-'.join(map(str,comboints))
        variables.append(m.addVar(vtype=GRB.BINARY,obj=d, name=variablename))
    # Integrate new variables
    m.modelSense = GRB.MINIMIZE
    m.update()
    # Every node needs to be serviced once and only once
    combinations = [combo for combo in combinations if combo not in removedCombinations]
    for node in N:
        containingNode = [i for i,combo in enumerate(combinations) if node in combo]
        m.addConstr(quicksum(variables[i] for i in containingNode) == 1,"service constraint %s" %node[0])
    # Parameter tuning; budget grows with instance size (0 for small instances).
    m.params.tuneResults = 1
    m.params.TuneTimeLimit=max(2*(len(N)-20),0)
    m.tune()
    if m.tuneResultCount > 0:
        # Load the best tuned parameters into the model
        m.getTuneResult(0)
    # Write B&B nodes to disk once memory exceeds 0.8 GB (see module docstring).
    m.params.NodefileStart = 0.8
    #m.params.LogToConsole=1
    m.optimize()
    m.printAttr('X')
    print('Obj: %g' % m.objVal)
    # Recover the chosen subsets from the names of the selected variables.
    distance = 0
    tours=[]
    for v in m.getVars():
        if v.x==0:
            continue
        assert v.varName.startswith('combo')
        nodes = v.varName.split('combo')[1]
        tour=[]
        for node in nodes.split('-'):
            tour.append(N[int(node)-1])
        d=distanceO.getDistanceTour(tour,D)
        distance+=d
        tours.append(tour)
    # Sanity check: reconstructed distance must match the solver objective.
    assert distance >= m.objVal-0.01 and distance <= m.objVal + 0.01, ' %s, %s' %(distance, m.objVal)
    solution = [D] +tours +[distance]
    return solution
def lpK_edges(N, distanceO,D,K):
    """
    The obvious LP
    Edge formulation: one binary variable per node-node edge and per
    depot-node edge; degree-2 constraints on every node plus subtour
    elimination via tourConstraints (assumed defined elsewhere in this
    module -- confirm).
    N: list of nodes; node[0] is a 1-based node id.
    distanceO: distance helper (EuclideanNode, closestDepotToString).
    D: depot pair.
    K: tour-size parameter for the subtour constraints.
    Returns [D, edge_1, ..., edge_m, objective_value] where each edge is a
    pair of string endpoints ('<node id>' or 'depot<depot>').
    """
    # Create a new model
    m = Model("lpK_edges")
    # Create variables of edges between nodes
    variables=[]
    for node1 in N:
        variables.append([])
        i=node1[0]
        # only j > i: each undirected edge gets a single variable
        for node2 in [node for node in N if node[0]>i]:
            j=node2[0]
            variable = 'x' + str(node1[0])+'-'+str(node2[0])
            variables[i-1].append(m.addVar(vtype=GRB.BINARY,obj=distanceO.EuclideanNode(node1,node2), name=variable))
    # Create variables of edges between depots and nodes
    toDepotVariables=[]
    fromDepotVariables=[]
    for node in N:
        i=node[0]-1
        toDepotVariables.append([])
        fromDepotVariables.append([])
        depotString, d = distanceO.closestDepotToString(D[0],D[1],node)
        variable = depotString + 'n' + str(node[0])
        fromDepotVariables[i].append(m.addVar(vtype=GRB.BINARY,obj=d, name=variable))
        variable = 'n' +str(node[0])+depotString
        toDepotVariables[i].append(m.addVar(vtype=GRB.BINARY,obj=d, name=variable))
    # Integrate new variables
    m.modelSense = GRB.MINIMIZE
    m.update()
    # Add in-out constraints nodes: every node has degree exactly 2
    for i in range(0,len(N)):
        # edges stored at i plus edges stored at k < i that end in i
        iVariables=variables[i] + [variables[k][i-k-1] for k in range(0,i)]
        m.addConstr(sum(iVariables) + quicksum(fromDepotVariables[i])+ quicksum(toDepotVariables[i])== 2,"in-out %s" % str(i))
    # Add tour constraints
    combinations = list(itertools.combinations(range(0,len(N)),K))
    for combo in combinations:
        tourConstraints(m,combo,variables,K,toDepotVariables,fromDepotVariables)
    m.params.tuneResults = 1
    m.params.TuneTimeLimit=3*len(N)
    m.params.LogToConsole=0
    m.tune()
    #
    if m.tuneResultCount > 0:
        # Load the best tuned parameters into the model
        m.getTuneResult(0)
    # Accept solutions within 5% of the best bound (see module docstring).
    m.params.MIPGap=0.05
    m.optimize()
    m.printAttr('X')
    # Decode selected variables back into edges via their name prefixes.
    edges=[]
    for v in m.getVars():
        if v.x==0:
            continue
        if v.varName.startswith('x'):
            # node-node edge 'x<i>-<j>'
            nodes = v.varName.split('x')[1]
            nodes=nodes.split('-')
            assert len(nodes)==2
            edges.append([nodes[0],nodes[1]])
        elif v.varName.startswith('depot'):
            # depot-to-node edge 'depot<d>n<i>'
            e = v.varName.split('depot')[1]
            node = e.split('n')[1]
            depot = e.split('n')[0]
            s = 'depot' + str(depot)
            edges.append([node, s])
        else:
            # node-to-depot edge 'n<i>depot<d>'
            assert v.varName.startswith('n')
            e = v.varName.split('n')[1]
            depot = e.split('depot')[1]
            node = e.split('depot')[0]
            s = 'depot' + str(depot)
            edges.append([node, s])
    print('Obj: %g' % m.objVal)
    solution = [D] + edges + [m.objVal]
    return solution
def lpK_flow(N, distanceO,D,K):
    """
    The flow LP: tours are modelled as a flow through K "levels"; at each
    level a node is either forwarded to the next level or marked serviced.

    Parameters
    ----------
    N : iterable of nodes; node[0] is used as a 1-based node id
        (assumed contiguous and in order -- TODO confirm against callers).
    distanceO : distance helper providing closestDepotToString().
    D : sequence whose first two elements are passed as depot arguments.
    K : maximum number of nodes per tour.

    Returns
    -------
    list
        [D] + edges + [objective value], where each edge is a pair of
        strings: [node, node] or [node, 'depot<x>'].
    """
    # Create a new model
    m = Model("lpK_flow")
    # Create variables for edges between nodes and depots.
    # Each node gets one binary "leave closest depot" and one "return to
    # closest depot" variable, weighted by the depot distance.
    fromDepot = []
    toDepot=[]
    for node in N:
        i = node[0]-1
        depotString, d = distanceO.closestDepotToString(D[0],D[1], node)
        variable = depotString+'-'+str(node[0])
        fromDepot.append([])
        fromDepot[i].append(m.addVar(vtype=GRB.BINARY,obj=d, name=variable))
        variable = str(node[0])+ '-' + depotString
        toDepot.append([])
        toDepot[i].append(m.addVar(vtype=GRB.BINARY,obj=d, name=variable))
    # Create variables for edges between nodes and nodes, one matrix per
    # flow level 1..K-1; the final level only carries "ending" variables.
    levels=[0]*K
    for i in range(1,K):
        levels[i-1] = levelK(N,distanceO,m,D, i)
    levelLast=[]
    for node in N:
        i=node[0]-1
        levelLast.append([])
        variable = 'lvl'+str(K)+ '-' + 'ending' '-' + str(node[0])
        levelLast[i].append(m.addVar(vtype=GRB.BINARY,obj=0, name=variable))
    levels[K-1] = levelLast
    # Create variables for edges between nodes and service nodes
    # (zero-cost "node is serviced at level k" indicators).
    serviced=[0]*K
    for i in range(1,K+1):
        serviced[i-1]= servicedK(N,distanceO,m,i)
    # Integrate new variables
    m.modelSense = GRB.MINIMIZE
    m.update()
    # in-out constraints: flow conservation per node and per level.
    for node in N:
        i=node[0]-1
        for j in range(0,K):
            level = levels[j]
            # At most one outgoing edge per level.
            m.addConstr(quicksum(level[i])<= 1, 'out level %s- %s'%(j, str(i+1)))
        # Leaving the depot must be matched by a level-1 forward or service.
        m.addConstr(2*fromDepot[i][0]-(quicksum(levels[0][i]))-quicksum(serviced[0][i])==0, 'in-out level 1- %s'%str(i+1))
        for k in range(1,K):
            # Incoming flow from level k-1 equals outgoing flow at level k
            # (self-loop variables levels[k-1][i][i] are excluded).
            m.addConstr(2*quicksum(levels[k-1][j][i] for j in range(0,len(N)))-quicksum(levels[k][i])-serviced[k][i][0] - levels[k-1][i][i]==0, 'in-out level %s-%s'%(k,str(i+1)))
        # Whatever reaches the last level must return to the depot.
        m.addConstr(quicksum(levels[-1][i])-toDepotVariables[i][0]==0, 'to depot-%s'%str(i+1)) if False else m.addConstr(quicksum(levels[-1][i])-toDepot[i][0]==0, 'to depot-%s'%str(i+1))
    # Service constraints: every node is serviced at exactly one level.
    for node in N:
        i=node[0]-1
        m.addConstr(quicksum(serviced[k][i][0] for k in range(0,K))==1, 'service Constraint- %s'%str(i))
    m.params.tuneResults = 1
    m.params.TuneTimeLimit=max(2*(len(N)-20),0)
    m.tune()
    if m.tuneResultCount > 0:
        # Load the best tuned parameters into the model
        m.getTuneResult(0)
    m.params.LogToConsole=1
    m.optimize()
    m.display()
    m.printAttr('X')
    # Reconstruct the selected edges from the names of the non-zero variables.
    edges=[]
    for v in m.getVars():
        if v.x==0 or v.varName.startswith('serviced'):
            continue
        if v.varName.startswith('lvl'):
            # 'lvl<k>-<src>-<dst>' (the artificial 'ending' edges are skipped).
            nodes = v.varName.split('-')[1:]
            if nodes[0]=='ending':
                continue
            assert len(nodes)==2
            edges.append([nodes[0],nodes[1]])
        elif v.varName.startswith('depot'):
            # 'depot<x>-<node>': edge leaving a depot.
            e = v.varName.split('depot')[1]
            node = e.split('-')[1]
            depot = e.split('-')[0]
            s = 'depot' + str(depot)
            edges.append([node, s])
        else:
            # '<node>-depot<x>': edge returning to a depot.
            e = v.varName.split('-')
            assert e[1].startswith('depot'), '%s' %e
            node = e[0]
            depot = e[1].split('depot')[1]
            s = 'depot' + str(depot)
            edges.append([node, s])
    print('Obj: %g' % m.objVal)
    solution = [D] + edges + [m.objVal]
    return solution
def levelK(N,distanceO,m,D, level):
    """Build the matrix of binary edge variables for one flow level.

    For every ordered node pair a binary variable named
    'lvl<level>-<src>-<dst>' is created with the pair's distance as its
    objective coefficient.  Pairs rejected by the distance check (negative
    distance) get a constant 0 placeholder so that indexing stays aligned.
    """
    edge_vars = []
    for src in N:
        row_idx = src[0] - 1
        edge_vars.append([])
        for dst in N:
            dist = distanceO.EuclideanNodeWithCheck(src, dst, D)
            if dist < 0:
                # Infeasible pair: constant zero keeps the row rectangular.
                edge_vars[row_idx].append(0)
                continue
            name = 'lvl' + str(level) + '-' + str(src[0]) + '-' + str(dst[0])
            edge_vars[row_idx].append(m.addVar(vtype=GRB.BINARY, obj=dist, name=name))
    return edge_vars
def servicedK(N,distanceO,m,level):
    """Create one zero-cost binary 'serviced at this level' variable per node.

    The variable for node n at the given level is named
    'serviced<level>-<n>'.  `distanceO` is accepted for signature symmetry
    with levelK() but is not used here.
    """
    service_vars = []
    for node in N:
        idx = node[0] - 1
        service_vars.append([])
        name = 'serviced' + str(level) + '-' + str(node[0])
        service_vars[idx].append(m.addVar(vtype=GRB.BINARY, obj=0, name=name))
    return service_vars
def singleTourConstraint(m,combination,variables,K,toDepotVariables,fromDepotVariables):
    """Add one subtour-elimination constraint for an undirected node order.

    `variables` holds the upper-triangular edge variables (variables[a][b-a-1]
    is the edge between nodes a and b with a < b); the constraint forbids the
    given ordering from forming a tour detached from the depots.
    """
    order = list(combination)
    first, last = order[0], order[-1]
    depot_terms = [quicksum(fromDepotVariables[first]),
                   quicksum(toDepotVariables[last])]
    edge_terms = []
    for a, b in zip(order, order[1:]):
        # Normalize to the upper-triangular representation.
        lo, hi = (b, a) if b < a else (a, b)
        edge_terms.append(variables[lo][hi - lo - 1])
    label = '-'.join(str(v + 1) for v in order)
    m.addConstr(2*quicksum(edge_terms) - (quicksum(depot_terms)) <= 2*(K-2),
                "New tour constraint %s" % (label,))
    return
def singleTourConstraint2(m,combination,variables,K,depotVariablesIn,depotVariablesOut):
    """Directed-edge variant of the subtour-elimination constraint.

    `variables[a-1][b-1]` is the directed edge a -> b; the constraint rules
    out the given directed ordering forming a depot-less tour.
    """
    order = list(combination)
    first, last = order[0], order[-1]
    depot_terms = list(depotVariablesOut[first - 1]) + list(depotVariablesIn[last - 1])
    edge_terms = [variables[a - 1][b - 1] for a, b in zip(order, order[1:])]
    label = '-'.join(str(v + 1) for v in order)
    m.addConstr(sum(edge_terms) - (sum(depot_terms)) <= K - 3,
                "New tour constraint %s" % (label,))
    return
def tourConstraints(m,combination,variables,K,toDepotVariables,fromDepotVariables):
    """Add a subtour constraint for every orientation-distinct ordering.

    Since the edges are undirected, an ordering and its reversal describe the
    same tour; the reversal of every kept ordering is remembered and skipped.
    """
    nodes = list(combination)
    all_orders = [list(p) for p in itertools.permutations(nodes, len(nodes))]
    kept = [all_orders[0]]
    seen_reversed = [all_orders[0][::-1]]
    for order in all_orders[1:]:
        if order in seen_reversed:
            continue
        kept.append(order)
        seen_reversed.append(order[::-1])
    for order in kept:
        singleTourConstraint(m, order, variables, K, toDepotVariables, fromDepotVariables)
    return
def tourConstraints2(m,combination,variables,K,depotVariablesIn,depotVariablesOut):
    """Directed variant: add one constraint per permutation of the nodes.

    Unlike tourConstraints(), reversals are NOT merged here because the
    edges are directed.
    """
    nodes = list(combination)
    for order in itertools.permutations(nodes, len(nodes)):
        singleTourConstraint2(m, list(order), variables, K, depotVariablesIn, depotVariablesOut)
    return
def variblesOfEdgesToDepots(set1, toDepotVariables,fromDepotVariables):
    """Collect, for each node index in `set1`, its four depot-edge variables.

    For index i this yields, in order:
    toDepotVariables[i][0], toDepotVariables[i][1] (edges to both depots)
    and fromDepotVariables[0][i], fromDepotVariables[1][i] (edges from both
    depots).  Returns a flat list in iteration order of `set1`.
    """
    collected = []
    for idx in set1:
        collected.extend([
            toDepotVariables[idx][0],
            toDepotVariables[idx][1],
            fromDepotVariables[0][idx],
            fromDepotVariables[1][idx],
        ])
    return collected
def lpK_DoubleEdges(N, distanceO,D,K):
    """
    Variant of the Obvious LP where the edges are directed.

    Builds a binary variable per ordered node pair ('x<src>-<dst>'), plus
    directed depot edges ('d<j>n<node>' leaving depot j, 'n<node>d<j>'
    entering depot j), enforces exactly one incoming and one outgoing edge
    per node, and adds directed subtour constraints for every K-combination.

    Returns [D] + edges + [objective value], each edge being a pair of
    strings [node, node] or [node, 'depot<x>'].
    """
    # Create a new model
    m = Model("lpK_DoubleEdges")
    # Create variables of edges between nodes (full directed matrix,
    # including self-loops which are forced to 0 below).
    variables=[]
    for node1 in N:
        variables.append([])
        i=node1[0]
        for node2 in N:
            j=node2[0]
            variable = 'x' + str(node1[0])+'-'+str(node2[0])
            variables[i-1].append(m.addVar(vtype=GRB.BINARY,obj=distanceO.EuclideanNode(node1,node2), name=variable))
    # Create variables of edges between depots and nodes (depot -> node).
    depotVariablesOut=[]
    for node1 in N:
        depotVariablesOut.append([])
        i=node1[0]
        for j,depot in enumerate(D):
            variable = 'd' + str(j+1)+'n'+str(node1[0])
            depotVariablesOut[i-1].append(m.addVar(vtype=GRB.BINARY,obj=distanceO.Euclidean(node1[1],node1[2],depot,0), name=variable))
    # Create variables of edges between depots and nodes (node -> depot).
    depotVariablesIn=[]
    for node1 in N:
        depotVariablesIn.append([])
        i=node1[0]
        for j,depot in enumerate(D):
            variable = 'n' +str(node1[0])+'d'+ str(j+1)
            depotVariablesIn[i-1].append(m.addVar(vtype=GRB.BINARY,obj=distanceO.Euclidean(node1[1],node1[2],depot,0), name=variable))
    # Integrate new variables
    m.modelSense = GRB.MINIMIZE
    m.update()
    # Directed subtour-elimination constraints for every K-combination.
    # NOTE(review): the combinations are 0-based indices while
    # singleTourConstraint2 subtracts 1 again when indexing -- confirm
    # the intended node numbering here.
    combinations = list(itertools.combinations(range(0,len(N)),K))
    for combo in combinations:
        tourConstraints2(m,combo,variables,K,depotVariablesIn,depotVariablesOut)
    # Add in-out constraints nodes: no self-loops, exactly one outgoing
    # and exactly one incoming edge per node (counting depot edges).
    for i in range(0,len(N)):
        m.addConstr(variables[i][i] == 0,"out %s" % str(i))
    for i in range(0,len(N)):
        m.addConstr(sum(variables[i]) + sum(depotVariablesIn[i]) == 1,"out %s" % str(i))
    for i in range(0,len(N)):
        inEdges = [variables[j][i] for j in range(0,len(N))]
        m.addConstr(sum(inEdges)+ sum(depotVariablesOut[i]) == 1,"in %s" % str(i))
    m.optimize()
    m.display()
    m.printAttr('X')
    # Reconstruct the chosen edges from the non-zero variable names.
    edges=[]
    for v in m.getVars():
        if v.x==0:
            continue
        if v.varName.startswith('x'):
            # 'x<src>-<dst>'
            nodes = v.varName.split('x')[1]
            nodes=nodes.split('-')
            assert len(nodes)==2
            edges.append([nodes[0],nodes[1]])
        elif v.varName.startswith('d'):
            # 'd<j>n<node>': depot -> node
            e = v.varName.split('d')[1]
            node = e.split('n')[1]
            depot = e.split('n')[0]
            s = 'depot' + str(depot)
            edges.append([node, s])
        else:
            # 'n<node>d<j>': node -> depot
            assert v.varName.startswith('n')
            e = v.varName.split('n')[1]
            depot = e.split('d')[1]
            node = e.split('d')[0]
            s = 'depot' + str(depot)
            edges.append([node, s])
    solution = [D] + edges + [m.objVal]
    return solution
def doLP(N, distanceObject, modelname,D,K):
    """Dispatch to the module-level LP solver whose name is `modelname`.

    Looks the name up in this module's globals and calls it with
    (N, distanceObject, D, K).  Raises Exception when no such function
    exists.
    """
    solver = globals().copy().get(modelname)
    if not solver:
        raise Exception('Method %s not found' % modelname)
    return solver(N, distanceObject, D, K)
def bestPossibleTrips(N,K,distanceO):
    """
    LP to find the cheapest way to construct paths with at most K nodes from N,
    in such a way that every node from N is in one path.

    One binary variable is created per node combination of size 1..K, with
    the combination's minimum tour distance (ignoring depots) as its cost.
    Returns [[-1, -1]] + edges + [objective value] (the leading pair stands
    in for the depot slot used by the other solvers).
    """
    # Create a new model
    m = Model("bestPossibleTrips")
    # Create variables of combinations of nodes
    combinations=[]
    # NOTE(review): removedCombinations is never populated, so the two
    # filtering passes below are currently no-ops.
    removedCombinations=[]
    for i in range(1,K+1):
        combinations += list(itertools.combinations(N, i))
    variables=[]
    for i,combo in enumerate(combinations):
        # Cost of a combination = its cheapest internal tour ordering.
        d, tour=distanceO.minimumTourDistanceNoDepots(combo)
        comboints=[node[0] for node in tour]
        variablename = 'combo' + '-'.join(map(str,comboints))
        variables.append(m.addVar(vtype=GRB.BINARY,obj=d, name=variablename))
    # Integrate new variables
    m.modelSense = GRB.MINIMIZE
    m.update()
    # Every node needs to be serviced once and only once
    combinations = [combo for combo in combinations if combo not in removedCombinations]
    for node in N:
        containingNode = [i for i,combo in enumerate(combinations) if node in combo]
        m.addConstr(quicksum(variables[i] for i in containingNode) == 1,"service constraint %s" %node[0])
    # Maximum length of tours: force a minimum count of tours of each size,
    # longest sizes first.
    combinations = [combo for combo in combinations if combo not in removedCombinations]
    oldNum=0
    for i in range(0,K):
        length = float(K-i)
        numTours = int((len(N)-oldNum)/length)
        containingNode = [i for i,combo in enumerate(combinations) if len(combo)==length]
        m.addConstr(quicksum(variables[i] for i in containingNode)>= numTours,"maximumTour length %s" %length)
        oldNum += numTours*length
    m.optimize()
    m.display()
    m.printAttr('X')
    print('Obj: %g' % m.objVal)
    # Expand each selected combination variable back into its edge list.
    edges=[]
    for v in m.getVars():
        if v.x==0:
            continue
        assert v.varName.startswith('combo')
        nodes = v.varName.split('combo')[1]
        nodes=nodes.split('-')
        for i in range(1,len(nodes)):
            edges.append([nodes[i-1],nodes[i]])
    solution = [[-1,-1]] +edges +[m.objVal]
    return solution
| [
"noreply@github.com"
] | NicoleKappelhof.noreply@github.com |
4273284926f575718667dff4ea9f1afc83b9c5d6 | d609ea951b45911a8e9afd904d58273d50f05a3e | /cmds/scheduler.py | a6bde4ea60c82fbc95faff3e7681c59cce32221b | [
"MIT"
] | permissive | lochungtin/Sonority | bdf077a9c2d666acd1c25bb4859391e440a51f61 | 104edc9da4de804a46b97b5db571236274888fbe | refs/heads/main | 2023-04-08T21:44:09.987998 | 2021-04-26T10:54:56 | 2021-04-26T10:54:56 | 361,232,778 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,299 | py | import asyncio
import discord
from discord.ext import commands
from utils.cogext import CogExt, setBot
from utils.logger import Logger
timeUnitMlp = [1, 60, 3600, 86400]
class Scheduler(CogExt):
    """Discord cog providing a liveness ping and a user-settable countdown
    timer that mentions the caller when it expires."""

    # test ping command
    @commands.command()
    async def ping(self, ctx):
        # Replies "pong" mentioning the caller; useful as a liveness check.
        Logger.debug("{} issued a ping command".format(ctx.author))
        await ctx.send("{} pong".format(ctx.message.author.mention))

    # schedule ping
    @commands.command()
    async def timer(self, ctx, time, *args):
        # `time` is a colon-separated timecode d:h:m:s (shorter forms such
        # as "90" or "1:30" are allowed); `args` optionally starts with a
        # channel modifier (-m mobile / -d desktop) followed by a free-text
        # event description.
        timeArr = time.split(":")
        timeArgs = len(timeArr)
        totalSecs = 0
        # invalid timecode
        if (timeArgs > 4):
            errEmbed = discord.Embed(
                color=discord.Colour.magenta(),
                title="Error: Invalid Timecode",
                description="""
                Timecode Format: `d*:h*:m*:s+`
                `*` meaning 0 or more digits
                `+` meaning 1 or more digits
                """
            )
            await ctx.send(embed=errEmbed)
            Logger.debug("Invalid timecode received - no timers scheduled")
            return
        # Fields are read right-to-left (last field is seconds);
        # timeUnitMlp = [1, 60, 3600, 86400] supplies the unit multipliers.
        for i in range(0, timeArgs):
            totalSecs += (int(timeArr[(timeArgs - 1) - i]) * timeUnitMlp[i])
        Logger.debug("{} issued a set timer command. Timer will expire in {}s".format(ctx.author, totalSecs))
        pingChannel = "Universal"
        # NOTE(review): pingChannelID is assigned below but never used --
        # the ping is always sent to the invoking channel.
        pingChannelID = ""
        # parse args
        text = ""
        if (len(args) > 0):
            if args[0].startswith("-"):
                # has modifier
                if args[0].lower()[1] == 'm':
                    Logger.debug("Timer ping will be issued in the mobile only ping channel")
                    pingChannel = "Mobile"
                    pingChannelID = ""
                elif args[0].lower()[1] == 'd':
                    Logger.debug("Timer ping will be issued in the desktop only ping channel")
                    pingChannel = "Desktop"
                    pingChannelID = ""
                else:
                    Logger.debug("Timer ping will be issued in the universal ping channel")
            else:
                Logger.debug("Timer ping will be issued in the universal ping channel")
            # Skip the modifier token (if present) when building the text.
            text = " ".join(args[args[0].startswith("-") * 1:])
        # send response message to schedule request
        responseEmbed = discord.Embed(
            color = discord.Colour.teal(),
            title="Timer set",
            description="""
            Your timer as been set
            Duration: `{}s`
            Ping Channel: `{}`
            """.format(totalSecs, pingChannel)
        )
        await ctx.send(embed=responseEmbed)
        # wait (non-blocking: lets other commands run while the timer ticks)
        await asyncio.sleep(totalSecs)
        # times up, send alarm
        timesupEmbed = discord.Embed(
            color = discord.Colour.teal(),
            title="Times Up",
            description="""
            Your timer for `{}s` has ended
            Event Desciption: {}
            """.format(totalSecs, text)
        )
        await ctx.send(ctx.message.author.mention)
        await ctx.send(embed=timesupEmbed)
# extension binding to bot
def setup(bot):
    """discord.py extension entry point: register the Scheduler cog on the
    bot and hand the bot instance to the cog-extension module."""
    bot.add_cog(Scheduler(bot))
    setBot(bot)
| [
"lochungtin@gmail.com"
] | lochungtin@gmail.com |
caef56b59f3154376c50d4336649aec1100d0102 | 2f6d017dedc68588b2615d65c1e8ca8bcdd90446 | /api/dynamic_tests_v2/cumsum.py | 64fc792e50a19fb1e753faa601710dbef87b366e | [] | no_license | hysunflower/benchmark | 70fc952a4eb1545208543627539d72e991cef78a | c14f99c15b4be9e11f56ea378ca15d9c3da23bab | refs/heads/master | 2022-06-30T07:04:14.986050 | 2022-06-15T02:43:04 | 2022-06-15T02:43:04 | 224,449,279 | 1 | 0 | null | 2019-11-27T14:29:29 | 2019-11-27T14:29:29 | null | UTF-8 | Python | false | false | 1,479 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common_import import *
class CumsumConfig(APIConfig):
    """Benchmark configuration for the `cumsum` op."""

    def __init__(self):
        super(CumsumConfig, self).__init__('cumsum')
        # Sample the input values uniformly from [-1, 1].
        self.feed_spec = {"range": [-1, 1]}
class PDCumsum(PaddleDynamicAPIBenchmarkBase):
    """Paddle dynamic-graph benchmark case for paddle.cumsum."""

    def build_graph(self, config):
        # `config` supplies x_shape, x_dtype and the axis to accumulate over.
        x = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)
        result = paddle.cumsum(x=x, axis=config.axis)
        self.feed_list = [x]
        self.fetch_list = [result]
class TorchCumsum(PytorchAPIBenchmarkBase):
    """PyTorch counterpart of PDCumsum, used for cross-framework comparison."""

    def build_graph(self, config):
        x = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)
        # NOTE(review): torch.cumsum's native kwargs are input/dim; the
        # benchmark base presumably remaps x/axis -- confirm.
        result = torch.cumsum(x=x, axis=config.axis)
        self.feed_list = [x]
        self.fetch_list = [result]
if __name__ == '__main__':
    # Run the cross-framework benchmark comparing the Paddle and PyTorch cases.
    test_main(
        pd_dy_obj=PDCumsum(), torch_obj=TorchCumsum(), config=CumsumConfig())
| [
"noreply@github.com"
] | hysunflower.noreply@github.com |
c83eadf7b9b9967c1507e6da8273883512787e28 | 13ea58f72fa96e2455609fb452b5f3b98e94f846 | /sfepy/postprocess/plot_cmesh.py | 4319e5a25f131980a112ea817a562980f7b29e29 | [
"BSD-3-Clause"
] | permissive | vondrejc/sfepy | 4284ee47979b89d9e504b72b91689a9ce0c3a5ec | 8e427af699c4b2858eb096510057abb3ae7e28e8 | refs/heads/master | 2021-01-24T00:09:18.722674 | 2014-08-20T12:37:03 | 2014-08-20T14:25:56 | 12,810,199 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,538 | py | """
Functions to visualize the CMesh geometry and topology.
"""
import matplotlib.pyplot as plt
from sfepy.postprocess.plot_dofs import _get_axes
def plot_wireframe(ax, cmesh, color='k', show=False):
    """
    Plot a finite element mesh as a wireframe using edges connectivity.

    Parameters
    ----------
    ax : matplotlib axes or None
        Axes to draw into; created by _get_axes() when needed.
    cmesh : mesh object exposing .coors, .dim and .get_conn().
    color : matplotlib color spec for the edge lines.
    show : bool
        Call plt.show() when True.

    Returns
    -------
    The axes drawn into.
    """
    coors = cmesh.coors
    dim = cmesh.dim
    # Edge (dim 1) to vertex (dim 0) connectivity: two vertex ids per edge.
    edges = cmesh.get_conn(1, 0)
    ax = _get_axes(ax, dim)
    for edge_vertices in edges.indices.reshape((edges.num, 2)):
        cc = coors[edge_vertices]
        if dim == 3:
            ax.plot(cc[:, 0], cc[:, 1], cc[:, 2], color)
        else:
            ax.plot(cc[:, 0], cc[:, 1], color)
    if show:
        plt.show()
    return ax
def plot_entities(ax, cmesh, edim, color='b', size=10, show=False):
    """
    Plot mesh topology entities using scatter plot.

    Entities of topological dimension `edim` are drawn as points at their
    centroids, in 2D or 3D depending on the mesh dimension.
    """
    centroids = cmesh.get_centroids(edim)
    dim = cmesh.dim
    ax = _get_axes(ax, dim)
    # Hand scatter() one coordinate column per spatial dimension.
    ax.scatter(*(centroids[:, ic] for ic in range(dim)), s=size, c=color)
    if show:
        plt.show()
    return ax
def label_global_entities(ax, cmesh, edim, color='b', fontsize=10, show=False):
    """
    Label mesh topology entities using global ids.

    Each entity of topological dimension `edim` is annotated at its
    centroid with its index in the global numbering.
    """
    coors = cmesh.get_centroids(edim)
    dim = cmesh.dim
    ax = _get_axes(ax, dim)
    for ii, cc in enumerate(coors):
        if dim == 3:
            ax.text(cc[0], cc[1], cc[2], ii,
                    color=color, fontsize=fontsize)
        else:
            ax.text(cc[0], cc[1], ii,
                    color=color, fontsize=fontsize)
    if show:
        plt.show()
    return ax
def label_local_entities(ax, cmesh, edim, color='b', fontsize=10, show=False):
    """
    Label mesh topology entities using cell-local ids.

    For every cell, the entities of dimension `edim` it contains are
    annotated with their position (0, 1, ...) inside that cell's
    connectivity row.
    """
    coors = cmesh.get_centroids(edim)
    dim = cmesh.dim
    centres = cmesh.get_centroids(dim)
    # Cell (dim) -> entity (edim) connectivity with CSR-style offsets.
    conn = cmesh.get_conn(dim, edim)
    off = conn.offsets
    ax = _get_axes(ax, dim)
    eps = 0.1
    oeps = 1.0 - eps
    # NOTE: xrange => this module targets Python 2.
    for ii in xrange(conn.num):
        for ic, ie in enumerate(conn.indices[off[ii]:off[ii+1]]):
            # Shift labels towards the cell centre.
            cc = oeps * coors[ie] + eps * centres[ii]
            if dim == 3:
                ax.text(cc[0], cc[1], cc[2], ic,
                        color=color, fontsize=fontsize)
            else:
                ax.text(cc[0], cc[1], ic,
                        color=color, fontsize=fontsize)
    if show:
        plt.show()
    return ax
| [
"cimrman3@ntc.zcu.cz"
] | cimrman3@ntc.zcu.cz |
5a97f46e6ef95ec6db4828165519cb303313b83f | 82f8564c030f27129836fa116ad7d195cb291719 | /npcrenderer.py | b5b28318630b38c74fb3eac5d0b38345c061cde4 | [] | no_license | timoruokonen/latrappe | 5247c69ce7b8c392e0c5870ea96d842ab5c74d98 | 1b752da838836171144bc142bf59becfdd798a93 | refs/heads/master | 2021-01-01T17:22:54.260586 | 2013-02-28T20:24:42 | 2013-02-28T20:24:42 | 7,369,929 | 1 | 1 | null | 2018-12-26T07:07:13 | 2012-12-29T20:01:21 | Python | UTF-8 | Python | false | false | 3,129 | py | import pygame
from TileUtils import *
from AnimatedSprite import AnimatedSprite
from latrappe import *
class NpcRenderer:
    """Draws NPC sprites and their name labels on a pygame surface, picking
    an animation based on the NPC's current scheduled action and occupation."""

    def __init__(self, surface, tile_width=32, tile_height=32):
        self.surface = surface
        # FIXME: Just test images for now
        self.MAP_TILE_WIDTH = tile_width
        self.MAP_TILE_HEIGHT = tile_height
        self.font = pygame.font.Font(None, 17)
        self.npc_dead_img = pygame.image.load("monk_dead.png")
        self.npc_alive_img = pygame.image.load("monk.png")
        self.npcimage = self.npc_alive_img
        # One AnimatedSprite per activity, built from 32x32 sprite sheets;
        # the second AnimatedSprite argument is its frame rate.
        self.work_animation_images = TileUtils.load_sliced_sprites(32, 32, 'monk_working.png')
        self.work_animation = AnimatedSprite(self.work_animation_images, 30)
        self.sleep_animation_images = TileUtils.load_sliced_sprites(32, 32, 'monk_sleeping.png')
        self.sleep_animation = AnimatedSprite(self.sleep_animation_images, 30)
        self.brewing_animation_images = TileUtils.load_sliced_sprites(32, 32, 'monk_brewing.png')
        self.brewing_animation = AnimatedSprite(self.brewing_animation_images, 8)
        self.hunting_animation_images = TileUtils.load_sliced_sprites(32, 32, 'monk_hunting.png')
        self.hunting_animation = AnimatedSprite(self.hunting_animation_images, 8)
        self.npc_animation = self.work_animation

    def draw_npc(self, npc):
        """Blit the sprite matching `npc`'s state at (npc.x, npc.y) and its
        name label underneath."""
        # default image
        npcimage = self.npc_alive_img
        action = npc.schedule.get_current_action()
        if npc.alive:
            # Choose the animation from the current action/occupation pair.
            if (type(action) == ProduceAction) and (type(npc.occupation) == Hunter):
                self.npc_animation = self.hunting_animation
                self.surface.blit(self.hunting_animation.image, (npc.x, npc.y))
            elif (type(action) == ProduceAction) and (type(npc.occupation) == Brewer):
                self.npc_animation = self.brewing_animation
                self.surface.blit(self.brewing_animation.image, (npc.x, npc.y))
            elif type(action) == ProduceAction:
                self.npc_animation = self.work_animation
                self.surface.blit(self.work_animation.image, (npc.x, npc.y))
            elif type(action) == Action and action.name == "Sleep":
                self.npc_animation = self.sleep_animation
                self.surface.blit(self.sleep_animation.image, (npc.x,npc.y))
            else:
                self.surface.blit(npcimage, (npc.x, npc.y))
        else:
            # if npc is dead, show dead npc image no matter what
            # NOTE(review): self.npcimage gets the dead image but the LOCAL
            # `npcimage` (still the alive image) is blitted below, so dead
            # NPCs are drawn alive -- looks like a bug, confirm.
            self.npcimage = self.npc_dead_img
            self.surface.blit(npcimage, (npc.x, npc.y))
        # draw npc name
        text = self.font.render(npc.name, True, (255,255, 255))
        textRect = text.get_rect()
        textRect.left = npc.x - (self.MAP_TILE_WIDTH / 2)
        textRect.top = npc.y + (self.MAP_TILE_HEIGHT)
        self.surface.blit(text, textRect)

    def update(self, time):
        # Advance every animation, not only the active one, so frames stay
        # in sync when the NPC switches activities.
        self.work_animation.update(time)
        self.brewing_animation.update(time)
        self.sleep_animation.update(time)
        self.hunting_animation.update(time)
| [
"jruokonen@ovi.com"
] | jruokonen@ovi.com |
83b6c3223a9ea60b7456b4e43317b1614cfe87e0 | 7ce05272d21c903abc85ebc74544009aacd80c82 | /Advance_Python/Socket_Programming/socket_programs/client.py | cead5a5a2d925f83e46f72d6bbd4a1b3d48a2ce3 | [] | no_license | sachinyadav3496/PythonInternBatch2018 | 8899a866f60a39b4c7eff4f5bc79ec2586833403 | 8e2610ad80c39ea747e8a6547ebe540e7b019a79 | refs/heads/master | 2021-06-26T09:18:58.178457 | 2020-10-03T09:49:32 | 2020-10-03T09:49:32 | 136,880,809 | 18 | 34 | null | 2020-10-03T09:49:33 | 2018-06-11T05:56:26 | Jupyter Notebook | UTF-8 | Python | false | false | 670 | py | import socket
server_socket = socket.socket()
host = socket.gethostbyname(socket.gethostname()) # Give server address if server is on differnt machine
port = 12345 #port no on which server is listing
server_socket.connect((host,port))
print("Got Connection from server at {}:{} ".format(host,port))
while True :
smsg = server_socket.recv(1024)
if smsg.decode().strip().lower() == 'bye' :
print("Connection is Terminated by server")
server_socket.close()
break
print("\t\t\tServer -> ",smsg.decode())
msg = input("client->")
server_socket.send(msg.encode())
if msg == 'bye' :
server_socket.close()
break
| [
"sachinyadav3496@gmail.com"
] | sachinyadav3496@gmail.com |
cc37339f965654b3bb80a3839e738438166c83ac | 4b2125441bebb98aba6c05d935ab28f6dfeab7d2 | /tableRowShowoff.py | cfd4dea1c56692c723094bc326918bb43c872cf3 | [
"Unlicense",
"MIT"
] | permissive | PiKaLeX/Kivy_Tutorial | 0cf23d7f5e3c47f4da3672e38130dd65457cb3c5 | c111908e3cf904f7c1901e75532db76c6be77295 | refs/heads/main | 2023-04-11T04:22:16.510567 | 2021-04-25T00:18:43 | 2021-04-25T00:18:43 | 356,843,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | from kivymd.app import MDApp
from kivy.lang import Builder
from kivymd.uix.datatables import MDDataTable
from kivy.metrics import dp
kv = """
Screen:
MDRectangleFlatButton:
text: 'Click me to get table contents'
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
on_press: app.table()
"""
class Main(MDApp):
    """KivyMD demo app: one button that opens a paginated 50-row data table."""

    def table(self):
        # Build and show an MDDataTable with 6 columns (each dp(30) wide)
        # and 50 numbered rows; kept on self so it outlives this call.
        self.tables = MDDataTable(
            orientation="lr-tb",
            size_hint=(0.9, 0.6),
            use_pagination=True,
            column_data=[
                ("No.", dp(30)),
                ("Column 1", dp(30)),
                ("Column 2", dp(30)),
                ("Column 3", dp(30)),
                ("Column 4", dp(30)),
                ("Column 5", dp(30)),
            ],
            row_data=[
                (f"{i + 1}", "1", "2", "3", "4", "5") for i in range(50)
            ],
        )
        self.tables.open()

    def build(self):
        # Root widget comes from the module-level kv string.
        return Builder.load_string(kv)
Main().run()
"alexandre.castonguay.garceau@gmail.com"
] | alexandre.castonguay.garceau@gmail.com |
a4ec925ffdf9afa9aff09c57049d796f503f32ea | 524c168b1b7ab4644a612f692645ae56487dea8c | /fwork-backend/tina/projects/migrations/0013_auto_20141210_1040.py | ac9ab59da8ca199cb8221bccf33e483a8493f55f | [] | no_license | phamhongnhung2501/Taiga.Tina | b4fa730a9f9601e23ab19c6d937e7daf0386b1e2 | 8bc44de3a364ccd0e49e767b098589898dcabc10 | refs/heads/master | 2022-12-14T09:55:11.205228 | 2019-07-08T07:42:38 | 2019-07-08T07:42:38 | 195,760,755 | 0 | 0 | null | 2022-12-08T05:18:37 | 2019-07-08T07:39:32 | Python | UTF-8 | Python | false | false | 995 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db import connection
from tina.projects.userstories.models import *
from tina.projects.tasks.models import *
from tina.projects.issues.models import *
from tina.projects.models import *
def _fix_tags_model(tags_model):
    """Strip commas from every tag of rows whose `tags` array contains one.

    Affected row ids are found with a raw SQL query (unnest the tags array
    and keep rows where any tag matches '%,%'); each instance is then
    cleaned and saved through the ORM so normal save handling applies.
    """
    table_name = tags_model._meta.db_table
    # table_name comes from Django model metadata, not user input, so the
    # %-interpolation into the SQL string is safe here.
    query = "select id from (select id, unnest(tags) tag from %s) x where tag LIKE '%%,%%'"%(table_name)
    cursor = connection.cursor()
    cursor.execute(query)
    for row in cursor.fetchall():
        id = row[0]
        instance = tags_model.objects.get(id=id)
        instance.tags = [tag.replace(",", "") for tag in instance.tags]
        instance.save()
def fix_tags(apps, schema_editor):
    # RunPython hook: only Project tags need fixing in this migration.
    _fix_tags_model(Project)
class Migration(migrations.Migration):
    """Data migration removing commas from project tags."""

    dependencies = [
        ('projects', '0012_auto_20141210_1009'),
    ]

    operations = [
        migrations.RunPython(fix_tags),
    ]
| [
"hongnhunglyzin97@gmail.com"
] | hongnhunglyzin97@gmail.com |
9fa52bc8569de172d5163c6758b3e6344b17a715 | 3fae6def3b1bb197cdeaa2d8be13b11440b80ff6 | /functions/expand_labels.py | 13c4a4cb677fbba69f7301bfd71795dbbe154358 | [] | no_license | DCMLab/standards | 85203ccf03b7565b48a855e5a142c94da9e958c4 | 17cc3645e674841d234a63a0a1f7ab9bc662d2bc | refs/heads/main | 2023-08-17T07:20:20.375477 | 2023-08-04T11:03:19 | 2023-08-04T11:03:19 | 253,857,609 | 4 | 0 | null | 2023-08-24T15:23:33 | 2020-04-07T16:56:36 | Python | UTF-8 | Python | false | false | 57,858 | py | ################################################################################
# Internal libraries
################################################################################
import argparse, os, re, sys, logging
from collections.abc import Iterable
from inspect import getfullargspec
from fractions import Fraction as frac
logging.getLogger().setLevel(logging.INFO)
################################################################################
# External libraries
################################################################################
import pandas as pd
import numpy as np
################################################################################
# Helpers
################################################################################
from .feature_matrices import name2tpc, transform
from split_labels import split_labels
################################################################################
# Constants
################################################################################
class SliceMaker(object):
    """Turn slice notation such as ``[:3]`` into a value that can be stored
    in a variable or passed as a function argument.

    Examples
    --------
    SM = SliceMaker()
    some_function( slice_this, SM[3:8] )
    select_all = SM[:]
    df.loc[select_all]
    """

    def __getitem__(self, key):
        # Python hands us the slice object (or index); just pass it through.
        return key


SM = SliceMaker()
################################################################################
# Functions for treating DCML harmony labels
################################################################################
def abs2rel_key(absolute, localkey, global_minor=False):
    """ Expresses a Roman numeral as scale degree relative to a given localkey.
    The result changes depending on whether Roman numeral and localkey are
    interpreted within a global major or minor key.

    Uses: split_sd()

    Parameters
    ----------
    absolute : :obj:`str`
        Relative key expressed as Roman scale degree of the local key.
    localkey : :obj:`str`
        The local key in terms of which `absolute` will be expressed.
    global_minor : bool, optional
        Has to be set to True if `absolute` and `localkey` are scale degrees of a global minor key.

    Returns
    -------
    :obj:`str`
        `absolute` re-expressed relative to `localkey`, with leading '#'/'b'
        accidentals; NaN is passed through.

    Examples
    --------
    In a minor context, the key of II would appear within the key of vii as #III.

        >>> abs2rel_key('iv', 'VI', global_minor=False)
        'bvi'       # F minor expressed with respect to A major
        >>> abs2rel_key('iv', 'vi', global_minor=False)
        'vi'        # F minor expressed with respect to A minor
        >>> abs2rel_key('iv', 'VI', global_minor=True)
        'vi'        # F minor expressed with respect to Ab major
        >>> abs2rel_key('iv', 'vi', global_minor=True)
        '#vi'       # F minor expressed with respect to Ab minor

        >>> abs2rel_key('VI', 'IV', global_minor=False)
        'III'       # A major expressed with respect to F major
        >>> abs2rel_key('VI', 'iv', global_minor=False)
        '#III'      # A major expressed with respect to F minor
        >>> abs2rel_key('VI', 'IV', global_minor=True)
        'bIII'      # Ab major expressed with respect to F major
        >>> abs2rel_key('VI', 'iv', global_minor=True)
        'III'       # Ab major expressed with respect to F minor
    """
    # Missing values are passed through unchanged.
    if pd.isnull(absolute):
        return np.nan
    maj_rn = ['I','II','III','IV','V','VI','VII']
    min_rn = ['i','ii','iii','iv','v','vi','vii']
    # shifts[key][degree]: accidental corrections applied when the global
    # key is minor (rows indexed by the localkey transposed down two steps).
    shifts = np.array( [[0,0,0,0,0,0,0],
                        [0,0,1,0,0,0,1],
                        [0,1,1,0,0,1,1],
                        [0,0,0,-1,0,0,0],
                        [0,0,0,0,0,0,1],
                        [0,0,1,0,0,1,1],
                        [0,1,1,0,1,1,1]])
    # Separate accidentals (counted as +#/-b) from the plain numerals.
    abs_acc, absolute = split_sd(absolute, count=True)
    localkey_acc, localkey = split_sd(localkey, count=True)
    shift = abs_acc - localkey_acc
    # Keep the mode (case) of `absolute` in the result.
    steps = maj_rn if absolute.isupper() else min_rn
    key_num = maj_rn.index(localkey.upper())
    abs_num = (steps.index(absolute) - key_num) % 7
    step = steps[abs_num]
    # Degrees 3, 6, 7 are a semitone lower in minor localkeys.
    if localkey.islower() and abs_num in [2,5,6]:
        shift += 1
    if global_minor:
        key_num = (key_num - 2) % 7
        shift -= shifts[key_num][abs_num]
    acc = shift * '#' if shift > 0 else -shift * 'b'
    return acc+step
def changes2list(changes, sort=True):
    """Split a chord-change string into 4-tuples
    ``(full_match, added_marker, accidentals, chord_interval)``.

    When `sort` is True the tuples are ordered by descending interval.

    Example
    -------
    >>> changes2list('+#7b5')
    [('+#7', '+', '#', '7'),
     ('b5', '', 'b', '5')]
    """
    pattern = re.compile(r"((\+)?(#+|b+)?(1\d|\d))")
    matches = list(pattern.findall(changes))
    if not sort:
        return matches
    return sorted(matches, key=lambda match: int(match[3]), reverse=True)
def changes2tpc(changes, numeral, minor=False, root_alterations=False):
    """ Given a numeral and changes, computes the intervals that the changes represent.
    Changes do not express absolute intervals but instead depend on the numeral and the mode.

    Uses: split_sd(), changes2list()

    Parameters
    ----------
    changes : :obj:`str`
        A string of changes following the DCML harmony standard.
    numeral : :obj:`str`
        Roman numeral. If it is preceded by accidentals, it depends on the parameter
        `root_alterations` whether these are taken into account.
    minor : :obj:`bool`, optional
        Set to true if the `numeral` occurs in a minor context.
    root_alterations : :obj:`bool`, optional
        Set to True if accidentals of the root should change the result.

    Returns
    -------
    :obj:`list` of :obj:`tuple`
        One 5-tuple per change: (full_match, added_marker, accidentals,
        chord_interval, tpc_interval relative to the root) -- the last
        element is None for the intervals '3' and '5'.
    """
    root_alteration, num_degree = split_sd(numeral, count=True)
    # build 2-octave diatonic scale on C major/minor (as tonal pitch classes,
    # i.e. positions on the line of fifths)
    root = ['I','II','III','IV','V','VI','VII'].index(num_degree.upper())
    tpcs = 2 * [i for i in (0,2,-3,-1,1,-4,-2)] if minor else 2 * [i for i in (0,2,4,-1,1,3,5)]
    tpcs = tpcs[root:] + tpcs[:root] # starting the scale from chord root
    root = tpcs[0]
    if root_alterations:
        # Each accidental moves the root by 7 steps on the line of fifths.
        root += 7 * root_alteration
        tpcs[0] = root
    alts = changes2list(changes, sort=False)
    # '#'/'b' on a change likewise shift by +/-7 fifths.
    acc2tpc = lambda accidentals: 7 * (accidentals.count('#') - accidentals.count('b'))
    return [(full, added, acc, chord_interval, (tpcs[int(chord_interval) - 1] + acc2tpc(acc) - root) if not chord_interval in ['3', '5'] else None) for full, added, acc, chord_interval in alts]
def chord2tpcs(chord, regex, **kwargs):
    """Split a chord label into its features and apply features2tpcs().

    Uses: features2tpcs()

    Parameters
    ----------
    chord : :obj:`str`
        Chord label that can be split into the features
        ['numeral', 'form', 'figbass', 'changes', 'relativeroot'].
    regex : :obj:`re.Pattern`
        Compiled regex with named groups for the five features.
    **kwargs : arguments for features2tpcs
    """
    match = re.match(regex, chord)
    assert match is not None, f"{chord} does not match the regex."
    groups = match.groupdict()
    feature_names = ('numeral', 'form', 'figbass', 'changes', 'relativeroot')
    features = {name: groups[name] for name in feature_names}
    return features2tpcs(**features, **kwargs)
def compute_chord_tones(df, bass_only=False, expand=False, cols={}):
    """ Compute the chord tones for DCML harmony labels. They are returned as lists
    of tonal pitch classes in close position, starting with the bass note. The
    tonal pitch classes represent intervals relative to the local tonic:
    -2: Second below tonic
    -1: fifth below tonic
    0: tonic
    1: fifth above tonic
    2: second above tonic, etc.
    The labels need to have undergone split_labels() and propagate_keys().
    Pedal points are not taken into account.
    Uses: features2tpcs(), transform()
    Parameters
    ----------
    df : :obj:`pandas.DataFrame`
        Dataframe containing DCML chord labels that have been split by split_labels()
        and where the keys have been propagated using propagate_keys(add_bool=True).
    bass_only : :obj:`bool`, optional
        Pass True if you need only the bass note.
    expand : :obj:`bool`, optional
        Pass True if you need chord tones and added tones in separate columns.
    cols : :obj:`dict`, optional
        In case the column names for ['numeral', 'form', 'figbass', 'changes', 'relativeroot', 'localkey', 'globalkey'] deviate, pass a dict, such as
        {'numeral': 'numeral_col_name',
         'form': 'form_col_name',
         'figbass': 'figbass_col_name',
         'changes': 'changes_col_name',
         'relativeroot': 'relativeroot_col_name',
         'localkey': 'localkey_col_name',
         'globalkey': 'globalkey_col_name'}
        You may also deactivate columns by setting them to None, e.g. {'changes': None}
    Returns
    -------
    :obj:`pandas.Series` or :obj:`pandas.DataFrame`
        For every row of `df` one tuple with chord tones, expressed as tonal picth classes.
        If `expand` is True, the function returns a DataFrame with four columns:
        Two with tuples for chord tones and added tones, one with the chord root,
        and one with the bass note.
    """
    # NOTE(review): `cols` is a mutable default argument; the loop below adds
    # entries to it, so the default dict is mutated across calls. Benign here
    # (keys always map to the same values) but worth refactoring to cols=None.
    df = df.copy()
    # A unique index is required to align the result below; add a temporary
    # integer level if necessary and drop it again before returning.
    tmp_index = not df.index.is_unique
    if tmp_index:
        logging.debug("Index is not unique. Temporarily added a unique index level.")
        df.set_index(pd.Series(range(len(df))), append=True, inplace=True)
    features = ['numeral', 'form', 'figbass', 'changes', 'relativeroot', 'localkey', 'globalkey']
    # default: every feature is expected under its canonical column name
    for col in features:
        if col in df.columns and not col in cols:
            cols[col] = col
    local_minor, global_minor = f"{cols['localkey']}_is_minor", f"{cols['globalkey']}_is_minor"
    # create the boolean mode columns if propagate_keys(add_bool=True) hasn't already
    if not local_minor in df.columns:
        df[local_minor] = series_is_minor(df[cols['localkey']], is_name=False)
        logging.debug(f"Boolean column '{local_minor} created.'")
    if not global_minor in df.columns:
        df[global_minor] = series_is_minor(df[cols['globalkey']], is_name=True)
        logging.debug(f"Boolean column '{global_minor} created.'")
    param_cols = {col: cols[col] for col in ['numeral', 'form', 'figbass', 'changes', 'relativeroot'] if cols[col] is not None}
    param_cols['minor'] = local_minor
    param_tuples = list(df[param_cols.values()].itertuples(index=False, name=None))
    # memoize: compute each distinct feature combination only once
    result_dict = {t: features2tpcs(**{a:b for a, b in zip(param_cols.keys(), t)}, bass_only=bass_only, merge_tones=not expand) for t in set(param_tuples)}
    if expand:
        res = pd.DataFrame([result_dict[t] for t in param_tuples], index=df.index)
        # the bass note is the first chord tone (tones are stored from the bass up)
        res['bass_note'] = res.chord_tones.apply(lambda l: l if pd.isnull(l) else l[0])
    else:
        res = pd.Series([result_dict[t] for t in param_tuples], index=df.index)
    return res.droplevel(-1) if tmp_index else res
def expand_labels(df, column, regex, groupby={'level': 0, 'group_keys': False}, cols={}, dropna=False, propagate=True, relative_to_global=False, chord_tones=False, absolute=False, all_in_c=False):
    """ Split harmony labels complying with the DCML syntax into columns holding their various features
    and allows for additional computations and transformations.
    Uses: split_labels(), replace_special(), propagate_keys(), propagate_pedal(),
    compute_chord_tones(), transform(), transpose(), labels2global_tonic(),
    features2type()
    Parameters
    ----------
    df : :obj:`pandas.DataFrame`
        Dataframe where one column contains DCML chord labels.
    column : :obj:`str`
        Name of the column that holds the harmony labels.
    regex : :obj:`re.Pattern`
        Compiled regular expression used to split the labels. It needs to have named groups.
        The group names are used as column names unless replaced by `cols`.
    groupby : :obj:`dict`, optional
        By default, `df` is treated as having several pieces that can be individually treated
        by grouping the first index level: `df.groupby(level=0)`. The dictionary holds the parameters
        that are being passed to df.groupby()`. {'group_keys': False} means that the piece identifies
        will not be added as additional index level. If the pieces are identified by a column `col`,
        pass {'by': 'col'}. If `df` has chords of only one piece, pass None.
    cols : :obj:`dict`, optional
        Dictionary to map the regex's group names to deviating column names of your choice.
    dropna : :obj:`bool`, optional
        Pass True if you want to drop rows where `column` is NaN/<NA>
    propagate: :obj:`bool`, optional
        By default, information about global and local keys and about pedal points is spread throughout
        the DataFrame. Pass False if you only want to split the labels into their features. This ignores
        all following parameters because their expansions depend on information about keys.
    relative_to_global : :obj:`bool`, optional
        Pass True if you want all labels expressed with respect to the global key.
        This levels and eliminates the features `localkey` and `relativeroot`.
    chord_tones : :obj:`bool`, optional
        Pass True if you want to add four columns that contain information about each label's
        chord, added, root, and bass tones. The pitches are expressed as intervals
        relative to the respective chord's local key or, if `relative_to_global=True`,
        to the globalkey. The intervals are represented as integers that represent
        stacks of fifths over the tonic, such that 0 = tonic, 1 = dominant, -1 = subdominant,
        2 = supertonic etc.
    absolute : :obj:`bool`, optional
        Pass True if you want to transpose the relative `chord_tones` to the global
        key, which makes them absolute so they can be expressed as actual note names.
        This implies prior conversion of the chord_tones (but not of the labels) to
        the global tonic.
    all_in_c : :obj:`bool`, optional
        Pass True to transpose `chord_tones` to C major/minor. This performs the same
        transposition of chord tones as `relative_to_global` but without transposing
        the labels, too. This option clashes with `absolute=True`.
    Returns
    -------
    :obj:`pandas.DataFrame`
        Copy of `df` with the label features in separate columns and, depending on
        the parameters, additional columns for keys, pedal points and chord tones.
    """
    assert sum((absolute, all_in_c)) < 2, "Chord tones can be either 'absolute' or 'all_in_c', not both."
    df = df.copy()
    # A unique index is required by the downstream helpers; add a temporary
    # integer level if necessary and drop it again before returning.
    tmp_index = not df.index.is_unique
    if tmp_index:
        logging.debug("Index is not unique. Temporarily added a unique index level.")
        df.set_index(pd.Series(range(len(df))), append=True, inplace=True)
    ix = df.index
    df = df.sort_index()
    for col in ['numeral', 'form', 'figbass', 'localkey', 'globalkey']:
        if not col in cols:
            cols[col] = col
    global_minor = f"{cols['globalkey']}_is_minor"
    # Sanity check: this function expects a list of labels without immediate repetitions.
    not_nan = df[column].dropna()
    immediate_repetitions = not_nan == not_nan.shift()
    k = immediate_repetitions.sum()
    if k > 0:
        if k / len(not_nan.index) > 0.1:
            raise ValueError("DataFrame has many direct repetitions of labels. This function is written for lists of labels only which should have no immediate repetitions.")
        else:
            logging.debug(f"Immediate repetition of labels:\n{not_nan[immediate_repetitions]}")
    # Step 1: split labels into feature columns and resolve special symbols (It, Ger, Fr)
    df = split_labels(df, column, regex, cols=cols, dropna=dropna)
    df = replace_special(df, regex=regex, merge=True, cols=cols)
    df['chord_type'] = transform(df, features2type, [cols[col] for col in ['numeral', 'form', 'figbass']])
    if propagate:
        # Step 2: spread key and pedal information to all rows, per piece if grouped
        key_cols = {col: cols[col] for col in ['localkey', 'globalkey']}
        if groupby is not None:
            try:
                df = df.groupby(**groupby).apply(propagate_keys, add_bool=True, **key_cols)
            except:
                logging.error("Are you expanding labels of a single piece? In that case, call expand_labels() with 'groupby=None'")
                raise
        else:
            try:
                df = propagate_keys(df, add_bool=True, **key_cols)
            except:
                logging.error("Are you expanding labels of several pieces in the same DataFrame? In that case, define 'groupby()' parameters.")
                raise
        df = propagate_pedal(df, cols=cols)
        if chord_tones:
            # Step 3: compute chord tones and optionally transpose them
            ct = compute_chord_tones(df, expand=True, cols=cols)
            if relative_to_global or absolute or all_in_c:
                # fifths to transpose by: the local key relative to the global tonic ...
                transpose_by = transform(df, rn2tpc, [cols['localkey'], global_minor])
                if absolute:
                    # ... plus, for absolute note names, the global key itself
                    transpose_by += transform(df, name2tpc, [cols['globalkey']])
                ct = pd.DataFrame([transpose(tpcs, fifths) for tpcs, fifths in zip(ct.itertuples(index=False, name=None), transpose_by.values)], index=ct.index, columns=ct.columns)
            df = pd.concat([df, ct], axis=1)
        if relative_to_global:
            # Step 4: express the labels themselves in terms of the global key
            labels2global_tonic(df, inplace=True, cols=cols)
    return df.reindex(ix).droplevel(-1) if tmp_index else df.reindex(ix)
def features2tpcs(numeral, form=None, figbass=None, changes=None, relativeroot=None, key='C', minor=None, merge_tones=True, bass_only=False):
    """ Given the features of a chord label, this function returns the chord tones
    in the order of the inversion, starting from the bass note. The tones are
    expressed as tonal pitch classes, where -1=F, 0=C, 1=G etc.
    Uses: str_is_minor(), name2tpc(), rn2tpc(), changes2list(), sort_tpcs()
    Parameters
    ----------
    numeral: :obj:`str`
        Roman numeral of the chord's root
    form: {None, 'M', 'o', '+', '%'}, optional
        Indicates the chord type if not a major or minor triad (for which `form` is None).
        '%' and 'M' can only occur as tetrads, not as triads.
    figbass: {None, '6', '64', '7', '65', '43', '2'}, optional
        Indicates chord's inversion. Pass None for triad root position.
    changes: :obj:`str`, optional
        Added steps such as '+6' or suspensions such as '4' or any combination such as (9+64).
        Numbers need to be in descending order.
    relativeroot: :obj:`str`, optional
        Pass a Roman scale degree if `numeral` is to be applied to a different scale
        degree of the local key, as in 'V65/V'
    key : :obj:`str` or :obj:`int`, optional
        The local key expressed as the root's note name or a tonal pitch class.
        If it is a name and `minor` is `None`, uppercase means major and lowercase minor.
        If it is a tonal pitch class, `minor` needs to be specified.
    minor : :obj:`bool`, optional
        Pass True for minor and False for major. Can be omitted if `key` is a note name.
        This affects calculation of chords related to III, VI and VII.
    merge_tones : :obj:`bool`, optional
        Pass False if you want the function to return two tuples, one with (potentially suspended)
        chord tones and one with added notes.
    bass_only : :obj:`bool`, optional
        Return only the bass note instead of all chord tones.
    Returns
    -------
    :obj:`int`, :obj:`tuple` or :obj:`dict`
        The bass note alone (if `bass_only`), all tones sorted from the bass up as
        one tuple (if `merge_tones`), or a dict with the keys 'chord_tones',
        'added_tones' and 'root'. NaN labels are passed through as NaN.
    """
    if pd.isnull(numeral):
        # missing labels are passed through in the requested output shape
        if bass_only:
            return np.nan
        elif merge_tones:
            return np.nan
        else:
            return {
                'chord_tones': np.nan,
                'added_tones': np.nan,
                'root': np.nan,
            }
    form, figbass, changes, relativeroot = tuple('' if pd.isnull(val) else val for val in (form, figbass, changes, relativeroot))
    # reconstructed label, used in log and error messages only
    label = f"{numeral}{form}{figbass}{'(' + changes + ')' if changes != '' else ''}{'/' + relativeroot if relativeroot != '' else ''}"
    if minor is None:
        try:
            minor = str_is_minor(key, is_name=True)
            logging.debug(f"Mode inferred from {key}.")
        except:
            raise ValueError(f"If parameter 'minor' is not specified, 'key' needs to be a string, not {key}")
    key = name2tpc(key)
    if form in ['%', 'M']:
        assert figbass in ['7', '65', '43', '2'], f"{label}: {form} requires figbass since it specifies a chord's seventh."
    if relativeroot != '':
        # recurse with the key transposed to the resolved relative root
        resolved = resolve_relative_keys(relativeroot, minor)
        rel_minor = str_is_minor(resolved, is_name=False)
        transp = rn2tpc(resolved, minor)
        logging.debug(f"Chord applied to {relativeroot}. Therefore transposing it by {transp} fifths.")
        return features2tpcs(numeral=numeral, form=form, figbass=figbass, relativeroot=None, changes=changes, key=key + transp, minor=rel_minor, merge_tones=merge_tones, bass_only=bass_only)
    if numeral.lower() == '#vii' and not minor:
        logging.warning(f"{label} in major context is most probably an annotation error.")
    root_alteration, num_degree = split_sd(numeral, count=True)
    # build 2-octave diatonic scale on C major/minor
    root = ['I','II','III','IV','V','VI','VII'].index(num_degree.upper())
    tpcs = 2 * [i+key for i in (0,2,-3,-1,1,-4,-2)] if minor else 2 * [i+key for i in (0,2,4,-1,1,3,5)]
    tpcs = tpcs[root:] + tpcs[:root] # starting the scale from chord root
    root = tpcs[0] + 7 * root_alteration # each accidental shifts the root by 7 fifths
    tpcs[0] = root # octave stays diatonic, is not altered
    logging.debug(f"The {'minor' if minor else 'major'} scale starting from the root: {tpcs}")
    def set_iv(chord_interval, interval_size):
        """ Fix the interval of a given chord interval (both viewed from the bass note).
        `interval_size` is expressed in fifths above the root, e.g. -3 = minor third.
        """
        nonlocal tpcs, root
        iv = root + interval_size
        i = chord_interval - 1
        tpcs[i] = iv
        tpcs[i+7] = iv
    # overwrite the diatonic defaults with the chord-type specific intervals
    if form == 'o':
        # diminished: minor third, diminished fifth (+ diminished seventh for tetrads)
        set_iv(3, -3)
        set_iv(5, -6)
        if figbass in ['7', '65', '43', '2']:
            set_iv(7, -9)
    elif form == '%':
        # half-diminished: the same diminished triad as 'o' but with a minor seventh.
        # BUGFIX: the third/fifth used to be set to -1/6 fifths (a fourth and an
        # augmented fourth), contradicting the 'o' branch and yielding wrong
        # chord tones for every % chord.
        set_iv(3, -3)
        set_iv(5, -6)
        set_iv(7, -2)
    elif form == '+':
        # augmented: major third, augmented fifth
        set_iv(3, 4)
        set_iv(5, 8)
    else: # triad with or without major or minor seven
        set_iv(5, 1)
        if num_degree.isupper():
            set_iv(3, 4)
        else:
            set_iv(3, -3)
        if form == 'M':
            set_iv(7, 5)
        elif figbass in ['7', '65', '43', '2']:
            set_iv(7, -2)
    # apply changes
    alts = changes2list(changes, sort=False)
    added_notes = []
    for full, added, acc, chord_interval in alts:
        added = added == '+'
        chord_interval = int(chord_interval) - 1
        if chord_interval == 0 or chord_interval > 13:
            logging.warning(f"Alteration of scale degree {chord_interval+1} is meaningless and ignored.")
            continue
        next_octave = chord_interval > 7
        shift = 7 * (acc.count('#') - acc.count('b'))
        new_val = tpcs[chord_interval] + shift
        if added:
            added_notes.append(new_val)
        elif chord_interval in [1, 3, 5, 8, 10, 12]: # these are changes to scale degree 2, 4, 6 that replace the lower neighbour unless they have a #
            if '#' in acc:
                tpcs[chord_interval + 1] = new_val
                if chord_interval == 5 and not figbass in ['7', '65', '43', '2']: # leading tone to 7 but not in seventh chord
                    added_notes.append(new_val)
            else:
                tpcs[chord_interval - 1] = new_val
        else: # chord tone alterations
            if chord_interval == 6 and figbass != '7': # 7th are a special case:
                if figbass == '': # in root position triads they are added
                    added_notes.append(new_val)
                elif figbass in ['6', '64']: # in inverted triads they replace the root
                    tpcs[0] = new_val
                elif '#' in acc: # in a seventh chord, they might retardate the 8
                    tpcs[7] = new_val
                    added_notes.append(new_val)
                else: # otherwise they are unclear
                    logging.warning(f"In seventh chords, such as {label}, it is not clear whether the {full} alters the 7 or replaces the 8 and should not be used.")
            elif tpcs[chord_interval] == new_val:
                logging.warning(f"The change {full} has no effect in {numeral}{form}{figbass}")
            else:
                tpcs[chord_interval] = new_val
        if next_octave and not added:
            added_notes.append(new_val)
    if figbass in ['', '6', '64']:
        chord_tones = [tpcs[i] for i in [0,2,4]]
    elif figbass in ['7', '65', '43', '2']:
        chord_tones = [tpcs[i] for i in [0,2,4,6]]
    else:
        raise ValueError(f"{figbass} is not a valid chord inversion.")
    # position of the bass note within chord_tones, derived from the inversion
    figbass2bass = {
        '': 0,
        '7': 0,
        '6': 1,
        '65': 1,
        '64': 2,
        '43': 2,
        '2': 3
    }
    bass = figbass2bass[figbass]
    bass_tpc = chord_tones[bass]
    if bass_only:
        return bass_tpc
    elif merge_tones:
        return tuple(sort_tpcs(chord_tones + added_notes, start=bass_tpc))
    else:
        return {
            'chord_tones': tuple(chord_tones[bass:] + chord_tones[:bass]),
            'added_tones': tuple(added_notes),
            'root': root,
        }
def features2type(numeral, form=None, figbass=None):
    """Derive a chord-type string from the features `numeral`, `form` and `figbass`.

    Returns
    -------
    :obj:`str` or None
        'M'  Major triad         'Mm7' Dominant seventh chord
        'm'  Minor triad         'MM7' Major seventh chord
        'o'  Diminished triad    'mM7' Minor major seventh chord
        '+'  Augmented triad     'mm7' Minor seventh chord
        'o7' Diminished seventh  '%7'  Half-diminished seventh chord
        '+7' Augmented (minor) seventh chord
        '+M7' Augmented major seventh chord
        None if the feature combination is invalid; NaN numerals pass through.
    """
    if pd.isnull(numeral):
        # pass missing values through unchanged
        return numeral
    form = '' if pd.isnull(form) else form
    figbass = '' if pd.isnull(figbass) else figbass
    seventh_only_forms = ('%', 'M', '+M')
    if figbass in ('', '6', '64'):
        # triad inversions (or an uninverted seventh chord without figbass)
        if form in ('o', '+'):
            return form
        if form not in seventh_only_forms:
            return 'm' if numeral.islower() else 'M'
        if figbass != '':
            # these forms denote seventh chords and cannot carry a triad inversion
            logging.error(f"{form} is a seventh chord and cannot have figbass '{figbass}'")
            return None
        # otherwise fall through: interpret as a seventh chord in root position
    if form in ('o', '%', '+', '+M'):
        return f"{form}7"
    third = 'm' if numeral.islower() else 'M'
    seventh_quality = 'M' if form == 'M' else 'm'
    return f"{third}{seventh_quality}7"
def fifths2iv(fifths):
    """ Translate a stack of fifths into an interval name such that
    0 = 'P1', -1 = 'P4', -2 = 'm7', 4 = 'M3' etc.
    Iterables are mapped element-wise; NaN passes through.
    """
    if pd.isnull(fifths):
        return fifths
    if isinstance(fifths, Iterable):
        return map2elements(fifths, fifths2iv)
    shifted = fifths + 1  # now 0 = fourth, 1 = unison, 2 = fifth, etc.
    step = shifted % 7
    number = (4, 1, 5, 2, 6, 3, 7)[step]
    region = shifted // 7  # which "quality band" of the line of fifths we are in
    base_qualities = {0: ['P', 'P', 'P', 'M', 'M', 'M', 'M'],
                      -1: ['D', 'D', 'D', 'm', 'm', 'm', 'm']}
    if region in base_qualities:
        quality = base_qualities[region][step]
    elif region > 0:
        # every further band of 7 fifths adds one augmentation
        quality = 'A' * region
    else:
        # every further band below adds one diminution
        quality = 'D' * (abs(region) - 1)
    return f"{quality}{number}"
def fifths2acc(fifths):
    """ Return the accidentals ('#'/'b') implied by a stack of fifths, to be
    combined with a basic representation of the seven diatonic steps."""
    septimes = fifths // 7  # one accidental per full band of 7 fifths
    if fifths < 0:
        return 'b' * abs(septimes)
    return '#' * septimes
def fifths2name(fifths):
    """ Turn a stack of fifths into a note name such that
    0 = C, -1 = F, -2 = Bb, 1 = G etc.
    Iterables are mapped element-wise; NaN passes through.
    """
    if pd.isnull(fifths):
        return fifths
    if isinstance(fifths, Iterable):
        return map2elements(fifths, fifths2name)
    return fifths2str(fifths, ['F', 'C', 'G', 'D', 'A', 'E', 'B'])
def fifths2pc(fifths):
    """ Convert a stack of fifths into a chromatic pitch class (0-11).
    Iterables are mapped element-wise; NaN passes through."""
    if pd.isnull(fifths):
        return fifths
    if isinstance(fifths, Iterable):
        return map2elements(fifths, fifths2pc)
    return (fifths * 7) % 12
def is_minor_mode(fifths, minor=False):
    """ Return True if the scale degree `fifths` naturally carries a minor third
    in the diatonic scale of the given mode (natural minor when `minor=True`).
    """
    if minor:
        scale_thirds = (-4, -3, -2, -1, 0, 1, 2)
    else:
        scale_thirds = (3, 4, 5, -1, 0, 1, 2)
    # interval between the degree and its diatonic third, in fifths
    third_interval = scale_thirds[(fifths + 1) % 7] - fifths
    return third_interval == -3
def fifths2rn(fifths, minor=False, auto_key=False):
    """Return Roman numeral of a stack of fifths such that
    0 = I, -1 = IV, 1 = V, -2 = bVII in major, VII in minor, etc.
    Iterables are mapped element-wise; NaN passes through.

    Parameters
    ----------
    fifths : :obj:`int` or :obj:`Iterable`
        Stack(s) of fifths to convert.
    minor : :obj:`bool`, optional
        Pass True to interpret `fifths` relative to a (natural) minor scale.
    auto_key : :obj:`bool`, optional
        By default, the returned Roman numerals are uppercase. Pass True to pass upper-
        or lowercase according to the position in the scale.
    """
    if pd.isnull(fifths):
        return fifths
    if isinstance(fifths, Iterable):
        # BUGFIX: `auto_key` used to be dropped here, so element-wise calls
        # always returned uppercase numerals regardless of the parameter.
        return map2elements(fifths, fifths2rn, minor=minor, auto_key=auto_key)
    rn = ['VI', 'III', 'VII', 'IV', 'I', 'V', 'II'] if minor else ['IV', 'I', 'V', 'II', 'VI', 'III', 'VII']
    sel = fifths + 3 if minor else fifths
    res = fifths2str(sel, rn)
    if auto_key and is_minor_mode(fifths, minor):
        return res.lower()
    return res
def fifths2sd(fifths, minor=False):
    """ Return the scale degree of a stack of fifths such that
    0 = '1', -1 = '4', -2 = 'b7' in major, '7' in minor etc.
    Iterables are mapped element-wise; NaN passes through.
    """
    if pd.isnull(fifths):
        return fifths
    if isinstance(fifths, Iterable):
        return map2elements(fifths, fifths2sd, minor=minor)
    if minor:
        # shift by 3 fifths so that the minor scale degrees line up
        return fifths2str(fifths + 3, ['6', '3', '7', '4', '1', '5', '2'])
    return fifths2str(fifths, ['4', '1', '5', '2', '6', '3', '7'])
def fifths2str(fifths, steps):
    """ Shared boilerplate of the fifths2* functions: select one of the seven
    diatonic `steps` and prefix the accidentals implied by the stack of fifths.
    """
    shifted = fifths + 1
    return fifths2acc(shifted) + steps[shifted % 7]
def labels2global_tonic(df, cols={}, inplace=False):
    """ Transposes all numerals to their position in the global major or minor scale.
    This eliminates localkeys and relativeroots. The resulting chords are defined
    by [`numeral`, `figbass`, `changes`, `globalkey_is_minor`] (and `pedal`).
    Uses: transform(), rel2abs_key(), transpose_changes(), series_is_minor(), resolve_relative_keys() -> str_is_minor()
    Parameters
    ----------
    df : :obj:`pandas.DataFrame`
        Dataframe containing DCML chord labels that have been split by split_labels()
        and where the keys have been propagated using propagate_keys(add_bool=True).
    cols : :obj:`dict`, optional
        In case the column names for ['numeral', 'form', 'figbass', 'changes', 'relativeroot', 'localkey', 'globalkey'] deviate, pass a dict, such as
        {'chord': 'chord_col_name'
         'pedal': 'pedal_col_name',
         'numeral': 'numeral_col_name',
         'form': 'form_col_name',
         'figbass': 'figbass_col_name',
         'changes': 'changes_col_name',
         'relativeroot': 'relativeroot_col_name',
         'localkey': 'localkey_col_name',
         'globalkey': 'globalkey_col_name'}}
    inplace : :obj:`bool`, optional
        Pass True if you want to mutate the input.
    Returns
    -------
    :obj:`pandas.DataFrame`
        If `inplace=False`, the relevant features of the transposed chords are returned.
        Otherwise, the original DataFrame is mutated (and None is returned).
    """
    if not inplace:
        df = df.copy()
    # A unique index is required for the alignment below; add a temporary
    # integer level if necessary and drop it again before returning.
    tmp_index = not df.index.is_unique
    if tmp_index:
        logging.debug("Index is not unique. Temporarily added a unique index level.")
        df.set_index(pd.Series(range(len(df))), append=True, inplace=True)
    features = ['chord', 'pedal', 'numeral', 'form', 'figbass', 'changes', 'relativeroot', 'localkey', 'globalkey']
    for col in features:
        if col in df.columns and not col in cols:
            cols[col] = col
    local_minor, global_minor = f"{cols['localkey']}_is_minor", f"{cols['globalkey']}_is_minor"
    # create the boolean mode columns if propagate_keys(add_bool=True) hasn't already
    if not local_minor in df.columns:
        df[local_minor] = series_is_minor(df[cols['localkey']], is_name=False)
        logging.debug(f"Boolean column '{local_minor} created.'")
    if not global_minor in df.columns:
        df[global_minor] = series_is_minor(df[cols['globalkey']], is_name=True)
        logging.debug(f"Boolean column '{global_minor} created.'")
    # Express pedals in relation to the global tonic
    param_cols = [cols[col] for col in ['pedal', 'localkey']] + [global_minor]
    df['pedal'] = transform(df, rel2abs_key, param_cols)
    # Make relativeroots to local keys
    param_cols = [cols[col] for col in ['relativeroot', 'localkey']] + [local_minor, global_minor]
    relativeroots = df.loc[df[cols['relativeroot']].notna(), param_cols]
    rr_tuples = list(relativeroots.itertuples(index=False, name=None))
    # memoize: resolve each distinct (relativeroot, localkey, modes) combination only once
    transposed_rr = {(rr, localkey, local_minor, global_minor): rel2abs_key(resolve_relative_keys(rr, local_minor), localkey, global_minor) for (rr, localkey, local_minor, global_minor) in set(rr_tuples)}
    df.loc[relativeroots.index, cols['localkey']] = pd.Series((transposed_rr[t] for t in rr_tuples), index=relativeroots.index)
    df.loc[relativeroots.index, local_minor] = series_is_minor(df.loc[relativeroots.index, cols['localkey']])
    # Express numerals in relation to the global tonic
    param_cols = [cols[col] for col in ['numeral', 'localkey']] + [global_minor]
    df['abs_numeral'] = transform(df, rel2abs_key, param_cols)
    # Transpose changes to be valid with the new numeral
    param_cols = [cols[col] for col in ['changes', 'numeral']] + ['abs_numeral', local_minor, global_minor]
    df[cols['changes']] = transform(df, transpose_changes, param_cols)
    # Combine the new chord features
    df[cols['chord']] = df.abs_numeral + df.form.fillna('') + df.figbass.fillna('') + ('(' + df.changes + ')').fillna('') # + ('/' + df.relativeroot).fillna('')
    if inplace:
        # overwrite the numeral column and drop the now-redundant key columns
        df[cols['numeral']] = df.abs_numeral
        drop_cols = [cols[col] for col in ['localkey', 'relativeroot']] + ['abs_numeral', local_minor]
        df.drop(columns=drop_cols, inplace=True)
    else:
        res_cols = ['abs_numeral'] + [cols[col] for col in ['form', 'figbass', 'changes', 'globalkey']] + [global_minor]
        res = df[res_cols].rename(columns={'abs_numeral': cols['numeral']})
        return res.droplevel(-1) if tmp_index else res
def map2elements(e, f, *args, **kwargs):
    """ Apply `f` to `e`, mapping recursively over the elements if `e` is an
    iterable collection; the collection type is preserved.

    Strings and bytes are treated as scalars: although they are iterable,
    each of their elements is again an iterable string, which previously sent
    this function into infinite recursion (RecursionError).

    Parameters
    ----------
    e : scalar or :obj:`Iterable`
        Value or (nested) collection of values to transform.
    f : :obj:`callable`
        Function applied to every scalar element; receives `*args`/`**kwargs`.
    """
    if isinstance(e, Iterable) and not isinstance(e, (str, bytes)):
        # rebuild the same container type from the mapped elements
        return e.__class__(map2elements(x, f, *args, **kwargs) for x in e)
    return f(e, *args, **kwargs)
def merge_changes(left, right, *args):
    """ Merge two or more `changes` strings into one, e.g. `b3` and `+#7` to `+#7b3`.
    Uses: changes2list()
    """
    parsed = [changes2list(changes, sort=False) for changes in (left, right, *args)]
    # flatten the per-string lists into one list of alteration tuples
    flattened = [alteration for sublist in parsed for alteration in sublist]
    # DCML syntax requires the interval numbers in descending order
    ordered = sorted(flattened, key=lambda alteration: int(alteration[3]), reverse=True)
    return ''.join(alteration[0] for alteration in ordered)
def propagate_keys(df, globalkey='globalkey', localkey='localkey', add_bool=True):
    """ Propagate information about global keys and local keys throughout the dataframe.
    Pass split harmonies for one piece at a time. For concatenated pieces, use apply_to_pieces().
    Uses: series_is_minor()

    Parameters
    ----------
    df : :obj:`pandas.DataFrame`
        Dataframe containing DCML chord labels that have been split by split_labels().
    globalkey, localkey : :obj:`str`, optional
        In case you renamed the columns, pass column names.
    add_bool : :obj:`bool`, optional
        Pass True if you want to add two boolean columns which are true if the respective key is
        a minor key.

    Returns
    -------
    :obj:`pandas.DataFrame`
        Copy of `df` where both key columns are filled for every row and, if
        `add_bool`, with the two additional boolean columns
        '{globalkey}_is_minor' and '{localkey}_is_minor'.
    """
    df = df.copy()
    nunique = df[globalkey].nunique()
    assert nunique > 0, "No global key specified. It might be that this function is being applied in a wrong groupby and gets rows instead of entire frames."
    if nunique > 1:
        raise NotImplementedError("Several global keys not accepted at the moment.")
    logging.debug('Extending global key to all harmonies')
    global_key = df[globalkey].iloc[0]
    if pd.isnull(global_key):
        # fall back to the first label that does specify the global key
        global_key = df[globalkey].dropna().iloc[0]
        logging.warning(f"Global key is not specified in the first label. Using '{global_key}' from index {df[df[globalkey] == global_key].index[0]}")
    df.loc[:,globalkey] = global_key
    global_minor = series_is_minor(df[globalkey], is_name=True)
    logging.debug('Extending local keys to all harmonies')
    if pd.isnull(df[localkey].iloc[0]):
        # default to the global tonic so that the forward fill has a start value
        one = 'i' if global_minor.iloc[0] else 'I'
        df.iloc[0, df.columns.get_loc(localkey)] = one
    df[localkey].fillna(method='ffill', inplace=True)
    if add_bool:
        local_minor = series_is_minor(df[localkey], is_name=False)
        gm = f"{globalkey}_is_minor"
        lm = f"{localkey}_is_minor"
        df[gm] = global_minor
        df[lm] = local_minor
    return df
def propagate_pedal(df, relative=True, drop_pedalend=True, cols={}):
    """ Propagate the pedal note for all chords within square brackets.
    By default, the note is expressed in relation to each label's localkey.
    Uses: rel2abs_key(), abs2rel_key()

    Parameters
    ----------
    df : :obj:`pandas.DataFrame`
        Dataframe containing DCML chord labels that have been split by split_labels()
        and where the keys have been propagated using propagate_keys().
    relative : :obj:`bool`, optional
        Pass False if you want the pedal note to stay the same even if the localkey changes.
    drop_pedalend : :obj:`bool`, optional
        Pass False if you don't want the column with the ending brackets to be dropped.
    cols : :obj:`dict`, optional
        In case the column names for ['pedal','pedalend', 'globalkey', 'localkey'] deviate, pass a dict, such as
        {'pedal': 'pedal_col_name',
         'pedalend': 'pedalend_col_name',
         'globalkey': 'globalkey_col_name',
         'localkey': 'localkey_col_name'}

    Returns
    -------
    :obj:`pandas.DataFrame`
        Copy of `df` where the pedal column is filled for every row between an
        organ point's beginning and ending label.
    """
    df = df.copy()
    # Copy to avoid mutating the caller's dict and, more importantly, the
    # mutable default argument which would otherwise be shared between calls.
    cols = dict(cols)
    # A unique index is required for the slicing below; add a temporary
    # integer level if necessary and drop it again before returning.
    tmp_index = not df.index.is_unique
    if tmp_index:
        logging.debug("Index is not unique. Temporarily added a unique index level.")
        df.set_index(pd.Series(range(len(df))), append=True, inplace=True)
    ix = df.index
    df = df.sort_index()
    features = ['pedal','pedalend', 'globalkey', 'localkey']
    for col in features:
        if not col in cols:
            cols[col] = col
    pedal, pedalend = cols['pedal'], cols['pedalend']
    logging.debug('Extending pedal notes to concerned harmonies')
    beginnings = df.loc[df[pedal].notna(), pedal]
    endings = df.loc[df[pedalend].notna(), pedalend]
    n_b, n_e = len(beginnings), len(endings)
    assert n_b == n_e, f"{n_b} organ points started, {n_e} ended! Beginnings:\n{beginnings}\nEndings:\n{endings}"
    if relative:
        assert df[cols['localkey']].notna().all(), "Local keys must first be propagated using propagate_keys(), no NaNs allowed."
    for (fro, ped), to in zip(beginnings.items(), endings.index):
        try:
            section = df.loc[fro:to].index
        except:
            logging.error(f"Slicing of the DataFrame did not work from {fro} to {to}. Index looks like this:\n{df.head().index}")
            # BUGFIX: without re-raising, `section` would be undefined and the
            # next statement would fail with a misleading NameError.
            raise
        localkeys = df.loc[section, cols['localkey']]
        if localkeys.nunique() > 1:
            # The pedal note was notated relative to the local key at the organ
            # point's beginning; transpose it for every other local key.
            first_localkey = localkeys.iloc[0]
            globalkeys = df.loc[section, cols['globalkey']].unique()
            assert len(globalkeys) == 1, "Several globalkeys appearing within the same organ point."
            global_minor = globalkeys[0].islower()
            key2pedal = {key: ped if key == first_localkey else abs2rel_key(rel2abs_key(ped, first_localkey, global_minor), key, global_minor) for key in localkeys.unique()}
            logging.debug(f"Pedal note {ped} has been transposed relative to other local keys within a global {'minor' if global_minor else 'major'} context: {key2pedal}")
            pedals = pd.Series([key2pedal[key] for key in localkeys], index=section)
        else:
            pedals = pd.Series(ped, index=section)
        df.loc[section, pedal] = pedals
    if drop_pedalend:
        df = df.drop(columns=pedalend)
    return df.reindex(ix).droplevel(-1) if tmp_index else df
def rel2abs_key(rel, localkey, global_minor=False):
    """Express a Roman numeral `rel`, given relative to `localkey`, as a scale
    degree of the global key. For local keys {III, iii, VI, vi, VII, vii} the
    result depends on whether the global key is major or minor.
    Uses: split_sd()

    Parameters
    ----------
    rel : :obj:`str`
        Relative key or chord expressed as Roman scale degree of the local key.
    localkey : :obj:`str`
        The local key to which `rel` is relative.
    global_minor : bool, optional
        Has to be set to True if `localkey` is a scale degree of a global minor key.

    Examples
    --------
    If the label viio6/VI appears in the context of the local key VI or vi,
    the absolute key to which viio6 applies depends on the global key.
    The comments express the examples in relation to global C major or C minor.

        >>> rel2abs_key('vi', 'VI', global_minor=False)
        '#iv'       # vi of A major = F# minor
        >>> rel2abs_key('vi', 'vi', global_minor=False)
        'iv'        # vi of A minor = F minor
        >>> rel2abs_key('vi', 'VI', global_minor=True)
        'iv'        # vi of Ab major = F minor
        >>> rel2abs_key('vi', 'vi', global_minor=True)
        'biv'       # vi of Ab minor = Fb minor

    The same examples hold if you're expressing in terms of the global key
    the root of a VI-chord within the local keys VI or vi.
    """
    if pd.isnull(rel):
        return np.nan
    upper_numerals = ['I','II','III','IV','V','VI','VII']
    lower_numerals = ['i','ii','iii','iv','v','vi','vii']
    # extra accidentals arising from combining two scale degrees,
    # indexed as [relative degree][local key degree]
    extra_shifts = np.array( [[0,0,0,0,0,0,0],
                              [0,0,1,0,0,0,1],
                              [0,1,1,0,0,1,1],
                              [0,0,0,-1,0,0,0],
                              [0,0,0,0,0,0,1],
                              [0,0,1,0,0,1,1],
                              [0,1,1,0,1,1,1]])
    rel_accidentals, rel = split_sd(rel, count=True)
    local_accidentals, localkey = split_sd(localkey, count=True)
    accidental_count = rel_accidentals + local_accidentals
    numerals = upper_numerals if rel.isupper() else lower_numerals
    rel_degree = numerals.index(rel)
    local_degree = upper_numerals.index(localkey.upper())
    # the resulting step is simply the sum of the two degrees, mod 7
    result_step = numerals[(rel_degree + local_degree) % 7]
    if localkey.islower() and rel_degree in (2, 5, 6):
        # III, VI and VII of a minor local key are a semitone lower
        accidental_count -= 1
    if global_minor:
        local_degree = (local_degree - 2) % 7
    accidental_count += extra_shifts[rel_degree][local_degree]
    accidentals = accidental_count * '#' if accidental_count > 0 else -accidental_count * 'b'
    return accidentals + result_step
def replace_special(df, regex, merge=False, inplace=False, cols={}, special_map={}):
    """ Move special symbols in the `numeral` column to a separate column and replace them by the explicit chords they stand for.
    In particular, this function replaces the symbols `It`, `Ger`, and `Fr`.
    Uses: merge_changes()
    Parameters
    ----------
    df : :obj:`pandas.DataFrame`
        Dataframe containing DCML chord labels that have been split by split_labels().
    regex : :obj:`re.Pattern`
        Compiled regular expression used to split the labels replacing the special symbols.It needs to have named groups.
        The group names are used as column names unless replaced by `cols`.
    merge : :obj:`bool`, optional
        False: By default, existing values, except `figbass`, are overwritten.
        True: Merge existing with new values (for `changes` and `relativeroot`).
    inplace : :obj:`bool`, optional
        True: Change `df` inplace.
        False: Return a changed copy (default).
    cols : :obj:`dict`, optional
        The special symbols appear in the column `numeral` and are moved to the column `special`.
        In case the column names for ['numeral','form', 'figbass', 'changes', 'relativeroot', 'special'] deviate, pass a dict, such as
        {'numeral': 'numeral_col_name',
         'form': 'form_col_name
         'figbass': 'figbass_col_name',
         'changes': 'changes_col_name',
         'relativeroot': 'relativeroot_col_name',
         'special': 'special_col_name'}
    special_map : :obj:`dict`, optional
        In case you want to add or alter special symbols to be replaced, pass a replacement map, e.g.
        {'N': 'bII6'}. The column 'figbass' is only altered if it's None to allow for inversions of special chords.
    """
    if not inplace:
        df = df.copy()
    # A unique index is required for the row selections below; add a temporary
    # integer level if necessary and drop it again before returning.
    tmp_index = not df.index.is_unique
    if tmp_index:
        logging.debug("Index is not unique. Temporarily added a unique index level.")
        df.set_index(pd.Series(range(len(df))), append=True, inplace=True)
    # default expansions of the augmented-sixth symbols
    spec = {
        'It': 'viio6(b3)/V',
        'Ger': 'viio65(b3)/V',
        'Fr': 'V7(b5)/V',
    }
    spec.update(special_map)
    features = ['numeral','form', 'figbass', 'changes', 'relativeroot']
    for col in features + ['special']:
        if not col in cols:
            cols[col] = col
    feature_cols = list(cols.values())
    missing = [cols[f] for f in features if not cols[f] in df.columns]
    assert len(missing) == 0, f"These columns are missing from the DataFrame: {missing}. Either use split_labels() first or give correct `cols` parameter."
    select_all_special = df[df[cols['numeral']].isin(spec.keys())].index
    logging.debug(f"Moving special symbols from {cols['numeral']} to {cols['special']}...")
    if not cols['special'] in df.columns:
        df.insert(df.columns.get_loc(cols['numeral'])+1, cols['special'], np.nan)
    df.loc[select_all_special, cols['special']] = df.loc[select_all_special, cols['numeral']]
    def repl_spec(frame, instead, special):
        """Check if the selected parts are empty and replace."""
        # split the replacement label into the same named feature groups
        new_vals = re.match(regex, instead)
        if new_vals is None:
            logging.warning(f"{instead} is not a valid label. Skipped.")
            return frame
        else:
            new_vals = new_vals.groupdict()
        for f in features:
            if new_vals[f] is not None:
                # NOTE(review): `SM` is defined elsewhere in the module; `SM[:]`
                # presumably yields a full-column selector (e.g. pd.IndexSlice[:])
                # — TODO confirm against the module header.
                replace_this = SM[:] # by default, replace entire column
                if f == 'figbass': # only empty figbass is replaced, with the exception of `Ger6` and `Fr6`
                    if special in ['Fr', 'Ger']: # For these symbols, a wrong `figbass` == 6 is accepted and replaced
                        replace_this = (frame[cols['figbass']] == '6') | frame[cols['figbass']].isna()
                    else:
                        replace_this = frame[cols['figbass']].isna()
                elif f != 'numeral': # numerals always replaced completely
                    not_empty = frame[cols[f]].notna()
                    if not_empty.any():
                        if f in ['changes', 'relativeroot'] and merge:
                            if f == 'changes':
                                frame.loc[not_empty, cols[f]] = frame.loc[not_empty, cols[f]].apply(merge_changes, args=(new_vals[f],))
                            elif f == 'relativeroot':
                                frame.loc[not_empty, cols[f]] = frame.loc[not_empty, cols[f]].apply(lambda x: f"{new_vals[f]}/{x}")
                            logging.debug(f"While replacing {special}, the existing '{f}'-values have been merged with '{new_vals[f]}', resulting in :\n{frame.loc[not_empty, cols[f]]}")
                            replace_this = ~not_empty
                        else:
                            logging.warning(f"While replacing {special}, the following existing '{f}'-values have been overwritten with {new_vals[f]}:\n{frame.loc[not_empty, cols[f]]}")
                frame.loc[replace_this, cols[f]] = new_vals[f]
        return frame
    for special, instead in spec.items():
        select_special = df[cols['special']] == special
        df.loc[select_special, feature_cols] = repl_spec(df.loc[select_special, feature_cols].copy(), instead, special)
    # drop the helper column again if no special symbols were found
    if df[cols['special']].isna().all():
        df.drop(columns=cols['special'], inplace=True)
    return df.droplevel(-1) if tmp_index else df
def resolve_relative_keys(relativeroot, minor=False):
    """ Resolve nested relative keys, e.g. 'V/V/V' => 'VI'.

    Uses: rel2abs_key(), str_is_minor()

    Parameters
    ----------
    relativeroot : :obj:`str`
        One or several relative keys, e.g. iv/v/VI (fourth scale degree of the
        fifth scale degree of the sixth scale degree).
    minor : :obj:`bool`, optional
        Pass True if the last of the relative keys is to be interpreted within
        a minor context.
    """
    keys = relativeroot.split('/')
    if len(keys) < 2:
        # Nothing nested: already an absolute key.
        return relativeroot
    if len(keys) == 2:
        return rel2abs_key(keys[0], keys[1], minor)
    # Resolve everything left of the last slash first; its mode is determined
    # by the innermost key it is applied to.
    head, last = '/'.join(keys[:-1]), keys[-1]
    resolved_head = resolve_relative_keys(head, str_is_minor(last, is_name=False))
    return rel2abs_key(resolved_head, last, minor)
def rn2tpc(rn, global_minor=False):
    """ Turn a Roman numeral into a TPC interval (e.g. for transposition purposes).

    Uses: split_sd()
    """
    # Line-of-fifths distance of each (natural) scale degree from the tonic.
    major_steps = {'I': 0, 'II': 2, 'III': 4, 'IV': -1, 'V': 1, 'VI': 3, 'VII': 5}
    minor_steps = {'I': 0, 'II': 2, 'III': -3, 'IV': -1, 'V': 1, 'VI': -4, 'VII': -2}
    lookup = minor_steps if global_minor else major_steps
    n_accidentals, step = split_sd(rn, count=True)
    # Each accidental shifts by 7 fifths (a chromatic semitone).
    return lookup[step.upper()] + 7 * n_accidentals
def series_is_minor(S, is_name=True):
    """Element-wise test whether each string in `S` is entirely lowercase.

    As soon as one character is uppercase the element counts as major, so the
    plain pandas ``str.islower`` check is sufficient. `is_name` is kept for
    signature compatibility with str_is_minor() and is not used.
    """
    return S.str.islower()
def sort_tpcs(tpcs, ascending=True, start=None):
    """ Sort tonal pitch classes by order on the piano.

    Uses: fifths2pc()

    Parameters
    ----------
    tpcs : collection of :obj:`int`
        Tonal pitch classes to sort.
    ascending : :obj:`bool`, optional
        Pass False to sort by descending order.
    start : :obj:`int`, optional
        Start on or above this TPC.
    """
    # Order by chromatic pitch class; ties (enharmonics) are broken by
    # preferring the higher TPC (fewer flats / more sharps first).
    ordered = sorted(tpcs, key=lambda tpc: (fifths2pc(tpc), -tpc))
    if start is not None:
        start_pc = fifths2pc(start)
        pcs = [fifths2pc(tpc) for tpc in ordered]
        # Rotate the sorted list so it begins at the first element whose
        # pitch class is >= start (stopping before the last element).
        cut = 0
        while cut < len(pcs) - 1 and pcs[cut] < start_pc:
            cut += 1
        ordered = ordered[cut:] + ordered[:cut]
    if ascending:
        return ordered
    return list(reversed(ordered))
def split_sd(sd, count=False):
    """ Split a scale degree such as 'bbVI' or 'b6' into accidentals and numeral.

    Parameters
    ----------
    sd : :obj:`str`
        Scale degree.
    count : :obj:`bool`, optional
        Pass True to get the accidentals as integer rather than as string.

    Returns
    -------
    :obj:`tuple`
        (accidentals, numeral); (None, None) if `sd` is not a valid scale degree.
    """
    # Raw string fixes the invalid '\d' escape of the original; alternatives
    # are ordered longest-first so e.g. 'VII' is not matched as 'V' + 'II'.
    m = re.match(r"^(#*|b*)(VII|VI|V|IV|III|II|I|vii|vi|v|iv|iii|ii|i|\d)$", str(sd))
    if m is None:
        # Use an f-string: the original `sd + "..."` raised TypeError for
        # non-string input instead of logging the error.
        logging.error(f"{sd} is not a valid scale degree.")
        return None, None
    acc, num = m.group(1), m.group(2)
    if count:
        acc = acc.count('#') - acc.count('b')
    return acc, num
def str_is_minor(tone, is_name=True):
    """Return True if `tone` (a note name or Roman numeral) is written in
    lowercase, i.e. represents a minor key/numeral. `is_name` is unused and
    kept for signature compatibility.
    """
    return tone.islower()
def transform_columns(df, func, columns=None, param2col=None, inplace=False, **kwargs):
    """ Wrapper function to use transform() on df[columns].

    Parameters
    ----------
    df : :obj:`pandas.DataFrame`
        DataFrame where columns (or column combinations) work as function arguments.
    func : :obj:`callable`
        Function you want to apply to all elements in `columns`.
    columns : :obj:`list`
        Columns to which you want to apply `func`.
    param2col : :obj:`dict` or :obj:`list`, optional
        Mapping from parameter names of `func` to column names.
        If you pass a list of column names, the columns' values are passed as positional arguments.
        Pass None if you want to use all columns as positional arguments.
    inplace : :obj:`bool`, optional
        Pass True if you want to mutate `df` rather than getting an altered copy.
    **kwargs: keyword arguments for transform()

    Returns
    -------
    :obj:`pandas.DataFrame` or None
        The altered copy, or None when ``inplace=True``.
    """
    if not inplace:
        df = df.copy()
    # Extra columns whose values are fed to `func` as arguments in addition
    # to `columns` themselves.
    param_cols = []
    if columns is None:
        columns = df.columns
    elif param2col is None:
        pass
    elif param2col.__class__ == dict:
        param_cols = list(param2col.values())
    else:
        param_cols = list(param2col)
    # transform() needs unique index values; temporarily flatten a non-unique
    # index and restore it afterwards.
    tmp_index = not df.index.is_unique
    if tmp_index:
        ix = df.index
        df.reset_index(drop=True, inplace=True)
    # NOTE(review): when `columns is None`, `columns` is a pandas Index and
    # `columns+param_cols` relies on `param_cols` being empty in that branch —
    # confirm this combination cannot occur with a non-empty param2col.
    df.loc[:, columns] = transform(df[columns+param_cols], func, param2col, **kwargs)
    if tmp_index:
        df.index = ix
    if not inplace:
        return df
def transform_note_columns(df, to, note_cols=['chord_tones', 'added_tones', 'bass_note', 'root'], minor_col='localkey_is_minor', inplace=False, **kwargs):
    """ Turns columns with line-of-fifth tonal pitch classes into another representation.

    Uses: transform_columns()

    Parameters
    ----------
    df : :obj:`pandas.DataFrame`
        DataFrame where columns (or column combinations) work as function arguments.
    to : {'name', 'iv', 'pc', 'sd', 'rn'}
        The tone representation that you want to get from the `note_cols`:
        'name': Note names (use only if 0 means C rather than I).
        'iv': Intervals such that 0 = 'P1', 1 = 'P5', 4 = 'M3', -3 = 'm3', etc.
        'pc': (Relative) chromatic pitch class, or distance from tonic in semitones.
        'sd': Scale degrees such that 0 = '1', -1 = '4', -2 = 'b7' in major, '7' in minor.
              Requires the boolean column `minor_col`, or the keyword argument `minor`.
        'rn': Roman numerals such that 0 = 'I', -2 = 'bVII' in major, 'VII' in minor.
              Requires boolean 'minor' values, see 'sd'.
    note_cols : :obj:`list`, optional
        List of columns that hold integers or collections of integers that represent
        stacks of fifth (0 = tonal center, 1 = fifth above, -1 = fourth above, etc).
    minor_col : :obj:`str`, optional
        If `to` is 'sd' or 'rn', specify a boolean column where the value is
        True in those rows where the stacks of fifths occur in a local minor
        context and False for the others.
    inplace : :obj:`bool`, optional
        Pass True to mutate `df` rather than getting an altered copy.
    **kwargs
        Additional keyword arguments for transform_columns().
    """
    transformations = {
        'name': fifths2name,
        'names': fifths2name,
        'iv': fifths2iv,
        'pc': fifths2pc,
        'sd': fifths2sd,
        'rn': fifths2rn,
    }
    assert to in transformations, "Parameter to needs to be one of {'name', 'iv', 'pc', 'sd', 'rn'}"
    cols = [col for col in note_cols if col in df.columns]
    if len(cols) < len(note_cols):
        # BUGFIX: the original warning had doubled brackets and no actual
        # message; spell out which of the requested columns are missing.
        missing = [col for col in note_cols if col not in df.columns]
        logging.warning(f"Columns {missing} are not present in the DataFrame and are skipped.")
    param2col = None
    if to in ['sd', 'rn']:
        assert minor_col in df.columns or 'minor' in kwargs, f"'{to} representation requires a boolean column for the 'minor' argument, e.g. 'globalkey_is_minor'."
        if not 'minor' in kwargs:
            param2col = {'minor': minor_col}
    func = transformations[to]
    # BUGFIX: pass the filtered `cols` (the original passed `note_cols`,
    # which made the filtering above ineffective and could raise KeyError
    # on missing columns).
    res = transform_columns(df, func, columns=cols, inplace=inplace, param2col=param2col, column_wise=True, **kwargs)
    if not inplace:
        return res
def transpose(e, n):
    """Add `n` to every element of `e`, recursing into nested collections.

    Uses: map2elements()
    """
    def shift(x):
        return x + n
    return map2elements(e, shift)
def transpose_changes(changes, old_num, new_num, old_minor=False, new_minor=False):
    """ Since the interval sizes expressed by the changes of the DCML harmony syntax
    depend on the numeral's position in the scale, these may change if the numeral
    is transposed. This function expresses the same changes for the new position.
    Chord tone alterations (of 3 and 5) stay untouched.

    Uses: changes2tpc()

    Parameters
    ----------
    changes : :obj:`str`
        A string of changes following the DCML harmony standard.
    old_num, new_num : :obj:`str`:
        Old numeral, new numeral.
    old_minor, new_minor : :obj:`bool`, optional
        For each numeral, pass True if it occurs in a minor context.
    """
    if pd.isnull(changes):
        return changes
    old = changes2tpc(changes, old_num, minor=old_minor, root_alterations=True)
    new = changes2tpc(changes, new_num, minor=new_minor, root_alterations=True)
    res = []
    get_acc = lambda n: n * '#' if n > 0 else -n * 'b'
    for (full, added, acc, chord_interval, iv1), (_, _, _, _, iv2) in zip(old, new):
        if iv1 is None or iv1 == iv2:
            # Unparsable change or identical interval: keep it as is.
            res.append(full)
        else:
            d = iv2 - iv1
            if d % 7 > 0:
                # BUGFIX: the original message interpolated an undefined name
                # `minor`, raising NameError whenever this warning fired.
                logging.warning(
                    f"The difference between the intervals of {full} in {old_num} "
                    f"({'minor' if old_minor else 'major'}) and in {new_num} "
                    f"({'minor' if new_minor else 'major'}) don't differ by chromatic semitones.")
            # Adjust the accidentals by the number of chromatic semitones
            # (multiples of 7 on the line of fifths) between both intervals.
            n_acc = acc.count('#') - acc.count('b')
            new_acc = get_acc(n_acc - d // 7)
            res.append(added + new_acc + chord_interval)
    return ''.join(res)
| [
"johannes.hentschel@epfl.ch"
] | johannes.hentschel@epfl.ch |
46eb0092ec00ba666cc6bbdaa21bff606a02a170 | 6f594cc963795c69d8da3c30ca580c0405ef2d6e | /binaryTree/652FindDuplicateSubtrees.py | fbf0d850405b6d265b0194874f1be18bc6d4cea4 | [] | no_license | lo-tp/leetcode | 25933c5b25f64f881d43748d8b2763f69614a97f | 4cc4d76c64e9d9aa3f53c5e9574e488c93e10a50 | refs/heads/master | 2022-09-07T20:32:58.487759 | 2022-09-05T03:39:50 | 2022-09-07T13:39:50 | 116,555,892 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,259 | py | from collections import defaultdict
class Solution(object):
    # LeetCode 652: collect one node per duplicated subtree structure. Three
    # iterations of the same idea follow; Python keeps only the LAST
    # definition, the earlier two remain as documented history.
    # All versions: serialize every subtree with an explicit stack (post-order)
    # and count serializations in `data`; append the node when its string is
    # seen for the second time. Nodes expose .val, .left and .right.
    def findDuplicateSubtrees(self, root):
        # Version 1: classic two-phase stack (descend left, then revisit).
        # NOTE(review): the space-joined '{} {} {}' encoding does not mark
        # empty subtrees, so distinct shapes may collide — confirm.
        res, current, stack, data = [], '', [], defaultdict(lambda: 0)
        while stack or root:
            if root:
                stack.append((root, False, ''))
                root = root.left
            else:
                t, visited, left_str = stack.pop()
                if visited:
                    # Both children done: build "left val right" and count it.
                    current = '{} {} {}'.format(left_str, t.val, current)
                    root = None
                    if data[current] == 1:
                        res.append(t)
                    data[current] += 1
                else:
                    # First visit: remember the left serialization, go right.
                    stack.append((t, True, current))
                    current = ''
                    root = t.right
        return res
    def findDuplicateSubtrees(self, root):
        # Version 2: single stack with a 0/1/2 visit flag; None children add
        # a space marker so shapes are distinguished.
        res, current, stack, data = [], '', [
            (root, 0, '')], defaultdict(lambda: 0)
        while stack:
            root, flag, left_str = stack.pop()
            if not root:
                current += ' '
            elif not flag:
                # flag 0: schedule self (flag 1) after the left subtree.
                stack.append((root, 1, ''))
                stack.append((root.left, 0, ''))
            elif flag == 1:
                # flag 1: left done; stash it, schedule self after the right.
                stack.append((root, 2, current))
                stack.append((root.right, 0, ''))
                current = ''
            else:
                # flag 2: both sides done; serialize with l...r delimiters.
                current = 'l{}-{}-{}r'.format(left_str, root.val, current)
                if data[current] == 1:
                    res.append(root)
                data[current] += 1
        return res
    def findDuplicateSubtrees(self, root):
        # Version 3 (effective one): same traversal, '#' marks empty
        # subtrees and ',' separates fields, avoiding ambiguity.
        cur = None
        res, data, stack = [], defaultdict(lambda: 0), [(root, None, 0)]
        while stack:
            node, string, flag = stack.pop()
            if not node:
                cur = '#'
            elif not flag:
                stack.append((node, None, 1))
                stack.append((node.left, None, 0))
            elif flag == 1:
                stack.append((node, cur, 2))
                stack.append((node.right, None, 0))
            else:
                cur = '{},{},{}'.format(node.val, string, cur)
                data[cur] += 1
                if data[cur] == 2:
                    res.append(node)
        return res
| [
"regesteraccount@hotmail.com"
] | regesteraccount@hotmail.com |
ceff54f968ea5881df1a7121e62e406f4da9e11d | e40900b4a766bdc0d44e9e7c6cd751cd180a053c | /python-downloader-gui.py | 6e8165b7b6fc8e2a1e2858a0d372c993ea45febd | [] | no_license | ohidurbappy/random-scripts | ce0601429a97723d6846222663813439366f92b9 | 4c675ee153481999990deafa5394da14cd974cab | refs/heads/master | 2021-06-23T09:35:16.013494 | 2021-02-07T04:41:03 | 2021-02-07T04:41:03 | 195,460,766 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,599 | py | # © ohidurbappy
import tkinter as tk
import tkinter.ttk as ttk
import requests
import threading
import tempfile
import os
import time
# GUI: a single window holding a determinate progress bar.
progress_window=tk.Tk()
progress_window.geometry('400x60')
progress_window.title("Querying file size..")
progressbar=ttk.Progressbar(master=progress_window, orient = tk.HORIZONTAL,
            length = 100, mode = 'determinate')
progressbar.pack(ipadx=4,ipady=4,padx=(12,12),pady=(12,12))
def download_file():
    # Stream a hard-coded test file and update the progress bar per chunk;
    # the window title shows absolute byte counts.
    session=requests.Session()
    response=session.get("http://ipv4.download.thinkbroadband.com/10MB.zip",stream=True)
    # Fall back to 500 KiB when the server sends no Content-Length.
    total_size_in_bytes= int(response.headers.get('Content-Length', 500*1024))
    # we are streaming and don't know content length
    # assuming its 500kb
    # total_size_in_bytes=500*1024
    block_size = 1024*50
    with tempfile.TemporaryDirectory() as tmp_dir:
        # The download target lives in a temp dir deleted on completion —
        # the data is intentionally thrown away (demo program).
        tmp_filename=os.path.join(tmp_dir,'response.data')
        with open(tmp_filename, 'wb') as file:
            downloaded=0
            for data in response.iter_content(block_size):
                file.write(data)
                downloaded+=len(data)
                # NOTE(review): `downloaded` was just incremented, so this
                # branch can only trigger on an empty chunk — confirm intent.
                if downloaded==0:
                    progressbar['value']=0
                else:
                    progressbar['value'] = int((downloaded/total_size_in_bytes)*100)
                    progress_window.title(f"Downloaded {downloaded} of {total_size_in_bytes} bytes.")
                # NOTE(review): this function runs on a worker thread (see
                # below) but updates Tk widgets; tkinter is not thread-safe —
                # confirm this is acceptable for this demo.
                progress_window.update()
                time.sleep(.1)
    # Close the window once the download is finished.
    progress_window.destroy()
# Start the download on a worker thread shortly after the window appears.
progress_window.after(300,lambda: threading.Thread(target=download_file).start())
progress_window.mainloop()
| [
"noreply@github.com"
] | ohidurbappy.noreply@github.com |
9f238e46d438784023ea24f418acbc362d03107b | 86813bf514f3e0257f92207f40a68443f08ee44b | /459 重复的子字符串/459 重复的子字符串.py | 09445982e188b913bf7d0f47bd859239932d3471 | [] | no_license | Aurora-yuan/Leetcode_Python3 | 4ce56679b48862c87addc8cd870cdd525c9d926c | 720bb530850febc2aa67a56a7a0b3a85ab37f415 | refs/heads/master | 2021-07-12T13:23:19.399155 | 2020-10-21T03:14:36 | 2020-10-21T03:14:36 | 212,998,500 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | #label: string difficulty: easy
class Solution:
    def repeatedSubstringPattern(self, s: str) -> bool:
        """Return True iff `s` equals some proper substring repeated >= 2 times.

        Uses the doubling trick: `s` occurs inside (s+s) with the first and
        last characters trimmed exactly when `s` is a repetition of one of
        its proper prefixes. This replaces the original O(n^2) divisor scan
        with a single substring search.
        """
        # `bool(s)` preserves the original behavior for the empty string
        # (False), which the bare doubling trick would report as True.
        return bool(s) and s in (s + s)[1:-1]
| [
"noreply@github.com"
] | Aurora-yuan.noreply@github.com |
fbcb356c810de002cfb264ba3b9a23f8b81dc917 | 330216d96d4330a5eb8c396aa50db846eb985674 | /app/core/migrations/0001_initial.py | f568964ebdc4d346067f6fd7c8d7bfdc9c58191c | [] | no_license | Ash-jo121/Recipe-API | 16520208b86393794b4cfcbdc489fb511fa851c6 | 3acd344fe8bd577ba22086f57f030d4797ebb9af | refs/heads/master | 2023-06-17T14:55:57.705830 | 2021-07-14T06:46:08 | 2021-07-14T06:46:08 | 385,627,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | # Generated by Django 2.1.15 on 2021-07-14 06:18
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the project's custom `User`
    # model (email as the unique login field, no username). Edit with care —
    # Django compares migration state, not this file's formatting.
    initial = True
    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"47566176+Ash-jo121@users.noreply.github.com"
] | 47566176+Ash-jo121@users.noreply.github.com |
3f6b6e741838e1fbb8aef9d03e86a091b79c4e77 | ad67a45e0aee6e86d2b6e931019cbbcfdfba354b | /python/furiosa-sdk-runtime/tests/test_base.py | 5a2dc4aacd09649833e6ebbfafce04c118defdee | [] | no_license | ileixe/furiosa-sdk | 2470537a5378a8423eac85b8057dce5921d81272 | 68dcfcce8596ada95e576075e730dd8fdc1242a8 | refs/heads/main | 2023-06-23T09:28:44.745976 | 2021-07-23T04:11:38 | 2021-07-23T04:11:38 | 388,044,680 | 0 | 0 | null | 2021-07-21T08:14:37 | 2021-07-21T08:14:37 | null | UTF-8 | Python | false | false | 2,493 | py | import os
import random
import numpy as np
import tensorflow as tf
from furiosa.runtime import session
from tests import test_data
def model_path(name: str) -> str:
    """Return the path of model `name` in the sibling `npu-models` directory."""
    return f"{os.path.dirname(__file__)}/../npu-models/{name}"
MNIST_MOBINENET_V2 = test_data("MNISTnet_uint8_quant_without_softmax.tflite")
def assert_tensors_equal(expected, result):
    """Assert element-wise closeness between tensors (absolute tolerance 1.0,
    matching the quantization error tolerated by these tests)."""
    message = "{} was expected, but the result was {}".format(expected, result)
    assert np.allclose(expected, result, atol=1.0), message
class SessionTester:
    """Owns a synchronous furiosa runtime session for a given model."""
    def __init__(self, model_path):
        self.session = session.create(model=model_path)
    def close(self):
        # Callers must close explicitly; no context-manager support here.
        self.session.close()
class AsyncSessionTester:
    """Owns an asynchronous furiosa runtime session and its completion queue."""
    def __init__(self, model_path):
        (self.session, self.queue) = session.create_async(model=model_path)
    def close(self):
        # Close the queue before the session, mirroring creation order reversed.
        self.queue.close()
        self.session.close()
class PredictionTester:
    """Base tester comparing a TFLite reference run against a nux (NPU) run."""
    def __init__(self, model_path):
        self.tf_sess = tf.lite.Interpreter(model_path=model_path)
    def _run_nux(self, inputs: np.ndarray):
        # Overridden by subclasses; the base implementation returns None.
        pass
    def _run_tf(self, inputs: np.ndarray):
        # Reference inference through the TFLite interpreter, using the
        # first input and first output tensor only.
        self.tf_sess.allocate_tensors()
        tf_inputs = self.tf_sess.get_input_details()
        tf_outputs = self.tf_sess.get_output_details()
        self.tf_sess.set_tensor(tf_inputs[0]['index'], inputs)
        self.tf_sess.invoke()
        return self.tf_sess.get_tensor(tf_outputs[0]['index'])
    def assert_equals(self, inputs: np.ndarray):
        # Both backends must agree within assert_tensors_equal()'s tolerance.
        tf_results = self._run_tf(inputs)
        nux_results = self._run_nux(inputs)
        assert_tensors_equal(tf_results, nux_results)
class BlockingPredictionTester(PredictionTester):
    """PredictionTester variant running nux inference synchronously."""
    def __init__(self, model_path):
        self.nux_sess = session.create(model=model_path)
        super().__init__(model_path)
    def _run_nux(self, inputs: np.ndarray):
        # Blocking run; unwrap the first output tensor as a numpy array.
        return self.nux_sess.run(inputs)[0].numpy()
    def close(self):
        self.nux_sess.close()
class AsyncPredictionTester(PredictionTester):
    """PredictionTester variant using the asynchronous submit/recv API."""
    def __init__(self, model_path):
        (nux_sess, nux_queue) = session.create_async(model=model_path)
        self.nux_sess = nux_sess
        self.nux_queue = nux_queue
        super().__init__(model_path)
    def _run_nux(self, inputs: np.ndarray) -> np.ndarray:
        # A random context key tags the submission; only one request is in
        # flight, so recv() returns the matching result.
        key = random.randint(0, 100)
        self.nux_sess.submit(inputs, context={'key': key})
        _, outputs = self.nux_queue.recv()
        return outputs[0].numpy()
    def close(self):
        self.nux_queue.close()
        self.nux_sess.close()
"hyunsik@furiosa.ai"
] | hyunsik@furiosa.ai |
1576d9a41714717115adfe3598e3691d2eb0a209 | 736e5d01098e19feaf2095d43e0729e0cd3a2c06 | /mercateo/filedate_check(1).py | 44236097d0cf42d4ba7948186605f175e09aca11 | [] | no_license | ankurparanjpe/python-scripts | b36c45b8aed25f0084e778de2d259eb8df6dfa21 | 3fe7ef4f5204aeee5935dea9852fa99f9d44b5a6 | refs/heads/master | 2023-04-07T10:31:47.841650 | 2021-03-15T18:04:26 | 2021-03-15T18:04:26 | 346,092,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,026 | py | import ftplib
from datetime import date,datetime,timedelta
import requests
from zipfile import ZipFile
import smtplib,ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
import os
from subprocess import call
from win32com.client import Dispatch

# Date stamps used for the freshness check of the pricelist source files.
yesterday = datetime.strftime(datetime.now() - timedelta(1), '%Y%m%d')
today1 = datetime.today().strftime('%d-%m-%Y')

# Alert mail settings. BUGFIX: a blank line now separates the SMTP headers
# from the body; without it the body text was treated as header data.
email_user = 'rncteam@brodos.net'
email_send = ['rncteam@brodos.net']
message_file = """From: <rncteam@brodos.net>
Subject: t_Mercateo Pricelist Data/source Error!

One of the files for t_Mercateo Pricelist file source are older or not available in required path!
"""

try:
    # Modification dates of the three source files, formatted like `today1`.
    artzub = os.path.getmtime(f'I:/Abteilungen/Reporting_und_Controlling/private/Datenbanken/J DBF/ARTZUB.txt')
    artikel = os.path.getmtime(f'I:/Abteilungen/Reporting_und_Controlling/private/Datenbanken/J DBF/ARTIKEL.txt')
    stocklist = os.path.getmtime(f'I:/Abteilungen/Reporting_und_Controlling/private/Datenbanken/Eigene/STOCKLIST.xlsx')
    # NOTE(review): `produkte` only checks existence and is never used below —
    # presumably it should be part of the freshness test as well; confirm.
    produkte = os.path.isfile(f'I:/Abteilungen/Reporting_und_Controlling/private/Datenbanken/Eigene/PRODUKTE.xlsx')
    artzub = datetime.fromtimestamp(artzub).strftime('%d-%m-%Y')
    artikel = datetime.fromtimestamp(artikel).strftime('%d-%m-%Y')
    stocklist = datetime.fromtimestamp(stocklist).strftime('%d-%m-%Y')
    # BUGFIX: the original `artzub and artikel and stocklist == today1` only
    # compared `stocklist` (non-empty strings are always truthy); all three
    # files must have been modified today.
    if artzub == today1 and artikel == today1 and stocklist == today1:
        # Sources are fresh: open the reporting workbook in Excel.
        xl = Dispatch("Excel.Application")
        xl.Visible = True
        wb = xl.Workbooks.Open(r'I:\\Abteilungen\\Reporting_und_Controlling\\private\\Preislisten\\t_Mercateo\\REPORTING Mercateo Pricelist - Copy.xlsm')
        print('yes available')
    else:
        print('nope')
        smtpObj = smtplib.SMTP('172.17.7.101')
        smtpObj.sendmail(email_user, email_send, message_file)
except Exception as e:
    # Any failure (missing file, SMTP/Excel error) triggers the alert mail.
    print('file error')
    smtpObj = smtplib.SMTP('172.17.7.101')
    smtpObj.sendmail(email_user, email_send, message_file)
| [
"standard@PCI045.brodosmit.de"
] | standard@PCI045.brodosmit.de |
8e67b2dda78a3d23ea3feaf4e9b99eb6a8cc630e | ee1d1f674aa96b15477167b76e25bbbbe85a690c | /sound_svc.py | e041f2264f6d5d2a722863159eb1e61638a4b3a5 | [] | no_license | chrisatyler/Pi-Projects | 250390caf067f4c64188181f6fbaa48d3b1621aa | 8ddcc0a179bec127206919ec053bd80cd6006748 | refs/heads/master | 2021-06-10T17:33:35.527168 | 2017-02-04T00:57:40 | 2017-02-04T00:57:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,343 | py | # Import libraries used in this program
import RPi.GPIO as GPIO
import os, sys
import time
from decimal import *
import math
getcontext().prec = 4
# Startup message
print "Preparing to monitor sound levels"
# Set our pin assignments
sensor_in = 18
red_led = 21
green_led = 20
#Simple sting for printing an output on detection - can be removed for quiet running
Is_Loud = "No"
# Web output settings:
web_file = "/var/www/html/level.js"
# Various counters used for determining the thresholds for sensitivity and detection
# as well as the time of the loop and frequency for debugging
Loud_Count = 0
loop_count = 0
per_detected = 0
events_detected = 0
time_loop = 15
# Max loop is determined by the tuning exercise I describe in my blog video
# at linkedin.com/in/chrisharrold - look for the post on detection tuning
max_loop = 30000
# This value is the final threshold where the system will take action
# it is the value of the number of times loud sound was detected
# versus the number of times the sensor was polled. Unless
# you are looking for spikes amidst loud noise, this number will be
# likely be .01 or even significantly less
a_threshold = .01
# How long between sound level checks - not required, but you
# can slow it down if needed for debuging or just for funsies
interval = .5
# Setup GPIO commands and pins and cleanup pins in case of errors
GPIO.setmode(GPIO.BCM)
GPIO.setup(red_led, GPIO.OUT)
GPIO.setup(green_led, GPIO.OUT)
GPIO.setup(sensor_in, GPIO.IN)
# Make sure the pins start off in the LOW state
GPIO.output(green_led, GPIO.LOW)
GPIO.output(red_led, GPIO.LOW)
# Then turn on the green - no noise light - and confirm system is online.
GPIO.output(green_led, GPIO.HIGH)
GPIO.output(red_led, GPIO.LOW)
print "Readying Web Output File"
# Opens and preps the HTML file for the first time. Will remove anything it
# finds in the file and prep it with this default:
with open(web_file + '.new', 'w') as f_output:
f_output.write("var int_level = 0")
os.rename(web_file + '.new', web_file)
print "GPIO set. Service starting. Press ctrl-c to break"
# Main try block to handle the exception conditions
try:
# Primary monitor is a "while" loop that will keep the monitor running
# indefinitely as a soft service.
#
# This first syntax will lock the loop into a time window, 5 seconds
# by default as definied by the time_loop variable.
# This is extremely useful for debugging, and for threshold detection.
#
# The microphone sensor is notoriously hard to tune for threshold
# and having this will allow you to figure out the number of events
# in a fixed window of time. This means you can divide by the number
# of events versus the number of times the monitor looked for an event,
# to define the sensitivity in software and not rely solely on the
# sensor itself.
#
# You can remove this version once the sensitivity is reliable:
# t_end = time.time() + time_loop
# while time.time() < t_end:
# This version simply loops for eternity unless ctrl-c is pressed
# and should be your "production" version of the loop based on your
# tuning results and the length of the loop that matches your sensitivity needs
# my happy default is 30k loops or about 5 seconds:
while loop_count < max_loop:
# Now we get to the actual loop and start detecting sound
# Count the number of iterations - important for determining
# sustained detection versus flutter in the sensor
loop_count = loop_count + 1
# If sound is loud enough, the GPIO PIN will switch state to HIGH
# record the occurance and add it to the count for computation
if GPIO.input(sensor_in) == GPIO.HIGH:
Is_Loud = "Loudness Detected"
Loud_Count = Loud_Count + 1
# have we hit our threshold yet?
per_detected = Decimal(Loud_Count) / Decimal(loop_count)
# You can un-remark the line to print the detected and threshold value each loop
# which is useful for the debugging, but takes cycles away from computation
#print "Detect vs Threshold: " + str(per_detected) + " / " + str(a_threshold)
# write it to the .js file for web display if per_detected is high enough to trigger
# the output otherwise, just leave it alone (saves CPU but causes a minor 'bleed'
# where the last per_detected could be displayed for a long time
if per_detected > 0:
with open(web_file + '.new', 'w') as f_output:
f_output.write("var int_level = " + str(per_detected))
os.rename(web_file + '.new', web_file)
# Lets see if we have actually detected a sound that meets the
# threshold? If so, we will turn on the red light and it will stay on
# until the sound drops under the threshold again.
if per_detected > a_threshold:
GPIO.output(red_led, GPIO.HIGH)
else:
GPIO.output(red_led, GPIO.LOW)
# Lastly for the main body, we catch our loop count before it gets to max_loop
# and reset everything to keep everything running, and our math accurate:
if loop_count == max_loop:
print "System is listening"
loop_count = 0
per_detected = 0
Loud_Count = 0
with open(web_file + '.new', 'w') as f_output:
f_output.write("var int_level = 0 ")
os.rename(web_file + '.new', web_file)
except (KeyboardInterrupt, SystemExit):
# If the system is interrupted (ctrl-c) this will print the final values
# so that you have at least some idea of what happened
print "-------------------------------------------"
print " "
print "System Reset on Keyboard Command or SysExit"
print " "
print "Final Detection was " + str(Is_Loud)
print " "
print "Total Noises Detected: " + str(Loud_Count)
print " "
print "Total loops run: " + str(loop_count)
print " "
print "-------------------------------------------"
GPIO.cleanup()
else:
GPIO.cleanup()
# You can remove this entire block once you go to "production" mode
# but these values are critical for the initial tuning phase.
print "-------------------------------------------"
print " "
print "System Reset on Keyboard Command or SysExit"
print " "
print "Final Detection was " + str(Is_Loud)
print " "
print "Total Noises Detected: " + str(Loud_Count)
print " "
print "Total loops run: " + str(loop_count)
print " "
print "-------------------------------------------"
| [
"christophereharrold@hotmail.com"
] | christophereharrold@hotmail.com |
9a2b12b36a19e6267a49ac095837da37bf67e59f | c57a402f4afd29e5f7154c613089924a7192066b | /Utils/PwdUtils.py | 3db463164ccd0df65874d43de83b49dac521b83a | [
"MIT"
] | permissive | avaitkus/Data-Platform-API | abdceead15a4764373da0f6fe2050af57cafc1c0 | bb13e8e8854a8acd3ae5f01360e6b3166bce0c80 | refs/heads/master | 2023-08-14T23:31:47.959691 | 2018-12-11T00:41:50 | 2018-12-11T00:41:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | from passlib.hash import pbkdf2_sha256
def set_password(raw_password):
    """Hash `raw_password` with PBKDF2-SHA256 (200000 rounds, 16-byte salt).

    Uses `using(...).hash(...)`: `encrypt()` and passing settings directly to
    the hashing call are deprecated since passlib 1.7 and removed in 1.8.
    The produced hashes remain verifiable by verify_password().
    """
    return pbkdf2_sha256.using(rounds=200000, salt_size=16).hash(raw_password)
def verify_password(password, hash):
    """Return True if the cleartext `password` matches the stored PBKDF2-SHA256 `hash`."""
    matches = pbkdf2_sha256.verify(password, hash)
    return matches
| [
"noreply@github.com"
] | avaitkus.noreply@github.com |
4b20683c0e49d5935efc52e17990111568c27d6b | 73758dde83d1a1823c103e1a4ba71e7c95168f71 | /nsd2005/py01/day02/game2.py | 13201ab13ccb3c6969428695af845f256120a340 | [] | no_license | tonggh220/md_5_nsd_notes | 07ffdee7c23963a7a461f2a2340143b0e97bd9e1 | a58a021ad4c7fbdf7df327424dc518f4044c5116 | refs/heads/master | 2023-07-02T01:34:38.798929 | 2021-05-12T08:48:40 | 2021-05-12T08:48:40 | 393,885,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | import random
# List of [player, computer] pairs in which the player wins
win_list = [['石头', '剪刀'], ['剪刀', '布'], ['布', '石头']]
# Computer picks at random; the player is prompted for rock/scissors/paper
all_choice = ['石头', '剪刀', '布']
computer = random.choice(all_choice)
player = input("请出拳(石头/剪刀/布): ")
# Show both choices
print("计算机的选择:%s, 您的选择:%s" % (computer, player))
# Decide and print the result (tie / player wins / player loses)
if computer == player:
    print("平局")
elif [player, computer] in win_list:
    print("You WIN!!!")
else:
    print("You LOSE!!!")
| [
"zhangzhg@tedu.cn"
] | zhangzhg@tedu.cn |
e1bef1ab9a2f2cfac10e92460d610fb242c5b252 | 8ce094eb7ca937ebc6bf215274d94fac4476a7df | /gitconsensus/repository.py | 95c82c9b83cf7ffb17cd8ac665636f7d0ab2c6fe | [
"MIT"
] | permissive | minz1/GitConsensusCLI | 8eddc01ea0dcbfd8acfbef3bea57f173519e7736 | a7018d1e2c790ff84bd21f185b8eaeb3f97c8dac | refs/heads/master | 2022-11-15T19:57:34.920893 | 2020-06-29T04:24:52 | 2020-06-29T04:25:40 | 275,690,366 | 1 | 0 | MIT | 2020-06-29T00:06:05 | 2020-06-29T00:06:05 | null | UTF-8 | Python | false | false | 18,637 | py | import base64
import datetime
import github3
import json
import requests
from semantic_version import Version
import yaml
# .gitconsensus.yaml files with versions higher than this will be ignored.
max_consensus_version = Version('3.0.0', partial=True)
message_template = """
This Pull Request has been %s by [GitConsensus](https://www.gitconsensus.com/).
## Vote Totals
| Yes | No | Abstain | Voters |
| --- | -- | ------- | ------ |
| %s | %s | %s | %s |
## Vote Breakdown
%s
## Vote Results
| Criteria | Result |
| ---------- | ------ |
| Has Quorum | %s |
| Has Votes | %s |
"""
consensus_url_template = "https://api.github.com/repos/%s/%s/contents/.gitconsensus.yaml"
def githubApiRequest(url, client):
    """GET `url` through `client`, sending the 'squirrel-girl' preview Accept
    header so the GitHub API includes reaction data in its responses."""
    preview_headers = {'Accept': 'application/vnd.github.squirrel-girl-preview'}
    return client._get(url, headers=preview_headers)
class Repository:
    """A GitHub repository governed by GitConsensus.

    Loads the repository's .gitconsensus.yaml rules file (when present)
    and normalizes older rule-file layouts to the version 3 structure,
    which nests pull request settings under 'pull_requests'.
    """

    def __init__(self, user, repository, client):
        """Initializes the repository and loads its consensus rules.

        Args:
            user: login of the user or organization owning the repository.
            repository: name of the repository.
            client: authenticated github3 client.
        """
        self.user = user
        self.name = repository
        # Lazily-populated list of contributor logins; False until fetched.
        self.contributors = False
        # Cache of username -> bool collaborator-status lookups.
        self.collaborators = {}
        self.client = client
        self.client.set_user_agent('gitconsensus')
        self.repository = self.client.repository(self.user, self.name)
        consensusurl = consensus_url_template % (self.user, self.name)
        res = githubApiRequest(consensusurl, self.client)
        # False signals "no usable consensus configuration".
        self.rules = False
        if res.status_code == 200:
            ruleresults = res.json()
            # The contents API returns the file body base64 encoded.
            self.rules = yaml.safe_load(base64.b64decode(ruleresults['content']).decode('utf-8'))
            # support older versions by converting from day to hours.
            if 'version' not in self.rules or self.rules['version'] < 2:
                if 'mergedelay' in self.rules and self.rules['mergedelay']:
                    self.rules['mergedelay'] = self.rules['mergedelay'] * 24
                if 'timeout' in self.rules and self.rules['timeout']:
                    self.rules['timeout'] = self.rules['timeout'] * 24
                self.rules['version'] = 2
            if self.rules['version'] < 3:
                # Version 2 kept every setting at the top level; version 3
                # nests the pull request settings under 'pull_requests'.
                self.rules['version'] = 3
                self.rules['pull_requests'] = {
                    "quorum": self.rules.get('quorum', False),
                    "threshold": self.rules.get('threshold', False),
                    "contributors_only": self.rules.get('contributorsonly', False),
                    "collaborators_only": self.rules.get('collaboratorsonly', False),
                    "whitelist": self.rules.get('whitelist'),
                    "blacklist": self.rules.get('blacklist'),
                    "merge_delay": self.rules.get('mergedelay', False),
                    "delay_override": self.rules.get('delayoverride', False),
                    "merge_delay_min": self.rules.get('mergedelaymin', False),
                    "license_delay": self.rules.get('licenseddelay', False),
                    "license_lock": self.rules.get('locklicense', False),
                    "consensus_delay": self.rules.get('consensusdelay', False),
                    "consensus_lock": self.rules.get('lockconsensus', False),
                    "timeout": self.rules.get('timeout')
                }
                # Thresholds above 1 are interpreted as percentages.
                # NOTE(review): under Python 2 this in-place division of an
                # integer truncates to 0 -- confirm the target interpreter.
                if int(self.rules['pull_requests']['threshold']) > 1:
                    self.rules['pull_requests']['threshold'] /= 100
            # Treat higher version consensus rules are an unconfigured repository.
            project_consensus_version = Version(str(self.rules['version']), partial=True)
            if max_consensus_version < project_consensus_version:
                self.rules = False

    def getPullRequests(self):
        """Returns a PullRequest wrapper for every open pull request."""
        prs = self.repository.iter_pulls(state="open")
        retpr = []
        for pr in prs:
            newpr = PullRequest(self, pr.number)
            retpr.append(newpr)
        return retpr

    def getPullRequest(self, number):
        """Returns a PullRequest wrapper for the given pull request number."""
        return PullRequest(self, number)

    def isContributor(self, username):
        """Returns True if username has commits in this repository (cached)."""
        if not self.contributors:
            contributor_list = self.repository.contributors()
            self.contributors = [str(contributor) for contributor in contributor_list]
        return username in self.contributors

    def isCollaborator(self, username):
        """Returns True if username is a collaborator on the repository."""
        # NOTE(review): the result is cached but the return statement issues
        # a second API call anyway -- likely should return the cache entry.
        if username not in self.collaborators:
            self.collaborators[username] = self.repository.is_collaborator(username)
        return self.repository.is_collaborator(username)

    def getConsensus(self):
        """Returns a Consensus evaluator bound to this repository's rules."""
        return Consensus(self.rules)

    def setLabelColor(self, name, color):
        """Creates the label, or updates its color when it differs."""
        labels = self.get_labels()
        if name not in labels:
            self.repository.create_label(name, color)
        elif color != labels[name].color:
            labels[name].update(name, color)

    def get_labels(self):
        """Returns a dict mapping label name to label object."""
        labels = {}
        for label in self.repository.labels():
            labels[label.name] = label
        return labels
class PullRequest:
    """A pull request and the reaction-based votes cast on it.

    Reactions on the pull request's issue are tallied as votes: '+1' is a
    yes, '-1' a no and 'confused' an abstain. The repository rules control
    which users may vote and whether double-voters are excluded.
    """

    # Cached list of label names; False until first retrieved.
    labels = False

    def __init__(self, repository, number):
        """Loads the pull request, tallies its reactions and inspects files.

        Args:
            repository: the owning Repository instance.
            number: pull request number.
        """
        self.repository = repository
        self.consensus = repository.getConsensus()
        self.number = number
        self.pr = self.repository.client.pull_request(self.repository.user, self.repository.name, number)

        # https://api.github.com/repos/OWNER/REPO/issues/1/reactions
        reacturl = "https://api.github.com/repos/%s/%s/issues/%s/reactions" % (self.repository.user, self.repository.name, self.number)
        res = githubApiRequest(reacturl, self.repository.client)
        reactions = json.loads(res.text)

        # Vote tallies; the contributors_* lists only count users who have
        # commits in the repository.
        self.yes = []
        self.no = []
        self.abstain = []
        self.contributors_yes = []
        self.contributors_no = []
        self.contributors_abstain = []
        self.users = []
        # Users excluded for voting for multiple options.
        self.doubles = []
        # NOTE(review): the filters below consult the top-level rules dict,
        # not rules['pull_requests'] where Repository normalizes version 3
        # settings -- confirm which level is intended.
        for reaction in reactions:
            content = reaction['content']
            user = reaction['user']
            username = user['login']
            if username in self.doubles:
                continue
            if 'blacklist' in self.repository.rules and self.repository.rules['blacklist']:
                # NOTE(review): Repository defines no 'blacklist' attribute;
                # this looks like it should read
                # self.repository.rules['blacklist'] -- confirm.
                if username in self.repository.blacklist:
                    continue
            if 'collaborators_only' in self.repository.rules and self.repository.rules['collaborators_only']:
                if not self.repository.isCollaborator(username):
                    continue
            if 'contributors_only' in self.repository.rules and self.repository.rules['contributors_only']:
                if not self.repository.isContributor(username):
                    continue
            if 'whitelist' in self.repository.rules:
                # NOTE(review): a 'whitelist' key present but set to null
                # would make this membership test raise TypeError -- confirm.
                if username not in self.repository.rules['whitelist']:
                    continue

            if 'prevent_doubles' in self.repository.rules and self.repository.rules['prevent_doubles']:
                # make sure user hasn't voted twice
                if content == '+1' or content == '-1' or content == 'confused':
                    if username in self.users:
                        # Retract every vote this user already cast and bar
                        # them from further counting.
                        self.doubles.append(username)
                        self.users.remove(username)
                        if username in self.yes:
                            self.yes.remove(username)
                        if username in self.no:
                            self.no.remove(username)
                        if username in self.abstain:
                            self.abstain.remove(username)
                        if username in self.contributors_yes:
                            self.contributors_yes.remove(username)
                        if username in self.contributors_no:
                            self.contributors_no.remove(username)
                        if username in self.contributors_abstain:
                            self.contributors_abstain.remove(username)
                        continue

            if content == '+1':
                self.users.append(user['login'])
                self.yes.append(user['login'])
                if self.repository.isContributor(user['login']):
                    self.contributors_yes.append(user['login'])
            elif content == '-1':
                self.users.append(user['login'])
                self.no.append(user['login'])
                if self.repository.isContributor(user['login']):
                    self.contributors_no.append(user['login'])
            elif content == 'confused':
                self.users.append(user['login'])
                self.abstain.append(user['login'])
                if self.repository.isContributor(user['login']):
                    self.contributors_abstain.append(user['login'])

        # Record whether the pull request touches governance files.
        files = self.pr.files()
        self.changes_consensus = False
        self.changes_license = False
        for changed_file in files:
            if changed_file.filename == '.gitconsensus.yaml':
                self.changes_consensus = True
            if changed_file.filename.lower().startswith('license'):
                self.changes_license = True

    def hoursSinceLastCommit(self):
        """Returns the number of hours since the pull request's last commit.

        Iterates the commit listing so commit_date_string ends up holding
        the final listed commit's author date -- presumably the newest;
        confirm the ordering github3 returns. NOTE(review): a pull request
        with no commits would leave commit_date_string unbound (NameError).
        """
        commits = self.pr.commits()
        for commit in commits:
            commit_date_string = commit._json_data['commit']['author']['date']
        # 2017-08-19T23:29:31Z
        commit_date = datetime.datetime.strptime(commit_date_string, '%Y-%m-%dT%H:%M:%SZ')
        now = datetime.datetime.utcnow()
        delta = now - commit_date
        return delta.total_seconds() / 3600

    def hoursSincePullOpened(self):
        """Returns the number of hours since the pull request was opened."""
        now = datetime.datetime.utcnow()
        delta = now - self.pr.created_at.replace(tzinfo=None)
        return delta.total_seconds() / 3600

    def hoursSinceLastUpdate(self):
        """Returns the smaller of hours-since-open and hours-since-commit."""
        hoursOpen = self.hoursSincePullOpened()
        hoursSinceCommit = self.hoursSinceLastCommit()
        if hoursOpen < hoursSinceCommit:
            return hoursOpen
        return hoursSinceCommit

    def changesConsensus(self):
        """Returns True if the pull request modifies .gitconsensus.yaml."""
        return self.changes_consensus

    def changesLicense(self):
        """Returns True if the pull request modifies a license file."""
        return self.changes_license

    def getIssue(self):
        """Returns the issue object backing this pull request."""
        return self.repository.repository.issue(self.number)

    def validate(self):
        """Returns True when the consensus rules allow this pull to merge."""
        if self.repository.rules == False:
            return False
        return self.consensus.validate(self)

    def shouldClose(self):
        """Returns True when the pull request has exceeded its timeout."""
        if not self.repository.rules:
            return False
        if 'pull_requests' not in self.repository.rules:
            return False
        if 'timeout' in self.repository.rules['pull_requests']:
            # NOTE(review): 'timeout' may be None after rules migration; on
            # Python 3 this comparison would raise -- confirm Python 2 target.
            if self.hoursSinceLastUpdate() >= self.repository.rules['pull_requests']['timeout']:
                return True
        return False

    def close(self):
        """Closes the pull request, relabels it and posts the vote summary."""
        self.pr.close()
        self.addLabels(['gc-closed'])
        self.cleanInfoLabels()
        self.commentAction('closed')

    def vote_merge(self):
        """Merges the pull request, relabels it and posts the vote summary."""
        if not self.repository.rules:
            return False
        self.pr.merge('GitConsensus Merge')
        self.addLabels(['gc-merged'])
        self.cleanInfoLabels()
        if 'extra_labels' in self.repository.rules and self.repository.rules['extra_labels']:
            # Optional statistics labels recording the final vote state.
            self.addLabels([
                'gc-voters %s' % (len(self.users),),
                'gc-yes %s' % (len(self.yes),),
                'gc-no %s' % (len(self.no),),
                'gc-age %s' % (int(self.hoursSinceLastUpdate()),)
            ])
        self.commentAction('merged')

    def addInfoLabels(self):
        """Synchronizes the informational status labels with current state."""
        # NOTE(review): the returned list is unused; the call does warm the
        # self.labels cache used by addLabels/removeLabels below.
        labels = self.getLabelList()

        licenseMessage = 'License Change'
        if self.changesLicense():
            self.addLabels([licenseMessage])
        else:
            self.removeLabels([licenseMessage])

        consensusMessage = 'Consensus Change'
        if self.changesConsensus():
            self.addLabels([consensusMessage])
        else:
            self.removeLabels([consensusMessage])

        hasQuorumMessage = 'Has Quorum'
        needsQuorumMessage = 'Needs Votes'
        if self.consensus.hasQuorum(self):
            self.addLabels([hasQuorumMessage])
            self.removeLabels([needsQuorumMessage])
        else:
            self.removeLabels([hasQuorumMessage])
            self.addLabels([needsQuorumMessage])

        passingMessage = 'Passing'
        failingMessage = 'Failing'
        if self.consensus.hasVotes(self):
            self.addLabels([passingMessage])
            self.removeLabels([failingMessage])
        else:
            self.removeLabels([passingMessage])
            self.addLabels([failingMessage])

    def cleanInfoLabels(self):
        """Removes the transient status labels after a merge or close."""
        self.removeLabels(['Failing', 'Passing', 'Needs Votes', 'Has Quorum'])

    def commentAction(self, action):
        """Posts the vote-summary comment for the given action.

        Args:
            action: description of what happened ('merged' or 'closed').
        """
        table = self.buildVoteTable()
        message = message_template % (
            action,
            str(len(self.yes)),
            str(len(self.no)),
            str(len(self.abstain)),
            str(len(self.users)),
            table,
            self.consensus.hasQuorum(self),
            self.consensus.hasVotes(self)
        )
        if len(self.doubles) > 0:
            # NOTE(review): 'exlcuded' typo in this user-facing message.
            duplist = ["[%s](https://github.com/%s)" % (username, username) for username in self.doubles]
            dupuserstring = ', '.join(duplist)
            dupstring = '\n\nThe following users voted for multiple options and were exlcuded: \n%s' % (dupuserstring)
            message = "%s\n%s" % (message, dupstring)
        self.addComment(message)

    def buildVoteTable(self):
        """Builds the per-user markdown vote table for the summary comment."""
        table = '| User | Yes | No | Abstain |\n|--------|-----|----|----|'
        for user in self.users:
            if user in self.yes:
                yes = '✔'
            else:
                yes = ' '
            if user in self.no:
                no = '✔'
            else:
                no = ' '
            if user in self.abstain:
                abstain = '✔'
            else:
                abstain = ' '
            user_label = '[%s](https://github.com/%s)' % (user, user)
            row = "| %s | %s | %s | %s |" % (user_label, yes, no, abstain)
            table = "%s\n%s" % (table, row)
        return table

    def addLabels(self, labels):
        """Adds each label that is not already on the issue."""
        existing = self.getLabelList()
        issue = self.getIssue()
        for label in labels:
            if label not in existing:
                issue.add_labels(label)

    def removeLabels(self, labels):
        """Removes each label that is currently on the issue."""
        existing = self.getLabelList()
        issue = self.getIssue()
        for label in labels:
            if label in existing:
                issue.remove_label(label)

    def addComment(self, comment_string):
        """Creates a comment on the pull request's issue."""
        return self.getIssue().create_comment(comment_string)

    def getLabelList(self):
        """Returns the cached list of label names, fetching it on first use."""
        if not self.labels:
            issue = self.getIssue()
            # Assigning here shadows the class attribute with an instance one.
            self.labels = [item.name for item in issue.labels()]
        return self.labels

    def isBlocked(self):
        """Returns True when a 'wip' or 'dontmerge' label blocks merging."""
        labels = [item.lower() for item in self.getLabelList()]
        if 'wip' in labels:
            return True
        if 'dontmerge' in labels:
            return True
        return False
class Consensus:
    """Evaluates a repository's consensus rules against a pull request."""

    def __init__(self, rules):
        """Initializes the evaluator.

        Args:
            rules: normalized rules dict (version 3 layout), or False when
                the repository has no usable consensus configuration.
        """
        self.rules = rules

    def validate(self, pr):
        """Returns True when the pull request meets every merge criterion."""
        if not self.rules:
            return False
        # Short-circuits in the same order as the individual checks below.
        return (not pr.isBlocked()
                and self.isAllowed(pr)
                and self.isMergeable(pr)
                and self.hasQuorum(pr)
                and self.hasVotes(pr)
                and self.hasAged(pr))

    def isAllowed(self, pr):
        """Returns False when a lock forbids license or consensus changes."""
        if not self.rules:
            return False
        if pr.changesLicense() and self.rules['pull_requests'].get('license_lock'):
            return False
        if pr.changesConsensus() and self.rules['pull_requests'].get('consensus_lock'):
            return False
        return True

    def isMergeable(self, pr):
        """Returns True when GitHub reports the pull request as mergeable."""
        if not self.rules:
            return False
        return bool(pr.pr.mergeable)

    def hasQuorum(self, pr):
        """Returns True when enough users have voted."""
        if not self.rules:
            return False
        pull_rules = self.rules['pull_requests']
        if 'quorum' not in pull_rules:
            return True
        return len(pr.users) >= pull_rules['quorum']

    def hasVotes(self, pr):
        """Returns True when the yes ratio reaches the threshold."""
        if not self.rules:
            return False
        pull_rules = self.rules['pull_requests']
        if 'threshold' not in pull_rules:
            return True
        total_votes = len(pr.yes) + len(pr.no)
        if total_votes <= 0:
            return False
        yes_ratio = len(pr.yes) / total_votes
        return yes_ratio >= pull_rules['threshold']

    def hasAged(self, pr):
        """Returns True once the pull request has waited long enough.

        License and consensus changes can carry their own minimum delays;
        the contributor delay override can shorten the standard wait.
        """
        if not self.rules:
            return False
        age_hours = pr.hoursSinceLastUpdate()
        if pr.changesLicense():
            license_delay = self.rules['pull_requests'].get('license_delay')
            if license_delay and age_hours < license_delay:
                return False
        if pr.changesConsensus():
            consensus_delay = self.rules['pull_requests'].get('consensus_delay')
            if consensus_delay and age_hours < consensus_delay:
                return False
        merge_delay = self.rules['pull_requests'].get('merge_delay')
        if not merge_delay:
            return True
        if age_hours >= merge_delay:
            return True
        # Fast path: unanimous contributor approval can bypass the delay,
        # but never for governance (license/consensus) changes.
        delay_override = self.rules['pull_requests'].get('delay_override')
        if delay_override:
            if pr.changesConsensus() or pr.changesLicense():
                return False
            minimum_delay = self.rules['pull_requests'].get('merge_delay_min')
            if minimum_delay and age_hours < minimum_delay:
                return False
            if len(pr.no) > 0:
                return False
            if len(pr.contributors_yes) >= delay_override:
                return True
        return False
| [
"emerytang@gmail.com"
] | emerytang@gmail.com |
569aa3b9b87c7bb004260cef14824c3893f4f6cb | 808b0b8238a12afeea147d6f007769d16b0594c8 | /TripPlanner/branches/rb-0.4/bycycle/tripplanner/__init__.py | 985e7c9f5d1e88b3b0bd508e2355e772c699caea | [] | no_license | bycycle-org/bycycle-legacy | 029df89454d10d2becaf70c24389b0954c299c3b | 1c66b7278d989cc37245bca44ba5191dd53d9714 | refs/heads/master | 2020-12-02T21:01:23.333908 | 2010-06-22T18:15:35 | 2010-06-22T18:15:35 | 96,243,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | """
tripplanner
This file loads the finished app from tripplanner.config.middleware.
"""
from tripplanner.config.middleware import make_app
| [
"self@wyattbaldwin.com"
] | self@wyattbaldwin.com |
1e9a225fe5733b7b760390bc1f1511e3d4fc2649 | 99697559d046cdd04dd9068bd518e4da4177aaa2 | /Finish/H065_Valid_Number.py | 887cc224b572e863ae805b6987920e3864f81620 | [] | no_license | Azurisky/Leetcode | 3e3621ef15f2774cfdfac8c3018e2e4701760c3b | 8fa215fb0d5b2e8f6a863756c874d0bdb2cffa04 | refs/heads/master | 2020-03-18T22:46:35.780864 | 2018-10-07T05:45:30 | 2018-10-07T05:45:30 | 135,364,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | class Solution:
def isNumber(self, s):
"""
:type s: str
:rtype: bool
"""
if not s:
return False
# strip the heading and tailing spaces of the string
s = s.strip()
i = 0
res = signs = eE = dot = False
while i < len(s):
if s[i].isdigit():
i += 1
res = signs = True
elif s[i]=='.' and not dot:
i += 1
dot = signs = True
elif (s[i]=='e' or s[i]=='E') and (not eE) and res:
i += 1
res = signs = False
dot = eE = True
elif (s[i]=='+' or s[i]=='-') and not res and not signs:
i += 1
signs = True
else:
return False
if res:
return True
return False | [
"andrew0704us@gmail.com"
] | andrew0704us@gmail.com |
78bfd9ac8cc090b6239749085f7dcc33098e73cf | dd4b467d84aa1c06b26747a3e0c18dd9e1e52b77 | /context_manager_custom.py | 28ec165f730321c47bd658ca55e09953f76f9215 | [] | no_license | hieptq-1904/Test_git | 6d35a2017122eb999b78ca7bffa676b6ef309b14 | 63d7549ededec9efa53ea6290d0804068baecd62 | refs/heads/master | 2023-04-25T03:51:57.736145 | 2021-05-13T10:33:06 | 2021-05-13T10:33:06 | 367,011,600 | 0 | 0 | null | 2021-05-13T10:33:06 | 2021-05-13T10:19:21 | Python | UTF-8 | Python | false | false | 106 | py | from dict import te
# Exercise the custom context manager exposed by the project's `te` object.
# NOTE(review): `te` comes from the local `dict` module (which shadows the
# builtin name); its semantics are not visible here -- confirm against that
# module.
with te.a() as s:
    # Value yielded by the context manager, printed inside the managed scope.
    print(s)
    print('c')
# `s` stays bound after the with-block exits (with does not create a scope).
print(s)
# Presumably reports how many times the manager ran -- TODO confirm.
print(te.count())
print(te.b)
"tony.ng@vmogroup.com"
] | tony.ng@vmogroup.com |
1f5bdaf099ea6f0dbeaf10ad8c748e900aa89e17 | e4f04c827c2402440915619a51dfbf0700688398 | /05Spider/2020/day06/gushiwen/gushiwen/middlewares.py | ab0bd2c32921618444b3d7868e797bd0651950a0 | [] | no_license | wangxinglong74520/filename | 58177cb0d1dfc262713816d175334bbd52ace3b8 | 3347ab61ed1cf0290c6cc431d9931fb0975a612f | refs/heads/main | 2023-03-30T21:55:23.207561 | 2021-03-22T08:21:40 | 2021-03-22T08:21:40 | 349,773,715 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,702 | py | # Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class GushiwenSpiderMiddleware:
    """Pass-through Scrapy spider middleware for the gushiwen project.

    Every hook forwards requests, responses and items unchanged; the class
    exists so project-specific behavior can be added later. See the Scrapy
    spider-middleware documentation for the hook contracts.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy factory hook: build the middleware and subscribe to the
        # spider_opened signal so we can log when a spider starts.
        middleware = cls()
        crawler.signals.connect(
            middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Accept every response unchanged; returning None continues the chain.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward every item/request produced by the spider untouched.
        for element in result:
            yield element

    def process_spider_exception(self, response, exception, spider):
        # No special handling; let other middlewares deal with the exception.
        pass

    def process_start_requests(self, start_requests, spider):
        # Forward the spider's start requests unchanged.
        for request in start_requests:
            yield request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class GushiwenDownloaderMiddleware:
    """Pass-through Scrapy downloader middleware for the gushiwen project.

    All hooks currently forward requests and responses unchanged; see the
    Scrapy downloader-middleware documentation for the hook contracts.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy factory hook: build the middleware and subscribe to the
        # spider_opened signal so we can log when a spider starts.
        middleware = cls()
        crawler.signals.connect(
            middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # Returning None lets the request continue down the middleware
        # chain; this is where per-request cookies could be injected.
        return None

    def process_response(self, request, response, spider):
        # Hand the downloaded response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # Returning None continues normal exception processing.
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
"1639837640@qq.com"
] | 1639837640@qq.com |
96f8a2eb60406b926a83ff11e8486515b1b71524 | f23ef5f7ea28794a9578b33d19c0566848046650 | /202104 - April_2021/20210403.py | 8ff4fe8d304c82dcc563f35e3d8d8304accd9fbd | [] | no_license | barbaramchd/one_coding_challenge_a_day | a6b6ba87dcc648246726b3628b45b476b0d0c3f8 | 0dc5a97df10d148bfc1a4faf434891061774b7f9 | refs/heads/master | 2021-12-24T22:42:21.444664 | 2021-08-07T06:11:16 | 2021-08-07T06:11:16 | 236,455,606 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,105 | py | """
Longest Valid Parentheses
Given a string containing just the characters '(' and ')', find the length of the longest valid (well-formed) parentheses substring.
"""
class Solution:
    def longestValidParentheses(self, s: str) -> int:
        """Return the length of the longest valid parentheses substring of s.

        Replaces the previous substring-tracking implementation, which left
        debug print() calls in the hot path and could over-count by taking
        the full remaining suffix whenever the running counts matched.

        Uses the standard two-pass counter technique: the left-to-right scan
        catches every valid run except those with surplus '(' on the left,
        which the mirrored right-to-left scan catches. O(n) time, O(1) space.
        """
        def longest_balanced(chars, open_char: str) -> int:
            # Count opening/closing characters; a balanced run ends whenever
            # the counts match, and the scan resets once closers overtake
            # openers (the run can never be extended past that point).
            best = opens = closes = 0
            for ch in chars:
                if ch == open_char:
                    opens += 1
                else:
                    closes += 1
                if opens == closes:
                    best = max(best, 2 * closes)
                elif closes > opens:
                    opens = closes = 0
            return best

        return max(longest_balanced(s, '('),
                   longest_balanced(reversed(s), ')'))
"barbaramachado.dex@gmail.com"
] | barbaramachado.dex@gmail.com |
74ab53846c7f95d413948f7ff2c3a206fcf660ca | d3b7a7a922eb9999f22c99c0cc3908d7289ca27e | /tests/end-to-end.py | e965fe6cbdead05f32e481356524c7034165020e | [
"Apache-2.0"
] | permissive | g3l0o/plaso | b668203c2c7cf8799a1c12824ee1bdc8befd3980 | ae29d853a6bcdd1530ce9320a3af7b3f122941ac | refs/heads/master | 2020-12-25T20:31:08.928709 | 2016-07-22T20:00:33 | 2016-07-22T20:00:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,110 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""End-to-end test launcher."""
from __future__ import print_function
import abc
import argparse
import difflib
import logging
import os
import shutil
import subprocess
import sys
import tempfile
try:
import ConfigParser as configparser
except ImportError:
import configparser # pylint: disable=import-error
# Tuple of string types so isinstance() checks work on both Python 2
# (basestring) and Python 3 (str).
if sys.version_info[0] < 3:
  STRING_TYPES = (basestring, )
else:
  STRING_TYPES = (str, )

# Since os.path.abspath() uses the current working directory (cwd)
# os.path.abspath(__file__) will point to a different location if
# cwd has been changed. Hence we preserve the absolute location of __file__.
__file__ = os.path.abspath(__file__)
class TempDirectory(object):
  """Temporary directory usable as a context manager.

  The directory is created on entry and recursively removed on exit.

  Attributes:
    name (str): path of the temporary directory; empty until entered.
  """

  def __init__(self):
    """Initializes a temporary directory."""
    super(TempDirectory, self).__init__()
    self.name = u''

  def __enter__(self):
    """Creates the directory and returns its path."""
    self.name = tempfile.mkdtemp()
    return self.name

  def __exit__(self, exception_type, value, traceback):
    """Removes the directory recursively, ignoring removal errors."""
    shutil.rmtree(self.name, True)
class TestCase(object):
  """Interface for end-to-end test cases.

  A test case defines which aspect of the plaso tools is tested; a test
  definition supplies the parameters so the same case can be run against
  different input files.

  Attributes:
    name (str): name of the test case.
  """

  NAME = None

  def __init__(
      self, tools_path, test_sources_path, test_references_path,
      test_results_path, debug_output=False):
    """Initializes a test case.

    Args:
      tools_path (str): path to the plaso tools.
      test_sources_path (str): path to the test sources.
      test_references_path (str): path to the test references.
      test_results_path (str): path to store test results.
      debug_output (Optional[bool]): True if debug output should be generated.
    """
    super(TestCase, self).__init__()
    self._debug_output = debug_output
    self._log2timeline_path = None
    self._pinfo_path = None
    self._psort_path = None
    self._tools_path = tools_path
    self._test_sources_path = test_sources_path
    self._test_references_path = test_references_path
    self._test_results_path = test_results_path

  def _DetermineToolPath(self, filenames):
    """Determines the invocation path of a tool.

    The first candidate that exists on disk wins; when none exist the last
    candidate is used. Python scripts are prefixed with the interpreter so
    they can be invoked directly.

    Args:
      filenames (tuple[str]): candidate file names, in order of preference.

    Returns:
      str: command line used to invoke the tool.
    """
    tool_path = None
    for filename in filenames:
      tool_path = os.path.join(self._tools_path, filename)
      if os.path.exists(tool_path):
        break

    if tool_path.endswith(u'.py'):
      tool_path = u' '.join([sys.executable, tool_path])
    return tool_path

  def _InitializeLog2TimelinePath(self):
    """Determines the location of log2timeline."""
    self._log2timeline_path = self._DetermineToolPath((
        u'log2timeline.exe', u'log2timeline.sh', u'log2timeline.py'))

  def _InitializePinfoPath(self):
    """Determines the location of pinfo."""
    self._pinfo_path = self._DetermineToolPath((
        u'pinfo.exe', u'pinfo.sh', u'pinfo.py'))

  def _InitializePsortPath(self):
    """Determines the location of psort."""
    self._psort_path = self._DetermineToolPath((
        u'psort.exe', u'psort.sh', u'psort.py'))

  def _RunCommand(self, command):
    """Runs a command in a shell.

    Args:
      command (str): command to run.

    Returns:
      bool: True if the command exited successfully.
    """
    if subprocess.call(command, shell=True) == 0:
      return True

    logging.error(u'Running: "{0:s}" failed.'.format(command))
    return False

  @abc.abstractmethod
  def ReadAttributes(self, test_definition_reader, test_definition):
    """Reads the test definition attributes into to the test definition.

    Args:
      test_definition_reader (TestDefinitionReader): test definition reader.
      test_definition (TestDefinition): test definition.

    Returns:
      bool: True if the read was successful.
    """

  @abc.abstractmethod
  def Run(self, test_definition):
    """Runs the test case with the parameters specified by the test definition.

    Args:
      test_definition (TestDefinition): test definition.

    Returns:
      bool: True if the test ran successfully.
    """
class TestCasesManager(object):
  """Registry of test case classes and cache of their instances."""

  _test_case_classes = {}
  _test_case_objects = {}

  @classmethod
  def DeregisterTestCase(cls, test_case_class):
    """Deregisters a test case class.

    The test case classes are identified based on their lower case name.

    Args:
      test_case_class (type): test case class.

    Raises:
      KeyError: if test case class is not set for the corresponding name.
    """
    lookup_key = test_case_class.NAME.lower()
    if lookup_key not in cls._test_case_classes:
      raise KeyError(
          u'Formatter class not set for name: {0:s}.'.format(
              test_case_class.NAME))

    del cls._test_case_classes[lookup_key]

  @classmethod
  def GetTestCaseObject(
      cls, name, tools_path, test_sources_path, test_references_path,
      test_results_path, debug_output=False):
    """Retrieves the test case object for a specific name.

    Instances are created on first use and cached for later lookups.

    Args:
      name (str): name of the test case.
      tools_path (str): path to the plaso tools.
      test_sources_path (str): path to the test sources.
      test_references_path (str): path to the test references.
      test_results_path (str): path to store test results.
      debug_output (Optional[bool]): True if debug output should be generated.

    Returns:
      TestCase: test case or None if not available.
    """
    lookup_key = name.lower()
    if lookup_key in cls._test_case_objects:
      return cls._test_case_objects[lookup_key]

    test_case_class = cls._test_case_classes.get(lookup_key)
    if not test_case_class:
      return

    test_case_object = test_case_class(
        tools_path, test_sources_path, test_references_path,
        test_results_path, debug_output=debug_output)
    if not test_case_object:
      return

    cls._test_case_objects[lookup_key] = test_case_object
    return test_case_object

  @classmethod
  def RegisterTestCase(cls, test_case_class):
    """Registers a test case class.

    The test case classes are identified based on their lower case name.

    Args:
      test_case_class (type): test case class.

    Raises:
      KeyError: if test case class is already set for the corresponding name.
    """
    lookup_key = test_case_class.NAME.lower()
    if lookup_key in cls._test_case_classes:
      raise KeyError((
          u'Formatter class already set for name: {0:s}.').format(
              test_case_class.NAME))

    cls._test_case_classes[lookup_key] = test_case_class

  @classmethod
  def RegisterTestCases(cls, test_case_classes):
    """Registers multiple test case classes.

    Args:
      test_case_classes (list[type]): test case classes.

    Raises:
      KeyError: if a test case class is already set for a corresponding name.
    """
    for test_case_class in test_case_classes:
      cls.RegisterTestCase(test_case_class)
class TestDefinition(object):
  """A single end-to-end test definition.

  Attributes:
    case (str): name of test case.
    name (str): name of the test.
  """

  def __init__(self, name):
    """Initializes a test definition.

    Args:
      name (str): name of the test.
    """
    super(TestDefinition, self).__init__()
    self.name = name
    self.case = u''
class TestDefinitionReader(object):
  """Reads end-to-end test definitions from a configuration file."""

  def __init__(
      self, tools_path, test_sources_path, test_references_path,
      test_results_path, debug_output=False):
    """Initializes a test definition reader.

    Args:
      tools_path (str): path to the plaso tools.
      test_sources_path (str): path to the test sources.
      test_references_path (str): path to the test references.
      test_results_path (str): path to store test results.
      debug_output (Optional[bool]): True if debug output should be generated.
    """
    super(TestDefinitionReader, self).__init__()
    self._config_parser = None
    self._debug_output = debug_output
    self._tools_path = tools_path
    self._test_sources_path = test_sources_path
    self._test_references_path = test_references_path
    self._test_results_path = test_results_path

  def GetConfigValue(self, section_name, value_name):
    """Retrieves a value from the config parser.

    Args:
      section_name (str): name of the section that contains the value.
      value_name (str): the name of the value.

    Returns:
      object: value or None if the value does not exists.

    Raises:
      RuntimeError: if the configuration parser is not set.
    """
    if not self._config_parser:
      raise RuntimeError(u'Missing configuration parser.')

    try:
      return self._config_parser.get(section_name, value_name).decode('utf-8')
    except configparser.NoOptionError:
      return

  def _ReadTestDefinition(self, section_name):
    """Reads one test definition from a configuration section.

    Args:
      section_name (str): name of the configuration section.

    Returns:
      TestDefinition: test definition or None when the section is invalid.
    """
    test_definition = TestDefinition(section_name)

    test_definition.case = self.GetConfigValue(section_name, u'case')
    if not test_definition.case:
      logging.warning(
          u'Test case missing in test definition: {0:s}.'.format(
              section_name))
      return

    test_case = TestCasesManager.GetTestCaseObject(
        test_definition.case, self._tools_path, self._test_sources_path,
        self._test_references_path, self._test_results_path,
        debug_output=self._debug_output)
    if not test_case:
      logging.warning(u'Undefined test case: {0:s}'.format(
          test_definition.case))
      return

    if not test_case.ReadAttributes(self, test_definition):
      logging.warning(
          u'Unable to read attributes of test case: {0:s}'.format(
              test_definition.case))
      return

    return test_definition

  def Read(self, file_object):
    """Reads test definitions.

    Args:
      file_object (file): a file-like object to read from.

    Yields:
      TestDefinition: end-to-end test definition.
    """
    # TODO: replace by:
    # self._config_parser = configparser. ConfigParser(interpolation=None)
    self._config_parser = configparser.RawConfigParser()

    try:
      self._config_parser.readfp(file_object)

      for section_name in self._config_parser.sections():
        test_definition = self._ReadTestDefinition(section_name)
        if test_definition:
          yield test_definition

    finally:
      self._config_parser = None
class TestLauncher(object):
  """Reads test definitions and runs the matching test cases.

  Each definition's test case is looked up in the test cases manager and
  run with the parameters the definition specifies.
  """

  def __init__(
      self, tools_path, test_sources_path, test_references_path,
      test_results_path, debug_output=False):
    """Initializes a test launcher.

    Args:
      tools_path (str): path to the plaso tools.
      test_sources_path (str): path to the test sources.
      test_references_path (str): path to the test references.
      test_results_path (str): path to store test results.
      debug_output (Optional[bool]): True if debug output should be generated.
    """
    super(TestLauncher, self).__init__()
    self._debug_output = debug_output
    self._test_definitions = []
    self._tools_path = tools_path
    self._test_sources_path = test_sources_path
    self._test_references_path = test_references_path
    self._test_results_path = test_results_path

  def _RunTest(self, test_definition):
    """Runs a single test.

    Args:
      test_definition (TestDefinition): test definition.

    Returns:
      A boolean value indicating the test ran successfully.
    """
    test_case = TestCasesManager.GetTestCaseObject(
        test_definition.case, self._tools_path, self._test_sources_path,
        self._test_references_path, self._test_results_path)
    if test_case:
      return test_case.Run(test_definition)

    logging.error(u'Unsupported test case: {0:s}'.format(
        test_definition.case))
    return False

  def ReadDefinitions(self, configuration_file):
    """Reads the test definitions from the configuration file.

    Args:
      configuration_file (str): path of the configuration file.
    """
    self._test_definitions = []
    with open(configuration_file) as file_object:
      definition_reader = TestDefinitionReader(
          self._tools_path, self._test_sources_path,
          self._test_references_path, self._test_results_path)
      for test_definition in definition_reader.Read(file_object):
        self._test_definitions.append(test_definition)

  def RunTests(self):
    """Runs the tests.

    Returns:
      list[str]: names of the failed tests.
    """
    # TODO: set up test environment
    return [
        test_definition.name for test_definition in self._test_definitions
        if not self._RunTest(test_definition)]
class ExtractAndOutputTestCase(TestCase):
  """Class that implements the extract and output test case.

  The extract and output test case runs log2timeline to extract data
  from a source, specified by the test definition. After the data has been
  extracted pinfo and psort are run to validate if the resulting storage
  file is readable.
  """

  NAME = u'extract_and_output'

  def __init__(
      self, tools_path, test_sources_path, test_references_path,
      test_results_path, debug_output=False):
    """Initializes a test case object.

    Args:
      tools_path (str): path to the plaso tools.
      test_sources_path (str): path to the test sources.
      test_references_path (str): path to the test references.
      test_results_path (str): path to store test results.
      debug_output (Optional[bool]): True if debug output should be generated.
    """
    super(ExtractAndOutputTestCase, self).__init__(
        tools_path, test_sources_path, test_references_path,
        test_results_path, debug_output=debug_output)
    self._InitializeLog2TimelinePath()
    self._InitializePinfoPath()
    self._InitializePsortPath()

  def _RunToolCommand(self, command, stderr_file, copy_paths):
    """Runs a tool command and preserves its output files.

    Shared implementation for the _Run* helpers: runs the command, echoes
    the captured stderr when debug output is enabled and copies the given
    files to the test results path.

    Args:
      command (str): command to run.
      stderr_file (str): path of the file that captures the command's stderr.
      copy_paths (list[str]): paths of files to copy to the test results
          path, if they exist, in the given order.

    Returns:
      bool: True if the command ran successfully.
    """
    logging.info(u'Running: {0:s}'.format(command))
    result = self._RunCommand(command)

    if self._debug_output:
      with open(stderr_file, 'rb') as file_object:
        output_data = file_object.read()
        print(output_data)

    for path in copy_paths:
      if os.path.exists(path):
        shutil.copy(path, self._test_results_path)

    return result

  def _RunLog2Timeline(
      self, test_definition, temp_directory, storage_file, source_path):
    """Runs log2timeline with the parameters specified by the test definition.

    Args:
      test_definition (TestDefinition): test definition.
      temp_directory (str): name of a temporary directory.
      storage_file (str): path of the storage file.
      source_path (str): path of the source.

    Returns:
      bool: True if log2timeline ran successfully.
    """
    extract_options = u'--status-view=none {0:s}'.format(
        u' '.join(test_definition.extract_options))

    stdout_file = os.path.join(
        temp_directory, u'{0:s}-log2timeline.out'.format(test_definition.name))
    stderr_file = os.path.join(
        temp_directory, u'{0:s}-log2timeline.err'.format(test_definition.name))
    command = u'{0:s} {1:s} {2:s} {3:s} > {4:s} 2> {5:s}'.format(
        self._log2timeline_path, extract_options, storage_file, source_path,
        stdout_file, stderr_file)

    # The storage file is preserved as well so a failed run can be inspected.
    return self._RunToolCommand(
        command, stderr_file, [storage_file, stdout_file, stderr_file])

  def _RunPinfo(self, test_definition, temp_directory, storage_file):
    """Runs pinfo on the storage file.

    Args:
      test_definition (TestDefinition): test definition.
      temp_directory (str): name of a temporary directory.
      storage_file (str): path of the storage file.

    Returns:
      bool: True if pinfo ran successfully.
    """
    stdout_file = os.path.join(
        temp_directory, u'{0:s}-pinfo.out'.format(test_definition.name))
    stderr_file = os.path.join(
        temp_directory, u'{0:s}-pinfo.err'.format(test_definition.name))
    command = u'{0:s} {1:s} > {2:s} 2> {3:s}'.format(
        self._pinfo_path, storage_file, stdout_file, stderr_file)

    return self._RunToolCommand(
        command, stderr_file, [stdout_file, stderr_file])

  def _RunPinfoCompare(self, test_definition, temp_directory, storage_file):
    """Runs pinfo --compare on the storage file and a reference storage file.

    Args:
      test_definition (TestDefinition): test definition.
      temp_directory (str): name of a temporary directory.
      storage_file (str): path of the storage file.

    Returns:
      bool: True if pinfo ran successfully.
    """
    reference_storage_file = test_definition.reference_storage_file
    if self._test_references_path:
      reference_storage_file = os.path.join(
          self._test_references_path, reference_storage_file)

    if not os.path.exists(reference_storage_file):
      logging.error(u'No such reference storage file: {0:s}'.format(
          reference_storage_file))
      return False

    stdout_file = os.path.join(
        temp_directory, u'{0:s}-compare-pinfo.out'.format(test_definition.name))
    stderr_file = os.path.join(
        temp_directory, u'{0:s}-compare-pinfo.err'.format(test_definition.name))
    command = u'{0:s} --compare {1:s} {2:s} > {3:s} 2> {4:s}'.format(
        self._pinfo_path, reference_storage_file, storage_file, stdout_file,
        stderr_file)

    return self._RunToolCommand(
        command, stderr_file, [stdout_file, stderr_file])

  def _RunPsort(self, test_definition, temp_directory, storage_file):
    """Runs psort on a storage file.

    Args:
      test_definition (TestDefinition): test definition.
      temp_directory (str): name of a temporary directory.
      storage_file (str): path of the storage file.

    Returns:
      bool: True if psort ran successfully.
    """
    stdout_file = os.path.join(
        temp_directory, u'{0:s}-psort.out'.format(test_definition.name))
    stderr_file = os.path.join(
        temp_directory, u'{0:s}-psort.err'.format(test_definition.name))
    command = u'{0:s} {1:s} > {2:s} 2> {3:s}'.format(
        self._psort_path, storage_file, stdout_file, stderr_file)

    return self._RunToolCommand(
        command, stderr_file, [stdout_file, stderr_file])

  def ReadAttributes(self, test_definition_reader, test_definition):
    """Reads the test definition attributes into to the test definition.

    Args:
      test_definition_reader (TestDefinitionReader): test definition reader.
      test_definition (TestDefinition): test definition.

    Returns:
      bool: True if the read was successful.
    """
    test_definition.extract_options = test_definition_reader.GetConfigValue(
        test_definition.name, u'extract_options')

    # Normalize the options to a list: a missing value becomes an empty list
    # and a comma separated string is split.
    if test_definition.extract_options is None:
      test_definition.extract_options = []
    elif isinstance(test_definition.extract_options, STRING_TYPES):
      test_definition.extract_options = test_definition.extract_options.split(
          u',')

    test_definition.reference_storage_file = (
        test_definition_reader.GetConfigValue(
            test_definition.name, u'reference_storage_file'))

    test_definition.source = test_definition_reader.GetConfigValue(
        test_definition.name, u'source')

    return True

  def Run(self, test_definition):
    """Runs the test case with the parameters specified by the test definition.

    Args:
      test_definition (TestDefinition): test definition.

    Returns:
      bool: True if the test ran successfully.
    """
    source_path = test_definition.source
    if self._test_sources_path:
      source_path = os.path.join(self._test_sources_path, source_path)

    if not os.path.exists(source_path):
      logging.error(u'No such source: {0:s}'.format(source_path))
      return False

    with TempDirectory() as temp_directory:
      storage_file = os.path.join(
          temp_directory, u'{0:s}.plaso'.format(test_definition.name))

      # Extract events with log2timeline.
      if not self._RunLog2Timeline(
          test_definition, temp_directory, storage_file, source_path):
        return False

      # Check if the resulting storage file can be read with pinfo.
      if not self._RunPinfo(
          test_definition, temp_directory, storage_file):
        return False

      # Compare storage file with a reference storage file.
      if test_definition.reference_storage_file:
        if not self._RunPinfoCompare(
            test_definition, temp_directory, storage_file):
          return False

      # Check if the resulting storage file can be read with psort.
      if not self._RunPsort(
          test_definition, temp_directory, storage_file):
        return False

    return True
class ExtractAndTagTestCase(ExtractAndOutputTestCase):
  """Class that implements the extract and tag test case.

  The extract and tag test case runs log2timeline to extract data
  from a source, specified by the test definition. After the data has been
  extracted psort is run to tag events in the resulting storage file.
  """

  NAME = u'extract_and_tag'

  def _RunPsortWithTaggingOptions(
      self, test_definition, temp_directory, storage_file):
    """Runs psort with the tagging options specified by the test definition.

    Args:
      test_definition (TestDefinition): test definition.
      temp_directory (str): name of a temporary directory.
      storage_file (str): path of the storage file.

    Returns:
      bool: True if psort ran successfully.
    """
    # TODO: determine why --analysis=tagging fails.
    tagging_options = (
        u'--analysis tagging --output-format=null '
        u'--tagging-file {0:s}').format(test_definition.tagging_file)

    stdout_file = os.path.join(
        temp_directory, u'{0:s}-psort-tagging.out'.format(test_definition.name))
    stderr_file = os.path.join(
        temp_directory, u'{0:s}-psort-tagging.err'.format(test_definition.name))

    command = u'{0:s} {1:s} {2:s} > {3:s} 2> {4:s}'.format(
        self._psort_path, tagging_options, storage_file, stdout_file,
        stderr_file)
    logging.info(u'Running: {0:s}'.format(command))

    succeeded = self._RunCommand(command)

    if self._debug_output:
      with open(stderr_file, 'rb') as error_file:
        print(error_file.read())

    # Preserve the captured tool output next to the other test results.
    for log_path in (stdout_file, stderr_file):
      if os.path.exists(log_path):
        shutil.copy(log_path, self._test_results_path)

    return succeeded

  def ReadAttributes(self, test_definition_reader, test_definition):
    """Reads the test definition attributes into to the test definition.

    Args:
      test_definition_reader (TestDefinitionReader): test definition reader.
      test_definition (TestDefinition): test definition.

    Returns:
      bool: True if the read was successful.
    """
    if not super(ExtractAndTagTestCase, self).ReadAttributes(
        test_definition_reader, test_definition):
      return False

    test_definition.tagging_file = test_definition_reader.GetConfigValue(
        test_definition.name, u'tagging_file')
    return True

  def Run(self, test_definition):
    """Runs the test case with the parameters specified by the test definition.

    Args:
      test_definition (TestDefinition): test definition.

    Returns:
      bool: True if the test ran successfully.
    """
    source_path = test_definition.source
    if self._test_sources_path:
      source_path = os.path.join(self._test_sources_path, source_path)

    if not os.path.exists(source_path):
      logging.error(u'No such source: {0:s}'.format(source_path))
      return False

    with TempDirectory() as temp_directory:
      storage_file = os.path.join(
          temp_directory, u'{0:s}.plaso'.format(test_definition.name))

      # Extraction, tagging and a final read-back with psort must all pass;
      # all() short-circuits on the first failing step.
      steps = (
          lambda: self._RunLog2Timeline(
              test_definition, temp_directory, storage_file, source_path),
          lambda: self._RunPsortWithTaggingOptions(
              test_definition, temp_directory, storage_file),
          lambda: self._RunPsort(
              test_definition, temp_directory, storage_file))
      return all(step() for step in steps)
class ImageExportTestCase(TestCase):
  """Class that implements the image export test case.

  The image export test case runs image_export to extract files from a storage
  media image, specified by the test definition.
  """

  NAME = u'image_export'

  def __init__(
      self, tools_path, test_sources_path, test_references_path,
      test_results_path, debug_output=False):
    """Initializes a test case object.

    Args:
      tools_path (str): path to the plaso tools.
      test_sources_path (str): path to the test sources.
      test_references_path (str): path to the test references.
      test_results_path (str): path to store test results.
      debug_output (Optional[bool]): True if debug output should be generated.
    """
    super(ImageExportTestCase, self).__init__(
        tools_path, test_sources_path, test_references_path,
        test_results_path, debug_output=debug_output)
    self._image_export_path = None
    self._InitializeImageExportPath()

  def _InitializeImageExportPath(self):
    """Initializes the location of image_export."""
    candidates = (u'image_export.exe', u'image_export.sh', u'image_export.py')
    for filename in candidates:
      self._image_export_path = os.path.join(self._tools_path, filename)
      if os.path.exists(self._image_export_path):
        break

    # A Python script needs to be run through the interpreter.
    if self._image_export_path.endswith(u'.py'):
      self._image_export_path = u' '.join([
          sys.executable, self._image_export_path])

  def _RunImageExport(self, test_definition, temp_directory, source_path):
    """Runs image_export on a storage media image.

    Args:
      test_definition (TestDefinition): test definition.
      temp_directory (str): name of a temporary directory.
      source_path (str): path of the source.

    Returns:
      bool: True if image_export ran successfully.
    """
    export_directory = os.path.join(temp_directory, u'export')
    output_options = u'-w {0:s}'.format(export_directory)

    stdout_file = os.path.join(
        temp_directory, u'{0:s}-image_export.out'.format(test_definition.name))
    stderr_file = os.path.join(
        temp_directory, u'{0:s}-image_export.err'.format(test_definition.name))

    command = u'{0:s} {1:s} {2:s} > {3:s} 2> {4:s}'.format(
        self._image_export_path, output_options, source_path, stdout_file,
        stderr_file)
    logging.info(u'Running: {0:s}'.format(command))

    succeeded = self._RunCommand(command)

    if self._debug_output:
      with open(stderr_file, 'rb') as error_file:
        print(error_file.read())

    # TODO: hash the files.
    for log_path in (stdout_file, stderr_file):
      if os.path.exists(log_path):
        shutil.copy(log_path, self._test_results_path)

    return succeeded

  def ReadAttributes(self, test_definition_reader, test_definition):
    """Reads the test definition attributes into to the test definition.

    Args:
      test_definition_reader (TestDefinitionReader): test definition reader.
      test_definition (TestDefinition): test definition.

    Returns:
      bool: True if the read was successful.
    """
    for attribute_name in (u'filter_file', u'source'):
      setattr(
          test_definition, attribute_name,
          test_definition_reader.GetConfigValue(
              test_definition.name, attribute_name))
    return True

  def Run(self, test_definition):
    """Runs the test case with the parameters specified by the test definition.

    Args:
      test_definition (TestDefinition): test definition.

    Returns:
      bool: True if the test ran successfully.
    """
    source_path = test_definition.source
    if self._test_sources_path:
      source_path = os.path.join(self._test_sources_path, source_path)

    if not os.path.exists(source_path):
      logging.error(u'No such source: {0:s}'.format(source_path))
      return False

    with TempDirectory() as temp_directory:
      # Extract files with image_export.
      return self._RunImageExport(
          test_definition, temp_directory, source_path)
class OutputTestCase(TestCase):
  """Class that implements the output test case.

  The output test case runs psort on a storage file to its various
  output formats.
  """

  NAME = u'output'

  # Output formats that can be compared line by line against a reference.
  _SUPPORTED_OUTPUT_FORMATS = (
      u'dynamic', u'json', u'json_line', u'l2tcsv', u'l2ttln', u'rawpy',
      u'tln')

  def __init__(
      self, tools_path, test_sources_path, test_references_path,
      test_results_path, debug_output=False):
    """Initializes a test case object.

    Args:
      tools_path (str): path to the plaso tools.
      test_sources_path (str): path to the test sources.
      test_references_path (str): path to the test references.
      test_results_path (str): path to store test results.
      debug_output (Optional[bool]): True if debug output should be generated.
    """
    super(OutputTestCase, self).__init__(
        tools_path, test_sources_path, test_references_path,
        test_results_path, debug_output=debug_output)
    self._InitializePsortPath()

  def _CompareOutputFile(self, test_definition, temp_directory):
    """Compares the output file with a reference output file.

    Args:
      test_definition (TestDefinition): test definition.
      temp_directory (str): name of a temporary directory.

    Returns:
      bool: True if the output files are identical.
    """
    if test_definition.output_format not in self._SUPPORTED_OUTPUT_FORMATS:
      # Fixed typo in the error message (was "Unsuppored").
      logging.error(u'Unsupported output format: {0:s}'.format(
          test_definition.output_format))
      return False

    output_file_path = os.path.join(temp_directory, test_definition.output_file)

    # TODO: add support to compare output by SHA-256.

    result = False
    if test_definition.reference_output_file:
      reference_output_file_path = test_definition.reference_output_file
      if self._test_references_path:
        reference_output_file_path = os.path.join(
            self._test_references_path, reference_output_file_path)

      if not os.path.exists(reference_output_file_path):
        logging.error(u'No such reference output file: {0:s}'.format(
            reference_output_file_path))
        return False

      with open(reference_output_file_path, 'r') as reference_output_file:
        with open(output_file_path, 'r') as output_file:
          # An empty unified diff means the files are identical.
          differences = list(difflib.unified_diff(
              reference_output_file.readlines(), output_file.readlines(),
              fromfile=reference_output_file_path, tofile=output_file_path))

      if not differences:
        result = True

    return result

  def _RunPsortWithOutputOptions(
      self, test_definition, temp_directory, storage_file):
    """Runs psort with the output options specified by the test definition.

    Args:
      test_definition (TestDefinition): test definition.
      temp_directory (str): name of a temporary directory.
      storage_file (str): path of the storage file.

    Returns:
      bool: True if psort ran successfully.
    """
    # Work on a copy of the options so repeated runs do not keep appending
    # -o/-w options to the list stored on the test definition.
    output_options = list(test_definition.output_options)
    if test_definition.output_format:
      output_options.append(u'-o {0:s}'.format(test_definition.output_format))

    output_file_path = None
    if test_definition.output_file:
      output_file_path = os.path.join(
          temp_directory, test_definition.output_file)
      output_options.append(u'-w {0:s}'.format(output_file_path))

    output_options = u' '.join(output_options)

    stdout_file = os.path.join(
        temp_directory, u'{0:s}-psort.out'.format(test_definition.name))
    stderr_file = os.path.join(
        temp_directory, u'{0:s}-psort.err'.format(test_definition.name))
    command = u'{0:s} {1:s} {2:s} > {3:s} 2> {4:s}'.format(
        self._psort_path, output_options, storage_file, stdout_file,
        stderr_file)

    logging.info(u'Running: {0:s}'.format(command))
    result = self._RunCommand(command)

    if self._debug_output:
      with open(stderr_file, 'rb') as file_object:
        output_data = file_object.read()
        print(output_data)

    if output_file_path and os.path.exists(output_file_path):
      shutil.copy(output_file_path, self._test_results_path)

    if os.path.exists(stdout_file):
      shutil.copy(stdout_file, self._test_results_path)
    if os.path.exists(stderr_file):
      shutil.copy(stderr_file, self._test_results_path)

    return result

  def ReadAttributes(self, test_definition_reader, test_definition):
    """Reads the test definition attributes into to the test definition.

    Args:
      test_definition_reader (TestDefinitionReader): test definition reader.
      test_definition (TestDefinition): test definition.

    Returns:
      bool: True if the read was successful.
    """
    test_definition.output_file = test_definition_reader.GetConfigValue(
        test_definition.name, u'output_file')

    test_definition.output_format = test_definition_reader.GetConfigValue(
        test_definition.name, u'output_format')

    test_definition.output_options = test_definition_reader.GetConfigValue(
        test_definition.name, u'output_options')

    # Normalize the options to a list: a missing value becomes an empty list
    # and a comma separated string is split.
    if test_definition.output_options is None:
      test_definition.output_options = []
    elif isinstance(test_definition.output_options, STRING_TYPES):
      test_definition.output_options = test_definition.output_options.split(
          u',')

    test_definition.reference_output_file = (
        test_definition_reader.GetConfigValue(
            test_definition.name, u'reference_output_file'))

    test_definition.source = test_definition_reader.GetConfigValue(
        test_definition.name, u'source')

    return True

  def Run(self, test_definition):
    """Runs the test case with the parameters specified by the test definition.

    Args:
      test_definition (TestDefinition): test definition.

    Returns:
      bool: True if the test ran successfully.
    """
    source_path = test_definition.source
    if self._test_sources_path:
      source_path = os.path.join(self._test_sources_path, source_path)

    if not os.path.exists(source_path):
      logging.error(u'No such source: {0:s}'.format(source_path))
      return False

    with TempDirectory() as temp_directory:
      if not self._RunPsortWithOutputOptions(
          test_definition, temp_directory, source_path):
        return False

      # Compare output file with a reference output file.
      if test_definition.output_file and test_definition.reference_output_file:
        if not self._CompareOutputFile(test_definition, temp_directory):
          return False

    return True
# Register the test case implementations with the manager so that test
# definitions can reference them by their NAME attribute.
TestCasesManager.RegisterTestCases([
    ExtractAndOutputTestCase, ExtractAndTagTestCase, ImageExportTestCase,
    OutputTestCase])
def Main():
  """The main function.

  Parses the command line arguments, validates the configured paths and
  runs all test definitions from the configuration file.

  Returns:
    bool: True if all tests ran successfully, False otherwise.
  """
  argument_parser = argparse.ArgumentParser(
      description=u'End-to-end test launcher.', add_help=False,
      formatter_class=argparse.RawDescriptionHelpFormatter)

  argument_parser.add_argument(
      u'-c', u'--config', dest=u'config_file', action=u'store',
      metavar=u'CONFIG_FILE', default=None,
      help=u'path of the test configuration file.')

  argument_parser.add_argument(
      u'--debug', dest=u'debug_output', action=u'store_true', default=False,
      help=u'enable debug output.')

  argument_parser.add_argument(
      u'-h', u'--help', action=u'help',
      help=u'show this help message and exit.')

  argument_parser.add_argument(
      u'--references-directory', u'--references_directory', action=u'store',
      metavar=u'DIRECTORY', dest=u'references_directory', type=str,
      default=None, help=(
          u'The location of the directory where the test references are '
          u'stored.'))

  argument_parser.add_argument(
      u'--results-directory', u'--results_directory', action=u'store',
      metavar=u'DIRECTORY', dest=u'results_directory', type=str,
      default=None, help=(
          u'The location of the directory where to store the test results.'))

  argument_parser.add_argument(
      u'--sources-directory', u'--sources_directory', action=u'store',
      metavar=u'DIRECTORY', dest=u'sources_directory', type=str,
      default=None, help=(
          u'The location of the directory where the test sources are stored.'))

  argument_parser.add_argument(
      u'--tools-directory', u'--tools_directory', action=u'store',
      metavar=u'DIRECTORY', dest=u'tools_directory', type=str,
      default=None, help=u'The location of the plaso tools directory.')

  options = argument_parser.parse_args()

  # Default to <script directory>/../config/end-to-end.ini.
  if not options.config_file:
    options.config_file = os.path.dirname(__file__)
    options.config_file = os.path.dirname(options.config_file)
    options.config_file = os.path.join(
        options.config_file, u'config', u'end-to-end.ini')

  if not os.path.exists(options.config_file):
    print(u'No such config file: {0:s}.'.format(options.config_file))
    print(u'')
    return False

  logging.basicConfig(
      format=u'[%(levelname)s] %(message)s', level=logging.INFO)

  tools_path = options.tools_directory
  if not tools_path:
    tools_path = os.path.join(
        os.path.dirname(os.path.dirname(__file__)), u'tools')

  test_sources_path = options.sources_directory
  if test_sources_path and not os.path.isdir(test_sources_path):
    print(u'No such sources directory: {0:s}.'.format(test_sources_path))
    print(u'')
    return False

  test_references_path = options.references_directory
  if test_references_path and not os.path.isdir(test_references_path):
    print(u'No such references directory: {0:s}.'.format(test_references_path))
    print(u'')
    return False

  test_results_path = options.results_directory
  if not test_results_path:
    test_results_path = os.getcwd()

  if not os.path.isdir(test_results_path):
    print(u'No such results directory: {0:s}.'.format(test_results_path))
    print(u'')
    return False

  # Note: the launcher reads the test definitions itself; the original code
  # additionally read them here into an unused list, which duplicated the
  # work without using the result.
  test_launcher = TestLauncher(
      tools_path, test_sources_path, test_references_path,
      test_results_path, debug_output=options.debug_output)
  test_launcher.ReadDefinitions(options.config_file)

  failed_tests = test_launcher.RunTests()
  if failed_tests:
    print(u'Failed tests:')
    for failed_test in failed_tests:
      print(u' {0:s}'.format(failed_test))
    print(u'')
    return False

  return True
# Script entry point: exit code 0 when all tests pass, 1 otherwise.
if __name__ == '__main__':
  sys.exit(0 if Main() else 1)
| [
"joachim.metz@gmail.com"
] | joachim.metz@gmail.com |
d46ec2cdd661faa6e8fdcff5ee945c17a044cb26 | 7e4b4633fab4cc1dc030d69fa1c5bd335d655915 | /problems.py | 70ea59f70958cc5fd799b2a5153d09cd644391df | [] | no_license | jmockbee/reviewquiz1 | 109e19a86659b52a34f551760766d23a240f60f3 | c3a20e29a591b7d941ab5659456accf4b010b1b2 | refs/heads/main | 2023-06-30T09:38:27.721991 | 2021-08-05T01:23:42 | 2021-08-05T01:23:42 | 392,862,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | bill = 47.28
# Split a restaurant bill (plus a 15% tip) evenly among five people.
tip = bill * 0.15
total = bill + tip
share = total / 5
print("Each person needs to pay:" + str(share))

# Dividing a number by itself yields 1.0 (true division returns a float).
result = 10 / 10
print(result)

# Print a short sentence assembled word by word.
words = ["How", "do", "you", "like", "Python", "so", "far?"]
print(*words)
"mockbee1000@gmail.com"
] | mockbee1000@gmail.com |
a4687640887e3eaa056be17102156fb6c73301a5 | 71ef2ddc4a10c9f6be7b938dadbd25bb5accbe89 | /bots/MyBot_alt2/MyBot_alt2.py | c5bd7445daa98e9422d2b37244815251b1dfaf0c | [] | no_license | NicoKNL/halite3 | e06b72e68c102d5cf863b6efd7c2ef5b0c161ea2 | 60ccd9a36e13b447a481e242762379d38e71c1b1 | refs/heads/master | 2020-04-07T22:01:18.804779 | 2018-12-20T00:47:17 | 2018-12-20T00:47:17 | 158,751,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,186 | py | #!/usr/bin/env python3
# Python 3.6
# Import the Halite SDK, which will let you interact with the game.
import hlt
# This library contains constant values.
from hlt import constants
# This library contains direction metadata to better interface with the game.
from hlt.positionals import Direction, Position
# This library allows you to generate random numbers.
# Logging allows you to save messages for yourself. This is required because the regular STDOUT
# (# print statements) are reserved for the engine-bot communication.
import logging
""" <<<Game Begin>>> """
# This game object contains the initial game state.
game = hlt.Game()
# At this point "game" variable is populated with initial map data.
# This is a good place to do computationally expensive start-up pre-processing.
# As soon as you call "ready" function below, the 2 second per turn timer will start.
game.ready("ALT2")
# Now that your bot is initialized, save a message to yourself in the log file with some important information.
# Here, you log here your id, which you can always fetch from the game object by using my_id.
logging.info("Successfully created bot! My Player ID is {}.".format(game.my_id))
FILL_RATIO = 0.9 # For now we accept 80% fill rate
INF = 99999999
directions = {"n": (0, -1),
"e": (1, 0),
"s": (0, 1),
"w": (-1, 0)}
ship_actions = {}
ship_directions = {}
def shipyard_cleanup(game_map, ship, shipyard):
    # Chooses a mining target for a ship working the cells around the
    # shipyard.  Ships alternate between an "act" turn and a "rest" turn
    # (tracked in the module-level ship_actions dict), and are flagged to
    # head back once they reach the edge of the 5-cell cleanup radius
    # (tracked in ship_directions).
    # NOTE(review): leading indentation was lost in this copy of the file;
    # the nesting below is a reconstruction — verify against the original.

    # Toggle the ship's act/rest flag; a ship seen for the first time acts.
    if ship in ship_actions.keys():
        action = not ship_actions[ship]
    else:
        action = True
    ship_actions[ship] = action

    # turn_in is whether the ship is currently returning to the shipyard.
    if ship in ship_directions.keys():
        turn_in = ship_directions[ship]
    elif ship.position == shipyard.position:
        # NOTE(review): same value as the else branch below — possibly an
        # unfinished special case for ships sitting on the shipyard.
        turn_in = False
    else:
        turn_in = False
    ship_directions[ship] = turn_in

    if action:
        if turn_in:
            # Returning ships head straight for the shipyard.
            target = shipyard.position
        else:
            target = ship.position
            # .w appears to hold the cell's halite weight.
            max_value = game_map[target.x][target.y].w
            staying_value = max_value // 4   # gain from mining here (25% rate)
            moving_cost = max_value // 10    # cost to leave this cell (10%)
            # Only consider moving if the ship can afford to leave its cell.
            if moving_cost < ship.halite_amount or moving_cost == 0:
                # Pick the richest adjacent cell within the cleanup radius
                # that beats staying put after paying the move cost.
                for d in directions.values():
                    pos = game_map.normalize(ship.position.directional_offset(d))
                    # logging.debug(f"pos: {pos} | {game_map.calculate_distance(ship.position, shipyard.position) <= 5} | {game_map[pos.x][pos.y].w}")
                    if game_map.calculate_distance(pos, shipyard.position) <= 5:
                        w = game_map[pos.x][pos.y].w
                        if (w // 4) - moving_cost > staying_value and w > max_value:
                            max_value = w
                            target = pos
            # At the edge of the cleanup radius, flag the ship to return.
            if game_map.calculate_distance(ship.position, shipyard.position) == 5:
                ship_directions[ship] = True  # Start turning in
    else:
        # Rest turn: stay and collect.
        target = ship.position
    logging.debug(f"decision: {target}")
    return target
def closest_cell_with_ratio_fill(game_map, ship):
    """Return the nearest cell rich enough to (mostly) top up the ship.

    A cell qualifies when its halite is at least the smaller of 25% of the
    map's richest cell and 4x the ship's remaining capacity.  The map is
    scanned in growing squares centred on the ship; within the first square
    containing a qualifying cell, the closest one wins.  Falls back to the
    ship's own position when nothing qualifies.
    """
    minimum = min(0.25 * game_map.max_halite,
                  4 * (constants.MAX_HALITE - ship.halite_amount))
    logging.debug(f"res max: {game_map.max_halite} - minimum: {minimum}")

    origin = ship.position
    target = None
    radius = 1

    # Expand the search square until a qualifying cell has been found or the
    # whole map has been covered.
    while target is None and radius <= game_map.height:
        span = range(-radius, radius + 1)
        for dx in span:
            for dy in span:
                candidate = game_map.normalize(
                    Position(origin.x - dx, origin.y - dy))
                if game_map[candidate].halite_amount < minimum:
                    continue
                if target is None or (
                        game_map.calculate_distance(origin, candidate) <
                        game_map.calculate_distance(origin, target)):
                    target = candidate
        radius += 1

    if target is None:
        target = ship.position
        logging.info("target not found!")
    else:
        logging.info(f"target found!: {target}")
    return target
def weighted_cleanup(game_map, ship, shipyard):
    """Select a nearby cleanup target around the shipyard for the ship.

    Scans square windows of increasing size around the shipyard, collecting
    unoccupied cells that hold at least a minimum amount of halite, until
    more than three candidates are known (or the whole map was scanned).

    Args:
        game_map: the current game map.
        ship: the ship to route.
        shipyard: the shipyard to clean around.

    Returns:
        The candidate cell position closest to the ship, or the ship's own
        position when no suitable cell exists (the previous implementation
        returned None in that case, which callers could not use as a target).
    """
    minimum = 30  # Ignore cells holding less halite than this.
    targets = []
    current_offset = 1

    # Expand the search window; each pass rescans the full square, so guard
    # against appending duplicate candidates.
    while len(targets) <= 3 and current_offset <= game_map.height:
        offsets = range(-current_offset, current_offset + 1)
        for offset in ((x, y) for x in offsets for y in offsets):
            cell_pos = game_map.normalize(shipyard.position + Position(*offset))
            cell = game_map[cell_pos]
            if (cell.halite_amount >= minimum and not cell.is_occupied
                    and cell_pos not in targets):
                targets.append(cell_pos)
        current_offset += 1

    # Robustness: stay put instead of returning None when nothing qualifies.
    if not targets:
        return ship.position

    return min(
        targets,
        key=lambda target: game_map.calculate_distance(ship.position, target))
def dijkstra_a_to_b(game_map, source, target, offset=1):
    """Return the first Direction of the cheapest path from `source` to
    `target`, where stepping onto a cell costs its halite amount and occupied
    cells cost INF (avoided whenever an alternative exists).

    The search is restricted to the bounding box spanned by the two positions,
    padded by `offset` cells on each side.  Returns Direction.Still when
    source == target or when no usable path could be reconstructed.
    """
    if source == target:
        return Direction.Still
    dx = abs(target.x - source.x)
    dy = abs(target.y - source.y)
    xdir = 1 if target.x > source.x else -1
    ydir = 1 if target.y > source.y else -1
    # Valid x and y positions within the padded search window.
    if xdir == 1:
        rx = range(source.x - offset, target.x + offset + 1)
    else:
        rx = range(target.x - offset, source.x + offset + 1)
    if ydir == 1:
        ry = range(source.y - offset, target.y + offset + 1)
    else:
        ry = range(target.y - offset, source.y + offset + 1)
    # Initialize tentative distances for every cell in the window.
    distance_map = {
        source: {
            "distance": 0,
            "previous": None}
    }
    queue = [source]
    for offset_x in range(-offset, dx + offset + 1):
        for offset_y in range(-offset, dy + offset + 1):
            if offset_x == 0 and offset_y == 0:
                continue
            x = source.x + offset_x * xdir
            y = source.y + offset_y * ydir
            position = Position(x, y)
            # BUGFIX: this key used to be misspelled "previouis", so the
            # back-tracking loop below raised KeyError for any node that was
            # never relaxed (e.g. an unreachable target).
            distance_map[position] = {
                "distance": INF * 32,
                "previous": None
            }
            queue.append(position)
    # Dijkstra: repeatedly settle the cheapest unsettled node.
    while len(queue):
        # min() is O(n) per step (vs the previous sorted(...)[0] at
        # O(n log n)) and, like the old code, picks the first node with the
        # minimal tentative distance.
        node = min(queue, key=lambda position: distance_map[position]["distance"])
        queue.remove(node)
        # For each neighbouring position
        for pos in node.get_surrounding_cardinals():
            pos = game_map.normalize(pos)  # Ensure position is in normalized coordinates
            # Only consider cells inside the search bounds.
            # NOTE(review): distance_map keys are *unnormalized* positions, so
            # near a map edge a normalized `pos` may not match its entry --
            # pre-existing behaviour, deliberately left unchanged here.
            if pos.x in rx and pos.y in ry:
                neighbour = game_map[pos]
                # Cost of stepping onto the neighbour.
                if game_map[pos].is_occupied:
                    neighbour_weight = INF
                else:
                    neighbour_weight = neighbour.halite_amount
                # Length of the path to the neighbour through `node`.
                dist_to_neighbour = distance_map[node]["distance"] + neighbour_weight
                # Relax: keep the cheaper of the known and the new path.
                if dist_to_neighbour < distance_map[pos]["distance"]:
                    distance_map[pos]["distance"] = dist_to_neighbour
                    distance_map[pos]["previous"] = node
    # Walk back from target to source along the "previous" links; the node
    # adjacent to source tells us which way to move first.
    path_node = target
    while path_node != source:
        prev_path_node = distance_map[path_node]["previous"] if path_node in distance_map else None
        if prev_path_node is None:
            # Target unreachable or outside the window: hold position instead
            # of crashing (the old code raised KeyError / looped on None here).
            return Direction.Still
        if prev_path_node == source:
            for d in Direction.get_all_cardinals():
                if game_map.normalize(source.directional_offset(d)) == path_node:
                    return d
        path_node = prev_path_node
    # Defensive fallback: no cardinal matched (should not happen in practice).
    return Direction.Still
def safe_greedy_move(game_map, source, target):
    """Pick the safe move (Still or one of the four cardinals) that minimizes
    the distance to `target`; returns Direction.Still when no move is safe."""
    candidates = []
    # Standing still is an option only if the current cell is safe.
    if game_map.position_is_safe(source):
        candidates.append(Direction.Still)
    # Collect every cardinal step that lands on a safe cell.
    for cardinal in Direction.get_all_cardinals():
        landing = game_map.normalize(source.directional_offset(cardinal))
        if game_map.position_is_safe(landing):
            candidates.append(cardinal)
    # No safe option at all: hold position and hope for the best.
    if not candidates:
        return Direction.Still
    # Greedy choice: the candidate whose landing cell is closest to target.
    def _distance_after(direction):
        landing = game_map.normalize(source.directional_offset(direction))
        return game_map.calculate_distance(landing, target)
    return min(candidates, key=_distance_after)
""" <<<Game Loop>>> """
# Per-turn pipeline: (1) end-game ships head home, (2) ships that cannot move
# stay, (3) content & safe ships stay, (4) remaining ships path to the
# shipyard or to a sweep target, then (5) maybe spawn a new ship.
while True:
    # This loop handles each turn of the game. The game object changes every turn, and you refresh that state by
    # running update_frame().
    game.update_frame()
    me = game.me
    game_map = game.game_map
    # A command queue holds all the commands you will run this turn. You build this list up and submit it at the
    # end of the turn.
    ship_queue = me.get_ships()
    command_queue = []
    # NOTE(review): new_ship_positions and ship_position_map are never used below.
    new_ship_positions = []
    ship_position_map = [] # (ship, target)
    # First we check if we are at the end of the game and the ship needs to start coming home
    ship_queue_tmp = []
    for ship in ship_queue:
        if ship.should_turn_in(game_map, game.turn_number) and ship.can_move(game_map[ship]):
            target = me.shipyard.position
            new_dir = dijkstra_a_to_b(game_map, ship.position, target)
            # Final check if the move is actually safe as Dijkstra can result in an unsafe move when 1 unit away from target
            new_position = game_map.normalize(ship.position.directional_offset(new_dir))
            if not game_map.position_is_safe(new_position):
                new_dir = safe_greedy_move(game_map, ship.position, target)
                new_position = game_map.normalize(ship.position.directional_offset(new_dir))
            # Already move the ship in the game map to help prevent collisions
            logging.debug(f"SHIP {ship.id} WANTS TO MOVE: {ship.position} - {new_dir}")
            game_map[ship].mark_safe()
            game_map[new_position].mark_unsafe(ship)
            # And finally add the command to the queue
            command_queue.append(ship.move(new_dir))
        else:
            ship_queue_tmp.append(ship)
    ship_queue = ship_queue_tmp
    # Evaluated all the ships that can't move
    ship_queue_tmp = []
    for ship in ship_queue:
        current_cell = game_map[ship]
        if not ship.can_move(current_cell):
            new_dir = Direction.Still
            command_queue.append(ship.move(new_dir))
        else:
            ship_queue_tmp.append(ship)
    ship_queue = ship_queue_tmp
    # Then evaluate all ships that don't want to move and are in a safe spot
    ship_queue_tmp = []
    for ship in ship_queue:
        current_cell = game_map[ship]
        logging.debug(f"SHOULD MOVE: {not ship.should_move(current_cell)} | {game_map.position_is_safe(current_cell)}")
        if not ship.should_move(current_cell) and not game_map.enemy_is_close(current_cell):
            new_dir = Direction.Still
            logging.debug(f"SHIP {ship.id} WANTS TO STAY: {ship.position} - {new_dir}")
            command_queue.append(ship.move(new_dir))
        else:
            ship_queue_tmp.append(ship)
    ship_queue = ship_queue_tmp
    # Finally start resolving all ships that CAN move, and want or should move
    for ship in ship_queue:
        current_cell = game_map[ship]
        if ship.halite_amount >= FILL_RATIO * constants.MAX_HALITE:
            # Case: We need to turn in our halite
            target = me.shipyard.position
        else:
            # Case: Gather more resources
            target = weighted_cleanup(game_map, ship, me.shipyard)
        new_dir = dijkstra_a_to_b(game_map, ship.position, target)
        # Final check if the move is actually safe as Dijkstra can result in an unsafe move when 1 unit away from target
        new_position = game_map.normalize(ship.position.directional_offset(new_dir))
        if not game_map.position_is_safe(new_position):
            new_dir = safe_greedy_move(game_map, ship.position, target)
            new_position = game_map.normalize(ship.position.directional_offset(new_dir))
        # Already move the ship in the game map to help prevent collisions
        logging.debug(f"SHIP {ship.id} WANTS TO MOVE: {ship.position} - {new_dir}")
        game_map[ship].mark_safe()
        game_map[new_position].mark_unsafe(ship)
        # And finally add the command to the queue
        command_queue.append(ship.move(new_dir))
    # Spawning a ship
    # Conditions: early enough, affordable, shipyard clear, enough halite left
    # per ship on the map, and below the fleet cap.
    if game.turn_number <= constants.MAX_TURNS - 150 and me.halite_amount >= constants.SHIP_COST and not game_map[me.shipyard].is_occupied and game_map.total_halite / max(game_map.ship_count, 1) > 4000 and game_map.ship_count < 50:
        command_queue.append(me.shipyard.spawn())
    # if game.turn_number > 10:
    #     time.sleep(2)
    # Sending moves to end the turn
    game.end_turn(command_queue)
| [
"klaassen.nico@gmail.com"
] | klaassen.nico@gmail.com |
15a1a380b457e89c7c753d782d86d74906266671 | fcea076f585db7b17664a12f72e2d49a04df9896 | /blog/migrations/0001_initial.py | 18f5fd6a64de41a5af5d0698716341cd9876a458 | [] | no_license | SadishGautam/Jaggasale | 9eefcd0c539f906d453344660c3055f33b3f11d7 | da25e56ffe255a6a3787ee02540a453516748db2 | refs/heads/master | 2023-05-11T06:02:07.556950 | 2021-05-12T00:32:23 | 2021-05-12T00:32:23 | 318,173,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | # Generated by Django 3.1.1 on 2021-03-21 14:31
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Blog_News table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Blog_News',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(default='Blog title', max_length=50)),
                # NOTE(review): typo "acn" in verbose_name originates in the model;
                # a historical migration must not be edited to fix it.
                ('full_news', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='Here you acn write full news')),
                ('image', models.ImageField(default='default.jpg', upload_to='static/images/blog')),
                ('date', models.DateField(blank=True, null=True)),
            ],
        ),
    ]
| [
"Sadish.gautam09@gmail.com"
] | Sadish.gautam09@gmail.com |
d63071059d2af8811674bbe2648d6dbaba74f566 | cd687955e6ff5565f802703805a2670abbee7f6f | /progs/Form_v1_clientsidepagination/form/migrations/0004_auto_20171121_0736.py | 29e48fa10c0c4609bfce67345275ddf95d58fffc | [] | no_license | gopishettyshashikanth/python_programs | ce11112db8a3b64349b9f1e5ebb3d8daf0294c07 | 98084ef31a71e10c02da525cffe0a5f775140e03 | refs/heads/master | 2023-05-31T00:24:14.329478 | 2019-09-16T05:24:07 | 2019-09-16T05:24:07 | 110,864,203 | 0 | 0 | null | 2021-06-10T20:06:52 | 2017-11-15T17:22:40 | JavaScript | UTF-8 | Python | false | false | 474 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema change: redefines UserCategory.deptID as an
    optional foreign key to form.Dept with fixed choices."""
    dependencies = [
        ('form', '0003_auto_20171121_0721'),
    ]
    operations = [
        migrations.AlterField(
            model_name='usercategory',
            name='deptID',
            field=models.ForeignKey(choices=[(b'CSE', b'10'), (b'ECE', b'20')], to='form.Dept', blank=True, null=True),
        ),
    ]
| [
"noreply@github.com"
] | gopishettyshashikanth.noreply@github.com |
71c75ca7209b88ffee2c89724feb16a5d0991531 | 92d0f8eb81aa5325dc5887de25f6ef4b423dfe18 | /tests/conftest.py | 2dc9f277560f854ce29d7e7d1e0008c2fdae5687 | [
"Apache-2.0"
] | permissive | kbakk/ansible-role-nfs | bac297b811632794032c3f6f416dd32204409f52 | 780a557798ef78435a92c51132d301444404bfaa | refs/heads/master | 2020-03-20T10:00:26.773773 | 2018-06-23T12:29:44 | 2018-06-23T12:29:44 | 137,355,991 | 0 | 0 | null | 2018-06-14T12:34:27 | 2018-06-14T12:34:27 | null | UTF-8 | Python | false | false | 686 | py | import os
import pytest
import testinfra.utils.ansible_runner
# Run these tests against every host in the Molecule-generated Ansible inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.fixture(scope='module')
def os_map(host):
    """Per-OS-family lookup table (command paths, package names, expected
    showmount output), resolved for the host under test via Ansible facts."""
    family = str(host.ansible('setup')['ansible_facts']['ansible_os_family']).lower()
    table = {
        'cmd_showmount': {'debian': '/sbin/showmount',
                          'redhat': '/usr/sbin/showmount'},
        'pkg_nfsutils': {'debian': 'nfs-common', 'redhat': 'nfs-utils'},
        'stdout_showmount_nfs_1': {'debian': '127.0.0.1:/exports',
                                   'redhat': '::1:/exports'},
    }
    # Collapse each entry to the value for this host's OS family.
    return {key: variants[family] for key, variants in table.items()}
| [
"kbakk@users.noreply.github.com"
] | kbakk@users.noreply.github.com |
1aed2d3abeb6d420798dbb537ee3ea42775e0c8a | d933afccc627ff94b3eae4abac6e45f66d01c712 | /deep-learning-udacity/codes/shamir-alavi/L3-15_softmax.py | fa8a3d620b4856853264a1cf8bd9b2f9f5bac55f | [] | no_license | dg1223/ai_bangladesh | 719b95f82f80a6edc3c1ac151338f56e5838a8a3 | de7abf36ebf8519f6a93095e4c430aca7f365f64 | refs/heads/master | 2020-09-22T04:18:26.573590 | 2019-12-24T23:06:21 | 2019-12-24T23:06:21 | 225,045,687 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | import numpy as np
# Write a function that takes as input a list of numbers, and returns
# the list of values given by the softmax function.
def softmax(L):
    """Return the softmax probabilities of a list of numbers as a list.

    Fixes the original O(n^2) behaviour (np.sum(np.exp(L)) was recomputed on
    every loop iteration) and subtracts the max before exponentiating for
    numerical stability; mathematically the result is unchanged.
    """
    if len(L) == 0:
        # Preserve the original behaviour on empty input.
        return []
    scores = np.asarray(L, dtype=float)
    # Shifting by the max prevents exp() overflow for large inputs and
    # cancels out in the ratio below.
    exps = np.exp(scores - scores.max())
    return list(exps / exps.sum())
| [
"alavi1223@hotmail.com"
] | alavi1223@hotmail.com |
a6d6962bd3dab10fa4df29e34e5373b526aa75e8 | 63d3ebe4dcc514c2ec74e3b584cf80e9dcaab293 | /flexmatcher/__init__.py | ab5e4bb8a14637c3097055e0e350e7f009dafa83 | [] | no_license | LeTheTai1312/TichHopDuLieu_N14 | 2e212e9c8d689a3ec0fd6a669477fd27a874a8b5 | a04818c609ad9b995bf1b191de635aa22f5fca61 | refs/heads/main | 2023-06-04T01:05:33.864608 | 2021-06-14T10:04:37 | 2021-06-14T10:04:37 | 376,772,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from flexmatcher.flexmatcher import FlexMatcher
__author__ = """BigGorilla Team"""
__email__ = 'thebiggorilla.team@gmail.com'
__version__ = '0.8.0'
| [
"62357036+LeTheTai1805@users.noreply.github.com"
] | 62357036+LeTheTai1805@users.noreply.github.com |
83eed61d269a16f0f7d5d8a1e50b90709adb1a07 | d7824fec6c6ecc50af55093740cab05dfda33a1a | /devel/lib/python3/dist-packages/turtlebot3_teleop/__init__.py | 071b5ff61abd0af6229fa2b9bd16cb71427b31d8 | [] | no_license | dlerner97/path_planning_robot_maze | 949264421aa4814932a35d839ae4e50eb81a63c9 | 8a38ced29313bd136c4f229fff7be9c6f23f0ecb | refs/heads/main | 2023-04-22T11:40:03.609550 | 2021-05-10T03:27:02 | 2021-05-10T03:27:02 | 344,199,204 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | /home/dani/Documents/ENPM661/path_planning_robot_maze/devel/.private/turtlebot3_teleop/lib/python3/dist-packages/turtlebot3_teleop/__init__.py | [
"dlerner97@gmail.com"
] | dlerner97@gmail.com |
5e18141419b191b7774c5b4df05eafde7bb1e49a | d9504746c96c9e03c70db23428813da22069f2cb | /migrations/versions/10723b632a87_.py | fa1a2354cfeca51f952c847345f453a56f4da06b | [
"MIT"
] | permissive | SevereOverfl0w/MCDirectory | c7a40f609dccfafa9fd0ff1e4cd3b44db29a6253 | 443a44a01998938571cda8e3bfecbad7b81269c1 | refs/heads/master | 2020-03-30T06:39:24.309162 | 2013-12-18T13:38:23 | 2013-12-18T13:38:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | """empty message
Revision ID: 10723b632a87
Revises: 3d7ce850941c
Create Date: 2013-11-12 22:18:26.482191
"""
# revision identifiers, used by Alembic.
revision = '10723b632a87'
down_revision = '3d7ce850941c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('comment',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('commenter_id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('time', sa.DateTime(), nullable=False),
sa.Column('comment', sa.Text(), nullable=False),
sa.Column('stars', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['commenter_id'], ['user.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('comment')
### end Alembic commands ###
| [
"overfl0w@narna.co"
] | overfl0w@narna.co |
af15f81dafcf12aeca3ce23323e2c78b0d7ac525 | 4ac9330ba6616d4cfe7b9bd8937a6a309c89dd14 | /fixture/mail.py | c64f34a3b30f5cd5b13a6c54161a28ea9e8df161 | [] | no_license | MrnRezanova/Python_Mantis | 2587d3b9e7891d4a3cc1677b46aeda630416411b | 33f0e93ed75477dcc7230574c175f0c4909e3297 | refs/heads/main | 2023-08-28T06:13:33.411331 | 2021-10-23T15:32:54 | 2021-10-23T15:32:54 | 419,301,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | import poplib
import email
import time
import quopri
class MailHelper:
    """Polls a POP3 mailbox and retrieves the body of a message by subject."""
    def __init__(self, app):
        # The fixture config supplies the POP3 host via
        # app.config['james']['host'] on every poll.
        self.app = app
    def get_mail(self, username, password, subject):
        """Return the decoded payload of the first message whose Subject
        equals `subject`, polling up to 5 times 3 seconds apart; None when no
        such message arrives.  The matched message is deleted from the server.
        """
        for i in range(5):
            pop = poplib.POP3(self.app.config['james']['host'])
            pop.user(username)
            pop.pass_(password)
            # stat() returns (message_count, mailbox_size).
            num = pop.stat()[0]
            if num > 0:
                for n in range(num):
                    # POP3 message numbers are 1-based.
                    msglines = pop.retr(n+1)[1]
                    msgtext = '\n'.join(map(lambda x: x.decode('utf-8'), msglines))
                    msg = email.message_from_string(msgtext)
                    if msg.get('Subject') == subject:
                        pop.dele(n+1)
                        # quit() commits the pending delete before disconnecting.
                        pop.quit()
                        text = msg.get_payload()
                        # Payload is quoted-printable; decode back to text.
                        return quopri.decodestring(text).decode('utf-8')
            # No match this round: drop the connection (without committing
            # deletions) and retry after a short pause.
            pop.close()
            time.sleep(3)
return None | [
"m.rezanova@gemark.ru"
] | m.rezanova@gemark.ru |
c3e860d10932c3d29943f4d48e4e7a4f0a10de48 | 8d8c276f32dbc3917bb64fc5d1d0e83e5c4884ce | /interviewBit/arrays/firstMissingNumber.py | 377b2c65286fefd588d6159a608e14e311fab054 | [] | no_license | UddhavNavneeth/DSA-practice | a29b1ca27d72a1af36fb9e4d2e515ac00c99eb38 | 9f7d03145a8f026420a7e4672098f7c7a0361570 | refs/heads/master | 2021-06-22T08:09:08.590179 | 2021-03-12T07:11:10 | 2021-03-12T07:11:10 | 204,329,462 | 3 | 3 | null | 2020-10-01T07:06:15 | 2019-08-25T17:47:34 | Java | UTF-8 | Python | false | false | 1,945 | py | # NOT DONE BY ME THIS IS FROM SOLUTION PROVIDED IN COMMENTS
# Link to the question:
# https://www.interviewbit.com/problems/first-missing-intgerer/
# ESSENTIAL TO KNOW "MARK PRESENCE OF ELEMENT X" LOGIC
class Solution:
    # @param A : list of integers
    # @return an integer
    def firstMissingPositive(self, A):
        """Return the smallest positive integer missing from A.

        O(n) time, O(1) extra space: A itself is reused as the presence
        table, so the list is modified in place.
        """
        n = len(A)
        # Clamp every out-of-range value to 1, noting whether 1 itself occurs.
        has_one = False
        for idx, val in enumerate(A):
            if val == 1:
                has_one = True
            if val > n or val < 1:
                A[idx] = 1
        # Without a 1 anywhere, the answer is trivially 1.
        if not has_one:
            return 1
        # Every value is now in [1, n].  Record "value v seen" by making
        # A[v-1] negative (only once per slot).
        for val in A:
            slot = abs(val) - 1
            if A[slot] > 0:
                A[slot] = -A[slot]
        # The first slot left positive identifies the missing integer.
        for idx, val in enumerate(A):
            if val > 0:
                return idx + 1
        # All of 1..n are present, so the answer is n+1.
        return n + 1
# MY LOGIC
# class Solution:
# # @param A : list of integers
# # @return an integer
# def firstMissingPositive(self, A):
# flag=0
# cntr=1
# A.sort()
# for i in range(len(A)):
# if (A[i]<=0):
# continue
# if (A[i]>0 and A[i]!=cntr):
# return cntr
# break
# cntr+=1
# flag=1
# if (flag==0):
# return 1
# return cntr | [
"uddhav.navneeth@gmail.com"
] | uddhav.navneeth@gmail.com |
ebd0772e3365bbe6a584056283e29f916a35c811 | 3196e5157cd7d2bffb6f810ec3643be18e1c5e04 | /simon.py | 0324f726946347c470b27a2469594348575f7ce8 | [] | no_license | cam-rod/simon-pi | 12dae0c74cab3fbe336cd81f371ad2718720999f | 69b16e23711ed855b1dc1a00649fd2fb29459d4a | refs/heads/master | 2020-06-05T02:24:41.860711 | 2019-06-17T14:48:51 | 2019-06-17T14:48:51 | 192,281,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,604 | py | # Cameron Rodriguez
# June 17, 2019
# This game allows a user to play Simon on a Raspberry Pi.
"""
Data Dictionary
all_leds: list: contains the numbers of all the GPIO pins that power the LEDs
all_buttons: list: contains the numbers of all the GPIO pins that take input from the butttons
sequence: list: a randomly generated list of LED pins for the user to match with the buttons
led_delay: float: the amount of time between LED flashes during the "teaching" stage
level: int: the user's current level, which is the number of LEDs shown
matching_led: dict: matches each button input pin to the LED power pin for verification
temp: str: used to hold raw_input at start of game
lost: bool: indicates if the player has lost the game
checked: bool: indicates if the current sequence LED has been checked for an input match
"""
import time # For flashing LEDs
import RPi.GPIO as g # Controls Raspberry Pi GPIO pins
from random import randint # Generates sequence of lights
# This function initializes the GPIO pins and introduces the player to the game.
def initialize():
    """Configure the GPIO pins (LEDs as outputs, buttons as pulled-down
    inputs), power the button supply rail, and print the game rules."""
    # Setup system
    g.setmode(g.BOARD)
    g.setwarnings(False)
    # Setup input (with pulldown resistor) and output pins
    g.setup([11, 33, 35, 36, 37], g.OUT)
    g.setup(all_buttons, g.IN, pull_up_down=g.PUD_DOWN)
    g.output(11, g.HIGH) # Power for button supply
    # Introduce game
    print 'Welcome to Simon, Raspberry Pi edition! The rules are very simple. A series of LEDs will'
    print 'flash in a certain order; your job is to press the buttons below the LEDs in the same'
    print 'order. You can play after the LEDs flash thrice, by pressing each button for less than a second.'
    print 'You can press the next button after an LED flashes. If you succeed, the LEDs will flash again'
    print 'in the same order with an addition. If you miss, the correct LED will flash for a few seconds'
    print 'before the game ends.\n'
    # raw_input blocks until the player presses Enter (Python 2 script).
    temp = raw_input('When you are ready to start the game, press Enter/Return.')
# End initialize
# This function runs the game.
def gameplay():
    """Main game loop: each level appends one LED to the global sequence,
    replays the whole sequence, then reads the player's button presses; a
    wrong press replays the correct LED five times and ends the game."""
    # Modify global vars
    global sequence, led_delay, level
    lost = False
    checked = False
    # Display a countdown ending
    for i in range(5,-1,-1):
        print i
        time.sleep(1)
    # End for i
    print ''
    # Main gameplay loop
    while lost is False:
        level += 1
        print 'Level {}'.format(level)
        # Generate and display the sequence
        sequence.append(single_led(randint(34,37)))
        for i in sequence:
            g.output(i, g.HIGH)
            time.sleep(led_delay)
            g.output(i, g.LOW)
            time.sleep(led_delay)
        # End for i
        # Flash all LEDs thrice to indicate user turn
        for i in range(3):
            g.output(all_leds, g.HIGH)
            time.sleep(0.1)
            g.output(all_leds, g.LOW)
            time.sleep(0.1)
        # End for i
        # Scan for correct user input
        for i in range(len(sequence)):
            checked = False
            while checked is False:
                for j in all_buttons:
                    if g.input(j): # Button j was pressed
                        if matching_led[j] == sequence[i]:
                            checked = True # Break button scan loop
                            g.output(matching_led[j], g.HIGH) # Flash LED
                            time.sleep(0.7)
                            g.output(matching_led[j], g.LOW)
                        else:
                            for k in range(5): # Flash the correct answer 5 times
                                g.output(sequence[i], g.HIGH)
                                time.sleep(0.5)
                                g.output(sequence[i], g.LOW)
                                time.sleep(0.5)
                            # End for i
                            lost = True
                            checked = True
                        # End if right_button(i,j)
                    # End if g.input(j)
                # End for j
            # End while checked
            if lost:
                break
            # End if lost
        # End for i
        if lost:
            pass
        else:
            for i in range(3): # Flash all LEDs to indicate successful entry
                g.output(all_leds, g.HIGH)
                time.sleep(0.1)
                g.output(all_leds, g.LOW)
                time.sleep(0.1)
            # End for i
            if len(sequence) < 19:
                led_delay -= 0.05 # Decrease the flash interval
            # End if len(sequence)
            time.sleep(1) # Break before next level
# End if lost
# End while lost
# End gameplay
# This function declares a loss and closes the program
def endgame():
    """Print the final level reached, power everything down, and release the
    GPIO pins."""
    # Display congratulatory message
    print '\nCongratulations, you made it to level {}!'.format(level)
    print 'Thanks for playing Simon. Goodbye!'
    # Disable GPIO pins and cleanup
    g.output(all_leds, g.LOW)
    g.output(11, g.LOW) # Power off buttons
    g.cleanup()
    # Leave message visible for 5 seconds
    time.sleep(5)
# End endgame
if __name__ == '__main__':
    # Create global variables for arrays of pins
    all_leds = [33, 35, 36, 37]
    all_buttons = [13, 15, 38, 40]
    # Create global variables for game
    sequence = []
    led_delay = 1.00
    level = 0
    # Maps each button input pin to the LED output pin it sits under.
    matching_led = {13: 33, 15: 35, 38: 36, 40: 37}
    single_led = lambda x: x if x <> 34 else 33 # Used to correct randint call to first pin
    # Run the game
    initialize()
    gameplay()
    endgame()
# End if __name__ | [
"rod.cam2014+dev@gmail.com"
] | rod.cam2014+dev@gmail.com |
eab63574f4dcecacf3e31ea1d9388a3c31a188e0 | 88966ba71258c6e7163b3a648bed3fe773547e46 | /create_database.py | ccc91ddbfb840fec4b25054c7a0cf2875734a98a | [] | no_license | mauricioobgo/mvc_mauricio_obando | d4eaed8f05c29e38be57231f47337e87cd7be9a9 | 729c83d84bff374e155b5bf1d0e4fe66400643b0 | refs/heads/master | 2022-07-19T06:53:18.193798 | 2020-05-20T18:43:35 | 2020-05-20T18:43:35 | 265,653,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | from app_index import database
import db.user
import db.book
import db.comments
# Create every table declared by the imported model modules (db.user/book/comments).
database.create_all()
print("database created with success...") | [
"mauricioobgo@gmail.com"
] | mauricioobgo@gmail.com |
d5417d605f2204782ab1b6dd38bcb7262adc6354 | 99ae6372a5a5518543f9863a33ab21218a3a0768 | /tests/test-all.py | 31554599186717cf11032773e371545ac5143bde | [] | no_license | DANS-KNAW/parthenos-widget | 7b3578a37402069e99da8eaf0d8cf52f32c12231 | b549b76b7f16f1338cd80c6af7952963b3a8dd63 | refs/heads/master | 2022-07-17T23:07:25.238193 | 2021-11-03T08:36:55 | 2021-11-03T08:36:55 | 84,067,894 | 0 | 3 | null | 2021-11-03T08:36:55 | 2017-03-06T11:56:28 | JavaScript | UTF-8 | Python | false | false | 684 | py | #!/usr/bin/python
from __future__ import print_function, absolute_import
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))
#import pytest
from tests.config import MATRIX
from parthenos.core.datatojson import *
import uuid
import httpretty
import requests
import pandas as pd
import simplejson
import json
# Smoke-test driver: exercise the datatojson helpers and print their output.
if __name__ == '__main__':
    print ('%s' % contents(0))
    print ('%s' % gettopics("SOCIAL SCIENCE"))
    print ('%s' % gettopics("LANGUAGE STUDIES"))
# print ('%s' % policies(4))
# (df, fairtest) = fair(4)
# print ('%s' % fairtest)
# x = fairfilter(df, fairtest, 'fair')
# print ('%s' % x.to_html())
| [
"4tikhonov@gmail.com"
] | 4tikhonov@gmail.com |
b628ccb6e816caed898e1c576e2f650c75fb2fdc | 223c4821db9fb16fb2695fc238e93b6cd45f1735 | /rand_init.py | 6ab9e8c8894e5a98e2dae15fe4538cf1f3b986bf | [] | no_license | DJ73/ShellCrypt | 3ad0e91dbe955ad0fa30d616f863ea0e6446fe05 | a3a360ea9a290c9f9daeddefb23116ba47417a85 | refs/heads/master | 2023-08-28T23:39:21.173727 | 2021-10-31T17:58:28 | 2021-10-31T17:58:28 | 294,967,073 | 0 | 3 | null | 2021-10-31T17:58:29 | 2020-09-12T15:07:00 | Python | UTF-8 | Python | false | false | 186 | py | # generate random integer values
from random import seed as sd
from random import randint
def gen(seed):
    """Infinite, seed-deterministic generator of pseudo-random ints in [0, 26]."""
    sd(seed)
    while True:
        yield randint(0, 26)
| [
"noreply@github.com"
] | DJ73.noreply@github.com |
69e8a54db83cd07fa4005fa7153ae12482392824 | 86eada7154a99a22fad685b4a7356718ed3a830a | /org_work/migrations/0007_auto_20180501_0638.py | ddc3913f28775a4e31f050fb64cf382459b63dc9 | [] | no_license | ahomentc/gameCoop2 | d1cf704cbcb1ed1d416d3310def62522e371cce0 | 6068cf7e4b9fba0fe2f5a7d6747146ada90390f5 | refs/heads/master | 2021-04-18T21:30:07.552185 | 2018-11-24T08:15:15 | 2018-11-24T08:15:15 | 126,461,344 | 0 | 0 | null | 2018-10-19T08:10:47 | 2018-03-23T09:17:26 | HTML | UTF-8 | Python | false | false | 595 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-05-01 06:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: redefines Projects.parent as an optional
    self-referencing foreign key (CASCADE on delete)."""
    dependencies = [
        ('org_work', '0006_auto_20180428_0820'),
    ]
    operations = [
        migrations.AlterField(
            model_name='projects',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='relatedParent', to='org_work.Projects'),
        ),
]
| [
"ahomentc@gmail.com"
] | ahomentc@gmail.com |
dde07929f74ea85ebccd3d70da86cf394e93fb13 | 6c1691eb80c4cdb98d5cb8d7ec5b0d3c11a08c87 | /desafio074.py | 292336bed8feb5478721a4ebffc7d2f8803d63c0 | [] | no_license | lodi-jesse/aula_python | 52d26edc8044e569f5480ba2220a03a233dd967a | 791d85a3804321f1fb0e0d8673c8c1a3b4193667 | refs/heads/main | 2023-03-11T18:10:56.282357 | 2021-02-28T05:12:22 | 2021-02-28T05:12:22 | 343,025,062 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | import os
from random import randint
os.system('cls')
numeros = ((randint(1,10)),(randint(1,10)),(randint(1,10)),(randint(1,10)),(randint(1,10)))
print('Os valores sorteados foram: ',end='')
for n in numeros:
print(f'{n} ',end='')
print(f'\nO maior valor sorteado foi {max(numeros)}')
print(f'O menor valor sorteado foi {min(numeros)}') | [
"noreply@github.com"
] | lodi-jesse.noreply@github.com |
cbe5fcfebd86005da1e5423f1e9442ad8e4d6add | df475886cbd050884c3661ced12d1075d5b3da92 | /fcblog/app/errors.py | aaf04ba8b7804ca6e9a1aa7dfaa436c3f66f35c3 | [
"MIT"
] | permissive | francoiscolombo/sample | f722341380fdd2fc71b4647eea2b5bb3116c8abb | 87a1943b815da8d8c3772758b8181df27242d216 | refs/heads/master | 2022-09-21T15:01:59.031570 | 2020-03-12T09:58:17 | 2020-03-12T09:58:17 | 245,798,323 | 1 | 0 | MIT | 2022-09-16T18:19:46 | 2020-03-08T11:00:43 | Python | UTF-8 | Python | false | false | 271 | py | from flask import render_template
from app import app, db
@app.errorhandler(404)
def not_found_error(error):
    """Render the custom 404 page for unknown URLs."""
    return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
    """Render the custom 500 page; roll back the DB session first so a failed
    transaction cannot poison subsequent requests."""
    db.session.rollback()
    return render_template('500.html'), 500
| [
"francois_colombo@yahoo.fr"
] | francois_colombo@yahoo.fr |
06addf9a3de52d49cc7fba98acb67212c16517a1 | adf2d484cc7032c98d14aeda0d78730e0c613930 | /python/AA/k_measure_problem.py | a1fad5bbc2a51417afffb1540fe45cede5d3aacd | [] | no_license | jhg3522/Algorithm_Study | aaefd44e5f7c510186602ef1ff0786750cc69517 | 00c411cd15fce0da7f17f34ccb806fd7cf20ef58 | refs/heads/master | 2023-02-15T05:11:38.332685 | 2021-01-08T15:30:24 | 2021-01-08T15:30:24 | 296,337,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | import sys
#sys.stdin=open("input.txt", "rt")
n,m=map(int,input().split()) # read several ints at once: map() plus whitespace split()
cnt=0
# Scan candidate divisors of n from 1 up; print the m-th divisor when found.
for i in range(1,n+1): # divisors must be searched starting from 1
    if(n%i==0):
        cnt+=1
        if cnt==m:
            print(i)
            break
else: # for-else: runs only when the loop finished without hitting break
print(-1) | [
"jhg3522@kookmin.ac.kr"
] | jhg3522@kookmin.ac.kr |
9123c8acc110b0930239e6e84a61e5dc899fd8f3 | 1ffde96b8fdbb53c2e8c31cac539751b0b70032f | /app.py | 7fc4ac989ced98cb1b62bb538247b85f69339cb3 | [] | no_license | krung2/WebProgramming_test | eae227b8f102d3111d811372d832afad4296240a | 32be019d5d26316c59da464e287a2f4fcce987af | refs/heads/master | 2023-04-25T06:14:43.945090 | 2021-05-13T04:33:32 | 2021-05-13T04:33:32 | 366,927,844 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,549 | py | import pymysql
import os
import requests, json
from flask import *
from flaskext.mysql import MySQL
# Flask app + MySQL setup; connection settings are loaded from settings.cfg.
app = Flask(__name__)
os.environ['APP_SETTING'] = 'settings.cfg'
app.config.from_envvar('APP_SETTING')
mysql = MySQL(
    # DictCursor makes rows come back as dicts, e.g. user['name'] below.
    cursorclass=pymysql.cursors.DictCursor
)
mysql.init_app(app)
@app.route('/')
def main():
    """Landing page for anonymous visitors."""
    return render_template('index.html')
@app.route('/logged')
def main_login():
    """Landing page for logged-in users; bounces to '/' without a session."""
    name = session.get('name', None)
    if name is None:
        return redirect('/')
    return render_template('index_logged.html', name=name)
@app.route('/logout')
def logout():
    """Clear the login session and return to the landing page.

    Uses session.pop() instead of the previous `del session['name']`:
    hitting /logout without an active session raised KeyError (HTTP 500).
    """
    session.pop('name', None)
    return redirect('/')
@app.route('/login')
def login():
    """Render the login form."""
    return render_template('login.html')
@app.route('/login', methods=['POST'])
def api_login():
    """Check the submitted credentials; on success store the user's name in
    the session and redirect to /logged, otherwise abort with 401."""
    # NOTE(review): passwords are stored and compared in plain text; they
    # should be hashed (e.g. werkzeug.security) -- requires a data migration.
    id = request.values.get('id')
    pw = request.values.get('pw')
    conn = mysql.get_db()
    cursor = conn.cursor()
    # Parameterized query -- safe against SQL injection.
    cursor.execute('SELECT * FROM user WHERE id = %s AND pw = %s', [id, pw])
    user = cursor.fetchone()
    if user is None:
        abort(401, "id 또는 pw를 확인해주세요")
    session['name'] = user['name']
    return redirect('/logged')
@app.route('/register')
def register():
    """Render the signup form."""
    return render_template('join.html')
@app.route('/register', methods=['POST'])
def api_register():
    """Create a new user unless the id is taken, then redirect to /login."""
    id = request.values.get('id')
    pw = request.values.get('pw')
    name = request.values.get('name')
    conn = mysql.get_db()
    cursor = conn.cursor()
    cursor.execute('SELECT * FROM user WHERE id = %s', [id])
    user = cursor.fetchone()
    if user is not None:
        abort(401, "이미 있는 id입니다")
    else:
        # NOTE(review): password is persisted in plain text -- should be hashed.
        cursor.execute('INSERT INTO user(id, pw, name) VALUES (%s, %s, %s)', [id, pw, name])
        conn.commit()
    return redirect('/login')
@app.route('/guestBook')
def geustBook():
    """List all guest-book posts, newest first; passes an isLogin flag so the
    template can show the posting form only to logged-in users."""
    name = session.get('name', None);
    isLogin = 0
    if name is not None :
        isLogin = 1
    conn = mysql.get_db()
    cursor = conn.cursor()
    cursor.execute('SELECT * FROM POST ORDER BY idx DESC')
    posts = cursor.fetchall()
    return render_template('guestBook.html', isLogin=isLogin, name=name, posts=posts)
@app.route('/guestBook', methods=['POST'])
def registerGuestBook():
    """Insert a new guest-book post and redirect back to the list."""
    name = request.values.get('name')
    content = request.values.get('content')
    conn = mysql.get_db()
    cursor = conn.cursor()
    cursor.execute('INSERT INTO post(author, content) VALUES (%s, %s)', [name, content])
    conn.commit()
    return redirect('/guestBook')
# Dev server bound to all interfaces with the Werkzeug debugger enabled.
# NOTE(review): debug=True must never ship to production (debugger allows RCE).
app.run(host='0.0.0.0', port=5000, debug=True)
| [
"jungbin4337@dgsw.hs.kr"
] | jungbin4337@dgsw.hs.kr |
71a5c19173ba5d7fff6a672bc740bfb2be5740e7 | 6c3d259cf90cf110ca1e14cd88b2b41e143072c6 | /snippets/my_cifar_cnn.py | 0b9bba6e1ae85b0195a155b95e49c399972f6bc4 | [] | no_license | mrrizal/Pytorch_scholarship_challenge | ee3e7e325558a1b0ee0299be1444602f5875bca4 | b73d53e3b8c429eac27fdd83ae1b3bbf4b174419 | refs/heads/master | 2020-04-07T02:46:35.827558 | 2018-12-10T09:22:20 | 2018-12-10T09:22:20 | 157,989,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,342 | py | import torch
import numpy as np
from torchvision import datasets
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from my_cnn_v2 import Cnn
class Net(nn.Module):
    """Three conv/pool stages plus a two-layer classifier head for
    32x32 RGB images (10 output classes, raw logits)."""
    def __init__(self):
        super(Net, self).__init__()
        # Channel progression 3 -> 16 -> 32 -> 64 with 3x3 kernels;
        # padding=1 keeps spatial size, and each stage is max-pooled 2x2
        # in forward(), so 32x32 shrinks to 4x4 before flattening.
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        # Classifier head on the flattened 4*4*64 feature map.
        self.fc1 = nn.Linear(4 * 4 * 64, 500)
        self.fc2 = nn.Linear(500, 10)
        self.dropout = nn.Dropout(0.25)
    def forward(self, x):
        """Return class logits for a (N, 3, 32, 32) batch."""
        for conv in (self.conv1, self.conv2, self.conv3):
            x = self.pool(F.relu(conv(x)))
        x = x.view(-1, 4 * 4 * 64)
        x = self.dropout(x)
        x = self.dropout(F.relu(self.fc1(x)))
        return self.fc2(x)
def initialize_transform():
    """Build the ToTensor + per-channel normalization pipeline
    (mean 0.5, std 0.5 for each RGB channel)."""
    steps = [
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]
    return transforms.Compose(steps)
def load_cifar(transform):
    """Return the (train, test) CIFAR-10 datasets from ./data.

    download=False: the archives are expected to be on disk already.
    """
    common = dict(download=False, transform=transform)
    train_data = datasets.CIFAR10('data', train=True, **common)
    test_data = datasets.CIFAR10('data', train=False, **common)
    return train_data, test_data
def split_train(train, valid_size=0.2):
    """Randomly partition dataset indices into (train, validation) lists.

    valid_size is the fraction of samples (rounded down) used for
    validation; the shuffle is unseeded, so the split differs per run.
    """
    order = list(range(len(train)))
    np.random.shuffle(order)
    cut = int(np.floor(valid_size * len(train)))
    # First `cut` shuffled indices -> validation, remainder -> training.
    return order[cut:], order[:cut]
def initialize_data_loader(data, batch_size, sampler=None, num_workers=0):
    """Thin wrapper around torch's DataLoader with this project's defaults."""
    loader_kwargs = dict(batch_size=batch_size, sampler=sampler,
                         num_workers=num_workers)
    return torch.utils.data.DataLoader(data, **loader_kwargs)
def train(model, epochs, train_loader, valid_loader, filename):
    """Train `model` with SGD (lr=0.01) and cross-entropy loss,
    checkpointing to '<filename>.pt' whenever validation loss improves.

    train_loader / valid_loader must yield (data, target) batches.
    """
    valid_loss_min = np.inf  # np.Inf was removed in NumPy 2.0
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01)
    for epoch in range(1, epochs + 1):
        train_loss = 0
        valid_loss = 0
        # train model
        model.train()
        for data, target in train_loader:
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            # loss.item() is a per-sample mean; weight by the batch size
            # so the epoch average below is exact.
            train_loss += loss.item() * data.size(0)
        # validate model -- no_grad: no autograd graph is needed here,
        # which saves memory and time without changing the loss values.
        model.eval()
        with torch.no_grad():
            for data, target in valid_loader:
                output = model(data)
                loss = criterion(output, target)
                valid_loss += loss.item() * data.size(0)
        # NOTE(review): when the loaders use SubsetRandomSampler, dividing
        # by len(dataset) overstates the denominator; kept as-is so the
        # reported numbers stay identical to the original.
        train_loss = train_loss / len(train_loader.dataset)
        valid_loss = valid_loss / len(valid_loader.dataset)
        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.
              format(epoch, train_loss, valid_loss))
        # save model if validation loss has decreased
        if valid_loss <= valid_loss_min:
            print(
                'Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.
                format(valid_loss_min, valid_loss))
            torch.save(model.state_dict(), '{}.pt'.format(filename))
            valid_loss_min = valid_loss
def test(model, filename, data_test, batch_size, classes):
    """Evaluate a checkpointed model on `data_test`, printing the test
    loss plus per-class and overall accuracy.

    filename: path of a state_dict saved with torch.save().
    classes: list of the 10 class names, indexed by label.
    batch_size: kept for interface compatibility; the loop now uses each
        batch's real length, so a smaller final batch no longer raises
        IndexError.
    """
    criterion = nn.CrossEntropyLoss()
    model.load_state_dict(torch.load(filename))
    test_loss = 0.0
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    model.eval()
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        for data, target in data_test:
            output = model(data)
            loss = criterion(output, target)
            test_loss += loss.item() * data.size(0)
            _, pred = torch.max(output, 1)
            # (N,) bool array; the old np.squeeze() collapsed N == 1 to a
            # 0-d array and broke the indexing below.
            correct = pred.eq(target.data.view_as(pred)).numpy()
            # Iterate the actual batch length, not `batch_size`, so the
            # final (possibly smaller) batch is handled correctly.
            for i in range(data.size(0)):
                label = target.data[i]
                class_correct[label] += correct[i].item()
                class_total[label] += 1
    test_loss = test_loss / len(data_test.dataset)
    print('Test Loss: {:.6f}\n'.format(test_loss))
    for i in range(10):
        if class_total[i] > 0:
            print('Test Accuracy of %5s: %2d%% (%2d/%2d)' %
                  (classes[i], 100 * class_correct[i] / class_total[i],
                   np.sum(class_correct[i]), np.sum(class_total[i])))
        else:
            print('Test Accuracy of %5s: N/A (no training examples)' %
                  (classes[i]))
    print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' %
          (100. * np.sum(class_correct) / np.sum(class_total),
           np.sum(class_correct), np.sum(class_total)))
np.sum(class_correct), np.sum(class_total)))
if __name__ == '__main__':
    # Script entry point: build the CIFAR-10 loaders from local ./data,
    # then evaluate two previously saved checkpoints.
    # prepare data
    batch_size = 20
    classes = [
        'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog',
        'horse', 'ship', 'truck'
    ]
    transform = initialize_transform()
    train_data, test_data = load_cifar(transform)
    train_idx, valid_idx = split_train(train_data)
    # Train and validation loaders share train_data but sample disjoint
    # index subsets produced by split_train().
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=batch_size,
        sampler=SubsetRandomSampler(train_idx),
        num_workers=0)
    valid_loader = initialize_data_loader(
        data=train_data,
        batch_size=batch_size,
        sampler=SubsetRandomSampler(valid_idx),
        num_workers=0)
    test_loader = initialize_data_loader(
        data=test_data, batch_size=batch_size, num_workers=0)
    # # initialize model
    # model = Net()
    # model = Cnn()
    # print(model)
    # train model
    # train(
    #     model=model,
    #     epochs=30,
    #     train_loader=train_loader,
    #     valid_loader=valid_loader,
    #     filename='model1_cifar')
    # test(
    #     model=model,
    #     filename='model1_cifar.pt',
    #     data_test=test_loader,
    #     batch_size=batch_size,
    #     classes=classes)
    # Evaluate both checkpoints.
    # NOTE(review): the commented-out training above saved Net() as
    # 'model1_cifar', yet here 'model1_cifar.pt' is loaded into Cnn() --
    # verify each checkpoint matches its architecture or load will fail.
    model1 = Net()
    model2 = Cnn()
    models = {'model_cifar.pt': model1, 'model1_cifar.pt': model2}
    for key, value in models.items():
        test(
            model=value,
            filename=key,
            data_test=test_loader,
            batch_size=batch_size,
            classes=classes)
| [
"rizalubuntuuser@gmail.com"
] | rizalubuntuuser@gmail.com |
f0d1e9cd3a11b5165bb8bf6e4f5fa9bb5e8449de | 09ceac296c625de4f5ca3b012fec5ec24a164502 | /ve4hp/bin/mturk | 7f7662a917c1cd5142fb9859ebb58970d515b07e | [] | no_license | ShinyRyo/HPcalc | 94bafb0415cc0e3ab318561c5730ce4d5ff387f6 | 46ae8bd3fb5866ab0cadb80ca92b346fdd0335d1 | refs/heads/master | 2022-04-20T12:39:37.401445 | 2019-09-12T23:10:51 | 2019-09-12T23:10:51 | 255,493,097 | 0 | 0 | null | 2020-04-14T02:42:58 | 2020-04-14T02:42:57 | null | UTF-8 | Python | false | false | 19,196 | #!/mnt/d/desktop/program/apps/heroku-django/HPcalc/ve4hp/bin/python3
# Copyright 2012, 2014 Kodi Arfer
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import argparse # Hence, Python 2.7 is required.
import sys
import os.path
import string
import inspect
import datetime, calendar
import boto.mturk.connection, boto.mturk.price, boto.mturk.question, boto.mturk.qualification
from boto.compat import json
# --------------------------------------------------
# Globals
# -------------------------------------------------
interactive = False
con = None
mturk_website = None
default_nicknames_path = os.path.expanduser('~/.boto_mturkcli_hit_nicknames')
nicknames = {}
nickname_pool = set(string.ascii_lowercase)
get_assignments_page_size = 100
time_units = dict(
s = 1,
min = 60,
h = 60 * 60,
d = 24 * 60 * 60)
qual_requirements = dict(
Adult = '00000000000000000060',
Locale = '00000000000000000071',
NumberHITsApproved = '00000000000000000040',
PercentAssignmentsSubmitted = '00000000000000000000',
PercentAssignmentsAbandoned = '00000000000000000070',
PercentAssignmentsReturned = '000000000000000000E0',
PercentAssignmentsApproved = '000000000000000000L0',
PercentAssignmentsRejected = '000000000000000000S0')
qual_comparators = {v : k for k, v in dict(
LessThan = '<', LessThanOrEqualTo = '<=',
GreaterThan = '>', GreaterThanOrEqualTo = '>=',
EqualTo = '==', NotEqualTo = '!=',
Exists = 'exists').items()}
example_config_file = '''Example configuration file:
{
"title": "Pick your favorite color",
"description": "In this task, you are asked to pick your favorite color.",
"reward": 0.50,
"assignments": 10,
"duration": "20 min",
"keywords": ["color", "favorites", "survey"],
"lifetime": "7 d",
"approval_delay": "14 d",
"qualifications": [
"PercentAssignmentsApproved > 90",
"Locale == US",
"2ARFPLSP75KLA8M8DH1HTEQVJT3SY6 exists"
],
"question_url": "http://example.com/myhit",
"question_frame_height": 450
}'''
# --------------------------------------------------
# Subroutines
# --------------------------------------------------
def unjson(path):
    """Load and return the JSON document stored at `path`."""
    with open(path) as handle:
        return json.load(handle)
def add_argparse_arguments(parser):
    """Attach the CLI options shared by every mturk invocation."""
    # -P flips the store_false flag, so args.sandbox defaults to True.
    parser.add_argument(
        '-P', '--production',
        action = 'store_false', dest = 'sandbox', default = True,
        help = 'use the production site (default: use the sandbox)')
    parser.add_argument(
        '--nicknames',
        metavar = 'PATH', dest = 'nicknames_path',
        default = default_nicknames_path,
        help = 'where to store HIT nicknames (default: {})'.format(
            default_nicknames_path))
def init_by_args(args):
    """Initialize the MTurk connection from parsed CLI arguments."""
    init(sandbox=args.sandbox, nicknames_path=args.nicknames_path)
def init(sandbox = False, nicknames_path = default_nicknames_path):
    """Open the (global) MTurk connection and load HIT nicknames.

    Mutates module globals: con, mturk_website, nicknames and
    original_nicknames (the latter lets save_nicknames() detect changes).
    """
    global con, mturk_website, nicknames, original_nicknames
    # Sandbox and production are distinct hosts with separate data.
    mturk_website = 'workersandbox.mturk.com' if sandbox else 'www.mturk.com'
    con = boto.mturk.connection.MTurkConnection(
        host = 'mechanicalturk.sandbox.amazonaws.com' if sandbox else 'mechanicalturk.amazonaws.com')
    try:
        nicknames = unjson(nicknames_path)
    except IOError:
        # A missing nickname file is fine on first run.
        nicknames = {}
    original_nicknames = nicknames.copy()
def save_nicknames(nicknames_path = default_nicknames_path):
    """Write the nickname map back to disk, but only if it changed."""
    if nicknames != original_nicknames:
        with open(nicknames_path, 'w') as o:
            json.dump(nicknames, o, sort_keys = True, indent = 4)
            # Python 2 syntax: print a trailing newline into the file.
            print >>o
def parse_duration(s):
    '''Parses durations like "2 d", "48 h", "2880 min",
    "172800 s", or "172800".'''
    parts = s.split()
    # A bare number is taken to be seconds.
    unit = parts[1] if len(parts) > 1 else 's'
    return int(parts[0]) * time_units[unit]
def display_duration(n):
    """Render `n` seconds using the largest unit that divides it evenly,
    e.g. 172800 -> "2 d". (The "s" unit maps to 1, so some unit always
    matches for integer n.)"""
    for unit, m in sorted(time_units.items(), key = lambda x: -x[1]):
        if n % m == 0:
            # Floor division keeps the quotient an int on Python 3 too;
            # plain "/" would render "2.0 d" there (identical on py2 ints).
            return '{} {}'.format(n // m, unit)
def parse_qualification(inp):
    '''Parses qualifications like "PercentAssignmentsApproved > 90",
    "Locale == US", and "2ARFPLSP75KLA8M8DH1HTEQVJT3SY6 exists".'''
    tokens = inp.split()
    name, comparator = tokens[0], tokens[1]
    value = tokens[2] if len(tokens) > 2 else None
    # Known friendly names resolve to Amazon's type IDs; anything else is
    # assumed to already be a Qualification Type ID.
    qtid = qual_requirements.get(name, name)
    if qtid == qual_requirements['Locale']:
        return boto.mturk.qualification.LocaleRequirement(
            qual_comparators[comparator],
            value,
            required_to_preview = False)
    # required_to_preview is true only for the Worker_Adult requirement.
    return boto.mturk.qualification.Requirement(
        qtid,
        qual_comparators[comparator],
        value,
        required_to_preview = qtid == qual_requirements['Adult'])
def preview_url(hit):
    """Worker-facing preview link for the HIT's group."""
    base = 'https://{}/mturk/preview?groupId={}'
    return base.format(mturk_website, hit.HITTypeId)
def parse_timestamp(s):
    '''Takes a timestamp like "2012-11-24T16:34:41Z".
    Returns a datetime object in the local time zone.'''
    # Parse as UTC, convert to a POSIX timestamp, then re-interpret that
    # instant in local time.
    utc = datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%SZ')
    epoch_seconds = calendar.timegm(utc.timetuple())
    return datetime.datetime.fromtimestamp(epoch_seconds)
def get_hitid(nickname_or_hitid):
    """Resolve a nickname to its HIT ID; pass real HIT IDs through."""
    resolved = nicknames.get(nickname_or_hitid)
    return resolved if resolved else nickname_or_hitid
def get_nickname(hitid):
    """Reverse lookup: nickname for a HIT ID, or None if it has none."""
    matches = (nick for nick, hid in nicknames.items() if hid == hitid)
    return next(matches, None)
def display_datetime(dt):
    """Human-readable local time, e.g. "24 Nov 2012,  4:34 pm".

    Uses the glibc strftime extensions %e, %l and %P (space-padded day
    and hour, lowercase am/pm), so output is platform-dependent.
    """
    fmt = '%e %b %Y, %l:%M %P'
    return dt.strftime(fmt)
def display_hit(hit, verbose = False):
    """Format one boto HIT object as a multi-line human-readable summary.

    verbose=True appends the description and keywords. Always ends with
    a trailing newline.
    """
    et = parse_timestamp(hit.Expiration)
    return '\n'.join([
        # Header: "<nickname> - <title> (<price>, <duration>, <status>)"
        '{} - {} ({}, {}, {})'.format(
            get_nickname(hit.HITId),
            hit.Title,
            hit.FormattedPrice,
            display_duration(int(hit.AssignmentDurationInSeconds)),
            hit.HITStatus),
        'HIT ID: ' + hit.HITId,
        'Type ID: ' + hit.HITTypeId,
        'Group ID: ' + hit.HITGroupId,
        'Preview: ' + preview_url(hit),
        'Created {} {}'.format(
            display_datetime(parse_timestamp(hit.CreationTime)),
            'Expired' if et <= datetime.datetime.now() else
            'Expires ' + display_datetime(et)),
        # "reviewable" is derived: max minus (available + pending + completed).
        'Assignments: {} -- {} avail, {} pending, {} reviewable, {} reviewed'.format(
            hit.MaxAssignments,
            hit.NumberOfAssignmentsAvailable,
            hit.NumberOfAssignmentsPending,
            int(hit.MaxAssignments) - (int(hit.NumberOfAssignmentsAvailable) + int(hit.NumberOfAssignmentsPending) + int(hit.NumberOfAssignmentsCompleted)),
            hit.NumberOfAssignmentsCompleted)
        if hasattr(hit, 'NumberOfAssignmentsAvailable')
        else 'Assignments: {} total'.format(hit.MaxAssignments),
        # For some reason, SearchHITs includes the
        # NumberOfAssignmentsFoobar fields but GetHIT doesn't.
    ] + ([] if not verbose else [
        '\nDescription: ' + hit.Description,
        '\nKeywords: ' + hit.Keywords
    ])) + '\n'
def digest_assignment(a):
    """Flatten a boto assignment object into a plain dict of strings."""
    fields = ('AcceptTime', 'SubmitTime',
              'HITId', 'AssignmentId', 'WorkerId',
              'AssignmentStatus')
    digest = {k: str(getattr(a, k)) for k in fields}
    # First answer sheet only; first free-text field of each answer.
    digest['answers'] = {str(x.qid): str(x.fields[0]) for x in a.answers[0]}
    return digest
# --------------------------------------------------
# Commands
# --------------------------------------------------
def get_balance():
    """Return the account's prepaid balance (requires init() first)."""
    return con.get_account_balance()
def show_hit(hit):
    """Return a verbose, human-readable description of one HIT."""
    return display_hit(con.get_hit(hit)[0], verbose = True)
def list_hits():
    'Lists your 10 most recently created HITs, with the most recent last.'
    hits = con.search_hits(
        sort_by = 'CreationTime',
        sort_direction = 'Descending',
        page_size = 10)
    # Materialize with list() so reversed() also works on Python 3,
    # where map() returns a non-reversible iterator (no-op on Python 2).
    return '\n'.join(reversed(list(map(display_hit, hits))))
def make_hit(title, description, keywords, reward, question_url, question_frame_height, duration, assignments, approval_delay, lifetime, qualifications = []):
    '''Creates an ExternalQuestion HIT and gives it a free one-letter
    nickname, if any remain. Interactively, prints the nickname, HIT ID
    and preview URL; as a library call, returns the boto HIT object.
    Note: `qualifications = []` is a mutable default, but it is only
    read (never mutated), so the shared-default pitfall does not bite.'''
    r = con.create_hit(
        title = title,
        description = description,
        keywords = con.get_keywords_as_string(keywords),
        reward = con.get_price_as_price(reward),
        question = boto.mturk.question.ExternalQuestion(
            question_url,
            question_frame_height),
        duration = parse_duration(duration),
        qualifications = boto.mturk.qualification.Qualifications(
            map(parse_qualification, qualifications)),
        max_assignments = assignments,
        approval_delay = parse_duration(approval_delay),
        lifetime = parse_duration(lifetime))
    # Hand out the alphabetically first unused single-letter nickname.
    nick = None
    available_nicks = nickname_pool - set(nicknames.keys())
    if available_nicks:
        nick = min(available_nicks)
        nicknames[nick] = r[0].HITId
    if interactive:
        print 'Nickname:', nick
        print 'HIT ID:', r[0].HITId
        print 'Preview:', preview_url(r[0])
    else:
        return r[0]
def extend_hit(hit, assignments_increment = None, expiration_increment = None):
    '''Adds assignments and/or lifetime seconds to an existing HIT.'''
    con.extend_hit(hit, assignments_increment, expiration_increment)
def expire_hit(hit):
    '''Ends the HIT immediately without deleting it.'''
    con.expire_hit(hit)
def delete_hit(hit):
    '''Deletes a HIT using DisableHIT.
    Unreviewed assignments get automatically approved. Unsubmitted
    assignments get automatically approved upon submission.
    The API docs say DisableHIT doesn't work with Reviewable HITs,
    but apparently, it does.'''
    con.disable_hit(hit)
    # Drop any nickname that pointed at the deleted HIT.
    global nicknames
    nicknames = {k: v for k, v in nicknames.items() if v != hit}
def list_assignments(hit, only_reviewable = False):
    '''Collects every (optionally only still-reviewable) assignment of a
    HIT by paging through the API; prints them interactively, otherwise
    returns the list of digests.'''
    # Accumulate all relevant assignments, one page of results at
    # a time.
    assignments = []
    page = 1
    while True:
        rs = con.get_assignments(
            hit_id = hit,
            page_size = get_assignments_page_size,
            page_number = page,
            status = 'Submitted' if only_reviewable else None)
        assignments += map(digest_assignment, rs)
        if len(assignments) >= int(rs.TotalNumResults):
            break
        page += 1
    if interactive:
        # Pretty JSON, then two copy-paste-friendly ID listings.
        print json.dumps(assignments, sort_keys = True, indent = 4)
        print ' '.join([a['AssignmentId'] for a in assignments])
        print ' '.join([a['WorkerId'] + ',' + a['AssignmentId'] for a in assignments])
    else:
        return assignments
def grant_bonus(message, amount, pairs):
    '''Pays `amount` USD to each (worker, assignment) pair, with the
    given reason message.'''
    for worker, assignment in pairs:
        con.grant_bonus(worker, assignment, con.get_price_as_price(amount), message)
        if interactive: print 'Bonused', worker
def approve_assignments(message, assignments):
    '''Approves each assignment ID, with an optional feedback message.'''
    for a in assignments:
        con.approve_assignment(a, message)
        if interactive: print 'Approved', a
def reject_assignments(message, assignments):
    '''Rejects each assignment ID, with an optional feedback message.'''
    for a in assignments:
        con.reject_assignment(a, message)
        if interactive: print 'Rejected', a
def unreject_assignments(message, assignments):
    '''Approves assignment IDs that were previously rejected.'''
    for a in assignments:
        con.approve_rejected_assignment(a, message)
        if interactive: print 'Unrejected', a
def notify_workers(subject, text, workers):
    '''Emails the given workers via Amazon's NotifyWorkers operation.'''
    con.notify_workers(workers, subject, text)
def give_qualification(qualification, workers, value = 1, notify = True):
    '''Assigns the qualification (with `value`) to each worker,
    optionally emailing them about it.'''
    for w in workers:
        con.assign_qualification(qualification, w, value, notify)
        if interactive: print 'Gave to', w
def revoke_qualification(qualification, workers, message = None):
    '''Takes the qualification away from each worker, with an optional
    reason shown to them.'''
    for w in workers:
        con.revoke_qualification(w, qualification, message)
        if interactive: print 'Revoked from', w
# --------------------------------------------------
# Mainline code
# --------------------------------------------------
if __name__ == '__main__':
    # CLI entry point: one subcommand per operation. Each subparser
    # stores its handler in `f` and a thunk `a` that builds the handler's
    # arguments (list -> f(*a), dict -> f(**a)) after parsing.
    interactive = True
    parser = argparse.ArgumentParser()
    add_argparse_arguments(parser)
    subs = parser.add_subparsers()
    sub = subs.add_parser('bal',
        help = 'display your prepaid balance')
    sub.set_defaults(f = get_balance, a = lambda: [])
    sub = subs.add_parser('hit',
        help = 'get information about a HIT')
    sub.add_argument('HIT',
        help = 'nickname or ID of the HIT to show')
    sub.set_defaults(f = show_hit, a = lambda:
        [get_hitid(args.HIT)])
    sub = subs.add_parser('hits',
        help = 'list all your HITs')
    sub.set_defaults(f = list_hits, a = lambda: [])
    sub = subs.add_parser('new',
        help = 'create a new HIT (external questions only)',
        epilog = example_config_file,
        formatter_class = argparse.RawDescriptionHelpFormatter)
    sub.add_argument('JSON_PATH',
        help = 'path to JSON configuration file for the HIT')
    sub.add_argument('-u', '--question-url', dest = 'question_url',
        metavar = 'URL',
        help = 'URL for the external question')
    sub.add_argument('-a', '--assignments', dest = 'assignments',
        type = int, metavar = 'N',
        help = 'number of assignments')
    sub.add_argument('-r', '--reward', dest = 'reward',
        type = float, metavar = 'PRICE',
        help = 'reward amount, in USD')
    # Command-line options override keys from the JSON config file.
    sub.set_defaults(f = make_hit, a = lambda: dict(
        unjson(args.JSON_PATH).items() + [(k, getattr(args, k))
            for k in ('question_url', 'assignments', 'reward')
            if getattr(args, k) is not None]))
    sub = subs.add_parser('extend',
        help = 'add assignments or time to a HIT')
    sub.add_argument('HIT',
        help = 'nickname or ID of the HIT to extend')
    sub.add_argument('-a', '--assignments', dest = 'assignments',
        metavar = 'N', type = int,
        help = 'number of assignments to add')
    sub.add_argument('-t', '--time', dest = 'time',
        metavar = 'T',
        help = 'amount of time to add to the expiration date')
    sub.set_defaults(f = extend_hit, a = lambda:
        [get_hitid(args.HIT), args.assignments,
            args.time and parse_duration(args.time)])
    sub = subs.add_parser('expire',
        help = 'force a HIT to expire without deleting it')
    sub.add_argument('HIT',
        help = 'nickname or ID of the HIT to expire')
    sub.set_defaults(f = expire_hit, a = lambda:
        [get_hitid(args.HIT)])
    sub = subs.add_parser('rm',
        help = 'delete a HIT')
    sub.add_argument('HIT',
        help = 'nickname or ID of the HIT to delete')
    sub.set_defaults(f = delete_hit, a = lambda:
        [get_hitid(args.HIT)])
    sub = subs.add_parser('as',
        help = "list a HIT's submitted assignments")
    sub.add_argument('HIT',
        help = 'nickname or ID of the HIT to get assignments for')
    sub.add_argument('-r', '--reviewable', dest = 'only_reviewable',
        action = 'store_true',
        help = 'show only unreviewed assignments')
    sub.set_defaults(f = list_assignments, a = lambda:
        [get_hitid(args.HIT), args.only_reviewable])
    # approve / reject / unreject share the same argument shape.
    for command, fun, helpmsg in [
            ('approve', approve_assignments, 'approve assignments'),
            ('reject', reject_assignments, 'reject assignments'),
            ('unreject', unreject_assignments, 'approve previously rejected assignments')]:
        sub = subs.add_parser(command, help = helpmsg)
        sub.add_argument('ASSIGNMENT', nargs = '+',
            help = 'ID of an assignment')
        sub.add_argument('-m', '--message', dest = 'message',
            metavar = 'TEXT',
            help = 'feedback message shown to workers')
        sub.set_defaults(f = fun, a = lambda:
            [args.message, args.ASSIGNMENT])
    sub = subs.add_parser('bonus',
        help = 'give some workers a bonus')
    sub.add_argument('AMOUNT', type = float,
        help = 'bonus amount, in USD')
    sub.add_argument('MESSAGE',
        help = 'the reason for the bonus (shown to workers in an email sent by MTurk)')
    sub.add_argument('WIDAID', nargs = '+',
        help = 'a WORKER_ID,ASSIGNMENT_ID pair')
    sub.set_defaults(f = grant_bonus, a = lambda:
        [args.MESSAGE, args.AMOUNT,
            [p.split(',') for p in args.WIDAID]])
    sub = subs.add_parser('notify',
        help = 'send a message to some workers')
    sub.add_argument('SUBJECT',
        help = 'subject of the message')
    sub.add_argument('MESSAGE',
        help = 'text of the message')
    sub.add_argument('WORKER', nargs = '+',
        help = 'ID of a worker')
    sub.set_defaults(f = notify_workers, a = lambda:
        [args.SUBJECT, args.MESSAGE, args.WORKER])
    sub = subs.add_parser('give-qual',
        help = 'give a qualification to some workers')
    sub.add_argument('QUAL',
        help = 'ID of the qualification')
    sub.add_argument('WORKER', nargs = '+',
        help = 'ID of a worker')
    sub.add_argument('-v', '--value', dest = 'value',
        metavar = 'N', type = int, default = 1,
        help = 'value of the qualification')
    sub.add_argument('--dontnotify', dest = 'notify',
        action = 'store_false', default = True,
        help = "don't notify workers")
    sub.set_defaults(f = give_qualification, a = lambda:
        [args.QUAL, args.WORKER, args.value, args.notify])
    sub = subs.add_parser('revoke-qual',
        help = 'revoke a qualification from some workers')
    sub.add_argument('QUAL',
        help = 'ID of the qualification')
    sub.add_argument('WORKER', nargs = '+',
        help = 'ID of a worker')
    sub.add_argument('-m', '--message', dest = 'message',
        metavar = 'TEXT',
        help = 'the reason the qualification was revoked (shown to workers in an email sent by MTurk)')
    sub.set_defaults(f = revoke_qualification, a = lambda:
        [args.QUAL, args.WORKER, args.message])
    args = parser.parse_args()
    init_by_args(args)
    f = args.f
    a = args.a()
    if isinstance(a, dict):
        # We do some introspective gymnastics so we can produce a
        # less incomprehensible error message if some arguments
        # are missing.
        spec = inspect.getargspec(f)
        missing = set(spec.args[: len(spec.args) - len(spec.defaults)]) - set(a.keys())
        if missing:
            raise ValueError('Missing arguments: ' + ', '.join(missing))
        doit = lambda: f(**a)
    else:
        doit = lambda: f(*a)
    try:
        x = doit()
    except boto.mturk.connection.MTurkRequestError as e:
        print 'MTurk error:', e.error_message
        sys.exit(1)
    if x is not None:
        print x
    # Persist any nickname changes made by the command (new/rm).
    save_nicknames()
| [
"tan0ry0atr002@gmail.com"
] | tan0ry0atr002@gmail.com | |
82dff9b5a80ff76ab3b5cc330c972f87777fd14b | 36934e99c4632370a83e7ed414446886deca2388 | /address/serializers.py | d78ffa14a08d604d0b8244526e72a115f7dec252 | [] | no_license | SDeVPro/umarketuz | a4dae81e3dd28921d52f4c6c861b7633e8794814 | fde57849ee0f1c97c18a59be62f1cf390fe0a6c4 | refs/heads/main | 2023-06-27T23:29:33.320399 | 2021-07-31T05:22:29 | 2021-07-31T05:22:29 | 391,262,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | from rest_framework import serializers
from .models import SellerAddress, CustomerAddress
class SellerAddressSerializer(serializers.ModelSerializer):
    """Serializes SellerAddress rows, exposing every model field plus a
    hyperlink to the 'selleraddress-detail' view."""
    url = serializers.HyperlinkedIdentityField(view_name='selleraddress-detail')
    class Meta:
        model = SellerAddress
        fields = '__all__'
class CustomerAddressSerializer(serializers.ModelSerializer):
    """Serializes CustomerAddress rows, exposing every model field plus
    a hyperlink to the 'customeraddress-detail' view."""
    url = serializers.HyperlinkedIdentityField(view_name='customeraddress-detail')
    class Meta:
        model = CustomerAddress
        fields = '__all__'
| [
"web_software_developer@mail.ru"
] | web_software_developer@mail.ru |
cb5b6881b67759c738a71b69a9741db540e63aa3 | 5b76e2b3139bbc9eff00f62582a1e1b301e6af24 | /content based.py | 3fab2c33c3be08beac39cb4854735b46fd68829f | [] | no_license | nguyenlinhvn512/Content-Based-Filtering | 1dd33567750c1fe3c8f81743a463f3561ae40c45 | 5e46bcaf23acad80c2e404491fac164e99495b98 | refs/heads/main | 2022-12-30T16:58:52.461256 | 2020-10-12T11:25:38 | 2020-10-12T11:25:38 | 303,369,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,973 | py | #Dataframe manipulation library
import pandas as pd
#Math functions, we'll only need the sqrt function so let's import only that
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): sqrt, np and plt are imported but never used below.
#Storing the movie information into a pandas dataframe
movies_df = pd.read_csv('movies.csv')
#Storing the user information into a pandas dataframe
ratings_df = pd.read_csv('ratings.csv')
#Using regular expressions to find a year stored between parentheses
#We specify the parantheses so we don't conflict with movies that have years in their titles
movies_df['year'] = movies_df.title.str.extract('(\(\d\d\d\d\))', expand=False)
#Removing the parentheses
movies_df['year'] = movies_df.year.str.extract('(\d\d\d\d)', expand=False)
#Removing the years from the 'title' column
# NOTE(review): str.replace treats the pattern as a regex only on older
# pandas; since pandas 2.0 the default is regex=False, so this line
# would need regex=True there -- verify the installed pandas version.
movies_df['title'] = movies_df.title.str.replace('(\(\d\d\d\d\))', '')
#Applying the strip function to get rid of any ending whitespace characters that may have appeared
movies_df['title'] = movies_df['title'].apply(lambda x: x.strip())
#Every genre is separated by a | so we simply have to call the split function on |
movies_df['genres'] = movies_df.genres.str.split('|')
#Copying the movie dataframe into a new one since we won't need to use the genre information in our first case.
moviesWithGenres_df = movies_df.copy()
#For every row in the dataframe, iterate through the list of genres and place a 1 into the corresponding column
moviesWithGenres_df = moviesWithGenres_df.fillna(0)
print(ratings_df.head())
#Drop removes a specified row or column from a dataframe
# NOTE(review): positional axis ('timestamp', 1) is deprecated in newer
# pandas; axis=1 as a keyword is the forward-compatible spelling.
ratings_df = ratings_df.drop('timestamp', 1)
print(ratings_df.head())
#Content-Based recommendation system
# The user's explicit ratings that drive the recommendation.
userInput = [
    {'title': 'Breakfast Club, The', 'rating': 5},
    {'title': 'Toy Story', 'rating': 3.5},
    {'title': 'Jumanji', 'rating': 2},
    {'title': "Pulp Fiction", 'rating': 5},
    {'title': 'Akira', 'rating': 4.5}
]
inputMovies = pd.DataFrame(userInput)
#Filtering out the movies by title
inputId = movies_df[movies_df['title'].isin(inputMovies['title'].tolist())]
#Then merging it so we can get the movieId. It's implicitly merging it by title.
inputMovies = pd.merge(inputId, inputMovies)
#Dropping information we won't use from the input dataframe
inputMovies = inputMovies.drop('genres', 1).drop('year', 1)
#Final input dataframe
#If a movie you added in above isn't here, then it might not be in the original
#dataframe or it might spelled differently, please check capitalisation.
#Filtering out the movies from the input
userMovies = moviesWithGenres_df[moviesWithGenres_df['movieId'].isin(
    inputMovies['movieId'].tolist())]
#Resetting the index to avoid future issues
userMovies = userMovies.reset_index(drop=True)
#Dropping unnecessary issues due to save memory and to avoid issues
userGenreTable = userMovies.drop('movieId', 1).drop('title', 1).drop('genres', 1).drop('year', 1)
inputMovies['rating']
#Dot produt to get weights
# User profile: per-genre weights = ratings dotted with the genre matrix.
userProfile = userGenreTable.transpose().dot(inputMovies['rating'])
#Now let's get the genres of every movie in our original dataframe
genreTable = moviesWithGenres_df.set_index(moviesWithGenres_df['movieId'])
#And drop the unnecessary information
genreTable = genreTable.drop('movieId', 1).drop(
    'title', 1).drop('genres', 1).drop('year', 1)
#Multiply the genres by the weights and then take the weighted average
recommendationTable_df = ((genreTable*userProfile).sum(axis=1))/(userProfile.sum())
#Sort our recommendations in descending order
recommendationTable_df = recommendationTable_df.sort_values(ascending=False)
#The final recommendation table
print(movies_df.loc[movies_df['movieId'].isin(
    recommendationTable_df.head(20).keys())])
| [
"noreply@github.com"
] | nguyenlinhvn512.noreply@github.com |
116cb7f1f3ff4aae9cb1c5284401cf68dda66329 | 338184b4359bf0477375f62c173559ad198eb141 | /sw.py | 37fea1bbd62a15e8a3ce9cb53ceec40f6f6f2c81 | [] | no_license | Abhishek-kumar09/Railway-Reservation-Project | 6a0fca6ebbab8ff26fe1b6b74092f5b0446d4aa6 | 91eb6ae6f3bf1170bf2a040ce552d659a4dcec0d | refs/heads/master | 2023-02-07T10:25:24.204209 | 2021-01-02T13:11:20 | 2021-01-02T13:11:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,059 | py | from tkinter import *
def main_account_screen():
    """Build and run the root window offering Login / Register choices.

    Stores the root window in the module-global `main_screen`, because
    register() (defined later in this file) parents its Toplevel on it.
    """
    global main_screen  # register() reads this; a plain local would NameError
    main_screen = Tk()  # create a GUI window
    main_screen.geometry("300x250")  # set the configuration of GUI window
    main_screen.title("Account Login")  # set the title of GUI window
    # create a Form label
    Label(text="Choose Login Or Register", bg="blue", width="300", height="2", font=("Calibri", 13)).pack()
    Label(text="").pack()
    # create Login Button
    # NOTE(review): neither button has a command= callback wired up,
    # so clicking them does nothing yet.
    Button(text="Login", height="2", width="30").pack()
    Label(text="").pack()
    # create a register button
    Button(text="Register", height="2", width="30").pack()
    main_screen.mainloop()  # start the GUI
main_account_screen()  # call the main_account_screen() function
def register():
# The Toplevel widget work pretty much like Frame,
# but it is displayed in a separate, top-level window.
# Such windows usually have title bars, borders, and other “window decorations”.
# And in argument we have to pass global screen variable
register_screen = Toplevel(main_screen)
register_screen.title("Register")
register_screen.geometry("300x250")
# Set text variables
username = StringVar()
password = StringVar()
# Set label for user's instruction
Label(register_screen, text="Please enter details below", bg="blue").pack()
Label(register_screen, text="").pack()
# Set username label
username_lable = Label(register_screen, text="Username * ")
username_lable.pack()
# Set username entry
# The Entry widget is a standard Tkinter widget used to enter or display a single line of text.
username_entry = Entry(register_screen, textvariable=username)
username_entry.pack()
# Set password label
password_lable = Label(register_screen, text="Password * ")
password_lable.pack()
# Set password entry
password_entry = Entry(register_screen, textvariable=password, show='*')
password_entry.pack()
Label(register_screen, text="").pack()
# Set register button
Button(register_screen, text="Register", width=10, height=1, bg="blue").pack() | [
"abhimait1909@gmail.com"
] | abhimait1909@gmail.com |
c00bff8a97f2f0cd605b081aab99214bd019e9fd | fe42f1c1eefb2069eda1dd98821ba6049fb4f01a | /ML/P3DataAnalysisPandas/P4Combining.py | 30cbbcdbd467feed161647f9dcf1775382909e7d | [] | no_license | hvn2001/LearnPython | c1b13f6685e6e62b3c9b612e88e624925f43eb6e | 323595df8d69e84873f74819a36b5eb36b017773 | refs/heads/master | 2021-03-30T06:26:55.110963 | 2020-04-10T16:13:36 | 2020-04-10T16:13:36 | 248,025,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,906 | py | import pandas as pd
print('------A. Concatenation------')
df1 = pd.DataFrame({'c1': [1, 2], 'c2': [3, 4]},
index=['r1', 'r2'])
df2 = pd.DataFrame({'c1': [5, 6], 'c2': [7, 8]},
index=['r1', 'r2'])
df3 = pd.DataFrame({'c1': [5, 6], 'c2': [7, 8]})
concat = pd.concat([df1, df2], axis=1)
print('{}\n'.format(concat))
'''
c1 c2 c1 c2
r1 1 3 5 7
r2 2 4 6 8
'''
concat = pd.concat([df2, df1, df3])
print('{}\n'.format(concat))
'''
c1 c2
r1 5 7
r2 6 8
r1 1 3
r2 2 4
0 5 7
1 6 8
'''
concat = pd.concat([df1, df3], axis=1)
print('{}\n'.format(concat))
'''
c1 c2 c1 c2
r1 1.0 3.0 NaN NaN
r2 2.0 4.0 NaN NaN
0 NaN NaN 5.0 7.0
1 NaN NaN 6.0 8.0
'''
print('------B. Merging------')
mlb_df1 = pd.DataFrame({'name': ['john doe', 'al smith', 'sam black', 'john doe'],
'pos': ['1B', 'C', 'P', '2B'],
'year': [2000, 2004, 2008, 2003]})
mlb_df2 = pd.DataFrame({'name': ['john doe', 'al smith', 'jack lee'],
'year': [2000, 2004, 2012],
'rbi': [80, 100, 12]})
print('{}\n'.format(mlb_df1))
'''
name pos year
0 john doe 1B 2000
1 al smith C 2004
2 sam black P 2008
3 john doe 2B 2003
'''
print('{}\n'.format(mlb_df2))
'''
name rbi year
0 john doe 80 2000
1 al smith 100 2004
2 jack lee 12 2012
'''
mlb_merged = pd.merge(mlb_df1, mlb_df2)
print('{}\n'.format(mlb_merged))
'''
name pos year rbi
0 john doe 1B 2000 80
1 al smith C 2004 100
'''
print('------Ex: ------')
def concat_rows(df1, df2):
row_concat = pd.concat([df1, df2])
return row_concat
def concat_cols(df1, df2):
col_concat = pd.concat([df1, df2], axis=1)
return col_concat
def merge_dfs(df1, df2):
merged_df = pd.merge(df1, df2)
return merged_df
| [
"hoangvu1991uit@gmail.com"
] | hoangvu1991uit@gmail.com |
40c1b8c3ff03082a68e3d906964553c4da5afa44 | 0985dfc7b53f6bb80a6ee9c7b9cad4d7c31d2013 | /dform/admin.py | bcef11df218893cdb425e1167f9e615755c14996 | [
"MIT"
] | permissive | yonghuming/django-dform | 6f237020573f5e5a5e1d8ed1a58ed5b944f31aef | 3a8cb2ee61b5ea4719e6fc3bfb9ede66f468831e | refs/heads/master | 2021-01-17T06:30:52.467715 | 2015-10-21T19:22:03 | 2015-10-21T19:22:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,128 | py | from django.contrib import admin
from django.core.urlresolvers import reverse, NoReverseMatch
from awl.admintools import make_admin_obj_mixin
from awl.rankedmodel.admintools import admin_link_move_up, admin_link_move_down
from .fields import FIELD_CHOICES_DICT
from .models import (Survey, SurveyVersion, Question, QuestionOrder, Answer,
AnswerGroup)
# ============================================================================
def _questions_link(version, show_reorder=True):
num_q = Question.objects.filter(survey_versions=version).count()
if num_q == 0:
return ''
plural = ''
if num_q > 1:
plural = 's'
show = reverse('admin:dform_question_changelist')
reorder = reverse('admin:dform_questionorder_changelist')
urls = [
'<a href="%s?survey_versions__id=%s">%s Question%s</a>' % (show,
version.id, num_q, plural)
]
if show_reorder:
urls.append(
'<a href="%s?survey_version__id=%s">Reorder</a>' % (reorder,
version.id)
)
return ' | '.join(urls)
def _answers_link(version):
num_a = Answer.objects.filter(answer_group__survey_version=version).count()
if num_a == 0:
return ''
plural = ''
if num_a > 1:
plural = 's'
link = reverse('admin:dform_answer_changelist')
url = '<a href="%s?survey_version__id=%s">%s Answer%s</a>' % (link,
version.id, num_a, plural)
return url
# ============================================================================
# Surveys
# ============================================================================
@admin.register(Survey)
class SurveyAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'version_num', 'show_actions',
'show_versions', 'show_questions', 'show_answers')
def version_num(self, obj):
return '%s' % obj.latest_version.version_num
version_num.short_description = 'Latest Version'
def show_actions(self, obj):
actions = []
if obj.latest_version.is_editable():
url = reverse('dform-edit-survey', args=(obj.latest_version.id,))
actions.append('<a href="%s">Edit Survey</a>' % url)
else:
url = reverse('dform-new-version', args=(obj.id,))
actions.append('<a href="%s">New Version</a>' % url)
try:
url = reverse('dform-sample-survey', args=(obj.latest_version.id,))
actions.append('<a href="%s">View Sample</a>' % url)
except NoReverseMatch:
# sample-survey view isn't guaranteed to be there
pass
try:
url = reverse('dform-survey', args=(obj.latest_version.id,))
actions.append('<a href="%s">Answer Survey</a>' % url)
except NoReverseMatch:
# survey view isn't guaranteed to be there
pass
return ', '.join(actions)
show_actions.short_description = 'Actions'
show_actions.allow_tags = True
def show_versions(self, obj):
num_v = SurveyVersion.objects.filter(survey=obj).count()
link = reverse('admin:dform_surveyversion_changelist')
url = '<a href="%s?survey__id=%s">%s Versions</a>' % (link, obj.id,
num_v)
return url
show_versions.short_description = 'Versions'
show_versions.allow_tags = True
def show_questions(self, obj):
return _questions_link(obj.latest_version)
show_questions.short_description = 'Questions'
show_questions.allow_tags = True
def show_answers(self, obj):
return _answers_link(obj.latest_version)
show_answers.short_description = 'Answers'
show_answers.allow_tags = True
mixin = make_admin_obj_mixin('SurveyVersionMixin')
mixin.add_obj_link('show_survey', 'survey')
@admin.register(SurveyVersion)
class SurveyVersionAdmin(admin.ModelAdmin, mixin):
list_display = ('id', 'show_survey', 'version_num', 'show_actions',
'show_questions', 'show_answers')
def show_actions(self, obj):
actions = []
if obj.is_editable():
url = reverse('dform-edit-survey', args=(obj.id,))
actions.append('<a href="%s">Edit Survey</a>' % url)
try:
url = reverse('dform-sample-survey', args=(obj.id,))
actions.append('<a href="%s">View Sample</a>' % url)
except NoReverseMatch:
# view sample isn't guaranteed to be there
pass
try:
url = reverse('dform-survey', args=(obj.id,))
actions.append('<a href="%s">Answer Survey</a>' % url)
except NoReverseMatch:
# survey view isn't guaranteed to be there
pass
return ', '.join(actions)
show_actions.short_description = 'Actions'
show_actions.allow_tags = True
def show_questions(self, obj):
return _questions_link(obj)
show_questions.short_description = 'Questions'
show_questions.allow_tags = True
def show_answers(self, obj):
return _answers_link(obj)
show_answers.short_description = 'Answers'
show_answers.allow_tags = True
# ============================================================================
# Questions
# ============================================================================
@admin.register(Question)
class QuestionAdmin(admin.ModelAdmin):
list_display = ('id', 'text', 'field_key', 'required', 'show_reorder',
'show_answers')
def show_reorder(self, obj):
link = reverse('admin:dform_questionorder_changelist')
url = '<a href="%s?survey_version__id=%s">Reorder</a>' % (link,
obj.survey.latest_version.id)
return url
show_reorder.short_description = 'Reorder'
show_reorder.allow_tags = True
def show_answers(self, obj):
num_a = Answer.objects.filter(question=obj).count()
if num_a == 0:
return ''
plural = ''
if num_a > 1:
plural = 's'
link = reverse('admin:dform_answer_changelist')
url = '<a href="%s?question__id=%s">%s Answer%s</a>' % (link, obj.id,
num_a, plural)
return url
show_answers.short_description = 'Answers'
show_answers.allow_tags = True
@admin.register(QuestionOrder)
class QuestionOrderAdmin(admin.ModelAdmin):
list_display = ('id', 'survey_version', 'show_text', 'move_up',
'move_down')
def show_text(self, obj):
return obj.question.text
show_text.short_description = 'Question Text'
def move_up(self, obj):
return admin_link_move_up(obj, 'Up')
move_up.allow_tags = True
move_up.short_description = 'Move Up'
def move_down(self, obj):
return admin_link_move_down(obj, 'Down')
move_down.allow_tags = True
move_down.short_description = 'Move Down'
# ============================================================================
# Answers
# ============================================================================
mixin = make_admin_obj_mixin('AnswerMixin')
mixin.add_obj_link('show_group', 'answer_group',
display='AnswerGroup.id={{obj.id}}')
mixin.add_obj_link('show_question', 'question',
display='Question.id={{obj.id}}')
@admin.register(Answer)
class AnswerAdmin(admin.ModelAdmin, mixin):
list_display = ('id', 'show_group', 'show_question', 'show_text',
'show_field_key', 'value')
def show_text(self, obj):
return obj.question.text
show_text.short_description = 'Question Text'
def show_field_key(self, obj):
return FIELD_CHOICES_DICT[obj.question.field_key]
show_field_key.short_description = 'Field Key'
mixin = make_admin_obj_mixin('AnswerGroupMixin')
mixin.add_obj_link('show_data', 'group_data')
mixin.add_obj_link('show_version', 'survey_version',
display='SurveyVersion.id={{obj.id}}')
@admin.register(AnswerGroup)
class AnswerGroupAdmin(admin.ModelAdmin, mixin):
list_display = ('id', 'updated', 'show_version', 'show_data',
'show_questions', 'show_answers', 'show_actions')
def show_questions(self, obj):
return _questions_link(obj.survey_version, False)
show_questions.short_description = 'Questions'
show_questions.allow_tags = True
def show_answers(self, obj):
num_a = Answer.objects.filter(answer_group=obj).count()
if num_a == 0:
return ''
plural = ''
if num_a > 1:
plural = 's'
link = reverse('admin:dform_answer_changelist')
url = '<a href="%s?answer_group__id=%s">%s Answer%s</a>' % (link,
obj.id, num_a, plural)
return url
show_answers.short_description = 'Answers'
show_answers.allow_tags = True
def show_actions(self, obj):
try:
url = reverse('dform-survey-with-answers', args=(
obj.survey_version.id, obj.id))
return '<a href="%s">Change Answers</a>' % url
except NoReverseMatch:
# view survey-with-answers isn't guaranteed to be there
return ''
show_actions.short_description = 'Actions'
show_actions.allow_tags = True
| [
"ctrudeau@arsensa.com"
] | ctrudeau@arsensa.com |
e6e034f452a62afe559d778611e458476d626741 | c8e11705a8010405a0d76aaeff35709313196994 | /Todo/settings.py | 72a55b5115b0a6f29d0ffd59c7a78062a198efba | [] | no_license | shubham-137/todo | 746a3eae3bd7eea69ef754b8519289b17b209145 | b824c2d766fe2d370ff4cafc30b74526f6a90e32 | refs/heads/master | 2022-12-19T10:20:55.913301 | 2020-09-19T09:22:00 | 2020-09-19T09:22:00 | 296,792,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,141 | py | """
Django settings for Todo project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%y5yfmd$kr%(%-+@-mpuy)40g#+__#y7lfo5ryu1z-3l@ido!$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tasks',
'accounts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Todo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"patilshubham1698@gmail.com"
] | patilshubham1698@gmail.com |
5116c37c6a53c5ffd85d3a98f02c291f82c145d2 | 03389f9591ce06e23757d5f086d1f58072bfe21a | /app/followers/fetch_followers.py | 7a9122262d9df193a43d7c8e824b621c68f69d3d | [] | no_license | poberherr/twitter-stats | da3d301699bdf9ba9653ab37d6108d73a0b3e808 | cfb887cd410d5490c182cf9d3bbc272cb6ea47fd | refs/heads/master | 2021-01-10T06:20:18.603619 | 2016-02-25T12:46:40 | 2016-02-25T12:46:40 | 52,022,458 | 0 | 0 | null | 2016-02-24T20:51:28 | 2016-02-18T16:44:19 | Python | UTF-8 | Python | false | false | 1,299 | py | import tweepy
from app.twitter_api import twitter_api
from app.followers.models import Followers
def fetch_and_create_followers(user):
# For now we assume the followers don't change
if user.is_follower_fetched:
print('User was already in DB :', user.screen_name)
follower_ids = retrieve_followers_from_db(user)
else:
try:
print('Fetching followers for: ', user.screen_name)
# TODO: This call fetches 'just' 5000 followers
follower_ids = twitter_api.followers_ids(user.twitter_id)
print('Inserting followers for: ', user.screen_name)
for follower_id in follower_ids:
follower = Followers(user.id, user.twitter_id, follower_id)
follower.add(follower)
user.is_follower_fetched = True
user.update()
return follower_ids
except tweepy.TweepError as e:
# set flag in user that followers are private
return None, e
return follower_ids
def fetch_followers_from_db(user):
followers = Followers.query.filter_by(user_id=user.id).all()
if followers:
follower_ids = []
for follower in followers:
follower_ids.append(follower.twitter_follower_id)
return follower_ids
| [
"patrick.oberherr@gmail.com"
] | patrick.oberherr@gmail.com |
e039e3d38142077644be74d6bef41967df1bbb00 | c2582163f6bb7c1996f702ea10e8935de710587a | /TerVer_LAB_4.py | 3af9e8b0c93bc6af5e95ddfa5a43a82980d488af | [] | no_license | fixik338/Ter_ver | 918859e9b2adc07d30766a1baadb60668ab8bdd0 | 1ee1c63342dc663fabdd6783e4e3d5c4d19d98e6 | refs/heads/master | 2021-02-06T21:38:32.262605 | 2020-05-29T07:18:32 | 2020-05-29T07:18:32 | 243,950,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,493 | py | import numpy as np
from math import *
from PIL import Image
import matplotlib.pyplot as plt
image1 = Image.open('images/srd_kdr.png')
image2 = Image.open('images/dispisya.png')
image3 = Image.open('images/Matozha.png')
def C_bezrepeat(n, m):
return factorial(n) / (factorial(n - m) * factorial(m))
def binom(n, p):
q = 1 - p
M = n * p
D = n * p * q
P = []
for i in range(n + 1):
P.append(C_bezrepeat(n, i) * p ** i * q ** n - i)
return M, D, P
def chisl_harac(x, p):
M = np.sum(x * p)
M_2 = np.sum((x ** 2) * p)
D = M_2 - M ** 2
return M, D
def gip_geo(n, m, k, X):
p = (C_bezrepeat(m, k) * C_bezrepeat(n - m, X - k)) / C_bezrepeat(n, X)
return p
def psevd_geo(k, p):
p = np.array(p)
q = 1 - p
k = np.arange(1, k + 1)
P = q ** (k - 1) * p
P[-1] = 1 - np.sum(P[:-1])
return P
def puass(lmbd, m):
P = []
for i in range(len(m)):
P.append((lmbd ** m[i]) / factorial(m[i]) * np.exp(-lmbd))
return P
def puass_quest():
t = input('a) Задачи'
'\nb) Ручной ввод'
'\n ==> ')
if t == 'a':
print('Случайная величина Храспределена по закону Пуассона, причем lmbd = 0,2. '
'\nПостройте часть ряда распределения случайной величины Х для m = 0, 1, 2, 3, 4.')
lmbd1 = 0.2
m1 = [i for i in range(5)]
print('\nСлучайная величина Х распределена по закону Пуассона, причем lmbd = 0,8. '
'\nПостройте часть ряда распределения случайной величины Х для m = 0, 1, 2, 3, 4, 5, 6.')
lmbd2 = 0.8
m2 = [i for i in range(7)]
print('\nСлучайная величина Х распределена по закону Пуассона, причем lmbd = 3. '
'\nПостройте часть ряда распределения случайной величины Х для m = 0, 1, 2, 3, 4, 5, 6.')
lmbd3 = 3
m3 = [i for i in range(7)]
return [[puass(lmbd1, m1), puass(lmbd2, m2), puass(lmbd3, m3)], [m1, m2, m3]]
elif t == 'b':
lmbd = float(input('Значелие лямбды: '))
mk = int(input('Количество m: '))
m = []
for i in range(mk + 1):
m.append(int(input('m[' + str(i + 1) + '] = ')))
return puass(lmbd, m), m
def F(x, p):
a = [0]
for i in range(2, len(x)):
a.append(np.sum(p[:i]))
a.append(np.sum(p))
return a
def condition():
X = int(input('Количество возможных значений СВ: '))
x = [0]
p = [0]
for i in range(X):
x.append(int(input('x[' + str(i + 1) + ']= ')))
p.append(float(input('p[' + str(i + 1) + ']= ')))
x = np.array(x)
p = np.array(p)
return x, p
def GERAF(x, y, p):
plt.subplot(2, 1, 1)
plt.hlines(y[-1], x[-1], 60)
for i in range(1, len(x)):
plt.hlines(y[i - 1], x[i - 1], x[i])
for i in range(1, len(x)):
plt.vlines(x[i], y[i], y[i - 1], linestyles='--')
plt.grid(True)
plt.title('Функция распределения')
plt.xlabel('x')
plt.ylabel('F(x)')
plt.subplot(2, 1, 2)
plt.plot(x[1:], p[1:], color='black')
plt.grid(True)
plt.title('Многоугольник распределения')
plt.xlabel('x')
plt.ylabel('p')
plt.show()
while True:
w = input('\nВыберите: '
'\n 1) Вид распределения'
'\n 2) Решение задач'
'\n ==> ')
if w == '1':
w = input('\n a) Биноминальное'
'\n b) Гипергеометрическое'
'\n c) Пуассона'
'\n d) Псевдогеометрическое'
'\n e) Ислледование СВ'
'\n ==> ')
if w == 'a':
n = int(input('n = '))
p = float(input('p = '))
M, D, p = binom(n, p)
elif w == 'b':
n = int(input('n = '))
m = int(input('m = '))
X = int(input('X = '))
k = int(input('k = '))
k = range(k + 1)
k = np.array(k)
p = []
for i in range(len(k)):
p.append(gip_geo(n, m, k[i], X))
M, D = chisl_harac(k, p)
print(k, '\n', np.round(p, 4), )
plt.plot(k, p, color='black')
plt.grid(True)
plt.title('Многоугольник распределения')
plt.xlabel('x')
plt.ylabel('p')
plt.show()
elif w == 'c':
lmbd = float(input('Лямбда= '))
c = int(input('Кол-во m: '))
m = [i for i in range(c + 1)]
p = puass(lmbd, m)
print(m)
for i in range(len(m)):
print(np.round(p[i], 4), end=' ')
plt.plot(m, p, color='black')
plt.grid(True)
plt.title('Многоугольник распределения')
plt.xlabel('x')
plt.ylabel('p')
plt.show()
elif w == 'd':
k = int(input('k = '))
p = float(input('p = '))
M, D = chisl_harac(k, p)
p = psevd_geo(k, p)
print(p)
plt.plot(range(k), p, color='black')
plt.grid(True)
plt.title('Многоугольник распределения')
plt.xlabel('x')
plt.ylabel('p')
plt.show()
elif w == 'e':
x, p=condition()
M, D = chisl_harac(x, p)
F = F(x[1:], p[1:])
print('Математическое ожидание:', np.round(M, 4),
'\nДисперсия:', np.round(D, 4),
'\nСреднее квадратическое отклонение: ', np.round(sqrt(D), 4),
'\nМода:', np.max(p))
GERAF(x[1:], F, p[1:])
image1.show()
image2.show()
image3.show()
elif w == '2':
p, m = puass_quest()
for i in range(len(m)):
print('\nОтвет[' + str(i + 1) + ']:\n', m[i], '\n', np.round(p[i], 4))
| [
"0firter0@gmail.com"
] | 0firter0@gmail.com |
31fdbbc045069d508cdbae75adb41f6ea4a73e48 | 7a15cf7b79093501deb1c63783f5372159640943 | /code.py | 8068cd8a72e3dbf63b2ae65b4a71fb29382bb5c2 | [] | no_license | sharp03/TICK-stack-docker-setup | de1cc909e58145a892e7a833b342ee3bc484522f | a693917164781ef81c3f1fabf79fc61251d1dea6 | refs/heads/master | 2020-04-24T19:45:43.737663 | 2019-03-25T13:05:48 | 2019-03-25T13:05:48 | 172,222,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,868 | py | from random import randint
from numpy import array
from numpy import argmax
from numpy import array_equal
import tensorflow as tf
import numpy as np
import random
import math
#from matplotlib import pyplot as plt
import os
import copy
import time
from influxdb import InfluxDBClient
from tensorflow.python.layers import core as layers_core
from tensorflow.python import debug as tf_debug
import pandas as pd
def generate_sin_wave(no_of_samples):
"""pass the number of samples you want to generate"""
samples=no_of_samples/10
time = np.arange(0, samples, 0.1)
sin_wave_data = np.sin(time)
return sin_wave_data
def read_input(arguments):
# timestamps
end=start_time
start=end-arguments.limit*arguments.interval
ct = 0
timestamps = []
values = []
lasttimestamps = []
client = InfluxDBClient(host='localhost',port='8086',database=arguments.data_base)
if arguments.multiple_tables:
for tab in arguments.tables:
query = "SELECT time,"+arguments.parameter+" FROM \""+tab+"\" WHERE time > "+str(int(start))+"s AND time < "+str(int(end))+"s;"
#query = "SELECT time,"+arguments.parameter+" FROM \""+tab+"\" ORDER BY DESC LIMIT %d"%(arguments.limit)
output = client.query(query,epoch='s')
output = np.array(output.raw["series"][0]["values"]).transpose()
tm = list(output[0])
val = list(output[1])
print("value",len(val))
lasttimestamps.append(tm[-1])
timestamps.append(tm)
ct=ct+1
#fills missing timestamps with mean value
lasttm = tm[-1]
i = 0
while tm[i] != lasttm:
if i != 0 and tm[i] - tm[i-1] >= 2*arguments.interval:
mean = (val[i] + val[i-1])/2
missingPoints = int((tm[i] - tm[i-1])/arguments.interval)
for j in range(missingPoints-1):
val.insert((i+1), mean)
i = i+1
i = i+1
values.append(val)
while True:
for i in range(len(values)):
for j in range(len(values)):
if len(values[i]) > len(values[j]):
values[i].pop(0)
if all(len(v) == len(values[0]) for v in values):
break
datapoints = np.vstack((values))
print (datapoints.shape)
datapoints = datapoints.T
datapoints = pd.DataFrame(datapoints)
return datapoints, lasttimestamps
else:
for param in arguments.parameter:
query = "SELECT time,"+param+" FROM \""+arguments.tables[0]+"\" WHERE time > "+str(int(start))+"s AND time < "+str(int(end))+"s;"
#query = "SELECT time,"+param+" FROM \""+arguments.tables+"\" ORDER BY DESC LIMIT %d"%(arguments.limit)
output = client.query(query,epoch='s')
output = np.array(output.raw["series"][0]["values"]).transpose()
tm = list(output[0])
val = list(output[1])
print("value",len(val))
lasttimestamps.append(tm[-1])
timestamps.append(tm)
ct=ct+1
#fills missing timestamps with mean value
lasttm = tm[-1]
i = 0
while tm[i] != lasttm:
if i != 0 and tm[i] - tm[i-1] >= 2*arguments.interval:
mean = (val[i] + val[i-1])/2
missingPoints = int((tm[i] - tm[i-1])/arguments.interval)
for j in range(missingPoints-1):
val.insert((i+1), mean)
i = i+1
i = i+1
values.append(val)
while True:
for i in range(len(values)):
for j in range(len(values)):
if len(values[i]) > len(values[j]):
values[i].pop(0)
if all(len(v) == len(values[0]) for v in values):
break
datapoints = np.vstack((values))
print (datapoints.shape)
datapoints = datapoints.T
datapoints = pd.DataFrame(datapoints ,columns =arguments.parameter)
return datapoints, lasttimestamps
#write function
def write_data(arguments,predicted_list):
import os
ti=arguments.interval
np_predicted_list = np.array(predicted_list)
print( "writing")
#arguments.multiple_tables =True
if arguments.multiple_tables:
for i in range(0, len(predicted_list[0])):
var = np_predicted_list[:,i]
t=arguments.last_ts[i]
for j in var:
t = int(t)+ti
print( t)
os.system("docker-compose exec influxdb curl -i -silent -XPOST 'http://localhost:8086/write?db=%s&precision=s' --data-binary '%s,host=%s value=%s %d'" % (arguments.data_base,arguments.w_measurement[i],arguments.host,j,t))
print("docker-compose exec influxdb curl -i -silent -XPOST 'http://localhost:8086/write?db=%s&precision=s' --data-binary '%s,host=%s value=%s %d'" % (arguments.data_base,arguments.w_measurement[i],arguments.host,j,t))
return
else:
t=arguments.last_ts[0]
for i in range(0, len(predicted_list[0])):
var = np_predicted_list[:,i]
for j in var:
t = int(t)+ti
print (t)
os.system("docker-compose exec influxdb curl -i -silent -XPOST 'http://localhost:8086/write?db=%s&precision=s' --data-binary '%s,host=%s %s=%s %d'" % (arguments.data_base,arguments.w_measurement[0],arguments.host,arguments.w_parameters[i],j,t))
return
def seperate_train_test_datasets(x, y, batch_size, input_seq_len, output_seq_len, number_of_test_batch_sets):
last_test_datapoint_index = input_seq_len+output_seq_len+(batch_size*number_of_test_batch_sets)
total_datapoints = len(x)
#checking the ratio between train and test dataset size
if total_datapoints*.25 <= last_test_datapoint_index:
import warnings
warnings.warn("Number of test datapoints is more than 25% of total number of datapoints")
assert (total_datapoints*.5 >= last_test_datapoint_index), "Number of test datapoints is more than 50% of total number of datapoints"
x_test = x[:last_test_datapoint_index]
y_test = y[:last_test_datapoint_index]
x_train = x[last_test_datapoint_index:]
y_train = y[last_test_datapoint_index:]
return x_train, y_train, x_test, y_test
def generate_train_batches(x, y, batch_size, input_seq_len, output_seq_len,time_major,seq_batch,last_batch_no,n_in_features,n_out_feature):
import numpy as np
total_start_points = len(x) - input_seq_len - output_seq_len
#For creating the batches from sequential or random indices
if seq_batch:
if last_batch_no >= total_start_points-batch_size:
last_batch_no=0
#Selecting successive indices for creating batches
start_x_idx = np.arange(last_batch_no, last_batch_no + batch_size)
last_batch_no +=len(start_x_idx)
else:
#Selecting random indices for creating batches
start_x_idx = np.random.choice(range(total_start_points), batch_size, replace = False)
last_batch_no = 0
input_batch_idxs = [(range(i, i+input_seq_len)) for i in start_x_idx]
input_seq = np.take(x, input_batch_idxs, axis = 0)
output_batch_idxs = [(range(i+input_seq_len, i+input_seq_len+output_seq_len)) for i in start_x_idx]
output_seq = np.take(y, output_batch_idxs, axis = 0)
#Outputs the batches in time major (shape = (time,batchsize,features), if selected)
if time_major:
input_seq =(input_seq.reshape(input_seq.shape[0],input_seq.shape[1],n_in_features)).transpose((1,0,2))
output_seq=(output_seq.reshape(output_seq.shape[0],output_seq.shape[1],n_out_feature)).transpose((1,0,2))
return input_seq, output_seq, last_batch_no # in shape: (time_steps, batch_size, feature_dim)
else:
input_seq =(input_seq.reshape(input_seq.shape[0],input_seq.shape[1],n_in_features))
output_seq=(output_seq.reshape(output_seq.shape[0],output_seq.shape[1],n_out_feature))
return input_seq, output_seq,last_batch_no
def generate_test_batches(x, y, batch_size, input_seq_len, output_seq_len,time_major,n_in_features,n_out_feature):
import numpy as np
total_start_points = len(x) - input_seq_len - output_seq_len
#Selecting random indices for creating batches
start_x_idx = np.random.choice(range(total_start_points), batch_size)
input_batch_idxs = [(range(i, i+input_seq_len)) for i in start_x_idx]
input_seq = np.take(x, input_batch_idxs, axis = 0)
output_batch_idxs = [(range(i+input_seq_len, i+input_seq_len+output_seq_len)) for i in start_x_idx]
output_seq = np.take(y, output_batch_idxs, axis = 0)
#Outputs the batches in time major (shape = (time,batchsize,features), if selected)
if time_major:
input_seq =(input_seq.reshape(input_seq.shape[0],input_seq.shape[1],n_in_features)).transpose((1,0,2))
output_seq=(output_seq.reshape(output_seq.shape[0],output_seq.shape[1],n_out_feature)).transpose((1,0,2))
return input_seq, output_seq # in shape: (time_steps, batch_size, feature_dim)
else:
input_seq =(input_seq.reshape(input_seq.shape[0],input_seq.shape[1],n_in_features))
output_seq=(output_seq.reshape(output_seq.shape[0],output_seq.shape[1],n_out_feature))
return input_seq, output_seq # in shape: (batch_size, time_steps, feature_dim)
def generate_inference_batches (x, y, batch_size, input_seq_len ,time_major,n_in_features,n_out_feature):
import numpy as np
output_seq_len =input_seq_len
total_start_points = len(x) - input_seq_len -output_seq_len
start_x_idx =np.arange(total_start_points - batch_size ,total_start_points)
""" for production we need to pass the last data as input so we can get the next output"""
if n_in_features != n_out_feature:
input_batch_idxs_b = [(range(i+input_seq_len, i+input_seq_len+output_seq_len)) for i in start_x_idx]
last_batch = np.take(x, input_batch_idxs_b, axis = 0)
print("last batch")
else:
last_batch=None
#Selecting random indices for creating batches
input_batch_idxs = [(range(i, i+input_seq_len)) for i in start_x_idx]
input_seq = np.take(x, input_batch_idxs, axis = 0)
output_batch_idxs = [(range(i+input_seq_len, i+input_seq_len+output_seq_len)) for i in start_x_idx]
output_seq = np.take(y, output_batch_idxs, axis = 0)
#Outputs the batches in time major (shape = (time,batchsize,features), if selected)
if time_major:
if n_in_features != n_out_feature:
last_batch =(last_batch.reshape(last_batch.shape[0],last_batch.shape[1],n_in_features)).transpose((1,0,2))
#last_batch=last_batch[:,-1:,:]
print(last_batch.shape)
input_seq =(input_seq.reshape(input_seq.shape[0],input_seq.shape[1],n_in_features)).transpose((1,0,2))
output_seq=(output_seq.reshape(output_seq.shape[0],output_seq.shape[1],n_out_feature)).transpose((1,0,2))
return input_seq,output_seq,last_batch # in shape: (time_steps, batch_size, feature_dim)
else:
if n_in_features != n_out_feature:
last_batch =(last_batch.reshape(last_batch.shape[0],last_batch.shape[1],n_in_features)).transpose((1,0,2))
#last_batch=last_batch[-1:,:,:]
input_seq =(input_seq.reshape(input_seq.shape[0],input_seq.shape[1],n_in_features))
output_seq=(output_seq.reshape(output_seq.shape[0],output_seq.shape[1],n_out_feature))
return input_seq,output_seq,last_batch # in shape: (batch_size, time_steps, feature_dim)
def batch_data_ploting(previous_data,prediction_data,target_data):
test_in=previous_data
pred_outputs=prediction_data
test_out=target_data
#previous data encoder input data
previous_a=test_in[:,0,:]
previous_b =test_in[-1,:,:]
previous_in_seq=np.concatenate((previous_a[:-1],previous_b) ,axis=0)
#Prediction data
prediction_a=pred_outputs[:,0,:]
prediction_b =pred_outputs[-1,:,:]
prediction_seq=np.concatenate((prediction_a[:-1],prediction_b) ,axis=0)
#real target data
target_a=test_out[:,0,:]
target_b =test_out[-1,:,:]
target_seq=np.concatenate((target_a[:-1],target_b) ,axis=0)
target_seq=scaler.inverse_transform(target_seq)
prediction_seq=scaler.inverse_transform(prediction_seq)
previous_in_seq=scaler.inverse_transform(previous_in_seq)
l1, = plt.plot(range(len(previous_in_seq),len(previous_in_seq)+len(prediction_seq)), prediction_seq, 'b', label = 'prediction_X')
l2, = plt.plot(range(len(previous_in_seq),len(previous_in_seq)+len(target_seq)), target_seq, 'y', label = 'actual_data_Y')
l3, = plt.plot(range(len(previous_in_seq)), previous_in_seq, 'r', label = 'previous_data')
plt.legend(handles = [l1, l2,l3], loc = 'upper left')
plt.show()
def last_batch_data_ploting(previous_data,prediction_data,scaler,target_data,time_major,color):
if time_major:
actual_previous=previous_data[:,-1,:]
final_batch_output=prediction_data[:,-1,:]
else:
actual_previous=previous_data[-1,:,:]
final_batch_output=prediction_data[-1,:,:]
if scaler:
actual_previous = scaler.inverse_transform(actual_previous)
final_batch_output = scaler.inverse_transform(final_batch_output)
if target_data is not None:
if time_major:
actual_target=target_data[:,-1,:]
else :
actual_target=target_data[-1,:,:]
if scaler:
actual_target = scaler.inverse_transform(actual_target)
l1, = plt.plot(range(len(actual_previous),len(actual_previous)+len(final_batch_output)), final_batch_output, color, label = 'prediction_X')
l2, = plt.plot(range(len(actual_previous),len(actual_previous)+len(actual_target)), actual_target, 'y', label = 'actual_data_Y')
l3, = plt.plot(range(len(actual_previous)), actual_previous, 'r', label = 'previous_data')
plt.legend(handles = [l1, l2,l3], loc = 'upper left')
plt.show()
else:
l1, = plt.plot(range(len(actual_previous),len(actual_previous)+len(final_batch_output)), final_batch_output, 'k', label = 'prediction_X')
l3, = plt.plot(range(len(actual_previous)), actual_previous, 'r', label = 'previous_data')
plt.legend(handles = [l1,l3], loc = 'upper left')
plt.show()
def load_data(arguments):
#load dataset
#df = pd.read_excel('./sin_wave.xlsx')
#df = pd.read_excel('./Incident_monthly_full_data.xlsx')
df,_ = read_input(arguments)
#df =pd.read_csv("all_load_average_data.csv" , index_col='time')
#df=df['netdata.system.load.load1.csv']
df.fillna(0 ,inplace=True)
print(df.tail())
#df.drop(['netdata.system.io.in.csv','netdata.system.io.out.csv','netdata.system.net.received.csv','netdata.system.net.sent.csv'] , inplace =True ,axis =1)
#df_Y=df[2].values
#print("target " ,df_Y[:5])
df_train=df[:].values
print("data size",len(df))
X = df_train.reshape(-1,1)
Y = df_train.reshape(-1,1)
print("X shape :{} , Y Shape {}".format(X.shape,Y.shape))
from sklearn.preprocessing import MinMaxScaler
scaler_y= MinMaxScaler()
#scaler=None
#scaler = MinMaxScaler()
Y =scaler_y.fit_transform(Y)
X =scaler_y.fit_transform(X)
x_train=X
y_train=Y
x_test =0
y_test =0
#Create train and test datasets
#x_train, y_train, x_test, y_test = seperate_train_test_datasets(X, Y, arguments.batch_size, arguments.input_seq_len, arguments.output_seq_len, arguments.number_of_test_batch_sets)
return x_train, y_train, x_test, y_test, scaler_y
class parameters(object):
def __init__(self):
self.data_base="aadata"
self.parameter= ["value"] #"cpu_utilization","Disk_utilization","load_average",
#"process_running","process_blocked","Free_Ram",
#"context_switches","forks_started","proccess_active",
#"cpu_iowait"]#,"jitter_average","interrupts" ]
self.tables = [
"sqldata"
] #multiple input measurements can be added
self.w_measurement = [
"sqldata_Pred2"
] #multiple output measurements can be added
self.host ="dedusgfa003"
self.interval = 86400 #seconds
self.limit = 700 #30000
self.multiple_tables = False
self.w_parameters = ["sql_e_Pred"]
self.decoder_input_as_target=False
self.use_attention=True
self.bidir =True
self.inference = False
self.time_major = True
self.seq_batch = False
self.last_batch_no = 0
self.n_in_features=1
self.n_out_feature=1
self.input_seq_len =10
self.output_seq_len =10
self.batch_size =64
self.number_of_test_batch_sets =0
self.last_ts = [start_time]
self.learning_rate = 0.003
self.lambda_l2_reg = 0.002
## Network Parameters
self.in_n_features=1
self.out_feature=1
# size of LSTM Cell
self.hidden_dim = 128
# num of input signals
self.input_dim = self.n_in_features
# num of output signals
self.output_dim = self.n_out_feature
# num of stacked lstm layers
# gradient clipping - to avoid gradient exploding
self.GRADIENT_CLIPPING = 2.5
self.max_gradient_norm = 5.0
self.number_of_layers=4
self.keep_prob =1
self.epochs =1000#100000 #500#10000
self.keep_prob = 1
self.ckpt_dir = "checkpoints/"
self.enc_length = self.input_seq_len
self.dec_length = self.output_seq_len
class Model(object):
def __init__(self, hparam):
self.enc_inp = tf.placeholder(tf.float32, shape=(None ,None,hparam.in_n_features),name="enc_inp")
#enc_cell=tf.nn.rnn_cell.BasicLSTMCell(hparam.hidden_dim)
self.enc_seq_len = tf.placeholder(tf.int32, [hparam.batch_size])
def get_a_cell(hidden_dim, keep_prob):
lstm = tf.nn.rnn_cell.BasicLSTMCell(hidden_dim)
drop = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=keep_prob)
return drop
with tf.name_scope('lstm'):
stacked_enc = tf.nn.rnn_cell.MultiRNNCell([get_a_cell(hparam.hidden_dim,hparam.keep_prob) for _ in range(hparam.number_of_layers)])
#bidirectional RNN
if hparam.bidir :
encoder_outputs,all_final_states = tf.nn.bidirectional_dynamic_rnn(
stacked_enc,stacked_enc,self.enc_inp, time_major=True, dtype=tf.float32)
encoder_outputs=tf.concat(encoder_outputs, -1)
if arguments.number_of_layers == 1:
encoder_state = all_final_states
else:
# alternatively concat forward and backward states
encoder_state = []
for layer_id in range(arguments.number_of_layers):
con_h= tf.concat((all_final_states[0][layer_id].h, all_final_states[1][layer_id].h), 1)
con_c= tf.concat((all_final_states[0][layer_id].c, all_final_states[1][layer_id].c), 1)
final_states= tf.contrib.rnn.LSTMStateTuple(c=con_c, h=con_h)
encoder_state.append(final_states)
#encoder_state.append(all_final_states[0][layer_id]) # forward
#encoder_state.append(all_final_states[1][layer_id]) # backward
encoder_state = tuple(encoder_state)
with tf.name_scope('lstm_decoder'):
stacked_decoder = tf.nn.rnn_cell.MultiRNNCell([get_a_cell(hparam.hidden_dim*2,1) for _ in range(hparam.number_of_layers)])
print("Using BID RNN")
else:
encoder_outputs, encoder_state = tf.nn.dynamic_rnn(stacked_enc, self.enc_inp,sequence_length=self.enc_seq_len, dtype=tf.float32, time_major=hparam.time_major)
with tf.name_scope('lstm_decoder'):
stacked_decoder = tf.nn.rnn_cell.MultiRNNCell([get_a_cell(hparam.hidden_dim,1) for _ in range(hparam.number_of_layers)])
##Decoder
self.decoder_targets = tf.placeholder(tf.float32, shape=(None,None,hparam.out_feature),name='decoder_targets')
self.decoder_lengths = tf.placeholder(tf.int32, shape=(hparam.batch_size), name="decoder_length")
## decoder input as decoder target
if hparam.decoder_input_as_target:
print("decoder_input_as_target")
decoder_inputs =tf.concat(((tf.zeros_like(self.decoder_targets[:1], dtype=tf.float32, name="GO")),self.decoder_targets[:-1]),axis=0)
else :
print("enc_inp_input_as_target")
decoder_inputs =tf.concat(((tf.zeros_like(self.enc_inp[:1], dtype=tf.float32, name="GO")),self.enc_inp[:-1]),axis=0)
#Decoder Cell
"""def get_a_cell(hidden_dim):# keep_prob):
lstm = tf.nn.rnn_cell.BasicLSTMCell(hidden_dim)
#drop = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=keep_prob)
return lstm
with tf.name_scope('lstm_decoder'):
stacked_decoder = tf.nn.rnn_cell.MultiRNNCell([get_a_cell(hparam.hidden_dim) for _ in range(hparam.number_of_layers)])
"""
#Output layer
projection_layer = layers_core.Dense(hparam.out_feature, use_bias=False)
##Training Decoder
with tf.variable_scope("decode"):
# Helper
helper = tf.contrib.seq2seq.TrainingHelper(decoder_inputs, self.decoder_lengths,name="helper", time_major=hparam.time_major)
#Attention
if hparam.use_attention:
print("Using Attention")
if hparam.time_major:
attention_states =tf.transpose(encoder_outputs, [1, 0, 2])
else :
attention_states =encoder_outputs
attention_mechanism =tf.contrib.seq2seq.BahdanauAttention(hparam.hidden_dim, attention_states,memory_sequence_length=self.enc_seq_len) # Create an attention mechanism
stacked_decoder = tf.contrib.seq2seq.AttentionWrapper(stacked_decoder, attention_mechanism,attention_layer_size=hparam.hidden_dim)
initial_state = stacked_decoder.zero_state(hparam.batch_size, tf.float32).clone(cell_state=encoder_state)
else:
initial_state = encoder_state
decoder = tf.contrib.seq2seq.BasicDecoder(stacked_decoder, helper, initial_state, output_layer=projection_layer)
final_outputs, _final_state, _final_sequence_lengths = tf.contrib.seq2seq.dynamic_decode(decoder,output_time_major=hparam.time_major)
self.logits = final_outputs.rnn_output
"""
##Inference Decoder
with tf.name_scope('lstm_inference_decoder'):
inference_stacked_decoder=tf.nn.rnn_cell.MultiRNNCell([get_a_cell(hparam.hidden_dim) for _ in range(hparam.number_of_layers)])
with tf.variable_scope("decode", reuse=True):
# Inference Helper
def initialize_fn():
finished = tf.tile([False], [hparam.batch_size])
start_inputs = tf.fill([hparam.batch_size, hparam.out_feature], -1.)
print("start_inputs___",start_inputs.shape)
return (finished, start_inputs)
def sample_fn(time, outputs, state):
return tf.constant([0])
def next_inputs_fn(time, outputs, state, sample_ids):
finished = (time+1) >= hparam.output_seq_len
#finished =time >= output_seq_len
next_inputs = outputs
print("outputs_______",outputs.shape)
return (finished, next_inputs, state)
inference_helper = tf.contrib.seq2seq.CustomHelper(initialize_fn=initialize_fn,
sample_fn=sample_fn,
next_inputs_fn=next_inputs_fn)
#Attention
if hparam.use_attention:
inference_stacked_decoder = tf.contrib.seq2seq.AttentionWrapper(inference_stacked_decoder, attention_mechanism,attention_layer_size=hparam.hidden_dim)
initial_state = inference_stacked_decoder.zero_state(hparam.batch_size, tf.float32).clone(cell_state=encoder_state)
else:
initial_state = encoder_state
inference_decoder = tf.contrib.seq2seq.BasicDecoder(inference_stacked_decoder, inference_helper, initial_state, output_layer=projection_layer)
inference_final_outputs, inference_final_state, inference_final_sequence_lengths = tf.contrib.seq2seq.dynamic_decode(inference_decoder,output_time_major=hparam.time_major)
self.inference_logits = inference_final_outputs.rnn_output
"""
# Train
self.global_step = tf.Variable(0, name='global_step', trainable=False)
# Train
#self.loss =tf.losses.mean_squared_error(labels=self.decoder_targets,logits=self.logits))
# Train
with tf.variable_scope('train_Loss'):
self.loss_ = tf.reduce_mean(tf.nn.l2_loss(self.logits-self.decoder_targets))
tf.summary.scalar('train_loss', self.loss_)
#self.inference_loss_ =tf.reduce_mean( tf.nn.l2_loss(self.inference_logits-self.decoder_targets))
#tf.tf.summary.scalar('inference_loss', self.inference_loss)
"""
tf.summary.scalar(
"train_Loss",
self.loss_
)
tf.summary.scalar(
"inference_loss",
self.inference_loss_
)
# L2 loss"""
reg_loss = 0
for tf_var in tf.trainable_variables():
if not ("Bias" in tf_var.name or "Output_" in tf_var.name):
reg_loss += tf.reduce_mean(tf.nn.l2_loss(tf_var))
tf.summary.scalar('reg_loss',reg_loss)
self.loss = self.loss_ + hparam.lambda_l2_reg * reg_loss
#self.inference_loss= self.inference_loss_ + hparam.lambda_l2_reg * reg_loss
tf.summary.scalar('train_loss_l2', self.loss)
#tf.summary.scalar('inference_loss_l2', self.inference_loss)
# Optimization
optimizer = tf.train.AdamOptimizer(hparam.learning_rate,
beta1=0.9,
beta2=0.999,
epsilon=1e-08,name='Adam')
#self.train_op=optimizer.minimize(self.loss)
self.train_op=optimizer.minimize(self.loss, global_step=self.global_step)
self.saver = tf.train.Saver()
self.merged = tf.summary.merge_all()
def create_model(sess, arguments):
model = Model(arguments)
sess.run(tf.global_variables_initializer())
#ckpt = tf.train.get_checkpoint_state(arguments.ckpt_dir)
#if ckpt and ckpt.model_checkpoint_path:
#print("Restoring old model parameters from %s" %ckpt.model_checkpoint_path)
model.saver.restore(sess,'./model_20000')
#else:
#print("Created new model.")
return model
def train_inference(sess, model, arguments, input_batch, out_seq):
#print ("Started Inference")
#input_batch,out_seq = generate_inference_batches(x_train, y_train, arguments.batch_size, arguments.input_seq_len, arguments.time_major)
dec_targets = np.zeros((out_seq.shape))
feed_dict = {
model.enc_inp:input_batch,
model.decoder_targets:dec_targets, #feeding the targets as zeroes
model.decoder_lengths: np.ones((arguments.batch_size), dtype=int) * arguments.dec_length,
model.enc_seq_len : np.ones((arguments.batch_size), dtype=int) * arguments.enc_length
}
final_preds,inf_loss = sess.run([model.inference_logits,model.inference_loss], feed_dict)
if type(final_preds) == list:
final_preds = final_preds[0]
#print("Start_time" ,start_time)
#pred_outputs = cp_final_preds.copy()
return input_batch,out_seq,final_preds,inf_loss
def validation(sess, model, arguments, x_test, y_test):
#Creating test input and output batches
test_in, test_out= generate_test_batches(x_test, y_test, arguments.batch_size, arguments.input_seq_len, arguments.output_seq_len, arguments.time_major,arguments.n_in_features,arguments.n_out_feature)
test_dec_in =test_out.copy()
test_dec_in =np.zeros((test_dec_in.shape))
test_tar=test_dec_in
feed_dict = {
model.enc_inp:test_in,
model.decoder_targets:test_tar, # feeding the targets as zeroes
model.decoder_lengths: np.ones((arguments.batch_size), dtype=int) * arguments.dec_length,
model.enc_seq_len : np.ones((arguments.batch_size), dtype=int) * arguments.enc_length
}
pred_loss,validation_summary = sess.run([model.loss,model.merged], feed_dict)
return pred_loss,validation_summary
def train(sess, model, arguments, x_train, y_train, x_test, y_test,scaler,train_writer,test_writer):
epoch_num=0
batches_to_comp_all_data = int(arguments.limit/arguments.batch_size)
train_time=time.time()
print ("Training the model")
for t in range(arguments.epochs):
train_input_batch, train_output_batch, arguments.last_batch_no = generate_train_batches(x_train, y_train, arguments.batch_size, arguments.input_seq_len, arguments.output_seq_len, arguments.time_major, arguments.seq_batch, arguments.last_batch_no,arguments.n_in_features,arguments.n_out_feature)
#train_input_batch, train_output_batch, arguments.last_batch_no = generate_train_batches(x_train, y_train, arguments.batch_size, arguments.input_seq_len, arguments.output_seq_len, arguments.time_major, arguments.seq_batch, arguments.last_batch_no,arguments.n_features)
feed_dicts = {
model.enc_inp:train_input_batch,
model.decoder_targets:train_output_batch,
model.decoder_lengths: np.ones((arguments.batch_size), dtype=int) * arguments.dec_length,
model.enc_seq_len : np.ones((arguments.batch_size), dtype=int) * arguments.enc_length
}
_, loss_value,global_steps,train_summary = sess.run([model.train_op, model.loss,model.global_step,model.merged], feed_dict=feed_dicts)
# val_loss,validation_summary = validation(sess, model, arguments, x_test, y_test)
val_loss=None
#train_loses.append(loss_value)
if arguments.epochs % batches_to_comp_all_data ==0:
epoch_num +=1
#_,_, _,inf_loss,test_summary = inference(sess, model, arguments, x_train, y_train)
train_writer.add_summary(train_summary, global_steps)
#test_writer.add_summary(validation_summary, global_steps)
test_loss=None
#inf_loss=None
#valid_test_loses.append(test_loss)
if t %100 ==0:
print("Iteration/Total_Iteration :: {}/{} ,global_steps {} Train_loss_value::{} , Validation_loss {} ,Time :: {}".format(t,arguments.epochs,global_steps,loss_value,val_loss,(time.time() - train_time)))
if global_steps %1000 ==0 and global_steps >=1000:
model_name='./model_' + str(global_steps)
model.saver.save(sess,model_name)
'''
for i in range(0,18):
global start_time
start_time =1550217600 +(i*86400)
arguments.last_ts =[start_time]
x_train, y_train, x_test, y_test, scaler = load_data(arguments)
input_batch, out_seq, predictions, inf_loss = inference(sess, model, arguments, x_train, y_train)
if arguments.time_major:
final_batch_output = predictions[:,-1,:]
else:
final_batch_output = predictions[-1,:,:]
final_batch_output = scaler.inverse_transform(final_batch_output)
# write_data(arguments,final_batch_output)
start_time =1550217600
'''
#model.saver.save(sess, arguments.ckpt_dir)
model.saver.save(sess, './model')
print("Checkpoint saved at model_load")
#start_time=start_time_value
train_time=time.time() -train_time
return train_input_batch,train_output_batch,_,train_time,loss_value,test_loss,global_steps
def inference(sess, model, arguments, x_train, y_train):
print ("Started Inference")
input_batch,out_seq,last_batch = generate_inference_batches(x_train, y_train, arguments.batch_size, arguments.input_seq_len, arguments.time_major,arguments.n_in_features,arguments.n_out_feature)
#print("in", input_batch[:,-1,:] )
#print("out",out_seq[:,-1,:])
if arguments.n_in_features != arguments.n_out_feature:
enc_in = last_batch
print("singnal are not same input is last batch")
else:
enc_in = out_seq
print("features are equal")
dec_targets = np.zeros((out_seq.shape))
feed_dict = {
model.enc_inp:enc_in,
model.decoder_targets:dec_targets, #feeding the targets as zeroes
model.decoder_lengths: np.ones((arguments.batch_size), dtype=int) * arguments.dec_length,
model.enc_seq_len : np.ones((arguments.batch_size), dtype=int) * arguments.enc_length,
#model.keep_prob : 1
}
"""if arguments.loopback:
final_preds, inf_loss = sess.run([model.inference_logits,model.inference_loss], feed_dict)
else:"""
final_preds, inf_loss = sess.run([model.logits, model.loss], feed_dict)
if type(final_preds) == list:
final_preds = final_preds[0]
return input_batch,out_seq, final_preds, inf_loss
def capture_info(arguments,train_time,loss_value,test_loss):
f=open("meta_data.txt","a+")
f.write("read table : %s\n write table :%s \n interval : %d\n limit :%d \n decoder_input_as_target : %r\n attention: %r \n inference: %r \n time major : %r \n seq_batch: %r\ninput seq length:%d\n batch size:%d\n learning rate:%f \n hidden_dim:%d\n epochs:%d \ntrain_time =%d \nloss_value=%f \n\n\n\n"%(arguments.tables[0],arguments.w_measurement[0],arguments.interval,arguments.limit,arguments.decoder_input_as_target,arguments.use_attention,arguments.inference,arguments.time_major,arguments.seq_batch,arguments.input_seq_len,arguments.batch_size,arguments.learning_rate,arguments.hidden_dim,arguments.epochs,train_time,loss_value))
f.close()
return
#start_time_value=1535412645 #time.time() August 13, 2018 3:30:45 AM
start_time =1543622400 #+(i*86400) #time.time() # start_time_value
tf.reset_default_graph()
sess = tf.InteractiveSession()
train_writer = tf.summary.FileWriter("logs/train", sess.graph)
test_writer = tf.summary.FileWriter("logs/test", sess.graph)
arguments = parameters()
model = create_model(sess, arguments)
x_train, y_train, x_test, y_test, scaler = load_data(arguments)
#print("x_train and Y_train" ,x_train[-5:] , "\n" ,y_train[-5:])
train_input_batch,train_output_batch,train_outputs,train_time,loss_value,test_loss,global_steps=train(sess, model, arguments, x_train, y_train, x_test, y_test,scaler,train_writer,test_writer)
for i in range(0,18):
global strat_time
start_time =1543622400 +(i*86400) #time.time() # start_time_value
arguments.last_ts =[start_time]
# tf.reset_default_graph()
# sess = tf.InteractiveSession()
# train_writer = tf.summary.FileWriter("logs/train", sess.graph)
# test_writer = tf.summary.FileWriter("logs/test", sess.graph)
# arguments = parameters()
# model = create_model(sess, arguments)
x_train, y_train, x_test, y_test, scaler = load_data(arguments)
# print("test: " , x_test[:5] , "Y ", y_test[:5])
#train_input_batch,train_output_batch,train_outputs,train_time,loss_value,test_loss,global_steps=train(sess, model, arguments, x_train, y_train, x_test, y_test,scaler,train_writer,test_writer)
input_batch, out_seq, predictions, inf_loss = inference(sess, model, arguments, x_train, y_train)
if arguments.time_major:
final_batch_output = predictions[:,-1,:]
else:
final_batch_output = predictions[-1,:,:]
final_batch_output = scaler.inverse_transform(final_batch_output)
write_data(arguments,final_batch_output)
#color='g' # g r b k.d(45k e)
#last_batch_data_ploting(input_batch,predictions,scaler,out_seq,arguments.time_major,color )
#capture_info(arguments,train_time,loss_value,test_loss)
print("Final Loses::: , Train_loss {} , Test_loss {} , Infrence_loss {}, Global_steps {}".format(loss_value,test_loss,inf_loss,global_steps))
sess.close()
| [
"root@instance-2.us-east1-b.c.ambient-odyssey-225004.internal"
] | root@instance-2.us-east1-b.c.ambient-odyssey-225004.internal |
201d528f01419e9342de599c7b739aa878027e60 | fbd19c4627fd800a1d32fc2d0b5eebc5c3989d51 | /Algebra2/June2016/question3.py | 1c0e34511c34b66227b9a0e636f28b066a65af51 | [] | no_license | AhhhHmmm/Regents-Problem-Generator | 147df493ff70f7795e9620336691642a18705d70 | 12e1b21cad6c1c9f6dcc9231d4e7588998b72794 | refs/heads/master | 2021-04-28T16:15:30.738142 | 2018-02-19T03:41:37 | 2018-02-19T03:41:37 | 122,011,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,298 | py | import random
import string
import pyperclip
def generateQuestion():
problemText = '''\paragraph{\\arabic{qnumber}.} \\hspace{-4mm} Given $i$ is the imaginary unit, $\\displaystyle (NUMBER1NUMBER2LETTER1i)^2$ in simplest form is:
\\\\
{
\\renewcommand{\\arraystretch}{2.0}\\begin{tabular}{p{2in} p{2in}}
\\circled{1} CHOICE1
& \\circled{3} CHOICE3 \\\\
\\circled{2} CHOICE2
& \\circled{4} CHOICE4 \\\\
\\end{tabular}
}
% Source: Source
\\stepcounter{qnumber}'''
slots = ['NUMBER1', 'SIGN', 'NUMBER2', 'LETTER1', 'CHOICE1', 'CHOICE2', 'CHOICE3', 'CHOICE4',]
letters = list(string.ascii_lowercase)
letters.remove('i')
number1 = random.choice([-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8,9,10])
number2 = random.choice([-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8,9,10])
letter1 = random.choice(letters)
# number1 = -2
# number2 = 1
# letter1 = 'y'
if number2 < 0:
sign = '-'
else:
sign = '+'
# choices
if abs(number2 ** 2) == 1:
firstPlaceHolder = ''
else:
firstPlaceHolder = number2 ** 2
# choice1
if number1 * number2 < 0:
choiceSign = '-'
else:
choiceSign = '+'
choiceA = '${}{}^2{}{}{}i+{}$'.format(firstPlaceHolder,letter1, sign, abs(2 * number1 * number2), letter1, number1 ** 2)
#choice2
if number1 * number2 < 0:
choiceSign = '-'
else:
choiceSign = '+'
choiceB = '$-{}{}^2{}{}{}i+{}$'.format(firstPlaceHolder,letter1, sign, abs(2 * number1 * number2), letter1, number1 ** 2)
choiceC = '$-{}{}^2+{}$'.format(firstPlaceHolder, letter1, number1 ** 2)
choiceD = '${}{}^2+{}$'.format(firstPlaceHolder, letter1, number1 ** 2)
# shufflechoices
choices = [choiceA, choiceB, choiceC, choiceD]
random.shuffle(choices)
choice1 = choices[0]
choice2 = choices[1]
choice3 = choices[2]
choice4 = choices[3]
for slot in slots:
replacementText = str(eval(slot.lower()))
if slot == 'NUMBER2' and int(replacementText) > 0:
replacementText = '+' + replacementText
if slot == 'NUMBER2' and int(replacementText) == 1:
replacementText = '+'
elif slot == 'NUMBER2' and int(replacementText) == -1:
replacementText = '-'
problemText = problemText.replace(slot, replacementText)
return problemText
if __name__ == '__main__':
questionText = generateQuestion()
pyperclip.copy(questionText)
print(questionText) | [
"cary.e.riina@gmail.com"
] | cary.e.riina@gmail.com |
3eb9f8311c19907bd05c497e4d9e866c946320af | 53c1f1e3fa4cbba4466a747fb136e6927def5bbe | /PollSite/urls.py | f88d1483bf5eb98680fb6482fca8bae1f498d1ab | [] | no_license | atulya22/polling-app | fed4515133416b1a3f2e05daeb306328e3072de4 | 856885d49864ca2eb9ca5c8118e4699988b73b78 | refs/heads/master | 2022-10-12T06:22:47.864919 | 2020-06-12T02:59:00 | 2020-06-12T02:59:00 | 267,447,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | """PollSite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('admin/', admin.site.urls),
path('polls/', include('polls.urls'))
]
| [
"atulya.shetty@hotmail.com"
] | atulya.shetty@hotmail.com |
3e91fc3a8877a7a3cadfa72e6a20735be6943aa6 | 1a2991d366ea6ed037e1cae7c9b8ac434d8ae7a7 | /analyze_critical_cache_size_fib_1a.py | dbaae94dcbe34ebebde82293a48e986924704a51 | [] | no_license | benjamingarrett/table_maker100 | 15c91be26e666894f3a5b4df0175e7a3a5d8fab9 | fd2725188ed8459950f112a312de7e5970f4cb5f | refs/heads/master | 2022-06-16T14:48:59.027859 | 2022-05-26T23:52:06 | 2022-05-26T23:52:06 | 188,909,736 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,375 | py | import csv,os,sys,matplotlib as plt
from math import log
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
def running_max(data,n0,n2):
running_max_x = [data[n0][0]]
running_max_y = [data[n0][1]]
j = n0
while j<=n2:
running_max_x.append(data[j][0])
if data[j][1] > running_max_y[-1]:
running_max_y.append(data[j][1])
else:
running_max_y.append(running_max_y[-1])
j+=1
return running_max_x, running_max_y
def left_endpoints(x,y,n1):
mx_x=[]
mx_y=[]
j=0
while x[j]<=n1:
if y[j] > y[j-1]:
mx_x.append(x[j])
mx_y.append(y[j])
j+=1
return mx_x, mx_y
def right_endpoints(x,y,n1):
mn_x=[]
mn_y=[]
j=0
while x[j]<=n1:
if y[j] > y[j-1]:
mn_x.append(x[j-1])
mn_y.append(y[j-1])
j+=1
return mn_x, mn_y
def semilogx_regression(x,y,basex=2):
lgx = [log(k,basex) for k in x]
m, b, r, p, err = stats.linregress(lgx,y)
return m, b
def second_method(mx_x,mx_y,mn_x,mn_y,m_upper,b_upper,m_lower,b_lower,basex=2,eps=0.0001):
m_upper_best = m_upper
success = False
#success = True
while not success:
j = 0
m_upper_best += eps
while j<len(mx_x):
if m_upper_best*log(mx_x[j],basex)+b_upper < mx_y[j]:
success = False
break
else:
success = True
j += 1
#print('{} {}'.format(success,m_upper_best))
m_lower_best = m_lower
success = False
#success = True
while not success:
j = 0
m_lower_best -= eps
while j<len(mn_x):
if m_lower_best*log(mn_x[j],basex)+b_lower > mn_y[j]:
success = False
break
else:
success = True
j += 1
#print('{} {}'.format(success,m_lower_best))
return m_upper_best, b_upper, m_lower_best, b_lower
def confirm_conjecture(m_upper,b_upper,m_lower,b_lower,data_x,data_y,basex=2):
upper_errors = 0
lower_errors = 0
j = 0
while j<len(data_x):
if data_y[j] > m_upper*log(data_x[j],basex)+b_upper:
upper_errors += 1
if data_y[j] < m_lower*log(data_x[j],basex)+b_lower:
lower_errors += 1
j += 1
return upper_errors, lower_errors
lists=[(int(x[0]),int(x[1]),int(x[2])) for x in [ln.rstrip('\n').split(',') for ln in open(sys.argv[1])]]
lg_base=2
# each item in lists is an experimental trial
if len(sys.argv) >= 3:
n0 = int(sys.argv[2])-1 # lowest n
else:
n0 = 0
if len(sys.argv) >= 4:
n1 = int(sys.argv[3])-1 # highest n
else:
n1 = len(lists)-1
if len(sys.argv) >= 5:
n2 = int(sys.argv[4])-1 # confirmation max n
else:
n2 = len(lists)-1
print('using n between {} and {}, confirming to {}'.format(n0,n1,n2))
j = n0
d_x, d_y = [], []
while j<=n2:
d_x.append(lists[j][0])
d_y.append(lists[j][1])
j+=1
running_max_x, running_max_y = running_max(lists,n0,n2)
mx_x, mx_y = left_endpoints(running_max_x,running_max_y,n1)
mn_x, mn_y = right_endpoints(running_max_x,running_max_y,n1)
m_max, b_max = semilogx_regression(mx_x,mx_y,lg_base)
m_min, b_min = semilogx_regression(mn_x,mn_y,lg_base)
bound_x = [lists[n0][0],lists[n1][0]]
conjecture_x = [lists[n0][0],lists[n2][0]]
# method 0 - original regression
upper_y = [m_max*log(lists[n0][0],lg_base)+b_max,m_max*log(lists[n1][0],lg_base)+b_max]
lower_y = [m_min*log(lists[n0][0],lg_base)+b_min,m_min*log(lists[n1][0],lg_base)+b_min]
# method 1 - adjust slope to bound sample data
m_upper_second, b_upper_second, m_lower_second, b_lower_second = second_method(mx_x,mx_y,mn_x,mn_y,m_max,b_max,m_min,b_min)
m_upper_second_r, b_upper_second_r, m_lower_second_r, b_lower_second_r = second_method(running_max_x[0:n1],running_max_y[0:n1],running_max_x[0:n1],running_max_y[0:n1],m_max,b_max,m_min,b_min)
print('using endpoints {} {} {} {}'.format(m_upper_second, b_upper_second, m_lower_second, b_lower_second))
print('Use running max {} {} {} {}'.format(m_upper_second_r, b_upper_second_r, m_lower_second_r, b_lower_second_r))
upper_y_second = [m_upper_second*log(lists[n0][0],lg_base)+b_upper_second, m_upper_second*log(lists[n2][0],lg_base)+b_upper_second]
lower_y_second = [m_lower_second*log(lists[n0][0],lg_base)+b_lower_second, m_lower_second*log(lists[n2][0],lg_base)+b_lower_second]
up_errors_2,lo_errors_2 = confirm_conjecture(m_upper_second,b_upper_second,m_lower_second,b_lower_second,running_max_x,running_max_y)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.semilogx(d_x,d_y,basex=lg_base,label='critical cache size for problem size (original data)')
ax.semilogx(running_max_x,running_max_y,basex=lg_base,label='running max of critical cache size')
ax.semilogx(mx_x,mx_y,'o',basex=lg_base,label='left endpoints of running max')
ax.semilogx(mn_x,mn_y,'*',basex=lg_base,label='right endpoints of running max')
ax.semilogx(bound_x,lower_y,basex=lg_base,label='regression for right endpoints {} {}'.format(m_min,b_min))
ax.semilogx(bound_x,upper_y,basex=lg_base,label='regression for left endpoints {} {}'.format(m_max,b_max))
#ax.semilogx(conjecture_x,lower_y_first,basex=lg_base,label='lower (1st method) {} {} errors {}'.format(m_lower_first,b_lower_first,lo_errors_1))
#ax.semilogx(conjecture_x,upper_y_first,basex=lg_base,label='upper (1st method) {} {} errors {}'.format(m_upper_first,b_upper_first,up_errors_1))
ax.semilogx(conjecture_x,lower_y_second,basex=lg_base,label='lower (2nd method) {} {} errors {}'.format(m_lower_second,b_lower_second,lo_errors_2))
ax.semilogx(conjecture_x,upper_y_second,basex=lg_base,label='upper (2nd method) {} {} errors {}'.format(m_upper_second,b_upper_second,up_errors_2))
#ax.semilogx(conjecture_x,lower_y_third,basex=lg_base,label='lower (3rd method) {} {} errors {}'.format(m_lower_second,b_lower_first,lo_errors_3))
#ax.semilogx(conjecture_x,upper_y_third,basex=lg_base,label='upper (3rd method) {} {} errors {}'.format(m_upper_second,b_upper_first,up_errors_3))
ax.legend(loc='upper left')
plt.xlabel('problem size (n)')
plt.ylabel('critical cache size')
t = 'Fibonacci version 1a, critical cache size as a function of problem size\n'
t+= 'critical cache size is the smallest cache size having cache misses which are minimal for problem size\n'
t+= 'sample n is between {} and {}, '.format(lists[n0][0],lists[n1][0])
t+= 'confirmation n is between {} and {}\n'.format(lists[n0][0],lists[n2][0])
#t+= '1st method: adjust y-intercept\n'
#t+= '2nd method: adjust slope\n'
#t+= '3rd method: use intercept from first, slope from second'
plt.title(t)
plt.show()
| [
"bgarrett@temple.edu"
] | bgarrett@temple.edu |
e1e5622cf3c28204c5ae5bd30842576ad64cd4c5 | df936ab5dea329cf5f7cb8a6c8f8313cf8f3230c | /test_framework/.ipynb_checkpoints/constants-checkpoint.py | df90dca2ff2414a07b615bc8359efadd67861c6e | [
"MIT"
] | permissive | chuck-b/ml_occupancy_model_comparison | e2350f8398eef122c81646d98b8888d118fb6ad5 | c0e13ebc0fdfd0e81259ff8755923470e7f11abe | refs/heads/master | 2020-04-18T08:09:35.812765 | 2019-05-24T15:16:06 | 2019-05-24T15:16:06 | 167,386,623 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 310 | py |
COLUMN_VARIABLES = ['M_t', 'H_t', 'W_t']
RAW_DATA_DIRECTORY = '/home/brent/projects/occupancy_state/data'
RAW_DATA_DIRECTORY = '/Users/chuck/Documents/PhD_Work/home_occupancy_state/data'
TRAIN_TEST_SPLIT_VALS = {'test_1':'2017-08-06', 'test_2':'2017-01-15',
'test_3':'2017-05-14', 'test_4':'2016-10-16'}
| [
"30674217+chuck-b@users.noreply.github.com"
] | 30674217+chuck-b@users.noreply.github.com |
0c952fc3aa4aa09f6feda7658c8d267c80f1993a | c9a5b96ee2ccfddc08d10b46fd164ef49984e910 | /python/binarytree/102_binaryTreeLevelOrder.py | 3dfe2b5d2d4bc605f45465af5948bc85fb3b9755 | [] | no_license | Davidhfw/algorithms | 743e2c9bb891d289882c565c30a86b591c014f3f | cb6491f3be750fd0be6ee6c9eba14fc441e870b2 | refs/heads/master | 2023-03-07T19:48:47.583112 | 2021-02-24T14:18:22 | 2021-02-24T14:18:22 | 278,771,931 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,183 | py | # 题目描述
# 给你一个二叉树,请你返回其按 层序遍历 得到的节点值。 (即逐层地,从左到右访问所有节点)。
# 示例:
# 二叉树:[3,9,20,null,null,15,7],
#
# 3
# / \
# 9 20
# / \
# 15 7
# 返回其层次遍历结果:
#
# [
# [3],
# [9,20],
# [15,7]
# ]
# 解题思路: 1 广度优先遍历: 将每一层的节点加入到队列中, 遍历队列,将值加入列表中
# 解题思路2: 深度优先遍历, 将每一次的结果加入到列表中,并记录列表层数,实现对树的层次遍历
# Definition for a binary tree node.
import collections
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def level_order_bfs(self, root):
if not root:
return []
# 创建一个双端队列,用来保存树中每一层的节点
queue = collections.deque()
res = []
# 先将root节点加入到队列中
queue.append(root)
# visited = set()
# 只要队列不为空,就一直循环
while queue:
cur_level_len = len(queue)
cur_res = []
for _ in range(cur_level_len):
cur_node = queue.popleft()
cur_res.append(cur_node.val)
if cur_node.left:
queue.append(cur_node.left)
if cur_node.right:
queue.append(cur_node.right)
res.append(cur_res)
return res
def level_order_dfs(self, root):
if not root: return []
self.res = []
self._dfs(root, 0)
return self.res
def _dfs(self, node, level):
if not node: return
if len(self.res) < level + 1:
self.res.append([])
self.res[level].append(node.val)
self._dfs(node.left, level + 1)
self._dfs(node.right, level + 1)
if __name__ == '__main__':
root = TreeNode(9)
root.left = TreeNode(11)
root.right = TreeNode(9)
root.right.left = TreeNode(7)
root.right.right = TreeNode(0)
res = Solution().level_order_bfs(root)
print(res)
| [
"wuhaifenger@163.com"
] | wuhaifenger@163.com |
b3e5a17f360ef8e8b663ce5a0ab75242da5653b7 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/coverage-big-2633.py | 5aadfd5c6ad8f97ff1c98cd44fa7abe439d9a7dd | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,350 | py | count:int = 0
count2:int = 0
count3:int = 0
count4:int = 0
count5:int = 0
def foo(s: str) -> int:
return len(s)
def foo2(s: str, s2: str) -> int:
return len(s)
def foo3(s: str, s2: str, s3: str) -> int:
return len(s)
def foo4(s: str, s2: str, s3: str, s4: str) -> int:
return len(s)
def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz(self:"bar", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar2(object):
p: bool = True
p2: bool = True
def baz(self:"bar2", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar2", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar3(object):
p: bool = True
p2: bool = True
p3: bool = True
def baz(self:"bar3", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar3", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar3", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar4(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
def baz(self:"bar4", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar4", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
$ID = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar4", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar5(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
p5: bool = True
def baz(self:"bar5", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar5", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
x5:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
y5:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
nonlocal x5
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
eccccd053ebbb0f41606bb46dd008d8a9db4b5c7 | 233bf4637fc9d59038f2ede3c9d634356d403498 | /estocastica/estadisticaConceptos/env/Scripts/bokeh-script.py | 3212c78602142ac8410ce974cc7f5077b8f87748 | [] | no_license | nanoDevep/PythonCoursesPlatzi | d89d186fed418ae690ddb385da0dfcd2bd4e3a15 | 0d6865013d3515e7dd98d2c63e31b4cbd7816015 | refs/heads/main | 2023-03-06T02:48:23.509211 | 2021-02-18T14:04:28 | 2021-02-18T14:04:28 | 340,063,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,031 | py | #!c:\users\alcum\onedrive\documents\pythonpro\estocastica\estadisticaconceptos\env\scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'bokeh==2.2.3','console_scripts','bokeh'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'bokeh==2.2.3'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('bokeh==2.2.3', 'console_scripts', 'bokeh')())
| [
"nanodevep@gmail.com"
] | nanodevep@gmail.com |
bf907fe38c4e0e7f7ff395d2e45c359898045560 | 8c7c48b9c3e00309730452ac12c63d7f09ed5411 | /Mix/st002 - sign.py | b7b575c5b5c099ae2b5067ca2973c38c9d3798a6 | [] | no_license | eugennix/python | dfe12bdb9b7a4e0d5d33722235aca02f1eb45caf | 43fdbf0565080abe296a8c93cef5142a57e77be4 | refs/heads/master | 2023-05-11T23:30:42.080518 | 2023-04-30T09:36:31 | 2023-04-30T09:36:31 | 211,489,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | """
Напишите функцию f(x), принимающую дробное число x и
возвращающую значение следующей функции, определённой на всей числовой прямой:
1−(x+2)^2 при x≤−2
f(x)= −x/2, при −2<x≤2
(x−2)^2+1 при 2<x
Требуется реализовать только функцию, решение не должно осуществлять операций ввода-вывода.
"""
# v1
def f(x):
if -2 < x <= 2:
res = -x / 2
elif x < 0:
res = 1 - (x + 2)**2
else:
res = 1 + sign(x) * (x - 2)**2
return res
#v2
sign = lambda x: x and (1, -1)[x < 0]
f2 = lambda x: -x/2 if -2 < x <= 2 else (x and (1,-1)[x<0])*(abs(x)-2)**2 + 1
print(f(int(input('x : '))))
print(1**-3)
print(1**3)
for i in range(-5,5):
print(f(i), f2(i)) | [
"eugennix@gmail.com"
] | eugennix@gmail.com |
1109d9fc12450d72c877f8421524dec14856099a | 937fed25ae53e5b6114478fd78ad5d270c6e141c | /a2b2 lp2.2/questão.py | 76bf9baec3dde8bedab049c6bec043e3f2922e0c | [] | no_license | IgorMChaves/lp2.2 | d7f0a2e98c403eeba38c0ea8749db0f1e38b9879 | e92b7e1e693440e6120b88fbe74262d39a433244 | refs/heads/main | 2023-07-12T23:03:08.032212 | 2021-08-27T19:59:25 | 2021-08-27T19:59:25 | 391,220,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,091 | py | arquivo = open("notas.csv", "r")
arquivo.seek(0,0)
mod = []
a = arquivo.readlines()
arquivo.seek(0,0)
cont = len(arquivo.read().split())
arquivo.seek(0,0)
str_arquivo = arquivo.read()
arquivo.seek(0,0)
aux = []
vet = []
n = 0
aux_medias = []
adicao = []
for m in range(0, cont, 6):
vet.append(arquivo.read().split()[m])
arquivo.seek(0,0)
altera = "".join(vet[n])
str_arquivo = str_arquivo.replace(altera, "")
arquivo.seek(0,0)
n = n+1
for m in range(0, n):
aux_total = a[m].replace(vet[m], "").replace(",","").split()
maximo = max(aux_total)
minimo = min(aux_total)
mod.append(a[m].replace(maximo, "").replace(minimo, ""))
vet_aux = vet
vet = sorted(set(vet))
for m in range(0, len(vet)):
for n in range(0, len(mod)):
if vet[m] in mod[n]:
aux.append(mod[n])
aux_medias = "".join(aux)
for m in range(0, len(vet)):
aux_medias = aux_medias.replace(vet[m],"")
split_aux = aux_medias.replace(",","").split()
for m in range(0, len(split_aux), 3):
compilador = 0
for n in range(m, m+3):
compilador = float(split_aux[n]) + compilador
adicao.append(round(compilador/3, 2))
vet_aux = sorted(vet_aux)
tentativa = []
var_media = []
ajuda = []
for m in range(0, len(vet_aux)):
testando = vet_aux[m], adicao[m]
tentativa.append(testando)
tentativa = list(tentativa)
for m in range(0, len(vet)):
for n in range(0, len(tentativa)):
if vet[m] in tentativa[n]:
var_media.append(tentativa[n][1])
if n == len(tentativa)-1:
var_media.append("\n")
for m in range(0, len(var_media)):
notas = [str(i) for i in var_media]
var_media = " ".join(notas).split("\n")
resultado = 0
i = 0
tentativa.clear()
print('---TOTAL---')
for m in range(0, len(var_media)-1):
var_media[m] = sorted(var_media[m].split())[len(var_media[m].split())-2:len(var_media[m].split())]
resultado = 0
for n in range(0,2):
resultado = float(var_media[m][n]) + resultado
if m != len(var_media)-1:
print("".join(vet[i]).replace(",", " ->"), round(resultado, 2))
i = i+1 | [
"igor.1904.chaves@gmail.com"
] | igor.1904.chaves@gmail.com |
8389ee6aab1b0b70d717cb058b2851a24daaa3aa | e776bed93b58050c26e8d5c37709cca0b48717c5 | /QtProject/server/server.py | e56799c197bd0c16fa76a696560e5d4bcc31a7a0 | [] | no_license | 4e6yPe4eK/Qt | 4e0f25c088f377433bc96d95881ee2c3b08a819f | cdd5d30cf6c31e6b642c7fd6082e67c178d27c18 | refs/heads/main | 2023-01-28T14:29:25.694530 | 2020-12-02T10:07:02 | 2020-12-02T10:07:02 | 317,820,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,573 | py | # -*- coding: utf-8 -*-
# Самая сложная часть проекта - серверная
import sqlite3
import uuid
import socket
import shlex
import os
import datetime
con = sqlite3.connect(r'info.db')
cur = con.cursor()
qte = shlex.quote
def reg_check_login(login):
    # True when an account with this user name already exists.
    rows = list(cur.execute(rf'SELECT name FROM login WHERE name = ?', (login, )))
    return bool(rows)
def reg(login, salt, key):
    # Persist a new account (name, salt, derived key) in the database.
    # Silently does nothing when the name is already taken.
    if reg_check_login(login):
        return
    cur.execute(fr'INSERT INTO login (name, salt, key) VALUES (?, ?, ?)',
                (login, str(salt), str(key)))
    con.commit()
def login_get_salt(login):
    # Return the stored password salt for this user name, or None if unknown.
    rows = list(cur.execute(fr'SELECT salt FROM login WHERE name = ?', (login, )))
    if rows:
        return rows[0][0]
def login(login, key):
    """Authenticate (login, key); on success, replace any previous session
    key for this user with a fresh UUID and return it (None on failure)."""
    ret = list(cur.execute(fr'SELECT name FROM login WHERE name = ? AND key = ?', (login, str(key))))
    if len(ret) > 0:
        id = str(uuid.uuid4())
        cur.execute(fr'DELETE FROM keys WHERE name = ?', (login, ))
        cur.execute(fr'INSERT INTO keys (name, key) VALUES (?, ?)', (login, id))
        # BUGFIX: persist the new session key. reg() commits after writing,
        # but login() did not, so the key row could be lost before any other
        # commit happened on this connection.
        con.commit()
        return id
def add_row(key, name, about, file_name):
    """Insert a data row owned by the user of session `key`.

    `file_name == '#None#'` is the wire sentinel for "no attachment";
    otherwise the named local file is read into the BLOB column and then
    deleted from disk.
    """
    ret = list(cur.execute(fr'SELECT name FROM keys WHERE key = ?', (key, )))
    if len(ret) > 0:
        author = ret[0][0]
        s = 'INSERT INTO data(author, name, about, file_name, file) VALUES(?, ?, ?, ?, ?)'
        if file_name == '#None#':
            data = (author, name, about, None, None)
        else:
            # `with` closes the handle even if the INSERT below fails.
            with open(file_name, 'rb') as f:
                data = (author, name, about, file_name, f.read())
        cur.execute(s, data)
        con.commit()
        if file_name != '#None#':
            # BUGFIX: the original removed `file_name` unconditionally, which
            # raised FileNotFoundError when it was the '#None#' sentinel.
            os.remove(file_name)
def get_all_rows(key):
    # Return every (name, time, about, file_name) row owned by the session
    # key's user, or None when the key is unknown.
    owner = list(cur.execute('SELECT name FROM keys WHERE key = ?', (key,)))
    if not owner:
        return None
    rows = cur.execute('SELECT name, time, about, file_name FROM data WHERE author = ?',
                       (owner[0][0], ))
    return list(rows)
def get_file(key, date):
    # Return (file_name, blob) for the row saved at `date` by the session
    # key's user, or None when the key or the row is unknown. `date` is
    # whitelisted to digits/space/colon/dash before the query.
    owner = list(cur.execute('SELECT name FROM keys WHERE key = ?', (key,)))
    if not owner:
        return None
    allowed = '0987654321 :-'
    cleaned = ''.join(ch for ch in date if ch in allowed)
    query = 'SELECT file_name, file FROM data WHERE author = ? AND time = ?'
    found = list(cur.execute(query, (owner[0][0], cleaned)))
    if found:
        return found[0]
# --- Request loop -----------------------------------------------------------
# Listens on TCP port 20951 and handles one shell-quoted text command per
# connection: the first token selects a handler, the rest are its arguments.
# Every request, and every handler failure, is appended to server.log.
sock = socket.socket()
sock.bind(('', 20951))
log_file = open('server.log', 'a+')
while True:
    sock.listen(1)
    conn, addr = sock.accept()
    data_list = []
    data = conn.recv(1024)
    if data:
        try:
            data = data.decode('utf-8')
            # Audit log: timestamped raw request, flushed to disk immediately.
            log_file.write(str(datetime.datetime.today()) + " ||| " + data + '\n')
            log_file.flush()
            os.fsync(log_file.fileno())
            data = shlex.split(data)
            if data[0] == 'reg_check_login':
                # Reply 'True'/'False': is this user name already taken?
                if not reg_check_login(data[1]):
                    conn.send(b'False')
                else:
                    conn.send(b'True')
            elif data[0] == 'reg':
                # Create an account (name, salt, derived key). No reply.
                reg(data[1], data[2], data[3])
            elif data[0] == 'login_get_salt':
                # Reply with the stored salt, or nothing for unknown users.
                s = login_get_salt(data[1])
                if s:
                    conn.send(s.encode('utf-8'))
            elif data[0] == 'login':
                # Reply with a fresh session key on success, nothing otherwise.
                s = login(data[1], data[2])
                if s:
                    conn.send(s.encode('utf-8'))
            elif data[0] == 'add_row':
                key = data[1]
                name = data[2]
                about = data[3]
                file_name = data[4]
                # An attachment (if any) is streamed after the command line
                # and spooled to a local file that add_row() stores/deletes.
                if file_name != '#None#':
                    file = open(file_name, 'wb')
                    while True:
                        data = conn.recv(1024)
                        file.write(data)
                        if not data:
                            break
                    file.close()
                add_row(key, name, about, file_name)
            elif data[0] == 'get_all_rows':
                key = data[1]
                # Reply: shell-quoted repr of the user's row list.
                ret = qte(str(get_all_rows(key))).encode('utf-8')
                conn.send(ret)
            elif data[0] == 'get_file':
                # Stream the stored blob back in 1 KiB chunks via a temp file.
                # NOTE(review): raises if no row matched (ret is None), and only
                # this branch closes the connection explicitly — confirm intent.
                ret = get_file(data[1], data[2])
                file = open(ret[0], 'wb')
                file.write(ret[1])
                file.close()
                with open(ret[0], 'rb') as file:
                    data = file.read(1024)
                    while data:
                        conn.send(data)
                        data = file.read(1024)
                file.close()
                conn.send(b'')
                conn.close()
                os.remove(ret[0])
        except Exception as err:
            # Any handler error is logged with separators; the loop then
            # continues with the next connection.
            log_file.write('===================================\n' +
                           str(datetime.datetime.today()) + ' ||| ' + str(err) +
                           '\n===================================\n')
            log_file.flush()
            os.fsync(log_file.fileno())
"noreply@github.com"
] | 4e6yPe4eK.noreply@github.com |
effff6ef63e0b83062a803e9c520c22bb11dfa64 | eace74f71484550153ee4e230861e29f298b373b | /min.1.py | af90a7b0d24931355eaebe75e9b76c537587d52e | [] | no_license | AlekhyaMupparaju/pyothon | 187a8619dd3c2750ff30789813135bf5cdb41042 | a136255a2a7e24b40d3a55341aa7bda5ed4f0f78 | refs/heads/master | 2020-03-27T03:06:02.961797 | 2019-02-23T07:10:50 | 2019-02-23T07:10:50 | 145,838,858 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | min= int(raw_input())
def lowest(arr, n):
    """Return the smallest of the first n elements of arr (assumes n >= 1).

    Linear scan; the local was renamed from `min` so it no longer shadows
    the builtin of the same name.
    """
    smallest = arr[0]
    for i in range(1, n):
        if arr[i] < smallest:
            smallest = arr[i]
    return smallest
# Demo: smallest element of the whole list (expected output: 1).
arr = [1,2,3,4,5]
n = len(arr)
Ans = lowest(arr,n)
print (Ans)
| [
"noreply@github.com"
] | AlekhyaMupparaju.noreply@github.com |
dd8121161e7823faa47d12b3c3de90b0f64b3ddf | 6e4351defca46f48ba75a5f85470b3bac9acb80d | /get_product_logo.py | 6c27a9b19b56aa0b7353da982f9b05d1071d5186 | [] | no_license | pelowok/potato-webscraper | e1e34dc2da84d17ef608c85b05862395e69c75e5 | 0b02ca7bee1678bcf58576eb142744e67436fad9 | refs/heads/master | 2021-03-27T16:52:26.489589 | 2017-02-15T16:53:01 | 2017-02-15T16:53:01 | 68,972,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | from bs4 import BeautifulSoup
def get_product_logo(soup=''):
    """Retrieve the product logo image URL from parsed page HTML.

    Tries a series of known logo containers in priority order; the first hit
    wins. Returns the image's ``src`` with any trailing cache-busting query
    string stripped, or '' when no known container matches.
    (BUGFIX: ``logo`` was initialised to a list, so the no-match path crashed
    on ``logo.split``; it now falls through to an empty string. The bare
    Python-2 print statement was also made version-neutral.)
    """
    logo = ''
    if soup.find("div", {"class" : "logo"}):
        logo = soup.find("div", {"class" : "logo"}).find("img")['src']
    elif soup.find("div", {"id" : "logo"}):
        logo = soup.find("div", {"id": "logo"}).find("img")['src']
    elif soup.find("div", {"id": "identity"}):
        logo = soup.find("div", {"id": "identity"}).find("img")['src']
    elif soup.find("a", {"title": "MAXfocus"}):
        logo = soup.find("a", {"title": "MAXfocus"}).find("img")['src']
    elif soup.find("img", {"title": "MaxBackup-Logo.png"}):
        logo = soup.find("img", {"title": "MaxBackup-Logo.png"})['src']
    elif soup.find("img", {"title": "GFI MAX"}):
        logo = soup.find("img", {"title": "GFI MAX"})['src']
    else:
        print("product_logo not found")
    # remove the cache-busting from end of URLs
    logo = logo.split("?")[0]
    return logo
"zak@mynameiszak.com"
] | zak@mynameiszak.com |
0d23bd2ace5526d7b5eefa7986ad5b5b1b339823 | 72ef98ece9166f1499aea2066daad075ca16a5a3 | /mv2pygame_test.py | 36952eef83853877d634cd1457181871670937fa | [
"MIT"
] | permissive | robotika/h264-drone-vision | fead1eb57582127b00cfa66a797122a35590b23e | 9c212370be5edf2b5b703ea63a4244bf0955d378 | refs/heads/master | 2021-06-14T22:05:03.422516 | 2015-03-23T10:05:33 | 2015-03-23T10:05:33 | 13,609,669 | 3 | 5 | null | 2013-12-03T15:36:24 | 2013-10-16T04:26:35 | Python | UTF-8 | Python | false | false | 964 | py | from mv2pygame import *
import unittest
class Mv2PygameTest( unittest.TestCase ):
    # Unit tests for the mv2pygame motion-vector helpers (addPic,
    # estMovement, compensateMovement, averageShift, subShift).

    def testAddPic( self ):
        # Element-wise addition of two motion-vector grids.
        pic1 = [[(1,1),(2,3)]]
        self.assertEqual( addPic(pic1,pic1), [[(2,2),(4,6)]])

    def testEstMovement( self ):
        # Least-squares movement estimate returns ((k1x,k0x),(k1y,k0y)).
        pic = [[(0,0), (-1,0)],[(0,1),(0,0)]]
        self.assertEqual( estMovement(pic), ((0.5,0), (-0.5,0)) )

    def testLeastQuare( self ):
        # NOTE(review): name has a typo ("Quare" -> "Square"); also depends
        # on a local fixture file and uses eval() on its contents — unsafe
        # if tmp126.txt is not trusted. No assertion: smoke test only.
        pic = eval(open("tmp126.txt").read())
        (k1x,k0x),(k1y,k0y) = estMovement( pic )
        # print -16*k0x/k1x, -16*k0y/k1y

    def testCompensateMovement( self ):
        # Zero movement must leave the grid unchanged.
        self.assertEqual( compensateMovement( [[(0,1),(2,3)]], ((0,0),(0,0)) ),
                [[(0,1),(2,3)]] )
        # self.assertEqual( compensateMovement( [[(0,1),(2,3)]], ((2,1),(3,-1)) ),
        # [[(1,1-1),(2+2+1,3)]] )

    def testAverageShift( self ):
        # Mean vector of the grid, and subtracting it from every cell.
        self.assertEqual( averageShift( [[(1,2),(3,4)]] ), (2,3) )
        self.assertEqual( subShift( [[(1,2),(3,4)]], (2,3) ), [[(-1,-1),(1,1)]] )
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"martind@mapfactor.com"
] | martind@mapfactor.com |
ae185518b0e8ce198cc06cc8c0d42df5a4909135 | 1f741fa73e569657bab183dc9a1512cbdc45ed84 | /I0320091_Exercise9.10.py | a81651daf01320224cbfc5ff752e2d4d54adac98 | [] | no_license | sasareginaa/Salsabila-Putri-Regina_I0320091_Wildan_Tugas9 | abac4a8ffba8c1b40aadd54df0c78232fbaa30f0 | cddccaab6566c6eafc22be425dda169690fe04f9 | refs/heads/main | 2023-04-23T03:32:11.467077 | 2021-04-30T10:19:09 | 2021-04-30T10:19:09 | 362,104,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | #Exercise 9.10
# nilai awal (sebelum dibalik)
A = [100, -700, 300, 400, 800]
# membalik urutan elemen array
A.reverse()
print(A) | [
"sasaregina06@gmail.com"
] | sasaregina06@gmail.com |
25a67c4819e5f76e8597007afbef568d28dcd9f0 | 63c8b9227a6b3178d918769042ecb060acc557be | /symphony/cli/pyinventory/graphql/add_service_endpoint_mutation.py | 4f7d20fa43ca5e0c5407290c9053e8a3f6f0fe27 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | snwfdhmp/magma | 7c4898db68d2668fd39ed25f73bb9a2bc5959066 | 8b3ff20a2717337a83c8ef531fa773a851d2e54d | refs/heads/master | 2020-12-06T09:06:25.806497 | 2020-01-07T18:27:09 | 2020-01-07T18:28:51 | 232,418,366 | 1 | 0 | NOASSERTION | 2020-01-07T21:12:28 | 2020-01-07T21:12:27 | null | UTF-8 | Python | false | false | 3,003 | py | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from functools import partial
from typing import Any, Callable, List, Mapping, Optional
from dataclasses_json import dataclass_json
from marshmallow import fields as marshmallow_fields
from .datetime_utils import fromisoformat
DATETIME_FIELD = field(
metadata={
"dataclasses_json": {
"encoder": datetime.isoformat,
"decoder": fromisoformat,
"mm_field": marshmallow_fields.DateTime(format="iso"),
}
}
)
def enum_field(enum_type):
def encode_enum(value):
return value.value
def decode_enum(t, value):
return t(value)
return field(
metadata={
"dataclasses_json": {
"encoder": encode_enum,
"decoder": partial(decode_enum, enum_type),
}
}
)
class ServiceEndpointRole(Enum):
CONSUMER = "CONSUMER"
PROVIDER = "PROVIDER"
@dataclass_json
@dataclass
class AddServiceEndpointInput:
id: str
portId: str
role: ServiceEndpointRole = enum_field(ServiceEndpointRole)
@dataclass_json
@dataclass
class AddServiceEndpointMutation:
__QUERY__ = """
mutation AddServiceEndpointMutation($input: AddServiceEndpointInput!) {
addServiceEndpoint(input: $input) {
id
name
externalId
customer {
id
name
externalId
}
endpoints {
id
port {
id
}
role
}
links {
id
}
}
}
"""
@dataclass_json
@dataclass
class AddServiceEndpointMutationData:
@dataclass_json
@dataclass
class Service:
@dataclass_json
@dataclass
class Customer:
id: str
name: str
externalId: Optional[str] = None
@dataclass_json
@dataclass
class ServiceEndpoint:
@dataclass_json
@dataclass
class EquipmentPort:
id: str
id: str
port: EquipmentPort
role: ServiceEndpointRole = enum_field(ServiceEndpointRole)
@dataclass_json
@dataclass
class Link:
id: str
id: str
name: str
endpoints: List[ServiceEndpoint]
links: List[Link]
externalId: Optional[str] = None
customer: Optional[Customer] = None
addServiceEndpoint: Optional[Service] = None
data: Optional[AddServiceEndpointMutationData] = None
errors: Any = None
@classmethod
# fmt: off
def execute(cls, client, input: AddServiceEndpointInput):
# fmt: off
variables = {"input": input}
response_text = client.call(cls.__QUERY__, variables=variables)
return cls.from_json(response_text).data
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
f3dad400ea2ff76d29dd558d1607c713866aa9ff | 02840549b18d93f01fefe3e27336fbfacafd7956 | /entertainment_center.py | 51b3b6d29c158b230e79a3ea067e8d2da3c51deb | [] | no_license | gapb/udacity-nanodegree | 88de3011aa6365c3d2d4a35a072982d163054b59 | 1c10a2930ad67cea3aa7ffb2ce36dddf9303d9ea | refs/heads/master | 2021-01-13T01:25:44.203372 | 2015-08-22T22:38:57 | 2015-08-22T22:38:57 | 41,226,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,894 | py | # coding: utf-8
"""This module contains the code to load the movie data for the entertainment
center (stored as string literals), and call fresh_tomatoes.open_movies() to
convert the data into a web page. Future versions of this program may store the
movie information in an external file, which this program would parse before
feeding to fresh_tomatoes.py
"""
import media, fresh_tomatoes
__author__ = 'gilbertpodell-blume'
def main():
    """Declare the Movie objects, collect them in order, and render the page."""
    # Step 1: declare the required Movie objects.
    # (Split URL literals were joined; values are unchanged. The fury_road
    # placeholder was missing a space between "good" and "synopsis".)
    fury_road = media.Movie("Mad Max: Fury Road",
                            "[find or write a good synopsis]",
                            "https://upload.wikimedia.org/wikipedia/en/2/23/"
                            "Max_Mad_Fury_Road_Newest_Poster.jpg",
                            "https://www.youtube.com/watch?v=cdLl1GVjOrc")
    the_lives_of_others = media.Movie("The Lives of Others",
                                      "[find or write a good synopsis]",
                                      "https://upload.wikimedia.org/wikipedia/"
                                      "en/9/9f/Leben_der_anderen.jpg",
                                      "https://www.youtube.com/watch?v=n3_iLOp6IhM")
    yojimbo = media.Movie("Yojimbo",
                          "[find or write a good synopsis]",
                          "https://upload.wikimedia.org/wikipedia/en/8/8b/"
                          "Yojimbo_%28movie_poster%29.jpg",
                          "https://www.youtube.com/watch?v=y_1iT_GmHTE")
    castle_in_the_sky = media.Movie("Castle in the Sky",
                                    "[find or write a good synopsis]",
                                    "https://upload.wikimedia.org/wikipedia/en/"
                                    "4/40/Castle_in_the_Sky_%28Movie_Poster%29.jpg",
                                    "https://www.youtube.com/watch?v=8ykEy-yPBFc")
    micmacs = media.Movie("Micmacs",
                          "[find or write a good synopsis]",
                          "https://upload.wikimedia.org/wikipedia/en/7/75/"
                          "Micmacs_à_tire-larigot.jpg",
                          "https://www.youtube.com/watch?v=TjKW0tG7I8s")
    labyrinth = media.Movie("Labyrinth",
                            "[find or write a good synopsis]",
                            "https://upload.wikimedia.org/wikipedia/en/6/6b/"
                            "Labyrinth_ver2.jpg",
                            "https://www.youtube.com/watch?v=XRcOZZDvMv4")

    # Step 2: compile the Movie objects into a list of movies.
    # BUGFIX: this was a set literal ({...}), contradicting the comment and
    # making the rendered page order nondeterministic; a list preserves the
    # intended order.
    movies = [fury_road, the_lives_of_others, yojimbo, castle_in_the_sky,
              micmacs, labyrinth]

    # Step 3: pass the list of movies to fresh_tomatoes.open_movies_page(movies)
    fresh_tomatoes.open_movies_page(movies)
# Script entry point: build the movie data and open the generated page.
if __name__ == "__main__":
    main()
| [
"gilbertpodell-blume@Peregrin-II.local"
] | gilbertpodell-blume@Peregrin-II.local |
f09cb52deec75543a69ae11c1e3c81d217d918b9 | 6abb051913b0cbf07ff9e599108f80161805fadb | /assignment2/q1_softmax.py | 0d80e70a56b54e5a36a06b1a4855ec7f8a2d23fe | [] | no_license | jigargandhi/stanford224 | ca440f323611fdbcb034d1a3bb42e09884054055 | ef2e4e9facd7d75e1890444ceef5c5b606879304 | refs/heads/master | 2021-01-20T07:31:40.389137 | 2017-09-12T13:34:24 | 2017-09-12T13:34:24 | 101,545,186 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,938 | py | import numpy as np
import tensorflow as tf
#from utils.general_utils import test_all_close
def softmax(x):
    """
    Numerically stable softmax over the feature axis, in tensorflow.

    Args:
        x: tf.Tensor with shape (n_samples, n_features); feature vectors are
           rows. (1-d input is not handled, as in the original.)

    Returns:
        out: tf.Tensor with shape (n_samples, n_features); each row is a
             probability distribution summing to 1.
    """
    ### YOUR CODE HERE
    # Shift every row by its maximum before exponentiating: softmax is
    # invariant under a per-row constant shift, and this keeps tf.exp from
    # overflowing for large logits.
    row_max = tf.reduce_max(x, axis=1, keep_dims=True)
    exps = tf.exp(x - row_max)
    out = exps / tf.reduce_sum(exps, axis=1, keep_dims=True)
    ### END YOUR CODE
    return out
def cross_entropy_loss(y, yhat):
    """
    Cross-entropy loss summed over the minibatch, in tensorflow.

    Args:
        y: tf.Tensor with shape (n_samples, n_classes), one-hot, dtype
           tf.int32.
        yhat: tf.Tensor with shape (n_samples, n_classes); each row is a
           probability distribution, dtype tf.float32.

    Returns:
        out: scalar tf.Tensor equal to -sum_i sum_c y[i,c] * log(yhat[i,c]).
    """
    ### YOUR CODE HERE
    # The one-hot mask keeps only the log-probability of each true class.
    masked = tf.to_float(y) * tf.log(yhat)
    # Collapse both axes to a scalar, then negate.
    out = -tf.reduce_sum(tf.reduce_sum(masked, axis=0), axis=0)
    ### END YOUR CODE
    return out
def test_softmax_basic():
    """
    Some simple tests of softmax to get you started.
    Warning: these are not exhaustive.
    """
    # Large logits (1001, 1002) check numerical stability of the max-shift.
    test1 = softmax(tf.constant(np.array([[1001, 1002], [3, 4]]), dtype=tf.float32))
    with tf.Session() as sess:
        test1 = sess.run(test1)
    print(test1)
    # test_all_close assertions are disabled because the
    # utils.general_utils import at the top of the file is commented out.
    #test_all_close("Softmax test 1", test1, np.array([[0.26894142, 0.73105858],
    #[0.26894142, 0.73105858]]))
    # Large negative logits exercise the same stability fix.
    test2 = softmax(tf.constant(np.array([[-1001, -1002]]), dtype=tf.float32))
    with tf.Session() as sess:
        test2 = sess.run(test2)
    #test_all_close("Softmax test 2", test2, np.array([[0.73105858, 0.26894142]]))
    print ("Basic (non-exhaustive) softmax tests pass\n")
def test_cross_entropy_loss_basic():
    """
    Some simple tests of cross_entropy_loss to get you started.
    Warning: these are not exhaustive.
    """
    # Three one-hot rows against uniform predictions: loss = -3*log(0.5).
    y = np.array([[0, 1], [1, 0], [1, 0]])
    yhat = np.array([[.5, .5], [.5, .5], [.5, .5]])
    test1 = cross_entropy_loss(
        tf.constant(y, dtype=tf.int32),
        tf.constant(yhat, dtype=tf.float32))
    with tf.Session() as sess:
        test1 = sess.run(test1)
    print(test1)
    expected = -3 * np.log(.5)
    # Disabled because the utils.general_utils import is commented out.
    #test_all_close("Cross-entropy test 1", test1, expected)
    print ("Basic (non-exhaustive) cross-entropy tests pass")
# Run the smoke tests when executed as a script.
if __name__ == "__main__":
    test_softmax_basic()
    test_cross_entropy_loss_basic()
| [
"jigarr.gandhi@gmail.com"
] | jigarr.gandhi@gmail.com |
01871cfce2c502981e0152153154624ef523bed7 | cab65864d5e082da2635119416e5f11962ab9d80 | /life.py | 9c773f29d4ae7787b27343473fec793507fe8cc5 | [] | no_license | cassioms/gameoflife | 9fc3e65351e82a3e08b86e8806ed79b427e13001 | 46904207cb5b72824f3c4e8db24dfbfda5fe4305 | refs/heads/master | 2020-03-28T04:43:34.784408 | 2018-09-10T20:08:07 | 2018-09-10T20:08:07 | 147,733,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,740 | py | from copy import deepcopy
from random import randint
from pygame import gfxdraw
class Life:
    """Conway's Game of Life on a toroidal grid, rendered with pygame.

    The board is ``self.points``, a height x width grid of 0/1 cell states
    indexed ``points[y][x]``. Wrap-around neighbour index ranges are
    precomputed once into ``self.neighbours`` so each generation avoids
    re-deriving coordinates.
    """

    def __init__(self, screen, width, height, offset_width, offset_height,
                 starting_points, color, size):
        """Build the grid and switch on `starting_points` random cells.

        screen          -- pygame surface to draw on (used only by update())
        width, height   -- drawable board area in pixels
        offset_width, offset_height -- top-left pixel offset of the board
        starting_points -- number of randomly placed live cells
        color           -- pygame color used for live cells
        size            -- pixel edge length of one square cell
        """
        self.color = color
        self.screen = screen
        self.screen_width = width
        self.screen_height = height
        self.screen_offset_width = offset_width
        self.screen_offset_height = offset_height
        self.size = size
        # height rows x width columns of dead cells.
        self.points = [[0] * self.get_width() for _ in range(self.get_height())]
        for _ in range(starting_points):
            point = self.generate_random_point()
            # BUG FIX: generate_random_point() returns (y, x); the original
            # code indexed points[point[1]][point[0]], swapping the axes,
            # which raises IndexError on any non-square grid.
            self.points[point[0]][point[1]] = 1
        # Precompute wrap-around neighbour index ranges for every cell.
        self.neighbours = [
            [self.get_neighbours(y, x) for x in range(self.get_width())]
            for y in range(self.get_height())
        ]

    def get_width(self, width=-1):
        """Return `width` in cells; -1 (default) means the board's own width."""
        if width == -1:
            width = self.screen_width
        return int(width / self.size)

    def get_height(self, height=-1):
        """Return `height` in cells; -1 (default) means the board's own height."""
        if height == -1:
            height = self.screen_height
        return int(height / self.size)

    def generate_random_point(self):
        """Return a uniformly random (y, x) cell coordinate."""
        return randint(0, self.get_height() - 1), randint(0, self.get_width() - 1)

    def get_neighbours(self, y, x):
        """Return ([y-1, y, y+1], [x-1, x, x+1]) with toroidal wrap-around."""
        x_range = [x - 1, x, x + 1]
        y_range = [y - 1, y, y + 1]
        if x == 0:
            x_range[0] = self.get_width() - 1
        if x == self.get_width() - 1:
            x_range[2] = 0
        if y == 0:
            y_range[0] = self.get_height() - 1
        if y == self.get_height() - 1:
            y_range[2] = 0
        return y_range, x_range

    def count_neighbours(self, y, x):
        """Count live neighbours of cell (y, x), excluding the cell itself."""
        y_range, x_range = self.neighbours[y][x]
        count = sum(self.points[i][j] for i in y_range for j in x_range)
        # The 3x3 scan includes the cell itself; remove it when alive.
        if self.points[y][x] == 1:
            count -= 1
        return count

    def next_gen(self):
        """Advance the board one generation (standard B3/S23 rules)."""
        new_gen = deepcopy(self.points)
        for y in range(self.get_height()):
            for x in range(self.get_width()):
                neighbours = self.count_neighbours(y, x)
                if self.points[y][x] == 1 and (neighbours < 2 or neighbours > 3):
                    new_gen[y][x] = 0  # death: under- or over-population
                elif self.points[y][x] == 0 and neighbours == 3:
                    new_gen[y][x] = 1  # birth: exactly three neighbours
        self.points = new_gen

    def update(self):
        """Draw every live cell as a filled square onto the screen surface."""
        for y in range(self.get_height()):
            for x in range(self.get_width()):
                if self.points[y][x] == 1:
                    gfxdraw.box(
                        self.screen,
                        (x * self.size + self.screen_offset_width,
                         y * self.size + self.screen_offset_height,
                         self.size, self.size),
                        self.color)

    def toggle_clicked(self, pos):
        """Toggle the cell under pixel position `pos` ((x, y) mouse coords).

        Clicks outside the board area are ignored.
        NOTE(review): the upper bounds compare against screen_width/height
        without adding the offsets — confirm those fields already include
        the offset, otherwise the right/bottom edge check is off.
        """
        x, y = pos
        if x < self.screen_offset_width or x >= self.screen_width \
                or y < self.screen_offset_height or y >= self.screen_height:
            return
        y_off = y - self.screen_offset_height
        x_off = x - self.screen_offset_width
        row = self.get_height(y_off)
        col = self.get_width(x_off)
        # Flip the cell state.
        if self.points[row][col] == 0:
            self.points[row][col] = 1
        else:
            self.points[row][col] = 0
| [
"cassio.silva@entelgy.com"
] | cassio.silva@entelgy.com |
0b0fc70f8b07da1dad31d78121f58e3719413334 | a18094360ff3e4e8ba3d4ff173ce7ac9beba35e8 | /练习/队列.py | b9b2f4fcbbcc4f81ce554a6dd5c931e6809f1624 | [] | no_license | xunihao1993/haohao_code | dccbd6a7a6ef937108c949326e3ec49ccb9c87d6 | e314bf697d7cb8b90cb02da108ebbfaf5a8ef2f6 | refs/heads/master | 2022-05-28T11:58:29.799160 | 2021-03-25T02:12:13 | 2021-03-25T02:12:13 | 255,108,292 | 1 | 0 | null | 2022-04-22T23:31:45 | 2020-04-12T15:07:39 | Python | UTF-8 | Python | false | false | 851 | py | # -*- coding: utf-8 -*-
import multiprocessing
import time
import os
def get_data(queue):
    """Pull one item off the queue (blocking) and report it."""
    item = queue.get()
    print('读取数据', item)
def put_data(queue):
    """Build a "<pid>_<timestamp>" payload and push it onto the queue."""
    payload = "{}_{}".format(os.getpid(), time.time())
    print("压入数据:", payload)
    queue.put(payload)
if __name__ == '__main__':
    # Bounded queue: writers beyond capacity 3 block until a reader drains it.
    q = multiprocessing.Queue(3)
    # Launch five writer processes.
    writers = []
    for _ in range(5):
        proc = multiprocessing.Process(target=put_data, args=(q,))
        proc.start()
        writers.append(proc)
    # Launch five reader processes.
    readers = []
    for _ in range(5):
        proc = multiprocessing.Process(target=get_data, args=(q,))
        proc.start()
        readers.append(proc)
    # Wait for all workers: writers first, then readers (same order as before).
    for proc in writers + readers:
        proc.join()
| [
"xu.ni.hao@163.com"
] | xu.ni.hao@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.