blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
83ce57670722076cf553de1a1b5798dc1a6a2754 | d031af7dc049827c98efaf4a2795ce42a6b88190 | /pyGPIO_2pin_km.py | 94a610bbd04a3740a347ea602b3e783d2f2467bf | [] | no_license | JPWS2013/PhoenixRacing_RPi | 0d40f32d4e207d38d5e67e7efc3368eb9010f813 | 024e03089622a24fabde6786c7570e0dd93ffb14 | refs/heads/master | 2021-01-15T17:28:19.890141 | 2013-07-23T01:59:37 | 2013-07-23T01:59:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,467 | py | from __future__ import division
import RPi.GPIO as GPIO
import datetime
import csv
import os
import db_wrapper
def upload_dropbox(results_file, fig):
    """Upload the results CSV and the plot image to the 'cvt_tests/' Dropbox folder.

    Uses the project-local db_wrapper.DropboxTerm client; requires network
    access and valid Dropbox credentials. (Python 2 module: print statement.)
    """
    db = db_wrapper.DropboxTerm()
    print results_file
    db.do_put(results_file, 'cvt_tests/'+results_file)
    db.do_put(fig, 'cvt_tests/'+fig)
#setup the board layout
# Physical (BOARD-numbered) header pins carrying the speedometer/tachometer
# hall-sensor signals.
SPEDO_PIN = 16
TACH_PIN = 15
GPIO.setmode(GPIO.BOARD)
GPIO.setup(SPEDO_PIN, GPIO.IN)
GPIO.setup(TACH_PIN, GPIO.IN)
#initialize variables
spedo_state = GPIO.input(SPEDO_PIN)
tach_state = GPIO.input(TACH_PIN)
# All timing references start "now"; each is updated independently below.
firstTime = lastUpdate = last_tach = last_spedo = datetime.datetime.now()
# Pulses per revolution (number of magnets passing each sensor).
numberOfMagnets_spedo = numberOfMagnets_tach = 2
rpm_spedo = rpm_tach = averagedRPM_spedo = averagedRPM_tach = 0
alpha = .5 #filter constant: weight of the newest sample in the moving average
#setup csv stuff
# NOTE(review): the filename embeds str(datetime.now()), which contains
# spaces and colons -- awkward on some filesystems; confirm acceptable.
f_name = "../cvt_test/CVT_Test_" + str(datetime.datetime.now()) + ".csv"
dataFile = open(f_name,'w+')
dataWriter = csv.writer(dataFile)
# NOTE(review): '%2f' is a width-2 float (probably '%.2f' was meant), and the
# header renders day/month/year in that order -- confirm intended formatting.
initMsg = 'Starting Test %d/%d/%d %d:%d:%2f' % (firstTime.day, firstTime.month, firstTime.year, firstTime.hour, firstTime.minute, firstTime.second+firstTime.microsecond/1000000.0)
dataWriter.writerow([initMsg])
# dataWriter.writerow(['Spedometer','Tachometer','Time since start'])
#main loop
while True:
    try:
        now = datetime.datetime.now()
        #SPEDO
        #an edge of the magnet
        # NOTE(review): RPi.GPIO.input() typically returns ints (0/1), so the
        # identity tests ``is not spedo_state`` and ``is False`` compare object
        # identity rather than value (``0 is False`` is False in CPython).
        # Verify edge detection actually fires with the RPi.GPIO version used.
        if GPIO.input(SPEDO_PIN) is not spedo_state:
            spedo_state = not spedo_state
            #a leading edge of the magnet
            if GPIO.input(SPEDO_PIN) is False:
                # dt: seconds since the previous leading edge (min 1 microsecond).
                # NOTE(review): .microseconds is only the sub-second component of
                # the timedelta (not total_seconds()), so gaps >= 1s wrap around.
                dt = max(1, (now - last_spedo).microseconds)/1000000.0
                rpm_spedo = 60.0 / numberOfMagnets_spedo / dt
                # Exponential moving average with weight ``alpha``.
                averagedRPM_spedo = averagedRPM_spedo*(1-alpha) + rpm_spedo*alpha
                last_spedo = now
        #catch the case when the input stops
        elif now - last_spedo > datetime.timedelta(seconds=0.25):
            print 'too slow'
            # Decay toward (almost) zero while no pulses arrive.
            # NOTE(review): this decays by /3 but the tach branch below decays
            # by *2/3 -- confirm whether the asymmetry is intentional.
            averagedRPM_spedo = max(1e-4,(averagedRPM_spedo) / 3)
            last_spedo = now
        #TACH
        #an edge of the magnet
        if GPIO.input(TACH_PIN) is not tach_state:
            tach_state = not tach_state
            #a leading edge of the magnet
            if GPIO.input(TACH_PIN) is False:
                dt = max(1, (now - last_tach).microseconds)/1000000.0
                rpm_tach = 60.0 / numberOfMagnets_tach / dt
                averagedRPM_tach = averagedRPM_tach*(1-alpha) + rpm_tach*alpha
                last_tach = now
        #catch the case when the input stops
        elif now - last_tach > datetime.timedelta(seconds=0.25):
            print 'too slow'
            averagedRPM_tach = max(1e-4,(averagedRPM_tach * 2) / 3)
            last_tach = now
        #print and log data
        # Report/log at most twice a second.
        if now - lastUpdate > datetime.timedelta(seconds=0.5):
            print "Spedo: %3f Tach: %3f" % (averagedRPM_spedo, averagedRPM_tach)
            # Only log rows while at least one sensor is effectively moving.
            if averagedRPM_tach > 0.01 or averagedRPM_spedo > 0.01:
                dataWriter.writerow([averagedRPM_spedo,averagedRPM_tach,round((now-firstTime).total_seconds(),1)])
            lastUpdate = now
    except (KeyboardInterrupt,SystemExit):
        # Ctrl-C (or SystemExit) ends the test: flush the CSV and render a plot.
        print 'Shutting down...'
        dataWriter.writerow([])
        dataFile.close()
        #create plot png and upload csv and png to dropbox
        import cvt_test_mod as CVT
        fig = CVT.save_plot(f_name)
        # upload_dropbox(f_name, fig)
        # print "files uploaded"
        # os.remove(fig)
break | [
"arjun.aletty@gmail.com"
] | arjun.aletty@gmail.com |
ba5e99dbdf6fc4f652300373267f71e812716625 | e18e1e8f9fc0eccf1434dab62d4eeade52a804a6 | /solvers/bfs_replan.py | 7284ecb91867457e4774dfbb50d072a33c3c52a9 | [] | no_license | tomsilver/camps | 2c01394c3ff7bf397845e8df7567a80aa61f200b | c309a044d2bfc13016645bb3d21319961be964e1 | refs/heads/main | 2022-12-29T20:22:50.505946 | 2020-10-16T19:18:11 | 2020-10-16T19:18:11 | 304,717,731 | 13 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,505 | py | """BFS solver with replanning. Can handle stochasticity. Assumes that the goal
is reached when the environment gives done=True.
"""
import time
import heapq
from solvers import Solver
from structs import LIMBO
from settings import EnvConfig as ec
class BFSReplan(Solver):
    """BFSReplan solver definition.

    Online A*-based planner: plans an action sequence through the
    most-likely-outcome (deterministic) relaxation of the environment,
    executes it while observations match the predicted states, and replans
    from scratch whenever they diverge. The goal is assumed reached when
    ``env.reward(state, None)`` returns done=True.
    """
    def _solve(self, env, timeout=None, info=None, vf=False):
        # ``info`` is accepted for interface compatibility; unused here.
        if vf:
            # BFSReplan solver is an online method, so just plan from the
            # initial state.
            state = env.sample_initial_state()
            plan, state_seq = self._astar(env, state, timeout)
            qvals = self._get_qvals(env, plan, state_seq)
            return qvals
        # Current cached plan and the state sequence it predicts; shared with
        # the ``policy`` closure below via ``nonlocal``.
        plan = []
        state_seq = []
        def policy(state):
            if env.reward(state, None)[1]: # if we're already done, do nothing
                return None
            nonlocal plan
            nonlocal state_seq
            if plan and state == state_seq[0]:
                # State matches our expectation. Continue current plan.
                state_seq.pop(0)
                return plan.pop(0)
            # Either planning for the first time or replanning.
            plan, state_seq = self._astar(env, state, timeout)
            if plan is None:
                return None
            assert state == state_seq[0]
            state_seq.pop(0)
            return plan.pop(0)
        return policy
    @staticmethod
    def is_online():
        # Signals to callers that _solve returns a policy, not a fixed plan.
        return True
    @staticmethod
    def _astar(env, init_state, timeout):
        """A* over most-likely transitions (unit step cost).

        Returns (action_sequence, state_sequence) on success, or
        (None, None) on timeout / exhausted frontier.
        """
        pqueue = PriorityQueue()
        root = AStarNode(init_state, action_sequence=[],
                         state_sequence=[init_state], cost=0)
        pqueue.push(root, env.heuristic(init_state))
        start = time.time()
        visited = set()
        while not pqueue.is_empty():
            if timeout is not None and time.time()-start > timeout:
                return None, None
            node = pqueue.pop()
            # Mark at expansion time; duplicates may still sit in the queue.
            visited.add(node.state)
            # Goal test: env.reward(...)[1] is the done flag.
            if env.reward(node.state, None)[1]:
                return node.action_sequence, node.state_sequence
            for act in env.action_var.domain:
                # Most-likely successor (deterministic relaxation).
                next_state = env.ml_next_state(node.state, act)
                if next_state in visited:
                    continue
                # Prune successors predicted to land in limbo.
                if LIMBO in next_state and next_state[LIMBO] == 1:
                    continue
                pqueue.push(AStarNode(next_state, node.action_sequence+[act],
                                      node.state_sequence+[next_state],
                                      node.cost+1),
                            node.cost+1+env.heuristic(next_state))
        return None, None
    @staticmethod
    def _get_qvals(env, plan, state_seq):
        """Read Q-value estimates off the planned trajectory.

        For each state on the plan, the planned action gets the discounted
        return of the remaining trajectory; all other actions get 0.
        """
        qvals = {}
        if plan is None:
            return qvals
        for i, state in enumerate(state_seq):
            # Discounted return of the trajectory suffix starting at state i.
            returns = sum(env.reward(state2, None)[0]*(ec.gamma**j)
                          for j, state2 in enumerate(state_seq[i:]))
            if i < len(state_seq)-1:
                qvals[state, plan[i]] = returns
            for other_act in env.action_var.domain:
                # NOTE(review): at the final state i == len(plan), so plan[i]
                # here would raise IndexError (state_seq is one longer than
                # plan) -- confirm this branch is never reached for the
                # terminal state, or guard it.
                if plan[i] == other_act:
                    continue
                qvals[state, other_act] = 0
        return qvals
class BFSNode:
    """Search-tree node for BFS: a state plus the action/state path that
    reached it from the root (so a plan can be read off the goal node).
    """

    def __init__(self, state, actions_to_here, states_to_here):
        self.state, self.actions_to_here, self.states_to_here = (
            state,
            actions_to_here,
            states_to_here,
        )
class PriorityQueue:
    """Min-priority queue backed by a binary heap.

    A monotonically increasing counter is stored alongside each entry so
    that priority ties break FIFO and the items themselves are never
    compared.
    """

    def __init__(self):
        self.heap = []
        self.count = 0

    def push(self, item, priority):
        """Insert ``item``; lower ``priority`` values pop first."""
        heapq.heappush(self.heap, (priority, self.count, item))
        self.count += 1

    def pop(self):
        """Remove and return the item with the lowest priority."""
        return heapq.heappop(self.heap)[-1]

    def is_empty(self):
        """Return True when no items remain."""
        return not self.heap

    def __len__(self):
        return len(self.heap)
class AStarNode:
    """A* search-tree node: a state, the action/state path that reached it,
    and the accumulated path cost g(n).
    """

    def __init__(self, state, action_sequence, state_sequence, cost):
        # Keep references to the full trajectory so the plan can be returned
        # directly once this node passes the goal test.
        self.state, self.cost = state, cost
        self.action_sequence = action_sequence
        self.state_sequence = state_sequence
| [
"tomssilver@gmail.com"
] | tomssilver@gmail.com |
93477610d2947bacd127dc680439c5bc7f483708 | ef0285c5db0f320b8ca7f6a1da4fdf882af42458 | /sem6/ad50144124/project.py | c2996ab2ad40a0bc5acdd717610bf5714a006925 | [] | no_license | ad50144124/mon_projet | a04f5eaff361ebce3fce0e78dad6e32aae3a5039 | cf09ba40e8e3d8d7d6c0006e3237d45f441663d6 | refs/heads/master | 2021-01-10T14:47:03.189272 | 2016-04-04T10:39:11 | 2016-04-04T10:39:11 | 50,364,180 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,211 | py | # -*- coding: utf-8 -*-
"""
Éditeur de Spyder
Ceci est un script temporaire.
"""
import soccersimulator,soccersimulator.settings
from decorator import *
from strategy import *
from soccersimulator import BaseStrategy, SoccerAction
from soccersimulator import SoccerTeam, SoccerMatch
from soccersimulator import Vector2D, Player, SoccerTournament
from soccersimulator import KeyboardStrategy,show
"""
============================main===================================
"""
class StateLessStrategy(BaseStrategy):
    """Adapter that wraps a plain decision function so it can be used
    wherever a soccersimulator BaseStrategy is expected.

    ``decid`` is a callable taking a SoccerStateDecorator and returning a
    SoccerAction; the strategy is named after the wrapped function.
    """
    def __init__(self, decid):
        BaseStrategy.__init__(self,decid.__name__)
        self.decideur = decid
        # Shared scratch dict passed to every decision call (per-strategy memory).
        self.info = dict()
    def compute_strategy(self,state,id_team,id_player):
        # Decorate the raw simulator state with team/player context, then delegate.
        aa = self.decideur(SoccerStateDecorator(state,id_team,id_player,self.info))
        return aa
# Earlier experiments left commented out; the active teams are rebuilt below.
#team1=SoccerTeam("team1",[Player("t1j1",StateLessStrategy(random))])
#team2=SoccerTeam("team2",[Player("t2j1",StateLessStrategy(Smart1v1))])
# NOTE(review): both assignments below are overwritten further down (and the
# second team is labelled "team1") -- leftover experiments, confirm removable.
team1=SoccerTeam("team1",[Player("t1j1",StateLessStrategy(random)),Player("t1j2",StateLessStrategy(Smart1v1))])
team2=SoccerTeam("team1",[Player("t2j1",StateLessStrategy(Smart2v2)),Player("t2j2",StateLessStrategy(Smart2v2))])
#team1=SoccerTeam("team1",[Player("t1j1",StateLessStrategy(fonceur)),Player("t1j2",StateLessStrategy(fonceur)),Player("t1j3",StateLessStrategy(fonceur)),Player("t1j4",StateLessStrategy(fonceur))])
#team2=SoccerTeam("team1",[Player("t1j1",StateLessStrategy(Smart1v1)),Player("t1j2",StateLessStrategy(Smart1v1)),Player("t1j3",StateLessStrategy(Smart1v1)),Player("t1j4",StateLessStrategy(Smart1v1))])
# Keyboard-controlled strategy; or, for automatic saving, use
# KeyboardStrategy(fn="monfichier.exp").
strat = KeyboardStrategy() # or pass fn= for automatic saving
#KeyboardStrategy(fn="monfichier.exp")
# Bind keys: 'd' -> fonceur (striker), 'a' -> QuickFollow (goalkeeper-ish).
FS = StateLessStrategy(fonceur)
GK = StateLessStrategy(QuickFollow)
strat.add("d",FS)
strat.add("a",GK)
player1 = Player("j1",strat)
team1=SoccerTeam("team1",[player1])
team2=SoccerTeam("team2",[Player("t2j1",StateLessStrategy(Smart1v1))])
match=SoccerMatch(team1,team2)
show(match)
# Persist the recorded keyboard session after the match window closes.
strat.write("monfichier.exp")
#match=SoccerMatch(team1,team2)
#soccersimulator.show(match)
#tournoi = SoccerTournament(1)
#tournoi.add_team(team1)
#tournoi.add_team(team2)
#tournoi.play()
#soccersimulator.show(tournoi)
| [
"3407585@ppti-14-401-03.ufr-info-p6.jussieu.fr"
] | 3407585@ppti-14-401-03.ufr-info-p6.jussieu.fr |
8728e4f36b2bdc51ff896895408ea594eca9a249 | 108e5c9cf8ed492be4278ed8dca499747a0ff129 | /db/models/user.py | 27185b5898970b81d790c11eddb9dcc0247bf940 | [] | no_license | vsymoniuk/telegramGameBot | 60d7430de154ac492d9318f814f2dfd8f5a3629b | 5bf8b6e1d5017183c654a60a1ce83754615ba9e2 | refs/heads/master | 2022-11-20T06:15:46.625345 | 2020-06-28T22:50:05 | 2020-06-28T22:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | from sqlalchemy import Column, Integer, String, ForeignKey, Boolean
from sqlalchemy.orm import relationship, backref
from .base import Base
class User(Base):
    """SQLAlchemy model: a player record plus their state in the current game.

    Holds two separate links into ``games``: ``curr_game`` (the game being
    played; reverse accessor ``Game.players``) and ``game_creator_to`` (the
    game this user created; reverse one-to-one ``Game.creator``).
    """
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    fullname = Column(String, default="")
    username = Column(String)
    # Game the user is currently playing in.
    curr_game_id = Column(Integer, ForeignKey('games.id'))
    curr_game = relationship("Game", foreign_keys=[curr_game_id], backref="players")
    # Game the user created; uselist=False makes Game.creator a scalar.
    game_creator_to_id = Column(Integer, ForeignKey('games.id'))
    game_creator_to = relationship("Game", foreign_keys=[game_creator_to_id], backref=backref("creator", uselist=False))
    # Mutable per-game state for this player.
    curr_game_status = Column(String)
    curr_game_role = Column(String)
    curr_game_membership = Column(String)
    curr_game_is_president = Column(Boolean)
    curr_game_is_vice_president = Column(Boolean)
    def __repr__(self):
        return "<User(name='%s', fullname='%s', username='%s', curr_game_id='%s')>" % (
            self.name, self.fullname, self.username, self.curr_game_id)
| [
"yaroslaw.klymko@gmail.com"
] | yaroslaw.klymko@gmail.com |
c2318e643a408a8fa2cfdda79a0342ac2f2bd52b | b8d140ce82306ee4032af2d5b15c4229d42d7e5e | /crab/vendor/blackout.py | a5fa3578141581c249112371da078f8a263b9ff6 | [
"MIT"
] | permissive | jdrese/crab | a387827512641ade632b655eea766431381ae670 | 3cd20dad1d4d44b03468d7828b87a28351cecf18 | refs/heads/master | 2022-06-30T09:56:36.461807 | 2020-05-14T07:46:13 | 2020-05-14T07:46:13 | 285,549,135 | 1 | 0 | MIT | 2020-08-06T11:08:16 | 2020-08-06T11:08:16 | null | UTF-8 | Python | false | false | 3,006 | py | """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
blackout is a small micro-module which makes it easier to completely
forget packages. This is particularly useful when working with packages
made up of multiple sub-modules.
And example might be:
/site-packages/foo/__init__.py
/site-packages/foo/bar.py
If bar.py has a function called foo() within it, and we change that
function then it is not enough to reload foo, we must specifically
reload foo as well as foo.bar.
When working with packages with any modules this can be time consuming
and problematic - particularly when developing within a host which is
persistent.
blackout helps, because we can unload the package in its entirety in
a single line using:
```blackout.drop('foo')```
This will remove any hold to foo as well as any submodules of foo. In this
case we can simply call ```import foo``` again, knowing that everything
within that package is being loaded fresh.
"""
import sys
import types
__author__ = "Michael Malinowski"
__copyright__ = "Copyright (C) 2019 Michael Malinowski"
__license__ = "MIT"
__version__ = "1.0.4"
# ------------------------------------------------------------------------------
def drop(package):
    """Remove *package* and every one of its sub-modules from ``sys.modules``.

    The next ``import`` of the package therefore re-executes it (and all of
    its sub-modules) from scratch. References already held by other code are
    untouched, so this is a development convenience and should not be used
    in release code.

    :param package: Package to forget; either its dotted name or the
        imported module object itself.
    :type package: str
    :return: None
    """
    if isinstance(package, types.ModuleType):
        package = package.__name__
    prefix = '%s.' % package
    doomed = [name for name in list(sys.modules.keys())
              if name == package or name.startswith(prefix)]
    for name in doomed:
        del sys.modules[name]
| [
"mike.malinowski@outlook.com"
] | mike.malinowski@outlook.com |
d6cdeb85c2c728cec647a482d4b7f3bb21bb0366 | a80047f86b96367c75fd24a43be67bb8202645aa | /tailor/models.py | 00f95b7fe8b02f9d3e439941c5eebce2bbfdddc3 | [
"MIT"
] | permissive | Albert-Byrone/Tailor_Hub | 84deaee04809c56bb4d7638dfa800e434936c82a | 0ab4402302d03f261ff00af79f84b62cbd241a27 | refs/heads/master | 2022-12-09T14:18:01.392545 | 2019-12-23T10:26:44 | 2019-12-23T10:26:44 | 224,847,452 | 0 | 0 | MIT | 2022-12-08T03:18:46 | 2019-11-29T12:10:07 | Python | UTF-8 | Python | false | false | 6,060 | py | from django.db import models
from django.contrib.auth.models import User
from django.shortcuts import reverse
from pyuploadcare.dj.models import ImageField
from django.db.models.signals import post_save
from django.dispatch import receiver
import datetime as datetime
from django_countries.fields import CountryField
from django.db.models import Q
# (db code, human-readable label) pairs for Item.category.
CATEGORY_CHOICES=(
    ('SU','Suits'),
    ('TR','Trousers'),
    ('CO','Coats'),
    ('DR','Dresses')
)
# (db code, Bootstrap badge class) pairs for Item.label.
LABEL_CHOICES=(
    ('P','primary'),
    ('S','secondary'),
    ('D','danger')
)
class Profile(models.Model):
    """One-to-one extension of the auth User with shop/contact details."""
    user = models.OneToOneField(User,on_delete=models.CASCADE,related_name='profile')
    prof_pic = models.ImageField(upload_to='images/',default='./img/avator.png')
    bio = models.TextField(max_length=50,default='this is my bio')
    name = models.CharField(blank=True,max_length=120)
    contact =models.PositiveIntegerField(null=True,blank=True)
    email = models.EmailField()
    location = models.CharField(max_length=60, blank=True)
    # Stripe linkage for saved-card ("one click") checkout.
    stripe_customer_id = models.CharField(max_length=50, blank=True, null=True)
    one_click_purchasing = models.BooleanField(default=False)
    def __str__(self):
        return f'{self.user.username} Profile'
    # NOTE(review): these signal receivers are defined inside the class body;
    # @receiver registers them when the class is created and they are called
    # as plain (sender, instance, ...) functions, but the conventional
    # placement is at module level -- confirm this is intentional.
    @receiver(post_save,sender=User)
    def create_user_profile(sender,instance,created,**kwargs):
        # Auto-create a Profile whenever a new auth User is saved.
        if created:
            userprofile = Profile.objects.create(user=instance)
    @receiver(post_save,sender=User)
    def save_user_profile(sender,instance,created,**kwargs):
        # Keep the linked Profile persisted alongside the User.
        instance.profile.save()
    def save_profile(self):
        return self.save()
    def delete_profile(self):
        return self.delete()
    @classmethod
    def search_profile(cls,name):
        # Case-insensitive substring match on the linked auth username.
        return cls.objects.filter(user__username__icontains=name).all()
class Item(models.Model):
    """A sellable product listed by a tailor (Profile)."""
    title = models.CharField(max_length=200)
    price = models.FloatField()
    # NOTE(review): default="0" is a string default on a FloatField -- Django
    # coerces it, but a numeric 0 would be cleaner; confirm before changing.
    discount_price = models.FloatField(null=True,blank=True,default="0")
    category = models.CharField(choices=CATEGORY_CHOICES,max_length=2,default="SU")
    label = models.CharField(choices=LABEL_CHOICES,max_length=1,default="P")
    photo = models.ImageField(null=True,blank=True)
    description = models.TextField(null=True,blank=True)
    user = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='posts')
    created = models.DateTimeField(auto_now_add=True, null=True)
    def get_all_comments(self):
        # All Comment rows pointing at this item (related_name='comments').
        return self.comments.all()
    @classmethod
    def search_term(cls,searchterm):
        # Case-insensitive substring search across title/description/category.
        search = Item.objects.filter(Q(title__icontains=searchterm)|Q(description__icontains=searchterm)|Q(category__icontains=searchterm))
        return search
    def __str__(self):
        return f"{self.title}"
class Comment(models.Model):
    """A user (Profile) comment on an Item."""
    comment = models.TextField()
    item = models.ForeignKey(Item, on_delete=models.CASCADE, related_name='comments')
    user = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='comments')
    created = models.DateTimeField(auto_now_add=True, null=True)
class OrderItem(models.Model):
    """One cart/order line: an Item with a quantity."""
    user = models.ForeignKey(User,on_delete=models.CASCADE)
    item = models.ForeignKey(Item,on_delete=models.CASCADE)
    is_ordered = models.BooleanField(default=False)
    quantity = models.IntegerField(default=1)
    def __str__(self):
        return f"{self.quantity} of {self.item.title}"
    def get_total_price(self):
        # Full (undiscounted) line total.
        return self.quantity * self.item.price
    def get_total_discount_price(self):
        # Line total at the discounted unit price.
        return self.quantity * self.item.discount_price
    def get_amount_saved(self):
        return self.get_total_price() - self.get_total_discount_price()
    def get_final_price(self):
        # Charge the discounted total when a truthy discount price is set.
        if self.item.discount_price:
            return self.get_total_discount_price()
        return self.get_total_price()
class Order(models.Model):
    """A cart (is_ordered=False) or a placed order, plus fulfilment flags."""
    user = models.ForeignKey(User,on_delete=models.CASCADE)
    ref_code = models.CharField(max_length=30)
    is_ordered = models.BooleanField(default=False)
    items = models.ManyToManyField(OrderItem)
    start_date = models.DateTimeField(auto_now_add=True)
    ordered_date = models.DateTimeField()
    billing_address = models.ForeignKey('BillingAddress',on_delete=models.SET_NULL,null=True,blank=True)
    payment = models.ForeignKey('Payment',on_delete=models.SET_NULL,null=True,blank=True)
    coupon = models.ForeignKey('Coupon',on_delete=models.SET_NULL,null=True,blank=True)
    # Fulfilment / refund workflow flags.
    being_delivered = models.BooleanField(default=False)
    received = models.BooleanField(default=False)
    refund_requested = models.BooleanField(default=False)
    refund_approved = models.BooleanField(default=False)
    def __str__(self):
        return f"{ self.user.username }"
    def get_total(self):
        # Sum each line's final (possibly discounted) price, then apply the
        # coupon as a flat deduction if one is attached.
        total = 0
        for order_item in self.items.all():
            total += order_item.get_final_price()
        if self.coupon:
            total -= self.coupon.amount
        return total
class BillingAddress(models.Model):
    """Shipping/billing address captured at checkout."""
    user = models.ForeignKey(User,on_delete=models.CASCADE)
    street_address = models.CharField(max_length=50)
    apartment_address=models.CharField(max_length=50)
    country = CountryField(multiple=False,default="Kenya")
    zipcode = models.CharField(max_length=50)
    def __str__(self):
        return f"{self.user.username }"
class Payment(models.Model):
    """Record of a successful Stripe charge against a user."""
    stripe_charge_id = models.CharField(max_length=50)
    user = models.ForeignKey(User,on_delete=models.SET_NULL,blank=True,null=True)
    amount= models.FloatField()
    timestamp = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return f"{ self.user.username }"
class Coupon(models.Model):
    """Flat-amount discount code applied to an Order total."""
    code = models.CharField(max_length=15)
    amount = models.FloatField(default=50)
    def __str__(self):
        return f"{self.code }"
class Refund(models.Model):
    """A refund request for an order, with moderator decision state."""
    order = models.ForeignKey(Order,on_delete=models.CASCADE)
    reason = models.TextField()
    accepted = models.BooleanField(default=False)
    email = models.EmailField()
def __str__(self):
return f"{ self.pk }" | [
"albertbyrone1677@gmail.com"
] | albertbyrone1677@gmail.com |
c84e9d198db8ff8ec7ad9b742df879dc7ffc0a13 | 9c74dceac39e715238f3c1e89501bb8dd31c11a8 | /Aniket/graph2.py | 0cf66390f26715063d5926914fb278e1375cf812 | [] | no_license | vkacker/ForensXMCH2018 | 1c585895328107c109795708b302d4b30710a548 | 39413fdae97dcc8edc0bc1274132953322bdb4bb | refs/heads/master | 2021-09-21T02:14:34.268455 | 2018-08-19T15:37:49 | 2018-08-19T15:37:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | html.Div([
html.Div([
dcc.Graph(
id='example-graph',
figure={
'data': [
{'x': [1, 2, 3], 'y': [4, 1, 2], 'type': 'bar', 'name': 'SF'},
{'x': [1, 2, 3], 'y': [2, 4, 5], 'type': 'bar', 'name': u'Montréal'},
],
'layout': {
'title': 'Graph 1',
'xaxis' : dict(
title='x Axis',
titlefont=dict(
family='Arial',
size=20,
color='#7f7f7f'
)),
'yaxis' : dict(
title='y Axis',
titlefont=dict(
family='Arial',
size=20,
color='#7f7f7f'
))
}
}
)
], className= 'seven columns'
),
html.Div([
dcc.Graph(
id='example-graph-2',
figure={
'data': [
{'x': [1, 2, 3], 'y': [4, 1, 2], 'type': 'line', 'name': 'SF'},
{'x': [1, 2, 3], 'y': [2, 9, 8], 'type': 'line', 'name': u'Montréal'},
],
'layout': {
'title': 'Graph 2'
}
}
)
], className= 'five columns'
)
], className="row"
), className='ten columns offset-by-one') | [
"bamaking128@gmail.com"
] | bamaking128@gmail.com |
952603e0025d29067305943b57cb851c1d9d1eee | 608d0d757bf1c89d3fe72ba16440a4b24af7d442 | /homework_finalwork/work.py | e09cda349de38c72df5816cdf31aa38e2591c629 | [] | no_license | mhq1065/respose | 5ed08adc4e507b15924225c230f4e23dc83e90eb | e49e42930ccf2ae8efe1296c889dab1f0ca23645 | refs/heads/master | 2020-04-12T10:22:06.826424 | 2018-12-19T11:32:26 | 2018-12-19T11:32:26 | 162,427,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | import requests
import json
import sqlite3
def get_data(page):# fetch one page of raw user-comment data
    """Fetch 15 Maoyan comments for movie 249342 at offset ``page`` and store them.

    NOTE(review): ``offset=`` is set to ``page`` directly while each request
    returns 15 comments, so consecutive calls overlap heavily -- confirm
    whether ``page * 15`` was intended.
    """
    header = {'User-Agent': 'Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.81 Mobile Safari/537.36'}
    url = "http://m.maoyan.com/review/v2/comments.json?movieId=249342&userId=-1&offset="+str(page)+"&limit=15&ts=1545206997098&type=3"
    html = requests.get(url, headers=header)
    m = html.json()['data']['comments']
    for i in range(15):# write each comment to the database
        a = m[i]['nick']
        b = m[i]['id']
        c = m[i]['content']
        save_data(a,b,c)
def save_data(a,b,c):# insert one (nick, id, content) row; uses module-global cursor ``cur``
    cur.execute('insert into data values(?,?,?)',(a,b,c))
conn = sqlite3.connect("D://data.db")# create/open the SQLite database file
cur = conn.cursor()
# NOTE(review): plain CREATE TABLE fails if the table already exists;
# consider CREATE TABLE IF NOT EXISTS so the script can be rerun.
cur.execute('create table data(nick varchar(20),id char(10),content varchar(200))')
for n in range(1000): # walk the comment pages
    get_data(n)
conn.commit()
conn.close()
"mhq1065@qq.com"
] | mhq1065@qq.com |
0d0d85ab16b1059e7c73105199f09389765f50c5 | fbe98682e937d08535e14ac434208c887be39783 | /mgd_ioms/mgd_rbac/models.py | 24415f89fdc4cd09e3c51d719c3034c0b6829e58 | [] | no_license | HuLight/jgtl | ee3552431bd30ab5a4bb995d8dc7d48592a66d9a | aaecd2565c53d225e623628d56eff382deb4f006 | refs/heads/master | 2022-10-03T23:22:52.513395 | 2020-03-21T12:31:04 | 2020-03-21T12:31:04 | 248,970,298 | 0 | 0 | null | 2022-09-16T18:19:37 | 2020-03-21T12:21:13 | HTML | UTF-8 | Python | false | false | 3,367 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
class Menu(models.Model):
    """A navigation menu entry; entries form a tree via ``parent``."""
    name = models.CharField(max_length=30, unique=True, verbose_name="菜单名") # unique=True: this field must be unique in the table.
    parent = models.ForeignKey("self", null=True, blank=True, on_delete=models.SET_NULL, verbose_name="父菜单")
    icon = models.CharField(max_length=50, null=True, blank=True, verbose_name="图标")
    code = models.CharField(max_length=50, null=True, blank=True, verbose_name="编码")
    url = models.CharField(max_length=128, unique=True, null=True, blank=True)
    # Sort key; see Meta.ordering below.
    number = models.FloatField(null=True, blank=True, verbose_name="编号")
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = '菜单'
        # verbose_name_plural: display name for the model in the admin.
        verbose_name_plural = verbose_name
        ordering = ['number']
    @classmethod
    def get_menu_by_request_url(cls, url):
        """Return ``{'menu': <Menu>}`` for the menu matching ``url``, or None.

        None is returned both when no menu matches and when the lookup fails
        for any other reason (e.g. multiple rows match and ``get`` raises).
        """
        try:
            return dict(menu=cls.objects.get(url=url))
        except Exception:
            # Was a bare ``except:`` whose body was the bare expression
            # ``None`` (a no-op), silently falling through; now an explicit
            # return with a narrower catch.
            return None
class Role(models.Model):
    """Role: groups URL (menu) permissions so they can be granted to users."""
    name = models.CharField(max_length=32, unique=True, verbose_name="角色")
    permissions = models.ManyToManyField("menu", blank=True, verbose_name="URL授权")
    desc = models.CharField(max_length=50, blank=True, null=True, verbose_name="描述")
    def __str__(self):
        # Added for consistency with the sibling models (Menu, Structure,
        # UserProfile) so roles render by name in the admin and shell.
        return self.name
class Structure(models.Model):
    """Organisation-tree node: a unit or a department, linked to its parent."""
    type_choices = (("unit", "单位"), ("department", "部门"))
    name = models.CharField(max_length=60, verbose_name="名称")
    type = models.CharField(max_length=20, choices=type_choices, default="department", verbose_name="类型")
    # Self-referential link: parent node in the org tree (NULL at the root).
    parent = models.ForeignKey("self", null=True, blank=True, on_delete=models.SET_NULL, verbose_name="父类架构")
    class Meta:
        verbose_name = "组织架构"
        verbose_name_plural = verbose_name
        db_table = "mgd_rbac_org"
    def __str__(self):
        return self.name
class UserProfile(AbstractUser):
    """Custom auth user with HR details, org placement, and RBAC roles."""
    name = models.CharField(max_length=20, default="", verbose_name="姓名")
    birthday = models.DateField(null=True, blank=True, verbose_name="出生日期")
    gender = models.CharField(max_length=10, choices=(("male", "男"), ("female", "女")),
                              default="male", verbose_name="性别")
    mobile = models.CharField(max_length=11, default="", verbose_name="手机号码")
    email = models.EmailField(max_length=50, verbose_name="邮箱")
    image = models.ImageField(upload_to="image/%Y/%m", default="image/default.jpg",
                              max_length=100, null=True, blank=True)
    department = models.ForeignKey("Structure", null=True, blank=True, on_delete=models.SET_NULL, verbose_name="部门")
    post = models.CharField(max_length=50, null=True, blank=True, verbose_name="职位")
    # Self-referential link: the user's direct superior/manager.
    superior = models.ForeignKey("self", null=True, blank=True, on_delete=models.SET_NULL, verbose_name="上级主管")
    roles = models.ManyToManyField("role", verbose_name="角色", blank=True)
    class Meta:
        verbose_name = "用户信息"
        verbose_name_plural = verbose_name
        ordering = ['id']
    def __str__(self):
        return self.name
| [
"yhh2101@163.com"
] | yhh2101@163.com |
c16d87aad5c12e73552775d7063b510229c7d2b4 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/word-count/c9dbb4f567ab4afa840dedb35209b154.py | 3ac8cb50e479178794591a5bbba3d8776dab6ff1 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 220 | py | """
Solution for "Word Count" Exercise
"""
def word_count(input):
    """Count occurrences of each whitespace-separated word in *input*.

    Returns a dict mapping word -> count. Splitting with ``str.split()``
    (no argument) generalizes the original space/newline-only handling to
    tabs and runs of whitespace, and drops empty tokens automatically.

    Note: the parameter name shadows the ``input`` builtin; it is kept for
    backward compatibility with callers using keyword arguments.
    """
    counts = {}
    for word in input.split():
        counts[word] = counts.get(word, 0) + 1
    return counts
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
60fcc7583e7a4666ee84df1b89aa0de7c824794d | 5c724d6e03e4194680c793718a4f72a58ca66bb1 | /app/migrations/0015_auto_20180903_2010.py | 75587dd9dfbf783469020f2e266a255f975a8bff | [] | no_license | tigrezhito1/bat | 26002de4540bb4eac2751a31171adc45687f4293 | 0ea6b9b85e130a201c21eb6cbf09bc21988d6443 | refs/heads/master | 2020-05-02T07:13:06.936015 | 2019-03-26T15:04:17 | 2019-03-26T15:04:17 | 177,812,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-03 20:10
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alters ``produccion.fecha`` to a fixed, non-editable default.

    NOTE(review): the default is the literal timestamp captured when
    makemigrations ran (2018-09-03 20:10), not a callable like
    ``timezone.now`` -- every new row will receive that same historical
    datetime. Confirm this is intended.
    """
    dependencies = [
        ('app', '0014_auto_20180903_1946'),
    ]
    operations = [
        migrations.AlterField(
            model_name='produccion',
            name='fecha',
            field=models.DateTimeField(default=datetime.datetime(2018, 9, 3, 20, 10, 30, 268737), editable=False, help_text='Fecha de recepci\xf3n de la llamada (No se puede modificar)'),
        ),
    ]
| [
"you@example.com"
] | you@example.com |
dd83e536e6cf733840d182e9c94a1c74b58881bc | ca936f01bbecd4905e08abc9da25bd3e01e9761b | /rendertree.py | 5304f90725c763a4e980e2c8fb7dc021f04ddb26 | [] | no_license | kontactr/python-tree-and-graph | 905bbe92f12ea96212063bcc77449bbbef88f695 | 75ddbbc0633fb7c5a3879f79942431c3877eda3d | refs/heads/master | 2021-01-18T13:06:54.230442 | 2017-02-02T13:20:24 | 2017-02-02T13:20:24 | 80,726,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | # -*- coding: utf-8 -*-
#from __future__ import unicode_literals
from anytree import Node, RenderTree
import pydot
# Build the same family structure twice: as anytree Nodes and as a pydot graph.
graph = pydot.Dot(graph_type='graph')
# Grandparents: father's parents (gf_f/gm_f) and mother's parents (gf_m/gm_m).
gf_f = Node("gf_f")
gm_f = Node("gm_f")
gf_m = Node("gf_m")
gm_m = Node("gm_m")
# NOTE(review): anytree nodes have a single parent, so each person below is
# created twice (once per parent line) and the variable keeps only the second
# Node object -- the full family structure lives in the pydot graph instead.
f = Node("f",parent=gf_f)
f = Node("f",parent=gf_m)
m = Node("m",parent=gm_f)
m = Node("m",parent=gm_m)
ch1 = Node("ch1",parent=f)
ch1 = Node("ch1",parent=m)
ch2 = Node("ch2",parent=f)
ch2 = Node("ch2",parent=m)
# Mirror every parent-child relationship as an undirected pydot edge.
edge = pydot.Edge("gf_f", "f")
graph.add_edge(edge)
edge = pydot.Edge("gm_f", "f")
graph.add_edge(edge)
edge = pydot.Edge("gf_m", "m")
graph.add_edge(edge)
edge = pydot.Edge("gm_m", "m")
graph.add_edge(edge)
edge = pydot.Edge("f", "ch1")
graph.add_edge(edge)
edge = pydot.Edge("f", "ch2")
graph.add_edge(edge)
edge = pydot.Edge("m", "ch1")
graph.add_edge(edge)
edge = pydot.Edge("m", "ch2")
graph.add_edge(edge)
# NOTE(review): pydot's Dot.write() defaults to raw DOT text despite the
# .png filename; graph.write_png("family.png") renders an image -- confirm.
graph.write("family.png")
#for node present in string line root/child-3
#print (str(d)[6:len(str(d))-2]) | [
"noreply@github.com"
] | kontactr.noreply@github.com |
360d72b4e7fe1211157925b0fa7eb4173dc1f473 | 21f3e2f086b4a5f535c42995cea658cbdf0837f3 | /practica1/answer8py.py | 2d316088cb06ae812e9ea4d797f68976ade59007 | [] | no_license | degracia911/uasd-emergencia | a0154032ce0bd1dfd155a30ec302b07eb5cfd7e9 | 8e3684412a20e088461750632d975541a084826d | refs/heads/master | 2022-12-28T02:25:56.495219 | 2020-09-30T23:37:10 | 2020-09-30T23:37:10 | 293,353,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | '''Almacene monto, cantidad de cuotas,
porcentaje de interes anual de un prestamo y
calcule la cuota mensual. (Amortizar mediante el sistema frances)'''
monto = 100000.00
cant_cuotas = 12
pia = 30
r = pia * monto /100
e = (monto - r )/ 12
print(float(e)
| [
"ruddydegracia@gmail.com"
] | ruddydegracia@gmail.com |
f4cb702bb909488b44205a7c3c85860429401cc4 | 0737b583899d7d5ddcbdda49731046036d65d998 | /sven/settings.py | 746ef6ac71a5df61f46113c3869cf2e519f9fb90 | [] | no_license | martolini/sven_admin | 71b8b6e84e0eae3d7e971a62e3715efac708b160 | 610183b2d995b1d1f6b1946c13e9b4c9070ef26f | refs/heads/master | 2021-01-25T04:58:00.067439 | 2014-02-25T22:04:25 | 2014-02-25T22:04:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,190 | py | # Django settings for sven project.
import os
import socket
DEBUG = 'cauchy' not in socket.gethostname()
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
if DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'dev.db', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
STATIC_ROOT = ''
MEDIA_ROOT = os.path.join(BASE_DIR, 'files/uploads')
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'svenquiz', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'sven',
'PASSWORD': 'svenadmin',
'HOST': 'localhost', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
STATIC_ROOT = '/opt/svenv/static'
MEDIA_ROOT = '/opt/svenv/uploads'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_URL = '/uploads/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ald-9%v)kc*#$@hip$kpr6&tv_1h*$snl)p67_lvpag+1+phq&'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'sven.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'sven.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'sven.app',
'sven.app.quiz',
'south',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"msroed@gmail.com"
] | msroed@gmail.com |
9fdef289488f09abcc2b7641aa8a537ae0231dd7 | e70d0bcc547d63b338ff51c253aa95d78ea99992 | /xdl/test/python/unit_test/ps_ops/test_ps_pull_op.py | dfa1f106fecb5690866aa4d7d8629b0e15c3b2d8 | [
"Apache-2.0"
] | permissive | niumeng07/x-deeplearning | 2513f7ba823521c40e0346284f5dd0aca5562e40 | 6d3bc3ad4996ab8938c56d8a834af07a04dc2f67 | refs/heads/master | 2020-04-12T23:06:24.447833 | 2019-07-06T16:06:16 | 2019-07-06T16:06:16 | 162,808,758 | 2 | 0 | Apache-2.0 | 2018-12-22T12:18:01 | 2018-12-22T12:17:59 | null | UTF-8 | Python | false | false | 1,377 | py | # Copyright (C) 2016-2018 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import xdl
import unittest
import numpy as np
from xdl.python.lib.datatype import *
from xdl.python.lib.graph import execute
class TestPsPullOp(unittest.TestCase):
    """Verifies that ps_pull_op returns a ones-initialized PS variable."""

    def test_all(self):
        # Register a 4-element int32 variable initialized to all ones.
        weights = xdl.Variable(name="w", dtype=DataType.int32, shape=[4],
                               initializer=xdl.Ones())
        execute(xdl.variable_registers())
        execute(xdl.global_initializers())
        # Pull the value back from the parameter server and compare.
        pull_op = xdl.ps_pull_op(var_name="w", var_type="index",
                                 dtype=DataType.int32)
        pulled = execute(pull_op)
        self.assertTrue((pulled == np.array([1, 1, 1, 1])).all())
def suite():
    """Collect every test of TestPsPullOp into a TestSuite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(TestPsPullOp)
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
| [
"yue.song@alibaba-inc.com"
] | yue.song@alibaba-inc.com |
488ff90b2f40c32aa80e7d917b87adac93351eb5 | 031cc92fb2f22b23a22f2c1c3cad82e0fd822082 | /eval/eval.py | 0315ae4b46e76d5b2fcecb19bb2cd0901de32bf6 | [] | no_license | jianghan0213/PyTorch-BYOL | b740fb06d4ed2594e7afb257d31dc3227b36f740 | e4055729d913a886096a3d6cce93a8e37526f73b | refs/heads/master | 2023-06-07T21:21:16.487827 | 2021-06-25T07:04:28 | 2021-06-25T07:25:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,977 | py | import os
import torch
import sys
import yaml
import torchvision
from torchvision import transforms, datasets
import numpy as np
from sklearn import preprocessing
from torch.utils.data.dataloader import DataLoader
sys.path.append('../')
from models.resnet_base_network import ResNet18
class LogisticRegression(torch.nn.Module):
    """Multinomial logistic-regression head: a single affine layer that
    produces per-class logits (softmax is left to the loss function)."""

    def __init__(self, input_dim, output_dim):
        super(LogisticRegression, self).__init__()
        # Keep the attribute name `linear` so saved state_dicts stay compatible.
        self.linear = torch.nn.Linear(input_dim, output_dim)

    def forward(self, x):
        logits = self.linear(x)
        return logits
def get_features_from_encoder(encoder, loader, device):
    """Push every batch from *loader* through *encoder* and gather results.

    Returns:
        (features, labels): a stacked tensor with one feature vector per
        sample and a 1-D tensor with the matching labels.
    """
    features = []
    labels = []
    for batch, targets in loader:
        # Inference only: no gradients are needed for feature extraction.
        with torch.no_grad():
            outputs = encoder(batch.to(device))
        features.extend(outputs)            # one entry per sample in the batch
        labels.extend(targets.cpu().numpy())
    return torch.stack(features), torch.tensor(labels)
def create_data_loaders_from_arrays(X_train, y_train, X_test, y_test):
    """Wrap feature/label tensors into DataLoaders (train shuffled, test not)."""
    train_loader = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(X_train, y_train),
        batch_size=64, shuffle=True)
    test_loader = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(X_test, y_test),
        batch_size=512, shuffle=False)
    return train_loader, test_loader
if __name__ == "__main__":
batch_size = 512
data_transforms = torchvision.transforms.Compose([transforms.ToTensor()])
config = yaml.load(open("../config/config.yaml", "r"), Loader=yaml.FullLoader)
train_dataset = datasets.STL10('../datasets/', split='train', download=False,
transform=data_transforms)
test_dataset = datasets.STL10('../datasets/', split='test', download=False,
transform=data_transforms)
train_loader = DataLoader(train_dataset, batch_size=batch_size,
num_workers=0, drop_last=False, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size,
num_workers=0, drop_last=False, shuffle=True)
device ='cuda' if torch.cuda.is_available() else 'cpu'
encoder = ResNet18(**config['network'])
encoder = encoder.to(device)
output_feature_dim = encoder.projetion.net[0].in_features
#load pre-trained parameters
load_params = torch.load(os.path.join('../runs/resnet-18_80-epochs/checkpoints/model.pth'),
map_location=torch.device(torch.device(device)))
if 'online_network_state_dict' in load_params:
encoder.load_state_dict(load_params['online_network_state_dict'])
print("Parameters successfully loaded.")
# remove the projection head
encoder = torch.nn.Sequential(*list(encoder.children())[:-1])
encoder = encoder.to(device)
logreg = LogisticRegression(output_feature_dim, 10)
logreg = logreg.to(device)
encoder.eval()
x_train, y_train = get_features_from_encoder(encoder, train_loader, device)
x_test, y_test = get_features_from_encoder(encoder, test_loader, device)
if len(x_train.shape) > 2:
x_train = torch.mean(x_train, dim=[2, 3])
x_test = torch.mean(x_test, dim=[2, 3])
print("Training data shape:", x_train.shape, y_train.shape)
print("Testing data shape:", x_test.shape, y_test.shape)
'''
scaler = preprocessing.StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train).astype(np.float32)
x_test = scaler.transform(x_test).astype(np.float32)
'''
x_train = x_train.cpu().numpy()
x_test = x_test.cpu().numpy()
train_loader, test_loader = create_data_loaders_from_arrays(torch.from_numpy(x_train), y_train, torch.from_numpy(x_test), y_test)
optimizer = torch.optim.Adam(logreg.parameters(), lr=3e-4)
criterion = torch.nn.CrossEntropyLoss()
eval_every_n_epochs = 10
for epoch in range(200):
# train_acc = []
for x, y in train_loader:
x = x.to(device)
y = y.to(device)
# zero the parameter gradients
optimizer.zero_grad()
logits = logreg(x)
predictions = torch.argmax(logits, dim=1)
loss = criterion(logits, y)
loss.backward()
optimizer.step()
total = 0
if epoch % eval_every_n_epochs == 0:
correct = 0
for x, y in test_loader:
x = x.to(device)
y = y.to(device)
logits = logreg(x)
predictions = torch.argmax(logits, dim=1)
total += y.size(0)
correct += (predictions == y).sum().item()
acc = 100 * correct / total
print(f"Testing accuracy: {np.mean(acc)}") | [
"1491078277@qq.com"
] | 1491078277@qq.com |
3bb2d3e29596e0164e2c2085bd56f2a995d832c3 | f543d608c675af68601928328f36c51739d94eb8 | /scripts/braker.py | b58f140f09ec4b63008ff48c829655e96d6969aa | [] | no_license | rongrong1314/lidar-montecarlo-pathplanning | a634fc2070abf8cb20623c3af64b3f21b559b01b | 223c80ec1d623dd90d2ebcd955c9c9ebff4f2132 | refs/heads/master | 2022-12-02T09:56:26.539664 | 2020-08-18T18:21:42 | 2020-08-18T18:21:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | #!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from pacmod_msgs.msg import PacmodCmd
import time
# pacmod_msgs/PacmodCmd
enable_pub = rospy.Publisher('/pacmod/as_rx/enable', Bool, queue_size=10)
brake_pub = rospy.Publisher('/pacmod/as_rx/brake_cmd',
PacmodCmd,
queue_size=10)
# topic
# as_rx/brake_cmd
def sendBrake(enable_pub, brake_pub):
    """Enable the PACMod drive-by-wire system and command full brake."""
    enable_msg = Bool()
    enable_msg.data = True
    enable_pub.publish(enable_msg)

    brake_cmd = PacmodCmd()
    brake_cmd.enable = True
    brake_cmd.f64_cmd = 1.0  # full braking effort
    brake_pub.publish(brake_cmd)
def sendNoBrake(enable_pub, brake_pub):
    """Keep the PACMod system enabled but release the brake entirely."""
    enable_msg = Bool()
    enable_msg.data = True
    enable_pub.publish(enable_msg)

    brake_cmd = PacmodCmd()
    brake_cmd.enable = True
    brake_cmd.f64_cmd = 0.0  # zero braking effort
    brake_pub.publish(brake_cmd)
def talker():
    """Alternately apply and release the brake until ROS shuts down."""
    rospy.init_node('braker', anonymous=True)
    cycle = rospy.Rate(0.3)  # 0.3 Hz pacing between release and next brake
    while not rospy.is_shutdown():
        rospy.loginfo("brake at %s" % rospy.get_time())
        sendBrake(enable_pub, brake_pub)
        time.sleep(1)  # hold the brake for one second
        rospy.loginfo("un brake at %s" % rospy.get_time())
        sendNoBrake(enable_pub, brake_pub)
        cycle.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
| [
"kazukishin@kazuki-macbook.local"
] | kazukishin@kazuki-macbook.local |
0dc622251f91d229a93059dbc1470fb1b08fa036 | cc43a517a1825a46e2474dd979c530daa0386f6e | /search-engine/query_parser.py | 8cba207960945eca1682bb9f95ba2e03bc4f36a0 | [] | no_license | keerthipendyala/Information-Retrieval | bc0152336ce29be1f135d8bbf39d088420c7133a | 190b2dbf3f3c15972344375619f0a660b2860bb7 | refs/heads/master | 2020-03-29T16:51:10.816147 | 2018-09-24T16:13:52 | 2018-09-24T16:13:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py |
import xml.etree.ElementTree as ET
from collections import OrderedDict
import re
import os
from bs4 import BeautifulSoup
queries=OrderedDict()
query_file="cacm.query"
query_xml="cacm_query.xml"
folder="cacm/"
parsed_path="parsed_corpus/"
def clean(string):
    """Normalize raw CACM text: drop citations and punctuation, lowercase.

    Commas and dots are kept only when they sit between digits (so
    "1,234" and "3.14" survive); hyphens are kept; bracketed citations
    such as "[5]" are removed entirely; every other punctuation
    character becomes a space.
    """
    # Renamed the accumulator: the original shadowed the builtin `str`.
    text = re.sub(r'(\[.+?\])', '', string)  # remove citations like [12]
    # The lookahead cannot match a trailing comma/dot (nothing follows it),
    # so rstrip handles the end-of-string case.
    text = re.sub(r'\,(?=[^0-9])', ' ', text).rstrip(",")
    text = re.sub(r'\.(?=[^0-9])', ' ', text).rstrip(".")
    text = re.sub(r"[^\w\s,.-]|_", ' ', text)  # keep word chars, space, , . -
    return text.lower()
def get_parsed_queries():
    """Wrap the raw query file in a single <ROOT> element, parse, return dict.

    The raw CACM query file has no single enclosing element, hence the
    <ROOT> wrapper before handing it to the XML parser.
    """
    with open(query_file, 'rb') as src, open(query_xml, 'wb') as dst:
        dst.write('<ROOT>{}</ROOT>'.format(src.read()))
    read_xml_into_dict()
    return queries
def read_xml_into_dict():
    """Parse the wrapped query XML and fill the module-level `queries` dict.

    Each <DOC> element's first child carries the query id as its text; the
    query body is that child's tail text, which is flattened to one line
    and cleaned before being stored.
    """
    tree = ET.parse(query_xml)
    root = tree.getroot()
    # A leftover no-op expression (`root.findall('DOC')[0][0].tail`) was
    # removed: its value was discarded and it raised IndexError when the
    # file contained no DOC entries.
    for doc in root.findall('DOC'):
        query = doc[0].tail
        queries[doc[0].text.strip()] = clean(query.replace("\n", ' '))
def get_filenames():
    """Make sure the output directory exists, then list the raw corpus files."""
    if not os.path.exists(parsed_path):
        os.makedirs(parsed_path)
    return os.listdir(folder)
def parse_corpus():
    """Clean every raw CACM document and write it under parsed_path as .txt.

    The document text is taken from the <pre> element of each raw file.
    """
    for filename in get_filenames():
        if filename == ".DS_Store":  # skip macOS folder metadata
            continue
        # The `with` block closes the input file; the original also called
        # fd.close() redundantly and kept an unused counter `i`.
        with open(folder + filename, "r") as fd:
            soup = BeautifulSoup(fd.read(), "html.parser")
        body = soup.pre
        out_name = (parsed_path + filename).split(".")[0] + ".txt"
        with open(out_name, "w") as out:
            out.write(clean(body.text).encode('utf8') + '\n')
get_parsed_queries()
parse_corpus() | [
"pendyala.k@husky.neu.edu"
] | pendyala.k@husky.neu.edu |
95b462c1589ad5fcc2afbb3b460ed427d4606ddf | ef8d6ba2afe24cd981eeb92624e838b6acc98166 | /src/ckanext-delineate/setup.py | 4b2815902ac1b31eff768239e15632773f5b8dd6 | [
"BSD-3-Clause"
] | permissive | CI-WATER/portal | cd68cd8f514d10092faea3e78d4421819c9714c3 | c61660c8389c7af82517cbd0154bc83f9737c4d1 | refs/heads/master | 2016-09-05T10:40:10.721184 | 2014-07-01T17:43:06 | 2014-07-01T17:43:06 | 20,502,671 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | from setuptools import setup, find_packages
version = '0.1'
setup(
name='ckanext-delineate',
version=version,
description="Watershed delineation extension",
long_description="""\
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='Pabitra Dash',
author_email='pabitra.dash@usu.edu',
url='',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
namespace_packages=['ckanext', 'ckanext.delineate'],
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points=\
"""
[ckan.plugins]
# Add plugins here, eg
watershed_delineate=ckanext.delineate.plugin:DelineateWatershedPlugins
""",
)
| [
"pkdash_reena@hotmail.com"
] | pkdash_reena@hotmail.com |
5e6a0deb1992ee2d2915ebf090609c88dd903171 | 084d705b6cadf199570dfd862e2cd55d575693a7 | /utils/nuc_stats.py | b98ddee2191179b74693624fb4c1040240658149 | [] | no_license | andrewcboardman/aptamers | 94d8e4e16a250b3cab0db4703b85d50e4a75278d | a296e8acb1bb7f64e6dd82326951b005798b37fd | refs/heads/master | 2020-04-04T19:54:09.698652 | 2019-11-18T19:17:30 | 2019-11-18T19:17:30 | 156,225,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | from pysster.Data import Data
import matplotlib.pyplot as plt
data = Data(['mf_artificial_samples.fasta'],'ACGT')
profile = sum((data.astype('int32') for data in data.data))/len(data.data)
plt.switch_backend('TkAgg')
plt.bar(range(1,41),profile[:,0])
plt.bar(range(1,41),profile[:,1],bottom=profile[:,0])
plt.bar(range(1,41),profile[:,2],bottom=profile[:,0]+profile[:,1])
plt.bar(range(1,41),profile[:,3],bottom=profile[:,0]+profile[:,1]+profile[:,2])
plt.legend(('A','C','G','T')
)
plt.title('Nucleotide distribution for the artificially generated sequences')
plt.show() | [
"acb95@cam.ac.uk"
] | acb95@cam.ac.uk |
a7896d94daadb140e6d3a09d2e940af892feae70 | fb2ba196c33fa647909914533559485b716f24b6 | /py/ReverseString.py | 66fa736a9b1b85f808ea77057803a61e3fce95b8 | [] | no_license | ZihengZZH/LeetCode | 7d62ab7211004c0ee5fe49ef3adeb72c698da44f | 96e2faaa8c18636c173883cca55b2c228c81477a | refs/heads/master | 2021-06-04T05:31:03.306503 | 2020-03-07T03:07:28 | 2020-03-07T03:07:28 | 108,906,661 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | '''
Write a function that takes a string as input and returns the string reversed.
Example:
Given s = "hello", return "olleh".
NOTE: Try to avoid running-time-exceeded
'''
class Solution(object):
    def reverseString(self, s):
        """Return the characters of s in reverse order.

        Slicing runs in O(n); the previous character-by-character string
        concatenation loop was quadratic in the worst case (the docstring
        at the top of the file explicitly asks to avoid time-limit
        issues).
        """
        return s[::-1]
if __name__ == "__main__":
solu = Solution()
inputStr = "My name is Ziheng ZHANG, an undergraduate"
res = solu.reverseString(inputStr)
print inputStr, "ANSWER IS", res
| [
"zihengzhang1025@gmail.com"
] | zihengzhang1025@gmail.com |
29ade0802ba7f753dde50457d3d040b9d9b4e45a | a16feb303b7599afac19a89945fc2a9603ae2477 | /Simple_Python/standard/traceback/traceback_6.py | 98c71ab419e8207fcac4ff823ee242f3b6713f45 | [] | no_license | yafeile/Simple_Study | d75874745ce388b3d0f9acfa9ebc5606a5745d78 | c3c554f14b378b487c632e11f22e5e3118be940c | refs/heads/master | 2021-01-10T22:08:34.636123 | 2015-06-10T11:58:59 | 2015-06-10T11:58:59 | 24,746,770 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | #! /usr/bin/env/python
# -*- coding:utf-8 -*-
import traceback
import sys
from traceback_1 import call_function
def f():
    """Print the current call stack to stdout (print_stack defaults to stderr)."""
    stream = sys.stdout
    traceback.print_stack(file=stream)
print 'Calling f() directly:'
f()
print
print 'Calling f() from 3 levels deep:'
call_function(f) | [
"zhuzhulang@126.com"
] | zhuzhulang@126.com |
d44b6bb9da0fd77a4c7f48727cea45df47f2881d | b235dfe0a970b74df11c90c738ac4dc24361506a | /src/Sql_aat.py | 82870b1ceeac58e1555edef71ed70952cc5e2819 | [] | no_license | arturo-alatriste/sql_aat | 7b3b3befb473b3833fa7344b7d7610713e4e3db5 | c4e902705c0e78df8d95bdc413ea7bcacc60b16f | refs/heads/master | 2020-04-22T02:25:47.599358 | 2019-02-11T01:46:32 | 2019-02-11T01:46:32 | 170,048,361 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,312 | py | '''
-------------------------------------------------------------------------------
Description : This project is a useful library to run SQL queries on pandas dataframes.
We can just give a query in plain Text and it can be executed. For example:
import pandas as pd
from Sql_aat import Sql_aat
sql_aat = Sql_aat()
result = sql_aat.exec( query, df )
Date : 2019-02-10
Author : Arturo Alatriste Trujillo.
-------------------------------------------------------------------------------
'''
import pandas as pd
from Sql_parser import Sql_parser
from Sql_command import Sql_command
class Sql_aat:
    """Facade for running plain-text SQL queries on pandas dataframes."""

    # Shared helpers: the parser splits a query string into its sections,
    # the command object evaluates those sections against a dataframe.
    sql_parser = Sql_parser()
    sql_command = Sql_command()
    df = None

    def exec(self, query, df):
        """Parse *query*, execute it against *df*, and return the result."""
        try:
            sections = self.sql_parser.get_sql_sections(query)
            return self.sql_command.exec(sections, df)
        except Exception as e:
            print('Sql_aat.exec, error: {}'.format(str(e)))
            raise

    def __init__(self):
        pass
"noreply@github.com"
] | arturo-alatriste.noreply@github.com |
1b075a4e8d7640d03ff61a2f0e63e6b28699aa4e | fc8ba9e7e7d1b46f113edfe54105a6d3b770cf4d | /Hello.py | 6cc0f055945baf32dc259e520bdeb0e675660150 | [] | no_license | hezhengfa/TestPycharm | 9d91fb58807b7051a2ba22bc832e5adf0781f422 | 8fa1b84ba3debc73deaf58a826b6b9c14cecc4c2 | refs/heads/master | 2020-04-17T03:09:49.221291 | 2019-01-17T06:06:58 | 2019-01-17T06:06:58 | 166,169,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | print 'Hello world'
print 'Test github Successfully!' | [
"hezhengfa123@gmail.com"
] | hezhengfa123@gmail.com |
a53f2b3e268ed16d83380a3ccf0b8f51095dcb62 | 8fcbf2f2d0c7bbb6aef36815f6ad490c917d8530 | /extractMut.py | 62b8195f18d2c03d14f23a57e6e8fe32b4042f36 | [] | no_license | rmehta1987/mutBOW | f0752f0a550af58c5354c93a8d595ccc965a6b14 | 6e634af0481986c97ae0bd770512b5e5c284a459 | refs/heads/master | 2020-07-12T11:06:10.661832 | 2019-08-28T17:06:30 | 2019-08-28T17:06:30 | 204,801,044 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,076 | py | import pandas as pd
from operator import itemgetter
import numpy as np
import argparse
def createOneDataFrame(path):
    """Load one .maf mutation file into a DataFrame.

    Args:
        path: path to the tab-separated MAF file.

    Returns:
        DataFrame restricted to the columns listed below. Lines starting
        with '#' (MAF version headers) are skipped via `comment='#'`.
    """
    wanted_columns = ['Hugo_Symbol', 'Entrez_Gene_Id', 'Chromosome',
                      'Start_position', 'End_position', 'Strand',
                      'Variant_Classification', 'Variant_Type',
                      'Reference_Allele', 'Tumor_Seq_Allele1',
                      'Tumor_Seq_Allele2', 'Tumor_Sample_Barcode',
                      'Matched_Norm_Sample_Barcode', 'Transcript_Strand']
    return pd.read_csv(path, sep='\t', comment='#', encoding='utf-8',
                       usecols=wanted_columns)
def createAllDataFrame(path):
    """Load and concatenate every .maf file found in the *path* directory.

    The original version referenced undefined names (`result_dir`,
    `maf_files`, `os`), ignored its `path` argument and never returned a
    value; this implementation scans *path* for .maf files and uses the
    same column subset and parse options as createOneDataFrame for
    consistency.

    Args:
        path: directory containing one or more .maf files.

    Returns:
        A single DataFrame with the mutations from every file.
    """
    import os  # local import: this module does not import os at top level

    used_col = ['Hugo_Symbol', 'Entrez_Gene_Id', 'Chromosome', 'Start_position',
                'End_position', 'Strand', 'Variant_Classification',
                'Variant_Type', 'Reference_Allele', 'Tumor_Seq_Allele1',
                'Tumor_Seq_Allele2', 'Tumor_Sample_Barcode',
                'Matched_Norm_Sample_Barcode', 'Transcript_Strand']
    maf_files = sorted(f for f in os.listdir(path) if f.endswith('.maf'))
    frames = [pd.read_csv(os.path.join(path, maf_file), sep='\t',
                          comment='#', encoding='utf-8',
                          na_values='[Not Available]', usecols=used_col)
              for maf_file in maf_files]
    return pd.concat(frames)
def sortPatients(the_df):
    """Return *the_df* ordered by patient barcode, ascending.

    See https://docs.gdc.cancer.gov/Data/File_Formats/MAF_Format/ for the
    meaning of the MAF columns.

    Args:
        the_df: dataframe containing mutation data of all patients.

    Returns:
        The dataframe sorted by 'Tumor_Sample_Barcode'.
    """
    ordered = the_df.sort_values(by=['Tumor_Sample_Barcode'], axis=0,
                                 ascending=True)
    return ordered
def createFiles(the_df):
    """Write bag-of-words style files describing patient mutation counts.

    Produces, in the working directory:
      * patient_bow.npz    -- two object arrays: per-patient mutation
                              indices and the matching per-patient counts
      * patient_vocab.npz  -- the array of unique mutation names (vocab)
      * reduced_pat_mut.csv -- the input dataframe as CSV
      * pat_mut_mat.csv    -- the patients x mutations count matrix

    Variant classification is ignored: missense/deletion/insertion on the
    same gene are counted together.

    Args:
        the_df: dataframe (sorted by patient) with 'Hugo_Symbol' and
            'Tumor_Sample_Barcode' columns.

    Returns:
        The patients x mutations count matrix as a DataFrame.
    """
    unique_muts = the_df['Hugo_Symbol'].unique()
    patient_ids = the_df['Tumor_Sample_Barcode'].unique()
    mut_array = np.array(unique_muts)  # the vocabulary
    # Map each mutation name to its column index in the vocabulary.
    mut_index = {name: i for i, name in enumerate(mut_array)}

    matrix = pd.DataFrame(0, columns=unique_muts, index=patient_ids)
    pat_idx = []  # per patient: indices of the mutations the patient has
    pat_cnt = []  # per patient: counts aligned with pat_idx

    for pid in patient_ids:
        counts = the_df[the_df['Tumor_Sample_Barcode'] == pid]['Hugo_Symbol'].value_counts()
        # Plain dict lookups also handle patients with a single mutation
        # (operator.itemgetter returns a bare scalar in that case).
        pat_idx.append(np.array([mut_index[m] for m in counts.index.values]))
        pat_cnt.append(np.array(counts.values))
        # .loc assignment instead of chained matrix.iloc[i][cols] = ...,
        # which can silently write to a temporary copy in pandas.
        matrix.loc[pid, counts.index.values] = counts.values

    # dtype=object keeps np.savez working with ragged per-patient arrays.
    # Positional save: the 2nd argument becomes the 1st array when loading.
    np.savez('patient_bow.npz', np.array(pat_idx, dtype=object),
             np.array(pat_cnt, dtype=object))
    np.savez('patient_vocab.npz', mut_array)
    the_df.to_csv('reduced_pat_mut.csv', sep=',', index=False)
    matrix.to_csv('pat_mut_mat.csv', sep=',')
    return matrix
"""Parse arguments."""
parser = argparse.ArgumentParser(
description='Population random measure embedding model.')
# data configurations
parser.add_argument(
'--dataset',
default='broad.mit.edu_LUAD_IlluminaGA_DNASeq.maf',
type=str,
help='dataset folder or maf file name')
parser.add_argument(
'--multiple',
default=0,
type=int,
help='If there are multiple MAF files, 0 for No')
args = parser.parse_args()
if args.multiple:
the_df = createAllDataFrame(args.dataset)
else:
the_df = createOneDataFrame(args.dataset)
the_df = sortPatients(the_df)
createFiles(the_df)
| [
"rmehta1987@outlook.com"
] | rmehta1987@outlook.com |
0cb973af037c7d074b7cfbefc854bee39ac187aa | 023f5bba7f117f1aa153b30b1e904612e1265497 | /python/Test_API/Test_case/Test_case/test_api_vpn_8.167.py | 911bb38c59a9f54f2ced34aabb0cf4e84df39fae | [] | no_license | heliangliang2/iyunxiao | 4d987e474581a49a5197ae993a9a3f56d3bb0a7f | b732cf6641bfb2b52bd2cd1628331ffc3a5169b2 | refs/heads/master | 2020-04-04T00:06:43.010824 | 2018-11-11T06:54:18 | 2018-11-11T06:54:18 | 155,476,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,756 | py | #conding:utf-8
import requests
#禁用安全请求警告
#登录Jenkins
url="https://172.18.8.167:8888/cgi-bin/index.cgi"
header={
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN",
# "User-Agent":" Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
"X-Requested-With": "XMLHttpRequest"
}
data={
"type":"loginCheck",
"user":"adm",
"pwd":"adm"
}
jension={
"JSON":"type"
}
s=requests.session()
r=s.post(url,headers=header,json=jension,data=data,verify=False)
print(r.status_code)
print(r.content)
print(s.cookies)
#添加cookies
# c=requests.cookies.RequestsCookieJar()
# c.set("sw_login_user_name","adm")
# c.set("sw_login_session_id","1678151169")
# c.set("sw_login_role_info","255")
# c.set("sw_login_role_name","administrator")
# s.cookies.update(c)
# print(s.cookies)
#添加接口
url2="https://172.18.8.167:8888/cgi-bin/netManage/netInterface.cgi"
header={
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN",
# "User-Agent":" Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
"X-Requested-With": "XMLHttpRequest"
}
body={
"type":"setConfigInfo",
"eth":"eth2",
"ip":"2.2.5.2",
"mast":"255.255.255.0",
"gateway":"0.0.0.0",
"linkWeight":"0",
"userName":"N",
"password":"N",
"br":"null",
"onBoot":"1",
"mtu":"0",
"mac":"00:00:00:00:00:00",
"mode":"1",
"probe_addr":"0.0.0.0"
}
jens={
"JSON":"type"
}
r2=requests.session()
s2=r2.post(url2,headers=header,json=jens,data=body,verify=False)
print(s2.status_code)
print(s2.content)
print(s2.cookies)
| [
"2463731436@qq.com"
] | 2463731436@qq.com |
d7af9a0c88e28ef76bd358174e5b026502fcb910 | 956d2056e7cc56cc5ef2c6224a52a5e5c2987785 | /utils/pyrrd/backend/external.py | 69b49adb1b75d3f9cf03e1a3dd935cd127612bc0 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | AndriyZabavskyy/taf | 60f7ff7476d6a7443a1e5a752778277f6629e65b | 2007bf3fe66edfe704e485141c55caed54fe13aa | refs/heads/master | 2021-05-20T01:01:23.835257 | 2020-04-10T08:31:15 | 2020-04-10T08:31:15 | 252,119,009 | 0 | 0 | Apache-2.0 | 2020-04-01T08:40:24 | 2020-04-01T08:40:23 | null | UTF-8 | Python | false | false | 11,722 | py | """
Copyright (c) 2004-2008, AdytumSolutions, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of AdytumSolutions, Inc. nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import sys
from subprocess import Popen, PIPE
from . import common
from ..exceptions import ExternalCommandError
from ..util import XML
def _cmd(command, args=""):
    """Run ``rrdtool <command> <args>`` and return its stdout.

    Raises ExternalCommandError if the tool wrote to stderr or exited
    with a non-zero status.
    """
    if sys.platform == 'win32':
        # Older Pythons on Windows cannot combine close_fds with
        # redirected standard handles.
        close_fds = False
    else:
        close_fds = True
    # NOTE(review): shell=True with string interpolation — `args` must come
    # from trusted callers only, otherwise this is shell-injectable.
    command = "rrdtool %s %s" % (command, args)
    process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE,
        close_fds=close_fds)
    (stdout, stderr) = process.communicate()
    if stderr:
        # Echo the failing command line before raising, for easier debugging.
        print(command)
        raise ExternalCommandError(stderr.strip())
    if process.returncode != 0:
        errmsg = "Return code from '%s' was %s." % (
            command, process.returncode)
        raise ExternalCommandError(errmsg)
    return stdout
def concat(args):
    """Return *args* unchanged when it is a string; join list items with spaces."""
    if isinstance(args, list):
        return " ".join(args)
    return args
def create(filename, parameters):
    """Create a new round-robin database file.

    filename -- path of the RRD file to create
    parameters -- string or list of rrdtool ``create`` arguments,
                  e.g. ``--start``, ``DS:...`` and ``RRA:...`` entries

    Raises ExternalCommandError if rrdtool fails.
    """
    # The original bound the (always empty) stdout to an unused local;
    # rrdtool create produces no output, so just run the command.
    _cmd('create', "%s %s" % (filename, concat(parameters)))
def update(filename, data, debug=False):
    """Feed new samples into an RRD file.

    data -- string or list of ``timestamp:value`` entries
    debug -- when True, use rrdtool's verbose ``updatev`` subcommand
    """
    subcommand = 'updatev' if debug else 'update'
    _cmd(subcommand, "%s %s" % (filename, concat(data)))
def fetchRaw(filename, query):
    """Run ``rrdtool fetch`` and return its raw output, stripped."""
    return _cmd('fetch', "%s %s" % (filename, concat(query))).strip()
def fetch(filename, query):
    """Run ``rrdtool fetch`` and parse the output into a dict.

    Returns two views of the same data:

    * ``results["ds"]`` maps each data-source name to a list of
      ``(timestamp, value)`` tuples;
    * ``results["time"]`` maps each timestamp to a ``{ds_name: value}``
      dict.

    The benefits of this structure become obvious when the RRD file has
    multiple DSs and RRAs.
    """
    raw = fetchRaw(filename, concat(query))
    lines = [entry for entry in raw.split('\n') if entry]
    # First line lists the DS names; second line is a blank separator.
    dsNames = lines[0].split()
    results = {
        "ds": {},
        "time": {},
    }
    for entry in lines[2:]:
        stamp, payload = entry.split(":")
        values = [common.coerce(item) for item in payload.split()]
        results["time"][int(stamp)] = dict(list(zip(dsNames, values)))
        for name, value in zip(dsNames, values):
            results["ds"].setdefault(name, [])
            # NOTE(review): value was already coerced above; coerce is
            # applied a second time here, mirroring the original code --
            # presumably coerce is idempotent, confirm against common.coerce.
            results["ds"][name].append((int(stamp), common.coerce(value)))
    return results
def dump(filename, outfile="", parameters=""):
    """Dump an RRD file to XML.

    With an empty *outfile* the XML text is returned as a string;
    otherwise rrdtool writes the XML to *outfile* and None is returned.
    """
    args = "%s %s %s" % (filename, outfile, concat(parameters))
    xml_output = _cmd('dump', args)
    if not outfile:
        return xml_output.strip()
def load(filename):
    """
    Load RRD data via the RRDtool XML dump into an ElementTree.
    >>> import tempfile
    >>> rrdfile = tempfile.NamedTemporaryFile()
    >>> parameters = ' --start 920804400'
    >>> parameters += ' DS:speed:COUNTER:600:U:U'
    >>> parameters += ' RRA:AVERAGE:0.5:1:24'
    >>> parameters += ' RRA:AVERAGE:0.5:6:10'
    >>> create(rrdfile.name, parameters)
    >>> tree = load(rrdfile.name)
    >>> [x.tag for x in tree]
    ['version', 'step', 'lastupdate', 'ds', 'rra', 'rra']
    """
    # Round-trip through "rrdtool dump" and parse the resulting XML text.
    return XML(dump(filename))
def info(filename, obj, **kwargs):
    """
    Print info for *obj* (a pyrrd wrapper object).

    Note: *filename* and ``**kwargs`` are accepted for signature symmetry
    with the other backend functions but are currently unused.
    """
    obj.printInfo()
def graph(filename, parameters):
    """Render an RRD graph image to *filename*.

    parameters -- string or list of rrdtool ``graph`` arguments: time
    range options plus DEF/CDEF/AREA/LINE/HRULE drawing directives.
    """
    _cmd('graph', "%s %s" % (filename, concat(parameters)))
def prepareObject(function, obj):
    """
    This function serves to make interacting with the backend as
    transparent as possible. Its sole purpose is to prepare the
    attributes and data of the various pyrrd objects for use by the
    functions that call out to rrdtool.

    For all of the rrdtool-methods in this module, we need a filename
    and then parameters -- both as strings. That's it.

    This function will get called by methods in the pyrrd wrapper
    objects. For instance, most of the methods of pyrrd.rrd.RRD
    will call this function. For graphs, pretty much only the method
    pyrrd.graph.Graph.write() will call this function.
    """
    # Each branch returns the positional-argument tuple expected by the
    # corresponding backend function above.
    if function == 'create':
        validParams = ['start', 'step']
        params = common.buildParameters(obj, validParams)
        # DS definitions must precede RRA definitions on the command line.
        data = [str(x) for x in obj.ds]
        data += [str(x) for x in obj.rra]
        return (obj.filename, params + data)
    if function == 'update':
        validParams = ['template']
        params = common.buildParameters(obj, validParams)
        # obj.values holds (time, data) pairs; when the data half of the
        # first pair is falsy, the time slot already carries the whole
        # "timestamp:value" string.
        FIRST_VALUE = 0
        DATA = 1
        TIME_OR_DATA = 0
        if obj.values[FIRST_VALUE][DATA]:
            data = ['%s:%s' % (time, values)
                for time, values in obj.values]
        else:
            data = [data for data, nil in obj.values]
        return (obj.filename, params + data)
    if function == 'fetch':
        validParams = ['resolution', 'start', 'end']
        params = common.buildParameters(obj, validParams)
        return (obj.filename, obj.cf, params)
    if function == 'info':
        return (obj.filename, obj)
    if function == 'graph':
        validParams = ['start', 'end', 'step', 'title',
            'vertical_label', 'width', 'height', 'only_graph',
            'upper_limit', 'lower_limit', 'rigid', 'alt_autoscale',
            'alt_autoscale_max', 'no_gridfit', 'x_grid', 'y_grid',
            'alt_y_grid', 'logarithmic', 'units_exponent', 'zoom',
            'font', 'font_render_mode', 'interlaced', 'no_legend',
            'force_rules_legend', 'tabwidth', 'base', 'color', 'imgformat',
            'slope_mode']
        params = common.buildParameters(obj, validParams)
        data = [str(x) for x in obj.data]
        return (obj.filename, params + data)
if __name__ == "__main__":
    # Exercise the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| [
"ross.b.brattain@intel.com"
] | ross.b.brattain@intel.com |
22df3a45ebed1aaa000d917da4cf9f6d77c8ad8e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04001/s037589622.py | 245a5174bd7db200bf673123ca5d544e43c7f7dc | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | #!/usr/bin python3
# -*- coding: utf-8 -*-
def main():
S = input()
l = len(S)-1
ret = 0
for i in range(2**l):
fs = S[0]
for j in range(l):
if (i>>j & 1) == 1:
fs += '+'
fs += S[j+1]
ret += eval(fs)
print(ret)
# Script entry point.
if __name__ == '__main__':
    main()
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
dc1007abb291bb034f6599134256533361787bf4 | 212d28a6a1b10431b1175183731227d59dc53bee | /Draw_permanent_states.py | c61a0ff2514dbad866f77f8ef47522857837f561 | [] | no_license | Andrey905/PyGame_project | 3fea8cf8eef401fe3a764b03a3d103fdeb242959 | eee6d852a831a62b32342fe9ddf888727481b485 | refs/heads/main | 2023-03-03T07:04:37.579746 | 2021-02-04T18:02:40 | 2021-02-04T18:02:40 | 325,557,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,207 | py | import pygame
class Draw_permanent_states:
    """Draws the HUD elements that are always visible: the coin counter
    (top right, with a spinning-coin animation) and the current game
    level (top left)."""

    def __init__(self, screen, window_width, window_height, game_level):
        self.__window_width = window_width
        self.__window_height = window_height
        # Scale the coin sprite down to 1/15th of its original size.
        coin_surf = pygame.image.load('Coin.png')
        self.__coin_scale = pygame.transform.scale(coin_surf, (int(coin_surf.get_width() // 15), int(coin_surf.get_height() // 15)))
        self.__coin_rect = self.__coin_scale.get_rect()
        # Toggles between the full-face sprite and a thin "edge-on" rect
        # to fake a rotating coin.
        self.__draw_half_face_coin = False
        self.__screen = screen
        self.__game_level = game_level
        self.__fontobj = pygame.font.SysFont('Agency FB', 25)

    def draw_permanent_states(self, n_of_coins, FPS_counter):
        """Draw both HUD elements; call once per frame."""
        self.__draw_n_of_coin(n_of_coins, FPS_counter)
        self.__draw_game_level()

    def raise_game_level(self):
        """Advance to the next game level."""
        self.__game_level += 1

    def __draw_n_of_coin(self, n_of_coins, FPS_counter):
        # Render the coin count right-aligned, then place the coin icon
        # just to its left.
        txt_image = self.__fontobj.render(str(n_of_coins), True, (129, 129, 129))
        txt_rect = txt_image.get_rect(topright=(self.__window_width - 7, 0))
        self.__screen.blit(txt_image, txt_rect)
        self.__coin_rect.topright = (txt_rect.topleft[0] - 2, txt_rect.topleft[1] + 4)
        self.__update_coin_image_pos(FPS_counter)
        if self.__draw_half_face_coin:
            # "Edge-on" frame: a thin gold rectangle centred on the icon.
            coin_width = 4
            coin_height = self.__coin_rect.height
            pygame.draw.rect(self.__screen, (247, 205, 17), (self.__coin_rect.centerx - coin_width // 2,
                                                             self.__coin_rect.centery - coin_height / 2,
                                                             coin_width, coin_height))
        else:
            self.__screen.blit(self.__coin_scale, self.__coin_rect)

    def __draw_game_level(self):
        txt = 'game level: ' + str(self.__game_level)
        txt_image = self.__fontobj.render(txt, True, (129, 129, 129))
        txt_rect = txt_image.get_rect(topleft=(7, 0))
        self.__screen.blit(txt_image, txt_rect)

    def __update_coin_image_pos(self, FPS_counter):
        # Flip the animation frame every 25 frames.
        if FPS_counter % 25 == 0:
            self.__draw_half_face_coin = not self.__draw_half_face_coin
| [
"noreply@github.com"
] | Andrey905.noreply@github.com |
b05a64bfdc796daba55747b51fe84300a0cbd094 | ab8db02202736bc7af2410154a9ab0c9f194f80e | /node_modules/serialport/build/config.gypi | c69ea50d4c9f9d27b5c6196316403446ca7d5575 | [
"MIT"
] | permissive | Zerphed/zerphed.github.io | 149b9be80426917aa7d70aaf091dc3334069d59b | 48d663a6a4e77d5a9b24f88138bef1639e2b9b99 | refs/heads/master | 2021-01-20T04:30:22.060111 | 2015-06-02T18:47:51 | 2015-06-02T18:47:51 | 34,660,682 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,059 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"icu_data_file": "icudt54l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt54l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "54",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"uv_library": "static_library",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/Users/jhnissin/.node-gyp/0.12.2",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/Users/jhnissin/code/personal/zerphed.github.io/node_modules/serialport/build/serialport/v1.7.1/Release/node-v14-darwin-x64/serialport.node",
"module_name": "serialport",
"module_path": "/Users/jhnissin/code/personal/zerphed.github.io/node_modules/serialport/build/serialport/v1.7.1/Release/node-v14-darwin-x64",
"save_dev": "",
"viewer": "man",
"browser": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"shell": "/bin/bash",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"init_license": "ISC",
"if_present": "",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "Infinity",
"long": "",
"local_address": "",
"git_tag_version": "true",
"cert": "",
"registry": "https://registry.npmjs.org/",
"npat": "",
"fetch_retries": "2",
"versions": "",
"message": "%s",
"key": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"cafile": "",
"heading": "npm",
"proprietary_attribs": "true",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/Users/jhnissin/.npmrc",
"init_module": "/Users/jhnissin/.npm-init.js",
"user": "",
"node_version": "0.12.2",
"save": "",
"editor": "vi",
"tag": "latest",
"global": "",
"optional": "true",
"force": "",
"bin_links": "true",
"searchopts": "",
"depth": "Infinity",
"searchsort": "name",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"strict_ssl": "true",
"save_prefix": "^",
"ca": "",
"save_exact": "",
"group": "20",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"cache_lock_stale": "60000",
"cache_min": "10",
"searchexclude": "",
"cache": "/Users/jhnissin/.npm",
"color": "true",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/2.7.4 node/v0.12.2 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "0022",
"init_version": "1.0.0",
"scope": "",
"init_author_name": "",
"git": "git",
"unsafe_perm": "true",
"tmp": "/var/folders/p2/4zv4xv396qz19vg5jw4_qs400000gn/T",
"onload_script": "",
"prefix": "/usr/local",
"link": ""
}
}
| [
"joonas.nissinen@aalto.fi"
] | joonas.nissinen@aalto.fi |
03f1750148666fdc2a66424395ef053051ad8d67 | 59275d7e6f769511f2b0f1eb55311049cb977dfd | /tweets_read_v6.py | ce47a0d0fe79e4a1ffe7293200239058fb5daf00 | [] | no_license | Warlang/cloud_computing | af9c93408213fb22ed5843fbe821da3f2a05155a | 58739d6f25b6161b6b8f5e7ca7df51e363f56b95 | refs/heads/master | 2021-01-10T15:23:20.713784 | 2015-10-06T14:54:05 | 2015-10-06T14:54:05 | 43,367,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,233 | py | from flask import Flask
import json
import subprocess
import sys
import time
#special for celery
from celery import Celery
app = Celery('tweets_read_v6', backend='amqp', broker='amqp://')
#end of special
# --- Attributes ---
fname = 'tweets_16.txt'
f = open(fname) #Open file stream (kept open for the module's lifetime)
list_of_words = ['han','hon','den','det','denna','denne','hen'] #Swedish pronouns to count
counters = {'han':0 , 'hon': 0, 'den':0, 'det':0, 'denna':0, 'denne':0, 'hen':0} #Per-word tallies
result = None #Global; holds the JSON-encoded counters once counting finishes
nr_cores = 1 #Change if you gonna use more cores
nr_splits = 0 #Number of file splits dispatched so far
#Read the whole file and record its length (in lines).
file_array = f.readlines() #read in the file
file_len = len(file_array)
#print len(file_array)
# --- Functions ---
#start the counting
def start():
    """Dispatch one celery counting task per core.

    Splits the file into ``nr_cores`` parts (via doSplit) and queues a
    doPart task for each; returns once every part has been dispatched.
    """
    # The original bound each AsyncResult to an unused local; the task
    # handles are not needed here, so just fire and forget.
    while nr_cores != nr_splits:
        doSplit()
        doPart.delay(nr_splits)
@app.task
def getStatus():
    """Return True once a counting result is available, else False."""
    return result is not None
@app.task
def getResult():
    # JSON-encoded word counts, or None until countWords has run.
    return result
#getPart return value to show which split you will get
#Side effect update nr of splits made and we will do same amount of splits as num of cores.
def doSplit():
    """Advance the split counter; one split is made per worker core."""
    global nr_splits
    nr_splits += 1
#TODO put it in thir own "part" result list
#Count words in a part of the file.
#Return which part they had
@app.task
def doPart(partToDo):
    """Count words in the *partToDo*-th (1-based) slice of the file.

    The file is divided into ``nr_cores`` equal slices; the last slice
    absorbs any remainder lines. Returns *partToDo* so callers can tell
    which slice finished.
    """
    # Use integer division: with ``from __future__ import division`` at
    # the top of this module, '/' yields floats, and float start/stop
    # values crash list indexing inside getText.
    part_size = file_len // nr_cores
    start_index = part_size * (partToDo - 1)
    stop_index = part_size * partToDo
    if partToDo == nr_cores:
        stop_index = file_len
    countWords(getText(start_index, stop_index))
    return partToDo
#getText: Get the text from the file that is gonna be checked from n lines.
#Arguements: start - is where you want to start check for text in the array and stop is where you end.
# must be start < stop
def getText(start, stop):
    """Concatenate the 'text' fields of the original (non-retweet) tweets
    found in file_array[start:stop].

    Every other line is skipped: the input file apparently interleaves
    JSON tweet lines with blank separator lines -- TODO confirm against
    the actual file format. If file_array[start] is itself a blank line,
    skipping starts immediately so parsing stays aligned on JSON lines.
    """
    text = " "
    skipRow = False
    if(file_array[start]=='\n'):
        skipRow = True
    i = start
    while (i < stop):
        row = file_array[i]
        # Alternate between skipping (separator line) and parsing (JSON line).
        if (skipRow):
            skipRow = False
        else:
            skipRow = True
            json_obj = json.loads(row)
            if (validText(json_obj)):
                text = text + json_obj['text']
        i = i + 1
    return text
#validText: Check if the text should be counter or if its a retweet of an old one
#Arguments: text - must be json object of correct type
#return true if text is valid. False otherwise
def validText(text):
    """Return True if *text* (a decoded tweet JSON dict) is an original
    tweet, False if it is a retweet.

    A retweet is identified by the presence of the 'retweeted_status'
    key in the tweet object.
    """
    return 'retweeted_status' not in text
#countWords: Count and update list_of_words depending of the occurence of words
#Arguments: text - string of text that should be counted
#side effect: Update counters
def countWords(text):
    """Count occurrences of each tracked word in *text* and add them to
    the global ``counters``; also refresh the JSON-encoded ``result``.

    Note: str.count matches substrings, so e.g. 'den' also matches
    inside 'denna' -- presumably intentional, verify if exact-word
    counts are required.
    """
    global counters
    for word in list_of_words:
        n = text.count(word)
        counters[word] = counters.get(word) + n
    global result
    result = json.dumps(counters) #TODO fix so it can be multiple results list to check to nr of cores.
#REST API
apps = Flask(__name__)
@apps.route('/result', methods=['GET'])
def getCounters():
    # NOTE(review): getResult() is None until counting finishes; Flask
    # raises on a None return value -- confirm /start ran first.
    return getResult()
@apps.route('/start', methods=['GET'])
def doStart():
    """Kick off the counting job and acknowledge the request.

    Flask requires every view to return a response; returning None (as
    this view previously did) raises a TypeError at request time.
    """
    start()
    return 'started'
@apps.route('/status', methods=['GET'])
def doGetStatus():
    """Report whether the counting result is ready ("True"/"False")."""
    # Flask cannot use a bare bool as a response body; return it as text.
    return str(getStatus())
if __name__ == '__main__':
    # Run the Flask application ("apps"), not the Celery application
    # ("app"): Celery's app object has no run(host=..., debug=...) entry
    # point, so the original line could not start the web server.
    apps.run(host='0.0.0.0', debug=True)
"thinker20"
] | thinker20 |
0150ee4b951d7aa98d33671a60f4da5d924e1da8 | 2de33ba731066a63352080dd19da1e4582bb00c4 | /collective.project/collective/project/browser/create_iteration.py | d3c03e2dcb7ff08becb20002dd3249c618cfcb4f | [] | no_license | adam139/plonesrc | 58f48e7cdfc8fbed7398011c40649f095df10066 | cbf20045d31d13cf09d0a0b2a4fb78b96c464d20 | refs/heads/master | 2021-01-10T21:36:44.014240 | 2014-09-09T04:28:04 | 2014-09-09T04:28:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,139 | py | from zope import interface, schema
from z3c.form import form, field, button
from plone.app.z3cform.layout import wrap_form
from collective.project import projectMessageFactory as _
from collective.project.bbb import getSite
from zope.schema import vocabulary
from zope.interface import directlyProvides
from zope.schema.interfaces import IVocabularyFactory
import datetime
import calendar
# Choice field listing the available projects; its values come from the
# "projects_vocab" named vocabulary (see projectsVocab below).
projectsField = schema.Choice(
    title=u'Projects',
    description=u'Projects field.',
    vocabulary='projects_vocab')
def breadcrumbs(context, project):
    """Return a ' > ' separated title path for *project* (a catalog
    brain), deepest object first, skipping the two root path segments."""
    segments = list(project.getObject().getPhysicalPath())[2:]
    titles = []
    # Walk from the full path up towards the root, one segment at a time.
    while segments:
        titles.append(context.restrictedTraverse('/'.join(segments)).Title())
        segments.pop()
    return ' > '.join(titles)
class CreateIterationSchema(interface.Interface):
    """Form schema: the iteration title and the projects to create it in."""
    # Default is the current month, e.g. "January 2024". Note it is
    # computed once at import time, not per request.
    iteration = schema.TextLine(
        title=_(u"Iteration"),
        default=_(unicode(datetime.datetime.today().strftime('%B %Y'))))
    projects = schema.Set(
        title=u'Project(s)',
        value_type=projectsField,
    )
class CreateIterationForm(form.Form):
fields = field.Fields(CreateIterationSchema)
ignoreContext = True # don't use context to get widget data
label = _(u"Iteration tool")
description = _(u"Create iteration for selected projects")
def deactivate_iteration(self, action, data):
wftool = self.context.portal_workflow
iterations = find_iterations()
for iteration in iterations:
if data['iteration']:
data_iter = data['iteration']
new_iter = data_iter.lower().replace(' ', '-')
iter = iteration.getObject()
if not unicode(iter.id) == new_iter: # Don't deactive the current iter
if wftool.getInfoFor(iter, 'review_state') == 'active':
wftool.doActionFor(iter,'deactivate')
def create_iteration(self, action, data):
if data['iteration']:
data_iter = data['iteration']
new_iter = data_iter.lower().replace(' ', '-')
projects = find_projects()
for project in projects:
proj = project.getObject()
if proj in data['projects']:
try:
proj.invokeFactory('iteration', new_iter)
except:
print "Cannot create iteration %s for project %s." % (new_iter, proj.absolute_url())
try:
iter_new = proj[new_iter]
iter_new.setTitle(data_iter)
iter_new.start = startDate()
iter_new.stop = stopDate()
iter_new.reindexObject()
except:
print "Cannot create iteration %s for project %s." % (new_iter, proj.absolute_url())
@button.buttonAndHandler(u'Create')
def handleApply(self, action):
data, errors = self.extractData()
if errors:
self.status = form.EditForm.formErrorsMessage
else:
self.create_iteration(action, data)
self.deactivate_iteration(action, data)
def projectsDict(context):
    """Map breadcrumb title -> project object for every project brain."""
    mapping = {}
    for brain in find_projects():
        mapping[breadcrumbs(context, brain)] = brain.getObject()
    return mapping
def projectsVocab(context):
    """Build a SimpleVocabulary of (breadcrumb title, project object)
    pairs, sorted by title; registered below as "projects_vocab"."""
    projects = projectsDict(context)
    items = sorted(projects.items())
    return vocabulary.SimpleVocabulary.fromItems(items)
def find_iterations():
    """Return catalog brains for every iteration object in the site."""
    site = getSite()
    return site.portal_catalog(portal_type='iteration')
def find_projects():
    """Return catalog brains for every project object in the site."""
    site = getSite()
    return site.portal_catalog(portal_type='project')
def startDate():
    """Return midnight on the first day of the current month."""
    today = datetime.datetime.now()
    return datetime.datetime(today.year, today.month, 1)
def stopDate():
    """Return midnight on the last day of the current month."""
    today = datetime.datetime.now()
    final_day = calendar.monthrange(today.year, today.month)[1]
    return datetime.datetime(today.year, today.month, final_day)
directlyProvides(projectsVocab, IVocabularyFactory)
CreateIteration = wrap_form(CreateIterationForm)
| [
"plone@localhost.localdomain"
] | plone@localhost.localdomain |
c6b2508756ca483742fc681caa459b0b1fe7fff9 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_wounder.py | b0d6cd04f474aad1a04a22fbffe4505de907b214 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
from xai.brain.wordbase.verbs._wound import _WOUND
#class header
class _WOUNDER(_WOUND, ):
    """Word entry for "wounder"; inherits behaviour from the base verb
    entry _WOUND and overrides only the identifying metadata."""
    def __init__(self,):
        _WOUND.__init__(self)
        self.name = "WOUNDER"
        self.specie = 'verbs'
        self.basic = "wound"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
73b384d0b1fb7908b2521c4f150f93c076b807c7 | 10d17864a685c025bb77959545f74b797f1d6077 | /capitulo 07/07.22.py | d7274f669aa67ce98579dc96a66e3b781eec34c2 | [] | no_license | jcicerof/IntroducaoPython | 02178d2dfcaa014587edbd3090c517089ccef7c2 | 02e619c7c17e74acdc3268fbfae9ab624a3601dd | refs/heads/master | 2020-04-24T18:12:21.422079 | 2019-02-23T05:14:43 | 2019-02-23T05:14:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | ##############################################################################
# Parte do livro Introdução à Programação com Python
# Autor: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2019
# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8
# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3
# Terceira edição - Janeiro/2019 - ISBN 978-85-7522-718-3
# Site: http://python.nilo.pro.br/
#
# Arquivo: listagem3\capítulo 07\07.22.py
# Descrição:
##############################################################################
# Book listing: these expressions are meant to be evaluated in an
# interactive session; as a script they compute values but print nothing.
"771".isdigit()     # True: every character is a decimal digit
"10.4".isdigit()    # False: the decimal point is not a digit
"+10".isdigit()     # False: the sign character is not a digit
"-5".isdigit()      # False: the sign character is not a digit
| [
"jose.cicero@gmail.com"
] | jose.cicero@gmail.com |
c585072ddb9c6cccf258955047c633c5f013cc07 | 06c8b19c0b2e75271a8b866388a2b26b48abb02d | /codes/seeGlass.py | 18a7bcafd8cebb74972f12f8adb4de9ad4cb8372 | [] | no_license | FedeDavilaKurban/Multipoles | 7fe73d2df3864acdd4c20e7aa0670165d390fe84 | 0185242b2bcf0d36648d7513c3240fab17462bed | refs/heads/master | 2023-06-25T11:09:08.454284 | 2021-07-23T18:54:52 | 2021-07-23T18:54:52 | 252,025,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,350 | py | from astropy.io import ascii
from astropy import table
from astropy.table import Table
from nbodykit.lab import *
import matplotlib.pyplot as plt
import numpy as np
###
#Visualize xy projection of one Glass
###
box=1500.
dtype=[('Position', ('f4', 3))]
gcat=BinaryCatalog('glass_001.bin',dtype,header_size=4)
gcat['Position']/=box
glass=Table(gcat.compute(gcat['Position']),names=['x','y','z'])
glass1=glass[np.where(glass['z']<.1)]
plt.scatter(glass1['x'],glass1['y'],s=.1)
plt.show()
box=1.
###
#Visualize PowSpec of one glass
###
#gcat['Position'] = transform.StackColumns(gcat['x'], gcat['y'], gcat['z'])
dk=7./box
dk=0.7
real_mesh = gcat.to_mesh(compensated=True, resampler='tsc', position='Position', BoxSize=[box,box,box],Nmesh=256)
r = FFTPower(real_mesh, mode='1d',dk=dk)
Pkglass = r.power
plt.loglog(Pkglass['k'], Pkglass['power'].real,label='Glass')
plt.show()
###
#Plot the Xi calculated using Glasses
###
names=['xi_0','xi_2','xi_4','xi_6']
nr = 2.5+np.linspace(5.,150.,30)[:-1]
nran = 8*87**3
xi_true = ascii.read('../data/xi_l_noran.txt',names=names)
for i in range(0,400,2):
g = ascii.read('../data/out/glass/xi_l_glass_{}_{}-{}.txt'.format(nran,i,i+1),names=names)
plt.plot(nr, g['xi_0'] )
#plt.plot(nr, g['xi_0']-xi_true['xi_0'])
plt.yscale('log')
plt.show()
| [
"fdavilakurban@unc.edu.ar"
] | fdavilakurban@unc.edu.ar |
ba7d89130eefa370fa05fd79edc72c2a5e486c2a | 23176ac222bcf5a4aa3188fd195b9f2eb8c05fc6 | /ML/run_once/dataset_authors.py | 2fccee98b41236ad6458fec3457ef5cc47ab0034 | [] | no_license | edwardj4747/Automated-Approach-to-Dataset-Labeling | c0682b831ecfca4105177d52da0594e80e55e9d2 | 7efa9badd39026b2c3b10cd0981c86085e0d7516 | refs/heads/master | 2023-04-13T03:00:43.197253 | 2021-05-01T23:57:22 | 2021-05-01T23:57:22 | 332,786,315 | 1 | 1 | null | 2021-04-07T01:38:22 | 2021-01-25T15:09:44 | Jupyter Notebook | UTF-8 | Python | false | false | 6,523 | py | from collections import defaultdict
import json
import glob
import re
'''
Create a dictionary mapping of dataset: [author(s) of the dataset] as based on the metadata files
'''
def dictionary_to_list(dataset_author_mapping, save=False):
    """Flatten a {dataset: [authors]} mapping into a deduplicated,
    normalized (lowercase, "Dr."/"PhD" suffixes stripped) author list.

    Order of first appearance is preserved. When *save* is True the list
    is also written to ../ml_data/author_keywords_list.json.

    Returns the list of unique normalized authors (the original computed
    this list but never returned it, making save=False calls useless).
    """
    seen = set()
    unique_authors = []
    for list_of_authors in dataset_author_mapping.values():
        for author in list_of_authors:
            author = re.sub(r'^Dr\. ', '', author)  # remove the Dr. in a Dr. so-and-so
            author = re.sub(r'(, PH\. ?D\.?)|(, PHD)', '', author)
            author = author.lower()
            if author not in seen:
                unique_authors.append(author)
                seen.add(author)
    if save:
        with open('../ml_data/author_keywords_list.json', 'w', encoding='utf-8') as f:
            json.dump(unique_authors, f, indent=4)
    return unique_authors
def remove_known_non_authors(author_string):
    """Strip known institution/team names (and an optional trailing comma)
    from a lowercased creator string, leaving only person names.

    Patterns are applied in list order; more specific patterns appear
    before their shorter substrings (e.g. long agency phrases before
    plain 'nasa') so partial matches don't leave fragments behind.
    """
    not_useful = [
        '(goddard )?laboratory for atmospheres at( +gsfc)?',
        'copernicus sentinel data processed by esa, koninklijk nederls meteorologisch instituut \(knmi\)',
        'copernicus sentinel data processed by esa, german aerospace center \(dlr\)',
        'koninklijk nederls meteorologisch instituut \(knmi\)',
        'koninklijk nederl,s meteorologisch instituut \(knmi\)',
        'netherlands institute for space research \(sron\)',
        'copernicus sentinel data processed by esa, rutherford appleton laboratory \(ral\)',
        'copernicus sentinel data processed by esa, german aerospace center-institute for environmental research,university of bremen \(dlr_iup\)',
        'copernicus sentinel data processed by esa',
        'koninklijk nederlands meteorologisch instituut \(knmi\)',
        'oco-2 science team',
        'oco(-3)? science team',
        'airs project',
        'airs science team',
        'university of michigan',
        'global hydrology resource center',
        'neespi data center project',
        'goddard space flight center \(gsfc\)',
        'gpm science team',
        'tropical rainfall measuring mission \(trmm\)',
        'tropical rainfall measuring mission \(trmm\)',
        'global modeling',
        'polder science team,cnes',
        ', assimilation office \(gmao\)',
        'goddard space flight center',
        'modis science team',
        'oxford university( aopp)?',
        'trmm science team',
        'eos mls science team',
        'goddard earth sciences data',
        ', information services center',
        'ges disc northern eurasian earth science partnership initiative project',
        'ncep,emc',
        'omi science team',
        'oxford university aopp',
        'jet propulsion laboratory: ',
        'goddard laboratory for atmospheres at gsfc',
        'toms science team',
        'goddard laboratory for atmospheres at nasa,gsfc',
        'nasa,gsfc',
        'msfc,nasa',
        'ges_disc',
        'at princeton university,',
        'gsfc',
        'nasa'
    ]
    # Each entry is interpolated into the regex, so entries may contain
    # their own regex syntax; an optional trailing comma is removed too.
    for nu in not_useful:
        author_string = re.sub(rf'{nu},?', '', author_string)
    return author_string
# Walk every dataset metadata JSON file, normalize the Creator strings of
# its CollectionCitations, and collect them per dataset short name.
dataset_directory = 'C:/Users/edwar/Desktop/Publishing Internship/datasets'
output_file_name = 'dataset_to_miv.json'
output_file_location = '../ml_data/' + output_file_name

data = defaultdict(set)
for file in glob.glob(dataset_directory + "/*.json"):
    print(file)
    with open(file, errors='ignore') as f:
        contents = json.load(f)
    collection_citations = contents['CollectionCitations']
    short_name = contents['CollectionCitations'][0]['SeriesName']
    for cc in collection_citations:
        creator = cc.get('Creator', '').lower()
        # Known multi-author institution strings are rewritten to plain
        # "name, name" form before the generic cleanup below.
        nasa_string = 'NASA/GSFC/HSL'.lower()
        creator = re.sub(rf'{nasa_string}', ',', creator)
        creator = re.sub(r'vrije universiteit amsterdam \(richard de jeu\) and nasa gsfc \(manfred owe\)\.?',
                         'richard de jeu, manfred owe', creator)
        creator = re.sub(r'usda agricultural research service \(wade crow\)', 'wade crow', creator)
        creator = re.sub(
            r'uw-madison space science and engineering center: hank revercomb; umbc atmospheric spectroscopy laboratory: larrabee strow',
            'hank revercomb, larrabee strow', creator)
        creator = re.sub(r'princeton university \(eric wood\)', 'eric wood', creator)
        creator = re.sub(r'colorado state university \(christian kummerow\)', 'christian kummerow', creator)
        creator = re.sub(r'airs science team \(joel susskind, nasa/gsfc\)', 'joel susskind', creator)
        creator = re.sub(r'crow, wade \(usda ars\) k. tobin \(texas a,m iu\)', 'wade crow, k. tobin', creator)
        # Generic cleanup: unify separators, drop honorifics, years,
        # "et al." markers and middle initials.
        creator = re.sub(r'/|;', ',', creator)
        creator = re.sub(r'and ', ', ', creator)
        creator = re.sub(r'&', ',', creator)
        creator = re.sub(r'e\. ?g\. ?', '', creator)
        creator = re.sub(r', et al\.', '', creator)
        creator = re.sub(r'et al\.(, )?', ',', creator)
        creator = re.sub(r',?dr\. ', '', creator)
        creator = re.sub(r'\(\d{4}\)', '', creator)
        creator = re.sub(r', phd', '', creator)
        creator = re.sub(r'(\w) [a-z]\. (\w)', '\\1 \\2', creator)  # middle initials
        creator = remove_known_non_authors(creator)

        # formatting the commas
        creator = re.sub(r' {2,}', ' ', creator)
        creator = re.sub(r' ,', ',', creator)
        creator = re.sub(r', ?,', ',', creator)
        creator = creator.strip()
        if creator != '':
            data[short_name].add(creator)

    # dataset_name = contents['CollectionCitations'][0]['SeriesName']
    # contact_person = contents['ContactPersons']
    #
    # authors = []
    # for index, element in enumerate(contact_person):  # will sometimes be a list
    #     if "Investigator" in element['Roles']:
    #         author_name = element['FirstName'] + " " + element['LastName']
    #         authors.append(author_name)
    #
    # data[dataset_name] += authors
    #
# Remove duplicate entries (and convert each set to a JSON-serializable
# list while preserving iteration order).
for key, all_authors in data.items():
    seen = set()
    unique = []
    for item in all_authors:
        if item not in seen:
            unique.append(item)
            seen.add(item)
    data[key] = unique

with open('../ml_data/dataset_authors_v2.json', 'w', encoding='utf-8') as f:
    json.dump(data, f, indent=4)

dictionary_to_list(data, save=True)
| [
"edward.jahoda@gmail.com"
] | edward.jahoda@gmail.com |
d5f8c770ce43647135663e3e1f2817e3f071c942 | 6a83bbff721594fd249b466c39b955feba41e13b | /baekjoon/3197.py | fcb885cac78715c7b6c258fb4bc888766ccc3f3c | [] | no_license | hololee/algorithm | 9b59aafc93e35c37dedcd7369ba05d8b38db3477 | ac337331ca4cd8132cc59d5e2164bfdc24573ef0 | refs/heads/master | 2023-07-02T05:33:09.915717 | 2021-08-04T08:59:05 | 2021-08-04T08:59:05 | 288,984,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,089 | py | # 200921
import sys
from collections import deque
class LakeMap(object):
    """Lake grid for an ice-melting / swan-meeting simulation (BOJ 3197).

    Cell values: 'X' = ice, '.' = water, 'L' = swan (converted to water
    after its coordinates are recorded).  Two frontiers are kept between
    calls so no cell is ever re-scanned from scratch:
      * melting_positions - ice cells melted on the most recent day,
      * ice_in_path       - ice cells that blocked the last reachability
                            search; the BFS resumes from them next time.
    """
    def __init__(self, lake, row, col):
        # `lake` is a list of row lists of single characters; it is
        # mutated in place as the ice melts.
        self.lake = lake
        self.row = row
        self.col = col
        # 1 = cell already visited by the swan's BFS (never reset).
        self.check_positions = [[0 for c in range(col)] for r in range(row)]
        self.swan_position = []
        self.melting_positions = set()
        # Ice neighbours met while searching; the BFS restarts from these.
        self.ice_in_path = set()
        self._find_swan()
    def _find_swan(self):
        # Record both swans' coordinates and replace their cells with
        # water so they behave like ordinary passable cells afterwards.
        for i in range(self.row):
            for j in range(self.col):
                if self.lake[i][j] == 'L':
                    self.swan_position.append((i, j))
                    self.lake[i][j] = '.'
    def reduce_area(self):
        """Melt one layer: every ice cell adjacent to water becomes water."""
        new_melting_positions = set()
        # No melt frontier yet (first day): scan the whole grid.
        if not self.melting_positions:
            for i in range(self.row):
                for j in range(self.col):
                    # Only water cells can melt their neighbours.
                    if self.lake[i][j] == '.':
                        # Check the 4 neighbouring directions.
                        if i > 0 and self.lake[i - 1][j] == 'X':  # up
                            new_melting_positions.add((i - 1, j))
                        if i < (self.row - 1) and self.lake[i + 1][j] == 'X':  # down
                            new_melting_positions.add((i + 1, j))
                        if j > 0 and self.lake[i][j - 1] == 'X':  # left
                            new_melting_positions.add((i, j - 1))
                        if j < (self.col - 1) and self.lake[i][j + 1] == 'X':  # right
                            new_melting_positions.add((i, j + 1))
        # Otherwise only cells melted last day can expose new ice.
        else:
            for target in self.melting_positions:
                i = target[0]
                j = target[1]
                # Check the 4 neighbouring directions.
                if i > 0 and self.lake[i - 1][j] == 'X':  # up
                    new_melting_positions.add((i - 1, j))
                if i < (self.row - 1) and self.lake[i + 1][j] == 'X':  # down
                    new_melting_positions.add((i + 1, j))
                if j > 0 and self.lake[i][j - 1] == 'X':  # left
                    new_melting_positions.add((i, j - 1))
                if j < (self.col - 1) and self.lake[i][j + 1] == 'X':  # right
                    new_melting_positions.add((i, j + 1))
        for pos in new_melting_positions:
            self.lake[pos[0]][pos[1]] = '.'
        self.melting_positions = new_melting_positions
    def _visit_adj(self, moving_position, target_que):
        # Enqueue unvisited water neighbours of the given cell; ice
        # neighbours are stashed in ice_in_path so the search can resume
        # from them once that ice has melted.
        x, y = moving_position[0], moving_position[1]
        if x > 0:
            if self.lake[x - 1][y] == '.':  # up
                if self.check_positions[x - 1][y] == 0:
                    self.check_positions[x - 1][y] = 1
                    target_que.append((x - 1, y))
            else:
                self.ice_in_path.add((x - 1, y))
        if x < (self.row - 1):
            if self.lake[x + 1][y] == '.':  # down
                if self.check_positions[x + 1][y] == 0:
                    self.check_positions[x + 1][y] = 1
                    target_que.append((x + 1, y))
            else:
                self.ice_in_path.add((x + 1, y))
        if y > 0:
            if self.lake[x][y - 1] == '.':  # left
                if self.check_positions[x][y - 1] == 0:
                    self.check_positions[x][y - 1] = 1
                    target_que.append((x, y - 1))
            else:
                self.ice_in_path.add((x, y - 1))
        if y < (self.col - 1):
            if self.lake[x][y + 1] == '.':  # right
                if self.check_positions[x][y + 1] == 0:
                    self.check_positions[x][y + 1] = 1
                    target_que.append((x, y + 1))
            else:
                self.ice_in_path.add((x, y + 1))
    def is_reach(self):
        """Return True when the second swan is reachable through water."""
        target_que = deque()
        if not self.ice_in_path:
            # First call: start the BFS from the first swan's position.
            target_que.append(self.swan_position[0])
            self.check_positions[self.swan_position[0][0]][self.swan_position[0][1]] = 1
        else:
            # Later calls: resume from the ice cells met during the
            # previous search (some may have melted since).
            for pos in self.ice_in_path:
                target_que.append(pos)
                self.check_positions[pos[0]][pos[1]] = 1
            self.ice_in_path.clear()
        # Explore every currently reachable cell.
        while target_que:
            tar_pos = target_que.popleft()
            self._visit_adj(tar_pos, target_que)
        # Cells blocked by ice were saved into ice_in_path during the
        # walk, so the next call restarts from exactly those positions.
        # Finally, check whether the other swan's cell was reached.
        if self.check_positions[self.swan_position[1][0]][self.swan_position[1][1]] == 1:
            return True
        return False
# Main start
# Read the R x C lake grid, then simulate day by day: first test whether
# the two swans can already reach each other through water; if not, melt
# one layer of ice.  Prints the number of days until they can meet.
# (Fix: removed the unused `meet` flag; build the grid with a
# comprehension instead of a manual append loop.)
R, C = list(map(int, sys.stdin.readline().split()))
# rstrip('\n') drops only the trailing newline of each input row.
lake = [list(sys.stdin.readline().rstrip('\n')) for _ in range(R)]
days = 0
lakeMap = LakeMap(lake, R, C)
while not lakeMap.is_reach():
    days += 1
    lakeMap.reduce_area()
print(days)
| [
"lccandol@naver.cm"
] | lccandol@naver.cm |
7d2d1b6103c5ea43b2cfe02b42cba943b94881a1 | f5f7af219cd9122a0b381dfa113234116d839011 | /202002发送微信信息.py | 453370022770619ed27f0700813b815a31d137c0 | [] | no_license | Victordpf/Victor- | fe03cef0ea74e00389e98731e6c47f5e36cbcbd4 | ccdf94ec119a4d169743196f0d04d075942a3ce5 | refs/heads/master | 2021-01-01T01:07:02.823296 | 2020-02-17T12:47:12 | 2020-02-17T12:47:12 | 239,112,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,208 | py | #! python3
"""说明:
1、tongxunlu.png图片必须用通讯录显示页最下面的图片;
2、2020春节已经测试过,可以用"""
import pyautogui,os,time,pyperclip
from PIL import Image
os.chdir('D:\\Python-Victor\\快速上手-用过的文件')
pyautogui.PAUSE=0.5
time.sleep(6)
text='拜年啦,拜年啦,鼠年来临之际,鹏飞祝您在鼠年:[此处有音乐]鹏程万里,飞黄腾达,身体健康,阖家安康,财运亨通,福星高照。'
pyperclip.copy(text)
#获取发消息焦点(由于会变动,要调用图片识别),'enter'发送信息
#获取通讯录焦点(图像识别)
tongxunlu=Image.open('tongxunlu.png')
locate1=pyautogui.locateOnScreen(tongxunlu)
center1=pyautogui.center(locate1)
#第一组发送
print(center1)
pyautogui.click(center1)
#以下为第一组发送特有的
renming=Image.open('renming.png')#这个图像为开始的图像,需要重新设置
#这一步是为了获取鼠标的焦点,也可以不用图像识别,直接设置鼠标坐标
#也可以把要单独发送信息的头像设置图像列表,只为个别人发送微信
locate3=pyautogui.locateOnScreen(renming)
center3=pyautogui.center(locate3)
print(center3)
pyautogui.click(center3)
#pyautogui.press('down')
#继续第一组发送
#这里的图像识别不能免除,因为图片的位置是不固定的
faxiaoxi=Image.open('faxiaoxi.png')
locate2=pyautogui.locateOnScreen(faxiaoxi)
center2=pyautogui.center(locate2)
print(center2)
pyautogui.click(center2)
pyautogui.hotkey('ctrl','v')
pyautogui.press('enter')
for i in range(942):#控制发送的人数,如果所有联系人共300,则设为300
#点击到通讯录
j=i+1
print('循环中第'+str(j)+'个')
pyautogui.click(center1)
pyautogui.click(center3)
#点击通讯录右边的姓名列,为了能够上下,自动选择灰色的姓名的地方
pyautogui.press('down')
#发送消息
#faxiaoxi=Image.open('faxiaoxi.png')
locate2=pyautogui.locateOnScreen(faxiaoxi)
center2=pyautogui.center(locate2)
pyautogui.click(center2)
pyautogui.hotkey('ctrl','v')
pyautogui.press('enter')
| [
"noreply@github.com"
] | Victordpf.noreply@github.com |
371af8cf202bc43ff32f5450e4bd1666cdb52069 | 8ec3db88a3463f1e8eb40af89966c8b093bad477 | /capston/Filter.py | 6afa20581cd1f6aab7bf4a082aa6f57273dec1df | [] | no_license | heyhobeach/capston | e2327619ce9a2a815829880d1dddb2caef31f288 | 5e733f91db2443bace4e962e77aa00a1b24b28df | refs/heads/main | 2023-03-14T09:07:36.392743 | 2021-03-10T05:57:52 | 2021-03-10T05:57:52 | 314,143,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | import Tkinter
import tkMessageBox
import os
import subprocess
import threading
import time
#################################################################################
def run_rosfilter():
    """Launch the ROS follow-filter node; blocks until roslaunch exits."""
    # NOTE(review): shell=True is acceptable here because the command is
    # a fixed string literal - no untrusted input is interpolated.
    subprocess.call("roslaunch turtlebot3_follow_filter turtlebot3_follow_filter.launch",shell=True)
#################################################################################
def okClick1():
    # Button callback.  Runs the launcher synchronously, so the Tk main
    # loop is blocked for as long as roslaunch keeps running.
    run_rosfilter()
#################################################################################
# Minimal Tkinter window: a label and one big "Move ON" button.
root = Tkinter.Tk()
label=Tkinter.Label(root,text="BigDate 2.0")
label.pack()
root.geometry("200x200")
#################################################################################
button1 = Tkinter.Button(root,text="Move ON",width=20,height=13,command=okClick1)
button1.pack()
#################################################################################
root.mainloop()
| [
"noreply@github.com"
] | heyhobeach.noreply@github.com |
7173df93961f6ef1116d2a6295643f7aac0f90d7 | 9b77c449497b776f28c63305f1bd24617ebc1d9b | /rango/migrations/0004_auto_20170515_1130.py | 791f4d0287a95e4db1bb4ca2c4106d345992f797 | [] | no_license | XOyarz/tango_with_django_project | 0c0416ad5188b1eeec90232feb6f7f6fa21b55fe | 42e32403f1710a8400bd15bbd06cd76f511b9bd0 | refs/heads/master | 2020-12-30T13:39:49.415073 | 2017-05-20T12:41:12 | 2017-05-20T12:41:12 | 91,241,170 | 0 | 0 | null | 2017-05-17T13:50:25 | 2017-05-14T12:15:11 | Python | UTF-8 | Python | false | false | 624 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-05-15 09:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the `slug` and `views` fields to the rango `Category` model."""
    dependencies = [
        ('rango', '0003_category_likes'),
    ]
    operations = [
        migrations.AddField(
            model_name='category',
            name='slug',
            # Empty-string default is only used to back-fill existing
            # rows; preserve_default=False drops it from the model.
            field=models.SlugField(default=''),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='category',
            name='views',
            field=models.IntegerField(default=0),
        ),
    ]
| [
"xavier982@hotmai.com"
] | xavier982@hotmai.com |
62054d2b77f9ee5d2bf2a7bd93ae6a176ff1e728 | c87303cbb6eb1e511fa0776e2ecd076385dfe86f | /kivymd/icon_definitions.py | 39c9abb5a1909acd83d31f546b027dd8e07b6e37 | [] | no_license | barry-2710/youtube-downloader | 52bf3e6a29fd9db93165671fad6cfe82fbeb70c0 | 360a38393500d193f5d32f0945bc127650a9cf9c | refs/heads/main | 2023-02-07T21:52:39.687488 | 2020-12-30T06:56:23 | 2020-12-30T06:56:23 | 325,478,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134,445 | py | """
Icon Definitions
================
Copyright (c) 2015 Andrés Rodríguez and KivyMD contributors -
KivyMD library up to version 0.1.2
Copyright (c) 2019 Ivanov Yuri and KivyMD contributors -
KivyMD library version 0.1.3 and higher
For suggestions and questions:
<kivydevelopment@gmail.com>
This file is distributed under the terms of the same license,
as the Kivy framework.
These expanded material design icons are maintained by Austin Andrews
(Templarian on Github). They can be found at materialdesignicons.com.
LAST UPDATED: Version 4.1.95
For those updating from an older version of KivyMD, icon names have
changed/updated (e.g. square-o is now checkbox-blank-outline). The
previous gallery for comparison (if you need to find the icon using
the previous name is version 2.2.0 located at
https://zavoloklom.github.io/material-design-iconic-font/icons.html
"""
md_icons = {
"ab-testing": "\U000F001C",
"access-point": "\uF002",
"access-point-network": "\uF003",
"access-point-network-off": "\uFBBD",
"account": "\uF004",
"account-alert": "\uF005",
"account-alert-outline": "\uFB2C",
"account-arrow-left": "\uFB2D",
"account-arrow-left-outline": "\uFB2E",
"account-arrow-right": "\uFB2F",
"account-arrow-right-outline": "\uFB30",
"account-badge": "\uFD83",
"account-badge-alert": "\uFD84",
"account-badge-alert-outline": "\uFD85",
"account-badge-horizontal": "\uFDF0",
"account-badge-horizontal-outline": "\uFDF1",
"account-badge-outline": "\uFD86",
"account-box": "\uF006",
"account-box-multiple": "\uF933",
"account-box-multiple-outline": "\U000F002C",
"account-box-outline": "\uF007",
"account-card-details": "\uF5D2",
"account-card-details-outline": "\uFD87",
"account-check": "\uF008",
"account-check-outline": "\uFBBE",
"account-child": "\uFA88",
"account-child-circle": "\uFA89",
"account-circle": "\uF009",
"account-circle-outline": "\uFB31",
"account-clock": "\uFB32",
"account-clock-outline": "\uFB33",
"account-convert": "\uF00A",
"account-details": "\uF631",
"account-edit": "\uF6BB",
"account-edit-outline": "\U000F001D",
"account-group": "\uF848",
"account-group-outline": "\uFB34",
"account-heart": "\uF898",
"account-heart-outline": "\uFBBF",
"account-key": "\uF00B",
"account-key-outline": "\uFBC0",
"account-minus": "\uF00D",
"account-minus-outline": "\uFAEB",
"account-multiple": "\uF00E",
"account-multiple-check": "\uF8C4",
"account-multiple-minus": "\uF5D3",
"account-multiple-minus-outline": "\uFBC1",
"account-multiple-outline": "\uF00F",
"account-multiple-plus": "\uF010",
"account-multiple-plus-outline": "\uF7FF",
"account-network": "\uF011",
"account-network-outline": "\uFBC2",
"account-off": "\uF012",
"account-off-outline": "\uFBC3",
"account-outline": "\uF013",
"account-plus": "\uF014",
"account-plus-outline": "\uF800",
"account-question": "\uFB35",
"account-question-outline": "\uFB36",
"account-remove": "\uF015",
"account-remove-outline": "\uFAEC",
"account-search": "\uF016",
"account-search-outline": "\uF934",
"account-settings": "\uF630",
"account-star": "\uF017",
"account-star-outline": "\uFBC4",
"account-supervisor": "\uFA8A",
"account-supervisor-circle": "\uFA8B",
"account-switch": "\uF019",
"account-tie": "\uFCBF",
"accusoft": "\uF849",
"adchoices": "\uFD1E",
"adjust": "\uF01A",
"adobe": "\uF935",
"adobe-acrobat": "\uFFBD",
"air-conditioner": "\uF01B",
"air-filter": "\uFD1F",
"air-horn": "\uFD88",
"air-purifier": "\uFD20",
"airbag": "\uFBC5",
"airballoon": "\uF01C",
"airballoon-outline": "\U000F002D",
"airplane": "\uF01D",
"airplane-landing": "\uF5D4",
"airplane-off": "\uF01E",
"airplane-takeoff": "\uF5D5",
"airplay": "\uF01F",
"airport": "\uF84A",
"alarm": "\uF020",
"alarm-bell": "\uF78D",
"alarm-check": "\uF021",
"alarm-light": "\uF78E",
"alarm-light-outline": "\uFBC6",
"alarm-multiple": "\uF022",
"alarm-note": "\uFE8E",
"alarm-note-off": "\uFE8F",
"alarm-off": "\uF023",
"alarm-plus": "\uF024",
"alarm-snooze": "\uF68D",
"album": "\uF025",
"alert": "\uF026",
"alert-box": "\uF027",
"alert-box-outline": "\uFCC0",
"alert-circle": "\uF028",
"alert-circle-outline": "\uF5D6",
"alert-decagram": "\uF6BC",
"alert-decagram-outline": "\uFCC1",
"alert-octagon": "\uF029",
"alert-octagon-outline": "\uFCC2",
"alert-octagram": "\uF766",
"alert-octagram-outline": "\uFCC3",
"alert-outline": "\uF02A",
"alien": "\uF899",
"all-inclusive": "\uF6BD",
"alpha": "\uF02B",
"alpha-a": "\u0041",
"alpha-a-box": "\uFAED",
"alpha-a-box-outline": "\uFBC7",
"alpha-a-circle": "\uFBC8",
"alpha-a-circle-outline": "\uFBC9",
"alpha-b": "\u0042",
"alpha-b-box": "\uFAEE",
"alpha-b-box-outline": "\uFBCA",
"alpha-b-circle": "\uFBCB",
"alpha-b-circle-outline": "\uFBCC",
"alpha-c": "\u0043",
"alpha-c-box": "\uFAEF",
"alpha-c-box-outline": "\uFBCD",
"alpha-c-circle": "\uFBCE",
"alpha-c-circle-outline": "\uFBCF",
"alpha-d": "\u0044",
"alpha-d-box": "\uFAF0",
"alpha-d-box-outline": "\uFBD0",
"alpha-d-circle": "\uFBD1",
"alpha-d-circle-outline": "\uFBD2",
"alpha-e": "\u0045",
"alpha-e-box": "\uFAF1",
"alpha-e-box-outline": "\uFBD3",
"alpha-e-circle": "\uFBD4",
"alpha-e-circle-outline": "\uFBD5",
"alpha-f": "\u0046",
"alpha-f-box": "\uFAF2",
"alpha-f-box-outline": "\uFBD6",
"alpha-f-circle": "\uFBD7",
"alpha-f-circle-outline": "\uFBD8",
"alpha-g": "\u0047",
"alpha-g-box": "\uFAF3",
"alpha-g-box-outline": "\uFBD9",
"alpha-g-circle": "\uFBDA",
"alpha-g-circle-outline": "\uFBDB",
"alpha-h": "\u0048",
"alpha-h-box": "\uFAF4",
"alpha-h-box-outline": "\uFBDC",
"alpha-h-circle": "\uFBDD",
"alpha-h-circle-outline": "\uFBDE",
"alpha-i": "\u0049",
"alpha-i-box": "\uFAF5",
"alpha-i-box-outline": "\uFBDF",
"alpha-i-circle": "\uFBE0",
"alpha-i-circle-outline": "\uFBE1",
"alpha-j": "\u004A",
"alpha-j-box": "\uFAF6",
"alpha-j-box-outline": "\uFBE2",
"alpha-j-circle": "\uFBE3",
"alpha-j-circle-outline": "\uFBE4",
"alpha-k": "\u004B",
"alpha-k-box": "\uFAF7",
"alpha-k-box-outline": "\uFBE5",
"alpha-k-circle": "\uFBE6",
"alpha-k-circle-outline": "\uFBE7",
"alpha-l": "\u004C",
"alpha-l-box": "\uFAF8",
"alpha-l-box-outline": "\uFBE8",
"alpha-l-circle": "\uFBE9",
"alpha-l-circle-outline": "\uFBEA",
"alpha-m": "\u004D",
"alpha-m-box": "\uFAF9",
"alpha-m-box-outline": "\uFBEB",
"alpha-m-circle": "\uFBEC",
"alpha-m-circle-outline": "\uFBED",
"alpha-n": "\u004E",
"alpha-n-box": "\uFAFA",
"alpha-n-box-outline": "\uFBEE",
"alpha-n-circle": "\uFBEF",
"alpha-n-circle-outline": "\uFBF0",
"alpha-o": "\u004F",
"alpha-o-box": "\uFAFB",
"alpha-o-box-outline": "\uFBF1",
"alpha-o-circle": "\uFBF2",
"alpha-o-circle-outline": "\uFBF3",
"alpha-p": "\u0050",
"alpha-p-box": "\uFAFC",
"alpha-p-box-outline": "\uFBF4",
"alpha-p-circle": "\uFBF5",
"alpha-p-circle-outline": "\uFBF6",
"alpha-q": "\u0051",
"alpha-q-box": "\uFAFD",
"alpha-q-box-outline": "\uFBF7",
"alpha-q-circle": "\uFBF8",
"alpha-q-circle-outline": "\uFBF9",
"alpha-r": "\u0052",
"alpha-r-box": "\uFAFE",
"alpha-r-box-outline": "\uFBFA",
"alpha-r-circle": "\uFBFB",
"alpha-r-circle-outline": "\uFBFC",
"alpha-s": "\u0053",
"alpha-s-box": "\uFAFF",
"alpha-s-box-outline": "\uFBFD",
"alpha-s-circle": "\uFBFE",
"alpha-s-circle-outline": "\uFBFF",
"alpha-t": "\u0054",
"alpha-t-box": "\uFB00",
"alpha-t-box-outline": "\uFC00",
"alpha-t-circle": "\uFC01",
"alpha-t-circle-outline": "\uFC02",
"alpha-u": "\u0055",
"alpha-u-box": "\uFB01",
"alpha-u-box-outline": "\uFC03",
"alpha-u-circle": "\uFC04",
"alpha-u-circle-outline": "\uFC05",
"alpha-v": "\u0056",
"alpha-v-box": "\uFB02",
"alpha-v-box-outline": "\uFC06",
"alpha-v-circle": "\uFC07",
"alpha-v-circle-outline": "\uFC08",
"alpha-w": "\u0057",
"alpha-w-box": "\uFB03",
"alpha-w-box-outline": "\uFC09",
"alpha-w-circle": "\uFC0A",
"alpha-w-circle-outline": "\uFC0B",
"alpha-x": "\u0058",
"alpha-x-box": "\uFB04",
"alpha-x-box-outline": "\uFC0C",
"alpha-x-circle": "\uFC0D",
"alpha-x-circle-outline": "\uFC0E",
"alpha-y": "\u0059",
"alpha-y-box": "\uFB05",
"alpha-y-box-outline": "\uFC0F",
"alpha-y-circle": "\uFC10",
"alpha-y-circle-outline": "\uFC11",
"alpha-z": "\u005A",
"alpha-z-box": "\uFB06",
"alpha-z-box-outline": "\uFC12",
"alpha-z-circle": "\uFC13",
"alpha-z-circle-outline": "\uFC14",
"alphabetical": "\uF02C",
"alphabetical-off": "\U000F002E",
"alphabetical-variant": "\U000F002F",
"alphabetical-variant-off": "\U000F0030",
"altimeter": "\uF5D7",
"amazon": "\uF02D",
"amazon-alexa": "\uF8C5",
"amazon-drive": "\uF02E",
"ambulance": "\uF02F",
"ammunition": "\uFCC4",
"ampersand": "\uFA8C",
"amplifier": "\uF030",
"anchor": "\uF031",
"android": "\uF032",
"android-auto": "\uFA8D",
"android-debug-bridge": "\uF033",
"android-head": "\uF78F",
"android-messages": "\uFD21",
"android-studio": "\uF034",
"angle-acute": "\uF936",
"angle-obtuse": "\uF937",
"angle-right": "\uF938",
"angular": "\uF6B1",
"angularjs": "\uF6BE",
"animation": "\uF5D8",
"animation-outline": "\uFA8E",
"animation-play": "\uF939",
"animation-play-outline": "\uFA8F",
"anvil": "\uF89A",
"apache-kafka": "\U000F0031",
"apple": "\uF035",
"apple-finder": "\uF036",
"apple-icloud": "\uF038",
"apple-ios": "\uF037",
"apple-keyboard-caps": "\uF632",
"apple-keyboard-command": "\uF633",
"apple-keyboard-control": "\uF634",
"apple-keyboard-option": "\uF635",
"apple-keyboard-shift": "\uF636",
"apple-safari": "\uF039",
"application": "\uF614",
"application-export": "\uFD89",
"application-import": "\uFD8A",
"approximately-equal": "\uFFBE",
"approximately-equal-box": "\uFFBF",
"apps": "\uF03B",
"apps-box": "\uFD22",
"arch": "\uF8C6",
"archive": "\uF03C",
"arrange-bring-forward": "\uF03D",
"arrange-bring-to-front": "\uF03E",
"arrange-send-backward": "\uF03F",
"arrange-send-to-back": "\uF040",
"arrow-all": "\uF041",
"arrow-bottom-left": "\uF042",
"arrow-bottom-left-bold-outline": "\uF9B6",
"arrow-bottom-left-thick": "\uF9B7",
"arrow-bottom-right": "\uF043",
"arrow-bottom-right-bold-outline": "\uF9B8",
"arrow-bottom-right-thick": "\uF9B9",
"arrow-collapse": "\uF615",
"arrow-collapse-all": "\uF044",
"arrow-collapse-down": "\uF791",
"arrow-collapse-horizontal": "\uF84B",
"arrow-collapse-left": "\uF792",
"arrow-collapse-right": "\uF793",
"arrow-collapse-up": "\uF794",
"arrow-collapse-vertical": "\uF84C",
"arrow-decision": "\uF9BA",
"arrow-decision-auto": "\uF9BB",
"arrow-decision-auto-outline": "\uF9BC",
"arrow-decision-outline": "\uF9BD",
"arrow-down": "\uF045",
"arrow-down-bold": "\uF72D",
"arrow-down-bold-box": "\uF72E",
"arrow-down-bold-box-outline": "\uF72F",
"arrow-down-bold-circle": "\uF047",
"arrow-down-bold-circle-outline": "\uF048",
"arrow-down-bold-hexagon-outline": "\uF049",
"arrow-down-bold-outline": "\uF9BE",
"arrow-down-box": "\uF6BF",
"arrow-down-circle": "\uFCB7",
"arrow-down-circle-outline": "\uFCB8",
"arrow-down-drop-circle": "\uF04A",
"arrow-down-drop-circle-outline": "\uF04B",
"arrow-down-thick": "\uF046",
"arrow-expand": "\uF616",
"arrow-expand-all": "\uF04C",
"arrow-expand-down": "\uF795",
"arrow-expand-horizontal": "\uF84D",
"arrow-expand-left": "\uF796",
"arrow-expand-right": "\uF797",
"arrow-expand-up": "\uF798",
"arrow-expand-vertical": "\uF84E",
"arrow-left": "\uF04D",
"arrow-left-bold": "\uF730",
"arrow-left-bold-box": "\uF731",
"arrow-left-bold-box-outline": "\uF732",
"arrow-left-bold-circle": "\uF04F",
"arrow-left-bold-circle-outline": "\uF050",
"arrow-left-bold-hexagon-outline": "\uF051",
"arrow-left-bold-outline": "\uF9BF",
"arrow-left-box": "\uF6C0",
"arrow-left-circle": "\uFCB9",
"arrow-left-circle-outline": "\uFCBA",
"arrow-left-drop-circle": "\uF052",
"arrow-left-drop-circle-outline": "\uF053",
"arrow-left-right": "\uFE90",
"arrow-left-right-bold": "\uFE91",
"arrow-left-right-bold-outline": "\uF9C0",
"arrow-left-thick": "\uF04E",
"arrow-right": "\uF054",
"arrow-right-bold": "\uF733",
"arrow-right-bold-box": "\uF734",
"arrow-right-bold-box-outline": "\uF735",
"arrow-right-bold-circle": "\uF056",
"arrow-right-bold-circle-outline": "\uF057",
"arrow-right-bold-hexagon-outline": "\uF058",
"arrow-right-bold-outline": "\uF9C1",
"arrow-right-box": "\uF6C1",
"arrow-right-circle": "\uFCBB",
"arrow-right-circle-outline": "\uFCBC",
"arrow-right-drop-circle": "\uF059",
"arrow-right-drop-circle-outline": "\uF05A",
"arrow-right-thick": "\uF055",
"arrow-split-horizontal": "\uF93A",
"arrow-split-vertical": "\uF93B",
"arrow-top-left": "\uF05B",
"arrow-top-left-bold-outline": "\uF9C2",
"arrow-top-left-bottom-right": "\uFE92",
"arrow-top-left-bottom-right-bold": "\uFE93",
"arrow-top-left-thick": "\uF9C3",
"arrow-top-right": "\uF05C",
"arrow-top-right-bold-outline": "\uF9C4",
"arrow-top-right-bottom-left": "\uFE94",
"arrow-top-right-bottom-left-bold": "\uFE95",
"arrow-top-right-thick": "\uF9C5",
"arrow-up": "\uF05D",
"arrow-up-bold": "\uF736",
"arrow-up-bold-box": "\uF737",
"arrow-up-bold-box-outline": "\uF738",
"arrow-up-bold-circle": "\uF05F",
"arrow-up-bold-circle-outline": "\uF060",
"arrow-up-bold-hexagon-outline": "\uF061",
"arrow-up-bold-outline": "\uF9C6",
"arrow-up-box": "\uF6C2",
"arrow-up-circle": "\uFCBD",
"arrow-up-circle-outline": "\uFCBE",
"arrow-up-down": "\uFE96",
"arrow-up-down-bold": "\uFE97",
"arrow-up-down-bold-outline": "\uF9C7",
"arrow-up-drop-circle": "\uF062",
"arrow-up-drop-circle-outline": "\uF063",
"arrow-up-thick": "\uF05E",
"artist": "\uF802",
"artist-outline": "\uFCC5",
"artstation": "\uFB37",
"aspect-ratio": "\uFA23",
"assistant": "\uF064",
"asterisk": "\uF6C3",
"at": "\uF065",
"atlassian": "\uF803",
"atm": "\uFD23",
"atom": "\uF767",
"atom-variant": "\uFE98",
"attachment": "\uF066",
"audio-video": "\uF93C",
"audiobook": "\uF067",
"augmented-reality": "\uF84F",
"auto-fix": "\uF068",
"auto-upload": "\uF069",
"autorenew": "\uF06A",
"av-timer": "\uF06B",
"aws": "\uFDF2",
"axe": "\uF8C7",
"axis": "\uFD24",
"axis-arrow": "\uFD25",
"axis-arrow-lock": "\uFD26",
"axis-lock": "\uFD27",
"axis-x-arrow": "\uFD28",
"axis-x-arrow-lock": "\uFD29",
"axis-x-rotate-clockwise": "\uFD2A",
"axis-x-rotate-counterclockwise": "\uFD2B",
"axis-x-y-arrow-lock": "\uFD2C",
"axis-y-arrow": "\uFD2D",
"axis-y-arrow-lock": "\uFD2E",
"axis-y-rotate-clockwise": "\uFD2F",
"axis-y-rotate-counterclockwise": "\uFD30",
"axis-z-arrow": "\uFD31",
"axis-z-arrow-lock": "\uFD32",
"axis-z-rotate-clockwise": "\uFD33",
"axis-z-rotate-counterclockwise": "\uFD34",
"azure": "\uF804",
"babel": "\uFA24",
"baby": "\uF06C",
"baby-bottle": "\uFF56",
"baby-bottle-outline": "\uFF57",
"baby-carriage": "\uF68E",
"baby-carriage-off": "\uFFC0",
"baby-face": "\uFE99",
"baby-face-outline": "\uFE9A",
"backburger": "\uF06D",
"backspace": "\uF06E",
"backspace-outline": "\uFB38",
"backspace-reverse": "\uFE9B",
"backspace-reverse-outline": "\uFE9C",
"backup-restore": "\uF06F",
"bacteria": "\uFEF2",
"bacteria-outline": "\uFEF3",
"badminton": "\uF850",
"bag-carry-on": "\uFF58",
"bag-carry-on-check": "\uFD41",
"bag-carry-on-off": "\uFF59",
"bag-checked": "\uFF5A",
"bag-personal": "\uFDF3",
"bag-personal-off": "\uFDF4",
"bag-personal-off-outline": "\uFDF5",
"bag-personal-outline": "\uFDF6",
"baguette": "\uFF5B",
"balloon": "\uFA25",
"ballot": "\uF9C8",
"ballot-outline": "\uF9C9",
"ballot-recount": "\uFC15",
"ballot-recount-outline": "\uFC16",
"bandage": "\uFD8B",
"bandcamp": "\uF674",
"bank": "\uF070",
"bank-minus": "\uFD8C",
"bank-outline": "\uFE9D",
"bank-plus": "\uFD8D",
"bank-remove": "\uFD8E",
"bank-transfer": "\uFA26",
"bank-transfer-in": "\uFA27",
"bank-transfer-out": "\uFA28",
"barcode": "\uF071",
"barcode-scan": "\uF072",
"barley": "\uF073",
"barley-off": "\uFB39",
"barn": "\uFB3A",
"barrel": "\uF074",
"baseball": "\uF851",
"baseball-bat": "\uF852",
"basecamp": "\uF075",
"basket": "\uF076",
"basket-fill": "\uF077",
"basket-unfill": "\uF078",
"basketball": "\uF805",
"basketball-hoop": "\uFC17",
"basketball-hoop-outline": "\uFC18",
"bat": "\uFB3B",
"battery": "\uF079",
"battery-10": "\uF07A",
"battery-10-bluetooth": "\uF93D",
"battery-20": "\uF07B",
"battery-20-bluetooth": "\uF93E",
"battery-30": "\uF07C",
"battery-30-bluetooth": "\uF93F",
"battery-40": "\uF07D",
"battery-40-bluetooth": "\uF940",
"battery-50": "\uF07E",
"battery-50-bluetooth": "\uF941",
"battery-60": "\uF07F",
"battery-60-bluetooth": "\uF942",
"battery-70": "\uF080",
"battery-70-bluetooth": "\uF943",
"battery-80": "\uF081",
"battery-80-bluetooth": "\uF944",
"battery-90": "\uF082",
"battery-90-bluetooth": "\uF945",
"battery-alert": "\uF083",
"battery-alert-bluetooth": "\uF946",
"battery-bluetooth": "\uF947",
"battery-bluetooth-variant": "\uF948",
"battery-charging": "\uF084",
"battery-charging-10": "\uF89B",
"battery-charging-100": "\uF085",
"battery-charging-20": "\uF086",
"battery-charging-30": "\uF087",
"battery-charging-40": "\uF088",
"battery-charging-50": "\uF89C",
"battery-charging-60": "\uF089",
"battery-charging-70": "\uF89D",
"battery-charging-80": "\uF08A",
"battery-charging-90": "\uF08B",
"battery-charging-outline": "\uF89E",
"battery-charging-wireless": "\uF806",
"battery-charging-wireless-10": "\uF807",
"battery-charging-wireless-20": "\uF808",
"battery-charging-wireless-30": "\uF809",
"battery-charging-wireless-40": "\uF80A",
"battery-charging-wireless-50": "\uF80B",
"battery-charging-wireless-60": "\uF80C",
"battery-charging-wireless-70": "\uF80D",
"battery-charging-wireless-80": "\uF80E",
"battery-charging-wireless-90": "\uF80F",
"battery-charging-wireless-alert": "\uF810",
"battery-charging-wireless-outline": "\uF811",
"battery-minus": "\uF08C",
"battery-negative": "\uF08D",
"battery-outline": "\uF08E",
"battery-plus": "\uF08F",
"battery-positive": "\uF090",
"battery-unknown": "\uF091",
"battery-unknown-bluetooth": "\uF949",
"battlenet": "\uFB3C",
"beach": "\uF092",
"beaker": "\uFCC6",
"beaker-outline": "\uF68F",
"beats": "\uF097",
"bed-empty": "\uF89F",
"bee": "\uFFC1",
"bee-flower": "\uFFC2",
"beer": "\uF098",
"behance": "\uF099",
"bell": "\uF09A",
"bell-alert": "\uFD35",
"bell-alert-outline": "\uFE9E",
"bell-circle": "\uFD36",
"bell-circle-outline": "\uFD37",
"bell-off": "\uF09B",
"bell-off-outline": "\uFA90",
"bell-outline": "\uF09C",
"bell-plus": "\uF09D",
"bell-plus-outline": "\uFA91",
"bell-ring": "\uF09E",
"bell-ring-outline": "\uF09F",
"bell-sleep": "\uF0A0",
"bell-sleep-outline": "\uFA92",
"beta": "\uF0A1",
"betamax": "\uF9CA",
"biathlon": "\uFDF7",
"bible": "\uF0A2",
"bike": "\uF0A3",
"billboard": "\U000F0032",
"billiards": "\uFB3D",
"billiards-rack": "\uFB3E",
"bing": "\uF0A4",
"binoculars": "\uF0A5",
"bio": "\uF0A6",
"biohazard": "\uF0A7",
"bitbucket": "\uF0A8",
"bitcoin": "\uF812",
"black-mesa": "\uF0A9",
"blackberry": "\uF0AA",
"blender": "\uFCC7",
"blender-software": "\uF0AB",
"blinds": "\uF0AC",
"blinds-open": "\U000F0033",
"block-helper": "\uF0AD",
"blogger": "\uF0AE",
"blood-bag": "\uFCC8",
"bluetooth": "\uF0AF",
"bluetooth-audio": "\uF0B0",
"bluetooth-connect": "\uF0B1",
"bluetooth-off": "\uF0B2",
"bluetooth-settings": "\uF0B3",
"bluetooth-transfer": "\uF0B4",
"blur": "\uF0B5",
"blur-linear": "\uF0B6",
"blur-off": "\uF0B7",
"blur-radial": "\uF0B8",
"bolnisi-cross": "\uFCC9",
"bolt": "\uFD8F",
"bomb": "\uF690",
"bomb-off": "\uF6C4",
"bone": "\uF0B9",
"book": "\uF0BA",
"book-lock": "\uF799",
"book-lock-open": "\uF79A",
"book-minus": "\uF5D9",
"book-minus-multiple": "\uFA93",
"book-multiple": "\uF0BB",
"book-open": "\uF0BD",
"book-open-outline": "\uFB3F",
"book-open-page-variant": "\uF5DA",
"book-open-variant": "\uF0BE",
"book-outline": "\uFB40",
"book-play": "\uFE9F",
"book-play-outline": "\uFEA0",
"book-plus": "\uF5DB",
"book-plus-multiple": "\uFA94",
"book-remove": "\uFA96",
"book-remove-multiple": "\uFA95",
"book-search": "\uFEA1",
"book-search-outline": "\uFEA2",
"book-variant": "\uF0BF",
"book-variant-multiple": "\uF0BC",
"bookmark": "\uF0C0",
"bookmark-check": "\uF0C1",
"bookmark-minus": "\uF9CB",
"bookmark-minus-outline": "\uF9CC",
"bookmark-multiple": "\uFDF8",
"bookmark-multiple-outline": "\uFDF9",
"bookmark-music": "\uF0C2",
"bookmark-off": "\uF9CD",
"bookmark-off-outline": "\uF9CE",
"bookmark-outline": "\uF0C3",
"bookmark-plus": "\uF0C5",
"bookmark-plus-outline": "\uF0C4",
"bookmark-remove": "\uF0C6",
"boom-gate": "\uFEA3",
"boom-gate-alert": "\uFEA4",
"boom-gate-alert-outline": "\uFEA5",
"boom-gate-down": "\uFEA6",
"boom-gate-down-outline": "\uFEA7",
"boom-gate-outline": "\uFEA8",
"boom-gate-up": "\uFEA9",
"boom-gate-up-outline": "\uFEAA",
"boombox": "\uF5DC",
"bootstrap": "\uF6C5",
"border-all": "\uF0C7",
"border-all-variant": "\uF8A0",
"border-bottom": "\uF0C8",
"border-bottom-variant": "\uF8A1",
"border-color": "\uF0C9",
"border-horizontal": "\uF0CA",
"border-inside": "\uF0CB",
"border-left": "\uF0CC",
"border-left-variant": "\uF8A2",
"border-none": "\uF0CD",
"border-none-variant": "\uF8A3",
"border-outside": "\uF0CE",
"border-right": "\uF0CF",
"border-right-variant": "\uF8A4",
"border-style": "\uF0D0",
"border-top": "\uF0D1",
"border-top-variant": "\uF8A5",
"border-vertical": "\uF0D2",
"bottle-wine": "\uF853",
"bow-tie": "\uF677",
"bowl": "\uF617",
"bowling": "\uF0D3",
"box": "\uF0D4",
"box-cutter": "\uF0D5",
"box-shadow": "\uF637",
"boxing-glove": "\uFB41",
"braille": "\uF9CF",
"brain": "\uF9D0",
"bread-slice": "\uFCCA",
"bread-slice-outline": "\uFCCB",
"bridge": "\uF618",
"briefcase": "\uF0D6",
"briefcase-account": "\uFCCC",
"briefcase-account-outline": "\uFCCD",
"briefcase-check": "\uF0D7",
"briefcase-download": "\uF0D8",
"briefcase-download-outline": "\uFC19",
"briefcase-edit": "\uFA97",
"briefcase-edit-outline": "\uFC1A",
"briefcase-minus": "\uFA29",
"briefcase-minus-outline": "\uFC1B",
"briefcase-outline": "\uF813",
"briefcase-plus": "\uFA2A",
"briefcase-plus-outline": "\uFC1C",
"briefcase-remove": "\uFA2B",
"briefcase-remove-outline": "\uFC1D",
"briefcase-search": "\uFA2C",
"briefcase-search-outline": "\uFC1E",
"briefcase-upload": "\uF0D9",
"briefcase-upload-outline": "\uFC1F",
"brightness-1": "\uF0DA",
"brightness-2": "\uF0DB",
"brightness-3": "\uF0DC",
"brightness-4": "\uF0DD",
"brightness-5": "\uF0DE",
"brightness-6": "\uF0DF",
"brightness-7": "\uF0E0",
"brightness-auto": "\uF0E1",
"brightness-percent": "\uFCCE",
"broom": "\uF0E2",
"brush": "\uF0E3",
"buddhism": "\uF94A",
"buffer": "\uF619",
"bug": "\uF0E4",
"bug-check": "\uFA2D",
"bug-check-outline": "\uFA2E",
"bug-outline": "\uFA2F",
"bugle": "\uFD90",
"bulldozer": "\uFB07",
"bullet": "\uFCCF",
"bulletin-board": "\uF0E5",
"bullhorn": "\uF0E6",
"bullhorn-outline": "\uFB08",
"bullseye": "\uF5DD",
"bullseye-arrow": "\uF8C8",
"bus": "\uF0E7",
"bus-alert": "\uFA98",
"bus-articulated-end": "\uF79B",
"bus-articulated-front": "\uF79C",
"bus-clock": "\uF8C9",
"bus-double-decker": "\uF79D",
"bus-multiple": "\uFF5C",
"bus-school": "\uF79E",
"bus-side": "\uF79F",
"bus-stop": "\U000F0034",
"bus-stop-covered": "\U000F0035",
"bus-stop-uncovered": "\U000F0036",
"cached": "\uF0E8",
"cactus": "\uFD91",
"cake": "\uF0E9",
"cake-layered": "\uF0EA",
"cake-variant": "\uF0EB",
"calculator": "\uF0EC",
"calculator-variant": "\uFA99",
"calendar": "\uF0ED",
"calendar-account": "\uFEF4",
"calendar-account-outline": "\uFEF5",
"calendar-alert": "\uFA30",
"calendar-blank": "\uF0EE",
"calendar-blank-outline": "\uFB42",
"calendar-check": "\uF0EF",
"calendar-check-outline": "\uFC20",
"calendar-clock": "\uF0F0",
"calendar-edit": "\uF8A6",
"calendar-export": "\uFB09",
"calendar-heart": "\uF9D1",
"calendar-import": "\uFB0A",
"calendar-minus": "\uFD38",
"calendar-month": "\uFDFA",
"calendar-month-outline": "\uFDFB",
"calendar-multiple": "\uF0F1",
"calendar-multiple-check": "\uF0F2",
"calendar-multiselect": "\uFA31",
"calendar-outline": "\uFB43",
"calendar-plus": "\uF0F3",
"calendar-question": "\uF691",
"calendar-range": "\uF678",
"calendar-range-outline": "\uFB44",
"calendar-remove": "\uF0F4",
"calendar-remove-outline": "\uFC21",
"calendar-repeat": "\uFEAB",
"calendar-repeat-outline": "\uFEAC",
"calendar-search": "\uF94B",
"calendar-star": "\uF9D2",
"calendar-text": "\uF0F5",
"calendar-text-outline": "\uFC22",
"calendar-today": "\uF0F6",
"calendar-week": "\uFA32",
"calendar-week-begin": "\uFA33",
"calendar-weekend": "\uFEF6",
"calendar-weekend-outline": "\uFEF7",
"call-made": "\uF0F7",
"call-merge": "\uF0F8",
"call-missed": "\uF0F9",
"call-received": "\uF0FA",
"call-split": "\uF0FB",
"camcorder": "\uF0FC",
"camcorder-box": "\uF0FD",
"camcorder-box-off": "\uF0FE",
"camcorder-off": "\uF0FF",
"camera": "\uF100",
"camera-account": "\uF8CA",
"camera-burst": "\uF692",
"camera-control": "\uFB45",
"camera-enhance": "\uF101",
"camera-enhance-outline": "\uFB46",
"camera-front": "\uF102",
"camera-front-variant": "\uF103",
"camera-gopro": "\uF7A0",
"camera-image": "\uF8CB",
"camera-iris": "\uF104",
"camera-metering-center": "\uF7A1",
"camera-metering-matrix": "\uF7A2",
"camera-metering-partial": "\uF7A3",
"camera-metering-spot": "\uF7A4",
"camera-off": "\uF5DF",
"camera-outline": "\uFD39",
"camera-party-mode": "\uF105",
"camera-plus": "\uFEF8",
"camera-plus-outline": "\uFEF9",
"camera-rear": "\uF106",
"camera-rear-variant": "\uF107",
"camera-retake": "\uFDFC",
"camera-retake-outline": "\uFDFD",
"camera-switch": "\uF108",
"camera-timer": "\uF109",
"camera-wireless": "\uFD92",
"camera-wireless-outline": "\uFD93",
"campfire": "\uFEFA",
"cancel": "\uF739",
"candle": "\uF5E2",
"candycane": "\uF10A",
"cannabis": "\uF7A5",
"caps-lock": "\uFA9A",
"car": "\uF10B",
"car-2-plus": "\U000F0037",
"car-3-plus": "\U000F0038",
"car-back": "\uFDFE",
"car-battery": "\uF10C",
"car-brake-abs": "\uFC23",
"car-brake-alert": "\uFC24",
"car-brake-hold": "\uFD3A",
"car-brake-parking": "\uFD3B",
"car-brake-retarder": "\U000F0039",
"car-child-seat": "\uFFC3",
"car-clutch": "\U000F003A",
"car-connected": "\uF10D",
"car-convertible": "\uF7A6",
"car-coolant-level": "\U000F003B",
"car-cruise-control": "\uFD3C",
"car-defrost-front": "\uFD3D",
"car-defrost-rear": "\uFD3E",
"car-door": "\uFB47",
"car-electric": "\uFB48",
"car-esp": "\uFC25",
"car-estate": "\uF7A7",
"car-hatchback": "\uF7A8",
"car-key": "\uFB49",
"car-light-dimmed": "\uFC26",
"car-light-fog": "\uFC27",
"car-light-high": "\uFC28",
"car-limousine": "\uF8CC",
"car-multiple": "\uFB4A",
"car-off": "\uFDFF",
"car-parking-lights": "\uFD3F",
"car-pickup": "\uF7A9",
"car-seat": "\uFFC4",
"car-seat-cooler": "\uFFC5",
"car-seat-heater": "\uFFC6",
"car-shift-pattern": "\uFF5D",
"car-side": "\uF7AA",
"car-sports": "\uF7AB",
"car-tire-alert": "\uFC29",
"car-traction-control": "\uFD40",
"car-turbocharger": "\U000F003C",
"car-wash": "\uF10E",
"car-windshield": "\U000F003D",
"car-windshield-outline": "\U000F003E",
"caravan": "\uF7AC",
"card": "\uFB4B",
"card-bulleted": "\uFB4C",
"card-bulleted-off": "\uFB4D",
"card-bulleted-off-outline": "\uFB4E",
"card-bulleted-outline": "\uFB4F",
"card-bulleted-settings": "\uFB50",
"card-bulleted-settings-outline": "\uFB51",
"card-outline": "\uFB52",
"card-text": "\uFB53",
"card-text-outline": "\uFB54",
"cards": "\uF638",
"cards-club": "\uF8CD",
"cards-diamond": "\uF8CE",
"cards-diamond-outline": "\U000F003F",
"cards-heart": "\uF8CF",
"cards-outline": "\uF639",
"cards-playing-outline": "\uF63A",
"cards-spade": "\uF8D0",
"cards-variant": "\uF6C6",
"carrot": "\uF10F",
"cart": "\uF110",
"cart-arrow-down": "\uFD42",
"cart-arrow-right": "\uFC2A",
"cart-arrow-up": "\uFD43",
"cart-minus": "\uFD44",
"cart-off": "\uF66B",
"cart-outline": "\uF111",
"cart-plus": "\uF112",
"cart-remove": "\uFD45",
"case-sensitive-alt": "\uF113",
"cash": "\uF114",
"cash-100": "\uF115",
"cash-marker": "\uFD94",
"cash-multiple": "\uF116",
"cash-refund": "\uFA9B",
"cash-register": "\uFCD0",
"cash-usd": "\uF117",
"cassette": "\uF9D3",
"cast": "\uF118",
"cast-audio": "\U000F0040",
"cast-connected": "\uF119",
"cast-education": "\uFE6D",
"cast-off": "\uF789",
"castle": "\uF11A",
"cat": "\uF11B",
"cctv": "\uF7AD",
"ceiling-light": "\uF768",
"cellphone": "\uF11C",
"cellphone-android": "\uF11D",
"cellphone-arrow-down": "\uF9D4",
"cellphone-basic": "\uF11E",
"cellphone-dock": "\uF11F",
"cellphone-erase": "\uF94C",
"cellphone-information": "\uFF5E",
"cellphone-iphone": "\uF120",
"cellphone-key": "\uF94D",
"cellphone-link": "\uF121",
"cellphone-link-off": "\uF122",
"cellphone-lock": "\uF94E",
"cellphone-message": "\uF8D2",
"cellphone-nfc": "\uFEAD",
"cellphone-off": "\uF94F",
"cellphone-play": "\U000F0041",
"cellphone-screenshot": "\uFA34",
"cellphone-settings": "\uF123",
"cellphone-settings-variant": "\uF950",
"cellphone-sound": "\uF951",
"cellphone-text": "\uF8D1",
"cellphone-wireless": "\uF814",
"celtic-cross": "\uFCD1",
"certificate": "\uF124",
"chair-rolling": "\uFFBA",
"chair-school": "\uF125",
"charity": "\uFC2B",
"chart-arc": "\uF126",
"chart-areaspline": "\uF127",
"chart-areaspline-variant": "\uFEAE",
"chart-bar": "\uF128",
"chart-bar-stacked": "\uF769",
"chart-bell-curve": "\uFC2C",
"chart-bell-curve-cumulative": "\uFFC7",
"chart-bubble": "\uF5E3",
"chart-donut": "\uF7AE",
"chart-donut-variant": "\uF7AF",
"chart-gantt": "\uF66C",
"chart-histogram": "\uF129",
"chart-line": "\uF12A",
"chart-line-stacked": "\uF76A",
"chart-line-variant": "\uF7B0",
"chart-multiline": "\uF8D3",
"chart-pie": "\uF12B",
"chart-scatter-plot": "\uFEAF",
"chart-scatter-plot-hexbin": "\uF66D",
"chart-timeline": "\uF66E",
"chart-timeline-variant": "\uFEB0",
"chart-tree": "\uFEB1",
"chat": "\uFB55",
"chat-alert": "\uFB56",
"chat-outline": "\uFEFB",
"chat-processing": "\uFB57",
"check": "\uF12C",
"check-all": "\uF12D",
"check-bold": "\uFE6E",
"check-box-multiple-outline": "\uFC2D",
"check-box-outline": "\uFC2E",
"check-circle": "\uF5E0",
"check-circle-outline": "\uF5E1",
"check-decagram": "\uF790",
"check-network": "\uFC2F",
"check-network-outline": "\uFC30",
"check-outline": "\uF854",
"check-underline": "\uFE70",
"check-underline-circle": "\uFE71",
"check-underline-circle-outline": "\uFE72",
"checkbook": "\uFA9C",
"checkbox-blank": "\uF12E",
"checkbox-blank-circle": "\uF12F",
"checkbox-blank-circle-outline": "\uF130",
"checkbox-blank-outline": "\uF131",
"checkbox-intermediate": "\uF855",
"checkbox-marked": "\uF132",
"checkbox-marked-circle": "\uF133",
"checkbox-marked-circle-outline": "\uF134",
"checkbox-marked-outline": "\uF135",
"checkbox-multiple-blank": "\uF136",
"checkbox-multiple-blank-circle": "\uF63B",
"checkbox-multiple-blank-circle-outline": "\uF63C",
"checkbox-multiple-blank-outline": "\uF137",
"checkbox-multiple-marked": "\uF138",
"checkbox-multiple-marked-circle": "\uF63D",
"checkbox-multiple-marked-circle-outline": "\uF63E",
"checkbox-multiple-marked-outline": "\uF139",
"checkerboard": "\uF13A",
"chef-hat": "\uFB58",
"chemical-weapon": "\uF13B",
"chess-bishop": "\uF85B",
"chess-king": "\uF856",
"chess-knight": "\uF857",
"chess-pawn": "\uF858",
"chess-queen": "\uF859",
"chess-rook": "\uF85A",
"chevron-double-down": "\uF13C",
"chevron-double-left": "\uF13D",
"chevron-double-right": "\uF13E",
"chevron-double-up": "\uF13F",
"chevron-down": "\uF140",
"chevron-down-box": "\uF9D5",
"chevron-down-box-outline": "\uF9D6",
"chevron-down-circle": "\uFB0B",
"chevron-down-circle-outline": "\uFB0C",
"chevron-left": "\uF141",
"chevron-left-box": "\uF9D7",
"chevron-left-box-outline": "\uF9D8",
"chevron-left-circle": "\uFB0D",
"chevron-left-circle-outline": "\uFB0E",
"chevron-right": "\uF142",
"chevron-right-box": "\uF9D9",
"chevron-right-box-outline": "\uF9DA",
"chevron-right-circle": "\uFB0F",
"chevron-right-circle-outline": "\uFB10",
"chevron-triple-down": "\uFD95",
"chevron-triple-left": "\uFD96",
"chevron-triple-right": "\uFD97",
"chevron-triple-up": "\uFD98",
"chevron-up": "\uF143",
"chevron-up-box": "\uF9DB",
"chevron-up-box-outline": "\uF9DC",
"chevron-up-circle": "\uFB11",
"chevron-up-circle-outline": "\uFB12",
"chili-hot": "\uF7B1",
"chili-medium": "\uF7B2",
"chili-mild": "\uF7B3",
"chip": "\uF61A",
"christianity": "\uF952",
"christianity-outline": "\uFCD2",
"church": "\uF144",
"circle": "\uF764",
"circle-double": "\uFEB2",
"circle-edit-outline": "\uF8D4",
"circle-expand": "\uFEB3",
"circle-medium": "\uF9DD",
"circle-outline": "\uF765",
"circle-slice-1": "\uFA9D",
"circle-slice-2": "\uFA9E",
"circle-slice-3": "\uFA9F",
"circle-slice-4": "\uFAA0",
"circle-slice-5": "\uFAA1",
"circle-slice-6": "\uFAA2",
"circle-slice-7": "\uFAA3",
"circle-slice-8": "\uFAA4",
"circle-small": "\uF9DE",
"circular-saw": "\uFE73",
"cisco-webex": "\uF145",
"city": "\uF146",
"city-variant": "\uFA35",
"city-variant-outline": "\uFA36",
"clipboard": "\uF147",
"clipboard-account": "\uF148",
"clipboard-account-outline": "\uFC31",
"clipboard-alert": "\uF149",
"clipboard-alert-outline": "\uFCD3",
"clipboard-arrow-down": "\uF14A",
"clipboard-arrow-down-outline": "\uFC32",
"clipboard-arrow-left": "\uF14B",
"clipboard-arrow-left-outline": "\uFCD4",
"clipboard-arrow-right": "\uFCD5",
"clipboard-arrow-right-outline": "\uFCD6",
"clipboard-arrow-up": "\uFC33",
"clipboard-arrow-up-outline": "\uFC34",
"clipboard-check": "\uF14C",
"clipboard-check-outline": "\uF8A7",
"clipboard-flow": "\uF6C7",
"clipboard-outline": "\uF14D",
"clipboard-play": "\uFC35",
"clipboard-play-outline": "\uFC36",
"clipboard-plus": "\uF750",
"clipboard-pulse": "\uF85C",
"clipboard-pulse-outline": "\uF85D",
"clipboard-text": "\uF14E",
"clipboard-text-outline": "\uFA37",
"clipboard-text-play": "\uFC37",
"clipboard-text-play-outline": "\uFC38",
"clippy": "\uF14F",
"clock": "\uF953",
"clock-alert": "\uF954",
"clock-alert-outline": "\uF5CE",
"clock-check": "\uFFC8",
"clock-check-outline": "\uFFC9",
"clock-digital": "\uFEB4",
"clock-end": "\uF151",
"clock-fast": "\uF152",
"clock-in": "\uF153",
"clock-out": "\uF154",
"clock-outline": "\uF150",
"clock-start": "\uF155",
"close": "\uF156",
"close-box": "\uF157",
"close-box-multiple": "\uFC39",
"close-box-multiple-outline": "\uFC3A",
"close-box-outline": "\uF158",
"close-circle": "\uF159",
"close-circle-outline": "\uF15A",
"close-network": "\uF15B",
"close-network-outline": "\uFC3B",
"close-octagon": "\uF15C",
"close-octagon-outline": "\uF15D",
"close-outline": "\uF6C8",
"closed-caption": "\uF15E",
"closed-caption-outline": "\uFD99",
"cloud": "\uF15F",
"cloud-alert": "\uF9DF",
"cloud-braces": "\uF7B4",
"cloud-check": "\uF160",
"cloud-circle": "\uF161",
"cloud-download": "\uF162",
"cloud-download-outline": "\uFB59",
"cloud-off-outline": "\uF164",
"cloud-outline": "\uF163",
"cloud-print": "\uF165",
"cloud-print-outline": "\uF166",
"cloud-question": "\uFA38",
"cloud-search": "\uF955",
"cloud-search-outline": "\uF956",
"cloud-sync": "\uF63F",
"cloud-tags": "\uF7B5",
"cloud-upload": "\uF167",
"cloud-upload-outline": "\uFB5A",
"clover": "\uF815",
"coach-lamp": "\U000F0042",
"code-array": "\uF168",
"code-braces": "\uF169",
"code-brackets": "\uF16A",
"code-equal": "\uF16B",
"code-greater-than": "\uF16C",
"code-greater-than-or-equal": "\uF16D",
"code-less-than": "\uF16E",
"code-less-than-or-equal": "\uF16F",
"code-not-equal": "\uF170",
"code-not-equal-variant": "\uF171",
"code-parentheses": "\uF172",
"code-string": "\uF173",
"code-tags": "\uF174",
"code-tags-check": "\uF693",
"codepen": "\uF175",
"coffee": "\uF176",
"coffee-off": "\uFFCA",
"coffee-off-outline": "\uFFCB",
"coffee-outline": "\uF6C9",
"coffee-to-go": "\uF177",
"coffin": "\uFB5B",
"cogs": "\uF8D5",
"coin": "\uF178",
"coins": "\uF694",
"collage": "\uF640",
"collapse-all": "\uFAA5",
"collapse-all-outline": "\uFAA6",
"color-helper": "\uF179",
"comma": "\uFE74",
"comma-box": "\uFE75",
"comma-box-outline": "\uFE76",
"comma-circle": "\uFE77",
"comma-circle-outline": "\uFE78",
"comment": "\uF17A",
"comment-account": "\uF17B",
"comment-account-outline": "\uF17C",
"comment-alert": "\uF17D",
"comment-alert-outline": "\uF17E",
"comment-arrow-left": "\uF9E0",
"comment-arrow-left-outline": "\uF9E1",
"comment-arrow-right": "\uF9E2",
"comment-arrow-right-outline": "\uF9E3",
"comment-check": "\uF17F",
"comment-check-outline": "\uF180",
"comment-eye": "\uFA39",
"comment-eye-outline": "\uFA3A",
"comment-multiple": "\uF85E",
"comment-multiple-outline": "\uF181",
"comment-outline": "\uF182",
"comment-plus": "\uF9E4",
"comment-plus-outline": "\uF183",
"comment-processing": "\uF184",
"comment-processing-outline": "\uF185",
"comment-question": "\uF816",
"comment-question-outline": "\uF186",
"comment-quote": "\U000F0043",
"comment-quote-outline": "\U000F0044",
"comment-remove": "\uF5DE",
"comment-remove-outline": "\uF187",
"comment-search": "\uFA3B",
"comment-search-outline": "\uFA3C",
"comment-text": "\uF188",
"comment-text-multiple": "\uF85F",
"comment-text-multiple-outline": "\uF860",
"comment-text-outline": "\uF189",
"compare": "\uF18A",
"compass": "\uF18B",
"compass-off": "\uFB5C",
"compass-off-outline": "\uFB5D",
"compass-outline": "\uF18C",
"console": "\uF18D",
"console-line": "\uF7B6",
"console-network": "\uF8A8",
"console-network-outline": "\uFC3C",
"contact-mail": "\uF18E",
"contact-mail-outline": "\uFEB5",
"contact-phone": "\uFEB6",
"contact-phone-outline": "\uFEB7",
"contactless-payment": "\uFD46",
"contacts": "\uF6CA",
"contain": "\uFA3D",
"contain-end": "\uFA3E",
"contain-start": "\uFA3F",
"content-copy": "\uF18F",
"content-cut": "\uF190",
"content-duplicate": "\uF191",
"content-paste": "\uF192",
"content-save": "\uF193",
"content-save-alert": "\uFF5F",
"content-save-alert-outline": "\uFF60",
"content-save-all": "\uF194",
"content-save-all-outline": "\uFF61",
"content-save-edit": "\uFCD7",
"content-save-edit-outline": "\uFCD8",
"content-save-move": "\uFE79",
"content-save-move-outline": "\uFE7A",
"content-save-outline": "\uF817",
"content-save-settings": "\uF61B",
"content-save-settings-outline": "\uFB13",
"contrast": "\uF195",
"contrast-box": "\uF196",
"contrast-circle": "\uF197",
"controller-classic": "\uFB5E",
"controller-classic-outline": "\uFB5F",
"cookie": "\uF198",
"coolant-temperature": "\uF3C8",
"copyright": "\uF5E6",
"cordova": "\uF957",
"corn": "\uF7B7",
"counter": "\uF199",
"cow": "\uF19A",
"cowboy": "\uFEB8",
"cpu-32-bit": "\uFEFC",
"cpu-64-bit": "\uFEFD",
"crane": "\uF861",
"creation": "\uF1C9",
"creative-commons": "\uFD47",
"credit-card": "\U000F0010",
"credit-card-clock": "\uFEFE",
"credit-card-clock-outline": "\uFFBC",
"credit-card-marker": "\uF6A7",
"credit-card-marker-outline": "\uFD9A",
"credit-card-minus": "\uFFCC",
"credit-card-minus-outline": "\uFFCD",
"credit-card-multiple": "\U000F0011",
"credit-card-multiple-outline": "\uF19C",
"credit-card-off": "\U000F0012",
"credit-card-off-outline": "\uF5E4",
"credit-card-outline": "\uF19B",
"credit-card-plus": "\U000F0013",
"credit-card-plus-outline": "\uF675",
"credit-card-refund": "\U000F0014",
"credit-card-refund-outline": "\uFAA7",
"credit-card-remove": "\uFFCE",
"credit-card-remove-outline": "\uFFCF",
"credit-card-scan": "\U000F0015",
"credit-card-scan-outline": "\uF19D",
"credit-card-settings": "\U000F0016",
"credit-card-settings-outline": "\uF8D6",
"credit-card-wireless": "\uF801",
"credit-card-wireless-outline": "\uFD48",
"cricket": "\uFD49",
"crop": "\uF19E",
"crop-free": "\uF19F",
"crop-landscape": "\uF1A0",
"crop-portrait": "\uF1A1",
"crop-rotate": "\uF695",
"crop-square": "\uF1A2",
"crosshairs": "\uF1A3",
"crosshairs-gps": "\uF1A4",
"crosshairs-off": "\uFF62",
"crown": "\uF1A5",
"cryengine": "\uF958",
"crystal-ball": "\uFB14",
"cube": "\uF1A6",
"cube-outline": "\uF1A7",
"cube-scan": "\uFB60",
"cube-send": "\uF1A8",
"cube-unfolded": "\uF1A9",
"cup": "\uF1AA",
"cup-off": "\uF5E5",
"cup-water": "\uF1AB",
"cupboard": "\uFF63",
"cupboard-outline": "\uFF64",
"cupcake": "\uF959",
"curling": "\uF862",
"currency-bdt": "\uF863",
"currency-brl": "\uFB61",
"currency-btc": "\uF1AC",
"currency-cny": "\uF7B9",
"currency-eth": "\uF7BA",
"currency-eur": "\uF1AD",
"currency-gbp": "\uF1AE",
"currency-ils": "\uFC3D",
"currency-inr": "\uF1AF",
"currency-jpy": "\uF7BB",
"currency-krw": "\uF7BC",
"currency-kzt": "\uF864",
"currency-ngn": "\uF1B0",
"currency-php": "\uF9E5",
"currency-rial": "\uFEB9",
"currency-rub": "\uF1B1",
"currency-sign": "\uF7BD",
"currency-try": "\uF1B2",
"currency-twd": "\uF7BE",
"currency-usd": "\uF1B3",
"currency-usd-off": "\uF679",
"current-ac": "\uF95A",
"current-dc": "\uF95B",
"cursor-default": "\uF1B4",
"cursor-default-click": "\uFCD9",
"cursor-default-click-outline": "\uFCDA",
"cursor-default-outline": "\uF1B5",
"cursor-move": "\uF1B6",
"cursor-pointer": "\uF1B7",
"cursor-text": "\uF5E7",
"database": "\uF1B8",
"database-check": "\uFAA8",
"database-edit": "\uFB62",
"database-export": "\uF95D",
"database-import": "\uF95C",
"database-lock": "\uFAA9",
"database-minus": "\uF1B9",
"database-plus": "\uF1BA",
"database-refresh": "\uFCDB",
"database-remove": "\uFCDC",
"database-search": "\uF865",
"database-settings": "\uFCDD",
"death-star": "\uF8D7",
"death-star-variant": "\uF8D8",
"deathly-hallows": "\uFB63",
"debian": "\uF8D9",
"debug-step-into": "\uF1BB",
"debug-step-out": "\uF1BC",
"debug-step-over": "\uF1BD",
"decagram": "\uF76B",
"decagram-outline": "\uF76C",
"decimal-decrease": "\uF1BE",
"decimal-increase": "\uF1BF",
"delete": "\uF1C0",
"delete-circle": "\uF682",
"delete-circle-outline": "\uFB64",
"delete-empty": "\uF6CB",
"delete-empty-outline": "\uFEBA",
"delete-forever": "\uF5E8",
"delete-forever-outline": "\uFB65",
"delete-outline": "\uF9E6",
"delete-restore": "\uF818",
"delete-sweep": "\uF5E9",
"delete-sweep-outline": "\uFC3E",
"delete-variant": "\uF1C1",
"delta": "\uF1C2",
"desk-lamp": "\uF95E",
"deskphone": "\uF1C3",
"desktop-classic": "\uF7BF",
"desktop-mac": "\uF1C4",
"desktop-mac-dashboard": "\uF9E7",
"desktop-tower": "\uF1C5",
"desktop-tower-monitor": "\uFAAA",
"details": "\uF1C6",
"dev-to": "\uFD4A",
"developer-board": "\uF696",
"deviantart": "\uF1C7",
"devices": "\uFFD0",
"dialpad": "\uF61C",
"diameter": "\uFC3F",
"diameter-outline": "\uFC40",
"diameter-variant": "\uFC41",
"diamond": "\uFB66",
"diamond-outline": "\uFB67",
"diamond-stone": "\uF1C8",
"dice-1": "\uF1CA",
"dice-2": "\uF1CB",
"dice-3": "\uF1CC",
"dice-4": "\uF1CD",
"dice-5": "\uF1CE",
"dice-6": "\uF1CF",
"dice-d10-outline": "\uF76E",
"dice-d12-outline": "\uF866",
"dice-d20-outline": "\uF5EA",
"dice-d4-outline": "\uF5EB",
"dice-d6-outline": "\uF5EC",
"dice-d8-outline": "\uF5ED",
"dice-multiple": "\uF76D",
"dictionary": "\uF61D",
"dip-switch": "\uF7C0",
"directions": "\uF1D0",
"directions-fork": "\uF641",
"disc": "\uF5EE",
"disc-alert": "\uF1D1",
"disc-player": "\uF95F",
"discord": "\uF66F",
"dishwasher": "\uFAAB",
"disqus": "\uF1D2",
"disqus-outline": "\uF1D3",
"diving-flippers": "\uFD9B",
"diving-helmet": "\uFD9C",
"diving-scuba": "\uFD9D",
"diving-scuba-flag": "\uFD9E",
"diving-scuba-tank": "\uFD9F",
"diving-scuba-tank-multiple": "\uFDA0",
"diving-snorkel": "\uFDA1",
"division": "\uF1D4",
"division-box": "\uF1D5",
"dlna": "\uFA40",
"dna": "\uF683",
"dns": "\uF1D6",
"dns-outline": "\uFB68",
"do-not-disturb": "\uF697",
"do-not-disturb-off": "\uF698",
"docker": "\uF867",
"doctor": "\uFA41",
"dog": "\uFA42",
"dog-service": "\uFAAC",
"dog-side": "\uFA43",
"dolby": "\uF6B2",
"dolly": "\uFEBB",
"domain": "\uF1D7",
"domain-off": "\uFD4B",
"domino-mask": "\U000F0045",
"donkey": "\uF7C1",
"door": "\uF819",
"door-closed": "\uF81A",
"door-open": "\uF81B",
"doorbell-video": "\uF868",
"dot-net": "\uFAAD",
"dots-horizontal": "\uF1D8",
"dots-horizontal-circle": "\uF7C2",
"dots-horizontal-circle-outline": "\uFB69",
"dots-vertical": "\uF1D9",
"dots-vertical-circle": "\uF7C3",
"dots-vertical-circle-outline": "\uFB6A",
"douban": "\uF699",
"download": "\uF1DA",
"download-multiple": "\uF9E8",
"download-network": "\uF6F3",
"download-network-outline": "\uFC42",
"download-outline": "\uFB6B",
"drag": "\uF1DB",
"drag-horizontal": "\uF1DC",
"drag-variant": "\uFB6C",
"drag-vertical": "\uF1DD",
"drama-masks": "\uFCDE",
"draw": "\uFF66",
"drawing": "\uF1DE",
"drawing-box": "\uF1DF",
"dresser": "\uFF67",
"dresser-outline": "\uFF68",
"dribbble": "\uF1E0",
"dribbble-box": "\uF1E1",
"drone": "\uF1E2",
"dropbox": "\uF1E3",
"drupal": "\uF1E4",
"duck": "\uF1E5",
"dumbbell": "\uF1E6",
"dump-truck": "\uFC43",
"ear-hearing": "\uF7C4",
"ear-hearing-off": "\uFA44",
"earth": "\uF1E7",
"earth-box": "\uF6CC",
"earth-box-off": "\uF6CD",
"earth-off": "\uF1E8",
"edge": "\uF1E9",
"egg": "\uFAAE",
"egg-easter": "\uFAAF",
"eight-track": "\uF9E9",
"eject": "\uF1EA",
"eject-outline": "\uFB6D",
"electric-switch": "\uFEBC",
"electron-framework": "\U000F0046",
"elephant": "\uF7C5",
"elevation-decline": "\uF1EB",
"elevation-rise": "\uF1EC",
"elevator": "\uF1ED",
"ellipse": "\uFEBD",
"ellipse-outline": "\uFEBE",
"email": "\uF1EE",
"email-alert": "\uF6CE",
"email-box": "\uFCDF",
"email-check": "\uFAB0",
"email-check-outline": "\uFAB1",
"email-edit": "\uFF00",
"email-edit-outline": "\uFF01",
"email-lock": "\uF1F1",
"email-mark-as-unread": "\uFB6E",
"email-minus": "\uFF02",
"email-minus-outline": "\uFF03",
"email-multiple": "\uFF04",
"email-multiple-outline": "\uFF05",
"email-newsletter": "\uFFD1",
"email-open": "\uF1EF",
"email-open-multiple": "\uFF06",
"email-open-multiple-outline": "\uFF07",
"email-open-outline": "\uF5EF",
"email-outline": "\uF1F0",
"email-plus": "\uF9EA",
"email-plus-outline": "\uF9EB",
"email-search": "\uF960",
"email-search-outline": "\uF961",
"email-variant": "\uF5F0",
"ember": "\uFB15",
"emby": "\uF6B3",
"emoticon": "\uFC44",
"emoticon-angry": "\uFC45",
"emoticon-angry-outline": "\uFC46",
"emoticon-cool": "\uFC47",
"emoticon-cool-outline": "\uF1F3",
"emoticon-cry": "\uFC48",
"emoticon-cry-outline": "\uFC49",
"emoticon-dead": "\uFC4A",
"emoticon-dead-outline": "\uF69A",
"emoticon-devil": "\uFC4B",
"emoticon-devil-outline": "\uF1F4",
"emoticon-excited": "\uFC4C",
"emoticon-excited-outline": "\uF69B",
"emoticon-frown": "\uFF69",
"emoticon-frown-outline": "\uFF6A",
"emoticon-happy": "\uFC4D",
"emoticon-happy-outline": "\uF1F5",
"emoticon-kiss": "\uFC4E",
"emoticon-kiss-outline": "\uFC4F",
"emoticon-neutral": "\uFC50",
"emoticon-neutral-outline": "\uF1F6",
"emoticon-outline": "\uF1F2",
"emoticon-poop": "\uF1F7",
"emoticon-poop-outline": "\uFC51",
"emoticon-sad": "\uFC52",
"emoticon-sad-outline": "\uF1F8",
"emoticon-tongue": "\uF1F9",
"emoticon-tongue-outline": "\uFC53",
"emoticon-wink": "\uFC54",
"emoticon-wink-outline": "\uFC55",
"engine": "\uF1FA",
"engine-off": "\uFA45",
"engine-off-outline": "\uFA46",
"engine-outline": "\uF1FB",
"equal": "\uF1FC",
"equal-box": "\uF1FD",
"equalizer": "\uFEBF",
"equalizer-outline": "\uFEC0",
"eraser": "\uF1FE",
"eraser-variant": "\uF642",
"escalator": "\uF1FF",
"eslint": "\uFC56",
"et": "\uFAB2",
"ethereum": "\uF869",
"ethernet": "\uF200",
"ethernet-cable": "\uF201",
"ethernet-cable-off": "\uF202",
"etsy": "\uF203",
"ev-station": "\uF5F1",
"eventbrite": "\uF7C6",
"evernote": "\uF204",
"excavator": "\U000F0047",
"exclamation": "\uF205",
"exit-run": "\uFA47",
"exit-to-app": "\uF206",
"expand-all": "\uFAB3",
"expand-all-outline": "\uFAB4",
"expansion-card": "\uF8AD",
"expansion-card-variant": "\uFFD2",
"exponent": "\uF962",
"exponent-box": "\uF963",
"export": "\uF207",
"export-variant": "\uFB6F",
"eye": "\uF208",
"eye-check": "\uFCE0",
"eye-check-outline": "\uFCE1",
"eye-circle": "\uFB70",
"eye-circle-outline": "\uFB71",
"eye-minus": "\U000F0048",
"eye-minus-outline": "\U000F0049",
"eye-off": "\uF209",
"eye-off-outline": "\uF6D0",
"eye-outline": "\uF6CF",
"eye-plus": "\uF86A",
"eye-plus-outline": "\uF86B",
"eye-settings": "\uF86C",
"eye-settings-outline": "\uF86D",
"eyedropper": "\uF20A",
"eyedropper-variant": "\uF20B",
"face": "\uF643",
"face-agent": "\uFD4C",
"face-outline": "\uFB72",
"face-profile": "\uF644",
"face-recognition": "\uFC57",
"facebook": "\uF20C",
"facebook-box": "\uF20D",
"facebook-messenger": "\uF20E",
"facebook-workplace": "\uFB16",
"factory": "\uF20F",
"fan": "\uF210",
"fan-off": "\uF81C",
"fast-forward": "\uF211",
"fast-forward-10": "\uFD4D",
"fast-forward-30": "\uFCE2",
"fast-forward-outline": "\uF6D1",
"fax": "\uF212",
"feather": "\uF6D2",
"feature-search": "\uFA48",
"feature-search-outline": "\uFA49",
"fedora": "\uF8DA",
"ferris-wheel": "\uFEC1",
"ferry": "\uF213",
"file": "\uF214",
"file-account": "\uF73A",
"file-account-outline": "\U000F004A",
"file-alert": "\uFA4A",
"file-alert-outline": "\uFA4B",
"file-cabinet": "\uFAB5",
"file-cad": "\uFF08",
"file-cad-box": "\uFF09",
"file-cancel": "\uFDA2",
"file-cancel-outline": "\uFDA3",
"file-chart": "\uF215",
"file-chart-outline": "\U000F004B",
"file-check": "\uF216",
"file-check-outline": "\uFE7B",
"file-cloud": "\uF217",
"file-cloud-outline": "\U000F004C",
"file-code": "\uF22E",
"file-code-outline": "\U000F004D",
"file-compare": "\uF8A9",
"file-delimited": "\uF218",
"file-delimited-outline": "\uFEC2",
"file-document": "\uF219",
"file-document-box": "\uF21A",
"file-document-box-check": "\uFEC3",
"file-document-box-check-outline": "\uFEC4",
"file-document-box-minus": "\uFEC5",
"file-document-box-minus-outline": "\uFEC6",
"file-document-box-multiple": "\uFAB6",
"file-document-box-multiple-outline": "\uFAB7",
"file-document-box-outline": "\uF9EC",
"file-document-box-plus": "\uFEC7",
"file-document-box-plus-outline": "\uFEC8",
"file-document-box-remove": "\uFEC9",
"file-document-box-remove-outline": "\uFECA",
"file-document-box-search": "\uFECB",
"file-document-box-search-outline": "\uFECC",
"file-document-edit": "\uFDA4",
"file-document-edit-outline": "\uFDA5",
"file-document-outline": "\uF9ED",
"file-download": "\uF964",
"file-download-outline": "\uF965",
"file-excel": "\uF21B",
"file-excel-box": "\uF21C",
"file-excel-box-outline": "\U000F004E",
"file-excel-outline": "\U000F004F",
"file-export": "\uF21D",
"file-export-outline": "\U000F0050",
"file-eye": "\uFDA6",
"file-eye-outline": "\uFDA7",
"file-find": "\uF21E",
"file-find-outline": "\uFB73",
"file-hidden": "\uF613",
"file-image": "\uF21F",
"file-image-outline": "\uFECD",
"file-import": "\uF220",
"file-import-outline": "\U000F0051",
"file-lock": "\uF221",
"file-lock-outline": "\U000F0052",
"file-move": "\uFAB8",
"file-move-outline": "\U000F0053",
"file-multiple": "\uF222",
"file-multiple-outline": "\U000F0054",
"file-music": "\uF223",
"file-music-outline": "\uFE7C",
"file-outline": "\uF224",
"file-pdf": "\uF225",
"file-pdf-box": "\uF226",
"file-pdf-box-outline": "\uFFD3",
"file-pdf-outline": "\uFE7D",
"file-percent": "\uF81D",
"file-percent-outline": "\U000F0055",
"file-plus": "\uF751",
"file-plus-outline": "\uFF0A",
"file-powerpoint": "\uF227",
"file-powerpoint-box": "\uF228",
"file-powerpoint-box-outline": "\U000F0056",
"file-powerpoint-outline": "\U000F0057",
"file-presentation-box": "\uF229",
"file-question": "\uF86E",
"file-question-outline": "\U000F0058",
"file-remove": "\uFB74",
"file-remove-outline": "\U000F0059",
"file-replace": "\uFB17",
"file-replace-outline": "\uFB18",
"file-restore": "\uF670",
"file-restore-outline": "\U000F005A",
"file-search": "\uFC58",
"file-search-outline": "\uFC59",
"file-send": "\uF22A",
"file-send-outline": "\U000F005B",
"file-star": "\U000F005C",
"file-star-outline": "\U000F005D",
"file-swap": "\uFFD4",
"file-swap-outline": "\uFFD5",
"file-table": "\uFC5A",
"file-table-outline": "\uFC5B",
"file-tree": "\uF645",
"file-undo": "\uF8DB",
"file-undo-outline": "\U000F005E",
"file-upload": "\uFA4C",
"file-upload-outline": "\uFA4D",
"file-video": "\uF22B",
"file-video-outline": "\uFE10",
"file-word": "\uF22C",
"file-word-box": "\uF22D",
"file-word-box-outline": "\U000F005F",
"file-word-outline": "\U000F0060",
"film": "\uF22F",
"filmstrip": "\uF230",
"filmstrip-off": "\uF231",
"filter": "\uF232",
"filter-minus": "\uFF0B",
"filter-minus-outline": "\uFF0C",
"filter-outline": "\uF233",
"filter-plus": "\uFF0D",
"filter-plus-outline": "\uFF0E",
"filter-remove": "\uF234",
"filter-remove-outline": "\uF235",
"filter-variant": "\uF236",
"filter-variant-remove": "\U000F0061",
"finance": "\uF81E",
"find-replace": "\uF6D3",
"fingerprint": "\uF237",
"fingerprint-off": "\uFECE",
"fire": "\uF238",
"fire-extinguisher": "\uFF0F",
"fire-truck": "\uF8AA",
"firebase": "\uF966",
"firefox": "\uF239",
"fireplace": "\uFE11",
"fireplace-off": "\uFE12",
"firework": "\uFE13",
"fish": "\uF23A",
"fishbowl": "\uFF10",
"fishbowl-outline": "\uFF11",
"fit-to-page": "\uFF12",
"fit-to-page-outline": "\uFF13",
"flag": "\uF23B",
"flag-checkered": "\uF23C",
"flag-minus": "\uFB75",
"flag-outline": "\uF23D",
"flag-plus": "\uFB76",
"flag-remove": "\uFB77",
"flag-triangle": "\uF23F",
"flag-variant": "\uF240",
"flag-variant-outline": "\uF23E",
"flare": "\uFD4E",
"flash": "\uF241",
"flash-alert": "\uFF14",
"flash-alert-outline": "\uFF15",
"flash-auto": "\uF242",
"flash-circle": "\uF81F",
"flash-off": "\uF243",
"flash-outline": "\uF6D4",
"flash-red-eye": "\uF67A",
"flashlight": "\uF244",
"flashlight-off": "\uF245",
"flask": "\uF093",
"flask-empty": "\uF094",
"flask-empty-outline": "\uF095",
"flask-outline": "\uF096",
"flattr": "\uF246",
"flickr": "\uFCE3",
"flip-to-back": "\uF247",
"flip-to-front": "\uF248",
"floor-lamp": "\uF8DC",
"floor-lamp-dual": "\U000F0062",
"floor-lamp-variant": "\U000F0063",
"floor-plan": "\uF820",
"floppy": "\uF249",
"floppy-variant": "\uF9EE",
"flower": "\uF24A",
"flower-outline": "\uF9EF",
"flower-poppy": "\uFCE4",
"flower-tulip": "\uF9F0",
"flower-tulip-outline": "\uF9F1",
"focus-auto": "\uFF6B",
"focus-field": "\uFF6C",
"focus-field-horizontal": "\uFF6D",
"focus-field-vertical": "\uFF6E",
"folder": "\uF24B",
"folder-account": "\uF24C",
"folder-account-outline": "\uFB78",
"folder-alert": "\uFDA8",
"folder-alert-outline": "\uFDA9",
"folder-clock": "\uFAB9",
"folder-clock-outline": "\uFABA",
"folder-download": "\uF24D",
"folder-edit": "\uF8DD",
"folder-edit-outline": "\uFDAA",
"folder-google-drive": "\uF24E",
"folder-image": "\uF24F",
"folder-key": "\uF8AB",
"folder-key-network": "\uF8AC",
"folder-key-network-outline": "\uFC5C",
"folder-lock": "\uF250",
"folder-lock-open": "\uF251",
"folder-move": "\uF252",
"folder-multiple": "\uF253",
"folder-multiple-image": "\uF254",
"folder-multiple-outline": "\uF255",
"folder-network": "\uF86F",
"folder-network-outline": "\uFC5D",
"folder-open": "\uF76F",
"folder-open-outline": "\uFDAB",
"folder-outline": "\uF256",
"folder-plus": "\uF257",
"folder-plus-outline": "\uFB79",
"folder-pound": "\uFCE5",
"folder-pound-outline": "\uFCE6",
"folder-remove": "\uF258",
"folder-remove-outline": "\uFB7A",
"folder-search": "\uF967",
"folder-search-outline": "\uF968",
"folder-star": "\uF69C",
"folder-star-outline": "\uFB7B",
"folder-swap": "\uFFD6",
"folder-swap-outline": "\uFFD7",
"folder-sync": "\uFCE7",
"folder-sync-outline": "\uFCE8",
"folder-text": "\uFC5E",
"folder-text-outline": "\uFC5F",
"folder-upload": "\uF259",
"folder-zip": "\uF6EA",
"folder-zip-outline": "\uF7B8",
"font-awesome": "\uF03A",
"food": "\uF25A",
"food-apple": "\uF25B",
"food-apple-outline": "\uFC60",
"food-croissant": "\uF7C7",
"food-fork-drink": "\uF5F2",
"food-off": "\uF5F3",
"food-variant": "\uF25C",
"foot-print": "\uFF6F",
"football": "\uF25D",
"football-australian": "\uF25E",
"football-helmet": "\uF25F",
"forklift": "\uF7C8",
"format-align-bottom": "\uF752",
"format-align-center": "\uF260",
"format-align-justify": "\uF261",
"format-align-left": "\uF262",
"format-align-middle": "\uF753",
"format-align-right": "\uF263",
"format-align-top": "\uF754",
"format-annotation-minus": "\uFABB",
"format-annotation-plus": "\uF646",
"format-bold": "\uF264",
"format-clear": "\uF265",
"format-color-fill": "\uF266",
"format-color-highlight": "\uFE14",
"format-color-text": "\uF69D",
"format-columns": "\uF8DE",
"format-float-center": "\uF267",
"format-float-left": "\uF268",
"format-float-none": "\uF269",
"format-float-right": "\uF26A",
"format-font": "\uF6D5",
"format-font-size-decrease": "\uF9F2",
"format-font-size-increase": "\uF9F3",
"format-header-1": "\uF26B",
"format-header-2": "\uF26C",
"format-header-3": "\uF26D",
"format-header-4": "\uF26E",
"format-header-5": "\uF26F",
"format-header-6": "\uF270",
"format-header-decrease": "\uF271",
"format-header-equal": "\uF272",
"format-header-increase": "\uF273",
"format-header-pound": "\uF274",
"format-horizontal-align-center": "\uF61E",
"format-horizontal-align-left": "\uF61F",
"format-horizontal-align-right": "\uF620",
"format-indent-decrease": "\uF275",
"format-indent-increase": "\uF276",
"format-italic": "\uF277",
"format-letter-case": "\uFB19",
"format-letter-case-lower": "\uFB1A",
"format-letter-case-upper": "\uFB1B",
"format-letter-ends-with": "\uFFD8",
"format-letter-matches": "\uFFD9",
"format-letter-starts-with": "\uFFDA",
"format-line-spacing": "\uF278",
"format-line-style": "\uF5C8",
"format-line-weight": "\uF5C9",
"format-list-bulleted": "\uF279",
"format-list-bulleted-square": "\uFDAC",
"format-list-bulleted-triangle": "\uFECF",
"format-list-bulleted-type": "\uF27A",
"format-list-checkbox": "\uF969",
"format-list-checks": "\uF755",
"format-list-numbered": "\uF27B",
"format-list-numbered-rtl": "\uFCE9",
"format-overline": "\uFED0",
"format-page-break": "\uF6D6",
"format-paint": "\uF27C",
"format-paragraph": "\uF27D",
"format-pilcrow": "\uF6D7",
"format-quote-close": "\uF27E",
"format-quote-open": "\uF756",
"format-rotate-90": "\uF6A9",
"format-section": "\uF69E",
"format-size": "\uF27F",
"format-strikethrough": "\uF280",
"format-strikethrough-variant": "\uF281",
"format-subscript": "\uF282",
"format-superscript": "\uF283",
"format-text": "\uF284",
"format-text-rotation-angle-down": "\uFFDB",
"format-text-rotation-angle-up": "\uFFDC",
"format-text-rotation-down": "\uFD4F",
"format-text-rotation-down-vertical": "\uFFDD",
"format-text-rotation-none": "\uFD50",
"format-text-rotation-up": "\uFFDE",
"format-text-rotation-vertical": "\uFFDF",
"format-text-variant": "\uFE15",
"format-text-wrapping-clip": "\uFCEA",
"format-text-wrapping-overflow": "\uFCEB",
"format-text-wrapping-wrap": "\uFCEC",
"format-textbox": "\uFCED",
"format-textdirection-l-to-r": "\uF285",
"format-textdirection-r-to-l": "\uF286",
"format-title": "\uF5F4",
"format-underline": "\uF287",
"format-vertical-align-bottom": "\uF621",
"format-vertical-align-center": "\uF622",
"format-vertical-align-top": "\uF623",
"format-wrap-inline": "\uF288",
"format-wrap-square": "\uF289",
"format-wrap-tight": "\uF28A",
"format-wrap-top-bottom": "\uF28B",
"forum": "\uF28C",
"forum-outline": "\uF821",
"forward": "\uF28D",
"forwardburger": "\uFD51",
"fountain": "\uF96A",
"fountain-pen": "\uFCEE",
"fountain-pen-tip": "\uFCEF",
"foursquare": "\uF28E",
"freebsd": "\uF8DF",
"frequently-asked-questions": "\uFED1",
"fridge": "\uF290",
"fridge-bottom": "\uF292",
"fridge-outline": "\uF28F",
"fridge-top": "\uF291",
"fruit-cherries": "\U000F0064",
"fruit-citrus": "\U000F0065",
"fruit-grapes": "\U000F0066",
"fruit-grapes-outline": "\U000F0067",
"fruit-pineapple": "\U000F0068",
"fruit-watermelon": "\U000F0069",
"fuel": "\uF7C9",
"fullscreen": "\uF293",
"fullscreen-exit": "\uF294",
"function": "\uF295",
"function-variant": "\uF870",
"fuse": "\uFC61",
"fuse-blade": "\uFC62",
"gamepad": "\uF296",
"gamepad-circle": "\uFE16",
"gamepad-circle-down": "\uFE17",
"gamepad-circle-left": "\uFE18",
"gamepad-circle-outline": "\uFE19",
"gamepad-circle-right": "\uFE1A",
"gamepad-circle-up": "\uFE1B",
"gamepad-down": "\uFE1C",
"gamepad-left": "\uFE1D",
"gamepad-right": "\uFE1E",
"gamepad-round": "\uFE1F",
"gamepad-round-down": "\uFE7E",
"gamepad-round-left": "\uFE7F",
"gamepad-round-outline": "\uFE80",
"gamepad-round-right": "\uFE81",
"gamepad-round-up": "\uFE82",
"gamepad-square": "\uFED2",
"gamepad-square-outline": "\uFED3",
"gamepad-up": "\uFE83",
"gamepad-variant": "\uF297",
"gamepad-variant-outline": "\uFED4",
"gantry-crane": "\uFDAD",
"garage": "\uF6D8",
"garage-alert": "\uF871",
"garage-open": "\uF6D9",
"gas-cylinder": "\uF647",
"gas-station": "\uF298",
"gas-station-outline": "\uFED5",
"gate": "\uF299",
"gate-and": "\uF8E0",
"gate-nand": "\uF8E1",
"gate-nor": "\uF8E2",
"gate-not": "\uF8E3",
"gate-or": "\uF8E4",
"gate-xnor": "\uF8E5",
"gate-xor": "\uF8E6",
"gatsby": "\uFE84",
"gauge": "\uF29A",
"gauge-empty": "\uF872",
"gauge-full": "\uF873",
"gauge-low": "\uF874",
"gavel": "\uF29B",
"gender-female": "\uF29C",
"gender-male": "\uF29D",
"gender-male-female": "\uF29E",
"gender-transgender": "\uF29F",
"gentoo": "\uF8E7",
"gesture": "\uF7CA",
"gesture-double-tap": "\uF73B",
"gesture-pinch": "\uFABC",
"gesture-spread": "\uFABD",
"gesture-swipe": "\uFD52",
"gesture-swipe-down": "\uF73C",
"gesture-swipe-horizontal": "\uFABE",
"gesture-swipe-left": "\uF73D",
"gesture-swipe-right": "\uF73E",
"gesture-swipe-up": "\uF73F",
"gesture-swipe-vertical": "\uFABF",
"gesture-tap": "\uF740",
"gesture-tap-hold": "\uFD53",
"gesture-two-double-tap": "\uF741",
"gesture-two-tap": "\uF742",
"ghost": "\uF2A0",
"ghost-off": "\uF9F4",
"gif": "\uFD54",
"gift": "\uFE85",
"gift-outline": "\uF2A1",
"git": "\uF2A2",
"github-box": "\uF2A3",
"github-circle": "\uF2A4",
"github-face": "\uF6DA",
"gitlab": "\uFB7C",
"glass-cocktail": "\uF356",
"glass-flute": "\uF2A5",
"glass-mug": "\uF2A6",
"glass-stange": "\uF2A7",
"glass-tulip": "\uF2A8",
"glass-wine": "\uF875",
"glassdoor": "\uF2A9",
"glasses": "\uF2AA",
"globe-model": "\uF8E8",
"gmail": "\uF2AB",
"gnome": "\uF2AC",
"go-kart": "\uFD55",
"go-kart-track": "\uFD56",
"gog": "\uFB7D",
"golf": "\uF822",
"gondola": "\uF685",
"goodreads": "\uFD57",
"google": "\uF2AD",
"google-adwords": "\uFC63",
"google-analytics": "\uF7CB",
"google-assistant": "\uF7CC",
"google-cardboard": "\uF2AE",
"google-chrome": "\uF2AF",
"google-circles": "\uF2B0",
"google-circles-communities": "\uF2B1",
"google-circles-extended": "\uF2B2",
"google-circles-group": "\uF2B3",
"google-classroom": "\uF2C0",
"google-controller": "\uF2B4",
"google-controller-off": "\uF2B5",
"google-drive": "\uF2B6",
"google-earth": "\uF2B7",
"google-fit": "\uF96B",
"google-glass": "\uF2B8",
"google-hangouts": "\uF2C9",
"google-home": "\uF823",
"google-keep": "\uF6DB",
"google-lens": "\uF9F5",
"google-maps": "\uF5F5",
"google-my-business": "\U000F006A",
"google-nearby": "\uF2B9",
"google-pages": "\uF2BA",
"google-photos": "\uF6DC",
"google-physical-web": "\uF2BB",
"google-play": "\uF2BC",
"google-plus": "\uF2BD",
"google-plus-box": "\uF2BE",
"google-podcast": "\uFED6",
"google-spreadsheet": "\uF9F6",
"google-street-view": "\uFC64",
"google-translate": "\uF2BF",
"gradient": "\uF69F",
"grain": "\uFD58",
"graph": "\U000F006B",
"graph-outline": "\U000F006C",
"graphql": "\uF876",
"grave-stone": "\uFB7E",
"grease-pencil": "\uF648",
"greater-than": "\uF96C",
"greater-than-or-equal": "\uF96D",
"grid": "\uF2C1",
"grid-large": "\uF757",
"grid-off": "\uF2C2",
"grill": "\uFE86",
"group": "\uF2C3",
"guitar-acoustic": "\uF770",
"guitar-electric": "\uF2C4",
"guitar-pick": "\uF2C5",
"guitar-pick-outline": "\uF2C6",
"guy-fawkes-mask": "\uF824",
"hackernews": "\uF624",
"hail": "\uFAC0",
"halloween": "\uFB7F",
"hamburger": "\uF684",
"hammer": "\uF8E9",
"hand": "\uFA4E",
"hand-left": "\uFE87",
"hand-okay": "\uFA4F",
"hand-peace": "\uFA50",
"hand-peace-variant": "\uFA51",
"hand-pointing-down": "\uFA52",
"hand-pointing-left": "\uFA53",
"hand-pointing-right": "\uF2C7",
"hand-pointing-up": "\uFA54",
"hand-right": "\uFE88",
"hand-saw": "\uFE89",
"handball": "\uFF70",
"hanger": "\uF2C8",
"hard-hat": "\uF96E",
"harddisk": "\uF2CA",
"harddisk-plus": "\U000F006D",
"harddisk-remove": "\U000F006E",
"hat-fedora": "\uFB80",
"hazard-lights": "\uFC65",
"hdr": "\uFD59",
"hdr-off": "\uFD5A",
"headphones": "\uF2CB",
"headphones-bluetooth": "\uF96F",
"headphones-box": "\uF2CC",
"headphones-off": "\uF7CD",
"headphones-settings": "\uF2CD",
"headset": "\uF2CE",
"headset-dock": "\uF2CF",
"headset-off": "\uF2D0",
"heart": "\uF2D1",
"heart-box": "\uF2D2",
"heart-box-outline": "\uF2D3",
"heart-broken": "\uF2D4",
"heart-broken-outline": "\uFCF0",
"heart-circle": "\uF970",
"heart-circle-outline": "\uF971",
"heart-flash": "\uFF16",
"heart-half": "\uF6DE",
"heart-half-full": "\uF6DD",
"heart-half-outline": "\uF6DF",
"heart-multiple": "\uFA55",
"heart-multiple-outline": "\uFA56",
"heart-off": "\uF758",
"heart-outline": "\uF2D5",
"heart-pulse": "\uF5F6",
"helicopter": "\uFAC1",
"help": "\uF2D6",
"help-box": "\uF78A",
"help-circle": "\uF2D7",
"help-circle-outline": "\uF625",
"help-network": "\uF6F4",
"help-network-outline": "\uFC66",
"help-rhombus": "\uFB81",
"help-rhombus-outline": "\uFB82",
"hexagon": "\uF2D8",
"hexagon-multiple": "\uF6E0",
"hexagon-outline": "\uF2D9",
"hexagon-slice-1": "\uFAC2",
"hexagon-slice-2": "\uFAC3",
"hexagon-slice-3": "\uFAC4",
"hexagon-slice-4": "\uFAC5",
"hexagon-slice-5": "\uFAC6",
"hexagon-slice-6": "\uFAC7",
"hexagram": "\uFAC8",
"hexagram-outline": "\uFAC9",
"high-definition": "\uF7CE",
"high-definition-box": "\uF877",
"highway": "\uF5F7",
"hiking": "\uFD5B",
"hinduism": "\uF972",
"history": "\uF2DA",
"hockey-puck": "\uF878",
"hockey-sticks": "\uF879",
"hololens": "\uF2DB",
"home": "\uF2DC",
"home-account": "\uF825",
"home-alert": "\uF87A",
"home-analytics": "\uFED7",
"home-assistant": "\uF7CF",
"home-automation": "\uF7D0",
"home-circle": "\uF7D1",
"home-circle-outline": "\U000F006F",
"home-city": "\uFCF1",
"home-city-outline": "\uFCF2",
"home-currency-usd": "\uF8AE",
"home-export-outline": "\uFFB8",
"home-flood": "\uFF17",
"home-floor-0": "\uFDAE",
"home-floor-1": "\uFD5C",
"home-floor-2": "\uFD5D",
"home-floor-3": "\uFD5E",
"home-floor-a": "\uFD5F",
"home-floor-b": "\uFD60",
"home-floor-g": "\uFD61",
"home-floor-l": "\uFD62",
"home-floor-negative-1": "\uFDAF",
"home-group": "\uFDB0",
"home-heart": "\uF826",
"home-import-outline": "\uFFB9",
"home-lock": "\uF8EA",
"home-lock-open": "\uF8EB",
"home-map-marker": "\uF5F8",
"home-minus": "\uF973",
"home-modern": "\uF2DD",
"home-outline": "\uF6A0",
"home-plus": "\uF974",
"home-thermometer": "\uFF71",
"home-thermometer-outline": "\uFF72",
"home-variant": "\uF2DE",
"home-variant-outline": "\uFB83",
"hook": "\uF6E1",
"hook-off": "\uF6E2",
"hops": "\uF2DF",
"horseshoe": "\uFA57",
"hospital": "\U000F0017",
"hospital-box": "\uF2E0",
"hospital-box-outline": "\U000F0018",
"hospital-building": "\uF2E1",
"hospital-marker": "\uF2E2",
"hot-tub": "\uF827",
"hotel": "\uF2E3",
"houzz": "\uF2E4",
"houzz-box": "\uF2E5",
"hubspot": "\uFCF3",
"hulu": "\uF828",
"human": "\uF2E6",
"human-child": "\uF2E7",
"human-female": "\uF649",
"human-female-boy": "\uFA58",
"human-female-female": "\uFA59",
"human-female-girl": "\uFA5A",
"human-greeting": "\uF64A",
"human-handsdown": "\uF64B",
"human-handsup": "\uF64C",
"human-male": "\uF64D",
"human-male-boy": "\uFA5B",
"human-male-female": "\uF2E8",
"human-male-girl": "\uFA5C",
"human-male-height": "\uFF18",
"human-male-height-variant": "\uFF19",
"human-male-male": "\uFA5D",
"human-pregnant": "\uF5CF",
"humble-bundle": "\uF743",
"ice-cream": "\uF829",
"ice-pop": "\uFF1A",
"id-card": "\uFFE0",
"identifier": "\uFF1B",
"iframe": "\uFC67",
"iframe-outline": "\uFC68",
"image": "\uF2E9",
"image-album": "\uF2EA",
"image-area": "\uF2EB",
"image-area-close": "\uF2EC",
"image-auto-adjust": "\uFFE1",
"image-broken": "\uF2ED",
"image-broken-variant": "\uF2EE",
"image-filter": "\uF2EF",
"image-filter-black-white": "\uF2F0",
"image-filter-center-focus": "\uF2F1",
"image-filter-center-focus-strong": "\uFF1C",
"image-filter-center-focus-strong-outline": "\uFF1D",
"image-filter-center-focus-weak": "\uF2F2",
"image-filter-drama": "\uF2F3",
"image-filter-frames": "\uF2F4",
"image-filter-hdr": "\uF2F5",
"image-filter-none": "\uF2F6",
"image-filter-tilt-shift": "\uF2F7",
"image-filter-vintage": "\uF2F8",
"image-frame": "\uFE8A",
"image-move": "\uF9F7",
"image-multiple": "\uF2F9",
"image-off": "\uF82A",
"image-outline": "\uF975",
"image-plus": "\uF87B",
"image-search": "\uF976",
"image-search-outline": "\uF977",
"image-size-select-actual": "\uFC69",
"image-size-select-large": "\uFC6A",
"image-size-select-small": "\uFC6B",
"import": "\uF2FA",
"inbox": "\uF686",
"inbox-arrow-down": "\uF2FB",
"inbox-arrow-up": "\uF3D1",
"inbox-multiple": "\uF8AF",
"inbox-multiple-outline": "\uFB84",
"incognito": "\uF5F9",
"infinity": "\uF6E3",
"information": "\uF2FC",
"information-outline": "\uF2FD",
"information-variant": "\uF64E",
"instagram": "\uF2FE",
"instapaper": "\uF2FF",
"instrument-triangle": "\U000F0070",
"internet-explorer": "\uF300",
"invert-colors": "\uF301",
"invert-colors-off": "\uFE8B",
"ip": "\uFA5E",
"ip-network": "\uFA5F",
"ip-network-outline": "\uFC6C",
"ipod": "\uFC6D",
"islam": "\uF978",
"island": "\U000F0071",
"itunes": "\uF676",
"jabber": "\uFDB1",
"jeepney": "\uF302",
"jellyfish": "\uFF1E",
"jellyfish-outline": "\uFF1F",
"jira": "\uF303",
"jquery": "\uF87C",
"jsfiddle": "\uF304",
"json": "\uF626",
"judaism": "\uF979",
"kabaddi": "\uFD63",
"karate": "\uF82B",
"keg": "\uF305",
"kettle": "\uF5FA",
"kettle-outline": "\uFF73",
"key": "\uF306",
"key-change": "\uF307",
"key-minus": "\uF308",
"key-outline": "\uFDB2",
"key-plus": "\uF309",
"key-remove": "\uF30A",
"key-variant": "\uF30B",
"key-wireless": "\uFFE2",
"keyboard": "\uF30C",
"keyboard-backspace": "\uF30D",
"keyboard-caps": "\uF30E",
"keyboard-close": "\uF30F",
"keyboard-off": "\uF310",
"keyboard-off-outline": "\uFE8C",
"keyboard-outline": "\uF97A",
"keyboard-return": "\uF311",
"keyboard-settings": "\uF9F8",
"keyboard-settings-outline": "\uF9F9",
"keyboard-space": "\U000F0072",
"keyboard-tab": "\uF312",
"keyboard-variant": "\uF313",
"kickstarter": "\uF744",
"knife": "\uF9FA",
"knife-military": "\uF9FB",
"kodi": "\uF314",
"label": "\uF315",
"label-off": "\uFACA",
"label-off-outline": "\uFACB",
"label-outline": "\uF316",
"label-variant": "\uFACC",
"label-variant-outline": "\uFACD",
"ladybug": "\uF82C",
"lambda": "\uF627",
"lamp": "\uF6B4",
"lan": "\uF317",
"lan-connect": "\uF318",
"lan-disconnect": "\uF319",
"lan-pending": "\uF31A",
"language-c": "\uF671",
"language-cpp": "\uF672",
"language-csharp": "\uF31B",
"language-css3": "\uF31C",
"language-go": "\uF7D2",
"language-haskell": "\uFC6E",
"language-html5": "\uF31D",
"language-java": "\uFB1C",
"language-javascript": "\uF31E",
"language-lua": "\uF8B0",
"language-php": "\uF31F",
"language-python": "\uF320",
"language-python-text": "\uF321",
"language-r": "\uF7D3",
"language-ruby-on-rails": "\uFACE",
"language-swift": "\uF6E4",
"language-typescript": "\uF6E5",
"laptop": "\uF322",
"laptop-chromebook": "\uF323",
"laptop-mac": "\uF324",
"laptop-off": "\uF6E6",
"laptop-windows": "\uF325",
"laravel": "\uFACF",
"lasso": "\uFF20",
"lastfm": "\uF326",
"lastpass": "\uF446",
"latitude": "\uFF74",
"launch": "\uF327",
"lava-lamp": "\uF7D4",
"layers": "\uF328",
"layers-minus": "\uFE8D",
"layers-off": "\uF329",
"layers-off-outline": "\uF9FC",
"layers-outline": "\uF9FD",
"layers-plus": "\uFE30",
"layers-remove": "\uFE31",
"layers-triple": "\uFF75",
"layers-triple-outline": "\uFF76",
"lead-pencil": "\uF64F",
"leaf": "\uF32A",
"leaf-maple": "\uFC6F",
"leak": "\uFDB3",
"leak-off": "\uFDB4",
"led-off": "\uF32B",
"led-on": "\uF32C",
"led-outline": "\uF32D",
"led-strip": "\uF7D5",
"led-strip-variant": "\U000F0073",
"led-variant-off": "\uF32E",
"led-variant-on": "\uF32F",
"led-variant-outline": "\uF330",
"less-than": "\uF97B",
"less-than-or-equal": "\uF97C",
"library": "\uF331",
"library-books": "\uF332",
"library-movie": "\uFCF4",
"library-music": "\uF333",
"library-music-outline": "\uFF21",
"library-shelves": "\uFB85",
"library-video": "\uFCF5",
"license": "\uFFE3",
"lifebuoy": "\uF87D",
"light-switch": "\uF97D",
"lightbulb": "\uF335",
"lightbulb-off": "\uFE32",
"lightbulb-off-outline": "\uFE33",
"lightbulb-on": "\uF6E7",
"lightbulb-on-outline": "\uF6E8",
"lightbulb-outline": "\uF336",
"lighthouse": "\uF9FE",
"lighthouse-on": "\uF9FF",
"link": "\uF337",
"link-box": "\uFCF6",
"link-box-outline": "\uFCF7",
"link-box-variant": "\uFCF8",
"link-box-variant-outline": "\uFCF9",
"link-off": "\uF338",
"link-plus": "\uFC70",
"link-variant": "\uF339",
"link-variant-off": "\uF33A",
"linkedin": "\uF33B",
"linkedin-box": "\uF33C",
"linux": "\uF33D",
"linux-mint": "\uF8EC",
"litecoin": "\uFA60",
"loading": "\uF771",
"location-enter": "\uFFE4",
"location-exit": "\uFFE5",
"lock": "\uF33E",
"lock-alert": "\uF8ED",
"lock-clock": "\uF97E",
"lock-open": "\uF33F",
"lock-open-outline": "\uF340",
"lock-open-variant": "\uFFE6",
"lock-open-variant-outline": "\uFFE7",
"lock-outline": "\uF341",
"lock-pattern": "\uF6E9",
"lock-plus": "\uF5FB",
"lock-question": "\uF8EE",
"lock-reset": "\uF772",
"lock-smart": "\uF8B1",
"locker": "\uF7D6",
"locker-multiple": "\uF7D7",
"login": "\uF342",
"login-variant": "\uF5FC",
"logout": "\uF343",
"logout-variant": "\uF5FD",
"longitude": "\uFF77",
"looks": "\uF344",
"loupe": "\uF345",
"lumx": "\uF346",
"lyft": "\uFB1D",
"magnet": "\uF347",
"magnet-on": "\uF348",
"magnify": "\uF349",
"magnify-close": "\uF97F",
"magnify-minus": "\uF34A",
"magnify-minus-cursor": "\uFA61",
"magnify-minus-outline": "\uF6EB",
"magnify-plus": "\uF34B",
"magnify-plus-cursor": "\uFA62",
"magnify-plus-outline": "\uF6EC",
"mail": "\uFED8",
"mail-ru": "\uF34C",
"mailbox": "\uF6ED",
"mailbox-open": "\uFD64",
"mailbox-open-outline": "\uFD65",
"mailbox-open-up": "\uFD66",
"mailbox-open-up-outline": "\uFD67",
"mailbox-outline": "\uFD68",
"mailbox-up": "\uFD69",
"mailbox-up-outline": "\uFD6A",
"map": "\uF34D",
"map-check": "\uFED9",
"map-check-outline": "\uFEDA",
"map-clock": "\uFCFA",
"map-clock-outline": "\uFCFB",
"map-legend": "\uFA00",
"map-marker": "\uF34E",
"map-marker-alert": "\uFF22",
"map-marker-alert-outline": "\uFF23",
"map-marker-check": "\uFC71",
"map-marker-circle": "\uF34F",
"map-marker-distance": "\uF8EF",
"map-marker-minus": "\uF650",
"map-marker-multiple": "\uF350",
"map-marker-off": "\uF351",
"map-marker-outline": "\uF7D8",
"map-marker-path": "\uFCFC",
"map-marker-plus": "\uF651",
"map-marker-question": "\uFF24",
"map-marker-question-outline": "\uFF25",
"map-marker-radius": "\uF352",
"map-marker-remove": "\uFF26",
"map-marker-remove-variant": "\uFF27",
"map-minus": "\uF980",
"map-outline": "\uF981",
"map-plus": "\uF982",
"map-search": "\uF983",
"map-search-outline": "\uF984",
"mapbox": "\uFB86",
"margin": "\uF353",
"markdown": "\uF354",
"markdown-outline": "\uFF78",
"marker": "\uF652",
"marker-cancel": "\uFDB5",
"marker-check": "\uF355",
"mastodon": "\uFAD0",
"mastodon-variant": "\uFAD1",
"material-design": "\uF985",
"material-ui": "\uF357",
"math-compass": "\uF358",
"math-cos": "\uFC72",
"math-integral": "\uFFE8",
"math-integral-box": "\uFFE9",
"math-norm": "\uFFEA",
"math-norm-box": "\uFFEB",
"math-sin": "\uFC73",
"math-tan": "\uFC74",
"matrix": "\uF628",
"maxcdn": "\uF359",
"medal": "\uF986",
"medical-bag": "\uF6EE",
"medium": "\uF35A",
"meetup": "\uFAD2",
"memory": "\uF35B",
"menu": "\uF35C",
"menu-down": "\uF35D",
"menu-down-outline": "\uF6B5",
"menu-left": "\uF35E",
"menu-left-outline": "\uFA01",
"menu-open": "\uFB87",
"menu-right": "\uF35F",
"menu-right-outline": "\uFA02",
"menu-swap": "\uFA63",
"menu-swap-outline": "\uFA64",
"menu-up": "\uF360",
"menu-up-outline": "\uF6B6",
"merge": "\uFF79",
"message": "\uF361",
"message-alert": "\uF362",
"message-alert-outline": "\uFA03",
"message-bulleted": "\uF6A1",
"message-bulleted-off": "\uF6A2",
"message-draw": "\uF363",
"message-image": "\uF364",
"message-lock": "\uFFEC",
"message-outline": "\uF365",
"message-plus": "\uF653",
"message-processing": "\uF366",
"message-reply": "\uF367",
"message-reply-text": "\uF368",
"message-settings": "\uF6EF",
"message-settings-variant": "\uF6F0",
"message-text": "\uF369",
"message-text-lock": "\uFFED",
"message-text-outline": "\uF36A",
"message-video": "\uF36B",
"meteor": "\uF629",
"metronome": "\uF7D9",
"metronome-tick": "\uF7DA",
"micro-sd": "\uF7DB",
"microphone": "\uF36C",
"microphone-minus": "\uF8B2",
"microphone-off": "\uF36D",
"microphone-outline": "\uF36E",
"microphone-plus": "\uF8B3",
"microphone-settings": "\uF36F",
"microphone-variant": "\uF370",
"microphone-variant-off": "\uF371",
"microscope": "\uF654",
"microsoft": "\uF372",
"microsoft-dynamics": "\uF987",
"microwave": "\uFC75",
"middleware": "\uFF7A",
"middleware-outline": "\uFF7B",
"midi": "\uF8F0",
"midi-port": "\uF8F1",
"mine": "\uFDB6",
"minecraft": "\uF373",
"mini-sd": "\uFA04",
"minidisc": "\uFA05",
"minus": "\uF374",
"minus-box": "\uF375",
"minus-box-outline": "\uF6F1",
"minus-circle": "\uF376",
"minus-circle-outline": "\uF377",
"minus-network": "\uF378",
"minus-network-outline": "\uFC76",
"mixcloud": "\uF62A",
"mixed-martial-arts": "\uFD6B",
"mixed-reality": "\uF87E",
"mixer": "\uF7DC",
"molecule": "\uFB88",
"monitor": "\uF379",
"monitor-cellphone": "\uF988",
"monitor-cellphone-star": "\uF989",
"monitor-dashboard": "\uFA06",
"monitor-lock": "\uFDB7",
"monitor-multiple": "\uF37A",
"monitor-off": "\uFD6C",
"monitor-screenshot": "\uFE34",
"monitor-speaker": "\uFF7C",
"monitor-speaker-off": "\uFF7D",
"monitor-star": "\uFDB8",
"moon-first-quarter": "\uFF7E",
"moon-full": "\uFF7F",
"moon-last-quarter": "\uFF80",
"moon-new": "\uFF81",
"moon-waning-crescent": "\uFF82",
"moon-waning-gibbous": "\uFF83",
"moon-waxing-crescent": "\uFF84",
"moon-waxing-gibbous": "\uFF85",
"more": "\uF37B",
"mother-nurse": "\uFCFD",
"motion-sensor": "\uFD6D",
"motorbike": "\uF37C",
"mouse": "\uF37D",
"mouse-bluetooth": "\uF98A",
"mouse-off": "\uF37E",
"mouse-variant": "\uF37F",
"mouse-variant-off": "\uF380",
"move-resize": "\uF655",
"move-resize-variant": "\uF656",
"movie": "\uF381",
"movie-open": "\uFFEE",
"movie-open-outline": "\uFFEF",
"movie-outline": "\uFDB9",
"movie-roll": "\uF7DD",
"muffin": "\uF98B",
"multiplication": "\uF382",
"multiplication-box": "\uF383",
"mushroom": "\uF7DE",
"mushroom-outline": "\uF7DF",
"music": "\uF759",
"music-accidental-double-flat": "\uFF86",
"music-accidental-double-sharp": "\uFF87",
"music-accidental-flat": "\uFF88",
"music-accidental-natural": "\uFF89",
"music-accidental-sharp": "\uFF8A",
"music-box": "\uF384",
"music-box-outline": "\uF385",
"music-circle": "\uF386",
"music-circle-outline": "\uFAD3",
"music-clef-alto": "\uFF8B",
"music-clef-bass": "\uFF8C",
"music-clef-treble": "\uFF8D",
"music-note": "\uF387",
"music-note-bluetooth": "\uF5FE",
"music-note-bluetooth-off": "\uF5FF",
"music-note-eighth": "\uF388",
"music-note-eighth-dotted": "\uFF8E",
"music-note-half": "\uF389",
"music-note-half-dotted": "\uFF8F",
"music-note-off": "\uF38A",
"music-note-off-outline": "\uFF90",
"music-note-outline": "\uFF91",
"music-note-plus": "\uFDBA",
"music-note-quarter": "\uF38B",
"music-note-quarter-dotted": "\uFF92",
"music-note-sixteenth": "\uF38C",
"music-note-sixteenth-dotted": "\uFF93",
"music-note-whole": "\uF38D",
"music-note-whole-dotted": "\uFF94",
"music-off": "\uF75A",
"music-rest-eighth": "\uFF95",
"music-rest-half": "\uFF96",
"music-rest-quarter": "\uFF97",
"music-rest-sixteenth": "\uFF98",
"music-rest-whole": "\uFF99",
"nail": "\uFDBB",
"nas": "\uF8F2",
"nativescript": "\uF87F",
"nature": "\uF38E",
"nature-people": "\uF38F",
"navigation": "\uF390",
"near-me": "\uF5CD",
"necklace": "\uFF28",
"needle": "\uF391",
"netflix": "\uF745",
"network": "\uF6F2",
"network-off": "\uFC77",
"network-off-outline": "\uFC78",
"network-outline": "\uFC79",
"network-strength-1": "\uF8F3",
"network-strength-1-alert": "\uF8F4",
"network-strength-2": "\uF8F5",
"network-strength-2-alert": "\uF8F6",
"network-strength-3": "\uF8F7",
"network-strength-3-alert": "\uF8F8",
"network-strength-4": "\uF8F9",
"network-strength-4-alert": "\uF8FA",
"network-strength-off": "\uF8FB",
"network-strength-off-outline": "\uF8FC",
"network-strength-outline": "\uF8FD",
"new-box": "\uF394",
"newspaper": "\uF395",
"newspaper-minus": "\uFF29",
"newspaper-plus": "\uFF2A",
"newspaper-variant": "\U000F0023",
"newspaper-variant-multiple": "\U000F0024",
"newspaper-variant-multiple-outline": "\U000F0025",
"newspaper-variant-outline": "\U000F0026",
"nfc": "\uF396",
"nfc-off": "\uFE35",
"nfc-search-variant": "\uFE36",
"nfc-tap": "\uF397",
"nfc-variant": "\uF398",
"nfc-variant-off": "\uFE37",
"ninja": "\uF773",
"nintendo-switch": "\uF7E0",
"nodejs": "\uF399",
"not-equal": "\uF98C",
"not-equal-variant": "\uF98D",
"note": "\uF39A",
"note-multiple": "\uF6B7",
"note-multiple-outline": "\uF6B8",
"note-outline": "\uF39B",
"note-plus": "\uF39C",
"note-plus-outline": "\uF39D",
"note-text": "\uF39E",
"notebook": "\uF82D",
"notebook-multiple": "\uFE38",
"notebook-outline": "\uFEDC",
"notification-clear-all": "\uF39F",
"npm": "\uF6F6",
"npm-variant": "\uF98E",
"npm-variant-outline": "\uF98F",
"nuke": "\uF6A3",
"null": "\uF7E1",
"numeric": "\uF3A0",
"numeric-0": "\u0030",
"numeric-0-box": "\uF3A1",
"numeric-0-box-multiple": "\uFF2B",
"numeric-0-box-multiple-outline": "\uF3A2",
"numeric-0-box-outline": "\uF3A3",
"numeric-0-circle": "\uFC7A",
"numeric-0-circle-outline": "\uFC7B",
"numeric-1": "\u0031",
"numeric-1-box": "\uF3A4",
"numeric-1-box-multiple": "\uFF2C",
"numeric-1-box-multiple-outline": "\uF3A5",
"numeric-1-box-outline": "\uF3A6",
"numeric-1-circle": "\uFC7C",
"numeric-1-circle-outline": "\uFC7D",
"numeric-10": "\U000F000A",
"numeric-10-box": "\uFF9A",
"numeric-10-box-multiple": "\U000F000B",
"numeric-10-box-multiple-outline": "\U000F000C",
"numeric-10-box-outline": "\uFF9B",
"numeric-10-circle": "\U000F000D",
"numeric-10-circle-outline": "\U000F000E",
"numeric-2": "\u0032",
"numeric-2-box": "\uF3A7",
"numeric-2-box-multiple": "\uFF2D",
"numeric-2-box-multiple-outline": "\uF3A8",
"numeric-2-box-outline": "\uF3A9",
"numeric-2-circle": "\uFC7E",
"numeric-2-circle-outline": "\uFC7F",
"numeric-3": "\u0033",
"numeric-3-box": "\uF3AA",
"numeric-3-box-multiple": "\uFF2E",
"numeric-3-box-multiple-outline": "\uF3AB",
"numeric-3-box-outline": "\uF3AC",
"numeric-3-circle": "\uFC80",
"numeric-3-circle-outline": "\uFC81",
"numeric-4": "\u0034",
"numeric-4-box": "\uF3AD",
"numeric-4-box-multiple": "\uFF2F",
"numeric-4-box-multiple-outline": "\uF3AE",
"numeric-4-box-outline": "\uF3AF",
"numeric-4-circle": "\uFC82",
"numeric-4-circle-outline": "\uFC83",
"numeric-5": "\u0035",
"numeric-5-box": "\uF3B0",
"numeric-5-box-multiple": "\uFF30",
"numeric-5-box-multiple-outline": "\uF3B1",
"numeric-5-box-outline": "\uF3B2",
"numeric-5-circle": "\uFC84",
"numeric-5-circle-outline": "\uFC85",
"numeric-6": "\u0036",
"numeric-6-box": "\uF3B3",
"numeric-6-box-multiple": "\uFF31",
"numeric-6-box-multiple-outline": "\uF3B4",
"numeric-6-box-outline": "\uF3B5",
"numeric-6-circle": "\uFC86",
"numeric-6-circle-outline": "\uFC87",
"numeric-7": "\u0037",
"numeric-7-box": "\uF3B6",
"numeric-7-box-multiple": "\uFF32",
"numeric-7-box-multiple-outline": "\uF3B7",
"numeric-7-box-outline": "\uF3B8",
"numeric-7-circle": "\uFC88",
"numeric-7-circle-outline": "\uFC89",
"numeric-8": "\u0038",
"numeric-8-box": "\uF3B9",
"numeric-8-box-multiple": "\uFF33",
"numeric-8-box-multiple-outline": "\uF3BA",
"numeric-8-box-outline": "\uF3BB",
"numeric-8-circle": "\uFC8A",
"numeric-8-circle-outline": "\uFC8B",
"numeric-9": "\u0039",
"numeric-9-box": "\uF3BC",
"numeric-9-box-multiple": "\uFF34",
"numeric-9-box-multiple-outline": "\uF3BD",
"numeric-9-box-outline": "\uF3BE",
"numeric-9-circle": "\uFC8C",
"numeric-9-circle-outline": "\uFC8D",
"numeric-9-plus": "\U000F000F",
"numeric-9-plus-box": "\uF3BF",
"numeric-9-plus-box-multiple": "\uFF35",
"numeric-9-plus-box-multiple-outline": "\uF3C0",
"numeric-9-plus-box-outline": "\uF3C1",
"numeric-9-plus-circle": "\uFC8E",
"numeric-9-plus-circle-outline": "\uFC8F",
"numeric-negative-1": "\U000F0074",
"nut": "\uF6F7",
"nutrition": "\uF3C2",
"oar": "\uF67B",
"ocarina": "\uFDBC",
"octagon": "\uF3C3",
"octagon-outline": "\uF3C4",
"octagram": "\uF6F8",
"octagram-outline": "\uF774",
"odnoklassniki": "\uF3C5",
"office": "\uF3C6",
"office-building": "\uF990",
"oil": "\uF3C7",
"oil-lamp": "\uFF36",
"oil-level": "\U000F0075",
"oil-temperature": "\U000F0019",
"omega": "\uF3C9",
"one-up": "\uFB89",
"onedrive": "\uF3CA",
"onenote": "\uF746",
"onepassword": "\uF880",
"opacity": "\uF5CC",
"open-in-app": "\uF3CB",
"open-in-new": "\uF3CC",
"open-source-initiative": "\uFB8A",
"openid": "\uF3CD",
"opera": "\uF3CE",
"orbit": "\uF018",
"origin": "\uFB2B",
"ornament": "\uF3CF",
"ornament-variant": "\uF3D0",
"outdoor-lamp": "\U000F0076",
"outlook": "\uFCFE",
"overscan": "\U000F0027",
"owl": "\uF3D2",
"pac-man": "\uFB8B",
"package": "\uF3D3",
"package-down": "\uF3D4",
"package-up": "\uF3D5",
"package-variant": "\uF3D6",
"package-variant-closed": "\uF3D7",
"page-first": "\uF600",
"page-last": "\uF601",
"page-layout-body": "\uF6F9",
"page-layout-footer": "\uF6FA",
"page-layout-header": "\uF6FB",
"page-layout-header-footer": "\uFF9C",
"page-layout-sidebar-left": "\uF6FC",
"page-layout-sidebar-right": "\uF6FD",
"page-next": "\uFB8C",
"page-next-outline": "\uFB8D",
"page-previous": "\uFB8E",
"page-previous-outline": "\uFB8F",
"palette": "\uF3D8",
"palette-advanced": "\uF3D9",
"palette-outline": "\uFE6C",
"palette-swatch": "\uF8B4",
"palm-tree": "\U000F0077",
"pan": "\uFB90",
"pan-bottom-left": "\uFB91",
"pan-bottom-right": "\uFB92",
"pan-down": "\uFB93",
"pan-horizontal": "\uFB94",
"pan-left": "\uFB95",
"pan-right": "\uFB96",
"pan-top-left": "\uFB97",
"pan-top-right": "\uFB98",
"pan-up": "\uFB99",
"pan-vertical": "\uFB9A",
"panda": "\uF3DA",
"pandora": "\uF3DB",
"panorama": "\uF3DC",
"panorama-fisheye": "\uF3DD",
"panorama-horizontal": "\uF3DE",
"panorama-vertical": "\uF3DF",
"panorama-wide-angle": "\uF3E0",
"paper-cut-vertical": "\uF3E1",
"paperclip": "\uF3E2",
"parachute": "\uFC90",
"parachute-outline": "\uFC91",
"parking": "\uF3E3",
"party-popper": "\U000F0078",
"passport": "\uF7E2",
"passport-biometric": "\uFDBD",
"patio-heater": "\uFF9D",
"patreon": "\uF881",
"pause": "\uF3E4",
"pause-circle": "\uF3E5",
"pause-circle-outline": "\uF3E6",
"pause-octagon": "\uF3E7",
"pause-octagon-outline": "\uF3E8",
"paw": "\uF3E9",
"paw-off": "\uF657",
"paypal": "\uF882",
"pdf-box": "\uFE39",
"peace": "\uF883",
"peanut": "\U000F001E",
"peanut-off": "\U000F001F",
"peanut-off-outline": "\U000F0021",
"peanut-outline": "\U000F0020",
"pen": "\uF3EA",
"pen-lock": "\uFDBE",
"pen-minus": "\uFDBF",
"pen-off": "\uFDC0",
"pen-plus": "\uFDC1",
"pen-remove": "\uFDC2",
"pencil": "\uF3EB",
"pencil-box": "\uF3EC",
"pencil-box-outline": "\uF3ED",
"pencil-circle": "\uF6FE",
"pencil-circle-outline": "\uF775",
"pencil-lock": "\uF3EE",
"pencil-lock-outline": "\uFDC3",
"pencil-minus": "\uFDC4",
"pencil-minus-outline": "\uFDC5",
"pencil-off": "\uF3EF",
"pencil-off-outline": "\uFDC6",
"pencil-outline": "\uFC92",
"pencil-plus": "\uFDC7",
"pencil-plus-outline": "\uFDC8",
"pencil-remove": "\uFDC9",
"pencil-remove-outline": "\uFDCA",
"penguin": "\uFEDD",
"pentagon": "\uF6FF",
"pentagon-outline": "\uF700",
"percent": "\uF3F0",
"periodic-table": "\uF8B5",
"periodic-table-co2": "\uF7E3",
"periscope": "\uF747",
"perspective-less": "\uFCFF",
"perspective-more": "\uFD00",
"pharmacy": "\uF3F1",
"phone": "\uF3F2",
"phone-alert": "\uFF37",
"phone-bluetooth": "\uF3F3",
"phone-classic": "\uF602",
"phone-forward": "\uF3F4",
"phone-hangup": "\uF3F5",
"phone-in-talk": "\uF3F6",
"phone-incoming": "\uF3F7",
"phone-lock": "\uF3F8",
"phone-log": "\uF3F9",
"phone-minus": "\uF658",
"phone-missed": "\uF3FA",
"phone-off": "\uFDCB",
"phone-outgoing": "\uF3FB",
"phone-outline": "\uFDCC",
"phone-paused": "\uF3FC",
"phone-plus": "\uF659",
"phone-return": "\uF82E",
"phone-rotate-landscape": "\uF884",
"phone-rotate-portrait": "\uF885",
"phone-settings": "\uF3FD",
"phone-voip": "\uF3FE",
"pi": "\uF3FF",
"pi-box": "\uF400",
"pi-hole": "\uFDCD",
"piano": "\uF67C",
"pickaxe": "\uF8B6",
"picture-in-picture-bottom-right": "\uFE3A",
"picture-in-picture-bottom-right-outline": "\uFE3B",
"picture-in-picture-top-right": "\uFE3C",
"picture-in-picture-top-right-outline": "\uFE3D",
"pier": "\uF886",
"pier-crane": "\uF887",
"pig": "\uF401",
"pig-variant": "\U000F0028",
"piggy-bank": "\U000F0029",
"pill": "\uF402",
"pillar": "\uF701",
"pin": "\uF403",
"pin-off": "\uF404",
"pin-off-outline": "\uF92F",
"pin-outline": "\uF930",
"pine-tree": "\uF405",
"pine-tree-box": "\uF406",
"pinterest": "\uF407",
"pinterest-box": "\uF408",
"pinwheel": "\uFAD4",
"pinwheel-outline": "\uFAD5",
"pipe": "\uF7E4",
"pipe-disconnected": "\uF7E5",
"pipe-leak": "\uF888",
"pirate": "\uFA07",
"pistol": "\uF702",
"piston": "\uF889",
"pizza": "\uF409",
"play": "\uF40A",
"play-box-outline": "\uF40B",
"play-circle": "\uF40C",
"play-circle-outline": "\uF40D",
"play-network": "\uF88A",
"play-network-outline": "\uFC93",
"play-outline": "\uFF38",
"play-pause": "\uF40E",
"play-protected-content": "\uF40F",
"play-speed": "\uF8FE",
"playlist-check": "\uF5C7",
"playlist-edit": "\uF8FF",
"playlist-minus": "\uF410",
"playlist-music": "\uFC94",
"playlist-music-outline": "\uFC95",
"playlist-play": "\uF411",
"playlist-plus": "\uF412",
"playlist-remove": "\uF413",
"playlist-star": "\uFDCE",
"playstation": "\uF414",
"plex": "\uF6B9",
"plus": "\uF415",
"plus-box": "\uF416",
"plus-box-multiple": "\uF334",
"plus-box-outline": "\uF703",
"plus-circle": "\uF417",
"plus-circle-multiple-outline": "\uF418",
"plus-circle-outline": "\uF419",
"plus-minus": "\uF991",
"plus-minus-box": "\uF992",
"plus-network": "\uF41A",
"plus-network-outline": "\uFC96",
"plus-one": "\uF41B",
"plus-outline": "\uF704",
"pocket": "\uF41C",
"podcast": "\uF993",
"podium": "\uFD01",
"podium-bronze": "\uFD02",
"podium-gold": "\uFD03",
"podium-silver": "\uFD04",
"point-of-sale": "\uFD6E",
"pokeball": "\uF41D",
"pokemon-go": "\uFA08",
"poker-chip": "\uF82F",
"polaroid": "\uF41E",
"poll": "\uF41F",
"poll-box": "\uF420",
"polymer": "\uF421",
"pool": "\uF606",
"popcorn": "\uF422",
"post": "\U000F002A",
"post-outline": "\U000F002B",
"postage-stamp": "\uFC97",
"pot": "\uF65A",
"pot-mix": "\uF65B",
"pound": "\uF423",
"pound-box": "\uF424",
"power": "\uF425",
"power-cycle": "\uF900",
"power-off": "\uF901",
"power-on": "\uF902",
"power-plug": "\uF6A4",
"power-plug-off": "\uF6A5",
"power-settings": "\uF426",
"power-sleep": "\uF903",
"power-socket": "\uF427",
"power-socket-au": "\uF904",
"power-socket-eu": "\uF7E6",
"power-socket-uk": "\uF7E7",
"power-socket-us": "\uF7E8",
"power-standby": "\uF905",
"powershell": "\uFA09",
"prescription": "\uF705",
"presentation": "\uF428",
"presentation-play": "\uF429",
"printer": "\uF42A",
"printer-3d": "\uF42B",
"printer-3d-nozzle": "\uFE3E",
"printer-3d-nozzle-outline": "\uFE3F",
"printer-alert": "\uF42C",
"printer-off": "\uFE40",
"printer-pos": "\U000F0079",
"printer-settings": "\uF706",
"printer-wireless": "\uFA0A",
"priority-high": "\uF603",
"priority-low": "\uF604",
"professional-hexagon": "\uF42D",
"progress-alert": "\uFC98",
"progress-check": "\uF994",
"progress-clock": "\uF995",
"progress-download": "\uF996",
"progress-upload": "\uF997",
"progress-wrench": "\uFC99",
"projector": "\uF42E",
"projector-screen": "\uF42F",
"protocol": "\uFFF9",
"publish": "\uF6A6",
"pulse": "\uF430",
"pumpkin": "\uFB9B",
"purse": "\uFF39",
"purse-outline": "\uFF3A",
"puzzle": "\uF431",
"puzzle-outline": "\uFA65",
"qi": "\uF998",
"qqchat": "\uF605",
"qrcode": "\uF432",
"qrcode-edit": "\uF8B7",
"qrcode-scan": "\uF433",
"quadcopter": "\uF434",
"quality-high": "\uF435",
"quality-low": "\uFA0B",
"quality-medium": "\uFA0C",
"quicktime": "\uF436",
"quora": "\uFD05",
"rabbit": "\uF906",
"racing-helmet": "\uFD6F",
"racquetball": "\uFD70",
"radar": "\uF437",
"radiator": "\uF438",
"radiator-disabled": "\uFAD6",
"radiator-off": "\uFAD7",
"radio": "\uF439",
"radio-am": "\uFC9A",
"radio-fm": "\uFC9B",
"radio-handheld": "\uF43A",
"radio-tower": "\uF43B",
"radioactive": "\uF43C",
"radioactive-off": "\uFEDE",
"radiobox-blank": "\uF43D",
"radiobox-marked": "\uF43E",
"radius": "\uFC9C",
"radius-outline": "\uFC9D",
"railroad-light": "\uFF3B",
"raspberry-pi": "\uF43F",
"ray-end": "\uF440",
"ray-end-arrow": "\uF441",
"ray-start": "\uF442",
"ray-start-arrow": "\uF443",
"ray-start-end": "\uF444",
"ray-vertex": "\uF445",
"react": "\uF707",
"read": "\uF447",
"receipt": "\uF449",
"record": "\uF44A",
"record-circle": "\uFEDF",
"record-circle-outline": "\uFEE0",
"record-player": "\uF999",
"record-rec": "\uF44B",
"rectangle": "\uFE41",
"rectangle-outline": "\uFE42",
"recycle": "\uF44C",
"reddit": "\uF44D",
"redo": "\uF44E",
"redo-variant": "\uF44F",
"reflect-horizontal": "\uFA0D",
"reflect-vertical": "\uFA0E",
"refresh": "\uF450",
"regex": "\uF451",
"registered-trademark": "\uFA66",
"relative-scale": "\uF452",
"reload": "\uF453",
"reminder": "\uF88B",
"remote": "\uF454",
"remote-desktop": "\uF8B8",
"remote-off": "\uFEE1",
"remote-tv": "\uFEE2",
"remote-tv-off": "\uFEE3",
"rename-box": "\uF455",
"reorder-horizontal": "\uF687",
"reorder-vertical": "\uF688",
"repeat": "\uF456",
"repeat-off": "\uF457",
"repeat-once": "\uF458",
"replay": "\uF459",
"reply": "\uF45A",
"reply-all": "\uF45B",
"reply-all-outline": "\uFF3C",
"reply-outline": "\uFF3D",
"reproduction": "\uF45C",
"resistor": "\uFB1F",
"resistor-nodes": "\uFB20",
"resize": "\uFA67",
"resize-bottom-right": "\uF45D",
"responsive": "\uF45E",
"restart": "\uF708",
"restart-off": "\uFD71",
"restore": "\uF99A",
"rewind": "\uF45F",
"rewind-10": "\uFD06",
"rewind-30": "\uFD72",
"rewind-outline": "\uF709",
"rhombus": "\uF70A",
"rhombus-medium": "\uFA0F",
"rhombus-outline": "\uF70B",
"rhombus-split": "\uFA10",
"ribbon": "\uF460",
"rice": "\uF7E9",
"ring": "\uF7EA",
"rivet": "\uFE43",
"road": "\uF461",
"road-variant": "\uF462",
"robber": "\U000F007A",
"robot": "\uF6A8",
"robot-industrial": "\uFB21",
"robot-vacuum": "\uF70C",
"robot-vacuum-variant": "\uF907",
"rocket": "\uF463",
"roller-skate": "\uFD07",
"rollerblade": "\uFD08",
"rollupjs": "\uFB9C",
"room-service": "\uF88C",
"room-service-outline": "\uFD73",
"rotate-3d": "\uFEE4",
"rotate-3d-variant": "\uF464",
"rotate-left": "\uF465",
"rotate-left-variant": "\uF466",
"rotate-orbit": "\uFD74",
"rotate-right": "\uF467",
"rotate-right-variant": "\uF468",
"rounded-corner": "\uF607",
"router-wireless": "\uF469",
"router-wireless-settings": "\uFA68",
"routes": "\uF46A",
"routes-clock": "\U000F007B",
"rowing": "\uF608",
"rss": "\uF46B",
"rss-box": "\uF46C",
"rss-off": "\uFF3E",
"ruby": "\uFD09",
"rugby": "\uFD75",
"ruler": "\uF46D",
"ruler-square": "\uFC9E",
"ruler-square-compass": "\uFEDB",
"run": "\uF70D",
"run-fast": "\uF46E",
"sack": "\uFD0A",
"sack-percent": "\uFD0B",
"safe": "\uFA69",
"safety-goggles": "\uFD0C",
"sailing": "\uFEE5",
"sale": "\uF46F",
"salesforce": "\uF88D",
"sass": "\uF7EB",
"satellite": "\uF470",
"satellite-uplink": "\uF908",
"satellite-variant": "\uF471",
"sausage": "\uF8B9",
"saw-blade": "\uFE44",
"saxophone": "\uF609",
"scale": "\uF472",
"scale-balance": "\uF5D1",
"scale-bathroom": "\uF473",
"scale-off": "\U000F007C",
"scanner": "\uF6AA",
"scanner-off": "\uF909",
"scatter-plot": "\uFEE6",
"scatter-plot-outline": "\uFEE7",
"school": "\uF474",
"scissors-cutting": "\uFA6A",
"screen-rotation": "\uF475",
"screen-rotation-lock": "\uF476",
"screw-flat-top": "\uFDCF",
"screw-lag": "\uFE54",
"screw-machine-flat-top": "\uFE55",
"screw-machine-round-top": "\uFE56",
"screw-round-top": "\uFE57",
"screwdriver": "\uF477",
"script": "\uFB9D",
"script-outline": "\uF478",
"script-text": "\uFB9E",
"script-text-outline": "\uFB9F",
"sd": "\uF479",
"seal": "\uF47A",
"seal-variant": "\uFFFA",
"search-web": "\uF70E",
"seat": "\uFC9F",
"seat-flat": "\uF47B",
"seat-flat-angled": "\uF47C",
"seat-individual-suite": "\uF47D",
"seat-legroom-extra": "\uF47E",
"seat-legroom-normal": "\uF47F",
"seat-legroom-reduced": "\uF480",
"seat-outline": "\uFCA0",
"seat-recline-extra": "\uF481",
"seat-recline-normal": "\uF482",
"seatbelt": "\uFCA1",
"security": "\uF483",
"security-network": "\uF484",
"seed": "\uFE45",
"seed-outline": "\uFE46",
"segment": "\uFEE8",
"select": "\uF485",
"select-all": "\uF486",
"select-color": "\uFD0D",
"select-compare": "\uFAD8",
"select-drag": "\uFA6B",
"select-group": "\uFF9F",
"select-inverse": "\uF487",
"select-off": "\uF488",
"select-place": "\uFFFB",
"selection": "\uF489",
"selection-drag": "\uFA6C",
"selection-ellipse": "\uFD0E",
"selection-ellipse-arrow-inside": "\uFF3F",
"selection-off": "\uF776",
"send": "\uF48A",
"send-circle": "\uFE58",
"send-circle-outline": "\uFE59",
"send-lock": "\uF7EC",
"serial-port": "\uF65C",
"server": "\uF48B",
"server-minus": "\uF48C",
"server-network": "\uF48D",
"server-network-off": "\uF48E",
"server-off": "\uF48F",
"server-plus": "\uF490",
"server-remove": "\uF491",
"server-security": "\uF492",
"set-all": "\uF777",
"set-center": "\uF778",
"set-center-right": "\uF779",
"set-left": "\uF77A",
"set-left-center": "\uF77B",
"set-left-right": "\uF77C",
"set-none": "\uF77D",
"set-right": "\uF77E",
"set-top-box": "\uF99E",
"settings": "\uF493",
"settings-box": "\uF494",
"settings-helper": "\uFA6D",
"settings-outline": "\uF8BA",
"settings-transfer": "\U000F007D",
"settings-transfer-outline": "\U000F007E",
"shape": "\uF830",
"shape-circle-plus": "\uF65D",
"shape-outline": "\uF831",
"shape-plus": "\uF495",
"shape-polygon-plus": "\uF65E",
"shape-rectangle-plus": "\uF65F",
"shape-square-plus": "\uF660",
"share": "\uF496",
"share-off": "\uFF40",
"share-off-outline": "\uFF41",
"share-outline": "\uF931",
"share-variant": "\uF497",
"sheep": "\uFCA2",
"shield": "\uF498",
"shield-account": "\uF88E",
"shield-account-outline": "\uFA11",
"shield-airplane": "\uF6BA",
"shield-airplane-outline": "\uFCA3",
"shield-alert": "\uFEE9",
"shield-alert-outline": "\uFEEA",
"shield-car": "\uFFA0",
"shield-check": "\uF565",
"shield-check-outline": "\uFCA4",
"shield-cross": "\uFCA5",
"shield-cross-outline": "\uFCA6",
"shield-half-full": "\uF77F",
"shield-home": "\uF689",
"shield-home-outline": "\uFCA7",
"shield-key": "\uFBA0",
"shield-key-outline": "\uFBA1",
"shield-link-variant": "\uFD0F",
"shield-link-variant-outline": "\uFD10",
"shield-lock": "\uF99C",
"shield-lock-outline": "\uFCA8",
"shield-off": "\uF99D",
"shield-off-outline": "\uF99B",
"shield-outline": "\uF499",
"shield-plus": "\uFAD9",
"shield-plus-outline": "\uFADA",
"shield-remove": "\uFADB",
"shield-remove-outline": "\uFADC",
"shield-search": "\uFD76",
"shield-sun": "\U000F007F",
"shield-sun-outline": "\U000F0080",
"ship-wheel": "\uF832",
"shoe-formal": "\uFB22",
"shoe-heel": "\uFB23",
"shoe-print": "\uFE5A",
"shopify": "\uFADD",
"shopping": "\uF49A",
"shopping-music": "\uF49B",
"shopping-search": "\uFFA1",
"shovel": "\uF70F",
"shovel-off": "\uF710",
"shower": "\uF99F",
"shower-head": "\uF9A0",
"shredder": "\uF49C",
"shuffle": "\uF49D",
"shuffle-disabled": "\uF49E",
"shuffle-variant": "\uF49F",
"sigma": "\uF4A0",
"sigma-lower": "\uF62B",
"sign-caution": "\uF4A1",
"sign-direction": "\uF780",
"sign-direction-minus": "\U000F0022",
"sign-direction-plus": "\uFFFD",
"sign-direction-remove": "\uFFFE",
"sign-text": "\uF781",
"signal": "\uF4A2",
"signal-2g": "\uF711",
"signal-3g": "\uF712",
"signal-4g": "\uF713",
"signal-5g": "\uFA6E",
"signal-cellular-1": "\uF8BB",
"signal-cellular-2": "\uF8BC",
"signal-cellular-3": "\uF8BD",
"signal-cellular-outline": "\uF8BE",
"signal-distance-variant": "\uFE47",
"signal-hspa": "\uF714",
"signal-hspa-plus": "\uF715",
"signal-off": "\uF782",
"signal-variant": "\uF60A",
"signature": "\uFE5B",
"signature-freehand": "\uFE5C",
"signature-image": "\uFE5D",
"signature-text": "\uFE5E",
"silo": "\uFB24",
"silverware": "\uF4A3",
"silverware-clean": "\uFFFF",
"silverware-fork": "\uF4A4",
"silverware-fork-knife": "\uFA6F",
"silverware-spoon": "\uF4A5",
"silverware-variant": "\uF4A6",
"sim": "\uF4A7",
"sim-alert": "\uF4A8",
"sim-off": "\uF4A9",
"sina-weibo": "\uFADE",
"sitemap": "\uF4AA",
"skate": "\uFD11",
"skew-less": "\uFD12",
"skew-more": "\uFD13",
"skip-backward": "\uF4AB",
"skip-backward-outline": "\uFF42",
"skip-forward": "\uF4AC",
"skip-forward-outline": "\uFF43",
"skip-next": "\uF4AD",
"skip-next-circle": "\uF661",
"skip-next-circle-outline": "\uF662",
"skip-next-outline": "\uFF44",
"skip-previous": "\uF4AE",
"skip-previous-circle": "\uF663",
"skip-previous-circle-outline": "\uF664",
"skip-previous-outline": "\uFF45",
"skull": "\uF68B",
"skull-crossbones": "\uFBA2",
"skull-crossbones-outline": "\uFBA3",
"skull-outline": "\uFBA4",
"skype": "\uF4AF",
"skype-business": "\uF4B0",
"slack": "\uF4B1",
"slackware": "\uF90A",
"slash-forward": "\U000F0000",
"slash-forward-box": "\U000F0001",
"sleep": "\uF4B2",
"sleep-off": "\uF4B3",
"slope-downhill": "\uFE5F",
"slope-uphill": "\uFE60",
"smog": "\uFA70",
"smoke-detector": "\uF392",
"smoking": "\uF4B4",
"smoking-off": "\uF4B5",
"snapchat": "\uF4B6",
"snowflake": "\uF716",
"snowflake-alert": "\uFF46",
"snowflake-variant": "\uFF47",
"snowman": "\uF4B7",
"soccer": "\uF4B8",
"soccer-field": "\uF833",
"sofa": "\uF4B9",
"solar-panel": "\uFD77",
"solar-panel-large": "\uFD78",
"solar-power": "\uFA71",
"solid": "\uF68C",
"sort": "\uF4BA",
"sort-alphabetical": "\uF4BB",
"sort-ascending": "\uF4BC",
"sort-descending": "\uF4BD",
"sort-numeric": "\uF4BE",
"sort-variant": "\uF4BF",
"sort-variant-lock": "\uFCA9",
"sort-variant-lock-open": "\uFCAA",
"soundcloud": "\uF4C0",
"source-branch": "\uF62C",
"source-commit": "\uF717",
"source-commit-end": "\uF718",
"source-commit-end-local": "\uF719",
"source-commit-local": "\uF71A",
"source-commit-next-local": "\uF71B",
"source-commit-start": "\uF71C",
"source-commit-start-next-local": "\uF71D",
"source-fork": "\uF4C1",
"source-merge": "\uF62D",
"source-pull": "\uF4C2",
"source-repository": "\uFCAB",
"source-repository-multiple": "\uFCAC",
"soy-sauce": "\uF7ED",
"spa": "\uFCAD",
"spa-outline": "\uFCAE",
"space-invaders": "\uFBA5",
"spade": "\uFE48",
"speaker": "\uF4C3",
"speaker-bluetooth": "\uF9A1",
"speaker-multiple": "\uFD14",
"speaker-off": "\uF4C4",
"speaker-wireless": "\uF71E",
"speedometer": "\uF4C5",
"speedometer-medium": "\uFFA2",
"speedometer-slow": "\uFFA3",
"spellcheck": "\uF4C6",
"spider-web": "\uFBA6",
"spotify": "\uF4C7",
"spotlight": "\uF4C8",
"spotlight-beam": "\uF4C9",
"spray": "\uF665",
"spray-bottle": "\uFADF",
"sprinkler": "\U000F0081",
"sprinkler-variant": "\U000F0082",
"sprout": "\uFE49",
"sprout-outline": "\uFE4A",
"square": "\uF763",
"square-edit-outline": "\uF90B",
"square-inc": "\uF4CA",
"square-inc-cash": "\uF4CB",
"square-medium": "\uFA12",
"square-medium-outline": "\uFA13",
"square-outline": "\uF762",
"square-root": "\uF783",
"square-root-box": "\uF9A2",
"square-small": "\uFA14",
"squeegee": "\uFAE0",
"ssh": "\uF8BF",
"stack-exchange": "\uF60B",
"stack-overflow": "\uF4CC",
"stadium": "\U000F001A",
"stadium-variant": "\uF71F",
"stairs": "\uF4CD",
"stamper": "\uFD15",
"standard-definition": "\uF7EE",
"star": "\uF4CE",
"star-box": "\uFA72",
"star-box-outline": "\uFA73",
"star-circle": "\uF4CF",
"star-circle-outline": "\uF9A3",
"star-face": "\uF9A4",
"star-four-points": "\uFAE1",
"star-four-points-outline": "\uFAE2",
"star-half": "\uF4D0",
"star-off": "\uF4D1",
"star-outline": "\uF4D2",
"star-three-points": "\uFAE3",
"star-three-points-outline": "\uFAE4",
"steam": "\uF4D3",
"steam-box": "\uF90C",
"steering": "\uF4D4",
"steering-off": "\uF90D",
"step-backward": "\uF4D5",
"step-backward-2": "\uF4D6",
"step-forward": "\uF4D7",
"step-forward-2": "\uF4D8",
"stethoscope": "\uF4D9",
"sticker": "\uF5D0",
"sticker-emoji": "\uF784",
"stocking": "\uF4DA",
"stop": "\uF4DB",
"stop-circle": "\uF666",
"stop-circle-outline": "\uF667",
"store": "\uF4DC",
"store-24-hour": "\uF4DD",
"stove": "\uF4DE",
"strava": "\uFB25",
"stretch-to-page": "\uFF48",
"stretch-to-page-outline": "\uFF49",
"subdirectory-arrow-left": "\uF60C",
"subdirectory-arrow-right": "\uF60D",
"subtitles": "\uFA15",
"subtitles-outline": "\uFA16",
"subway": "\uF6AB",
"subway-alert-variant": "\uFD79",
"subway-variant": "\uF4DF",
"summit": "\uF785",
"sunglasses": "\uF4E0",
"surround-sound": "\uF5C5",
"surround-sound-2-0": "\uF7EF",
"surround-sound-3-1": "\uF7F0",
"surround-sound-5-1": "\uF7F1",
"surround-sound-7-1": "\uF7F2",
"svg": "\uF720",
"swap-horizontal": "\uF4E1",
"swap-horizontal-bold": "\uFBA9",
"swap-horizontal-circle": "\U000F0002",
"swap-horizontal-circle-outline": "\U000F0003",
"swap-horizontal-variant": "\uF8C0",
"swap-vertical": "\uF4E2",
"swap-vertical-bold": "\uFBAA",
"swap-vertical-circle": "\U000F0004",
"swap-vertical-circle-outline": "\U000F0005",
"swap-vertical-variant": "\uF8C1",
"swim": "\uF4E3",
"switch": "\uF4E4",
"sword": "\uF4E5",
"sword-cross": "\uF786",
"symfony": "\uFAE5",
"sync": "\uF4E6",
"sync-alert": "\uF4E7",
"sync-off": "\uF4E8",
"tab": "\uF4E9",
"tab-minus": "\uFB26",
"tab-plus": "\uF75B",
"tab-remove": "\uFB27",
"tab-unselected": "\uF4EA",
"table": "\uF4EB",
"table-border": "\uFA17",
"table-chair": "\U000F0083",
"table-column": "\uF834",
"table-column-plus-after": "\uF4EC",
"table-column-plus-before": "\uF4ED",
"table-column-remove": "\uF4EE",
"table-column-width": "\uF4EF",
"table-edit": "\uF4F0",
"table-large": "\uF4F1",
"table-large-plus": "\uFFA4",
"table-large-remove": "\uFFA5",
"table-merge-cells": "\uF9A5",
"table-of-contents": "\uF835",
"table-plus": "\uFA74",
"table-remove": "\uFA75",
"table-row": "\uF836",
"table-row-height": "\uF4F2",
"table-row-plus-after": "\uF4F3",
"table-row-plus-before": "\uF4F4",
"table-row-remove": "\uF4F5",
"table-search": "\uF90E",
"table-settings": "\uF837",
"table-tennis": "\uFE4B",
"tablet": "\uF4F6",
"tablet-android": "\uF4F7",
"tablet-cellphone": "\uF9A6",
"tablet-dashboard": "\uFEEB",
"tablet-ipad": "\uF4F8",
"taco": "\uF761",
"tag": "\uF4F9",
"tag-faces": "\uF4FA",
"tag-heart": "\uF68A",
"tag-heart-outline": "\uFBAB",
"tag-minus": "\uF90F",
"tag-multiple": "\uF4FB",
"tag-outline": "\uF4FC",
"tag-plus": "\uF721",
"tag-remove": "\uF722",
"tag-text-outline": "\uF4FD",
"tank": "\uFD16",
"tanker-truck": "\U000F0006",
"tape-measure": "\uFB28",
"target": "\uF4FE",
"target-account": "\uFBAC",
"target-variant": "\uFA76",
"taxi": "\uF4FF",
"tea": "\uFD7A",
"tea-outline": "\uFD7B",
"teach": "\uF88F",
"teamviewer": "\uF500",
"telegram": "\uF501",
"telescope": "\uFB29",
"television": "\uF502",
"television-box": "\uF838",
"television-classic": "\uF7F3",
"television-classic-off": "\uF839",
"television-guide": "\uF503",
"television-off": "\uF83A",
"television-pause": "\uFFA6",
"television-play": "\uFEEC",
"television-stop": "\uFFA7",
"temperature-celsius": "\uF504",
"temperature-fahrenheit": "\uF505",
"temperature-kelvin": "\uF506",
"tennis": "\uFD7C",
"tennis-ball": "\uF507",
"tent": "\uF508",
"terraform": "\U000F0084",
"terrain": "\uF509",
"test-tube": "\uF668",
"test-tube-empty": "\uF910",
"test-tube-off": "\uF911",
"text": "\uF9A7",
"text-shadow": "\uF669",
"text-short": "\uF9A8",
"text-subject": "\uF9A9",
"text-to-speech": "\uF50A",
"text-to-speech-off": "\uF50B",
"textbox": "\uF60E",
"textbox-password": "\uF7F4",
"texture": "\uF50C",
"texture-box": "\U000F0007",
"theater": "\uF50D",
"theme-light-dark": "\uF50E",
"thermometer": "\uF50F",
"thermometer-alert": "\uFE61",
"thermometer-chevron-down": "\uFE62",
"thermometer-chevron-up": "\uFE63",
"thermometer-lines": "\uF510",
"thermometer-minus": "\uFE64",
"thermometer-plus": "\uFE65",
"thermostat": "\uF393",
"thermostat-box": "\uF890",
"thought-bubble": "\uF7F5",
"thought-bubble-outline": "\uF7F6",
"thumb-down": "\uF511",
"thumb-down-outline": "\uF512",
"thumb-up": "\uF513",
"thumb-up-outline": "\uF514",
"thumbs-up-down": "\uF515",
"ticket": "\uF516",
"ticket-account": "\uF517",
"ticket-confirmation": "\uF518",
"ticket-outline": "\uF912",
"ticket-percent": "\uF723",
"tie": "\uF519",
"tilde": "\uF724",
"timelapse": "\uF51A",
"timeline": "\uFBAD",
"timeline-alert": "\uFFB2",
"timeline-alert-outline": "\uFFB5",
"timeline-help": "\uFFB6",
"timeline-help-outline": "\uFFB7",
"timeline-outline": "\uFBAE",
"timeline-plus": "\uFFB3",
"timeline-plus-outline": "\uFFB4",
"timeline-text": "\uFBAF",
"timeline-text-outline": "\uFBB0",
"timer": "\uF51B",
"timer-10": "\uF51C",
"timer-3": "\uF51D",
"timer-off": "\uF51E",
"timer-sand": "\uF51F",
"timer-sand-empty": "\uF6AC",
"timer-sand-full": "\uF78B",
"timetable": "\uF520",
"toaster": "\U000F0085",
"toaster-oven": "\uFCAF",
"toggle-switch": "\uF521",
"toggle-switch-off": "\uF522",
"toggle-switch-off-outline": "\uFA18",
"toggle-switch-outline": "\uFA19",
"toilet": "\uF9AA",
"toolbox": "\uF9AB",
"toolbox-outline": "\uF9AC",
"tools": "\U000F0086",
"tooltip": "\uF523",
"tooltip-account": "\uF00C",
"tooltip-edit": "\uF524",
"tooltip-image": "\uF525",
"tooltip-image-outline": "\uFBB1",
"tooltip-outline": "\uF526",
"tooltip-plus": "\uFBB2",
"tooltip-plus-outline": "\uF527",
"tooltip-text": "\uF528",
"tooltip-text-outline": "\uFBB3",
"tooth": "\uF8C2",
"tooth-outline": "\uF529",
"tor": "\uF52A",
"tortoise": "\uFD17",
"tournament": "\uF9AD",
"tower-beach": "\uF680",
"tower-fire": "\uF681",
"towing": "\uF83B",
"track-light": "\uF913",
"trackpad": "\uF7F7",
"trackpad-lock": "\uF932",
"tractor": "\uF891",
"trademark": "\uFA77",
"traffic-light": "\uF52B",
"train": "\uF52C",
"train-car": "\uFBB4",
"train-variant": "\uF8C3",
"tram": "\uF52D",
"tram-side": "\U000F0008",
"transcribe": "\uF52E",
"transcribe-close": "\uF52F",
"transfer": "\U000F0087",
"transfer-down": "\uFD7D",
"transfer-left": "\uFD7E",
"transfer-right": "\uF530",
"transfer-up": "\uFD7F",
"transit-connection": "\uFD18",
"transit-connection-variant": "\uFD19",
"transit-detour": "\uFFA8",
"transit-transfer": "\uF6AD",
"transition": "\uF914",
"transition-masked": "\uF915",
"translate": "\uF5CA",
"translate-off": "\uFE66",
"transmission-tower": "\uFD1A",
"trash-can": "\uFA78",
"trash-can-outline": "\uFA79",
"treasure-chest": "\uF725",
"tree": "\uF531",
"tree-outline": "\uFE4C",
"trello": "\uF532",
"trending-down": "\uF533",
"trending-neutral": "\uF534",
"trending-up": "\uF535",
"triangle": "\uF536",
"triangle-outline": "\uF537",
"triforce": "\uFBB5",
"trophy": "\uF538",
"trophy-award": "\uF539",
"trophy-broken": "\uFD80",
"trophy-outline": "\uF53A",
"trophy-variant": "\uF53B",
"trophy-variant-outline": "\uF53C",
"truck": "\uF53D",
"truck-check": "\uFCB0",
"truck-delivery": "\uF53E",
"truck-fast": "\uF787",
"truck-trailer": "\uF726",
"tshirt-crew": "\uFA7A",
"tshirt-crew-outline": "\uF53F",
"tshirt-v": "\uFA7B",
"tshirt-v-outline": "\uF540",
"tumble-dryer": "\uF916",
"tumblr": "\uF541",
"tumblr-box": "\uF917",
"tumblr-reblog": "\uF542",
"tune": "\uF62E",
"tune-vertical": "\uF66A",
"turnstile": "\uFCB1",
"turnstile-outline": "\uFCB2",
"turtle": "\uFCB3",
"twitch": "\uF543",
"twitter": "\uF544",
"twitter-box": "\uF545",
"twitter-circle": "\uF546",
"twitter-retweet": "\uF547",
"two-factor-authentication": "\uF9AE",
"typewriter": "\uFF4A",
"uber": "\uF748",
"ubisoft": "\uFBB6",
"ubuntu": "\uF548",
"ultra-high-definition": "\uF7F8",
"umbraco": "\uF549",
"umbrella": "\uF54A",
"umbrella-closed": "\uF9AF",
"umbrella-outline": "\uF54B",
"undo": "\uF54C",
"undo-variant": "\uF54D",
"unfold-less-horizontal": "\uF54E",
"unfold-less-vertical": "\uF75F",
"unfold-more-horizontal": "\uF54F",
"unfold-more-vertical": "\uF760",
"ungroup": "\uF550",
"unicode": "\uFEED",
"unity": "\uF6AE",
"unreal": "\uF9B0",
"untappd": "\uF551",
"update": "\uF6AF",
"upload": "\uF552",
"upload-multiple": "\uF83C",
"upload-network": "\uF6F5",
"upload-network-outline": "\uFCB4",
"upload-outline": "\uFE67",
"usb": "\uF553",
"valve": "\U000F0088",
"valve-closed": "\U000F0089",
"valve-open": "\U000F008A",
"van-passenger": "\uF7F9",
"van-utility": "\uF7FA",
"vanish": "\uF7FB",
"variable": "\uFAE6",
"vector-arrange-above": "\uF554",
"vector-arrange-below": "\uF555",
"vector-bezier": "\uFAE7",
"vector-circle": "\uF556",
"vector-circle-variant": "\uF557",
"vector-combine": "\uF558",
"vector-curve": "\uF559",
"vector-difference": "\uF55A",
"vector-difference-ab": "\uF55B",
"vector-difference-ba": "\uF55C",
"vector-ellipse": "\uF892",
"vector-intersection": "\uF55D",
"vector-line": "\uF55E",
"vector-link": "\U000F0009",
"vector-point": "\uF55F",
"vector-polygon": "\uF560",
"vector-polyline": "\uF561",
"vector-radius": "\uF749",
"vector-rectangle": "\uF5C6",
"vector-selection": "\uF562",
"vector-square": "\uF001",
"vector-triangle": "\uF563",
"vector-union": "\uF564",
"venmo": "\uF578",
"vhs": "\uFA1A",
"vibrate": "\uF566",
"vibrate-off": "\uFCB5",
"video": "\uF567",
"video-3d": "\uF7FC",
"video-3d-variant": "\uFEEE",
"video-4k-box": "\uF83D",
"video-account": "\uF918",
"video-check": "\U000F008B",
"video-check-outline": "\U000F008C",
"video-image": "\uF919",
"video-input-antenna": "\uF83E",
"video-input-component": "\uF83F",
"video-input-hdmi": "\uF840",
"video-input-scart": "\uFFA9",
"video-input-svideo": "\uF841",
"video-minus": "\uF9B1",
"video-off": "\uF568",
"video-off-outline": "\uFBB7",
"video-outline": "\uFBB8",
"video-plus": "\uF9B2",
"video-stabilization": "\uF91A",
"video-switch": "\uF569",
"video-vintage": "\uFA1B",
"video-wireless": "\uFEEF",
"video-wireless-outline": "\uFEF0",
"view-agenda": "\uF56A",
"view-array": "\uF56B",
"view-carousel": "\uF56C",
"view-column": "\uF56D",
"view-comfy": "\uFE4D",
"view-compact": "\uFE4E",
"view-compact-outline": "\uFE4F",
"view-dashboard": "\uF56E",
"view-dashboard-outline": "\uFA1C",
"view-dashboard-variant": "\uF842",
"view-day": "\uF56F",
"view-grid": "\uF570",
"view-grid-plus": "\uFFAA",
"view-headline": "\uF571",
"view-list": "\uF572",
"view-module": "\uF573",
"view-parallel": "\uF727",
"view-quilt": "\uF574",
"view-sequential": "\uF728",
"view-split-horizontal": "\uFBA7",
"view-split-vertical": "\uFBA8",
"view-stream": "\uF575",
"view-week": "\uF576",
"vimeo": "\uF577",
"violin": "\uF60F",
"virtual-reality": "\uF893",
"visual-studio": "\uF610",
"visual-studio-code": "\uFA1D",
"vk": "\uF579",
"vk-box": "\uF57A",
"vk-circle": "\uF57B",
"vlc": "\uF57C",
"voice": "\uF5CB",
"voice-off": "\uFEF1",
"voicemail": "\uF57D",
"volleyball": "\uF9B3",
"volume-high": "\uF57E",
"volume-low": "\uF57F",
"volume-medium": "\uF580",
"volume-minus": "\uF75D",
"volume-mute": "\uF75E",
"volume-off": "\uF581",
"volume-plus": "\uF75C",
"volume-variant-off": "\uFE68",
"vote": "\uFA1E",
"vote-outline": "\uFA1F",
"vpn": "\uF582",
"vuejs": "\uF843",
"vuetify": "\uFE50",
"walk": "\uF583",
"wall": "\uF7FD",
"wall-sconce": "\uF91B",
"wall-sconce-flat": "\uF91C",
"wall-sconce-variant": "\uF91D",
"wallet": "\uF584",
"wallet-giftcard": "\uF585",
"wallet-membership": "\uF586",
"wallet-outline": "\uFBB9",
"wallet-plus": "\uFFAB",
"wallet-plus-outline": "\uFFAC",
"wallet-travel": "\uF587",
"wallpaper": "\uFE69",
"wan": "\uF588",
"wardrobe": "\uFFAD",
"wardrobe-outline": "\uFFAE",
"warehouse": "\uFFBB",
"washing-machine": "\uF729",
"watch": "\uF589",
"watch-export": "\uF58A",
"watch-export-variant": "\uF894",
"watch-import": "\uF58B",
"watch-import-variant": "\uF895",
"watch-variant": "\uF896",
"watch-vibrate": "\uF6B0",
"watch-vibrate-off": "\uFCB6",
"water": "\uF58C",
"water-boiler": "\uFFAF",
"water-off": "\uF58D",
"water-outline": "\uFE6A",
"water-percent": "\uF58E",
"water-pump": "\uF58F",
"water-pump-off": "\uFFB0",
"water-well": "\U000F008D",
"water-well-outline": "\U000F008E",
"watermark": "\uF612",
"wave": "\uFF4B",
"waves": "\uF78C",
"waze": "\uFBBA",
"weather-cloudy": "\uF590",
"weather-cloudy-alert": "\uFF4C",
"weather-cloudy-arrow-right": "\uFE51",
"weather-fog": "\uF591",
"weather-hail": "\uF592",
"weather-hazy": "\uFF4D",
"weather-hurricane": "\uF897",
"weather-lightning": "\uF593",
"weather-lightning-rainy": "\uF67D",
"weather-night": "\uF594",
"weather-night-partly-cloudy": "\uFF4E",
"weather-partly-cloudy": "\uF595",
"weather-partly-lightning": "\uFF4F",
"weather-partly-rainy": "\uFF50",
"weather-partly-snowy": "\uFF51",
"weather-partly-snowy-rainy": "\uFF52",
"weather-pouring": "\uF596",
"weather-rainy": "\uF597",
"weather-snowy": "\uF598",
"weather-snowy-heavy": "\uFF53",
"weather-snowy-rainy": "\uF67E",
"weather-sunny": "\uF599",
"weather-sunny-alert": "\uFF54",
"weather-sunset": "\uF59A",
"weather-sunset-down": "\uF59B",
"weather-sunset-up": "\uF59C",
"weather-tornado": "\uFF55",
"weather-windy": "\uF59D",
"weather-windy-variant": "\uF59E",
"web": "\uF59F",
"web-box": "\uFFB1",
"webcam": "\uF5A0",
"webhook": "\uF62F",
"webpack": "\uF72A",
"wechat": "\uF611",
"weight": "\uF5A1",
"weight-gram": "\uFD1B",
"weight-kilogram": "\uF5A2",
"weight-pound": "\uF9B4",
"whatsapp": "\uF5A3",
"wheelchair-accessibility": "\uF5A4",
"whistle": "\uF9B5",
"white-balance-auto": "\uF5A5",
"white-balance-incandescent": "\uF5A6",
"white-balance-iridescent": "\uF5A7",
"white-balance-sunny": "\uF5A8",
"widgets": "\uF72B",
"wifi": "\uF5A9",
"wifi-off": "\uF5AA",
"wifi-star": "\uFE6B",
"wifi-strength-1": "\uF91E",
"wifi-strength-1-alert": "\uF91F",
"wifi-strength-1-lock": "\uF920",
"wifi-strength-2": "\uF921",
"wifi-strength-2-alert": "\uF922",
"wifi-strength-2-lock": "\uF923",
"wifi-strength-3": "\uF924",
"wifi-strength-3-alert": "\uF925",
"wifi-strength-3-lock": "\uF926",
"wifi-strength-4": "\uF927",
"wifi-strength-4-alert": "\uF928",
"wifi-strength-4-lock": "\uF929",
"wifi-strength-alert-outline": "\uF92A",
"wifi-strength-lock-outline": "\uF92B",
"wifi-strength-off": "\uF92C",
"wifi-strength-off-outline": "\uF92D",
"wifi-strength-outline": "\uF92E",
"wii": "\uF5AB",
"wiiu": "\uF72C",
"wikipedia": "\uF5AC",
"wind-turbine": "\uFD81",
"window-close": "\uF5AD",
"window-closed": "\uF5AE",
"window-maximize": "\uF5AF",
"window-minimize": "\uF5B0",
"window-open": "\uF5B1",
"window-restore": "\uF5B2",
"windows": "\uF5B3",
"windows-classic": "\uFA20",
"wiper": "\uFAE8",
"wiper-wash": "\uFD82",
"wordpress": "\uF5B4",
"worker": "\uF5B5",
"wrap": "\uF5B6",
"wrap-disabled": "\uFBBB",
"wrench": "\uF5B7",
"wrench-outline": "\uFBBC",
"wunderlist": "\uF5B8",
"xamarin": "\uF844",
"xamarin-outline": "\uF845",
"xaml": "\uF673",
"xbox": "\uF5B9",
"xbox-controller": "\uF5BA",
"xbox-controller-battery-alert": "\uF74A",
"xbox-controller-battery-charging": "\uFA21",
"xbox-controller-battery-empty": "\uF74B",
"xbox-controller-battery-full": "\uF74C",
"xbox-controller-battery-low": "\uF74D",
"xbox-controller-battery-medium": "\uF74E",
"xbox-controller-battery-unknown": "\uF74F",
"xbox-controller-menu": "\uFE52",
"xbox-controller-off": "\uF5BB",
"xbox-controller-view": "\uFE53",
"xda": "\uF5BC",
"xing": "\uF5BD",
"xing-box": "\uF5BE",
"xing-circle": "\uF5BF",
"xml": "\uF5C0",
"xmpp": "\uF7FE",
"yahoo": "\uFB2A",
"yammer": "\uF788",
"yeast": "\uF5C1",
"yelp": "\uF5C2",
"yin-yang": "\uF67F",
"youtube": "\uF5C3",
"youtube-creator-studio": "\uF846",
"youtube-gaming": "\uF847",
"youtube-subscription": "\uFD1C",
"youtube-tv": "\uF448",
"z-wave": "\uFAE9",
"zend": "\uFAEA",
"zigbee": "\uFD1D",
"zip-box": "\uF5C4",
"zip-box-outline": "\U000F001B",
"zip-disk": "\uFA22",
"zodiac-aquarius": "\uFA7C",
"zodiac-aries": "\uFA7D",
"zodiac-cancer": "\uFA7E",
"zodiac-capricorn": "\uFA7F",
"zodiac-gemini": "\uFA80",
"zodiac-leo": "\uFA81",
"zodiac-libra": "\uFA82",
"zodiac-pisces": "\uFA83",
"zodiac-sagittarius": "\uFA84",
"zodiac-scorpio": "\uFA85",
"zodiac-taurus": "\uFA86",
"zodiac-virgo": "\uFA87",
}
| [
"73114649+barry-2710@users.noreply.github.com"
] | 73114649+barry-2710@users.noreply.github.com |
a15ba967a5a20d5c51a3fe4bd4b1b7ee046e37de | 350db570521d3fc43f07df645addb9d6e648c17e | /0779_K-th_Symbol_in_Grammar/solution_test.py | 954e62fd402ddafba1bf568523536c189f2aedf3 | [] | no_license | benjaminhuanghuang/ben-leetcode | 2efcc9185459a1dd881c6e2ded96c42c5715560a | a2cd0dc5e098080df87c4fb57d16877d21ca47a3 | refs/heads/master | 2022-12-10T02:30:06.744566 | 2022-11-27T04:06:52 | 2022-11-27T04:06:52 | 236,252,145 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 378 | py |
'''
779. K-th Symbol in Grammar
Level: Medium
https://leetcode.com/problems/k-th-symbol-in-grammar
'''
import unittest
class TestSum(unittest.TestCase):
    """Sanity-check tests for the built-in sum()."""

    def test_sum(self):
        """sum() over a list totals its elements."""
        self.assertEqual(sum([1, 2, 3]), 6, "Should be 6")

    def test_sum_tuple(self):
        """sum() over a tuple totals its elements.

        Bug fix: the original summed (1, 2, 2), which equals 5, while
        asserting the result is 6 — the test always failed. The tuple is
        corrected to (1, 2, 3) to match the asserted total and message.
        """
        self.assertEqual(sum((1, 2, 3)), 6, "Should be 6")
if __name__ == '__main__':
unittest.main() | [
"benjaminhuanghuang@gmail.com"
] | benjaminhuanghuang@gmail.com |
729f9a3979e886e96caff217aede3262a8025832 | 4e3bd458f62b4184c3a09cd23cbba4c82d8c40e8 | /Leetcode_Easy/169_Majority_Element.py | a46268a29f53f5a0a3e4e6795fae0c67883b5e64 | [] | no_license | gaoyanwang/Algorithms_and_Data_Structures | 07cc649c1e4b2604b7c845dda3f89696a818ede3 | 19a0171af20735e3233fc82b9a3e0ca4d7b9ae87 | refs/heads/master | 2020-04-15T12:48:03.144043 | 2016-11-17T06:12:38 | 2016-11-17T06:12:38 | 63,971,701 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | class Solution(object):
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
candidate, count = None, 0
for e in nums:
if count == 0:
candidate, count = e, count + 1
elif e == candidate:
count = count + 1
else:
count = count - 1
return candidate
# Time:O(n)
# Space:O(1)
| [
"gaoyanwang68@gmail.com"
] | gaoyanwang68@gmail.com |
4f7dbf9ae47f08b5c51baea7cc318e1300cd2c29 | 4d319be5778463ffe520811abee00a969e978e2b | /scrap/test.py | 49e2d8b06a9ebb9ee321277c7f308f8184834e56 | [] | no_license | dzhou121/scrap | 22edc777c745c83b6c5d66b36eeea86814b13fb5 | b7460c20a29c9de8809187bcbcce0494bb082cbf | refs/heads/master | 2020-04-09T19:17:41.942641 | 2013-05-22T10:45:58 | 2013-05-22T10:45:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | import unittest
import stubout
class TestCase(unittest.TestCase):
""" Base test case for all unit tests """
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.addCleanup(self.stubs.UnsetAll)
self.addCleanup(self.stubs.SmartUnsetAll)
| [
"dzhou121@gmail.com"
] | dzhou121@gmail.com |
ae5dfacd6fbc6b933807feff6753196811247728 | 2a8f90da3aecd8e0b420e5ca2c24c5987285b5ec | /model/pipeline.py | fc94d9e10bfa31bcbc158f04491d1fb045cfdd9d | [] | no_license | wangyuchen2020/AuxST | e5baedaf65c275bc2db8d83a9487080303c930f8 | cf4057005ff09ffb51dd61d66668cbd587c2fe26 | refs/heads/main | 2023-09-02T15:19:40.623235 | 2021-11-09T09:46:04 | 2021-11-09T09:46:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,783 | py | import torchvision
import torch
import utils
from model import net
def setup(train_patients, test_patients, args, device, cv = False):
### Get mean and std
train_dataset = utils.dataloader.Spatial(train_patients,
count_root='training/counts/',
img_root='training/images/',
window=args.window, gene_filter=args.gene_filter,
transform=torchvision.transforms.ToTensor()) # range [0, 255] -> [0.0,1.0]
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch,
num_workers=args.workers, shuffle=True)
mean, std, count_mean, count_std = \
utils.normalize.get_mean_and_std(train_loader, args)
### Train transform
train_transform = torchvision.transforms.Compose([
# torchvision.transforms.Resize((224, 224)), # can resize if the input is not 224
torchvision.transforms.RandomHorizontalFlip(), # default p = 0.5
torchvision.transforms.RandomVerticalFlip(),
torchvision.transforms.RandomApply([torchvision.transforms.RandomRotation((90, 90))]),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=mean, std=std)])
### Train data loader
train_dataset = utils.dataloader.Spatial(train_patients,
count_root='training/counts/',
img_root='training/images/',
window=args.window, gene_filter=args.gene_filter,
transform=train_transform,normalization = [count_mean, count_std]) # range [0, 255] -> [0.0,1.0]
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch,
num_workers=args.workers, shuffle=True)
### Val / test transform
val_transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=mean, std=std)])
### Test data loader
if cv:
test_dataset = utils.dataloader.Spatial(test_patients,
count_root='training/counts/',
img_root='training/images/',
window=args.window, gene_filter=args.gene_filter,
transform = val_transform, normalization = [count_mean, count_std])
else:
test_dataset = utils.dataloader.Spatial(test_patients,
count_root='test/counts/',
img_root='test/images/',
window=args.window, gene_filter=args.gene_filter,
transform = val_transform, normalization = [count_mean, count_std])
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch,
num_workers=args.workers, shuffle=True)
### Model setup
# model = ViT('B_16', pretrained=True)
# model.fc = torch.nn.Linear(in_features=model.fc.in_features, out_features= outputs, bias=True)
architecture = net.set_models(args.model) # raw model
# set pretrained or fine-tuning, default is to initialize all the layers, only train fc? or based on all layers?
if args.transfer == 'randinit':
elif args.transfer == 'randinit':
else:
model = net.set_out_features(architecture, args.gene_filter)
# for param in model.parameters():
# param.requires_grad = False
# for param in model.classifier[-1].parameters():
# param.requires_grad = True
# model.classifier[-1].weight.data.zero_()
# model.classifier[-1].bias.data = torch.tensor(count_mean).clone()
model = torch.nn.DataParallel(model)
model.to(device)
criterion = torch.nn.MSELoss()
# optim = torch.optim.__dict__['Adam'](model.parameters(), lr=3e-4, weight_decay = 1e-6) # here need to be revised
optim = torch.optim.__dict__['SGD'](model.parameters(), lr=1e-3,
# momentum=0.9) # here need to be revised
momentum=0.9, weight_decay = 1e-6) # here need to be revised
# optim = torch.optim.AdamW(model.parameters(), lr=3e-4, weight_decay=1e-6)
# lr_scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=5, gamma=0.1)
# lr_scheduler = utils.util.LRScheduler(optim)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, T_max=5)
# # early_stopping = utils.util.EarlyStopping()
return model, train_loader, test_loader, optim, lr_scheduler, criterion
| [
"xingjchen3-c@my.cityu.edu.hk"
] | xingjchen3-c@my.cityu.edu.hk |
4923c8a25b7d427a7ea167fd7ee6c73d727d1ec9 | 328bc5a846fb951e1127f13706ad09e75b93b023 | /0x00-python_variable_annotations/6-sum_mixed_list.py | 83170218eebd6c944c0c665044909fc61438bccc | [] | no_license | MatriMariem/holbertonschool-web_back_end | 37f39cc286d949e347baafee0697c8ad042dbb05 | 2ab609541ff8b45cdc923c24d629f160ddc6f3cf | refs/heads/master | 2023-02-28T23:06:47.490221 | 2021-01-28T13:08:43 | 2021-01-28T13:08:43 | 305,419,798 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | #!/usr/bin/env python3
""" a type-annotated function sum_mixed_list """
from typing import List, Union
def sum_mixed_list(mxd_lst: List[Union[int, float]]) -> float:
"""
a type-annotated function sum_mixed_list that
takes a list mxd_lst of floats and integers as argument
and returns their sum as a float.
"""
return sum(mxd_lst)
| [
"meriemmatri1994@gmail.com"
] | meriemmatri1994@gmail.com |
b8a86f66e33ce501000bbf95b6ae197aee5b8fc3 | 24449fa092082f629c12d2420b5bfb7ea5a59979 | /test/test_requests_factory.py | f2a81d3f569bef655c5125c1c285b0ba000040cc | [
"Apache-2.0"
] | permissive | hsdp/requests-factory | 7925798c6832752b58d72a7ec2b744e4bf89c494 | c622be218e1a54e8619de8f9a029a46c51d3f2de | refs/heads/master | 2021-05-11T08:06:47.087250 | 2018-06-14T20:11:51 | 2018-06-14T20:11:51 | 118,041,211 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,978 | py | import six
import json
import requests
import responses
import requests_factory
import websocket
from unittest import TestCase
from websocket import WebSocketConnectionClosedException
from requests_factory import MIME_JSON, MIME_FORM
from requests_factory import (
RequestMixin, Request, Response, RequestFactory, WebSocket)
from requests_factory import ResponseException
if six.PY3:
from unittest import mock
else:
import mock
test_data = None
class CustomRequest(Request):
pass
class CustomResponse(Response):
pass
def my_callback(req, req_args, *args, **kwargs):
pass
expected_callback = (my_callback, ('abc',), {'arg2': 123})
class TestRequestMixin(TestCase):
def setUp(self):
self.req = RequestMixin()
def test_set_content_type_form_urlencoded(self):
req = self.req.form_urlencoded()
self.assertIsInstance(req, RequestMixin)
self.assertIn('content-type', req.headers)
self.assertEqual(req.headers['content-type'], MIME_FORM)
def test_set_content_type_application_json(self):
req = self.req.application_json()
self.assertIsInstance(req, RequestMixin)
self.assertIn('content-type', req.headers)
self.assertEqual(req.headers['content-type'], MIME_JSON)
def test_set_accept_json(self):
req = self.req.accept_json()
self.assertIsInstance(req, RequestMixin)
self.assertIn('accept', req.headers)
self.assertEqual(req.headers['accept'], MIME_JSON)
def test_set_custom_requests_args(self):
req = self.req.set_custom_requests_args(verify=False)
self.assertIsInstance(req, RequestMixin)
self.assertIn('verify', req.custom_requests_args)
self.assertEqual(req.custom_requests_args['verify'], False)
def test_set_verify_ssl(self):
req = self.req.set_verify_ssl(False)
self.assertIsInstance(req, RequestMixin)
self.assertEqual(req.verify_ssl, False)
def test_set_basic_auth(self):
req = self.req.set_basic_auth('abc', '123')
self.assertIsInstance(req, RequestMixin)
self.assertIn('authorization', req.headers)
self.assertEqual('Basic YWJjOjEyMw==',
self.req.headers['authorization'])
def test_set_bearer_auth(self):
req = self.req.set_bearer_auth('value')
self.assertIsInstance(req, RequestMixin)
self.assertIn('authorization', req.headers)
self.assertEqual('bearer value', self.req.headers['authorization'])
def test_set_auth(self):
req = self.req.set_auth('bearer value')
self.assertIsInstance(req, RequestMixin)
self.assertIn('authorization', req.headers)
self.assertEqual('bearer value', self.req.headers['authorization'])
def test_set_header(self):
req = self.req.set_header('X-ABC', '123')
self.assertIsInstance(req, RequestMixin)
self.assertIn('x-abc', req.headers)
self.assertEqual('123', req.headers['x-abc'])
def test_set_base_url(self):
req = self.req.set_base_url('http://localhost/')
self.assertIsInstance(req, RequestMixin)
self.assertEqual('http://localhost', req.base_url)
def test_set_response_class(self):
req = self.req.set_response_class(CustomResponse)
self.assertIsInstance(req, RequestMixin)
self.assertEqual(req.response_class, CustomResponse)
def test_set_callback(self):
req = self.req.set_callback(my_callback, 'abc', arg2=123)
self.assertIsInstance(req, RequestMixin)
self.assertTupleEqual(expected_callback, req.callback)
def test_get_url(self):
req = self.req.set_base_url('http://localhost/')
url = req.get_url('')
self.assertEqual('http://localhost/', url)
url = req.get_url('abc')
self.assertEqual('http://localhost/abc', url)
def test_add_url(self):
req = self.req.set_base_url('http://localhost/')
req = req.add_url('/abc/')
self.assertIsInstance(req, RequestMixin)
url = req.get_url('/def')
self.assertEqual('http://localhost/abc/def', url)
url = req.get_url('/123')
self.assertEqual('http://localhost/abc/123', url)
class TestRequestFactory(TestCase):
def setUp(self):
self.fact = RequestFactory().set_base_url('http://localhost/')
def test_request(self):
req = self.fact.request('foo', 'bar')
self.assertIsInstance(req, Request)
self.assertEqual('http://localhost/foo/bar/abc/def',
req.get_url('abc/def'))
self.assertEqual('http://localhost/foo/bar/abc/123',
req.get_url('abc/123'))
def test_set_request_class(self):
fact = self.fact.set_request_class(CustomRequest)
self.assertIsInstance(fact, RequestFactory)
self.assertEqual(fact.request_class, CustomRequest)
req = fact.request('abc')
self.assertIsInstance(req, CustomRequest)
class TestRequest(TestCase):
@classmethod
def setUpClass(cls):
cls.fact = RequestFactory()\
.set_base_url('http://localhost/')\
.set_verify_ssl(False)\
.set_response_class(CustomResponse)\
.set_request_class(CustomRequest)\
.set_basic_auth('abc', '123')\
.set_custom_requests_args(allow_redirects=False)\
.set_callback(my_callback, 'abc', arg2=123)
def setUp(self):
self.req = self.fact.request('abc')
def test_init(self):
self.assertIsInstance(self.req, CustomRequest)
self.assertTupleEqual(expected_callback, self.req.callback)
self.assertEqual('http://localhost/abc', self.req.base_url)
self.assertEqual(self.req.verify_ssl, False)
self.assertEqual(self.req.headers['authorization'],
'Basic YWJjOjEyMw==')
self.assertEqual(self.req.response_class, self.fact.response_class)
self.assertDictEqual(self.req.custom_requests_args,
{'allow_redirects': False})
def test_set_method(self):
req = self.req.set_method('GET')
self.assertIsInstance(req, Request)
self.assertEqual(req.method, 'GET')
def test_set_params(self):
data = {'abc': 123}
req = self.req.set_params(data)
self.assertIsInstance(req, Request)
self.assertDictEqual(data, self.req.params)
self.req.set_params(**data)
self.assertDictEqual(data, self.req.params)
def test_set_query(self):
qtup = [('q', '1'), ('q', '2')]
qdic = {'q1': '1', 'q2': '2'}
expected = [('q', '1'), ('q', '2'), ('q1', '1'), ('q2', '2')]
req = self.req.set_query(*qtup, **qdic)
self.assertIsInstance(req, Request)
self.assertEqual(req.query, expected)
def test_param(self):
data = {'abc': 123}
req = self.req.set_params(data)
self.assertEqual(req.params['abc'], 123)
req.param('abc', 'def')
self.assertEqual(req.params['abc'], 'def')
def test_add_field(self):
self.req.add_field('abc', '123')
self.assertTupleEqual(self.req.multipart_files['abc'], (None, '123'))
def test_add_file(self):
fh = object()
self.req.add_file('abc', 'abc.txt', fh, 'text/plain')
self.assertTupleEqual(
self.req.multipart_files['abc'], ('abc.txt', fh, 'text/plain'))
def test_get_requests_args(self):
func, url, kwargs = self.req.get_requests_args()
self.assertEqual(self.req.session.get, func)
self.assertEqual(url, 'http://localhost/abc')
self.assertDictEqual(kwargs, {
'allow_redirects': False,
'headers': {'authorization': 'Basic YWJjOjEyMw=='},
'verify': False,
'data': {}})
@responses.activate
def test_send(self):
responses.add(responses.GET, 'http://localhost/abc', status=200)
res = self.req.send()
self.assertIsInstance(res, CustomResponse)
class TestResponse(TestCase):
@classmethod
def setUpClass(cls):
cls.fact = RequestFactory()\
.set_base_url('http://localhost/')\
.set_verify_ssl(False)\
.set_response_class(CustomResponse)\
.set_request_class(CustomRequest)\
.set_basic_auth('abc', '123')\
.set_custom_requests_args(allow_redirects=False)\
.set_callback(my_callback, 'abc', arg2=123)
@responses.activate
def get_response(self, status_code, **kwargs):
responses.add(responses.GET, 'http://localhost/abc',
status=status_code, **kwargs)
self.req = self.fact.request('abc')
return self.req.send()
def test_raise_for_status(self):
res = self.get_response(400)
with self.assertRaises(ResponseException):
res.raise_for_status()
def test_has_error(self):
res = self.get_response(400)
self.assertTrue(res.has_error)
def test_is_not_found(self):
res = self.get_response(404)
self.assertTrue(res.is_not_found)
def test_is_json(self):
res = self.get_response(200, headers={'content-type': MIME_JSON})
self.assertTrue(res.is_json)
def test_headers(self):
ex = {'x-abc': '123'}
res = self.get_response(200, headers=ex).headers
for n, v in ex.items():
self.assertIn(n, res)
self.assertEqual(res[n], v)
def test_data(self):
ex = json.dumps({'foo': 'bar'})
res = self.get_response(
200, headers={'content-type': MIME_JSON}, body=ex)
self.assertDictEqual(res.data, json.loads(ex))
res = self.get_response(
400, headers={'content-type': MIME_JSON}, body='')
with self.assertRaises(ResponseException):
res.data
def test_response(self):
res = self.get_response(200)
self.assertIsInstance(res.response, requests.Response)
def test_text(self):
res = self.get_response(200, body='foo')
self.assertEqual(res.text.decode('utf-8'), 'foo')
def test_raw_data(self):
ex = json.dumps({'foo': 'bar'})
res = self.get_response(
400, headers={'content-type': MIME_JSON}, body=ex)
self.assertDictEqual(res.raw_data, json.loads(ex))
class TestWebSocket(TestCase):
def test_init(self):
WebSocket('ws://localhost/stream')
def test_log(self):
ws = WebSocket('ws://localhost/stream')
ws.log.info('abc123')
self.assertEqual(ws.log.name, requests_factory.__name__)
def test_custom_log(self):
ws = WebSocket('ws://localhost/stream', logger='custom')
ws.log.info('abc123')
self.assertEqual(ws.log.name, 'custom')
def test_connect(self):
with mock.patch.object(requests_factory.websocket.WebSocket, 'connect',
return_value=None) as connect:
ws = WebSocket('ws://localhost/stream', verify_ssl=True)
ws.connect()
connect.assert_called_once_with(ws.url, header=ws.headers)
def test_watch(self):
with mock.patch.object(
websocket.WebSocket,
'connect',
return_value=None
) as connect, mock.patch.object(
websocket.WebSocket,
'recv',
return_value='abc123'
) as recv:
ws = WebSocket('ws://localhost/stream', verify_ssl=True)
ws.connect()
connect.assert_called_once_with(ws.url, header=ws.headers)
def onmessage(m):
global test_data
test_data = m
raise WebSocketConnectionClosedException()
ws.watch(onmessage)
recv.assert_called_once()
self.assertEqual(test_data, 'abc123')
| [
"ajaso@hsdp.io"
] | ajaso@hsdp.io |
26c69de60b556ed13a97ad0932632e28c17e451d | 2ae838b8a40f5b2866cee4305ab94291c3942e95 | /app/routes.py | 41880e2d47fdf5f536fa5c34154dc8f3ec736aac | [] | no_license | pedromiguelmvs/bookshelf | 1f0b529543cf555378722423d43403b865e662c4 | 2a1c539af97eceb75ea1271c3d6b84984d6909eb | refs/heads/main | 2023-03-14T12:51:22.112898 | 2021-03-07T19:15:58 | 2021-03-07T19:15:58 | 345,430,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,605 | py | from flask import render_template, request, redirect
from app import app, db
from app.models import Entry
@app.route('/')
@app.route('/index')
def index():
entries = Entry.query.all()
return render_template('index.html', entries=entries)
@app.route('/add', methods=['POST'])
def add():
if request.method == 'POST':
form = request.form
title = form.get('title')
description = form.get('description')
if not title or description:
entry = Entry(title = title, description = description)
db.session.add(entry)
db.session.commit()
return redirect('/')
return ""
@app.route('/update/<int:id>')
def updateRoute(id):
if not id or id != 0:
entry = Entry.query.get(id)
if entry:
return render_template('update.html', entry=entry)
return ""
@app.route('/update', methods=['POST'])
def update():
if not id or id != 0:
entry = Entry.query.get(id)
if entry:
db.session.delete(entry)
db.session.commit()
return redirect('/')
return ""
@app.route('/delete/<int:id>')
def delete(id):
if not id or id != 0:
entry = Entry.query.get(id)
if entry:
db.session.delete(entry)
db.session.commit()
return redirect('/')
return ""
@app.route('/turn/<int:id>')
def turn(id):
if not id or id != 0:
entry = Entry.query.get(id)
if entry:
entry.status = not entry.status
db.session.commit()
return redirect('/')
return "" | [
"pedromiguelmvs@gmail.com"
] | pedromiguelmvs@gmail.com |
64899c99b053884b463b2aca741431e307f7b480 | 1919fc2555dbcb6b865fdef0cc44c56c6c47e2f0 | /chapter_7/demo_7_2.py | 8b2b373f88eeeae54cc30d598f1dd6d869f9e330 | [] | no_license | ender8848/the_fluent_python | 10f8dd98fcf206b04ea6d34f47ad5e35f896a3ac | 058d59f6a11da34e23deb228e24a160d907f7643 | refs/heads/master | 2022-12-19T07:51:51.908127 | 2019-01-17T09:53:33 | 2019-01-17T09:53:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,781 | py | '''
装饰器的一个关键特性是,它们在被装饰的函数定义之后立即运行。
这通常是在导入时(即 Python 加载模块时) ,
如示例 7-2 中的 demo_7_2.py 模块所示。
'''
# registry 保存被 @register 装饰的函数引用
registry = []
# register 的参数是一个函数
def register(func):
print('running register(%s)' % func)
registry.append(func)
return func
@register
def f1():
print('running f1()')
@register
def f2():
print('running f2()')
def f3():
print('running f3()')
def main():
print('running main()')
print('registry ->', registry)
f1()
f2()
f3()
if __name__ == '__main__':
main()
'''
running register(<function f1 at 0x1081ad268>)
running register(<function f2 at 0x1095c99d8>)
running main()
registry -> [<function f1 at 0x1081ad268>, <function f2 at 0x1095c99d8>]
running f1()
running f2()
running f3()
'''
'''
注意,register 在模块中其他函数之前运行(两次) 。
调用 register 时,传给它的参数是被装饰的函数,例如 <function f1 at 0x100631bf8>。
加载模块后,registry 中有两个被装饰函数的引用:f1 和 f2。
这两个函数,以及 f3,只在 main 明确调用它们时才执行。
如果导入 demo_7_2.py 模块(不作为脚本运行) ,
输出如下:
>>> import registration
running register(<function f1 at 0x10063b1e0>)
running register(<function f2 at 0x10063b268>)
此时查看 registry 的值,得到的输出如下:
>>> registration.registry
[<function f1 at 0x10063b1e0>, <function f2 at 0x10063b268>]
示例 7-2 主要想强调,函数装饰器在导入模块时立即执行,而被装饰的函数只在明确调用时运行。
这突出了 Python 程序员所说的导入时和运行时之间的区别
''' | [
"js_huang@foxmail.com"
] | js_huang@foxmail.com |
abee031f9dd8d4d4d9ab634022ebb1f4a6c78b3d | 14f4d045750f7cf45252838d625b2a761d5dee38 | /argo/argo/models/io_argoproj_workflow_v1alpha1_workflow.py | 9004cddc944622ff5565d97235c12f3fc64c9b23 | [] | no_license | nfillot/argo_client | cf8d7413d728edb4623de403e03d119fe3699ee9 | c8cf80842f9eebbf4569f3d67b9d8eff4ba405fa | refs/heads/master | 2020-07-11T13:06:35.518331 | 2019-08-26T20:54:07 | 2019-08-26T20:54:07 | 204,546,868 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,259 | py | # coding: utf-8
"""
Argo
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v2.3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from argo.models.io_argoproj_workflow_v1alpha1_workflow_spec import IoArgoprojWorkflowV1alpha1WorkflowSpec # noqa: F401,E501
from argo.models.io_argoproj_workflow_v1alpha1_workflow_status import IoArgoprojWorkflowV1alpha1WorkflowStatus # noqa: F401,E501
from argo.models.io_k8s_apimachinery_pkg_apis_meta_v1_object_meta import IoK8sApimachineryPkgApisMetaV1ObjectMeta # noqa: F401,E501
class IoArgoprojWorkflowV1alpha1Workflow(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'IoK8sApimachineryPkgApisMetaV1ObjectMeta',
'spec': 'IoArgoprojWorkflowV1alpha1WorkflowSpec',
'status': 'IoArgoprojWorkflowV1alpha1WorkflowStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None): # noqa: E501
"""IoArgoprojWorkflowV1alpha1Workflow - a model defined in Swagger""" # noqa: E501
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
self.metadata = metadata
self.spec = spec
self.status = status
@property
def api_version(self):
"""Gets the api_version of this IoArgoprojWorkflowV1alpha1Workflow. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
:return: The api_version of this IoArgoprojWorkflowV1alpha1Workflow. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this IoArgoprojWorkflowV1alpha1Workflow.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this IoArgoprojWorkflowV1alpha1Workflow. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this IoArgoprojWorkflowV1alpha1Workflow. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this IoArgoprojWorkflowV1alpha1Workflow. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this IoArgoprojWorkflowV1alpha1Workflow.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this IoArgoprojWorkflowV1alpha1Workflow. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this IoArgoprojWorkflowV1alpha1Workflow. # noqa: E501
:return: The metadata of this IoArgoprojWorkflowV1alpha1Workflow. # noqa: E501
:rtype: IoK8sApimachineryPkgApisMetaV1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this IoArgoprojWorkflowV1alpha1Workflow.
:param metadata: The metadata of this IoArgoprojWorkflowV1alpha1Workflow. # noqa: E501
:type: IoK8sApimachineryPkgApisMetaV1ObjectMeta
"""
if metadata is None:
raise ValueError("Invalid value for `metadata`, must not be `None`") # noqa: E501
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this IoArgoprojWorkflowV1alpha1Workflow. # noqa: E501
:return: The spec of this IoArgoprojWorkflowV1alpha1Workflow. # noqa: E501
:rtype: IoArgoprojWorkflowV1alpha1WorkflowSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this IoArgoprojWorkflowV1alpha1Workflow.
:param spec: The spec of this IoArgoprojWorkflowV1alpha1Workflow. # noqa: E501
:type: IoArgoprojWorkflowV1alpha1WorkflowSpec
"""
if spec is None:
raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
self._spec = spec
@property
def status(self):
"""Gets the status of this IoArgoprojWorkflowV1alpha1Workflow. # noqa: E501
:return: The status of this IoArgoprojWorkflowV1alpha1Workflow. # noqa: E501
:rtype: IoArgoprojWorkflowV1alpha1WorkflowStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this IoArgoprojWorkflowV1alpha1Workflow.
:param status: The status of this IoArgoprojWorkflowV1alpha1Workflow. # noqa: E501
:type: IoArgoprojWorkflowV1alpha1WorkflowStatus
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(IoArgoprojWorkflowV1alpha1Workflow, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IoArgoprojWorkflowV1alpha1Workflow):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"nfillot@weborama.com"
] | nfillot@weborama.com |
de9f1199dc5aaa7e1cc8da79229dc61e059a54e2 | 25e2d5bb03c5b2534880ab41fb7de82d815f83da | /menpo/landmark/labels/__init__.py | 36cab907dbfd7bbb95e2c5c10a432ae9e3e2fea5 | [
"BSD-3-Clause"
] | permissive | justusschock/menpo | f4c4a15b62eab17f06387c5772af2699d52a9419 | d915bec26de64a5711b96be75cd145661a32290e | refs/heads/master | 2020-04-06T18:08:26.397844 | 2019-03-09T20:54:07 | 2019-03-09T20:54:07 | 157,687,031 | 0 | 0 | NOASSERTION | 2018-11-15T09:39:03 | 2018-11-15T09:39:03 | null | UTF-8 | Python | false | false | 612 | py | from .base import labeller
from .human import *
from .car import (
car_streetscene_20_to_car_streetscene_view_0_8,
car_streetscene_20_to_car_streetscene_view_1_14,
car_streetscene_20_to_car_streetscene_view_2_10,
car_streetscene_20_to_car_streetscene_view_3_14,
car_streetscene_20_to_car_streetscene_view_4_14,
car_streetscene_20_to_car_streetscene_view_5_10,
car_streetscene_20_to_car_streetscene_view_6_14,
car_streetscene_20_to_car_streetscene_view_7_8)
from .bounding_box import (bounding_box_to_bounding_box,
bounding_box_mirrored_to_bounding_box)
| [
"patricksnape@gmail.com"
] | patricksnape@gmail.com |
27a2982791e485a8d4a48f5bf1b30eb87d869ecc | d45b08745ab3f1e951485fa6b9f905be65f71da4 | /programmers/kakao20_경주로건설.py | 9bc2872919783233501f1b27d53f832c1fc7caf3 | [] | no_license | HYUNMIN-HWANG/Algorithm_practice | d0a82e73a735e22a70c98fd4882b66929061bd6c | 96b70fbea42ca11881546531fa6a9486e9a5289f | refs/heads/main | 2023-04-25T05:40:24.126757 | 2021-05-08T13:42:56 | 2021-05-08T13:42:56 | 324,109,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py | from collections import deque
def solution(board):
n = len(board)
visited = [[1000000 for _ in range(n)] for _ in range(n)]
ds = [[-1, 0], [1, 0], [0, 1], [0, -1]]
q = deque()
q.append((0, 0, 0, -1))
visited[0][0] = 0
while q:
y, x, c, t = q.popleft()
for i in range(4):
ay, ax = y + ds[i][0], x + ds[i][1]
if 0 <= ay < n and 0 <= ax < n and not board[ay][ax]:
if t == i or t == -1:
if visited[ay][ax] >= c + 100:
visited[ay][ax] = c + 100
q.append((ay, ax, c + 100, i))
elif t != i:
if visited[ay][ax] >= c + 600:
visited[ay][ax] = c + 600
q.append((ay, ax, c + 600, i))
answer = visited[n - 1][n - 1]
return answer
# 위에 꺼 참고한 나의 풀이
from collections import deque
def solution(board):
queue = deque()
queue.append((0,0,0,-1)) # x, y, cost, 방향
n = len(board[0])
visited = [[1000000] * n for _ in range(n)]
visited[0][0] = 0
dx = [-1, 1, 0, 0]
dy = [0, 0, 1, -1]
while queue :
x, y, c, t = queue.popleft()
for i in range(4) :
nx = x + dx[i]
ny = y + dy[i]
if 0 <= nx < n and 0 <= ny < n and not board[nx][ny] :
if t == i or t == -1 : # 방향이 같다. 직선거리
if visited[nx][ny] >= c + 100 : # 최소 가격으로 갱신하기 위함
visited[nx][ny] = c + 100
queue.append((nx, ny, c + 100, i))
elif t != i : # 방향이 달라짐. 곡선거리
if visited[nx][ny] >= c + 600 :
visited[nx][ny] = c + 600
queue.append((nx, ny, c + 600, i))
answer = visited[n-1][n-1]
return answer
| [
"hwangkei0212@gmail.com"
] | hwangkei0212@gmail.com |
6530b1a86af8656cabde3c93041a1dcf7ce153bd | 04bef44d27dc7b0557ab7c2e8f31c96c3c3f6563 | /q65.py | 8d07a2cfb5f6931d1a03084a7b9133324c699371 | [] | no_license | shivraj8615/python | 51f4c151276acf841a5f24396717b258ab42e61c | b843b72605054654487be46ef0d021ea036dbbe9 | refs/heads/main | 2023-06-20T04:06:03.189809 | 2021-07-13T04:10:11 | 2021-07-13T04:10:11 | 382,014,935 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,477 | py | #Write a program to prepare a simple registration form using tkinter library.
from tkinter import *
root = Tk()
root.geometry('500x500')
root.title("Registration Form")
label_0 = Label(root, text="Registration form",width=20,font=("bold", 20))
label_0.place(x=90,y=53)
label_1 = Label(root, text="FullName",width=20,font=("bold", 10))
label_1.place(x=80,y=130)
entry_1 = Entry(root)
entry_1.place(x=240,y=130)
label_2 = Label(root, text="Email",width=20,font=("bold", 10))
label_2.place(x=68,y=180)
entry_2 = Entry(root)
entry_2.place(x=240,y=180)
label_3 = Label(root, text="Gender",width=20,font=("bold", 10))
label_3.place(x=70,y=230)
var = IntVar()
Radiobutton(root, text="Male",padx = 5, variable=var, value=1).place(x=235,y=230)
Radiobutton(root, text="Female",padx = 20, variable=var, value=2).place(x=290,y=230)
label_4 = Label(root, text="country",width=20,font=("bold", 10))
label_4.place(x=70,y=280)
list1 = ['Canada','India','UK','Nepal','Iceland','South Africa'];
c=StringVar()
droplist=OptionMenu(root,c, *list1)
droplist.config(width=15)
c.set('select your country')
droplist.place(x=240,y=280)
label_4 = Label(root, text="Programming",width=20,font=("bold", 10))
label_4.place(x=85,y=330)
var1 = IntVar()
Checkbutton(root, text="java", variable=var1).place(x=235,y=330)
var2 = IntVar()
Checkbutton(root, text="python", variable=var2).place(x=290,y=330)
Button(root, text='Submit',width=20,bg='brown',fg='white').place(x=180,y=380)
root.mainloop()
| [
"noreply@github.com"
] | shivraj8615.noreply@github.com |
be64c747b3fe69288308a0d07504b003f86708b4 | 71b9ef710fc59c8b936dbbe849bc206809de15db | /vsa/vsa/legend.py | da4bda83b383c93c34d5ba4ea93751efa5b3607a | [] | no_license | HBPNeurorobotics/holographic | 80f60e60ea13debf09e0384343abe5a4f194753e | 4ab808125c66388578414a4827cdff87237c7e64 | refs/heads/master | 2021-09-25T02:10:44.930627 | 2018-10-16T20:46:05 | 2018-10-16T20:46:05 | 111,441,294 | 2 | 2 | null | 2018-05-17T11:57:15 | 2017-11-20T17:25:53 | Jupyter Notebook | UTF-8 | Python | false | false | 404 | py | import numpy as np
import matplotlib.pyplot as plt
def getLegend(v, start):
    """Plot each accuracy curve in *v* with a cycling line style.

    Each row ``v[i]`` is a sequence of accuracy values (one per binding);
    its legend label is ``start + i * 0.01``.

    Bug fix: the line-style index used true division (``3 - i*4/sh``),
    which yields a float under Python 3 and raises TypeError when used as
    a list index.  Floor division restores the original (Python 2) intent.
    """
    sh = len(v)
    linestyles = ['-', '--', '-.', ':']
    for i in range(sh):
        # // keeps the index an int; spreads the 4 styles across the rows.
        plt.plot(v[i], linestyles[3 - i * 4 // sh], label=start + i * 0.01)
    plt.axis([0, len(v[0]), 0, 1.01])
    plt.xlabel('Bindings')
    plt.ylabel('Accuracy')
    plt.legend(loc='lower left', shadow=True, fontsize='x-large')
    plt.show()
| [
"lesi@fzi.de"
] | lesi@fzi.de |
fc982f99dc26de713c7111eef9973060e4bb5c17 | a2b67c23c87b56972bd9c5c9e53581c2141ff52e | /bloc/block.py | d10b42ffb6e5ba0845d181e55ec5bded54c494b1 | [
"MIT"
] | permissive | siddharthsudheer/Alysida | c10ddc6a3e6d03d2205239d7549ea4d87aad8608 | 9359f519e96a7af1608b5dfcdde5c81df720a03c | refs/heads/master | 2021-05-07T00:00:51.122410 | 2018-02-18T02:53:28 | 2018-02-18T02:53:28 | 110,082,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,738 | py | #!/usr/bin/env python3
import hashlib
import falcon
import json
import db.service as DBService
from bloc.transaction import Transaction
DIFFICULTY_TARGET = "000"
class Block(object):
    """One block of the chain: a set of transactions plus the proof-of-work
    metadata (nonce, previous hash, timestamp) that chains it to its parent.

    Persistence goes through DBService; transactions move from the
    'unconfirmed_pool' table into 'confirmed_txns' when a block is added.

    NOTE(review): indentation was reconstructed from syntax during review --
    confirm the branch placement of trailing statements (e.g. in
    add_to_chain) against the original file.
    """

    def __init__(
            self,
            block_num=None,
            block_hash=None,
            prev_block_hash=None,
            time_stamp=None,
            nonce=None,
            txn_hashes=None,
            txn_recs=None):
        # Everything is optional: a Block may be mined locally (create()),
        # rehydrated from a peer dict (to_obj()), or loaded from the DB.
        self.block_num = block_num
        self.block_hash = block_hash
        self.prev_block_hash = prev_block_hash
        self.time_stamp = time_stamp
        self.nonce = nonce
        self.txn_hashes = txn_hashes  # list of transaction hash strings
        self.txn_recs = txn_recs      # list of Transaction objects (lazy-loaded)

    def create(self, last_block_hash):
        """Mine the block on top of *last_block_hash*; returns the new block
        hash, or None (after printing an error) when the chosen transactions
        cannot be loaded."""
        if self.get_txn_recs():
            self.time_stamp = DBService.get_timestamp()
            self.prev_block_hash = last_block_hash
            self.nonce = self.proof_of_work(self.time_stamp, self.txn_hashes_string(), self.prev_block_hash)
            self.block_hash = self.gen_block_hash()
            return self.block_hash
        else:
            print("Something wrong with transactions chosen.")

    def proof_of_work(self, time_stamp, txn_hashes, prev_block_hash):
        """Brute-force the smallest unused nonce whose resulting block hash
        starts with DIFFICULTY_TARGET."""
        nonce = 0

        def _is_valid_block(nonce, prev_block_hash, time_stamp, txn_hashes):
            # Skip nonces already consumed by blocks on the chain.
            if nonce not in self.used_nonces():
                guess = self.gen_block_string(nonce, prev_block_hash, time_stamp, txn_hashes)
                guess_hash = hashlib.sha256(guess).hexdigest()
                print('~~~> Hash: {}'.format(guess_hash), end="\r")
                return guess_hash[:len(DIFFICULTY_TARGET)] == DIFFICULTY_TARGET
            else:
                return False

        print(" ")
        while _is_valid_block(nonce, prev_block_hash, time_stamp, txn_hashes) is False:
            nonce += 1
        print(" ")
        return nonce

    def convert_to_txns_obj(self, res):
        """Map a DBService result set to a list of Transaction objects."""
        txn_recs = [dict(zip(res['column_names'], c))
                    for c in res['rows']] if res else []
        txo = lambda t: Transaction(
            sender=t['sender'],
            receiver=t['receiver'],
            amount=t['amount'],
            txn_hash=t['TXN_HASH'],
            time_stamp=t['TXN_TIME_STAMP']
        )
        return list(map(txo, txn_recs))

    def get_txn_recs(self):
        """Lazily load this block's transactions from the unconfirmed pool.

        Returns the list of Transaction objects, or False when any hash in
        self.txn_hashes could not be found."""
        if self.txn_recs is None:
            # ",)" -> ")" strips the trailing comma of a 1-tuple so the
            # literal is valid SQL.
            hashes = str(tuple(self.txn_hashes)).replace(",)",")")
            sql_query = """
                SELECT TXN_HASH, TXN_TIME_STAMP, sender, receiver, amount
                FROM unconfirmed_pool
                WHERE TXN_HASH IN {}
                """.format(hashes)
            res = DBService.query("unconfirmed_pool", sql_query)
            if res:
                txn_recs = self.convert_to_txns_obj(res)
                if len(txn_recs) != len(self.txn_hashes):
                    return False
                else:
                    self.txn_recs = self.convert_to_txns_obj(res)
                    return self.txn_recs
            else:
                return False
        else:
            if len(self.txn_recs) != len(self.txn_hashes):
                return False
            return self.txn_recs

    def get_block_confirmed_txns(self):
        """Load the transactions confirmed under this block's hash; returns
        False when none are found."""
        sql_query = """
            SELECT TXN_HASH, TXN_TIME_STAMP, sender, receiver, amount
            FROM confirmed_txns
            WHERE BLOCK_HASH = '{}'
            """.format(self.block_hash)
        res = DBService.query("main_chain", sql_query)
        if res:
            self.txn_recs = self.convert_to_txns_obj(res)
            self.txn_hashes = self.gen_txn_hashes()
            return self.txn_recs
        else:
            return False

    def gen_block_hash(self):
        """sha256 over (nonce, prev hash, timestamp, sorted txn hashes)."""
        block_string = self.gen_block_string(self.nonce, self.prev_block_hash, self.time_stamp, self.txn_hashes_string())
        return hashlib.sha256(block_string).hexdigest()

    def is_valid(self):
        """True when the stored block_hash matches a recomputation."""
        if self.time_stamp and self.block_hash and self.nonce:
            return self.gen_block_hash() == self.block_hash
        else:
            print("Hash, Nonce, and/or Timestamp not provided.")
            return False

    def add_to_chain(self):
        """Persist the block and its transactions, then purge them from the
        unconfirmed pool.  Returns (title, message, falcon HTTP status)."""
        main_chain = """
            INSERT INTO main_chain (BLOCK_NUM, BLOCK_HASH, NONCE, TIME_STAMP)
            VALUES (NULL,'{}',{},'{}');
            """.format(self.block_hash, self.nonce, self.time_stamp)
        self.txn_recs = self.get_txn_recs()
        print(self.txn_recs)
        txn_inserts = [txn.confirmed_txns_insert_sql(self.block_hash) for txn in self.txn_recs]
        confirmed_txns = '{}'.format(''.join(map(str, txn_inserts)))
        # Single SQL script: the block insert followed by every txn insert.
        final = main_chain + "\n" + confirmed_txns
        db_resp = DBService.post_many("main_chain", final)
        if db_resp != True:
            final_title = 'Error'
            final_msg = db_resp
            resp_status = falcon.HTTP_400
        else:
            final_title = 'Success'
            final_msg = 'New transaction successfully added to DB.'
            resp_status = falcon.HTTP_201
            # On success, cache our row number and drop the now-confirmed
            # transactions from the pool.
            self.get_block_num()
            self.clean_unconfirmed_pool()
        return (final_title, final_msg, resp_status)

    def clean_unconfirmed_pool(self):
        """Delete this block's transactions from the unconfirmed pool."""
        hashes = str(tuple(self.txn_hashes)).replace(",)",")")
        delete_sql = """
            DELETE FROM unconfirmed_pool WHERE TXN_HASH in {};
            """.format(hashes)
        db_resp = DBService.post("unconfirmed_pool", delete_sql)

    def gen_dict(self):
        """Serialize the block (and its transactions) to a plain dict."""
        if self.txn_recs is None:
            self.get_txn_recs()
        txn_recs_dict = [t.gen_dict() for t in self.txn_recs]
        block_dict = {
            "block_num": self.block_num,
            "block_hash": self.block_hash,
            "time_stamp": self.time_stamp,
            "nonce": self.nonce,
            "txns": txn_recs_dict
        }
        return block_dict

    def to_obj(self, data):
        """Rehydrate this Block in place from a gen_dict()-style dict."""
        self.block_num = data['block_num']
        self.block_hash = data['block_hash']
        self.time_stamp = data['time_stamp']
        self.nonce = data['nonce']

        def _txo(t):
            new_t = Transaction()
            new_t.to_obj(t)
            return new_t

        self.txn_recs = list(map(_txo, data['txns']))
        self.txn_hashes = [t.txn_hash for t in self.txn_recs]
        # self.prev_block_hash = self.get_prev_block_hash()

    def get_block_num(self):
        """Look up (and cache) this block's row number on the main chain."""
        sql_query = "SELECT BLOCK_NUM FROM main_chain WHERE BLOCK_HASH='{}'".format(self.block_hash)
        res = DBService.query("main_chain", sql_query)
        # NOTE(review): res['rows'][0] is presumably a single-column row;
        # confirm DBService unwraps it to a scalar.
        self.block_num = None if not res else res['rows'][0]
        return self.block_num

    #####################################################################################
    #                               Helper Functions
    #####################################################################################

    def used_nonces(self):
        """All nonces already present on the main chain."""
        sql_query = "SELECT NONCE FROM main_chain"
        result = DBService.query("main_chain", sql_query)
        nonces = result['rows'] if result else []
        return nonces

    def gen_txn_hashes(self):
        """Hashes of the currently loaded transaction records."""
        return [t.txn_hash for t in self.txn_recs]

    def txn_hashes_string(self):
        """Canonical (sorted, comma-joined) form of the txn hashes."""
        x = '{}'.format(','.join(map(str, sorted(self.txn_hashes))))
        return x

    def gen_block_string(self, nonce, prev_block_hash, time_stamp, txn_hashes):
        """Byte string that is hashed to produce the block hash."""
        block_string = '{}{}{}{}'.format(
            str(nonce), prev_block_hash, time_stamp, txn_hashes
        )
        final = f'{block_string}'.encode()
        return final

    def get_prev_block_hash(self):
        """Resolve (and cache) the hash of the block preceding this one.

        Locates this block via block_num, falling back to block_hash;
        '0' marks the genesis predecessor."""
        if self.prev_block_hash != None:
            return self.prev_block_hash

        def _using_block_num():
            # Requires self.block_num; returns '0' for the genesis block,
            # the previous block's hash, or False when it cannot be found.
            if self.block_num != None:
                if self.block_num - 1 < 1:
                    return '0'
                else:
                    prev_block_num = self.block_num - 1
                    sql_query = "SELECT BLOCK_HASH FROM main_chain WHERE BLOCK_NUM = {}".format(prev_block_num)
                    res = DBService.query("main_chain", sql_query)
                    prev_hash = False if not res else '{}'.format(res['rows'][0])
                    return prev_hash
            else:
                return False

        prev_hash = _using_block_num()
        # NOTE(review): prev_hash is never literally True, so this branch
        # always runs; the retry only fires when block_num was unknown.
        if prev_hash != True:
            if self.block_hash != None:
                sql_query = "SELECT BLOCK_NUM FROM main_chain WHERE BLOCK_HASH = '{}'".format(self.block_hash)
                res = DBService.query("main_chain", sql_query)
                self.block_num = None if not res else res['rows'][0]
                prev_hash = _using_block_num()
        self.prev_block_hash = prev_hash if prev_hash else None
        return self.prev_block_hash
| [
"siddharthsudheer@gmail.com"
] | siddharthsudheer@gmail.com |
8653d1c1e2828d427d6f9be27a3a3c7da923e248 | 1e6d30ee31dfd41e8db5388b432d987cfee41406 | /RsyncHelper.py | f26caf7b0fc89b6df3c96389efefe5b416b94122 | [
"MIT"
] | permissive | UbiquitousPhoton/RsyncHelper | 7ea25e4c694b5a5c30c575fe6c4947159d63685b | 956ec21682bb899e2350ddb6a761780fe87e33fc | refs/heads/master | 2020-09-21T20:01:33.799668 | 2020-01-24T16:50:23 | 2020-01-27T21:49:06 | 224,909,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,052 | py | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2019 Paul Elliott
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# RsyncHelper.py - a wrapper for rsync in python that would allow reporting via logfile
# and / or email of a regular sync job, most likely run from cron or systemd. Configuarion is
# via ini file.
import os
import argparse
import logging
import configparser
import smtplib
from email.mime.text import MIMEText
import socket
import sys
import subprocess
from subprocess import Popen, PIPE
from datetime import datetime
import shlex
from enum import Enum
class FileLogger:
    """Writes log lines to a rotating logfile through the shared
    'RsyncHelper' logging.Logger."""

    def __init__(self):
        self.logger = logging.getLogger("RsyncHelper")
        self.log_formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s",
                                               datefmt="%H:%M:%S")
        # Paths already rotated during this run; rotation happens at most
        # once per path so repeated SetLogfile calls don't shuffle again.
        self.rotated_logfiles = []

    def RotateOldFiles(self, logfile, num_rotated):
        """Shift logfile -> logfile.1 -> ... -> logfile.N, dropping the
        oldest rotation, at most once per path per run."""
        if logfile not in self.rotated_logfiles:
            if(os.path.isfile(logfile)):
                # Drop the oldest copy, then bump each remaining file up.
                if os.path.isfile("{}.{}".format(logfile, num_rotated)):
                    os.remove("{}.{}".format(logfile, num_rotated))
                for file_ver in range(num_rotated, 0, -1):
                    if file_ver > 1:
                        target_file = "{}.{}".format(logfile, (file_ver - 1))
                    else:
                        target_file = logfile
                    if os.path.isfile(target_file):
                        os.replace(target_file, "{}.{}".format(logfile, file_ver))
            self.rotated_logfiles.append(logfile)

    def SetLogfile(self, logfile, num_rotated):
        """Point the logger at *logfile*, rotating previous contents first.
        An empty path leaves the current configuration untouched."""
        if logfile != '':
            # There can be only one...
            # NOTE(review): self.log_handler is only assigned below; if the
            # shared logger gained handlers from elsewhere this raises
            # AttributeError -- confirm single-owner usage is intended.
            if self.logger.hasHandlers():
                self.logger.removeHandler(self.log_handler)

            self.RotateOldFiles(logfile, num_rotated)

            self.log_handler = logging.FileHandler(logfile)
            self.logger.addHandler(self.log_handler)
            self.logger.setLevel(logging.INFO)
            self.log_handler.setFormatter(self.log_formatter)

    def Log(self, log_level, log_string):
        """Emit *log_string* if a logfile has been configured."""
        if self.logger.hasHandlers():
            self.logger.log(log_level, log_string)
class MailLogger:
    """Accumulates log lines into an e-mail body and sends them as a single
    message through a configured SMTP server."""

    def __init__(self):
        self.initialised = False
        self.body = ""

    def Setup(self, server, from_email, to_email, subject):
        """Configure the outgoing mail parameters.

        If a previous configuration still holds an unsent body and any
        parameter changed, flush that mail first so nothing is lost."""
        if self.initialised and self.body != "" and (server != self.server or from_email
                                                     != self.from_email or to_email
                                                     != self.to_email or subject
                                                     != self.subject):
            # Bug fix: this previously called the bare name Send(self),
            # which raised NameError at runtime; it must go through self.
            self.Send()

        self.server = server
        self.from_email = from_email
        self.to_email = to_email
        self.subject = subject
        self.initialised = True

    def AddToBody(self, body):
        """Append one line to the pending mail body (no-op until Setup)."""
        if self.initialised == True:
            self.body = self.body + body + "\n"

    def Send(self):
        """Send the accumulated body, then reset to unconfigured state."""
        if self.initialised == True:
            email_msg = MIMEText(self.body)
            email_msg["Subject"] = self.subject
            email_msg["From"] = self.from_email
            email_msg["To"] = self.to_email

            # Send the message via our own SMTP server, but don't include the
            # envelope header.
            mail_server = smtplib.SMTP(self.server)
            mail_server.sendmail(self.from_email, self.to_email, email_msg.as_string())
            mail_server.quit()

            self.body = ""
            self.initialised = False
class SyncLogger:
    """Combined logger: every message goes to the logfile (if configured)
    and into the pending notification e-mail (if configured)."""

    def __init__(self):
        self.file_logger = FileLogger()
        self.mail_logger = MailLogger()

    def SetLogfile(self, logfile, num_rotated):
        # Delegated to the file backend; rotates previous logs first.
        self.file_logger.SetLogfile(logfile, num_rotated)

    def SetupMail(self, server, from_email, to_email, subject):
        # Delegated to the mail backend.
        self.mail_logger.Setup(server, from_email, to_email, subject)

    def Log(self, log_level, message):
        # Fan the message out to both sinks.
        self.mail_logger.AddToBody(message)
        self.file_logger.Log(log_level, message)

    def SendMail(self):
        # Flush the accumulated e-mail body (no-op when mail unconfigured).
        self.mail_logger.Send()
class SyncMounter:
    """Handles optional mount/unmount of a mount point around a sync job.

    A config section may specify either:
      * try_mount   -- mount the path ourselves if it isn't mounted
                       (optionally unmounting afterwards via should_unmount)
      * check_mount -- require the path to already be mounted.
    """

    class MountType(Enum):
        mount_none = 0   # no mount handling for this section
        mount_check = 1  # verify only; never mount/unmount
        mount_try = 2    # mount on demand

    def __init__(self):
        self.mount_type = SyncMounter.MountType.mount_none

    def Setup(self, sync_logger, sync_section, sync_section_name):
        """Prepare the mount for *sync_section*.

        Returns True when the sync may proceed, False when the section
        should be skipped (required mount missing or mount failed)."""
        if self.mount_type != SyncMounter.MountType.mount_none:
            # Bug fix: previously called the bare name Shutdown(self) (a
            # NameError at runtime); properly tear down any mount left over
            # from an earlier section before configuring the new one.
            self.Shutdown(sync_logger, sync_section, sync_section_name)

        self.mount_type = SyncMounter.MountType.mount_none

        if "try_mount" in sync_section:
            self.mount_point = sync_section.get("try_mount")
            self.mount_type = SyncMounter.MountType.mount_try
        elif "check_mount" in sync_section:
            self.mount_point = sync_section.get("check_mount")
            self.mount_type = SyncMounter.MountType.mount_check

        self.should_unmount = sync_section.getboolean("should_unmount", False)

        if self.mount_type != SyncMounter.MountType.mount_none:
            if not os.path.ismount(self.mount_point):
                if self.mount_type == SyncMounter.MountType.mount_check:
                    sync_logger.Log(logging.ERROR,
                                    "Checked mount {} not mounted, abandoning section {}".format(self.mount_point,
                                                                                                 sync_section_name))
                    return False
                else:
                    # try_mount: create the mount point if needed, then mount.
                    if not os.path.isdir(self.mount_point):
                        Do_Shell_Exec(sync_logger, "mkdir -p {}".format(self.mount_point))
                    Do_Shell_Exec(sync_logger, "mount {}".format(self.mount_point))
                    if not os.path.ismount(self.mount_point):
                        sync_logger.Log(logging.ERROR,
                                        "Attempt to mount {} failed, abandoning section {}".format(self.mount_point,
                                                                                                   sync_section_name))
                        self.mount_type = SyncMounter.MountType.mount_none
                        return False
                    else:
                        sync_logger.Log(logging.INFO,
                                        "Successfully mounted {}".format(self.mount_point))
            else:
                sync_logger.Log(logging.INFO,
                                "Checked mount {} is mounted".format(self.mount_point))

        return True

    def Shutdown(self, sync_logger, sync_section, sync_section_name):
        """Undo any mount made in Setup.

        Returns True unless an unmount was attempted and failed.  Bug fix:
        the original returned False on a *successful* unmount and skipped
        resetting mount_type on that path."""
        result = True
        if self.mount_type == SyncMounter.MountType.mount_try and self.should_unmount:
            Do_Shell_Exec(sync_logger, "umount {}".format(self.mount_point))
            if not os.path.ismount(self.mount_point):
                sync_logger.Log(logging.INFO,
                                "Successfully unmounted {} for section {}".format(self.mount_point,
                                                                                  sync_section_name))
            else:
                sync_logger.Log(logging.ERROR,
                                "Failed to unmount {} for section {}".format(self.mount_point,
                                                                             sync_section_name))
                result = False

        # Always return to the idle state so the next Setup starts clean.
        self.mount_type = SyncMounter.MountType.mount_none
        return result
def Check_Elements(element_list, required_element_list, sync_logger, section_name):
    """Verify that every required key appears in *element_list*.

    Logs (and reports) only the first missing element, matching the
    short-circuit behaviour of a plain loop; returns True when complete."""
    missing = [element for element in required_element_list
               if element not in element_list]
    if missing:
        sync_logger.Log(logging.ERROR,
                        "Invalid config : {} not found in sync section {}".format(missing[0],
                                                                                  section_name))
        return False
    return True
def Do_Shell_Exec(sync_logger, exec_string):
    """Run *exec_string* as a shell-less subprocess.

    Logs stdout on success / stderr on failure and returns True when the
    command exited with status 0."""
    proc = Popen(shlex.split(exec_string), stdin=PIPE, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()

    if proc.returncode == 0:
        sync_logger.Log(logging.INFO, "stdout: {}".format(out.decode("UTF-8")))
        return True

    sync_logger.Log(logging.INFO, "{} returned {}".format(exec_string,
                                                          proc.returncode))
    sync_logger.Log(logging.INFO, "stderr: {}".format(err.decode("UTF-8")))
    return False
def Setup_Logging_And_Mail(sync_logger, sync_section, sync_section_name):
    """Configure logfile and e-mail reporting for one config section.

    Returns False only when mail options are present but the essential
    ones (mail_to / mail_from / mail_server) are incomplete."""
    essential_mail_keys = ["mail_to", "mail_from", "mail_server"]
    all_mail_keys = essential_mail_keys + ["mail_server_port", "mail_subject"]

    if 'logfile' in sync_section:
        sync_logger.SetLogfile(sync_section.get("logfile"), sync_section.get("num_keep_logs", 5))

    # No mail-related key at all means mail reporting is simply disabled.
    if not any(key in sync_section for key in all_mail_keys):
        return True

    # At least one mail key present: the essential ones become mandatory.
    if not Check_Elements(sync_section, essential_mail_keys, sync_logger, sync_section_name):
        return False

    sync_logger.SetupMail(
        sync_section.get("mail_server", "127.0.0.1"),
        sync_section.get("mail_from"),
        sync_section.get("mail_to"),
        sync_section.get("mail_subject",
                         "Rsync Helper on {}".format(socket.gethostname())))
    return True
def Do_Sync(sync_logger, sync_section, sync_section_name):
    """Run the rsync for one config section.

    sync_type 'local' copies source_dir -> target_dir on this machine;
    'remote' pulls from an rsync daemon over ssh.  Returns True on
    success, False on any validation or rsync failure."""
    essential_local_elements = ["target_dir", "source_dir"]
    essential_remote_elements = essential_local_elements.copy()
    essential_remote_elements.extend(["remote_user", "remote_host"])

    # rsync only deletes extraneous destination files when asked to.
    should_delete = sync_section.getboolean('delete', False)
    if should_delete:
        delete_string = "--delete"
    else:
        delete_string = ""

    sync_type = sync_section.get("sync_type", "local").lower()

    if sync_type == "local":
        if not Check_Elements(sync_section, essential_local_elements,
                              sync_logger,
                              sync_section_name):
            return False

        if not Do_Shell_Exec(sync_logger,
                             "rsync -av {} {} {}".format(delete_string,
                                                         sync_section.get('source_dir'),
                                                         sync_section.get('target_dir'))):
            return False

    elif sync_type == "remote":
        if not Check_Elements(sync_section, essential_remote_elements,
                              sync_logger,
                              sync_section_name):
            return False

        if not Do_Shell_Exec(sync_logger,
                             "rsync -av {} --rsh=ssh {}@{}::{} {}".format(delete_string,
                                                                          sync_section.get('remote_user'),
                                                                          sync_section.get('remote_host'),
                                                                          sync_section.get('source_dir'),
                                                                          sync_section.get('target_dir'))):
            return False

    else:
        # Bug fix: the original formatted this message with the undefined
        # name 'section_name', raising NameError instead of logging.
        error_msg = ("Invalid sync type : {} for sync section {}".format(sync_type,
                                                                         sync_section_name))
        sync_logger.Log(logging.ERROR, error_msg)
        return False

    return True
if __name__ == "__main__":
    # Parse the single required argument: the ini-style sync config file.
    parser = argparse.ArgumentParser(description='Rsync wrapper for scheduled jobs')
    parser.add_argument("config_file", help="Sync Config File", type = argparse.FileType('r'))
    args = parser.parse_args()

    config = configparser.ConfigParser()
    config.read_file(args.config_file)

    sync_logger = SyncLogger()
    sync_mounter = SyncMounter()

    # Each config section describes one independent sync job.
    for sync_section_name in config.sections():
        sync_section = config[sync_section_name]
        should_continue = True  # NOTE(review): unused; kept as-is.

        # Logging/mail setup failure means the section is misconfigured.
        if not Setup_Logging_And_Mail(sync_logger, sync_section, sync_section_name):
            continue

        start_time = datetime.now()
        sync_logger.Log(logging.INFO,
                        "Sync section {} begins {}".format(sync_section_name,
                                                           start_time.strftime("%d/%m/%Y %H:%M")))

        # Mount handling (check_mount / try_mount) happens before the sync.
        if not sync_mounter.Setup(sync_logger, sync_section, sync_section_name):
            continue

        if not Do_Sync(sync_logger, sync_section, sync_section_name):
            continue

        end_time = datetime.now()
        time_taken = end_time - start_time;
        # Render elapsed time as hours/min or min/sec depending on length.
        hours_taken, taken_remainder = divmod(time_taken.total_seconds(), 3600)
        minutes_taken, seconds_taken = divmod(taken_remainder, 60)
        if hours_taken > 0:
            taken_string = "{} hours, {} min".format(hours_taken, minutes_taken)
        else:
            taken_string = "{} min {} secs".format(minutes_taken, seconds_taken)

        sync_logger.Log(logging.INFO,
                        "Sync section {} ends {} (Took {})".format(sync_section_name,
                                                                   end_time.strftime("%d/%m/%Y %H:%M"),
                                                                   taken_string))

        # Optional arbitrary shell command run after a successful sync.
        if "post_sync" in sync_section:
            sync_logger.Log(logging.INFO,
                            "Executing post sync action for {}".format(sync_section_name))
            Do_Shell_Exec(sync_logger, sync_section.get('post_sync'))

        sync_mounter.Shutdown(sync_logger, sync_section, sync_section_name)
        # One mail per section, summarising everything logged above.
        sync_logger.SendMail()
| [
"paul.elliott@arm.com"
] | paul.elliott@arm.com |
3248d8abf873f771774003dc8d0226a2dff074f3 | 599069eeeae294950aab730ca8d4858ac1929a5c | /bemani/client/iidx/tricoro.py | 1ea4a005f356b6586ed3b5e9f75655973ecc8b36 | [] | no_license | ByteFun/bemaniutils | 232d057d4b548f929af4da4f145565ad51482113 | bd467a9b732a25a1c8aba75106dc459fbdff61b0 | refs/heads/master | 2020-12-04T07:45:45.503620 | 2019-12-08T21:57:08 | 2019-12-08T21:57:08 | 231,683,196 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 31,071 | py | import random
import time
from typing import Any, Dict, Optional, Tuple
from bemani.client.base import BaseClient
from bemani.protocol import Node
class IIDXTricoroClient(BaseClient):
NAME = 'TEST'
    def verify_shop_getname(self, lid: str) -> str:
        """Fetch the shop settings for PCBID *lid*; returns the operator name.

        NOTE(review): local node names say 'IIDX21' although this is the
        Tricoro client -- presumably copied from the Spada client; the wire
        format only uses the node name 'shop'."""
        call = self.call_node()

        # Construct node
        IIDX21shop = Node.void('shop')
        call.add_child(IIDX21shop)
        IIDX21shop.set_attribute('method', 'getname')
        IIDX21shop.set_attribute('lid', lid)

        # Swap with server
        resp = self.exchange('', call)

        # Verify that response is correct
        self.assert_path(resp, "response/shop/@opname")
        self.assert_path(resp, "response/shop/@pid")
        self.assert_path(resp, "response/shop/@cls_opt")

        return resp.child('shop').attribute('opname')
    def verify_shop_savename(self, lid: str, name: str) -> None:
        """Save *name* as the operator name for the shop identified by *lid*."""
        call = self.call_node()

        # Construct node
        IIDX21shop = Node.void('shop')
        IIDX21shop.set_attribute('lid', lid)
        IIDX21shop.set_attribute('pid', '51')
        IIDX21shop.set_attribute('method', 'savename')
        IIDX21shop.set_attribute('cls_opt', '0')
        IIDX21shop.set_attribute('ccode', 'US')
        IIDX21shop.set_attribute('opname', name)
        IIDX21shop.set_attribute('rcode', '.')
        call.add_child(IIDX21shop)

        # Swap with server
        resp = self.exchange('', call)

        # Verify that response is correct
        self.assert_path(resp, "response/shop")
    def verify_pc_common(self) -> None:
        """Fetch global game phase/event settings and verify they exist."""
        call = self.call_node()

        # Construct node
        IIDX21pc = Node.void('pc')
        call.add_child(IIDX21pc)
        IIDX21pc.set_attribute('method', 'common')

        # Swap with server
        resp = self.exchange('', call)

        # Verify that response is correct
        self.assert_path(resp, "response/pc/ir/@beat")
        self.assert_path(resp, "response/pc/limit/@phase")
        self.assert_path(resp, "response/pc/boss/@phase")
        self.assert_path(resp, "response/pc/red/@phase")
        self.assert_path(resp, "response/pc/yellow/@phase")
        self.assert_path(resp, "response/pc/medal/@phase")
        self.assert_path(resp, "response/pc/tricolettepark/@open")
        self.assert_path(resp, "response/pc/cafe/@open")
    def verify_music_crate(self) -> None:
        """Fetch global clear rates and sanity-check every returned entry:
        each 'c' node carries 12 percentages in the range 0..101."""
        call = self.call_node()

        # Construct node
        IIDX21pc = Node.void('music')
        call.add_child(IIDX21pc)
        IIDX21pc.set_attribute('method', 'crate')

        # Swap with server
        resp = self.exchange('', call)
        self.assert_path(resp, "response/music")

        for child in resp.child("music").children:
            if child.name != 'c':
                raise Exception('Invalid node {} in clear rate response!'.format(child))
            if len(child.value) != 12:
                raise Exception('Invalid node data {} in clear rate response!'.format(child))
            for v in child.value:
                if v < 0 or v > 101:
                    raise Exception('Invalid clear percent {} in clear rate response!'.format(child))
    def verify_shop_getconvention(self, lid: str) -> None:
        """Fetch the shop's court/convention song selection settings."""
        call = self.call_node()

        # Construct node
        IIDX21pc = Node.void('shop')
        call.add_child(IIDX21pc)
        IIDX21pc.set_attribute('method', 'getconvention')
        IIDX21pc.set_attribute('lid', lid)

        # Swap with server
        resp = self.exchange('', call)

        # Verify that response is correct
        self.assert_path(resp, "response/shop/valid")
        self.assert_path(resp, "response/shop/@music_0")
        self.assert_path(resp, "response/shop/@music_1")
        self.assert_path(resp, "response/shop/@music_2")
        self.assert_path(resp, "response/shop/@music_3")
    def verify_pc_visit(self, extid: int, lid: str) -> None:
        """Register a profile 'visit' at shop *lid* and verify the returned
        visit counters/flags exist."""
        call = self.call_node()

        # Construct node
        IIDX21pc = Node.void('pc')
        call.add_child(IIDX21pc)
        IIDX21pc.set_attribute('iidxid', str(extid))
        IIDX21pc.set_attribute('lid', lid)
        IIDX21pc.set_attribute('method', 'visit')
        IIDX21pc.set_attribute('pid', '51')

        # Swap with server
        resp = self.exchange('', call)

        # Verify that response is correct
        self.assert_path(resp, "response/pc/@aflg")
        self.assert_path(resp, "response/pc/@anum")
        self.assert_path(resp, "response/pc/@pflg")
        self.assert_path(resp, "response/pc/@pnum")
        self.assert_path(resp, "response/pc/@sflg")
        self.assert_path(resp, "response/pc/@snum")
    def verify_ranking_getranker(self, lid: str) -> None:
        """Fetch shop rankings for every chart type (clid 0-6) and verify
        each response has a ranking node."""
        for clid in [0, 1, 2, 3, 4, 5, 6]:
            call = self.call_node()

            # Construct node
            IIDX21pc = Node.void('ranking')
            call.add_child(IIDX21pc)
            IIDX21pc.set_attribute('method', 'getranker')
            IIDX21pc.set_attribute('lid', lid)
            IIDX21pc.set_attribute('clid', str(clid))

            # Swap with server
            resp = self.exchange('', call)

            # Verify that response is correct
            self.assert_path(resp, "response/ranking")
    def verify_shop_sentinfo(self, lid: str) -> None:
        """Send shop bookkeeping info to the server and verify it is
        acknowledged."""
        call = self.call_node()

        # Construct node
        IIDX21pc = Node.void('shop')
        call.add_child(IIDX21pc)
        IIDX21pc.set_attribute('method', 'sentinfo')
        IIDX21pc.set_attribute('lid', lid)
        IIDX21pc.set_attribute('bflg', '1')
        IIDX21pc.set_attribute('bnum', '2')
        IIDX21pc.set_attribute('ioid', '0')
        IIDX21pc.set_attribute('tax_phase', '0')

        # Swap with server
        resp = self.exchange('', call)

        # Verify that response is correct
        self.assert_path(resp, "response/shop")
    def verify_pc_get(self, ref_id: str, card_id: str, lid: str) -> Dict[str, Any]:
        """Fetch an existing profile by card/ref ID, verify its structure and
        stored name, and return the interesting fields (extid, dans, deller)."""
        call = self.call_node()

        # Construct node
        IIDX21pc = Node.void('pc')
        call.add_child(IIDX21pc)
        IIDX21pc.set_attribute('rid', ref_id)
        IIDX21pc.set_attribute('did', ref_id)
        IIDX21pc.set_attribute('pid', '51')
        IIDX21pc.set_attribute('lid', lid)
        IIDX21pc.set_attribute('cid', card_id)
        IIDX21pc.set_attribute('method', 'get')
        IIDX21pc.set_attribute('ctype', '1')

        # Swap with server
        resp = self.exchange('', call)

        # Verify that the response is correct
        self.assert_path(resp, "response/pc/pcdata/@name")
        self.assert_path(resp, "response/pc/pcdata/@pid")
        self.assert_path(resp, "response/pc/pcdata/@id")
        self.assert_path(resp, "response/pc/pcdata/@idstr")
        self.assert_path(resp, "response/pc/packinfo")
        self.assert_path(resp, "response/pc/commonboss/@deller")
        self.assert_path(resp, "response/pc/commonboss/@orb")
        self.assert_path(resp, "response/pc/commonboss/@baron")
        self.assert_path(resp, "response/pc/secret/flg1")
        self.assert_path(resp, "response/pc/secret/flg2")
        self.assert_path(resp, "response/pc/secret/flg3")
        self.assert_path(resp, "response/pc/achievements/trophy")
        self.assert_path(resp, "response/pc/skin")
        self.assert_path(resp, "response/pc/grade")
        self.assert_path(resp, "response/pc/rlist")
        self.assert_path(resp, "response/pc/step")

        # The profile must come back with the name we registered it under.
        name = resp.child('pc/pcdata').attribute('name')
        if name != self.NAME:
            raise Exception('Invalid name \'{}\' returned for Ref ID \'{}\''.format(name, ref_id))

        return {
            'extid': int(resp.child('pc/pcdata').attribute('id')),
            'sp_dan': int(resp.child('pc/grade').attribute('sgid')),
            'dp_dan': int(resp.child('pc/grade').attribute('dgid')),
            'deller': int(resp.child('pc/commonboss').attribute('deller')),
        }
    def verify_music_getrank(self, extid: int) -> Dict[int, Dict[int, Dict[str, int]]]:
        """Fetch our own scores for singles and doubles.

        Returns a mapping of music ID -> chart -> score dict, where chart is
        0-2 (SP N/H/A), 3-5 (DP N/H/A) or 6 (beginner, clear status only)."""
        scores: Dict[int, Dict[int, Dict[str, int]]] = {}
        for cltype in [0, 1]:  # singles, doubles
            call = self.call_node()

            # Construct node
            IIDX21music = Node.void('music')
            call.add_child(IIDX21music)
            IIDX21music.set_attribute('method', 'getrank')
            IIDX21music.set_attribute('iidxid', str(extid))
            IIDX21music.set_attribute('cltype', str(cltype))

            # Swap with server
            resp = self.exchange('', call)

            self.assert_path(resp, "response/music/style")
            if int(resp.child('music/style').attribute('type')) != cltype:
                raise Exception('Returned wrong clear type for IIDX21music.getrank!')

            for child in resp.child('music').children:
                if child.name == 'm':
                    # 'm' nodes pack one song's N/H/A clear status, EX score
                    # and miss count into a single value array.
                    if child.value[0] != -1:
                        raise Exception('Got non-self score back when requesting only our scores!')

                    music_id = child.value[1]
                    normal_clear_status = child.value[2]
                    hyper_clear_status = child.value[3]
                    another_clear_status = child.value[4]
                    normal_ex_score = child.value[5]
                    hyper_ex_score = child.value[6]
                    another_ex_score = child.value[7]
                    normal_miss_count = child.value[8]
                    hyper_miss_count = child.value[9]
                    another_miss_count = child.value[10]

                    # Map the three difficulties to global chart indexes.
                    if cltype == 0:
                        normal = 0
                        hyper = 1
                        another = 2
                    else:
                        normal = 3
                        hyper = 4
                        another = 5

                    if music_id not in scores:
                        scores[music_id] = {}

                    scores[music_id][normal] = {
                        'clear_status': normal_clear_status,
                        'ex_score': normal_ex_score,
                        'miss_count': normal_miss_count,
                    }
                    scores[music_id][hyper] = {
                        'clear_status': hyper_clear_status,
                        'ex_score': hyper_ex_score,
                        'miss_count': hyper_miss_count,
                    }
                    scores[music_id][another] = {
                        'clear_status': another_clear_status,
                        'ex_score': another_ex_score,
                        'miss_count': another_miss_count,
                    }
                elif child.name == 'b':
                    # 'b' nodes carry beginner chart clear status only.
                    music_id = child.value[0]
                    clear_status = child.value[1]

                    scores[music_id][6] = {
                        'clear_status': clear_status,
                        'ex_score': -1,
                        'miss_count': -1,
                    }

        return scores
    def verify_pc_save(self, extid: int, card: str, lid: str) -> None:
        """Save a profile with fixed settings and event/deller progress and
        verify the server acknowledges it."""
        call = self.call_node()

        # Construct node (player option/settings attributes use fixed
        # representative values).
        IIDX21pc = Node.void('pc')
        call.add_child(IIDX21pc)
        IIDX21pc.set_attribute('achi', '449')
        IIDX21pc.set_attribute('opt', '8208')
        IIDX21pc.set_attribute('gpos', '0')
        IIDX21pc.set_attribute('gno', '8')
        IIDX21pc.set_attribute('timing', '0')
        IIDX21pc.set_attribute('help', '0')
        IIDX21pc.set_attribute('sdhd', '0')
        IIDX21pc.set_attribute('sdtype', '0')
        IIDX21pc.set_attribute('notes', '31.484070')
        IIDX21pc.set_attribute('pase', '0')
        IIDX21pc.set_attribute('judge', '0')
        IIDX21pc.set_attribute('opstyle', '1')
        IIDX21pc.set_attribute('hispeed', '5.771802')
        IIDX21pc.set_attribute('mode', '6')
        IIDX21pc.set_attribute('pmode', '0')
        IIDX21pc.set_attribute('lift', '60')
        IIDX21pc.set_attribute('judgeAdj', '0')
        IIDX21pc.set_attribute('method', 'save')
        IIDX21pc.set_attribute('iidxid', str(extid))
        IIDX21pc.set_attribute('lid', lid)
        IIDX21pc.set_attribute('cid', card)
        IIDX21pc.set_attribute('cltype', '0')
        IIDX21pc.set_attribute('ctype', '1')

        # Event progress children.
        pyramid = Node.void('pyramid')
        IIDX21pc.add_child(pyramid)
        pyramid.set_attribute('point', '290')
        destiny_catharsis = Node.void('destiny_catharsis')
        IIDX21pc.add_child(destiny_catharsis)
        destiny_catharsis.set_attribute('point', '290')
        bemani_summer_collabo = Node.void('bemani_summer_collabo')
        IIDX21pc.add_child(bemani_summer_collabo)
        bemani_summer_collabo.set_attribute('point', '290')
        deller = Node.void('deller')
        IIDX21pc.add_child(deller)
        deller.set_attribute('deller', '150')

        # Swap with server
        resp = self.exchange('', call)
        self.assert_path(resp, "response/pc")
    def verify_music_reg(self, extid: int, lid: str, score: Dict[str, Any]) -> None:
        """Register a score (with a dummy 64-byte ghost) for the song/chart
        described by *score* and verify rank/ranklist data is returned."""
        call = self.call_node()

        # Construct node
        IIDX21music = Node.void('music')
        call.add_child(IIDX21music)
        IIDX21music.set_attribute('convid', '-1')
        IIDX21music.set_attribute('iidxid', str(extid))
        IIDX21music.set_attribute('pgnum', str(score['pgnum']))
        IIDX21music.set_attribute('pid', '51')
        IIDX21music.set_attribute('rankside', '1')
        IIDX21music.set_attribute('cflg', str(score['clear_status']))
        IIDX21music.set_attribute('method', 'reg')
        IIDX21music.set_attribute('gnum', str(score['gnum']))
        IIDX21music.set_attribute('clid', str(score['chart']))
        IIDX21music.set_attribute('mnum', str(score['mnum']))
        IIDX21music.set_attribute('is_death', '0')
        IIDX21music.set_attribute('theory', '0')
        IIDX21music.set_attribute('shopconvid', lid)
        IIDX21music.set_attribute('mid', str(score['id']))
        IIDX21music.set_attribute('shopflg', '1')
        IIDX21music.add_child(Node.binary('ghost', bytes([1] * 64)))

        # Swap with server
        resp = self.exchange('', call)
        self.assert_path(resp, "response/music/shopdata/@rank")
        self.assert_path(resp, "response/music/ranklist/data")
    def verify_music_appoint(self, extid: int, musicid: int, chart: int) -> Tuple[int, bytes]:
        """Fetch our own score and ghost data for one song/chart; returns
        (ex score, raw ghost bytes)."""
        call = self.call_node()

        # Construct node
        IIDX21music = Node.void('music')
        call.add_child(IIDX21music)
        IIDX21music.set_attribute('clid', str(chart))
        IIDX21music.set_attribute('method', 'appoint')
        IIDX21music.set_attribute('ctype', '0')
        IIDX21music.set_attribute('iidxid', str(extid))
        IIDX21music.set_attribute('subtype', '')
        IIDX21music.set_attribute('mid', str(musicid))

        # Swap with server
        resp = self.exchange('', call)
        self.assert_path(resp, "response/music/mydata/@score")

        return (
            int(resp.child('music/mydata').attribute('score')),
            resp.child_value('music/mydata'),
        )
    def verify_pc_reg(self, ref_id: str, card_id: str, lid: str) -> int:
        """Create a new profile named self.NAME; returns the extid the
        server assigned to it."""
        call = self.call_node()

        # Construct node
        IIDX21pc = Node.void('pc')
        call.add_child(IIDX21pc)
        IIDX21pc.set_attribute('lid', lid)
        IIDX21pc.set_attribute('pid', '51')
        IIDX21pc.set_attribute('method', 'reg')
        IIDX21pc.set_attribute('cid', card_id)
        IIDX21pc.set_attribute('did', ref_id)
        IIDX21pc.set_attribute('rid', ref_id)
        IIDX21pc.set_attribute('name', self.NAME)

        # Swap with server
        resp = self.exchange('', call)

        # Verify nodes that cause crashes if they don't exist
        self.assert_path(resp, "response/pc/@id")
        self.assert_path(resp, "response/pc/@id_str")

        return int(resp.child('pc').attribute('id'))
def verify_pc_playstart(self) -> None:
    """
    Send a 'pc.playstart' event (cardless play on side 1) and verify the
    server acknowledges it with a 'response/pc' node.
    """
    call = self.call_node()
    # Construct node
    IIDX21pc = Node.void('pc')
    IIDX21pc.set_attribute('method', 'playstart')
    IIDX21pc.set_attribute('side', '1')
    call.add_child(IIDX21pc)
    # Swap with server
    resp = self.exchange('', call)
    # Verify nodes that cause crashes if they don't exist
    self.assert_path(resp, "response/pc")
def verify_music_play(self, score: Dict[str, int]) -> None:
    """
    Report an anonymous (cardless) play of one song via 'music.play'.

    *score* must carry 'id', 'chart', 'clear_status', 'pgnum' and 'gnum'.
    Verifies the server echoes back song id/chart plus clear/full-combo rates.
    """
    call = self.call_node()
    # Construct node
    IIDX21music = Node.void('music')
    IIDX21music.set_attribute('opt', '64')
    IIDX21music.set_attribute('clid', str(score['chart']))
    IIDX21music.set_attribute('mid', str(score['id']))
    IIDX21music.set_attribute('gnum', str(score['gnum']))
    IIDX21music.set_attribute('cflg', str(score['clear_status']))
    IIDX21music.set_attribute('pgnum', str(score['pgnum']))
    IIDX21music.set_attribute('pid', '51')
    IIDX21music.set_attribute('method', 'play')
    call.add_child(IIDX21music)
    # Swap with server
    resp = self.exchange('', call)
    # Verify nodes that cause crashes if they don't exist
    self.assert_path(resp, "response/music/@clid")
    self.assert_path(resp, "response/music/@crate")
    self.assert_path(resp, "response/music/@frate")
    self.assert_path(resp, "response/music/@mid")
def verify_pc_playend(self) -> None:
    """
    Send a 'pc.playend' event closing out the current (cardless) session and
    verify the server acknowledges it with a 'response/pc' node.
    """
    call = self.call_node()
    # Construct node
    IIDX21pc = Node.void('pc')
    IIDX21pc.set_attribute('cltype', '0')
    IIDX21pc.set_attribute('bookkeep', '0')
    IIDX21pc.set_attribute('mode', '1')
    IIDX21pc.set_attribute('method', 'playend')
    call.add_child(IIDX21pc)
    # Swap with server
    resp = self.exchange('', call)
    # Verify nodes that cause crashes if they don't exist
    self.assert_path(resp, "response/pc")
def verify_music_breg(self, iidxid: int, score: Dict[str, int]) -> None:
    """
    Save a beginner-chart score for profile *iidxid* via 'music.breg'.

    *score* must carry 'id', 'clear_status', 'pgnum' and 'gnum' (beginner
    charts have no chart id or miss count in this request).
    """
    call = self.call_node()
    # Construct node
    IIDX21music = Node.void('music')
    IIDX21music.set_attribute('gnum', str(score['gnum']))
    IIDX21music.set_attribute('iidxid', str(iidxid))
    IIDX21music.set_attribute('mid', str(score['id']))
    IIDX21music.set_attribute('method', 'breg')
    IIDX21music.set_attribute('pgnum', str(score['pgnum']))
    IIDX21music.set_attribute('cflg', str(score['clear_status']))
    call.add_child(IIDX21music)
    # Swap with server
    resp = self.exchange('', call)
    # Verify nodes that cause crashes if they don't exist
    self.assert_path(resp, "response/music")
def verify_grade_raised(self, iidxid: int, shop_name: str, dantype: str) -> None:
    """
    Report a DAN course clear ('grade.raised') for profile *iidxid*.

    *dantype* selects single ('sp') or double ('dp') play; it drives both the
    'cflg' and 'gtype' attributes below. The hard-coded gid=5 / achi=50 values
    are what the verify() flow later checks against.
    """
    call = self.call_node()
    # Construct node
    IIDX21grade = Node.void('grade')
    IIDX21grade.set_attribute('opname', shop_name)
    IIDX21grade.set_attribute('is_mirror', '0')
    IIDX21grade.set_attribute('oppid', '51')
    IIDX21grade.set_attribute('achi', '50')
    IIDX21grade.set_attribute('cflg', '4' if dantype == 'sp' else '3')
    IIDX21grade.set_attribute('gid', '5')
    IIDX21grade.set_attribute('iidxid', str(iidxid))
    IIDX21grade.set_attribute('gtype', '0' if dantype == 'sp' else '1')
    IIDX21grade.set_attribute('is_ex', '0')
    IIDX21grade.set_attribute('pside', '0')
    IIDX21grade.set_attribute('method', 'raised')
    call.add_child(IIDX21grade)
    # Swap with server
    resp = self.exchange('', call)
    # Verify nodes that cause crashes if they don't exist
    self.assert_path(resp, "response/grade/@pnum")
def verify(self, cardid: Optional[str]) -> None:
    """
    End-to-end smoke test of the game's server flow.

    Walks the boot sequence, card registration (or lookup when *cardid* is
    given), PIN handling, score registration/retrieval, cardless play, shop
    name changes, beginner and DAN score saving, and finally PASELI handling.
    When *cardid* is None a fresh random card is used and the full score
    checks run; with an existing card most score checks are skipped.
    """
    # Verify boot sequence is okay
    self.verify_services_get(
        expected_services=[
            'pcbtracker',
            'pcbevent',
            'local',
            'message',
            'facility',
            'cardmng',
            'package',
            'posevent',
            'pkglist',
            'dlstatus',
            'eacoin',
            'lobby',
            'ntp',
            'keepalive'
        ]
    )
    paseli_enabled = self.verify_pcbtracker_alive()
    self.verify_package_list()
    self.verify_message_get()
    lid = self.verify_facility_get()
    self.verify_pcbevent_put()
    self.verify_shop_getname(lid)
    self.verify_pc_common()
    self.verify_music_crate()
    self.verify_shop_getconvention(lid)
    self.verify_ranking_getranker(lid)
    self.verify_shop_sentinfo(lid)
    # Verify card registration and profile lookup
    if cardid is not None:
        card = cardid
    else:
        card = self.random_card()
        print("Generated random card ID {} for use.".format(card))
    if cardid is None:
        # Fresh card: exercise the full register-then-query sequence.
        self.verify_cardmng_inquire(card, msg_type='unregistered', paseli_enabled=paseli_enabled)
        ref_id = self.verify_cardmng_getrefid(card)
        if len(ref_id) != 16:
            raise Exception('Invalid refid \'{}\' returned when registering card'.format(ref_id))
        if ref_id != self.verify_cardmng_inquire(card, msg_type='new', paseli_enabled=paseli_enabled):
            raise Exception('Invalid refid \'{}\' returned when querying card'.format(ref_id))
        self.verify_pc_reg(ref_id, card, lid)
        self.verify_pc_get(ref_id, card, lid)
    else:
        print("Skipping new card checks for existing card")
        ref_id = self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled)
    # Verify pin handling and return card handling
    self.verify_cardmng_authpass(ref_id, correct=True)
    self.verify_cardmng_authpass(ref_id, correct=False)
    if ref_id != self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled):
        raise Exception('Invalid refid \'{}\' returned when querying card'.format(ref_id))
    if cardid is None:
        # Verify score handling
        profile = self.verify_pc_get(ref_id, card, lid)
        if profile['sp_dan'] != -1:
            raise Exception('Somehow has SP DAN ranking on new profile!')
        if profile['dp_dan'] != -1:
            raise Exception('Somehow has DP DAN ranking on new profile!')
        if profile['deller'] != 0:
            raise Exception('Somehow has deller on new profile!')
        scores = self.verify_music_getrank(profile['extid'])
        if len(scores.keys()) > 0:
            raise Exception('Somehow have scores on a new profile!')
        # Phase 1 writes initial scores; phase 2 overwrites with better/worse
        # attempts and checks the server keeps the correct aggregate.
        for phase in [1, 2]:
            if phase == 1:
                dummyscores = [
                    # An okay score on a chart
                    {
                        'id': 1000,
                        'chart': 2,
                        'clear_status': 4,
                        'pgnum': 123,
                        'gnum': 123,
                        'mnum': 5,
                    },
                    # A good score on an easier chart of the same song
                    {
                        'id': 1000,
                        'chart': 0,
                        'clear_status': 7,
                        'pgnum': 246,
                        'gnum': 0,
                        'mnum': 0,
                    },
                    # A bad score on a hard chart
                    {
                        'id': 1003,
                        'chart': 2,
                        'clear_status': 1,
                        'pgnum': 10,
                        'gnum': 20,
                        'mnum': 50,
                    },
                    # A terrible score on an easy chart
                    {
                        'id': 1003,
                        'chart': 0,
                        'clear_status': 1,
                        'pgnum': 2,
                        'gnum': 5,
                        'mnum': 75,
                    },
                ]
            if phase == 2:
                dummyscores = [
                    # A better score on the same chart
                    {
                        'id': 1000,
                        'chart': 2,
                        'clear_status': 5,
                        'pgnum': 234,
                        'gnum': 234,
                        'mnum': 3,
                    },
                    # A worse score on another same chart
                    {
                        'id': 1000,
                        'chart': 0,
                        'clear_status': 4,
                        'pgnum': 123,
                        'gnum': 123,
                        'mnum': 35,
                        # 'expected_*' keys mean the server should keep the
                        # previous (better) values instead of this attempt.
                        'expected_clear_status': 7,
                        'expected_ex_score': 492,
                        'expected_miss_count': 0,
                    },
                ]
            for dummyscore in dummyscores:
                self.verify_music_reg(profile['extid'], lid, dummyscore)
            self.verify_pc_visit(profile['extid'], lid)
            self.verify_pc_save(profile['extid'], card, lid)
            scores = self.verify_music_getrank(profile['extid'])
            for score in dummyscores:
                data = scores.get(score['id'], {}).get(score['chart'], None)
                if data is None:
                    raise Exception('Expected to get score back for song {} chart {}!'.format(score['id'], score['chart']))
                if 'expected_ex_score' in score:
                    expected_score = score['expected_ex_score']
                else:
                    # EX score formula: 2 points per PGreat, 1 per Great.
                    expected_score = (score['pgnum'] * 2) + score['gnum']
                if 'expected_clear_status' in score:
                    expected_clear_status = score['expected_clear_status']
                else:
                    expected_clear_status = score['clear_status']
                if 'expected_miss_count' in score:
                    expected_miss_count = score['expected_miss_count']
                else:
                    expected_miss_count = score['mnum']
                if data['ex_score'] != expected_score:
                    raise Exception('Expected a score of \'{}\' for song \'{}\' chart \'{}\' but got score \'{}\''.format(
                        expected_score, score['id'], score['chart'], data['ex_score'],
                    ))
                if data['clear_status'] != expected_clear_status:
                    raise Exception('Expected a clear status of \'{}\' for song \'{}\' chart \'{}\' but got clear status \'{}\''.format(
                        expected_clear_status, score['id'], score['chart'], data['clear_status'],
                    ))
                if data['miss_count'] != expected_miss_count:
                    raise Exception('Expected a miss count of \'{}\' for song \'{}\' chart \'{}\' but got miss count \'{}\''.format(
                        expected_miss_count, score['id'], score['chart'], data['miss_count'],
                    ))
                # Verify we can fetch our own ghost
                ex_score, ghost = self.verify_music_appoint(profile['extid'], score['id'], score['chart'])
                if ex_score != expected_score:
                    raise Exception('Expected a score of \'{}\' for song \'{}\' chart \'{}\' but got score \'{}\''.format(
                        expected_score, score['id'], score['chart'], data['ex_score'],
                    ))
                if len(ghost) != 64:
                    raise Exception('Wrong ghost length {} for ghost!'.format(len(ghost)))
                # verify_music_reg uploads a ghost of 64 bytes all equal to 1.
                for g in ghost:
                    if g != 0x01:
                        raise Exception('Got back wrong ghost data for song \'{}\' chart \'{}\''.format(score['id'], score['chart']))
            # Sleep so we don't end up putting in score history on the same second
            time.sleep(1)
        # Verify that a player without a card can play
        self.verify_pc_playstart()
        self.verify_music_play({
            'id': 1000,
            'chart': 2,
            'clear_status': 4,
            'pgnum': 123,
            'gnum': 123,
        })
        self.verify_pc_playend()
        # Verify shop name change setting
        self.verify_shop_savename(lid, 'newname1')
        newname = self.verify_shop_getname(lid)
        if newname != 'newname1':
            raise Exception('Invalid shop name returned after change!')
        self.verify_shop_savename(lid, 'newname2')
        newname = self.verify_shop_getname(lid)
        if newname != 'newname2':
            raise Exception('Invalid shop name returned after change!')
        # Verify beginner score saving
        self.verify_music_breg(profile['extid'], {
            'id': 1000,
            'clear_status': 4,
            'pgnum': 123,
            'gnum': 123,
        })
        scores = self.verify_music_getrank(profile['extid'])
        if 1000 not in scores:
            raise Exception('Didn\'t get expected scores back for song {} beginner chart!'.format(1000))
        # Beginner chart is stored under chart id 6 with no ex score/miss count.
        if 6 not in scores[1000]:
            raise Exception('Didn\'t get beginner score back for song {}!'.format(1000))
        if scores[1000][6] != {'clear_status': 4, 'ex_score': -1, 'miss_count': -1}:
            raise Exception('Didn\'t get correct status back from beginner save!')
        # Verify DAN score saving and loading
        self.verify_grade_raised(profile['extid'], newname, 'sp')
        self.verify_grade_raised(profile['extid'], newname, 'dp')
        profile = self.verify_pc_get(ref_id, card, lid)
        if profile['sp_dan'] != 5:
            raise Exception('Got wrong DAN score back for SP!')
        if profile['dp_dan'] != 5:
            raise Exception('Got wrong DAN score back for DP!')
    else:
        print("Skipping score checks for existing card")
    # Verify paseli handling
    if paseli_enabled:
        print("PASELI enabled for this PCBID, executing PASELI checks")
    else:
        print("PASELI disabled for this PCBID, skipping PASELI checks")
        return
    sessid, balance = self.verify_eacoin_checkin(card)
    if balance == 0:
        print("Skipping PASELI consume check because card has 0 balance")
    else:
        self.verify_eacoin_consume(sessid, balance, random.randint(0, balance))
    self.verify_eacoin_checkout(sessid)
| [
"dragonminded@dragonminded.com"
] | dragonminded@dragonminded.com |
5c50e2dbdf7f07019552bfd372b5eb6d587ec5aa | 7c2cb2a564b4401c78f1591f6afdb6001b667357 | /13_force_fields_and_classical_md/4_mm_md/2_atomistic_md/2_Au_slab/case5/run_aa_md_state.py | fd1565ef26aa9a5b9f4c207006f672f8fb3333d8 | [
"CC0-1.0"
] | permissive | compchem-cybertraining/Tutorials_Libra | 9711d06f23d0017fc1cbc25a223e327e9584da96 | 6b004fea7b051b0060820facc7e884b377a32343 | refs/heads/master | 2023-07-06T18:24:07.237364 | 2023-07-02T23:57:33 | 2023-07-02T23:57:33 | 248,611,599 | 14 | 20 | CC0-1.0 | 2023-06-11T16:18:26 | 2020-03-19T21:48:10 | Jupyter Notebook | UTF-8 | Python | false | false | 4,696 | py | #*********************************************************************************
#* Copyright (C) 2016-2021 Alexey V. Akimov
#*
#* This file is distributed under the terms of the GNU General Public License
#* as published by the Free Software Foundation, either version 3 of
#* the License, or (at your option) any later version.
#* See the file LICENSE in the root directory of this distribution
#* or <http://www.gnu.org/licenses/>.
#*
#*********************************************************************************/
###################################################################
# This is a classical all-atomic MD
###################################################################
import sys
import cmath
import math
import os
if sys.platform=="cygwin":
from cyglibra_core import *
elif sys.platform=="linux" or sys.platform=="linux2":
from liblibra_core import *
from libra_py import *
def main():
    """
    Run a classical all-atomic MD workflow with Libra.

    Builds the Au slab system from 'au.pdb', assigns a UFF vdW force field,
    performs a short NPT cooling stage, then runs production MD while writing
    energies to text logs and coordinates to xyz trajectory files.
    """
    rnd = Random()
    #--------------------- Initialization ----------------------
    # Create Universe and populate it
    U = Universe(); LoadPT.Load_PT(U, os.getcwd()+"/elements.txt")
    # Create force field
    # uff = ForceField({"mb_functional":"LJ_Coulomb","R_vdw_on":40.0,"R_vdw_off":55.0 }) # this can not be used for PBC
    uff = ForceField({"R_vdw_on":10.0,"R_vdw_off":12.0, "mb_functional":"vdw_LJ1","mb_excl_functional":"vdw_LJ1"})
    LoadUFF.Load_UFF(uff,"uff.dat")
    # Create molecular system and initialize the properties
    syst = System()
    LoadMolecule.Load_Molecule(U, syst, "au.pdb", "true_pdb")
    syst.determine_functional_groups(0)  # do not assign rings
    syst.init_fragments()
    print("Number of atoms in the system = ", syst.Number_of_atoms)
    print("Number of bonds in the system = ", syst.Number_of_bonds)
    print("Number of angles in the system = ", syst.Number_of_angles)
    print("Number of dihedrals in the system = ", syst.Number_of_dihedrals)
    print("Number of impropers in the system = ", syst.Number_of_impropers)
    # Atom list is 1-based for set_interactions_for_atoms below.
    atlst1 = list(range(1,syst.Number_of_atoms+1))
    # Periodic box vectors for the slab (units per Libra convention —
    # NOTE(review): presumably Bohr, confirm against the input geometry).
    T1 = VECTOR(32.6970772436, 0.0, 0.0)
    T2 = VECTOR(16.3485386218, 28.3164995224, 0.0)
    T3 = VECTOR(0.0, 0.0, 26.6970517757)
    syst.init_box(T1, T2, T3)
    # Creating Hamiltonian and initialize it
    ham = Hamiltonian_Atomistic(1, 3*syst.Number_of_atoms)
    ham.set_Hamiltonian_type("MM")
    ham.set_interactions_for_atoms(syst, atlst1, atlst1, uff, 1, 0)  # 0 - verb, 0 - assign_rings
    ham.show_interactions_statistics()
    # Bind Hamiltonian and the system
    ham.set_system(syst); ham.compute(); print("Energy = ", ham.H(0,0), " a.u.")
    # Electronic DOFs
    el = Electronic(1,0)
    # Nuclear DOFs
    mol = Nuclear(3*syst.Number_of_atoms)
    # Initialize MD variables
    nve_md.nve_md_init(syst, mol, el, ham)
    #=================== Propagation ====================
    ########################## Cooling #################################
    md = MD({"max_step":10,"ensemble":"NPT","integrator":"DLML","terec_exp_size":10,"dt":20.0,"n_medium":1,"n_fast":1,"n_outer":1})
    md.show_info()
    # Thermostat
    therm = Thermostat({"Temperature":278.0,"Q":100.0,"thermostat_type":"Nose-Hoover","nu_therm":0.001,"NHC_size":1})
    therm.show_info()
    # Barostat
    baro = Barostat({"W":10000.0,"Pressure":1.0,"nu_baro":0.001})
    baro.show_info()
    ST = State()
    ST.set_system(syst)
    ST.set_thermostat(therm)
    ST.set_barostat(baro)
    ST.set_md(md)
    ST.init_md(mol, el, ham, rnd)
    # Truncate the cooling energy log before appending per-step records.
    f = open("_en_cooling.txt","w")
    f.close()
    for i in range(10):
        syst.set_atomic_q(mol.q)
        syst.print_xyz("_mol_cooling.xyz",i)
        ST.run_md(mol, el, ham)
        ekin = ST.E_kin; epot = ST.E_pot
        ST.cool()
        f = open("_en_cooling.txt","a")
        f.write("i= %3i ekin= %8.5f epot= %8.5f etot= %8.5f H_NP= %8.5f curr_T= %8.5f\n" % (i, ekin, epot, ST.E_tot, ST.H_NP, ST.curr_T ))
        f.close()
    ########################## Production MD #################################
    # Re-draw velocities at the production temperature after cooling.
    syst.init_atom_velocities(300.0, rnd)  # must be this !!!
    # syst.init_fragment_velocities(300.0, rnd)
    f = open("_en_md.txt","w")
    f.close()
    md.dt = 40.0
    md.max_step = 10
    for i in range(1000):
        syst.set_atomic_q(mol.q)
        syst.print_xyz("_mol_md.xyz",i)
        ST.run_md(mol, el, ham)
        f = open("_en_md.txt","a")
        f.write("i= %3i ekin= %8.5f epot= %8.5f etot= %8.5f H_NP= %8.5f curr_T= %8.5f\n" % (i, ST.E_kin, ST.E_pot, ST.E_tot, ST.H_NP, ST.curr_T ))
        f.close()
main()
| [
"alexvakimov@gmail.com"
] | alexvakimov@gmail.com |
81b071b194eaf7cf41f099b4b2ae69ce0fdb01f6 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03168/s984146914.py | d29e8d5cf735e05031e772be32479e5b3e032425 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | n = int(input())
ns = list(map(float, input().split()))
dp = [[0 for j in range(n+1)] for i in range(n+1)]
dp[0][0] = 1
for i in range(1,n+1):
for j in range(0, i + 1):
dp[i][j] = dp[i-1][j] * (1-ns[i-1]) + dp[i-1][j-1] * ns[i-1]
print(sum(dp[-1][(n+1)//2:])) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
bd9cc1a5c9620aa8b26ec5cdc7435b3997c6cc3c | e4ce5db8f1c6553cd57754220af8927c09a1e060 | /translate_quest_yandextr.py | 4a9bfb292b9a74f2dc828d1e50a55d5bc06a3b66 | [] | no_license | brouhahaha/AutoSweData_LexTyp | 319c510fc4fd931c5d5d0276f5997a7e7f92e0d1 | c5dac050383c421c52e7c44012efc1b818d29084 | refs/heads/master | 2020-05-27T07:42:58.586884 | 2019-05-25T07:51:47 | 2019-05-25T07:51:47 | 188,534,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,962 | py | import requests
import json
import xlrd
def yandex_translate_single(to_translate, source_language, target_language, reverse=False):
    """
    Wrapper around the Yandex Translate HTTP API: translate one string.

    :param to_translate: text to translate
    :param source_language: source language code (e.g. 'en')
    :param target_language: target language code (e.g. 'sv')
    :param reverse: if True, swap source and target before translating
    :return: the first translated string from the API response
    """
    if reverse:
        source_language, target_language = target_language, source_language
    language_direction = f"{source_language}-{target_language}"
    # NOTE(review): API key is committed in source — prefer loading it from
    # an environment variable or config file.
    translate_params = {'key': 'trnsl.1.1.20190303T082102Z.f7e151ab791be4f8.b3803e2ac222bee403a5c1e67682be8e9a682065',
                        'text': to_translate,
                        'lang': language_direction}
    translate_request = requests.get('https://translate.yandex.net/api/v1.5/tr.json/translate', translate_params)
    translation_data = json.loads(translate_request.text)
    return translation_data['text'][0]
def get_words(filename):
    """
    Read the third sheet of an Excel workbook and split its non-empty cells
    into adjectives (first row) and nouns (all other rows).

    :param filename: path to the .xlsx questionnaire workbook
    :return: dict with 'nouns' and 'adjectives' lists of cell values
    """
    data_to_tr = {'nouns': [], 'adjectives' : []}
    rb = xlrd.open_workbook(filename)  # 'questionnaire_size.xlsx'
    # NOTE(review): sheet index 2 is hard-coded — assumes the questionnaire
    # always lives on the third sheet.
    sheet = rb.sheet_by_index(2)
    for rownum in range(sheet.nrows):
        row = sheet.row_values(rownum)
        for c_el in row:
            if rownum == 0 and c_el != '':
                data_to_tr['adjectives'].append(c_el)
            elif c_el != '':
                data_to_tr['nouns'].append(c_el)
    return data_to_tr
def translate_some_words(words):
    """Translate each English word in *words* to Swedish, preserving order."""
    return [yandex_translate_single(word, 'en', 'sv') for word in words]
def new_questionnire(data_to_tr):
    """Build a translated questionnaire mirroring *data_to_tr*'s structure."""
    translated = {}
    translated['nouns'] = translate_some_words(data_to_tr['nouns'])
    translated['adjectives'] = translate_some_words(data_to_tr['adjectives'])
    return translated
# Driver: load the English questionnaire workbook and print its Swedish translation.
data = get_words('анкеты на английском.xlsx')
new = new_questionnire(data)
print(new)
| [
"noreply@github.com"
] | brouhahaha.noreply@github.com |
7e278af269e00141e916b6dc210047193bcf6c78 | b7815ea6afd45613264f2aa65891eb073988a44f | /factorial.py | e732784c41b7a3a47a2124432eacaaadbb96346a | [] | no_license | CHAHAT-10/MINOR_PROGRAMS | 6bd1c60f59d4e884a6cf58c5156ac305d34eea11 | dfdbe46b8f436cacbb52f7322b8dd528078be764 | refs/heads/main | 2023-05-27T00:15:55.934209 | 2021-05-27T09:02:39 | 2021-05-27T09:02:39 | 371,307,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | num = int(input("enter a number"))
factorial = 1
if num < 0:
print("print sorry factorial does not exist for negative numbers")
elif num == 0:
print("the factorial of 0 is 1")
else:
for i in range(1,num + 1):
factorial = factorial*i
print("the factorial of",num,"is",factorial) | [
"noreply@github.com"
] | CHAHAT-10.noreply@github.com |
f8ac4729b75813f0d45b9b4d76d44d6abf8465a5 | 4742c0a1531cfd87bee4f6e4fd61b382ddd18d0c | /ML/JX/RecommendationSystem/gram_matrix.py | 6db234145de2cdf752496efa73172007d62ea831 | [] | no_license | 9321ass2/Assignment2 | 11601340afc2034a3884ed455829200fa0a45d2a | d065296f76875e542e9255202bad8b8cc123520a | refs/heads/master | 2022-07-14T22:48:40.571536 | 2019-11-24T07:21:28 | 2019-11-24T07:21:28 | 220,911,487 | 1 | 2 | null | 2021-05-06T19:45:57 | 2019-11-11T05:58:27 | CSS | UTF-8 | Python | false | false | 2,255 | py | import numpy as np
import pandas as pd
import sys, os
import math
def sigmoid(x):
    """Logistic function: map any real x into the open interval (0, 1)."""
    return 1.0 / (1.0 + math.exp(-x))
def ESRB_cal(x, y):
    """
    Similarity score between two ESRB rating codes.

    Ratings aimed at all ages are treated as identical (score 1); otherwise
    the score is the inverse of the gap between the ratings' minimum ages
    (1 when the gap is zero).
    """
    minimum_age = {'Unknown': 0, 'KA': -1, 'T': 13, 'E': -
                   1, 'AO': 18, 'EC': 3, 'M': 17, 'RP': -1, 'E10': 10}
    all_ages = ('Unknown', 'KA', 'E', 'EC', 'RP')
    if x in all_ages and y in all_ages:
        return 1
    gap = abs(minimum_age[x] - minimum_age[y])
    return 1 if gap == 0 else 1 / gap
def relevance_score_cal():
    """
    Build a 3000x3000 pairwise relevance matrix over the first 3000 games in
    '../Data_Visualize/KNN.csv'.

    For each ordered game pair the score multiplies similarity factors for
    genre (GE), ESRB rating (ES), platform (PL), review score (SC), sales
    rank proximity (SA) and release-year gap (YE). Matrix indices come from
    the 1-based rank in the CSV's first data column.
    NOTE(review): column positions (outer[1], [3], [4], [5], [8], [9], [10],
    [15]) are hard-coded against the KNN.csv layout — verify on schema change.
    """
    gram_matrix = np.zeros((3000, 3000))
    pg = pd.read_csv('../Data_Visualize/KNN.csv')
    pg = pg.head(3000)
    for outer in pg.itertuples():
        ORK = int(outer[1])
        OGE = outer[3]
        OES = outer[4]
        OPL = outer[5]
        # Two review-score columns: use the mean, or whichever is non-zero.
        OSC = (outer[8], outer[9])
        if OSC[0] * OSC[1] == 0:
            if OSC[0]+OSC[1] == 0:
                OSC = 0
            else:
                OSC = OSC[0] if OSC[1] == 0 else OSC[1]
        else:
            OSC = (OSC[0]+OSC[1])/2
        OSA = outer[10]
        OYE = outer[15]
        for inner in pg.itertuples():
            IRK = int(inner[1])
            IGE = inner[3]
            IES = inner[4]
            IPL = inner[5]
            ISC = (inner[8], inner[9])
            if ISC[0] * ISC[1] == 0:
                if ISC[0]+ISC[1] == 0:
                    ISC = 0
                else:
                    ISC = ISC[0] if ISC[1] == 0 else ISC[1]
            else:
                ISC = (ISC[0]+ISC[1])/2
            ISA = inner[10]
            IYE = inner[15]
            GE = 1 if OGE == IGE else 0
            ES = ESRB_cal(OES, IES)
            PL = 1 if OPL == IPL else 0.8
            SC = sigmoid(1/abs(OSC-ISC)) if OSC != ISC else 1
            temp = abs(OSA-ISA)
            SA = 1 if temp<10 else 0
            YE = 1/abs(OYE-IYE) if OYE != IYE else 1
            relevance_score = GE*ES*PL*SC*SA*YE
            gram_matrix[ORK-1, IRK-1] = relevance_score
        # Progress report once per outer row — NOTE(review): indentation was
        # lost in this copy; placed at the outer-loop level (one print per row).
        print('Calculation now proceeding...', ORK)
    return gram_matrix
# Driver: compute the full relevance matrix and persist it as CSV for later use.
final_matrix = relevance_score_cal()
final_matrix = pd.DataFrame(final_matrix)
final_matrix.to_csv('../DataSet/relevance_matrix.csv')
| [
"noreply@github.com"
] | 9321ass2.noreply@github.com |
8b19b20368b93de5e41cabbcfb10f95c3cc21443 | f01fedea632515c6e523939689081e18f8b45699 | /misc_workflow/run_gtools_make_matrices.py | e4919a8c015a54f743853e7342b569b23d860f55 | [] | no_license | WeiSong-bio/Bioinformatics--------------- | 9c6e7a05e9234b03b9471421e8901c7ff65f7dfe | a7ab7b3536bb8cc7fbe902d854694c769a93170c | refs/heads/master | 2020-06-16T01:01:24.602945 | 2018-07-25T22:22:41 | 2018-07-25T22:22:41 | 195,438,451 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,567 | py | #/usr/bin/env python
'''
This script will run gtools in order to generate coverage matrices for a large number of bedgraphs with corresponding region BED files
'''
import os
import sys
import csv
import toolbox as tb # my custom functions
class projectLocs:
    '''
    An object class to hold locations of places in the project
    '''
    def __init__(self):
        # Root of the analysis project; all other paths derive from it.
        self.project_dir = "/ifs/home/kellys04/projects/Smithlab_ChIpSeq_2016-03-10/project_notes/methylation_profiles"
        # Per-sample methylation coverage bedgraphs.
        self.bedgraphs_dir = os.path.join(self.project_dir, "bedgraphs")
        # BED files of gene TSS +/- 5Kbp regions per sample.
        self.regions_dir = os.path.join(self.project_dir, "sample_gene_TSS_5Kbp_regions")
        self.matrix_dir = os.path.join(self.project_dir, "matrices")
        # Created on disk if missing; the toolbox helper returns the path.
        self.matrix_logdir = tb.mkdir_p(os.path.join(self.matrix_dir, "logs"), return_path=True)
        self.samplesheet = os.path.join(self.project_dir, "microarray_methlyation_samplesheet_3.tsv")
def file_match(dir, start_pattern = '', end_pattern = '', contains_pattern = ''):
    '''
    Return full paths of entries in *dir* whose names satisfy all three
    patterns (prefix, suffix, substring). Empty patterns match everything.
    NOTE: Doesn't search recursively!
    '''
    # ('dir' shadows the builtin, but renaming it would change the interface.)
    return [
        os.path.join(dir, entry)
        for entry in os.listdir(dir)
        if entry.startswith(start_pattern)
        and entry.endswith(end_pattern)
        and contains_pattern in entry
    ]
def samplesheet2dict(samplesheet_file, sample_colname, sep = '\t'):
    '''
    Create a nested dict for each sample in a standard TSV samplesheet with headers
    NOTE: If duplicate column headers exist, latter will overwrite former
    '''
    import csv
    by_sample = {}
    with open(samplesheet_file) as handle:
        for row in csv.DictReader(handle, delimiter = sep):
            by_sample[row[sample_colname]] = row
    return by_sample
def print_samplesheet_dict(samplesheet_dict):
    '''
    Pretty printing for a samplesheet dict so its more readable on the console.

    Fixed: the original used Python-2-only `print key` statements and
    `.iteritems()`, which are a SyntaxError under Python 3 and inconsistent
    with the parenthesized print calls used elsewhere in this script. This
    version produces the same output on both Python 2 and 3.
    '''
    for key, value in samplesheet_dict.items():
        print(key)
        for subkey, subvalue in value.items():
            print("%s : %s" % (subkey, subvalue))
        print("\n")
# sample sheet looks like this:
# Sample_Name Methylation_name Methylation_name_D Methylation_name_R Microarray_name_D Microarray_name_R Microarray_name_LR D_Status R_Status Microarray_name_D Microarray_name_R Microarray_name_LR
# AGK PAPAGK PAPAGK.Diagnosis PAPAGK.Relapse AGK-D AGK-R AGK-LR D R AGK-Exp-D AGK-Exp-R AGK-Exp-LR
# --- Script-level configuration: project paths, samplesheet, and the
# --- ID/pattern combinations iterated over when building matrices.
# create object to hold the project locations
proj_locs = projectLocs()
# create the samplesheet dict
samplesheet_dict = samplesheet2dict(samplesheet_file = proj_locs.samplesheet, sample_colname = 'Sample_Name')
# print it to the console
# print_samplesheet_dict(samplesheet_dict)
# samplesheet IDs whose values match filenames
regions_r_ID_pattern = 'Microarray_name_R' # SPN-R
regions_d_ID_pattern = 'Microarray_name_D'
methyl_r_ID_pattern = 'Methylation_name_R' # PAPSPN.Relapse
methyl_d_ID_pattern = 'Methylation_name_D'
# filename patterns
regions_top_pattern = 'Top_expressed_genes.bed'
regions_bottom_pattern = 'Bottom_expressed_genes.bed'
bedgraph_pattern = '.bedgraph'
# make lists of the items from above to iterate over
region_IDs = [regions_r_ID_pattern, regions_d_ID_pattern]
region_expressions = [regions_top_pattern, regions_bottom_pattern]
methyl_IDs = [methyl_r_ID_pattern, methyl_d_ID_pattern]
def qsub_gtools_matrix(samplesheet_dict, proj_locs, region_IDs, region_expressions, methyl_IDs, bedgraph_pattern):
    '''
    Submit a qsub job to run every gtools matrix on the combinations of bedgraphs and region files
    '''
    # parameters for qsub job
    job_threads = "1"
    job_mem = "4G"
    job_options = "-j y" # merge stderr and stdout # job_options="-l mem_free=$job_mem -l h_vmem=$job_mem -l mem_token=$job_mem"
    # One job per (sample, region ID, expression tier, methylation ID) combination.
    # NOTE(review): 'items' is never used — lookups below go through
    # samplesheet_dict[sampleID] instead.
    for sampleID, items in samplesheet_dict.iteritems():
        print(sampleID)
        for region_ID in region_IDs:
            print(samplesheet_dict[sampleID][region_ID])
            for region_expression in region_expressions:
                print(region_expression)
                for methyl_ID in methyl_IDs:
                    print(samplesheet_dict[sampleID][methyl_ID])
                    # find the BED file for the combination of Sample + genes expression
                    regions_file = file_match(proj_locs.regions_dir, start_pattern = samplesheet_dict[sampleID][region_ID], end_pattern = region_expression)[0]
                    # find the bedgraph file with the values from the methylation analysis
                    bedgraph_file = file_match(proj_locs.bedgraphs_dir, start_pattern = samplesheet_dict[sampleID][methyl_ID], end_pattern = bedgraph_pattern)[0]
                    # set up the output file naming scheme
                    output_file_base = '{}_{}_{}'.format(
                        samplesheet_dict[sampleID][region_ID],
                        region_expression,
                        samplesheet_dict[sampleID][methyl_ID]
                    )
                    output_file_basename = '{}.matrix'.format(output_file_base)
                    #
                    output_file = os.path.join(proj_locs.matrix_dir, output_file_basename)
                    # the command to run gtools
                    # NOTE(review): leading whitespace inside these shell
                    # templates was lost in this copy of the file; shell
                    # ignores it except the heredoc 'E0F' terminator, which
                    # must start its line.
                    gtools_command = '''
set -x
head "{}"
head "{}"
gtools-threaded matrix -v -i -o {} -i -nbins 51 --overlap-op value -rpkm -profile sum {} {}
head "{}"
'''.format(bedgraph_file,
                        regions_file,
                        output_file,
                        bedgraph_file,
                        regions_file,
                        output_file)
                    # the command to submit gtools to the cluster; REQUIRES BASH !
                    qsub_command = '''
mkdir -p "{}" # make sure the log dir exists
qsub -wd "{}" -o :{}/ -e :{}/ -pe threaded {} -N "{}" {} <<E0F
{}
E0F
'''.format(
                        proj_locs.matrix_logdir,
                        proj_locs.project_dir,
                        proj_locs.matrix_logdir, proj_locs.matrix_logdir,
                        job_threads,
                        output_file_base,
                        job_options,
                        gtools_command)
                    #
                    # print(qsub_command)
                    # tb.my_debugger(globals().copy())
                    tb.subprocess_cmd(qsub_command)
# run the gtools qsub functions, then exit explicitly
qsub_gtools_matrix(samplesheet_dict, proj_locs, region_IDs, region_expressions, methyl_IDs, bedgraph_pattern)
sys.exit()
| [
"noreply@github.com"
] | WeiSong-bio.noreply@github.com |
54d8f05ce63ea914d42dba82847ed43f09ac4a41 | 43c99742cdedc23166ad0d7aeeb29b453826768f | /datensparsam/libs/pdf/reportlab_adaptor.py | 44b12f737c2c4caa53409e997013d971e07cd605 | [
"MIT"
] | permissive | mtub/datensparsam | 1173dd9e8f5748aeb9b99d27d9427bba990a4ec0 | 7f746aaae0c384814a52f873d298dee4a2bcca3e | refs/heads/master | 2021-01-18T07:46:45.963652 | 2013-03-08T13:09:06 | 2013-03-08T13:09:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,549 | py | from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet
class SimplePdf(object):
    ''' Provides functionality to create simple PDF documents '''

    def __init__(self, buff):
        '''
        :param buff: file-like buffer the finished PDF is written into.

        BUG FIX: __story, __styles and __doc used to be class-level
        attributes, so the mutable __story list was shared by every
        SimplePdf instance — a second document silently inherited the
        first one's content. They are now per-instance state.
        '''
        self.__story = []
        self.__styles = getSampleStyleSheet()
        self.__doc = SimpleDocTemplate(
            buff,
            pagesize=letter,
            rightMargin=72,
            leftMargin=72,
            topMargin=72,
            bottomMargin=18)

    def add_address(self, address):
        ''' Append each line of *address* as a normal paragraph, then a spacer. '''
        for part in address:
            ptext = '<font size=12>%s</font>' % part.strip()
            self.__story.append(Paragraph(ptext, self.__styles["Normal"]))
        self._add_spacer(24)

    def add_heading(self, heading):
        ''' Append a level-1 heading framed by spacers. '''
        self._add_spacer(48)
        self.__story.append(Paragraph(heading, self.__styles["Heading1"]))
        self._add_spacer(24)

    def add_bulleted_paragraph(self, paragraph):
        ''' Append a bulleted paragraph; empty/None input is ignored. '''
        if paragraph:
            self._add_spacer(12)
            self.__story.append(Paragraph(paragraph, self.__styles["Normal"], bulletText='-'))
        else:
            pass

    def add_paragraph(self, paragraph, spacer):
        ''' Append a plain paragraph preceded by a spacer of *spacer* points; empty input is ignored. '''
        if paragraph:
            self._add_spacer(spacer)
            self.__story.append(Paragraph(paragraph, self.__styles["Normal"]))
        else:
            pass

    def make(self):
        ''' Render the accumulated story into the buffer passed to __init__. '''
        self.__doc.build(self.__story)

    def _add_spacer(self, height):
        ''' Internal: append a 1pt-wide vertical spacer of *height* points. '''
        self.__story.append(Spacer(1, height))
| [
"jbspeakr@funkblocka.de"
] | jbspeakr@funkblocka.de |
4e9a5e468a1a134f27cf4d23f2697ca995213d16 | 50c4d539719994c376bfef42d677a27c34c3b241 | /Wk6_Hmwk/api_keys.py | dcf600fa5ed94ecfc577fa82604605fefa3f918f | [] | no_license | chell0011/Iban_DataBootCamp_HW | 14799da96609e10ca7569263a4e77a9df20449d0 | a301b4d9221ad6ce8b493db064828c615d210641 | refs/heads/master | 2020-06-01T20:39:23.196207 | 2019-08-24T21:55:07 | 2019-08-24T21:55:07 | 190,919,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | # OpenWeatherMap API Key
api_key = "0ce87a1e44d5c82ea424167e377cda2e"
| [
"chell0011@gmail.com"
] | chell0011@gmail.com |
37cb1a55bf2943e3cb7f46d64c52de8169b903f0 | 11459f6d05c7091537570a63bb2654b3d3a5440c | /mlprodict/plotting/plotting_validate_graph.py | f30922b8ca4cda3cd369f447e8fb26caa3a256dd | [
"MIT"
] | permissive | xhochy/mlprodict | 663e6ac21ae913d5dff59cfab938fc62f59d1c79 | 22ab05af3ff4a67f4390af1c44e349efa454f0d5 | refs/heads/master | 2023-08-11T00:53:24.133020 | 2021-09-24T15:52:46 | 2021-09-24T15:52:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,540 | py | """
@file
@brief Functions to help visualizing performances.
"""
import numpy
import pandas
def _model_name(name):
"""
Extracts the main component of a model, removes
suffixes such ``Classifier``, ``Regressor``, ``CV``.
@param name string
@return shorter string
"""
if name.startswith("Select"):
return "Select"
if name.startswith("Nu"):
return "Nu"
modif = 1
while modif > 0:
modif = 0
for suf in ['Classifier', 'Regressor', 'CV', 'IC',
'Transformer']:
if name.endswith(suf):
name = name[:-len(suf)]
modif += 1
return name
def plot_validate_benchmark(df):
"""
Plots a graph which summarizes the performances of a benchmark
validating a runtime for :epkg:`ONNX`.
@param df output of function @see fn summary_report
@return fig, ax
.. plot::
from logging import getLogger
from pandas import DataFrame
import matplotlib.pyplot as plt
from mlprodict.onnxrt.validate import enumerate_validated_operator_opsets, summary_report
from mlprodict.tools.plotting import plot_validate_benchmark
logger = getLogger('skl2onnx')
logger.disabled = True
rows = list(enumerate_validated_operator_opsets(
verbose=0, models={"LinearRegression"}, opset_min=11,
runtime=['python', 'onnxruntime1'], debug=False,
benchmark=True, n_features=[None, 10]))
df = DataFrame(rows)
piv = summary_report(df)
fig, ax = plot_validate_benchmark(piv)
plt.show()
"""
import matplotlib.pyplot as plt
if 'n_features' not in df.columns:
df["n_features"] = numpy.nan # pragma: no cover
if 'runtime' not in df.columns:
df['runtime'] = '?' # pragma: no cover
fmt = "{} [{}-{}|{}] D{}"
df["label"] = df.apply(
lambda row: fmt.format(
row["name"], row["problem"], row["scenario"],
row['optim'], row["n_features"]).replace("-default|", "-**]"), axis=1)
df = df.sort_values(["name", "problem", "scenario", "optim",
"n_features", "runtime"],
ascending=False).reset_index(drop=True).copy()
indices = ['label', 'runtime']
values = [c for c in df.columns
if 'N=' in c and '-min' not in c and '-max' not in c]
try:
df = df[indices + values]
except KeyError as e: # pragma: no cover
raise RuntimeError(
"Unable to find the following columns {}\nin {}".format(
indices + values, df.columns)) from e
if 'RT/SKL-N=1' not in df.columns:
raise RuntimeError( # pragma: no cover
"Column 'RT/SKL-N=1' is missing, benchmark was probably not run.")
na = df["RT/SKL-N=1"].isnull()
dfp = df[~na]
runtimes = list(sorted(set(dfp['runtime'])))
final = None
for rt in runtimes:
sub = dfp[dfp.runtime == rt].drop('runtime', axis=1).copy()
col = list(sub.columns)
for i in range(1, len(col)):
col[i] += "__" + rt
sub.columns = col
if final is None:
final = sub
else:
final = final.merge(sub, on='label', how='outer')
# let's add average and median
ncol = (final.shape[1] - 1) // len(runtimes)
if len(runtimes) + 1 > final.shape[0]:
dfp_legend = final.iloc[:len(runtimes) + 1, :].copy()
while dfp_legend.shape[0] < len(runtimes) + 1:
dfp_legend = pandas.concat([dfp_legend, dfp_legend[:1]])
else:
dfp_legend = final.iloc[:len(runtimes) + 1, :].copy()
rleg = dfp_legend.copy()
dfp_legend.iloc[:, 1:] = numpy.nan
rleg.iloc[:, 1:] = numpy.nan
for r, runt in enumerate(runtimes):
sli = slice(1 + ncol * r, 1 + ncol * r + ncol)
cm = final.iloc[:, sli].mean().values
dfp_legend.iloc[r + 1, sli] = cm
rleg.iloc[r, sli] = final.iloc[:, sli].median()
dfp_legend.iloc[r + 1, 0] = "avg_" + runt
rleg.iloc[r, 0] = "med_" + runt
dfp_legend.iloc[0, 0] = "------"
rleg.iloc[-1, 0] = "------"
# sort
final = final.sort_values('label', ascending=False).copy()
# add global statistics
final = pandas.concat([rleg, final, dfp_legend]).reset_index(drop=True)
# graph beginning
total = final.shape[0] * 0.45
fig, ax = plt.subplots(1, len(values), figsize=(14, total),
sharex=False, sharey=True)
x = numpy.arange(final.shape[0])
subh = 1.0 / len(runtimes)
height = total / final.shape[0] * (subh + 0.1)
decrt = {rt: height * i for i, rt in enumerate(runtimes)}
colors = {rt: c for rt, c in zip(
runtimes, ['blue', 'orange', 'cyan', 'yellow'])}
# draw lines between models
vals = final.iloc[:, 1:].values.ravel()
xlim = [min(0.5, min(vals)), max(2, max(vals))]
while i < final.shape[0] - 1:
i += 1
label = final.iloc[i, 0]
if '[' not in label:
continue
prev = final.iloc[i - 1, 0]
if '[' not in label:
continue # pragma: no cover
label = label.split()[0]
prev = prev.split()[0]
if _model_name(label) == _model_name(prev):
continue
blank = final.iloc[:1, :].copy()
blank.iloc[0, 0] = '------'
blank.iloc[0, 1:] = xlim[0]
final = pandas.concat([final[:i], blank, final[i:]])
i += 1
final = final.reset_index(drop=True).copy()
x = numpy.arange(final.shape[0])
done = set()
for c in final.columns[1:]:
place, runtime = c.split('__')
if hasattr(ax, 'shape'):
index = values.index(place)
if (index, runtime) in done:
raise RuntimeError( # pragma: no cover
"Issue with column '{}'\nlabels={}\nruntimes={}\ncolumns="
"{}\nvalues={}\n{}".format(
c, list(final.label), runtimes, final.columns, values, final))
axi = ax[index]
done.add((index, runtime))
else:
if (0, runtime) in done: # pragma: no cover
raise RuntimeError(
"Issue with column '{}'\nlabels={}\nruntimes={}\ncolumns="
"{}\nvalues={}\n{}".format(
c, final.label, runtimes, final.columns, values, final))
done.add((0, runtime)) # pragma: no cover
axi = ax # pragma: no cover
if c in final.columns:
yl = final.loc[:, c]
xl = x + decrt[runtime] / 2
axi.barh(xl, yl, label=runtime, height=height,
color=colors[runtime])
axi.set_title(place)
def _plot_axis(axi, x, xlim):
axi.plot([1, 1], [0, max(x)], 'g-')
axi.plot([2, 2], [0, max(x)], 'r--')
axi.set_xlim(xlim)
axi.set_xscale('log')
axi.set_ylim([min(x) - 2, max(x) + 1])
def _plot_final(axi, x, final):
axi.set_yticks(x)
axi.set_yticklabels(final['label'])
if hasattr(ax, 'shape'):
for i in range(len(ax)): # pylint: disable=C0200
_plot_axis(ax[i], x, xlim)
ax[min(ax.shape[0] - 1, 2)].legend()
_plot_final(ax[0], x, final)
else: # pragma: no cover
_plot_axis(ax, x, xlim)
_plot_final(ax, x, final)
ax.legend()
fig.subplots_adjust(left=0.25)
return fig, ax
| [
"xavier.dupre@gmail.com"
] | xavier.dupre@gmail.com |
659eeb3dcf4964f4ff9fedc3132d59bd627f00c4 | 22c8fcbf0a321cd5a20dd5bf583067c31c6747ef | /blackjack_function.py | d643fc2355b3f8d5ce8809c86d7266f2db55ac61 | [] | no_license | lizoodharo/lizo-test-repo | 5646264d30ce529dd5ae6461ef78880a34b5bc87 | 8f463533529a52b9eee8694b3c1dff2e11902f3f | refs/heads/master | 2020-03-30T04:26:09.600728 | 2019-02-05T20:49:01 | 2019-02-05T20:49:01 | 150,742,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,617 | py |
import random
def blackjack():
black_jack = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10]
card_type = ['of spades', ' of diamonds', ' of hearts', ' of clubs']
specialTen_cards = ["Jack", "King", "Queen"]
House_favor = [1,2,3,3,4,5,6,7,8,9,10]
User_favor = [1, 1, 2, 3, 3, 4, 5, 6, 7, 8, 9, 10]
#bet = bet_ammount#<------- if statement where difficulty changes if bet is greater than 1000
#Draw CPU 1st card. Don't show
CPU_1stCard = random.choice(black_jack)
CPU_1stCard_type = random.choice(card_type)
print "| Dealer: Face down card"
#Draw CPU 2nd card & show
CPU_2ndCard = random.choice(black_jack)
CPU_2ndCard_type = random.choice(card_type)
if CPU_2ndCard == 10:
Rand_TenCard = random.choice(specialTen_cards)
print "| Dealer Draws: ", Rand_TenCard, CPU_2ndCard_type
elif CPU_2ndCard == 1:
print "| Dealer Draws: Ace", CPU_2ndCard_type
else:
print "| Dealer Draws: ", CPU_2ndCard, CPU_2ndCard_type
print "-------------------------------"
#Draw user first card and show
USR_1stCard = random.choice(black_jack)
USR_1stCard_type = random.choice(card_type)
if USR_1stCard == 1:
print "Your 1st card: Ace", USR_1stCard_type
elif USR_1stCard == 10:
Rand_TenCard = random.choice(specialTen_cards)
print "Your 1st Card: ", Rand_TenCard, USR_1stCard_type
elif USR_1stCard == 2 or 3 or 4 or 5 or 6 or 7 or 8 or 9:
print "Your 1st Card: ", USR_1stCard, USR_1stCard_type
#Draw user 2nd card & show
USR_2ndCard = random.choice(black_jack)
USR_2ndCard_type = random.choice(card_type)
if USR_2ndCard == 1:
print "Your 2nd card: Ace", USR_2ndCard_type
elif USR_2ndCard == 10:
Rand_TenCard = random.choice(specialTen_cards)
print "Your 2nd Card: ", Rand_TenCard, USR_2ndCard_type
elif USR_2ndCard == 2 or 3 or 4 or 5 or 6 or 7 or 8 or 9:
print "Your 2nd Card: ", USR_2ndCard, USR_2ndCard_type
CPU_totalCard_value = CPU_1stCard + CPU_2ndCard
USR_totalCard_value = USR_1stCard + USR_2ndCard
Ace_Total = USR_totalCard_value + 10
if USR_1stCard == 1 or USR_2ndCard == 1:
if Ace_Total == 21:
print "Blackjack! You Win!!!"
return 1
print USR_totalCard_value, "or", Ace_Total
else:
print USR_totalCard_value
#Give user the choice to Hit or Stay. Code block draws new card from deck and adds value to total
while USR_totalCard_value < 21:
usr_choice1 = int(input("Enter 1 to Hit or 2 Stand: "))
#If user chooses to stand, display final card value and break out of loop.
if usr_choice1 == 2 and Ace_Total < 21 and (USR_1stCard == 1 or USR_2ndCard == 1):
USR_finalCard_value = Ace_Total
break
elif usr_choice1 == 2 and Ace_Total > 21 and (USR_1stCard == 1 or USR_2ndCard == 1):
USR_finalCard_value = USR_totalCard_value
break
elif usr_choice1 == 2:
USR_finalCard_value = USR_totalCard_value
break
#If user chooses to hit, assign a 3rd card, and add it's value to the total card value.
if usr_choice1 == 1 and USR_totalCard_value < 17:
USR_3rdCard = random.choice(black_jack)
USR_3rdCard_type = random.choice(card_type)
if USR_3rdCard == 1:
print "Your card: Ace", USR_3rdCard_type
USR_totalCard_value += USR_3rdCard
Ace_Total += USR_3rdCard
if Ace_Total == 21 and USR_1stCard == 1 or USR_2ndCard == 1:
print "You win!!!!"
return 1
elif Ace_Total < 21 and USR_1stCard == 1 or USR_2ndCard == 1:
print USR_totalCard_value, "or", Ace_Total
else:
print "Total card value: ", USR_totalCard_value
elif USR_3rdCard == 10:
Rand_TenCard = random.choice(specialTen_cards)
print "Your Card: ", Rand_TenCard, USR_3rdCard_type
USR_totalCard_value += USR_3rdCard
print USR_totalCard_value
else:
print "Your Card: ", USR_3rdCard, USR_3rdCard_type
USR_totalCard_value += USR_3rdCard
Ace_Total += USR_3rdCard
if Ace_Total == 21 and USR_1stCard == 1 or USR_2ndCard == 1:
print "You win!!!!"
return 1
elif Ace_Total < 21 and USR_1stCard == 1 or USR_2ndCard == 1:
print USR_totalCard_value, "or", Ace_Total
else:
print "Total card value: ", USR_totalCard_value
if USR_totalCard_value > 21:
print "You Bust!"
print "Your card total: ", USR_totalCard_value
print "Dealer's card total: ", CPU_totalCard_value
print "----------------------------------------------------------"
return 0
elif USR_totalCard_value == 21:
print "You win!!!!"
print "Your card total: ", USR_totalCard_value
print "Dealer's card total: ", CPU_totalCard_value
print "----------------------------------------------------------"
return 1
#Below elif statement makes game a little easier for user by decreasing odds of drawing a 10. Remove or comment to increase difficulty.
#Programmer can also adjust difficulty by manipulating values in the User_favor list
elif usr_choice1 == 1 and USR_totalCard_value >= 17:
USR_3rdCard = random.choice(User_favor)
USR_3rdCard_type = random.choice(card_type)
if USR_3rdCard == 1:
print "Your card: Ace", USR_3rdCard_type
USR_totalCard_value += USR_3rdCard
print "Total card value: ", USR_totalCard_value
elif USR_3rdCard == 10:
Rand_TenCard = random.choice(specialTen_cards)
print "Your Card: ", Rand_TenCard, USR_3rdCard_type
USR_totalCard_value += USR_3rdCard
print USR_totalCard_value
else:
print "Your Card: ", USR_3rdCard, USR_3rdCard_type
USR_totalCard_value += USR_3rdCard
print USR_totalCard_value
#Test to see if user busts, or draws 21
if USR_totalCard_value > 21:
print "You Bust!"
print "Your card total: ", USR_totalCard_value
print "Dealer's card total: ", CPU_totalCard_value
print "----------------------------------------------------------"
return 0
elif USR_totalCard_value == 21:
print "You win!!!!"
print "Your card total: ", USR_totalCard_value
print "Dealer's card total: ", CPU_totalCard_value
print "----------------------------------------------------------"
return 1
print "----------------------------------------------------------"
#CPU auto play
#CPU decision to hit or stay
if CPU_1stCard == 10:
Rand_TenCard = random.choice(specialTen_cards)
print "Dealer's face down card: ", Rand_TenCard, CPU_1stCard_type
else:
print "Dealer's face down card: ", CPU_1stCard, CPU_1stCard_type
if CPU_totalCard_value == 21:
print "----------------------------------------------------------"
print "Blackjack for Dealer! You lose!"
print "Your card total: ", USR_finalCard_value
print "Dealer's card total: ", CPU_totalCard_value
print "----------------------------------------------------------"
while CPU_totalCard_value < 21:
#CPU checks user total and chooses to stay if it's total card value is greater than the user's final card value at any point in the game.
if CPU_totalCard_value > USR_finalCard_value:
print "Dealer chooses to stay"
CPU_finalCard_value = CPU_totalCard_value
break
elif CPU_totalCard_value >= 18:
print "Dealer chooses to stay"
CPU_finalCard_value = CPU_totalCard_value
break
#Additional elif statement to increase House's chance of winning
#Line 112 will be changed to if CPU_totvalue >= 18:
#Additional elif condition will be if CPU_totvalue >= 15 and CPU_totvalue < 18:
#New list will be created containing these values House_favor = [1,2,2,3,3,4,5,6,7,8,9,10]
#print "Dealer hits!"
# CPU_3rdCard = random.choice(House_favor)
# CPU_3rdCard_type = random.choice(card_type)
elif CPU_totalCard_value >= 15 and CPU_totalCard_value < 18:
print "Dealer hits!"
CPU_3rdCard = random.choice(House_favor)
CPU_3rdCard_type = random.choice(card_type)
if CPU_3rdCard == 10:
Rand_TenCard = random.choice(specialTen_cards)
print "Dealer draws: ", Rand_TenCard, CPU_3rdCard_type
CPU_totalCard_value += CPU_3rdCard
elif CPU_3rdCard == 1:
print "Dealer draws: Ace", CPU_3rdCard_type
CPU_totalCard_value += CPU_3rdCard
else:
print "Dealer draws: ", CPU_3rdCard, CPU_3rdCard_type
CPU_totalCard_value += CPU_3rdCard
if CPU_totalCard_value > 21:
print "----------------------------------------------------------"
print "Dealer busts! You win"
if Ace_Total > USR_totalCard_value and Ace_Total < 21:
print "Your card total: ", Ace_Total
else:
print "Your card total: ", USR_finalCard_value
print "Dealer's card total: ", CPU_totalCard_value
print "----------------------------------------------------------"
return 1
elif CPU_totalCard_value == 21:
print "----------------------------------------------------------"
print "You lose!"
if Ace_Total > USR_totalCard_value and Ace_Total < 21:
print "Your card total: ", Ace_Total
else:
print "Your card total: ", USR_finalCard_value
print "Dealer's card total: ", CPU_totalCard_value
print "----------------------------------------------------------"
return 0
#Above elif condition causes a harder version of the game. If lines 124 to 156 are commented or removed, game will be easier
#Programmer can also adjust difficulty by manipulating values in the House_favor list
elif CPU_totalCard_value < 17:
print "Dealer hits!"
CPU_3rdCard = random.choice(black_jack)
CPU_3rdCard_type = random.choice(card_type)
CPU_totalCard_value += CPU_3rdCard
if CPU_3rdCard == 10:
Rand_TenCard = random.choice(specialTen_cards)
print "Dealer draws: ", Rand_TenCard, CPU_3rdCard_type
elif CPU_3rdCard == 1:
print "Dealer draws: Ace", CPU_3rdCard_type
else:
print "Dealer draws: ", CPU_3rdCard, CPU_3rdCard_type
if CPU_totalCard_value > 21:
print "----------------------------------------------------------"
print "Dealer busts! You win"
print "Your card total: ", USR_totalCard_value
print "Dealer's card total: ", CPU_totalCard_value
print "----------------------------------------------------------"
return 1
elif CPU_totalCard_value == 21:
print "----------------------------------------------------------"
print "You lose!"
print "Your card total: ", USR_totalCard_value
print "Dealer's card total: ", CPU_totalCard_value
print "----------------------------------------------------------"
return 0
if USR_finalCard_value == CPU_totalCard_value:
print "----------------------------------------------------------"
print "It's a tie"
print "Your card total: ", USR_finalCard_value
print "Dealer's card total: ", CPU_totalCard_value
print "----------------------------------------------------------"
return 2
elif USR_finalCard_value > CPU_totalCard_value:
print "----------------------------------------------------------"
print "You win!"
print "Your card total: ", USR_finalCard_value
print "Dealer's card total: ", CPU_totalCard_value
print "----------------------------------------------------------"
return 1
elif USR_finalCard_value < CPU_totalCard_value:
print "----------------------------------------------------------"
print "You lose!"
print "Your card total: ", USR_finalCard_value
print "Dealer's card total: ", CPU_totalCard_value
print "----------------------------------------------------------"
return 0
if __name__ == "__main__":
blackjack()
| [
"noreply@github.com"
] | lizoodharo.noreply@github.com |
ab25cc9d4cab4c0e131e57540685f03f68c4b4f0 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/nlp/Scaling-nmt_for_Pytorch/fairseq/criterions/label_smoothed_cross_entropy_with_ctc.py | f2e8cdf3bfe0caea99125c6f9607dff9495891cf | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 3,403 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.logging import metrics
from fairseq.criterions import register_criterion
from fairseq.criterions.label_smoothed_cross_entropy import (
LabelSmoothedCrossEntropyCriterion,
LabelSmoothedCrossEntropyCriterionConfig,
)
from fairseq.data.data_utils import lengths_to_mask
@dataclass
class LabelSmoothedCrossEntropyWithCtcCriterionConfig(
LabelSmoothedCrossEntropyCriterionConfig
):
ctc_weight: float = field(default=1.0, metadata={"help": "weight for CTC loss"})
@register_criterion(
"label_smoothed_cross_entropy_with_ctc",
dataclass=LabelSmoothedCrossEntropyWithCtcCriterionConfig,
)
class LabelSmoothedCrossEntropyWithCtcCriterion(LabelSmoothedCrossEntropyCriterion):
def __init__(
self,
task,
sentence_avg,
label_smoothing,
ignore_prefix_size,
report_accuracy,
ctc_weight,
):
super().__init__(
task, sentence_avg, label_smoothing, ignore_prefix_size, report_accuracy
)
self.ctc_weight = ctc_weight
def forward(self, model, sample, reduce=True):
net_output = model(**sample["net_input"])
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
ctc_loss = torch.tensor(0.0).type_as(loss)
if self.ctc_weight > 0.0:
ctc_lprobs, ctc_lens = model.get_ctc_output(net_output, sample)
ctc_tgt, ctc_tgt_lens = model.get_ctc_target(sample)
ctc_tgt_mask = lengths_to_mask(ctc_tgt_lens)
ctc_tgt_flat = ctc_tgt.masked_select(ctc_tgt_mask)
reduction = "sum" if reduce else "none"
ctc_loss = (
F.ctc_loss(
ctc_lprobs,
ctc_tgt_flat,
ctc_lens,
ctc_tgt_lens,
reduction=reduction,
zero_infinity=True,
)
* self.ctc_weight
)
loss += ctc_loss
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data),
"nll_loss": utils.item(nll_loss.data),
"ctc_loss": utils.item(ctc_loss.data),
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
if self.report_accuracy:
n_correct, total = self.compute_accuracy(model, net_output, sample)
logging_output["n_correct"] = utils.item(n_correct.data)
logging_output["total"] = utils.item(total.data)
return loss, sample_size, logging_output
@classmethod
def reduce_metrics(cls, logging_outputs) -> None:
super().reduce_metrics(logging_outputs)
loss_sum = sum(log.get("ctc_loss", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"ctc_loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
a7262e42c628b8f7bfdf1ca4f951541635fd9991 | 782eee72a6f602639c0c9f5d2e98b4456665e0e1 | /SIS/SIS Creator/SIS Creator/main.py | 549b3c2f11fc3e1f41f19dc3d3e349906b0af1ca | [] | no_license | zhestkovda/SIS-PLC | 1a8abb65d5436a9973f16e07dc60c51cded54b52 | 077bb966561d41eaf4a4b81e66c5c5c45887ca4f | refs/heads/master | 2020-07-04T17:19:38.556457 | 2019-08-14T13:21:41 | 2019-08-14T13:21:41 | 202,352,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | import dbase
import config
import mainframe
import text
import wx
def main():
app = wx.App()
dbase.CheckDB()
dlg = mainframe.MainFrame(None, title=text.MainTitle)
dlg.CenterOnScreen()
dlg.ShowModal()
dlg.Destroy()
app.MainLoop()
if __name__ == '__main__':
main() | [
"zhestkovda@gmail.com"
] | zhestkovda@gmail.com |
d1f546681794e6a313737c2b6c0ecc037fcc1921 | a79c9afff9afea0c64ce8efac5683bde4ef7eeb8 | /apps/app_two/urls.py | 46e912dd7bc1de957131647bfdebb35d948d591d | [] | no_license | kswelch53/login_registration | 82e8a865125e903efbc624a3b7e3c5a4b6f98a60 | 364cfae780fe9f7e39c11dbe9e53e684d0798f5d | refs/heads/master | 2021-05-12T08:48:04.707930 | 2018-01-13T00:11:50 | 2018-01-13T00:11:50 | 117,300,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | from django.conf.urls import url, include
from . import views
urlpatterns = [
# root route to index method in app_two
url(r'^$', views.index, name='index'),
]
| [
"kswelch53@gmail.com"
] | kswelch53@gmail.com |
0027b8b768f766b7d648c9edeb2a8819a7e2df19 | ca87752023af6d2cb576ddb45915ab64e2bdb3a1 | /Search by word Method.py | 0e1b3bfa8cb4a887597a62b0f118a0f891b69fe8 | [] | no_license | YiqingFan/Music-Playlist-Creation | 7839366790abf8b8dcfe56da93722cc25670bc1d | f333d32fbb8bc518d732b6f529481090ec2166d8 | refs/heads/master | 2021-12-14T09:06:53.104395 | 2021-12-08T02:21:58 | 2021-12-08T02:21:58 | 247,883,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,204 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
# In[2]:
data = pd.read_csv('reviews_Digital_Music.csv')
# In[3]:
data[:5]
# 1.Classic Playlist
# In[4]:
search_classic = data[data.iloc[:,3].str.contains('classic')==True]
# In[5]:
random_classic = search_classic.sample(n=6)
random_classic
# In[6]:
classic_song1 = data[(data.iloc[:,1]=='B000X9O7IE')]
rating_classic_song1= np.average(classic_song1.iloc[:,4])
classic_song2 = data[(data.iloc[:,1]=='B000002KH3')]
rating_classic_song2= np.average(classic_song2.iloc[:,4])
classic_song3 = data[(data.iloc[:,1]=='B000002KYU')]
rating_classic_song3= np.average(classic_song3.iloc[:,4])
classic_song4 = data[(data.iloc[:,1]=='B0000024IE')]
rating_classic_song4= np.average(classic_song4.iloc[:,4])
classic_song5 = data[(data.iloc[:,1]=='B004YMS6QU')]
rating_classic_song5= np.average(classic_song5.iloc[:,4])
classic_song6 = data[(data.iloc[:,1]=='B0013832A8')]
rating_classic_song6= np.average(classic_song6.iloc[:,4])
rating_of_classic_playlist = (rating_classic_song1+rating_classic_song2+rating_classic_song3+rating_classic_song4+rating_classic_song5+rating_classic_song6)/6
print('Average rating of classic playlist is', rating_of_classic_playlist)
# In[7]:
print(rating_classic_song1,rating_classic_song2,rating_classic_song3,rating_classic_song4,rating_classic_song5,rating_classic_song6)
# 2.Cowboy
# In[12]:
search_cowboy = data[data.iloc[:,3].str.contains('cowboy')==True]
random_cowboy = search_cowboy.sample(n=6)
random_cowboy
# In[8]:
cowboy_song1 = data[(data.iloc[:,1]=='B000002W7W')]
rating_cowboy_song1= np.average(cowboy_song1.iloc[:,4])
cowboy_song2 = data[(data.iloc[:,1]=='B0015AEKGE')]
rating_cowboy_song2= np.average(cowboy_song2.iloc[:,4])
cowboy_song3 = data[(data.iloc[:,1]=='B004BUFJS8')]
rating_cowboy_song3= np.average(cowboy_song3.iloc[:,4])
cowboy_song4 = data[(data.iloc[:,1]=='B005QJZ5FA')]
rating_cowboy_song4= np.average(cowboy_song4.iloc[:,4])
cowboy_song5 = data[(data.iloc[:,1]=='B002REWLV8')]
rating_cowboy_song5= np.average(cowboy_song5.iloc[:,4])
cowboy_song6 = data[(data.iloc[:,1]=='B00949YGM6')]
rating_cowboy_song6= np.average(cowboy_song6.iloc[:,4])
rating_of_cowboy_playlist = (rating_cowboy_song1+rating_cowboy_song2+rating_cowboy_song3+rating_cowboy_song4+rating_cowboy_song5+rating_cowboy_song6)/6
print('Average rating of cowboy playlist is', rating_of_cowboy_playlist)
# In[9]:
print(rating_cowboy_song1,rating_cowboy_song2,rating_cowboy_song3,rating_cowboy_song4,rating_cowboy_song5,rating_cowboy_song6)
# 3.Dreamy
# In[15]:
search_dreamy = data[data.iloc[:,3].str.contains('dreamy')==True]
random_dreamy = search_dreamy.sample(n=6)
random_dreamy
# In[10]:
dreamy_song1 = data[(data.iloc[:,1]=='B00006L852')]
rating_dreamy_song1= np.average(dreamy_song1.iloc[:,4])
dreamy_song2 = data[(data.iloc[:,1]=='B005QJZ5FA')]
rating_dreamy_song2= np.average(dreamy_song2.iloc[:,4])
dreamy_song3 = data[(data.iloc[:,1]=='B00001OH7M')]
rating_dreamy_song3= np.average(dreamy_song3.iloc[:,4])
dreamy_song4 = data[(data.iloc[:,1]=='B0007TFI56')]
rating_dreamy_song4= np.average(dreamy_song4.iloc[:,4])
dreamy_song5 = data[(data.iloc[:,1]=='B000058DXH')]
rating_dreamy_song5= np.average(dreamy_song5.iloc[:,4])
dreamy_song6 = data[(data.iloc[:,1]=='B002Q11RJO')]
rating_dreamy_song6= np.average(dreamy_song6.iloc[:,4])
rating_of_dreamy_playlist = (rating_dreamy_song1+rating_dreamy_song2+rating_dreamy_song3+rating_dreamy_song4+rating_dreamy_song5+rating_dreamy_song6)/6
print('Average rating of dreamy playlist is', rating_of_dreamy_playlist)
# In[11]:
print(rating_dreamy_song1,rating_dreamy_song2,rating_dreamy_song3,rating_dreamy_song4,rating_dreamy_song5,rating_dreamy_song6)
# 4.Rocks
# In[18]:
search_rocks = data[data.iloc[:,3].str.contains('rocks')==True]
random_rocks = search_rocks.sample(n=6)
random_rocks
# In[12]:
rocks_song1 = data[(data.iloc[:,1]=='B000WCDI5K')]
rating_rocks_song1= np.average(rocks_song1.iloc[:,4])
rocks_song2 = data[(data.iloc[:,1]=='B0000026CZ')]
rating_rocks_song2= np.average(rocks_song2.iloc[:,4])
rocks_song3 = data[(data.iloc[:,1]=='B006PZA7XY')]
rating_rocks_song3= np.average(rocks_song3.iloc[:,4])
rocks_song4 = data[(data.iloc[:,1]=='B005OH1IZ0')]
rating_rocks_song4= np.average(rocks_song4.iloc[:,4])
rocks_song5 = data[(data.iloc[:,1]=='B00137MM8W')]
rating_rocks_song5= np.average(rocks_song5.iloc[:,4])
rocks_song6 = data[(data.iloc[:,1]=='B00GTZ6O2S')]
rating_rocks_song6= np.average(rocks_song6.iloc[:,4])
rating_of_rocks_playlist = (rating_rocks_song1+rating_rocks_song2+rating_rocks_song3+rating_rocks_song4+rating_rocks_song5+rating_rocks_song6)/6
print('Average rating of rocks playlist is', rating_of_rocks_playlist)
# In[13]:
print(rating_rocks_song1,rating_rocks_song2,rating_rocks_song3,rating_rocks_song4,rating_rocks_song5,rating_rocks_song6)
# 5.Christmas
# In[20]:
search_christmas = data[data.iloc[:,3].str.contains('christmas ')==True]
random_christmas = search_christmas.sample(n=6)
random_christmas
# In[14]:
christmas_song1 = data[(data.iloc[:,1]=='B000QPLFZ8')]
rating_christmas_song1= np.average(christmas_song1.iloc[:,4])
christmas_song2 = data[(data.iloc[:,1]=='B006NO2WJ4')]
rating_christmas_song2= np.average(christmas_song2.iloc[:,4])
christmas_song3 = data[(data.iloc[:,1]=='B006NF68FC')]
rating_christmas_song3= np.average(christmas_song3.iloc[:,4])
christmas_song4 = data[(data.iloc[:,1]=='B00123D0CE')]
rating_christmas_song4= np.average(christmas_song4.iloc[:,4])
christmas_song5 = data[(data.iloc[:,1]=='B00A70VWVE')]
rating_christmas_song5= np.average(christmas_song5.iloc[:,4])
christmas_song6 = data[(data.iloc[:,1]=='B00065BYAY')]
rating_christmas_song6= np.average(christmas_song6.iloc[:,4])
rating_of_christmas_playlist = (rating_christmas_song1+rating_christmas_song2+rating_christmas_song3+rating_christmas_song4+rating_christmas_song5+rating_christmas_song6)/6
print('Average rating of christmas playlist is', rating_of_christmas_playlist)
# In[15]:
print(rating_christmas_song1,rating_christmas_song2,rating_christmas_song3,rating_christmas_song4,rating_christmas_song5,rating_christmas_song6)
# In[ ]:
| [
"noreply@github.com"
] | YiqingFan.noreply@github.com |
1367392cb579fabd31cd0a289627c57077a7bda2 | a53998e56ee06a96d59d97b2601fd6ec1e4124d7 | /基础课/jichu/day19/multiple_inherit.py | 97a196a49966ef3c0d0f854d88f992a637e776cf | [] | no_license | zh-en520/aid1901 | f0ec0ec54e3fd616a2a85883da16670f34d4f873 | a56f82d0ea60b2395deacc57c4bdf3b6bc73bd2e | refs/heads/master | 2020-06-28T21:16:22.259665 | 2019-08-03T07:09:29 | 2019-08-03T07:09:29 | 200,344,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | class Car:
def run(self,speed):
print('汽车以',speed,'公里/小时的速度行驶')
class Plane:
def fly(self,height):
print('飞机以海拔',height,'米的高度飞行')
class PlaneCar(Car,Plane):
'''PlaneCar为飞行汽车类'''
p1 = PlaneCar()
p1.fly(10000)
p1.run(300) | [
"zh_en520@163.com"
] | zh_en520@163.com |
9668e756e7eb9c8a7a635f2ff2af5e2a9284737f | d6d225186da3d8c8c834c336777db7b4c6a8f221 | /spoj/power.py | 577e558eb8c6b98d432a70c09b2feae1950b3b56 | [] | no_license | ishantsharma29/Competitive-Programming | 40bc8bbeb157e3b0b3ef234851c0dc617afc6b19 | 1978b21dae8a7cc27b482e11ff094d0b787846b0 | refs/heads/master | 2022-05-27T15:33:24.924740 | 2020-05-01T17:52:46 | 2020-05-01T17:52:46 | 260,514,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | import math
def pow ( x , n ) :
aux = 1
while n > 1 :
if n & 1 : #odd
aux *= x
x*=x
n =int (n/2)
return x*aux
2nd method
import math
def pow( x, n):
if n == 0:
return 1
else:
p=int(n/2)
tmp=int(pow(x,p))
if n%2==1:
q=tmp*tmp*x
return (q)
else:
r=tmp*tmp
return (r)
| [
"ishantsharma29@gmail.com"
] | ishantsharma29@gmail.com |
e7e90eb396092cbfdd8804bdc98e246f49e58d27 | f2e70de60adb32014fc718647e00e221687c4055 | /openacademy/models/models.py | 18977c7d29a89790deb59ee8d4506deae8805181 | [] | no_license | fawad4bros/Odoo13_employe_info | 6a28f929cda1a3cb2c1ba1b665ba9565df3d2e37 | 40dd51c011726f49b331f29ffc2f7445877e5624 | refs/heads/master | 2023-04-05T10:01:42.591943 | 2021-04-04T06:58:47 | 2021-04-04T06:58:47 | 352,701,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,569 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api, exceptions
class Course(models.Model):
_name = 'openacademy.course'
_description = 'OpenAcademy Courses'
name = fields.Char(string="Title", required=True, help="Name of the Course")
description = fields.Text()
responsible_id = fields.Many2one('res.users', ondelete='set null', string="Responsible", index=True)
session_ids = fields.One2many('openacademy.session','course_id',string="Sessions")
_sql_constraints = [
('name_description_check',
'CHECK(name != description)',
"The title of the course should not be the description"),
('name_unique',
'UNIQUE(name)',
"The course title must be unique"),
]
class Session(models.Model):
_name = 'openacademy.session'
_description = "OpenAcademy Sessions"
name = fields.Char(required=True)
start_date = fields.Date(default=fields.Date.today)
duration = fields.Float(digits=(6, 2), help="Duration in days")
seats = fields.Integer(string="Number of seats")
active = fields.Boolean(default=True)
instructor_id = fields.Many2one('res.partner', string="Instructor")
course_id = fields.Many2one('openacademy.course', ondelete='cascade', string="Course", required=True)
attendee_ids = fields.Many2many('res.partner',string="Attendees")
taken_seats = fields.Float(string="Taken seats", compute='_taken_seats')
@api.depends('seats', 'attendee_ids')
def _taken_seats(self):
for r in self:
if not r.seats:
r.taken_seats= 0.0
else:
r.taken_seats = 100.0 * len(r.attendee_ids) / r.seats
@api.onchange('seats', 'attendee_ids')
def _verify_valid_seats(self):
if self.seats < 0:
return {
'warning': {
'title': "Incorrect 'seats' value",
'message': "The number of available seats may not be negative",
},
}
if self.seats < len(self.attendee_ids):
return {
'warning': {
'title': "Too many attendees",
'message': "Increase seats or remove excess attendees",
},
}
@api.constrains('instructor_id','attendee_ids')
def _check_instructor_not_in_attendees(self):
for r in self:
if r.instructor_id and r.instructor_id in r.attendee_ids:
raise exceptions.ValidationError("A session's instructor can't be an attendee") | [
"you@example.com"
] | you@example.com |
2d7478ac2aba8c82308042a89be67810d784bb50 | 57c6d71cb82f00d51705e25e1d8b1c2a130cf7f7 | /ex09_02.py | 771b4846027e0ae2fdd11225849903907f44eed2 | [] | no_license | jgramelb/Python-learning | 80fda893324ce13ebe2657f69452c8a41c573ba4 | ecc371178846686a659b06af9b1336b3ee5aaa47 | refs/heads/master | 2021-09-07T16:07:56.935425 | 2018-02-25T20:26:51 | 2018-02-25T20:26:51 | 109,454,537 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | #Write a program that categorizes each mail message by which day of the week the commit was done
fname = input('Enter a file name: ')
try:
fhandle = open(fname)
except:
print('File cannot be read', fname)
exit()
#look through lines in fhand
countdct = dict()
for line in fhandle:
#print line
line = line.rstrip()
#look for line that start with "From"
if not line.startswith('From'):
continue
#create words
words = line.split()
words = words[2:3]
for word in words:
if word not in countdct:
countdct[word] = 1
else:
countdct[word] += 1
print(countdct)
| [
"noreply@github.com"
] | jgramelb.noreply@github.com |
81b1a30eefb31497477c176e4f6c736ee7198f99 | 46ebae3a062a9a0622a11e1df6b9765caa5ad546 | /udacify.py | 142cde3f77078f0f3bee3eb98ebef1a8348b8e32 | [] | no_license | bernblend/earlyPython | bcde46ceca59060b635797eb6a098eb398d40025 | daa00b7846643082feba061a099234d6df7dfa56 | refs/heads/master | 2021-01-22T19:48:38.627953 | 2017-03-31T06:00:39 | 2017-03-31T06:00:39 | 85,241,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | # Define a procedure, udacify, that takes as
# input a string, and returns a string that
# is an uppercase 'U' followed by the input string.
# for example, when you enter
# print udacify('dacians')
# the output should be the string 'Udacians'
def udacify(a):
return "U" + a
# Remove the hash, #, from infront of print to test your code.
print udacify('dacians')
#>>> Udacians
print udacify('turn')
#>>> Uturn
print udacify('boat')
#>>> Uboat
| [
"bernblend@gmail.com"
] | bernblend@gmail.com |
691c277ab69ffaa91064bea5ca920b354ffc9bf3 | 3bb5c0564fe751fc3a564275d2f8d83d12350399 | /run.py | d0df95a28c93f4ca1381472b322bccc0420d8839 | [] | no_license | antonays/dotbot_server | 975ecced9f245e67358ed292c40bcfc97d565ee7 | 2da1bfae7880510495698ff9b668239294ef0a87 | refs/heads/master | 2021-01-13T07:16:23.896374 | 2016-10-30T18:16:23 | 2016-10-30T18:16:23 | 71,796,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | from app import app
if __name__ == '__main__':
app.run(debug=True)
| [
"antonays@gmail.ccc"
] | antonays@gmail.ccc |
a7f4897ef8d5a580352bade59d54cd0e8ba6c11e | 557dc6987c75cc6e573519153f6ab304548dbd5d | /falldetect/training_util0506.py | bc4acd1923e81b8f0ca42b2d4eba3963de68c08d | [] | no_license | chanmi168/Fall-Detection-DAT | 47f3b1c12afdbaa5f3f8ab0475192d97e8505174 | 6c2d48275d4271cf8f9e2a2565259a8aeb10a859 | refs/heads/master | 2021-12-23T19:02:00.004622 | 2021-12-20T22:15:24 | 2021-12-20T22:15:24 | 242,771,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,665 | py | import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 500)
from tqdm import tqdm_notebook as tqdm
from IPython.display import display
import os
import sys
sys.path.append('/content/drive/My Drive/中研院/repo/')
from falldetect.utilities import *
from falldetect.models import *
from falldetect.dataset_util import *
from falldetect.eval_util import *
import time
import datetime
from datetime import datetime
import json
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rc( 'savefig', facecolor = 'white' )
from sklearn.decomposition import PCA
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.nn.functional as F
def train_epoch(train_loader, device, model, criterion, optimizer, epoch):
    """Run one training epoch of the classification branch.

    Args:
        train_loader: DataLoader whose `.dataset.labels` tensor holds the
            full label array (used to normalize the epoch totals).
        device: torch device to run on.
        model: network whose forward returns
            (features, class_logits, domain_logits); only class_logits is used.
        criterion: classification loss taking (logits, long labels).
        optimizer: optimizer over `model`'s parameters.
        epoch: unused; kept for signature symmetry with the DANN loops.

    Returns:
        dict with keys 'loss', 'acc', 'sensitivity', 'precision',
        'specificity', 'F1'. Ratio metrics with an empty denominator
        (e.g. no positive predictions in the epoch) are reported as 0.0
        instead of propagating NaN into the result tables.
    """
    def _safe_div(num, den):
        # Guard against 0/0 — numpy integer division would emit a
        # RuntimeWarning and yield NaN, which then poisons df_performance.
        return num / den if den > 0 else 0.0

    model.train()
    total_train_loss = 0.0
    TP = FP = TN = FN = 0
    for data, labels in train_loader:
        data = data.to(device)
        labels = labels.to(device).long()
        # Forward pass; the third output (domain logits) is unused here.
        feature_out, class_out, _ = model(data)
        train_loss = criterion(class_out, labels)
        # Backward and optimize
        optimizer.zero_grad()
        train_loss.backward()
        optimizer.step()
        total_train_loss += train_loss.data.detach().cpu().numpy()
        # Prediction = argmax over logits (sigmoid is monotonic, so applying
        # it first does not change the argmax).
        out_sigmoid = torch.sigmoid(class_out).data.detach().cpu().numpy()
        pred = np.argmax(out_sigmoid, 1)
        labels_np = labels.data.detach().cpu().numpy()
        TP += ((pred == 1) & (labels_np == 1)).sum()
        FP += ((pred == 1) & (labels_np == 0)).sum()
        TN += ((pred == 0) & (labels_np == 0)).sum()
        FN += ((pred == 0) & (labels_np == 1)).sum()

    train_size = train_loader.dataset.labels.detach().cpu().numpy().shape[0]
    # NOTE: sum of per-batch mean losses divided by sample count — kept
    # as-is so loss curves remain comparable with val_epoch.
    train_loss = total_train_loss / train_size
    acc = (TP + TN) / train_size
    sensitivity = _safe_div(TP, TP + FN)
    specificity = _safe_div(TN, TN + FP)
    precision = _safe_div(TP, TP + FP)
    F1 = _safe_div(2 * precision * sensitivity, precision + sensitivity)
    performance_dict = {
        'loss': train_loss,
        'acc': acc,
        'sensitivity': sensitivity,
        'precision': precision,
        'specificity': specificity,
        'F1': F1,
    }
    return performance_dict
def val_epoch(val_loader, device, model, criterion, optimizer, epoch, domain_name):
    """Evaluate the classification branch on one loader.

    Args:
        val_loader: DataLoader whose `.dataset.labels` tensor holds the full
            label array (used to normalize the epoch totals).
        device: torch device to run on.
        model: network whose forward returns
            (features, class_logits, domain_logits); only class_logits is used.
        criterion: classification loss taking (logits, long labels).
        optimizer: unused; kept for signature symmetry with train_epoch.
        epoch: unused; kept for signature symmetry with train_epoch.
        domain_name: key prefix for the returned metrics ('src' or 'tgt').

    Returns:
        dict with keys '<domain_name>_loss/acc/sensitivity/precision/
        specificity/F1'. Ratio metrics with an empty denominator are
        reported as 0.0 instead of NaN.
    """
    def _safe_div(num, den):
        # Guard against 0/0 — avoids NaN when a confusion-matrix cell is empty.
        return num / den if den > 0 else 0.0

    model.eval()
    total_val_loss = 0.0
    TP = FP = TN = FN = 0
    # Evaluation only: skip building the autograd graph (saves memory/time,
    # identical outputs).
    with torch.no_grad():
        for data, labels in val_loader:
            data = data.to(device)
            labels = labels.to(device).long()
            feature_out, class_out, _ = model(data)
            val_loss = criterion(class_out, labels)
            total_val_loss += val_loss.data.detach().cpu().numpy()
            out_sigmoid = torch.sigmoid(class_out).data.detach().cpu().numpy()
            pred = np.argmax(out_sigmoid, 1)
            labels_np = labels.data.detach().cpu().numpy()
            TP += ((pred == 1) & (labels_np == 1)).sum()
            FP += ((pred == 1) & (labels_np == 0)).sum()
            TN += ((pred == 0) & (labels_np == 0)).sum()
            FN += ((pred == 0) & (labels_np == 1)).sum()

    val_size = val_loader.dataset.labels.detach().cpu().numpy().shape[0]
    val_loss = total_val_loss / val_size
    acc = (TP + TN) / val_size
    sensitivity = _safe_div(TP, TP + FN)
    specificity = _safe_div(TN, TN + FP)
    precision = _safe_div(TP, TP + FP)
    F1 = _safe_div(2 * precision * sensitivity, precision + sensitivity)
    performance_dict = {
        '{}_loss'.format(domain_name): val_loss,
        '{}_acc'.format(domain_name): acc,
        '{}_sensitivity'.format(domain_name): sensitivity,
        '{}_precision'.format(domain_name): precision,
        '{}_specificity'.format(domain_name): specificity,
        '{}_F1'.format(domain_name): F1,
    }
    return performance_dict
def train_epoch_dann(src_loader, tgt_loader, device, dann, class_criterion, domain_criterion, optimizer, epoch):
    """Run one DANN training epoch over paired source/target batches.

    The two loaders are iterated in lockstep with zip(), so the epoch covers
    min(len(src_loader), len(tgt_loader)) batches. The optimized loss is
    src_class_loss + theta * (src_domain_loss + tgt_domain_loss); the target
    class loss is computed for reporting only and does not enter the backward
    pass. Domain labels: source = 0, target = 1.

    Returns a dict of per-epoch source/target class metrics, averaged domain
    losses, and overall domain-classification accuracy.

    NOTE(review): epoch totals are divided by the *full* dataset sizes even
    though zip() drops trailing batches of the longer loader — confirm both
    loaders have equal length, otherwise the averages are biased.
    NOTE(review): ratio metrics (sensitivity/precision/F1) can be NaN when a
    confusion-matrix denominator is zero.
    """
    dann.train()
    total_src_class_loss = 0
    total_tgt_class_loss = 0
    total_src_domain_loss = 0
    total_tgt_domain_loss = 0
    domain_TPTN = 0
    src_TP = 0
    src_FP = 0
    src_TN = 0
    src_FN = 0
    tgt_TP = 0
    tgt_FP = 0
    tgt_TN = 0
    tgt_FN = 0
    for i, (sdata, tdata) in enumerate(zip(src_loader, tgt_loader)):
        src_data, src_labels = sdata
        tgt_data, tgt_labels = tdata
        src_data = src_data.to(device)
        src_labels = src_labels.to(device).long()
        tgt_data = tgt_data.to(device)
        tgt_labels = tgt_labels.to(device).long()
        # prepare domain labels (source batch -> 0, target batch -> 1)
        src_domain_labels = torch.zeros(src_data.size()[0]).to(device).long()
        tgt_domain_labels = torch.ones(tgt_data.size()[0]).to(device).long()
        # model forward returns (features, class_logits, domain_logits)
        src_feature, src_class_out, src_domain_out = dann(src_data)
        tgt_feature, tgt_class_out, tgt_domain_out = dann(tgt_data)
        # compute the class loss of features
        src_class_loss = class_criterion(src_class_out, src_labels)
        tgt_class_loss = class_criterion(tgt_class_out, tgt_labels)
        # make prediction based on logits output class_out
        # (sigmoid is monotonic, so argmax over sigmoid == argmax over logits)
        out_sigmoid = torch.sigmoid(src_class_out).data.detach().cpu().numpy()
        src_class_pred = np.argmax(out_sigmoid, 1)
        out_sigmoid = torch.sigmoid(tgt_class_out).data.detach().cpu().numpy()
        tgt_class_pred = np.argmax(out_sigmoid, 1)
        # make prediction based on logits output domain_out
        out_sigmoid = torch.sigmoid(src_domain_out).data.detach().cpu().numpy()
        src_domain_pred = np.argmax(out_sigmoid, 1)
        out_sigmoid = torch.sigmoid(tgt_domain_out).data.detach().cpu().numpy()
        tgt_domain_pred = np.argmax(out_sigmoid, 1)
        src_domain_loss = domain_criterion(src_domain_out, src_domain_labels)
        tgt_domain_loss = domain_criterion(tgt_domain_out, tgt_domain_labels)
        domain_loss = src_domain_loss + tgt_domain_loss
        # theta weights the domain loss against the source class loss
        theta = 1
        train_loss = src_class_loss + theta * domain_loss
        # Backward and optimize (tgt_class_loss intentionally excluded)
        optimizer.zero_grad()
        train_loss.backward()
        optimizer.step()
        total_src_class_loss += src_class_loss.data.detach().cpu().numpy()
        total_tgt_class_loss += tgt_class_loss.data.detach().cpu().numpy()
        total_src_domain_loss += src_domain_loss.data.detach().cpu().numpy()
        total_tgt_domain_loss += tgt_domain_loss.data.detach().cpu().numpy()
        domain_TPTN += (src_domain_pred==src_domain_labels.data.detach().cpu().numpy()).sum()
        domain_TPTN += (tgt_domain_pred==tgt_domain_labels.data.detach().cpu().numpy()).sum()
        src_labels_np = src_labels.data.detach().cpu().numpy()
        src_TP += ((src_class_pred==1) & (src_labels_np==1)).sum()
        src_FP += ((src_class_pred==1) & (src_labels_np==0)).sum()
        src_TN += ((src_class_pred==0) & (src_labels_np==0)).sum()
        src_FN += ((src_class_pred==0) & (src_labels_np==1)).sum()
        tgt_labels_np = tgt_labels.data.detach().cpu().numpy()
        tgt_TP += ((tgt_class_pred==1) & (tgt_labels_np==1)).sum()
        tgt_FP += ((tgt_class_pred==1) & (tgt_labels_np==0)).sum()
        tgt_TN += ((tgt_class_pred==0) & (tgt_labels_np==0)).sum()
        tgt_FN += ((tgt_class_pred==0) & (tgt_labels_np==1)).sum()
    # normalize totals by the full dataset sizes (see NOTE in docstring)
    src_size = src_loader.dataset.labels.detach().cpu().numpy().shape[0]
    src_class_loss = total_src_class_loss/src_size
    src_domain_loss = total_src_domain_loss/src_size
    src_class_acc = (src_TP+src_TN)/src_size
    src_sensitivity = src_TP/(src_TP+src_FN)
    src_specificity = src_TN/(src_TN+src_FP)
    src_precision = src_TP/(src_TP+src_FP)
    src_F1 = 2 * (src_precision * src_sensitivity) / (src_precision + src_sensitivity)
    tgt_size = tgt_loader.dataset.labels.detach().cpu().numpy().shape[0]
    tgt_class_loss = total_tgt_class_loss/tgt_size
    tgt_domain_loss = total_tgt_domain_loss/tgt_size
    tgt_class_acc = (tgt_TP+tgt_TN)/tgt_size
    tgt_sensitivity = tgt_TP/(tgt_TP+tgt_FN)
    tgt_specificity = tgt_TN/(tgt_TN+tgt_FP)
    tgt_precision = tgt_TP/(tgt_TP+tgt_FP)
    tgt_F1 = 2 * (tgt_precision * tgt_sensitivity) / (tgt_precision + tgt_sensitivity)
    # domain accuracy pooled over both domains
    domain_acc = domain_TPTN/(src_size+tgt_size)
    performance_dict = {
        'src_class_loss': src_class_loss,
        'src_domain_loss': src_domain_loss,
        'src_class_acc': src_class_acc,
        'src_sensitivity': src_sensitivity,
        'src_precision': src_precision,
        'src_specificity': src_specificity,
        'src_F1': src_F1,
        'tgt_class_loss': tgt_class_loss,
        'tgt_domain_loss': tgt_domain_loss,
        'tgt_class_acc': tgt_class_acc,
        'tgt_sensitivity': tgt_sensitivity,
        'tgt_precision': tgt_precision,
        'tgt_specificity': tgt_specificity,
        'tgt_F1': tgt_F1,
        'domain_acc': domain_acc,
    }
    return performance_dict
def val_epoch_dann(src_loader, tgt_loader, device,
                 dann,
                 class_criterion, domain_criterion, epoch):
    """Evaluate the DANN on paired source/target validation batches.

    Mirrors train_epoch_dann without the backward/optimizer steps: the two
    loaders are zipped (epoch covers the shorter loader), class and domain
    metrics are accumulated per domain, and a dict of averaged losses and
    accuracies is returned.

    NOTE(review): runs in eval() mode but without torch.no_grad(), so the
    autograd graph is still built — harmless to outputs, wasteful in memory.
    NOTE(review): `total_val_loss`, `val_loss`, `src_class_TPTN` and
    `tgt_class_TPTN` are computed but never used in the returned dict.
    NOTE(review): totals are divided by full dataset sizes even though zip()
    may truncate — verify loaders have equal length.
    """
    dann.eval()
    total_val_loss = 0
    total_src_class_loss = 0
    total_tgt_class_loss = 0
    total_src_domain_loss = 0
    total_tgt_domain_loss = 0
    src_class_TPTN = 0
    tgt_class_TPTN = 0
    domain_TPTN = 0
    src_TP = 0
    src_FP = 0
    src_TN = 0
    src_FN = 0
    tgt_TP = 0
    tgt_FP = 0
    tgt_TN = 0
    tgt_FN = 0
    for i, (sdata, tdata) in enumerate(zip(src_loader, tgt_loader)):
    # for i, sdata in enumerate(src_loader):
        src_data, src_labels = sdata
        tgt_data, tgt_labels = tdata
        src_data = src_data.to(device)
        src_labels = src_labels.to(device).long()
        tgt_data = tgt_data.to(device)
        tgt_labels = tgt_labels.to(device).long()
        # prepare domain labels (source batch -> 0, target batch -> 1)
        src_domain_labels = torch.zeros(src_data.size()[0]).to(device).long()
        tgt_domain_labels = torch.ones(tgt_data.size()[0]).to(device).long()
        # model forward returns (features, class_logits, domain_logits)
        src_feature, src_class_out, src_domain_out = dann(src_data)
        tgt_feature, tgt_class_out, tgt_domain_out = dann(tgt_data)
        # make prediction based on logits output class_out
        out_sigmoid = torch.sigmoid(src_class_out).data.detach().cpu().numpy()
        src_class_pred = np.argmax(out_sigmoid, 1)
        out_sigmoid = torch.sigmoid(tgt_class_out).data.detach().cpu().numpy()
        tgt_class_pred = np.argmax(out_sigmoid, 1)
        src_class_loss = class_criterion(src_class_out, src_labels)
        tgt_class_loss = class_criterion(tgt_class_out, tgt_labels)
        # make prediction based on logits output domain_out
        out_sigmoid = torch.sigmoid(src_domain_out).data.detach().cpu().numpy()
        src_domain_pred = np.argmax(out_sigmoid, 1)
        out_sigmoid = torch.sigmoid(tgt_domain_out).data.detach().cpu().numpy()
        tgt_domain_pred = np.argmax(out_sigmoid, 1)
        src_domain_loss = domain_criterion(src_domain_out, src_domain_labels)
        tgt_domain_loss = domain_criterion(tgt_domain_out, tgt_domain_labels)
        domain_loss = src_domain_loss + tgt_domain_loss
        theta = 1
        # combined loss computed for parity with training but unused here
        val_loss = src_class_loss + theta * domain_loss
        total_src_class_loss += src_class_loss.data.detach().cpu().numpy()
        total_tgt_class_loss += tgt_class_loss.data.detach().cpu().numpy()
        total_src_domain_loss += src_domain_loss.data.detach().cpu().numpy()
        total_tgt_domain_loss += tgt_domain_loss.data.detach().cpu().numpy()
        src_class_TPTN += (src_class_pred==src_labels.data.detach().cpu().numpy()).sum()
        tgt_class_TPTN += (tgt_class_pred==tgt_labels.data.detach().cpu().numpy()).sum()
        domain_TPTN += (src_domain_pred==src_domain_labels.data.detach().cpu().numpy()).sum()
        domain_TPTN += (tgt_domain_pred==tgt_domain_labels.data.detach().cpu().numpy()).sum()
        src_labels_np = src_labels.data.detach().cpu().numpy()
        src_TP += ((src_class_pred==1) & (src_labels_np==1)).sum()
        src_FP += ((src_class_pred==1) & (src_labels_np==0)).sum()
        src_TN += ((src_class_pred==0) & (src_labels_np==0)).sum()
        src_FN += ((src_class_pred==0) & (src_labels_np==1)).sum()
        tgt_labels_np = tgt_labels.data.detach().cpu().numpy()
        tgt_TP += ((tgt_class_pred==1) & (tgt_labels_np==1)).sum()
        tgt_FP += ((tgt_class_pred==1) & (tgt_labels_np==0)).sum()
        tgt_TN += ((tgt_class_pred==0) & (tgt_labels_np==0)).sum()
        tgt_FN += ((tgt_class_pred==0) & (tgt_labels_np==1)).sum()
    # normalize totals by the full dataset sizes (see NOTE in docstring)
    src_size = src_loader.dataset.labels.detach().cpu().numpy().shape[0]
    src_class_loss = total_src_class_loss/src_size
    src_domain_loss = total_src_domain_loss/src_size
    src_class_acc = (src_TP+src_TN)/src_size
    src_sensitivity = src_TP/(src_TP+src_FN)
    src_specificity = src_TN/(src_TN+src_FP)
    src_precision = src_TP/(src_TP+src_FP)
    src_F1 = 2 * (src_precision * src_sensitivity) / (src_precision + src_sensitivity)
    tgt_size = tgt_loader.dataset.labels.detach().cpu().numpy().shape[0]
    tgt_class_loss = total_tgt_class_loss/tgt_size
    tgt_domain_loss = total_tgt_domain_loss/tgt_size
    tgt_class_acc = (tgt_TP+tgt_TN)/tgt_size
    tgt_sensitivity = tgt_TP/(tgt_TP+tgt_FN)
    tgt_specificity = tgt_TN/(tgt_TN+tgt_FP)
    tgt_precision = tgt_TP/(tgt_TP+tgt_FP)
    tgt_F1 = 2 * (tgt_precision * tgt_sensitivity) / (tgt_precision + tgt_sensitivity)
    domain_acc = domain_TPTN/(src_size+tgt_size)
    performance_dict = {
        'src_class_loss': src_class_loss,
        'src_domain_loss': src_domain_loss,
        'src_class_acc': src_class_acc,
        'src_sensitivity': src_sensitivity,
        'src_precision': src_precision,
        'src_specificity': src_specificity,
        'src_F1': src_F1,
        'tgt_class_loss': tgt_class_loss,
        'tgt_domain_loss': tgt_domain_loss,
        'tgt_class_acc': tgt_class_acc,
        'tgt_sensitivity': tgt_sensitivity,
        'tgt_precision': tgt_precision,
        'tgt_specificity': tgt_specificity,
        'tgt_F1': tgt_F1,
        'domain_acc': domain_acc
    }
    # display(performance_dict)
    return performance_dict
    # return val_loss_avg, src_class_loss_avg, tgt_class_loss_avg, src_domain_loss_avg, tgt_domain_loss_avg, src_class_acc, tgt_class_acc, domain_acc
def BaselineModel_fitting(training_params, src_name, tgt_name, inputdir, outputdir):
    """Train a baseline (non-adversarial) model on the source domain only.

    For each cross-validation fold: trains the classifier on the source
    training split (train_epoch), evaluates every epoch on both the source
    and target validation splits (val_epoch), tracks the Proxy-A-Distance
    (get_PAD), exports the trained model and per-fold metrics, and writes a
    JSON of run parameters.

    Args:
        training_params: dict of hyperparameters; must contain 'classes_n',
            'CV_n', 'num_epochs', 'channel_n', 'batch_size', 'learning_rate',
            'extractor_type' ('CNN' or 'CNNLSTM'), 'device',
            'show_diagnosis_plt' (plus CNNLSTM-specific keys when used).
        src_name, tgt_name: '<dataset>_<sensorloc>' identifiers.
        inputdir: root folder containing '<dataset>/<sensorloc>/' data.
        outputdir: folder to write models, figures and performance files.

    Returns:
        (df_performance, num_params): per-fold metric DataFrame and the
        trainable-parameter count of the (last) model.
    """
    show_train_log = False
    # show_diagnosis_plt = False
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    # TODO: don't need to extract training_params
    classes_n = training_params['classes_n']
    CV_n = training_params['CV_n']
    num_epochs = training_params['num_epochs']
    channel_n = training_params['channel_n']
    batch_size = training_params['batch_size']
    learning_rate = training_params['learning_rate']
    extractor_type = training_params['extractor_type']
    device = training_params['device']
    show_diagnosis_plt = training_params['show_diagnosis_plt']
    df_performance = pd.DataFrame(0, index=np.arange(CV_n),
                  columns=['i_CV',
                           'val_src_acc','val_tgt_acc',
                           'val_src_sensitivity','val_tgt_sensitivity',
                           'val_src_precision','val_tgt_precision',
                           'val_src_F1','val_tgt_F1','PAD'])
    # names are '<dataset>_<sensorloc>'
    src_dataset_name = src_name.split('_')[0]
    src_sensor_loc = src_name.split('_')[1]
    tgt_dataset_name = tgt_name.split('_')[0]
    tgt_sensor_loc = tgt_name.split('_')[1]
    src_inputdir = inputdir + '{}/{}/'.format(src_dataset_name, src_sensor_loc)
    tgt_inputdir = inputdir + '{}/{}/'.format(tgt_dataset_name, tgt_sensor_loc)
    get_src_loader = get_data_loader
    get_tgt_loader = get_data_loader
    for i_CV in range(CV_n):
        print('------------------------------Working on i_CV {}------------------------------'.format(i_CV))
        # 1. prepare dataset
        src_train_loader, src_val_loader = get_src_loader(src_inputdir, i_CV, batch_size, learning_rate)
        tgt_train_loader, tgt_val_loader = get_tgt_loader(tgt_inputdir, i_CV, batch_size, learning_rate)
        # the model expect the same input dimension for src and tgt data
        src_train_size = src_train_loader.dataset.data.data.detach().cpu().numpy().shape[0]
        src_val_size = src_val_loader.dataset.data.data.detach().cpu().numpy().shape[0]
        tgt_train_size = tgt_train_loader.dataset.data.data.detach().cpu().numpy().shape[0]
        tgt_val_size = tgt_val_loader.dataset.data.data.detach().cpu().numpy().shape[0]
        src_input_dim = src_train_loader.dataset.data.data.detach().cpu().numpy().shape[2]
        tgt_input_dim = tgt_train_loader.dataset.data.data.detach().cpu().numpy().shape[2]
        # 2. prepare model
        total_step = len(src_train_loader)
        train_performance_dict_list = list( {} for i in range(num_epochs) )
        val_src_performance_dict_list = list( {} for i in range(num_epochs) )
        val_tgt_performance_dict_list = list( {} for i in range(num_epochs) )
        PAD_list = [0] * num_epochs
        # model = DannModel(device, class_N=classes_n, domain_N=2, channel_n=channel_n, input_dim=src_input_dim).to(device).float()
        if extractor_type == 'CNN':
            model = DannModel(device, class_N=classes_n, domain_N=2, channel_n=channel_n, input_dim=src_input_dim).to(device).float()
        elif extractor_type == 'CNNLSTM':
            dropout = training_params['dropout']
            hiddenDim_f = training_params['hiddenDim_f']
            hiddenDim_y = training_params['hiddenDim_y']
            hiddenDim_d = training_params['hiddenDim_d']
            win_size = training_params['win_size']
            win_stride = training_params['win_stride']
            step_n = training_params['step_n']
            model = CnnLstm(device, class_N=classes_n, channel_n=channel_n, dropout=dropout, hiddenDim_f=hiddenDim_f, hiddenDim_y=hiddenDim_y, hiddenDim_d=hiddenDim_d, win_size=win_size, win_stride=win_stride, step_n=step_n).to(device)
        model_name = model.__class__.__name__
        # loss and optimizer
        class_criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=0.01)
        # if show_diagnosis_plt:
        #   model_output_diagnosis_trainval(model, src_train_loader, tgt_train_loader, src_val_loader, tgt_val_loader, device, '_epoch{}'.format(0), i_CV, outputdir)
        #   model_features_diagnosis_trainval(model, src_train_loader, tgt_train_loader, src_val_loader, tgt_val_loader, device, '_epoch{}'.format(0), i_CV, outputdir)
        # 3. fit the model
        for epoch in range(num_epochs):
            train_performance_dict = train_epoch(src_train_loader, device, model, class_criterion, optimizer, epoch)
            train_performance_dict_list[epoch] = train_performance_dict
            val_src_performance_dict = val_epoch(src_val_loader, device, model, class_criterion, optimizer, epoch, 'src')
            val_src_performance_dict_list[epoch] = val_src_performance_dict
            val_tgt_performance_dict = val_epoch(tgt_val_loader, device, model, class_criterion, optimizer, epoch, 'tgt')
            val_tgt_performance_dict_list[epoch] = val_tgt_performance_dict
            PAD = get_PAD(src_train_loader, tgt_train_loader, src_val_loader, tgt_val_loader, model, device, c=3000)
            PAD_list[epoch] = PAD
        # fig = plt.figure(figsize=(5, 3), dpi=80)
        # ax1 = fig.add_subplot(1, 1, 1)
        # ax1.set_title('PAD')
        # ax1.set_xlabel('epoch')
        # ax1.plot(np.arange(num_epochs), PAD_list, label='PAD')
        # ax1.legend(loc="upper right")
        # record the metrics of the *last* epoch for this fold
        # (after the loop, `epoch` == num_epochs - 1)
        df_performance.loc[i_CV,['i_CV',
                         'val_src_acc','val_tgt_acc',
                         'val_src_sensitivity','val_tgt_sensitivity',
                         'val_src_precision','val_tgt_precision',
                         'val_src_F1','val_tgt_F1', 'PAD']] = [i_CV,
                               val_src_performance_dict_list[epoch]['src_acc'], val_tgt_performance_dict_list[epoch]['tgt_acc'],
                               val_src_performance_dict_list[epoch]['src_sensitivity'], val_tgt_performance_dict_list[epoch]['tgt_sensitivity'],
                               val_src_performance_dict_list[epoch]['src_precision'], val_tgt_performance_dict_list[epoch]['tgt_precision'],
                               val_src_performance_dict_list[epoch]['src_F1'], val_tgt_performance_dict_list[epoch]['tgt_F1'], PAD_list[epoch]]
        if show_diagnosis_plt:
            baseline_learning_diagnosis(num_epochs, train_performance_dict_list, val_src_performance_dict_list, val_tgt_performance_dict_list, PAD_list, i_CV, outputdir)
        print('-----------------Exporting pytorch model-----------------')
        # build a fresh instance for export_model to load weights into
        if extractor_type == 'CNN':
            loaded_model = DannModel(device, class_N=classes_n, domain_N=2, channel_n=channel_n, input_dim=src_input_dim).to(device).float()
        elif extractor_type == 'CNNLSTM':
            dropout = training_params['dropout']
            hiddenDim_f = training_params['hiddenDim_f']
            hiddenDim_y = training_params['hiddenDim_y']
            hiddenDim_d = training_params['hiddenDim_d']
            win_size = training_params['win_size']
            win_stride = training_params['win_stride']
            step_n = training_params['step_n']
            loaded_model = CnnLstm(device, class_N=classes_n, channel_n=channel_n, dropout=dropout, hiddenDim_f=hiddenDim_f, hiddenDim_y=hiddenDim_y, hiddenDim_d=hiddenDim_d, win_size=win_size, win_stride=win_stride, step_n=step_n).to(device)
        export_model(model, loaded_model, outputdir+'model_CV{}'.format(i_CV))
        print('-----------------Evaluating trained model-----------------')
        if show_diagnosis_plt:
            model_output_diagnosis_trainval(loaded_model, src_train_loader, tgt_train_loader, src_val_loader, tgt_val_loader, device, '_epoch{}'.format(epoch), i_CV, outputdir)
            model_features_diagnosis_trainval(loaded_model, src_train_loader, tgt_train_loader, src_val_loader, tgt_val_loader, device, '_epoch{}'.format(epoch), i_CV, outputdir)
    # 5. export model performance as df
    # TODO: probably remove this
    print('---------------Exporting model performance---------------')
    export_perofmance(df_performance, CV_n, outputdir)
    # print('src val loss: {:.4f}±{:.4f}'.format(df_performance.loc['mean']['val_loss'], df_performance.loc['std']['val_loss']))
    print('src val acc: {:.4f}±{:.4f}'.format(df_performance.loc['mean']['val_src_acc'], df_performance.loc['std']['val_src_acc']))
    # print('tgt val loss: {:.4f}±{:.4f}'.format(df_performance.loc['mean']['tgt_val_loss'], df_performance.loc['std']['tgt_val_loss']))
    print('tgt val acc: {:.4f}±{:.4f}'.format(df_performance.loc['mean']['val_tgt_acc'], df_performance.loc['std']['val_tgt_acc']))
    # print('=========================================================')
    # 6. export notebook parameters as dict
    # datetime object containing current date and time
    # TODO: probably remove this
    print('--------------Exporting notebook parameters--------------')
    now = datetime.now()
    dt_string = now.strftime("%Y/%m/%d %H:%M:%S")
    # NOTE(review): sizes refer to the last fold's loaders
    samples_n = src_train_size + src_val_size
    # TODO: don't need to make param_dict
    param_dict = {
      'CV_n': CV_n,
      'samples_n': samples_n,
      'classes_n': classes_n,
      'model_name': model_name,
      'dataset_name': src_dataset_name,
      'num_epochs': num_epochs,
      'channel_n': channel_n,
      'batch_size': batch_size,
      'learning_rate': learning_rate,
      'sensor_loc': src_sensor_loc,
      'date': dt_string,
      'input_dim': (batch_size, src_train_loader.dataset.data.size()[1], src_train_loader.dataset.data.size()[2]),
      'output_dim': src_train_loader.dataset.labels[0:batch_size].data.detach().cpu().numpy().shape,
    }
    print(param_dict)
    with open(outputdir+'notebook_param.json', 'w') as fp:
        json.dump(param_dict, fp)
    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    return df_performance, num_params
def DannModel_fitting(training_params, src_name, tgt_name, inputdir, outputdir):
    """Train a DANN (domain-adversarial) model transferring source -> target.

    For each cross-validation fold: runs train_epoch_dann over paired
    source/target loaders, evaluates each epoch with val_epoch_dann, tracks
    the Proxy-A-Distance (get_PAD), exports the trained model and per-fold
    metrics, and writes a JSON of run parameters.

    Args:
        training_params: dict of hyperparameters; must contain 'classes_n',
            'CV_n', 'num_epochs', 'channel_n', 'batch_size', 'learning_rate',
            'extractor_type' ('CNN' or 'CNNLSTM'), 'device',
            'show_diagnosis_plt' (plus CNNLSTM-specific keys when used).
        src_name, tgt_name: '<dataset>_<sensorloc>' identifiers.
        inputdir: root folder containing '<dataset>/<sensorloc>/' data.
        outputdir: folder to write models, figures and performance files.

    Returns:
        (df_performance, num_params): per-fold metric DataFrame and the
        trainable-parameter count of the (last) model.
    """
    # show_diagnosis_plt = False
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    # TODO: don't need to assign training_params values
    classes_n = training_params['classes_n']
    CV_n = training_params['CV_n']
    num_epochs = training_params['num_epochs']
    channel_n = training_params['channel_n']
    batch_size = training_params['batch_size']
    learning_rate = training_params['learning_rate']
    extractor_type = training_params['extractor_type']
    device = training_params['device']
    show_diagnosis_plt = training_params['show_diagnosis_plt']
    df_performance = pd.DataFrame(0, index=np.arange(CV_n),
                  columns=['i_CV',
                           'val_src_class_acc','val_tgt_class_acc',
                           'val_src_class_sensitivity','val_tgt_class_sensitivity',
                           'val_src_class_precision','val_tgt_class_precision',
                           'val_src_class_F1','val_tgt_class_F1',
                           'val_domain_acc','PAD'])
    # names are '<dataset>_<sensorloc>'
    src_dataset_name = src_name.split('_')[0]
    src_sensor_loc = src_name.split('_')[1]
    tgt_dataset_name = tgt_name.split('_')[0]
    tgt_sensor_loc = tgt_name.split('_')[1]
    src_inputdir = inputdir + '{}/{}/'.format(src_dataset_name, src_sensor_loc)
    tgt_inputdir = inputdir + '{}/{}/'.format(tgt_dataset_name, tgt_sensor_loc)
    get_src_loader = get_data_loader
    get_tgt_loader = get_data_loader
    for i_CV in range(CV_n):
        print('------------------------------Working on i_CV {}------------------------------'.format(i_CV))
        # 1. prepare dataset
        src_train_loader, src_val_loader = get_src_loader(src_inputdir, i_CV, batch_size, learning_rate)
        tgt_train_loader, tgt_val_loader = get_tgt_loader(tgt_inputdir, i_CV, batch_size, learning_rate)
        # the model expect the same input dimension for src and tgt data
        src_train_size = src_train_loader.dataset.data.data.detach().cpu().numpy().shape[0]
        src_val_size = src_val_loader.dataset.data.data.detach().cpu().numpy().shape[0]
        tgt_train_size = tgt_train_loader.dataset.data.data.detach().cpu().numpy().shape[0]
        tgt_val_size = tgt_val_loader.dataset.data.data.detach().cpu().numpy().shape[0]
        src_input_dim = src_train_loader.dataset.data.data.detach().cpu().numpy().shape[2]
        tgt_input_dim = tgt_train_loader.dataset.data.data.detach().cpu().numpy().shape[2]
        # 3. fit the model
        total_step = len(src_train_loader)
        train_performance_dict_list = list( {} for i in range(num_epochs) )
        val_performance_dict_list = list( {} for i in range(num_epochs) )
        PAD_list = [0] * num_epochs
        if extractor_type == 'CNN':
            model = DannModel(device, class_N=classes_n, domain_N=2, channel_n=channel_n, input_dim=src_input_dim).to(device).float()
        elif extractor_type == 'CNNLSTM':
            dropout = training_params['dropout']
            hiddenDim_f = training_params['hiddenDim_f']
            hiddenDim_y = training_params['hiddenDim_y']
            hiddenDim_d = training_params['hiddenDim_d']
            win_size = training_params['win_size']
            win_stride = training_params['win_stride']
            step_n = training_params['step_n']
            model = CnnLstm(device, class_N=classes_n, channel_n=channel_n, dropout=dropout, hiddenDim_f=hiddenDim_f, hiddenDim_y=hiddenDim_y, hiddenDim_d=hiddenDim_d, win_size=win_size, win_stride=win_stride, step_n=step_n).to(device)
        # model = DannModel(device, class_N=classes_n, domain_N=2, channel_n=channel_n, input_dim=src_input_dim).to(device).float()
        model_name = model.__class__.__name__
        train_size = src_train_size+tgt_train_size
        # loss and optimizer (separate criteria for the class and domain heads)
        class_criterion = nn.CrossEntropyLoss()
        domain_criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=0.01)
        # if show_diagnosis_plt:
        #   model_output_diagnosis_trainval(model, src_train_loader, tgt_train_loader, src_val_loader, tgt_val_loader, device, '_epoch{}'.format(0), i_CV, outputdir)
        #   model_features_diagnosis_trainval(model, src_train_loader, tgt_train_loader, src_val_loader, tgt_val_loader, device, '_epoch{}'.format(0), i_CV, outputdir)
        for epoch in range(num_epochs):
            train_performance_dict_list[epoch] = train_epoch_dann(src_train_loader, tgt_train_loader, device,
                                          model,
                                          class_criterion, domain_criterion, optimizer, epoch)
            val_performance_dict_list[epoch] = val_epoch_dann(src_val_loader, tgt_val_loader, device,
                                          model,
                                          class_criterion, domain_criterion, epoch)
            PAD = get_PAD(src_train_loader, tgt_train_loader, src_val_loader, tgt_val_loader, model, device, c=3000)
            PAD_list[epoch] = PAD
        # fig = plt.figure(figsize=(5, 3), dpi=80)
        # ax1 = fig.add_subplot(1, 1, 1)
        # ax1.set_title('PAD')
        # ax1.set_xlabel('epoch')
        # ax1.plot(np.arange(num_epochs), PAD_list, label='PAD')
        # ax1.legend(loc="upper right")
        # record the metrics of the *last* epoch for this fold
        # (after the loop, `epoch` == num_epochs - 1)
        df_performance.loc[i_CV,['i_CV',
                         'val_src_class_acc','val_tgt_class_acc',
                         'val_src_class_sensitivity','val_tgt_class_sensitivity',
                         'val_src_class_precision','val_tgt_class_precision',
                         'val_src_class_F1','val_tgt_class_F1',
                         'val_domain_acc', 'PAD']] = [i_CV,
                               val_performance_dict_list[epoch]['src_class_acc'], val_performance_dict_list[epoch]['tgt_class_acc'],
                               val_performance_dict_list[epoch]['src_sensitivity'], val_performance_dict_list[epoch]['tgt_sensitivity'],
                               val_performance_dict_list[epoch]['src_precision'], val_performance_dict_list[epoch]['tgt_precision'],
                               val_performance_dict_list[epoch]['src_F1'], val_performance_dict_list[epoch]['tgt_F1'],
                               val_performance_dict_list[epoch]['domain_acc'], PAD_list[epoch]]
        if show_diagnosis_plt:
            dann_learning_diagnosis(num_epochs, train_performance_dict_list, val_performance_dict_list, PAD_list, i_CV, outputdir)
        print('-----------------Exporting pytorch model-----------------')
        # loaded_model = DannModel(device, class_N=classes_n, domain_N=2, channel_n=channel_n, input_dim=src_input_dim).to(device).float()
        # build a fresh instance for export_model to load weights into
        if extractor_type == 'CNN':
            loaded_model = DannModel(device, class_N=classes_n, domain_N=2, channel_n=channel_n, input_dim=src_input_dim).to(device).float()
        elif extractor_type == 'CNNLSTM':
            dropout = training_params['dropout']
            hiddenDim_f = training_params['hiddenDim_f']
            hiddenDim_y = training_params['hiddenDim_y']
            hiddenDim_d = training_params['hiddenDim_d']
            win_size = training_params['win_size']
            win_stride = training_params['win_stride']
            step_n = training_params['step_n']
            loaded_model = CnnLstm(device, class_N=classes_n, channel_n=channel_n, dropout=dropout, hiddenDim_f=hiddenDim_f, hiddenDim_y=hiddenDim_y, hiddenDim_d=hiddenDim_d, win_size=win_size, win_stride=win_stride, step_n=step_n).to(device)
        loaded_model.eval()
        export_model(model, loaded_model, outputdir+'model_CV{}'.format(i_CV))
        print('-----------------Evaluating trained model-----------------')
        if show_diagnosis_plt:
            model_output_diagnosis_trainval(loaded_model, src_train_loader, tgt_train_loader, src_val_loader, tgt_val_loader, device, '_epoch{}'.format(epoch), i_CV, outputdir)
            model_features_diagnosis_trainval(loaded_model, src_train_loader, tgt_train_loader, src_val_loader, tgt_val_loader, device, '_epoch{}'.format(epoch), i_CV, outputdir)
    # 5. export model performance as df
    print('---------------Exporting model performance---------------')
    export_perofmance(df_performance, CV_n, outputdir)
    # 6. export notebook parameters as dict
    # datetime object containing current date and time
    print('--------------Exporting notebook parameters--------------')
    now = datetime.now()
    dt_string = now.strftime("%Y/%m/%d %H:%M:%S")
    # NOTE(review): sizes refer to the last fold's loaders
    samples_n = src_train_size + src_val_size
    param_dict = {
      'CV_n': CV_n,
      'samples_n': samples_n,
      'classes_n': classes_n,
      'model_name': model_name,
      'src_dataset_name': src_dataset_name,
      'tgt_dataset_name': tgt_dataset_name,
      'src_sensor_loc': src_sensor_loc,
      'tgt_sensor_loc': tgt_sensor_loc,
      'date': dt_string,
      'num_epochs': num_epochs,
      'channel_n': channel_n,
      'batch_size': batch_size,
      'learning_rate': learning_rate,
      'input_dim': (batch_size, src_train_loader.dataset.data.size()[1], src_train_loader.dataset.data.size()[2]),
      'output_dim': 2,
      'label_dim': src_train_loader.dataset.labels[0:batch_size].data.detach().cpu().numpy().shape,
    }
    print(param_dict)
    with open(outputdir+'notebook_param.json', 'w') as fp:
        json.dump(param_dict, fp)
    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    return df_performance, num_params
def performance_table(src_name, tgt_name, training_params, inputdir, outputdir):
    """Train and evaluate the three model variants for one source/target pair.

    Runs (1) a baseline trained on source and validated on target,
    (2) a baseline trained on target, and (3) a DANN transfer model,
    then collates their cross-validated metrics into per-metric tables.

    Returns a dict of four DataFrames keyed 'df_acc', 'df_sensitivity',
    'df_precision', 'df_F1'.
    """
    task_name = src_name+'_'+tgt_name
    start_time = time.time()
    print('\n==========================================================================================================================')
    print('====================== train on source, val on target(source={} to target={}) ======================'.format(src_name, tgt_name))
    print('==========================================================================================================================\n')
    source_outputs = BaselineModel_fitting(training_params, src_name, tgt_name, inputdir, outputdir+'source/')
    print('\n==========================================================================================================================')
    print('====================== train on target, val on target(source={} to target={}) ======================'.format(src_name, tgt_name))
    print('==========================================================================================================================\n')
    target_outputs = BaselineModel_fitting(training_params, tgt_name, src_name, inputdir, outputdir+'target/')
    print('\n==========================================================================================================================')
    print('====================== DANN training transferring knowledge(source={} to target={}) ======================'.format(src_name, tgt_name))
    print('==========================================================================================================================\n')
    dann_outputs = DannModel_fitting(training_params, src_name, tgt_name, inputdir, outputdir+'dann/')
    time_elapsed = time.time() - start_time
    print('time elapsed:', time.strftime("%H:%M:%S", time.gmtime(time_elapsed)))
    # Each *_outputs is (df_performance, num_params); num_params is overwritten
    # by each unpacking, so only the DANN model's count survives below.
    df_performance_src, num_params = source_outputs
    df_performance_tgt, num_params = target_outputs
    df_performance_dann, num_params = dann_outputs
    def get_df_performance_table(df_performance_dann, df_performance_src, df_performance_tgt, training_params, metric_name, time_elapsed):
        # Build one summary column (indexed by training_params['HP_name'])
        # holding "mean±std" strings for the given metric across CV folds.
        # `num_params` is captured from the enclosing scope (closure).
        df_performance_table = pd.DataFrame('', index=['channel_n', 'batch_size', 'learning_rate',
                                            'source', 'DANN', 'target', 'domain', 'PAD_source', 'PAD_DANN', 'time_elapsed', 'num_params'], columns=[])
        df_performance_table.loc['channel_n',training_params['HP_name']] = training_params['channel_n']
        df_performance_table.loc['batch_size',training_params['HP_name']] = training_params['batch_size']
        df_performance_table.loc['learning_rate',training_params['HP_name']] = training_params['learning_rate']
        df_performance_table.loc['source',training_params['HP_name']] = '{:.3f}±{:.3f}'.format(df_performance_src.loc['mean']['val_tgt_{}'.format(metric_name)], df_performance_src.loc['std']['val_tgt_{}'.format(metric_name)])
        df_performance_table.loc['DANN',training_params['HP_name']] = '{:.3f}±{:.3f}'.format(df_performance_dann.loc['mean']['val_tgt_class_{}'.format(metric_name)], df_performance_dann.loc['std']['val_tgt_class_{}'.format(metric_name)])
        df_performance_table.loc['target',training_params['HP_name']] = '{:.3f}±{:.3f}'.format(df_performance_tgt.loc['mean']['val_src_{}'.format(metric_name)], df_performance_tgt.loc['std']['val_src_{}'.format(metric_name)])
        df_performance_table.loc['domain',training_params['HP_name']] = '{:.3f}±{:.3f}'.format(df_performance_dann.loc['mean']['val_domain_acc'], df_performance_dann.loc['std']['val_domain_acc'])
        # NOTE(review): 'PAD_source' pairs the *source* mean with the *DANN*
        # std -- likely a copy-paste slip (expected df_performance_src std);
        # confirm before relying on this row.
        df_performance_table.loc['PAD_source',training_params['HP_name']] = '{:.3f}±{:.3f}'.format(df_performance_src.loc['mean']['PAD'], df_performance_dann.loc['std']['PAD'])
        df_performance_table.loc['PAD_DANN',training_params['HP_name']] = '{:.3f}±{:.3f}'.format(df_performance_dann.loc['mean']['PAD'], df_performance_dann.loc['std']['PAD'])
        df_performance_table.loc['time_elapsed',training_params['HP_name']] = time_elapsed
        df_performance_table.loc['num_params',training_params['HP_name']] = num_params
        return df_performance_table
    # TODO combine the get function and dict
    df_performance_table_acc = get_df_performance_table(df_performance_dann, df_performance_src, df_performance_tgt, training_params, 'acc', time_elapsed)
    df_performance_table_sensitivity = get_df_performance_table(df_performance_dann, df_performance_src, df_performance_tgt, training_params, 'sensitivity', time_elapsed)
    df_performance_table_precision = get_df_performance_table(df_performance_dann, df_performance_src, df_performance_tgt, training_params, 'precision', time_elapsed)
    df_performance_table_F1 = get_df_performance_table(df_performance_dann, df_performance_src, df_performance_tgt, training_params, 'F1', time_elapsed)
    df_dict = {
        'df_acc': df_performance_table_acc,
        'df_sensitivity': df_performance_table_sensitivity,
        'df_precision': df_performance_table_precision,
        'df_F1': df_performance_table_F1,
    }
    # Free cached GPU memory between hyper-parameter runs.
    torch.cuda.empty_cache()
    return df_dict
| [
"chanmi168@gmail.com"
] | chanmi168@gmail.com |
b5c67701e84dfa41e36333c8a985bf9e72b01b6a | 5ad6b1b7fead932860d7a1f5d1281c339ca4d487 | /papermerge/contrib/admin/migrations/0001_initial.py | d389247d6f2852eb8cd071971030e581fc0c895c | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only"
] | permissive | zhiliangpersonal/papermerge | da51fc7c6674a53e1c007e2eb70e5cc05de5da9e | 56c10c889e1db4760a3c47f2374a63ec12fcec3b | refs/heads/master | 2022-12-18T15:20:42.747194 | 2020-09-28T05:09:32 | 2020-09-28T05:09:32 | 299,205,072 | 1 | 0 | Apache-2.0 | 2020-09-28T06:04:35 | 2020-09-28T06:04:34 | null | UTF-8 | Python | false | false | 1,256 | py | # Generated by Django 3.0.8 on 2020-08-28 11:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the LogEntry model used by the
    # admin app's audit log. Do not hand-edit generated operations; add a new
    # migration instead.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='LogEntry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message', models.TextField()),
                # Numeric levels mirror the stdlib logging levels (10..50).
                ('level', models.PositiveIntegerField(choices=[(10, 'Debug'), (20, 'Info'), (30, 'Warning'), (40, 'Error'), (50, 'Critical')], default=20)),
                ('action_time', models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='action time')),
                # SET_NULL keeps log entries when the acting user is deleted.
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'log entry',
                'verbose_name_plural': 'log entries',
                'ordering': ('-action_time',),
            },
        ),
    ]
| [
"eugen@papermerge.com"
] | eugen@papermerge.com |
fea9f9355ffa68b4bed0ac5ce47c1766bc14e42c | 740fbcea3be380d665d315654bfad501b3fdba5e | /download_pbp_stats.py | 3eb5e115bff4bde72943b37ddd17fc28a98c0b6c | [] | no_license | astromberg/igwip | d6134db4098a3396775bc63eed4a2602698173ef | d9ce795e10c538a7efc96fa3f9e9f7027f2d7564 | refs/heads/master | 2021-01-17T12:47:51.391363 | 2017-10-02T20:59:34 | 2017-10-02T20:59:34 | 59,514,013 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,046 | py | import json
import os
import requests
base_pbp_url = 'http://stats.nba.com/stats/playbyplayv2?StartPeriod=0&EndPeriod=0&tabView=playbyplay&GameID={game_id}'
base_summary_url = 'http://stats.nba.com/stats/boxscoresummaryv2?GameID={game_id}'
def make_all_game_ids():
    """Build every stats.nba.com game ID for the 2014-15 season.

    An ID is 10 characters: a zero-padded 3-digit season-type code, a
    2-digit season year, and a zero-padded 5-digit game number.  Only
    regular season (002) and post season (004) games are produced.
    """
    game_ids = []
    # NOTE: range(14, 15) covers only season year 14 (the 2014-15 season).
    for year in range(14, 15):
        for season_code in [2, 4]:
            # Season-type codes used by stats.nba.com:
            #   001 pre-season, 002 regular season, 003 all-star, 004 post season
            game_count = 400 if season_code == 4 else 1231
            for game_number in range(1, game_count):
                game_id = '{season_code:0>3}{season_year:0>2}{game_number:0>5}'.format(
                    season_code=season_code, season_year=year, game_number=game_number)
                game_ids.append(game_id)
    return game_ids
# Download play-by-play and box-score-summary JSON for every game ID,
# skipping games already on disk.  Network I/O against stats.nba.com.
empty_game_string = ''
with open(os.getcwd() + '/data/nba_stats/000_empty_game.json', 'r') as f:
    empty_game_string = json.dumps(json.load(f))
# NOTE(review): empty_game_string is loaded but never used below -- presumably
# intended for comparing against empty responses; confirm or remove.
for game_id in make_all_game_ids():
    pbp_file_name = os.getcwd() + '/data/nba_stats/{game_id}_pbp.json'.format(game_id=game_id)
    summary_file_name = os.getcwd() + '/data/nba_stats/{game_id}_summary.json'.format(game_id=game_id)
    # Skip games whose play-by-play AND summary files already exist.
    if os.path.isfile(pbp_file_name) and os.path.isfile(summary_file_name):
        print('already have')
        continue
    # A browser-like User-Agent; stats.nba.com rejects the default one.
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
    r = requests.get(base_pbp_url.format(game_id=game_id), headers=headers)
    print(base_pbp_url.format(game_id=game_id))
    if r.status_code != 200:
        continue
    # An empty rowSet means the game ID does not exist (e.g. shortened playoffs).
    if len(r.json()['resultSets'][0]['rowSet']) == 0:
        continue
    with open(pbp_file_name, 'w') as outfile:
        json.dump(r.json(), outfile)
    # NOTE(review): the summary response's status code is not checked before
    # writing -- a failed request would persist an error payload.
    r = requests.get(base_summary_url.format(game_id=game_id), headers=headers)
    with open(summary_file_name, 'w') as outfile:
        json.dump(r.json()['resultSets'][0], outfile)
| [
"andrew.stromberg@gmail.com"
] | andrew.stromberg@gmail.com |
26a9de6da724ab0fc3139d25be7993a62422766b | d30ab5fec56c3b4dae987571b28a93c04788f208 | /pytool/base/database.py | 86384222bc746b87b35e817658fefa5a03e7d8e7 | [] | no_license | Wizardyi/MyFirstProject | 800d7d974c48fa444964235083432b7f04a34c15 | a092f719c0e168e1ea65d3820a89e2d4313af737 | refs/heads/master | 2023-03-09T03:57:53.607241 | 2023-03-06T03:40:02 | 2023-03-06T03:40:02 | 227,306,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,546 | py | #encoding=utf-8
import pymysql
class DbInfo:
    """Settings holder for one MySQL database with a lazily opened connection."""

    def __init__(self, id, host, port, user, password, dbName):
        self.id = id
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.dbName = dbName
        self.connection = None

    def getConnection(self):
        """Open the pymysql connection on first use; reuse it afterwards."""
        if self.connection == None:
            self.connection = pymysql.connect(
                host=self.host, port=self.port, user=self.user,
                passwd=self.password, db=self.dbName)
        return self.connection

    def close(self):
        """Close the cached connection (if any) and forget it."""
        if self.connection != None:
            self.connection.close()
            self.connection = None
class Database:
    """Connection manager for either one database or a set of sharded ones.

    When shardingCount > 1, user data is spread over numbered schemas
    (dbPrefix00..dbPrefixNN); the schema for a given shardingId is
    shardingId % shardingCount.

    Fix: the Python-2-only `<>` operator and `dict.has_key` (both removed
    in Python 3) were replaced with `!=`/`is not None` and `in`, which
    behave identically on Python 2 as well.
    """
    def __init__(self, shardingCount):
        self.allDb = None  # dict of DbInfo (sharded) or a single DbInfo
        self.shardingCount = shardingCount

    def addShardingDb(self, dbPrefix, host, port, user, password, minSuffix, maxSuffix):
        """Register one DbInfo per shard suffix in [minSuffix, maxSuffix)."""
        if self.shardingCount <= 1:
            print(u"该连接池中不支持分库配置!")
            return
        if self.allDb is None:
            self.allDb = {}
        for i in range(minSuffix, maxSuffix):
            dbName = dbPrefix + str(i).zfill(2)
            newInstance = DbInfo(i, host, port, user, password, dbName)
            if i in self.allDb:
                print(u"发现重复的数据库:" + dbName)
                return
            else:
                self.allDb[i] = newInstance

    def addSingleDb(self, host, port, user, password, dbName):
        """Register a single (non-sharded) database."""
        if self.shardingCount > 1:
            print(u"该连接池使用了分库配置!")
            return
        if self.allDb is None:
            self.allDb = DbInfo(-1, host, port, user, password, dbName)

    def getConnection(self, shardingId=None):
        """Return a live connection for shardingId (or the single DB), else None."""
        if self.allDb is not None:
            if self.shardingCount > 1:
                dbIndex = shardingId % self.shardingCount
                if dbIndex in self.allDb:
                    return self.allDb[dbIndex].getConnection()
            else:
                return self.allDb.getConnection()
        return None

    def closeAll(self):
        """Close every cached connection and drop the registry."""
        if self.allDb is not None:
            if self.shardingCount > 1:
                for i in self.allDb:
                    self.allDb[i].close()
            else:
                self.allDb.close()
            self.allDb = None

    def queryOne(self, sql, shardingId=None):
        """Run sql and return the first row as a dict, or None on error/no row."""
        connection = self.getConnection(shardingId)
        cursor = connection.cursor(cursor=pymysql.cursors.DictCursor)
        try:
            cursor.execute(sql)
            record = cursor.fetchone()
        except Exception:
            # Best-effort: swallow query errors and report "no row".
            record = None
        cursor.close()
        return record

    def queryAll(self, sql, shardingId=None):
        """Run sql and return all rows as dicts, or None on error."""
        connection = self.getConnection(shardingId)
        cursor = connection.cursor(cursor=pymysql.cursors.DictCursor)
        try:
            cursor.execute(sql)
            records = cursor.fetchall()
        except Exception as e:
            print(e)
            records = None
        cursor.close()
        return records

    def execute(self, sql, shardingId=None):
        """Run a write statement and commit; errors are silently ignored."""
        connection = self.getConnection(shardingId)
        cursor = connection.cursor(cursor=pymysql.cursors.DictCursor)
        try:
            cursor.execute(sql)
            connection.commit()
        except Exception:
            # Best-effort: failures are deliberately not propagated.
            pass
        cursor.close()
# Number of physical sub-tables each logical game table is split into.
_GAME_TABLE_COUNT = {
    'guild_apply_info' : 10,
    'guild_member_info' : 10,
    'team_cluster' : 10,
    'user_attr' : 100,
    'user_battle' : 100,
    'user_biography' : 10,
    'user_contact' : 10,
    'user_cycle_battle' : 10,
    'user_dispatch_team' : 10,
    'user_equip' : 100,
    'user_friend' : 100,
    'user_gift_code' : 10,
    'user_guild_info' : 10,
    'user_hero' : 100,
    'user_info' : 10,
    'user_item' : 100,
    'user_littlewar' : 100,
    'user_mail' : 100,
    'user_quest' : 100,
    'user_resource' : 100,
    'user_shared_quest' : 10,
    'user_shop' : 10
}

def getGameTableName(tableName, userId):
    """Map a logical table name + userId to its physical sub-table name.

    The table index is the "hundreds" two digits of userId (divided by 10
    again for tables split only 10 ways).  Unknown tables are returned
    unchanged.

    Fix: `dict.has_key` (removed in Python 3) replaced with `in`, and `/`
    replaced with `//` so the index stays an int on Python 3 (true division
    would produce e.g. '2.0' in the suffix); both identical under Python 2.
    """
    if tableName in _GAME_TABLE_COUNT:
        tableCount = _GAME_TABLE_COUNT[tableName]
        tableIndex = ((userId - (userId % 100)) // 100) % 100
        suffix = str(tableIndex).zfill(2)
        if tableCount == 10:
            tableIndex = (tableIndex - (tableIndex % 10)) // 10
            suffix = str(tableIndex)
        return tableName + '_' + suffix
    return tableName
"yangzhengxiong@hy.com"
] | yangzhengxiong@hy.com |
4a666eed78a6aec1138beeb2dc875cc6e3a4aa25 | a126dc77578447514baef588079faf47686a0d2d | /tmux_menu.py | decc545d4afed38315b4db6f553af1457495af82 | [
"MIT"
] | permissive | dannyn/tmux_menu | 51efea2a65edd6bcedcf6c78937393f2b2e0a044 | 219cb3e0350224c4007a4568309cc3bc23f5c088 | refs/heads/master | 2021-01-10T12:48:12.829071 | 2015-12-16T17:19:04 | 2015-12-16T17:19:04 | 48,123,376 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,215 | py | #!/usr/bin/env python2
import subprocess
from config import attached_icon, dettached_icon, execute_str
def output_menu_entry(tinfo):
    # Print one Openbox pipe-menu <item> element for a tmux session.
    # tinfo: dict with 'name', 'num' (window count) and 'attached' (bool).
    # NOTE: Python 2 print statements; icons and execute_str come from config.py.
    label = "{} ({})".format(tinfo['name'], tinfo['num'])
    execute = execute_str.format(tinfo['name'], tinfo['attached'])
    # Attached sessions get a different icon so they stand out in the menu.
    if tinfo['attached']:
        icon = attached_icon
    else:
        icon = dettached_icon
    print '<item label="{}" icon="{}">'.format(label, icon)
    print '  <action name="Execute">'
    print '    <command>'+execute+'</command>'
    print '  </action>'
    print '</item>'
def get_tmux_info():
    """Run `tmux ls` and return one dict per session.

    Each dict holds 'name', 'num' (the window count token after the first
    colon) and 'attached' (whether the line ends with '(attached)').
    """
    listing = subprocess.check_output(['tmux', 'ls'])
    sessions = []
    for line in listing.splitlines():
        fields = line.split(':')
        sessions.append({
            'name': fields[0],
            'num': fields[1].strip().split(' ')[0],
            'attached': line.split(' ')[-1] == '(attached)',
        })
    return sessions
if __name__ == '__main__':
    # Emit the complete Openbox pipe-menu XML document on stdout
    # (Python 2 print statements).
    print '<openbox_pipe_menu>'
    print '<separator label="Tmux Windows" />'
    for tinfo in get_tmux_info():
        output_menu_entry(tinfo)
    print '</openbox_pipe_menu>'
| [
"daniel.noest@isprime.com"
] | daniel.noest@isprime.com |
776a79e06db168075a74b60c56f6dbc81684f543 | b5276565331a749270d1b8e03d98037d746cb2d1 | /goodbye.py | e2f22f06ef48de3d3baf119ad88f8cac2787ef08 | [
"MIT"
] | permissive | mustious/iGlass-infinity | 588a416736f35038efe366ce8ebcb10cacf0794b | 57f7fe90d34db37bef80432025a258960ad95c45 | refs/heads/master | 2023-08-14T23:44:39.092972 | 2021-10-11T11:13:05 | 2021-10-11T11:13:05 | 316,072,071 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | import os
import time
import random
import speak_out
# Speak a parting greeting (time-of-day dependent) and play a goodbye tone.
good_night_greetings = ["Have a good rest", "Sleep well Champ!"]
good_day_greetings = ["Have a great day"]
# Resolve the tone file relative to this script's directory.
project_root_path = os.path.dirname(os.path.abspath(__file__))
goodbye_tone_path = ".tones/goodbye.wav"
full_goodbye_tone_path = os.path.join(project_root_path, goodbye_tone_path)
print(full_goodbye_tone_path)
# Night is 9pm-5am (exclusive of 20:xx and 05:xx boundaries as written).
current_time = time.localtime().tm_hour
if current_time > 20 or current_time < 5:
    greeting = random.choice(good_night_greetings)
else:
    greeting = random.choice(good_day_greetings)
speaker = speak_out.SpeakOut()
try:
    # NOTE(review): "Good night" is spoken unconditionally, even when the
    # daytime branch chose a daytime greeting -- confirm this is intended.
    speaker.speak("Good night")
    time.sleep(0.5)
    speaker.speak(greeting)
    if os.path.exists(full_goodbye_tone_path):
        os.system(f"aplay {full_goodbye_tone_path}")
    else:
        pass
except:
    # NOTE(review): bare except catches *any* failure (TTS, sleep, aplay)
    # and prints a misleading message; consider narrowing.
    print("path does not exist")
#os.system("sudo shutdown down")
| [
"mustious@gmail.com"
] | mustious@gmail.com |
8739e22a33dda6ae085cc5b72409662c7872f05e | ce552e7f63b653bf41e3e14f8432fd7c277abe2e | /47.py | 0e31e6f538f9b7d5025a21d2dcd0c71f67b0634c | [] | no_license | patrickgaskill/project-euler | 6fe3b96697fa5aee4586b30084c758110f60d6f5 | 3d4bef30e58587a0748e6e2183302f1c01580c0c | refs/heads/master | 2021-01-09T06:14:13.487634 | 2017-02-28T23:06:43 | 2017-02-28T23:06:43 | 80,942,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | from functools import reduce
from patrick import is_prime, factors
ORDER = 4
def consec(n, k):
    """Return the k consecutive integers starting at n, as a list."""
    return list(range(n, n + k))
def prime_factors(n):
    """Return the prime divisors of n, in the order `factors` yields them."""
    return list(filter(is_prime, factors(n)))
# Project Euler 47: find the first of ORDER consecutive integers that each
# have at least ORDER prime factors, and print it.
n = 2
while True:
    # NOTE: the generator's `n` shadows the outer `n` -- harmless here
    # because generator expressions get their own scope.
    if all(len(prime_factors(n)) >= ORDER for n in consec(n, ORDER)):
        print(n)
        break
    n += 1
| [
"patrick.gaskill@gmail.com"
] | patrick.gaskill@gmail.com |
e3591ee47798db1dd1279f68760fd997bd52b117 | 817241e046e181d794bac9743187618ca39e8ffe | /dojodon/main/apps/store/views.py | ce86fde041033c7240c1b91ee168874a201c15a8 | [] | no_license | theresa-e/python-django | 0caac01d441331cbae6974644c730a0be5748f12 | 6d6206d5675f5e6b006b580d5ee406328ac8c0de | refs/heads/master | 2020-03-18T13:07:49.311807 | 2018-06-21T20:12:42 | 2018-06-21T20:12:42 | 134,762,174 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,419 | py | from django.shortcuts import render, HttpResponse, redirect
def index(request):
    """Render the store landing page, logging the visit to stdout."""
    banner = '*' * 10
    print(banner, 'User reached store.', banner)
    return render(request, 'store/index.html')
def buy(request):
print('*'*10, 'The form was submitted.', '*'*10)
if 'price' not in request.session:
request.session['price'] = 0
if 'total_due' not in request.session:
request.session['total_due'] = 0
if 'total_items' not in request.session:
request.session['total_items'] = 0
# If user is buying blankets
if int(request.POST['product_id']) == 1:
request.session['total_due'] += int(request.POST['blanket']) * 19.99
request.session['price'] = int(request.POST['blanket']) * 19.99
request.session['total_items'] += int(request.POST['blanket'])
if 'blanket' not in request.session:
request.session['blanket'] = request.POST['blanket']
request.session['blanket'] = request.POST['blanket']
# If user is buying whiteboards.
elif int(request.POST['product_id']) == 2:
request.session['total_due'] += int(request.POST['whiteboard']) * 199.99
request.session['price'] += int(request.POST['whiteboard']) * 199.99
request.session['total_items'] += int(request.POST['whiteboard'])
if 'whiteboard' not in request.session:
request.session['whiteboard'] = request.POST['whiteboard']
request.session['whiteboard'] = request.POST['whiteboard']
# If user is buying textbooks
elif int(request.POST['product_id']) == 3:
request.session['total_due'] += int(request.POST['textbook']) * 29.99
request.session['price'] += int(request.POST['textbook']) * 29.99
request.session['total_items'] += int(request.POST['textbook'])
if 'textbook' not in request.session:
request.session['textbook'] = request.POST['textbook']
request.session['textbook'] = request.POST['textbook']
# If user is buying mugs
elif int(request.POST['product_id']) == 4:
request.session['total_due'] += int(request.POST['mug']) * 9.99
request.session['price'] += int(request.POST['mug']) * 9.99
request.session['total_items'] += int(request.POST['mug'])
if 'mug' not in request.session:
request.session['mug'] = request.POST['mug']
request.session['mug'] = request.POST['mug']
return redirect('/checkout')
| [
"910theresa@gmail.com"
] | 910theresa@gmail.com |
f3101592843dec3b611e67273cc858134270ad80 | 7cfbb765e4eea3f292ba5b6c18bfc74465b15a6e | /Python3/199.binary-tree-right-side-view.py | fc6dcda84ad75417d04f3628ca6e56fc686649cd | [
"MIT"
] | permissive | canhetingsky/LeetCode | 1f7aa6e40d7cbfd8d856a2ea32b0cb400f054bf7 | 67f4eaeb5746d361056d08df828c653f89dd9fdd | refs/heads/master | 2021-06-23T17:15:27.363959 | 2020-05-05T07:01:01 | 2020-05-05T07:01:01 | 210,280,887 | 1 | 0 | MIT | 2021-03-25T23:08:16 | 2019-09-23T06:31:57 | Python | UTF-8 | Python | false | false | 744 | py | #
# @lc app=leetcode id=199 lang=python3
#
# [199] Binary Tree Right Side View
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def rightSideView(self, root: TreeNode) -> List[int]:
        """Level-order walk collecting the rightmost value at each depth."""
        view = []
        frontier = [root]
        while root and frontier:
            view.append(frontier[-1].val)
            next_frontier = []
            for node in frontier:
                if node.left:
                    next_frontier.append(node.left)
                if node.right:
                    next_frontier.append(node.right)
            frontier = next_frontier
        return view
# @lc code=end
# Accepted
# 211/211 cases passed(32 ms)
# Your runtime beats 90.65 % of python3 submissions
# Your memory usage beats 100 % of python3 submissions(12.7 MB)
| [
"2380348167@qq.com"
] | 2380348167@qq.com |
bfb7729d6baa835c6f3512eb120c4f8deb00e490 | 473facf83b8249730a16930f82dcea1148bd4762 | /code/jetbrains.py | ab1a923e2ebed25228876431a0febecc414b6fe2 | [
"Unlicense"
] | permissive | JeongJuhyeon/knausj_talon | ee070dbe9df32a5f8ffce3459fd1817a474d1916 | 621e8ff718513af943e8fd282688183cf50518a4 | refs/heads/master | 2022-11-02T00:59:25.163331 | 2020-03-26T10:26:45 | 2020-03-26T10:26:45 | 250,298,215 | 0 | 0 | Unlicense | 2020-03-26T15:33:57 | 2020-03-26T15:33:56 | null | UTF-8 | Python | false | false | 5,912 | py | import os
import os.path
import requests
import time
from pathlib import Path
from talon import ctrl, ui, Module, Context, actions, clip
# Courtesy of https://github.com/anonfunc/talon-user/blob/master/apps/jetbrains.py
# Most recent command list, replayed by Actions.extend_action.
extendCommands = []
# Each IDE gets its own port, as otherwise you wouldn't be able
# to run two at the same time and switch between them.
# Note that MPS and IntelliJ ultimate will conflict...
# Maps the active app's bundle id / window class to the voice-plugin port.
port_mapping = {
    "com.jetbrains.intellij": 8653,
    "com.jetbrains.intellij-EAP": 8653,
    "com.jetbrains.intellij.ce": 8654,
    "com.jetbrains.AppCode": 8655,
    "com.jetbrains.CLion": 8657,
    "com.jetbrains.datagrip": 8664,
    "com.jetbrains.goland": 8659,
    "com.jetbrains.goland-EAP": 8659,
    "com.jetbrains.PhpStorm": 8662,
    "com.jetbrains.pycharm": 8658,
    "com.jetbrains.rider": 8660,
    "com.jetbrains.rubymine": 8661,
    "com.jetbrains.WebStorm": 8663,
    "com.google.android.studio": 8652,
    "jetbrains-idea": 8653,
    "jetbrains-idea-eap": 8653,
    "jetbrains-idea-ce": 8654,
    "jetbrains-appcode": 8655,
    "jetbrains-clion": 8657,
    "jetbrains-datagrip": 8664,
    "jetbrains-goland": 8659,
    "jetbrains-goland-eap": 8659,
    "jetbrains-phpstorm": 8662,
    "jetbrains-pycharm": 8658,
    "jetbrains-pycharm-ce": 8658,
    "jetbrains-rider": 8660,
    "jetbrains-rubymine": 8661,
    "jetbrains-webstorm": 8663,
    "google-android-studio": 8652,
    "IntelliJ IDEA": 8653,
    "PyCharm": 8658
}
# Spoken verb -> IDE actions appended after a selection command.
select_verbs_map = {
    "select": [],
    "copy": ["action EditorCopy"],
    "cut": ["action EditorCut"],
    "clear": ["action EditorBackSpace"],
    "comment": ["action CommentByLineComment"],
    "replace": ["action EditorPaste"],
    "expand": ["action ExpandRegion"],
    "collapse": ["action CollapseRegion"],
    "refactor": ["action Refactorings.QuickListPopupAction"],
    "rename": ["action RenameElement"],
    "indent": ["action EditorIndentLineOrSelection"],
    "unindent": ["action EditorUnindentSelection"],
}
# Spoken verb -> IDE actions appended after a movement command.
movement_verbs_map = {
    "go": [],
    "fix": ["action ShowIntentionActions"],
    "paste": ["action EditorPaste"],
}
def set_extend(*commands):
    """Return a one-argument callback that records *commands* as the
    globally repeatable command list (its argument is ignored)."""
    def remember(_):
        global extendCommands
        extendCommands = commands
    return remember
def _get_nonce(port):
    # Read the per-port authentication nonce written by the IDE's voice
    # plugin: first from /tmp, then falling back to the home directory.
    # Returns the nonce string, or None if it cannot be found/read.
    try:
        with open(os.path.join("/tmp", "vcidea_" + str(port)), "r") as fh:
            return fh.read()
    except FileNotFoundError as e:
        # Not in /tmp -- try $HOME.  (FileNotFoundError is caught before the
        # broader IOError handler below, so this branch is only "missing".)
        try:
            home = str(Path.home())
            with open(os.path.join(home, "vcidea_" + str(port)), "r") as fh:
                return fh.read()
        except IOError:
            print("Could not find nonce in tmp or home")
            return None
    except IOError as e:
        # The /tmp file exists but could not be read.
        print(e)
        return None
def send_idea_command(cmd):
    # Send one command to the focused JetBrains IDE via its local HTTP
    # voice-command server: GET http://localhost:<port>/<nonce>/<cmd>.
    # Returns the response body, or None (implicitly) if port/nonce missing.
    print("Sending {}".format(cmd))
    bundle = ui.active_app().name
    port = port_mapping.get(bundle, None)
    nonce = _get_nonce(port)
    print(f"sending {bundle} {port} {nonce}")
    if port and nonce:
        # Short connect timeout (50ms) keeps voice commands snappy when the
        # plugin server is not running; 3s read timeout for slow actions.
        response = requests.get(
            "http://localhost:{}/{}/{}".format(port, nonce, cmd), timeout=(0.05, 3.05)
        )
        response.raise_for_status()
        return response.text
def get_idea_location():
    # Ask the IDE for the caret position; splits the response on whitespace.
    # Presumably yields [line, column] -- see the goto usage in idea_grab.
    return send_idea_command("location").split()
def idea_commands(commands):
    # Run a comma-separated command list against the focused IDE, recording
    # it in the global extendCommands so extend_action can replay it.
    command_list = commands.split(",")
    print("executing jetbrains", commands)
    global extendCommands
    extendCommands = command_list
    for cmd in command_list:
        if cmd:
            send_idea_command(cmd.strip())
            time.sleep(0.1)  # brief pause so the IDE applies each command
# Talon module/context wiring: declare the verb lists and capture stubs.
# The concrete capture implementations are registered via @ctx.capture below.
ctx = Context()
mod = Module()
mod.list('select_verbs', desc='Verbs for selecting in the IDE')
mod.list('movement_verbs', desc='Verbs for navigating the IDE')
@mod.capture
def select_verbs(m) -> list:
    """Returns a list of verbs"""
@mod.capture
def movement_verbs(m) -> list:
    """Returns a list of verbs"""
@mod.action_class
class Actions:
    # Voice actions exposed to Talon; all route through the IDE HTTP bridge.
    def idea(commands: str):
        """Send a command to Jetbrains product"""
        idea_commands(commands)
    def idea_select(select_verb: str, commands: str):
        """Do a select command, then the specified commands"""
        # Append the verb's follow-up actions after the selection commands.
        command_list = ','.join(commands.split(",") + select_verbs_map[select_verb])
        print(command_list)
        idea_commands(command_list)
    def idea_movement(movement_verb: str, commands: str):
        """Do a select movement, then the specified commands"""
        command_list = ','.join(commands.split(",") + movement_verbs_map[movement_verb])
        print(command_list)
        idea_commands(command_list)
    def idea_grab(times: str = "1"):
        """Copies specified number of words to the left"""
        # Preserve the user's clipboard around the grab-and-paste sequence.
        old_clip = clip.get()
        try:
            original_line, original_column = get_idea_location()
            # Each EditorSelectWord widens the selection by one word.
            for _ in range(int(times)):
                send_idea_command("action EditorSelectWord")
            send_idea_command("action EditorCopy")
            send_idea_command("goto {} {}".format(original_line, original_column))
            send_idea_command("action EditorPaste")
        finally:
            clip.set(old_clip)
        # Grabs are not repeatable -- clear the replay list.
        global extendCommands
        extendCommands = []
    def extend_action(number: str):
        """Repeat previous actions up to number of times"""
        global extendCommands
        count = max(int(number), 1)
        for _ in range(count):
            for cmd in extendCommands:
                send_idea_command(cmd)
    def set_extended_actions(commands: str):
        """Adds specified commands to the list of commands to repeat"""
        set_extend(commands.split(","))
# Concrete implementations for the capture stubs declared on the Module.
@ctx.capture(rule='{self.select_verbs}')
def select_verbs(m):
    return m.select_verbs
@ctx.capture(rule='{self.movement_verbs}')
def movement_verbs(m):
    print(m)  # NOTE(review): stray debug print -- consider removing
    return m.movement_verbs
# Populate the spoken-form lists from the verb maps' keys.
ctx.lists['self.select_verbs'] = select_verbs_map.keys()
ctx.lists['self.movement_verbs'] = movement_verbs_map.keys()
| [
"hbk619@users.noreply.github.com"
] | hbk619@users.noreply.github.com |
afb4a6776800c824b79d049e407096a1afae0081 | 420cc006446a55fdfdbe2f4717e8a3347b83f2d1 | /src/profile.py | ba383cffe53f53f99f76e9022ebc79d359f69ec3 | [
"MIT"
] | permissive | alagroy-42/ShellProfiler | 63a6a58ab04e4142e65f92780189efd02f6b9fd8 | b65519a4753f8fea657e4a9eabacafe90ce6fe33 | refs/heads/master | 2023-01-27T11:34:39.208066 | 2020-12-09T14:52:51 | 2020-12-09T14:52:51 | 319,961,106 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,510 | py | class Profile:
def __init__(self, yaml, name):
default = {
'dir': '~',
'shell': True,
'process': None,
}
self.name = name
for key in default:
exists = False
if key in list(yaml.keys()):
setattr(self, key, yaml[key])
exists = True
if exists == False:
setattr(self, key, default[key])
def loadProcesses(self):
self.shellExec = []
keys = ['cmd', 'args', 'depends_on', 'silent']
for process in self.process:
id = process
for key in keys:
if not key in self.process[id].keys():
if key == 'cmd':
print('SyntaxError: process[' + id + '] does not have a cmd argument')
exit(1)
self.process[id][key] = ''
self.shellExec.append({
'cmd': self.process[id]['cmd'] + ' ' + ' '.join(self.process[id]['args']),
'id': id,
'depends_on': self.process[id]['depends_on'],
'silent': self.process[id]['silent']
})
def loadDir(self):
self.shellExec.append({
'id': 'dir',
'dir': self.dir,
'depends_on': ''
})
def orderInstructions(self):
ordered = []
treated = []
while len(self.shellExec) != 0:
elem = self.shellExec[0]
idBeg = elem['id']
depends_pass = False
for dep in elem['depends_on']:
if not dep in treated:
depends_pass = True
while elem['depends_on'] != '' and depends_pass:
for tmp in self.shellExec:
if tmp['id'] in elem['depends_on']:
elem = tmp
if tmp['id'] == idBeg:
print('Error: Infinite "depends_on" loop.')
exit(1)
break
if elem['id'] == idBeg:
print('Error: undefined "depends_on" for elem ' + idBeg)
exit(1)
ordered.append(elem)
treated.append(elem['id'])
self.shellExec.pop(self.shellExec.index(elem))
return ordered
def loadProfile(self):
self.loadProcesses()
self.loadDir()
return self.orderInstructions()
| [
"alagroy-@student.42.fr"
] | alagroy-@student.42.fr |
7921a32b84ddb44c349ec5b8e0673ffa0c836199 | ad5d454001a57b772c92df7ec53f21f100bdbee3 | /src/finitestate/firmware/cve_matching.py | 1469dacd0de89dc486bbe25344bb3455762f1c5b | [
"Apache-2.0"
] | permissive | FiniteStateInc/clearcode-toolkit | 7c9a22880e40671ad3a1f287a5028c2358c5ee65 | 521c3a2ab9d9fa6d7b9059227c6af9d09b031c33 | refs/heads/master | 2023-02-23T00:08:21.468083 | 2020-12-29T22:03:54 | 2020-12-29T22:03:54 | 323,440,429 | 0 | 1 | null | 2020-12-29T22:03:55 | 2020-12-21T20:26:26 | null | UTF-8 | Python | false | false | 5,923 | py | from cpe import CPE
from collections import namedtuple
from functools import lru_cache
from typing import List
import re
Match = namedtuple('Match', ['vulnerable', 'cpes'])

# Empty/wildcard CPE parts:
EMPTY_WILDCARD_CPE_SET = ('*', '-', '')


def is_vulnerable(candidate_cpes, configurations):
    '''
    Main entry point for this library; external code should only need to
    call this function.

    Inputs:
    - an iterable of cpe strings
    - a list of dictionaries as contained within the "configurations" > "nodes"
      sections of the NVD CVE JSON data feed.
    '''
    hit = False
    matched = set()
    for node in configurations:
        node_match = handle_node(candidate_cpes, node)
        if node_match.vulnerable:
            hit = True
            matched.update(node_match.cpes)
    return Match(vulnerable=hit, cpes=list(matched))


def and_func(matches):
    '''
    AND-combine match results: vulnerable only when every result is
    vulnerable, in which case all of their cpes are merged.
    '''
    if not all(m.vulnerable for m in matches):
        return Match(vulnerable=False, cpes=[])
    merged = set()
    for m in matches:
        merged.update(m.cpes)
    return Match(vulnerable=True, cpes=list(merged))


def or_func(matches):
    '''
    OR-combine match results: vulnerable when at least one result is
    vulnerable; the cpes of **all** vulnerable results are merged (not
    just the first to match).
    '''
    hit = False
    merged = set()
    for m in matches:
        if m.vulnerable:
            hit = True
            merged.update(m.cpes)
    return Match(vulnerable=hit, cpes=list(merged))


def not_op(match):
    '''
    Invert a match result's truthiness; the resulting match carries no cpes.
    '''
    return Match(vulnerable=not match.vulnerable, cpes=set())
def handle_node(candidate_cpes, node):
    '''
    This function handles checking a set of device CPEs against a CVE
    configuration node, including recursion and boolean operations.

    Returns a Match combining the node's children and cpe_match entries
    under the node's AND/OR operator, with optional negation.
    '''
    # `debug` records the operator chain for inspection; it is not returned.
    debug = []
    if node['operator'] == 'AND':
        and_or_op = and_func
        debug.insert(0, 'AND')
    elif node['operator'] == 'OR':
        and_or_op = or_func
        debug.insert(0, 'OR')
    else:
        raise NotImplementedError(node)
    if node.get('negate', False):
        # TODO: HANDLE NEGATION
        # Wrap the combiner so the combined result's truthiness is flipped.
        final_op = lambda x: not_op(and_or_op(x)) # noqa: E731
        debug.insert(0, 'NOT')
    else:
        final_op = and_or_op
    # Recurse into child nodes first.
    child_results = [handle_node(candidate_cpes, child) for child in node.get('children', [])]
    versionRangeAttrs = ['versionstartincluding', 'versionstartexcluding', 'versionendincluding', 'versionendexcluding']
    # Filter out any cpe match entries that rely on version ranges, just in
    # case. These should have all been replaced in the data by the NVD Updater
    # before ever reaching this point, but if they snuck through somehow, they
    # would wreak havoc and cause a ton of false positives, so it's better to
    # be defensive.
    match_results = [
        handle_match(candidate_cpes, cpe_match) for cpe_match in node.get('cpe_match', [])
        if not any(attr in cpe_match for attr in versionRangeAttrs)
    ]
    result = final_op(child_results + match_results)
    return result
@lru_cache(maxsize=20000)
def cpe_to_regex(cpe):
    '''
    Compile a CPE string into a regular expression, translating the CPE
    wildcards '?' and '*' into single-/multi-character classes that never
    cross a ':' field separator.  re.escape handles the remaining regex
    metacharacters; results are memoised.
    '''
    pattern = re.escape(cpe).replace('\\?', '[^:]?').replace('\\*', '[^:]*')
    return re.compile(pattern)
def handle_match(candidate_cpes, match):
'''
This function handles the lowest-level details of matching two sets of CPEs
against each other.
'''
vulnerable = False
match_cpes = set()
cpe_str = match.get('cpe23uri', match.get('cpe23Uri'))
# This conditional statement excludes CPEs that accept all versions.
# This regrettable hack is necessitated by overly-broad wildcarding within
# dumpsterfires such as https://nvd.nist.gov/vuln/detail/CVE-2017-8244
cpe_version = cpe_str.split(':')[5]
if cpe_version != '*':
cve_cpe_regex = cpe_to_regex(cpe_str)
for candidate_cpe in candidate_cpes:
if cve_cpe_regex.match(candidate_cpe):
vulnerable = True
match_cpes.add(candidate_cpe)
match = Match(vulnerable=vulnerable, cpes=list(match_cpes))
return match
def filter_generic_cpes(cpe_list: List[str]) -> List[str]:
'''
This function takes in a list of CPE strings and filters out any
CPEs with any specific information past the version number (edition, lang, etc).
Returns a new list of CPE strings.
'''
filtered_cpes = []
for cpe in cpe_list:
c = CPE(cpe)
# yapf: disable
if (c.get_update()[0] in EMPTY_WILDCARD_CPE_SET and
c.get_edition()[0] in EMPTY_WILDCARD_CPE_SET and
c.get_language()[0] in EMPTY_WILDCARD_CPE_SET and
c.get_software_edition()[0] in EMPTY_WILDCARD_CPE_SET and
c.get_target_software()[0] in EMPTY_WILDCARD_CPE_SET and
c.get_target_hardware()[0] in EMPTY_WILDCARD_CPE_SET and
c.get_other()[0] in EMPTY_WILDCARD_CPE_SET):
# yapf: enable
filtered_cpes.append(cpe)
return filtered_cpes
| [
"samuel.vidovich@gmail.com"
] | samuel.vidovich@gmail.com |
f7ec9262556c781b9b6012b564217ce1539327e2 | 6cb833a1af5e3b9972f14b185cf32cfd67a14011 | /tests/test_prototypes.py | f775504a7d497cf5f30614b748b271820c9d9859 | [
"Apache-2.0"
] | permissive | claytondaley/basecrm-client | a83decd965b6e7cbcd32d33966536a84d280b028 | b2fc056770cdf3e5b18eaaefe113e422c27f6791 | refs/heads/master | 2020-12-11T06:00:40.171967 | 2015-04-24T20:17:27 | 2015-04-24T20:17:27 | 16,652,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,112 | py | #!/usr/bin/env python
"""Test the functionality of Prototypes"""
import logging
logger = logging.getLogger(__name__)
from mock import Mock
from nose.tools import assert_raises, eq_
from prototype import Resource
from tests.test_common import SAMPLES
__author__ = 'Clayton Daley III'
__copyright__ = "Copyright 2015, Clayton Daley III"
__license__ = "Apache License 2.0"
__version__ = "2.0.0"
__maintainer__ = "Clayton Daley III"
__status__ = "Development"
def getattr_attributeerror(object_, attribute):
assert_raises(AttributeError, getattr, object_, attribute)
def getattr_keyerror(object_, attribute):
assert_raises(KeyError, getattr, object_, attribute)
def getattr_typeerror(object_, attribute):
assert_raises(TypeError, getattr, object_, attribute)
def setattr_eq(object_, attribute, value):
object_.__setattr__(attribute, value)
eq_(getattr(object_, attribute), value)
def setattr_attributeerror(object_, attribute, value):
assert_raises(AttributeError, object_.__setattr__, attribute, value)
def setattr_keyerror(object_, attribute, value):
assert_raises(KeyError, object_.__setattr__, attribute, value)
def setattr_typeerror(object_, attribute, value):
assert_raises(TypeError, object_.__setattr__, attribute, value)
class PropertiesStub(Resource):
PROPERTIES = {
'_readonly': object,
'editable': object,
}
def test_resource_setattr_readonly():
"""
Readonly attributes are indicated by a leading underscore and should throw a KeyError
"""
stub = PropertiesStub()
mock = Mock()
yield setattr_keyerror, stub, 'readonly', mock
def test_resource_setattr_editable():
"""
Editable attributes do not have a leading underscore and are stored inside the 'dirty' table
"""
stub = PropertiesStub()
mock = Mock()
yield setattr_eq, stub, 'editable', mock
def test_resource_setattr_nonproperty():
"""
If an attribute is not a member of properties, an AttributeError should be generated
"""
stub = PropertiesStub()
mock = Mock()
yield setattr_attributeerror, stub, 'nonproperty', mock
def test_generator_setattr_typechecking():
"""
setattr should provide type checking based on PROPERTIES definition
"""
for type in SAMPLES:
mock = Mock(Resource)
object.__setattr__(mock, 'PROPERTIES', {'key': type})
object.__setattr__(mock, '_dirty', dict())
for t2, samples in SAMPLES.iteritems():
if not isinstance(t2, type):
for sample in samples:
yield setattr_eq, mock, 'key', sample
else:
for sample in samples:
yield setattr_typeerror, mock, 'key', sample
def is_attribute_unchanged_data(value):
mock = Mock(Resource)
object.__setattr__(mock, 'PROPERTIES', {'key': object})
object.__setattr__(mock, '_data', {'key': value})
object.__setattr__(mock, '_dirty', dict())
mock.key = value
assert 'key' not in mock._dirty
def test_generator_is_attribute_unchanged():
"""
If an attribute is unchanged, we should not store it in 'dirty'. This should use the 'is' operator to preserve
mutability.
"""
for value in [v[0] for k, v in SAMPLES.iteritems()]:
yield is_attribute_unchanged_data, value
EQ_NOT_IS = [
[1000000, 10 ** 6],
[[0, 1], [0, 1]],
[{}, {}],
[{'key': 'value'}, {'key': 'value'}]
]
def eq_attribute_changed_data(value, compare):
# sample values are equal
assert value == compare
# sample values are not the same
assert value is not compare
mock = Resource()
object.__setattr__(mock, 'PROPERTIES', {'key': object})
mock.key = compare
assert 'key' in mock._dirty
# Confirm that the key is updated
assert mock.key is compare
assert mock.key is not value
def test_generator_equal_attribute_replace():
"""
If an attribute is equal but not "is", we need to update it to preserve mutability.
"""
for values in EQ_NOT_IS:
yield eq_attribute_changed_data, values[0], values[1] | [
"clayton.daley@gmail.com"
] | clayton.daley@gmail.com |
84eab8503c1c534f656258ec0d6655c6fe03026a | 4286f55c2b9509de9f5612c30a683fd4f226ebb5 | /auth.py | 9c3e7f9495125d5ddb2729d6208db490840fa98d | [] | no_license | teledima/slack-api-lab | 19fc1d8c94b558461ea85af7ad82a31fa7e6733d | ca90d9bb12756e6e988614161056890ac9a7d667 | refs/heads/master | 2023-08-15T03:58:50.234452 | 2021-10-06T05:40:43 | 2021-10-06T05:40:43 | 407,602,541 | 0 | 0 | null | 2021-10-06T05:39:35 | 2021-09-17T16:05:38 | Python | UTF-8 | Python | false | false | 2,522 | py | from flask import Blueprint, render_template, request
from slack_sdk.oauth import AuthorizeUrlGenerator, RedirectUriPageRenderer
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
from utils import set_document, encrypt_data
import config
auth_blueprint = Blueprint(name='auth_blueprint', import_name=__name__)
@auth_blueprint.route('/auth', methods=['GET'])
def auth():
user_scopes = ['channels:write', 'channels:read', 'groups:write', 'groups:read']
url_generator = AuthorizeUrlGenerator(client_id=config.client_id, user_scopes=user_scopes)
return render_template('add_to_slack.html', url=url_generator.generate(state=''))
@auth_blueprint.route('/install', methods=['GET'])
def install_app():
response = None
finish_page = RedirectUriPageRenderer(install_path='/auth', redirect_uri_path='')
# Get temporary code
code = request.args.get('code')
error = request.args.get('error')
if code:
# Exchange a temporary OAuth verifier code for an access token
try:
response = WebClient().oauth_v2_access(client_id=config.client_id,
client_secret=config.client_secret,
code=code).data
except SlackApiError as slack_error:
return finish_page.render_failure_page(reason=slack_error.response['error'])
if response:
nonce, encoded_data, tag = encrypt_data(key=config.encryption_key, data=bytes(response['authed_user']['access_token'], encoding='utf-8'))
parsed_response = dict(app_id=response['app_id'],
user=dict(id=response['authed_user']['id'],
scope=response['authed_user']['scope'],
access_token=dict(nonce=list(nonce), encoded_data=list(encoded_data), tag=list(tag)),
token_type=response['authed_user']['token_type']),
team=dict(id=response['team']['id'],
name=response['team']['name']))
set_document(db=config.db,
collection_id='authed_users',
document_id=response['authed_user']['id'],
content=parsed_response)
return finish_page.render_success_page(app_id=response['app_id'], team_id=response['team']['id'])
if error:
return finish_page.render_failure_page(reason=error)
| [
"teledima00@gmail.com"
] | teledima00@gmail.com |
388bc894a96fc3f7fbbc3681698a2bad2de91d24 | 7c04fd0eae04ee10be2cc2353eca2fe05f03aca9 | /test/test_yahoo.py | 2feacb3be2c55a85b14402bed5018364510ffe17 | [
"Apache-2.0"
] | permissive | seizetheday/chinese-stock-api | 61ef5a5eea63dc252953db007b44b253d4bef438 | 7b5e4ebf6d8b2d5c4988a4f16c58c789cd210696 | refs/heads/master | 2023-06-24T06:32:50.243329 | 2022-02-15T15:18:31 | 2022-02-15T15:18:31 | 39,428,642 | 8 | 4 | null | 2022-02-15T15:18:32 | 2015-07-21T06:38:51 | Python | UTF-8 | Python | false | false | 1,459 | py | # Copyright (c) 2015 Walt Chen
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# system library
import unittest
import datetime
# project library
from cstock.yahoo_engine import YahooEngine
class TestEngine(unittest.TestCase):
def setUp(self):
self.engine = YahooEngine()
def test_get_url(self):
url = self.engine.get_url('600010', ('2014-03-04', '2014-03-05'))
self.assertEqual(url, "http://ichart.yahoo.com/table.csv?s=600010.ss&a=2&b=4&c=2014&d=2&e=5&f=2014")
def test_parse(self):
data = ("Date,Open,High,Low,Close,Volume,Adj Close\n"
"2014-08-22,34.20,34.22,33.49,33.70,2222200,33.70\n"
"2014-08-21,33.81,34.29,33.15,34.21,3544800,34.21")
stocks = self.engine.parse(data, 'foo_id')
self.assertEqual(len(stocks), 2)
self.assertEqual(
stocks[0].as_dict(),
{'close': '33.70',
'code': 'foo_id',
'date': '2014-08-22',
'high': '34.22',
'low': '33.49',
'name': None,
'open': '34.20',
'price': None,
'time': None,
'turnover': None,
'yesterday_close': None,
'volume': '2222200'}
)
| [
"godsarmycy@gmail.com"
] | godsarmycy@gmail.com |
a6b542e7e511f724ca9bd8be618b0f99fd0e6316 | abc0abc576e374ed1ba87f87444c32a17800dcd3 | /reference.py | 58a0f9845294922571e2e2cb3d8e31620be36355 | [] | no_license | AndNieves/RW3Translator | 7181e5ca6edab69cdd49ff291495fec3e88e41db | 2fb552c5c20167d57d8a0fb7dcc0742e49f4b4c7 | refs/heads/master | 2020-03-26T08:53:22.557471 | 2018-08-14T13:31:25 | 2018-08-14T13:31:25 | 144,724,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,553 | py | createUniqueString = '''
--Create Unique String {str_key}
EXEC dbo.CreateUniqueString '{str_key}'
'''
createTranslation = '''
--Create {lang} Translation
EXEC dbo.CreateTranslation '{str_key}', '{lang}', N'{lang_text}'
'''
createFileString = '''
--Associating key str_key with page {page}
EXEC dbo.CreateFileString '{str_key}','{page}'
'''
languages = ['Chinese',
'ChineseTW',
'French',
'German',
'Italian',
'Japanese',
'Korean',
'Spanish',
'English',
'Russian',
'Catalan']
from openpyxl import load_workbook
wb = load_workbook('./LRW_Translations.xlsx')
ws = wb['original']
first = True
#print ('SET QUOTED_IDENTIFIER OFF;')
for row in ws.iter_rows():
if first:
firstRow = row;
first = False;
continue;
key = row[1].value
if key is not None:
print (createUniqueString.format(str_key=key.strip()))
pages = row[2].value.split(',')
for page in pages:
print (createFileString.format(str_key=key,page=page.strip()))
for i in range(0, len(languages)):
currentIndex = i + 3
languageName = firstRow[currentIndex].value
value = row[currentIndex].value
if value is not None:
valueEscaped = value.replace('\'', '\'\'\'\'')
print(createTranslation.format(str_key=key,lang=languageName,lang_text=valueEscaped))
print('GO')
#print ('SET QUOTED_IDENTIFIER ON;') | [
"andres.nieves@proquest.com"
] | andres.nieves@proquest.com |
233b7aea0829594f9586d1fd8bf5686dee0fc188 | ad62be112183a792200acf925c8119ece0908f6c | /Models/004 RNN Coins/BMK Coins.py | 4520ebaefeb317474800540fd752bda3b4953d75 | [] | no_license | rayjk21/TestPyCharm | 24e6c108bf8f064d7ea072ba711f2a84a2725cec | aabb27a77e5630c4fe89bf127173e5b76485aa9b | refs/heads/master | 2021-04-26T22:16:01.889688 | 2018-06-05T10:13:55 | 2018-06-05T10:13:55 | 124,057,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,570 | py |
import numpy as np
import pandas as pa
import pandas as pd
import aptecoPythonUtilities.utils_explore as my_exp
import matplotlib.pyplot as plt
import MyUtils.Embeddings as my_emb
import MyUtils.utils_base as my
import MyUtils.utils_prep as my_prep
import MyUtils.utils_nn as my_nn
model_path = r"C:\Users\rkirk\Documents\GIT\Python\TestPyCharm\Models\004 RNN Coins"
############## Raw Data
def get_raw_data(data_info=None):
raw = pa.read_csv(r"D:\FastStats\PUBLISH\BMK\project\All Transactions.csv")
# type(raw['TransDate'][0])
# Slow to parse all the dates, so convert later
# raw = pa.read_csv(r"D:\FastStats\PUBLISH\BMK\project\All Transactions.csv", parse_dates=['TransDate'], dayfirst=True)
# my_exp.overview(raw)
# my_exp.detail(raw)
# Keep by volume
# my_prep.freq_hist(raw, "Product")
sub = my_prep.freq_cut(raw, "Product", 20, 1000, show=False)[["CustId", "Product", "TransDate"]]
#my_prep.freq_hist(sub, "Product")
#my_prep.freq_hist(sub, "CustId")
#my_exp.overview(sub)
#my_exp.detail(sub)
sub2 = my_prep.freq_cut(sub, "Product", 100, 1000, show=False)
#my_exp.overview(sub2)
#my_exp.detail(sub2)
# Convert dates for just the kept volumes
df = sub2.copy()
df['TransDt'] = pa.to_datetime(df['TransDate'], dayfirst=True)
df.sort_values(by=['CustId', 'TransDt'], inplace=True)
# Check dates have parsed correctly (2018-02-12 showld be 12th Feb)
#print(type(df['TransDt'].iloc[0]))
#print(df[df['CustId'] == 285158].dropna)
# Group by cust
date_lines = my_prep.toGroupsDf(df, 'CustId', 'TransDt')
return date_lines
def time_line_plot(lines):
def set_x_axis(ax, min, max):
# min = pa.Timestamp('2016-01-20')
# max = pa.Timestamp('2018-02-20')
import matplotlib.dates as mdates
if (min is None) or (max is None): return
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
yearsFmt = mdates.DateFormatter('%Y')
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
# round to nearest years...
min_year = np.datetime64(min, 'Y')
max_year = np.datetime64(max, 'Y') + np.timedelta64(1, 'Y')
ax.set_xlim(min_year, max_year)
def show_quarters(ax, min, max):
min_year = pa.Timestamp(np.datetime64(min, 'Y'))
max_year = pa.Timestamp(np.datetime64(max, 'Y') + np.timedelta64(1, 'Y'))
for q in my.daterange(min_year, max_year, quarters=2):
w = 90 # days
ax.barh(left=q, width=w, y=0, height=1, align='edge', color='grey')
def timeplot(ts, ax, show_xlabels=False, min=None, max=None):
ts = np.array(ts)
set_x_axis(ax, min, max)
show_quarters(ax, min, max)
# Draw actual lines
ax.vlines(ts, [0], [1])
if (not show_xlabels): ax.set_xticklabels([], visible=False)
ax.set_yticklabels([], visible=False)
ax.get_yaxis().set_ticks([])
def print_values(i, timeline):
print ("Timeline {}: ".format(i), end="")
ts = sorted(timeline)
for t in ts: print(t.date(), end=", ")
print("\n")
def multiple_lines(timelines, print=False):
if (type(timelines) == pa.DataFrame):
timelines = timelines.iloc[:,0]
if (type(timelines) == pa.Series):
timelines = timelines.values
num_lines = len(timelines)
all_points = np.hstack(timelines)
min = np.min(all_points)
max = np.max(all_points)
for i in range (num_lines):
ax = plt.subplot(num_lines, 1, i+1)
ts = timelines[i]
# Set labels on last plot
show_xlables = True if (i==num_lines-1) else False
timeplot(ts, ax, show_xlables, min, max)
if print: print_values(i, timelines[i])
plt.show()
multiple_lines(lines)
def get_lengths(lines, plot=False):
f = my_prep.vectorizeA(lambda line: np.shape(line))
lengths = np.squeeze(f(lines))
if plot:
print(lengths[0:10])
plt.hist(lengths)
return lengths
########## X, Y Data #################################
def dates_diff(dt_line , start):
'''
Returns multiple values for each point on the date line:
(Converts array of DateTimes to an array of integer-arrays)
- each integer array is the [difference from 'start', day of month]
:param dt_line:
:param start:
:return:
'''
def date_diff2(t): return [(t-start).days, t.day]
f = my_prep.vectorizeR(date_diff2)
diff_line = f(dt_line)
return diff_line
def time_line_gaps(dt_line):
'''
Returns the differences (gaps) between a line of dates
'''
diff = np.diff(dt_line)
f = my_prep.vectorizeA(lambda t: t.days)
diff_days = f(diff)
# Insert 0 at start
diff0 = np.insert(diff_days, 0, 0)
return diff0
def shift_line(line):
from scipy.ndimage.interpolation import shift
return shift(line, [-1], mode='constant', cval=0)
def get_xy_data(raw_data , data_info=None, n_time=10, remove_zeros=True, start=None, printing=True):
# Make start date Now if none is set
# if start is None: start = pa.Timestamp.now()
# if type(start) is str: start = pa.Timestamp(start)
if data_info is not None:
n_time = data_info.get('n_time') or n_time
remove_zeros = data_info.get('remove_zeros') or remove_zeros
print("n_time:{}".format(n_time))
print("remove_zeros:{}".format(remove_zeros))
# Find the gaps between the datelines
lines = raw_data['TransDt'].values
if printing: print("Calculating gaps between transactions")
f = my_prep.vectorizeA(lambda line: time_line_gaps(line))
lines = f(lines)
if remove_zeros:
lines = my_prep.remove_zeros(lines, printing)
# Cut up into samples if longer than length, or pad out with zeros
X = np.array(list(my_prep.samples_of_length(lines, n_time, printing)))
f = my_prep.vectorizeA(shift_line)
Y = f(X)
# Could also find the difference from start
return X, Y
def checkXY(X, Y, i=0, n=3):
for i in range(i, i+n):
print()
print(X[i])
print(Y[i])
############################################################################################
from keras.models import Sequential, load_model
from keras.layers import Dense, Activation, Embedding, Input, Flatten, Dropout, TimeDistributed, BatchNormalization, Reshape, Lambda
from keras.layers import LSTM
from keras.optimizers import RMSprop, Adam, SGD
from keras import backend as K
import keras.utils
import keras
def set_shape(M, dims, label="", printing=False):
m_shape = M.shape
if (len(m_shape)==1):
m_reshape = [1, m_shape[0], 1][0:dims]
if (len(m_shape)==2):
m_reshape = [m_shape[0], m_shape[1], 1][0:dims]
if (m_shape != m_reshape):
if printing: print("Reshaping {} from {} to {}".format(label, m_shape, m_reshape))
M = M.reshape(m_reshape)
return M
def create_model_1(model_info=None, n_time=20, hidden_units=10, embedding_size=10, dropout=0.2, mask_zero=True, model_name='Temp_Model_1' ):
'''
Input has to be 2 dimensions: n_obs * n_time_stamp (with no n_features)
Output is categorical
:return:
'''
print("Creating model 1")
data_info = model_info.get('data_info')
if data_info:
n_time = data_info.get('n_time') or n_time
hidden_units = model_info.get('hidden_units') or hidden_units
embedding_size = model_info.get('embedding_size') or embedding_size
dropout = model_info.get('dropout') or dropout
mask_zero = model_info.get('mask_zero') or mask_zero
model_name = model_info.get('model_name') or model_name
model = Sequential(name=model_name)
model.add(LSTM(hidden_units, input_shape=(n_time,1), return_sequences=True, stateful=False))
model.add(Dense(1, name="Output"))
model.compile(loss=keras.losses.mse, optimizer='adam', metrics=['accuracy'])
model_info.update({'model':model})
print("Created model: {}".format(model_name))
print(model.summary())
return model
def model_fit(model_or_model_info, X, Y, epochs, batch_size=8, stateful=False, shuffle=True, save=True, x_dims=3, y_dims=3, model_path=model_path):
import time
if (type(model_or_model_info) is dict):
model_info = model_or_model_info
data_info = model_info.get('data_info')
model = model_info.get('model')
model_path = model_info.get('model_path') or model_path
model_name = model_info.get('model_name') or model.name
batch_size = model_info.get('batch_size') or batch_size
stateful = model_info.get('stateful') or stateful
else:
model = model_or_model_info
model_name = model.name
# When running as stateful, the whole training set is the single large sequence, so must not shuffle it.
# When not stateful, each item in the training set is a different individual sequence, so can shuffle these
if stateful:
shuffle = False
batch_size = 1
lbl = "Iteration"
timesteps = X.shape[1]
if (timesteps != 1):
raise ValueError("When using stateful it is assumed that each X value has a single time-step but there are {}".format(timesteps))
else:
lbl = "Epoch"
if data_info:
x_dims = data_info.get('x_dims') or x_dims
y_dims = data_info.get('x_dims') or y_dims
X = set_shape(X, x_dims,"X")
Y = set_shape(Y, y_dims,"Y")
print("Fitting model '{}' over {} epochs with batchsize {}".format(model_name,epochs,batch_size))
print("X shape: {}".format(X.shape))
print("y shape: {}".format(Y.shape))
print()
# metrics = n_hot_accuracy_metric()
precision = []
accuracy = []
loss = []
for epoch in range(epochs):
# if the shuffle argument in model.fit is set to True (which is the default),
# the training data will be randomly shuffled at each epoch
h = model.fit(X, Y, epochs=1, batch_size=batch_size, verbose=0, shuffle=shuffle
, validation_split=0.25
# ,callbacks=[metrics]
).history
# Got error on callback with dropout i
print("{} {:4d} : loss {:.04f}, accuracy {:0.4f} - {}".format(lbl, epoch, h['loss'][0], h['acc'][0], time.ctime()))
#print("{} {:4d} : loss {:.04f}, accuracy {:0.4f}, Precision {:0.4f} - {}".format(lbl, epoch, h['loss'][0], h['acc'][0], h['MyPrecision'][0], time.ctime()))
accuracy += h['acc']
loss += h['loss']
# When not stateful, state is reset automatically after each input
# When stateful, this is suppressed, so must manually reset after the epoch (effectively the one big sequence)
if stateful: model.reset_states()
if save: my_nn.model_save(model, model_path , model_name, "latest")
if not (epoch % 10):
if save: my_nn.model_save(model, model_path , model_name, epoch)
if save: my_nn.model_save(model, model_path , model_name, "final", echo=True, temp=False)
if model_info:
model_info.update({'model':model})
return precision
def predict(Xt, model_info=None, n_time=15, output='last'):
'''
:param Xt:
:param model_info:
:param n_time:
:param output: 'last' or 'full'
:return:
'''
if model_info:
model = model_info.get('model')
data_info = model_info.get('data_info')
n_time = data_info.get('n_time') or n_time
Xt = my_prep.pad(Xt, n_time)
Xt = set_shape(Xt, 3)
pred = model.predict(Xt, verbose=0)
f = np.vectorize(lambda x: round(x))
pred = my_prep.remove_zeros(f(pred), printing=False)
pred = np.squeeze(pred)
if (output=='last'):
last = my_prep.vectorizeA(lambda line: line[-1])
pred = last(pred)
return pred
def predict_rank(X, model_info, n=100):
Xt = X[0:n]
Yt = predict(Xt, model_info)
Xs = my_prep.remove_zeros(Xt, printing=False)
df = pa.DataFrame({'Timeline':list(Xs), 'Next':Yt })
df.sort_values(by='Next', inplace=True, ascending=False)
return df[df['Next']>0]
def plot_gaps(gaps):
offset = (lambda x: pa.Timestamp.now() - np.timedelta64(np.asscalar(x),'D'))
offsetV = my_prep.vectorizeA(offset)
offsetVV = my_prep.vectorizeA(offsetV)
gaps_to_points = my_prep.vectorizeA(lambda gaps: gaps[::-1].cumsum()[::-1])
points = gaps_to_points(gaps)
times = offsetVV(points)
#print(gaps)
#print(points)
#print(times)
time_line_plot(times)
#################### Using ModelInfo to specify models ##################################
raw_data = get_raw_data()
time_line_plot(raw_data[0:10])
get_lengths(raw_data['TransDt'], plot=True)
def data_with(data_settings):
data_info = {'n_time':20, 'remove_zeros':True, 'x_dims':3 }
data_info.update(data_settings)
return data_info
def model_with(model_settings):
model_info = {'create_fn': create_model_1, 'model_name':"default_model", 'model_path':model_path,
'stateful':False,
'dropout':0.0, 'hidden_units':10, 'embedding_size':50,
'loss':keras.losses.mse,
'mask_zero':True}
model_info.update(model_settings)
return model_info
def prep_data(model_or_data_info, load=False):
if (model_or_data_info.get('data_info') is None):
data_info = model_or_data_info
else:
data_info = model_or_data_info['data_info']
if load: use_data = get_raw_data(data_info)
else: use_data = raw_data
X, Y = get_xy_data(use_data, data_info=data_info)
return X,Y,raw_data
def build_model(model_info, data_only = False, load=False, prep=True, create=False, fit=True, epochs=10):
if load | prep | data_only:
X, Y, raw_data = prep_data(model_info, load)
if data_only:
return X, Y, raw_data
if create:
print("Creating new model {}".format(model_info['model_name']))
model = model_info['create_fn'](model_info)
else:
print ("Reusing existing model {}".format(model_info['model_name']))
model = model_info['model']
if fit:
model_fit(model_info, X, Y, epochs=epochs, batch_size=5)
#model.evaluate(X, Y, batch_size=5)
data_info1 = data_with({'n_time':15})
model_info1 = model_with({'create_fn' : create_model_1, 'model_name':"model1", 'data_info':data_info1, 'batch_size':50})
model_info1b = model_with({'create_fn': create_model_1, 'model_name':"model1b", 'data_info':data_info1, 'batch_size':50})
X,Y,raw_data = prep_data(model_info1)
checkXY(X, Y)
get_lengths(X, plot=True)
build_model(model_info1b, prep=True, create=True, fit=True, epochs=10)
my_nn.model_load(model_info1, suffix='final')
Xt = [
[1,2,3],
[100,100,100],
[1,2,3,100,200],
[10,20,30,40,50,60]
]
pred = predict_rank(X, model_info1, n=100)
print(pred)
low = pred['Timeline'][-20:-10].values
hi = pred['Timeline'][0:10].values
hi_low = np.hstack([hi, low])
plot_gaps(hi_low)
##############################################################################
##############################################################################
| [
"ray.kirk@apteco.com"
] | ray.kirk@apteco.com |
2ab102f6b1f0987a79727c0244b361238f7271ca | 6699aa437df9fe4fdeb9669941f70b335353a5e8 | /setup.py | e56704d9b0a4f57b56892e9850a45996042a5a21 | [
"MIT"
] | permissive | IRANKUND/Benefactor-challenge | ae203c19cad88e1df4d39f05714ccba1012519c6 | 78c364fcd9e776f50319e2de4eaf269d64a6508f | refs/heads/master | 2023-03-01T18:59:19.283668 | 2021-02-10T13:45:48 | 2021-02-10T13:45:48 | 337,422,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
# get version from __version__ variable in benefactor_challenge/__init__.py
from benefactor_challenge import __version__ as version
setup(
name='benefactor_challenge',
version=version,
description='this app is a challenge to take in order to have clear understanding of frappe',
author='patrick',
author_email='patrick@info.com',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| [
"andela@andelas-MBP-4"
] | andela@andelas-MBP-4 |
401fbe5d99ca5b697cf6e4628551e0606bc494b2 | 88c3f6dd1e62da124a9718f745ced22e28491d62 | /fftEz.py | c25b9b2e6dcabd845d055ab68204f82a982161c7 | [] | no_license | zhazhajust/THzScript | df79edfb72665074ec79684be17d8f63fdabaa49 | 005c4206c870aca430ffa794bfe3a485fff2b9c6 | refs/heads/main | 2023-07-15T18:43:43.169484 | 2021-08-20T13:11:29 | 2021-08-20T13:11:29 | 398,280,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | # -- coding: utf-8 --
import sdf
import numpy as np
import constant as const
###
txtdir=const.txtdir
xt=np.load(txtdir+"xtEz.npy")
savedir = txtdir + "xfEz.npy"
###
c = 3e8
micron = 1e-6
lamada = const.lamada #10.6 * micron
gridnumber = const.Nx #2400
stop = const.stop #5889 #17000
dt_snapshot= const.dt_snapshot #9e-15
dt = dt_snapshot*1e15 #fs
x_max = const.x_max #80 * lamada #60 * lamada #micron
x_min = 0 * micron
x_end = x_max - x_min
window_start_time = (x_max - x_min) / c
delta_x = x_end/gridnumber
t_end = stop * dt_snapshot
x_interval=const.x_interval #10
t_total=1e15*x_end/c #fs
t_size=t_total/(dt_snapshot*1e15)+1 #t_grid_number
if t_end-window_start_time<0:
xgrid = int(gridnumber)
else:
xgrid = int(gridnumber + c*(t_end-window_start_time)/delta_x)
#####fft freqs
N0 = t_size
T=t_size*dt #fs #dt_snapshot*1e15 #t[x][t_size-1]-t[x][0]
fs=N0*1e3/T
rfft=np.fft.rfft(xt)
rfft=np.abs(rfft)
#f_size=rfft.shape
#xf=np.zeros((int(xgrid/x_interval)+1,f_size))
#for x in range(1,xf.shape[0]):
# xf[x] = np.fft.rfft(xt[x])/N0
#xf[0] = xf[0]/2
#xf[N0/2] = xf[N0/2]/2
# xf=np.abs(xf)
print("writed")
np.save(savedir, rfft)
print("saved")
| [
"251338258@qq.com"
] | 251338258@qq.com |
ae5ba2d5697bab3012e10ccebf8f6dc189520c79 | ae219380fea7652ce9cf33888250cc61594375a5 | /calculator.py | 54ed16e484a451387dd6dd23d2f5e8ff1934c946 | [] | no_license | SauravKanchan/workshop | 8b192e3c1af2420e61f329261f3d9e1ad698365e | 59edef37c97cccccad23aa990d7510299e6f59de | refs/heads/master | 2020-07-25T18:57:07.976262 | 2019-09-15T09:56:23 | 2019-09-15T09:56:23 | 208,393,221 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | choice = -1
'''
1 is addition
2 is subtraction
3 is division
0
'''
'''
Task:
Ensure that b is not 0
if user enters b as 0
then tell him that it is not possible.
'''
while choice != 0:
a=int(input("Enter your first number"))
b=int(input("Enter your second number"))
choice = int(input("Enter your choice"))
if choice == 1:
print(a+b)
elif choice==2:
print(a-b)
elif choice == 3:
try:
print(a/b)
except ZeroDivisionError:
print("b cannot be 0")
elif choice == 0:
break
else:
print("Invalid option")
# Bonus code
# print(eval(input()))
| [
"sauravnk30@gmail.com"
] | sauravnk30@gmail.com |
d0b24ef29387157deca60a94fb044d19327cb3f1 | 4c53b8cbdf9e900dd4adbedd0e2167890e373aa3 | /tomato_control/tomato_control_three_version_two/tomato_controlPy.py | 48948991be206b9c9a763b083732a9fdf84dac22 | [] | no_license | AdrianSalcedo/Python_Code | 08667ca57bf179fcccb51b46117264e19485e815 | c3c10765aa8725552b5dac875f82585365d6711a | refs/heads/master | 2020-07-10T10:04:11.307987 | 2019-11-25T17:42:12 | 2019-11-25T17:42:12 | 204,237,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,425 | py | from forward_backward_sweep import ForwardBackwardSweep
from matplotlib import rcParams
# rcParams['font.family'] = 'sans-serif'
# rcParams['font.sans-serif'] = ['Tahoma']
params = {
'figure.titlesize': 10,
'axes.titlesize': 10,
'axes.labelsize': 10,
'font.size': 10,
'legend.fontsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'text.usetex': True
}
rcParams.update(params)
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib as mpl
import numpy as np
#
#
#
beta = 0.01
a = 0.1
b = 0.075
psi = 0.003
gamma = 0.06
theta = 0.2
mu = 0.3
#
#
# Initial conditions
s_p_zero = 0.9992
l_p_zero = 0.0
i_p_zero = 0.0008
s_v_zero = 0.84
i_v_zero = 0.16
# Functional Cost
A_1 = .5
A_2 = 0.3
A_3 = 0.0
c_1 = 0.1
c_2 = 0.1
c_3 = 0.1
name_file_1 = 'figure_1_sir_log.eps'
name_file_2 = 'figure_2_sir_log.eps'
name_file_3 = 'figure_3_sir_log.eps'
#
fbsm = ForwardBackwardSweep()
fbsm.set_parameters(beta, a, b, psi, gamma, theta, mu,
A_1, A_2, A_3, c_1, c_2, c_3,
s_p_zero, l_p_zero, i_p_zero, s_v_zero, i_v_zero)
t = fbsm.t
x_wc = fbsm.runge_kutta_forward(fbsm.u)
#
[x, lambda_, u] = fbsm.forward_backward_sweep()
mpl.style.use('ggplot')
# plt.ion()
# n_whole = fbsm.n_whole
ax1 = plt.subplot2grid((2, 2), (0, 0), rowspan=2)
ax1.plot(t, x_wc[:, 2],
label="Without control",
color='darkgreen'
)
ax1.plot(t, x[:, 2],
label="Optimal controlled",
color='orange')
ax1.set_ylabel(r'Infected plants ratio $I_p$')
ax1.set_xlabel(r'Time (days)')
ax1.legend(loc=0)
ax2 = plt.subplot2grid((2, 2), (0, 1))
ax2.plot(t, u[:, 0],
label="$u_1(t)$ : Remove latent plant",
color='orange')
ax2.plot(t, u[:, 1],
label="$u_2(t)$ : Remove infected plant",
color='darkgreen')
ax2.plot(t, u[:, 2],
label="$u_3(t)$ : insecticide",
color='darkred')
ax2.set_ylabel(r'Controls')
ax2.legend(loc=0)
#
ax3 = plt.subplot2grid((2, 2), (1, 1))
ax3.plot(t, x_wc[:, 1],
label="Without control",
color='darkgreen'
)
ax3.plot(t, x[:, 1],
label="Optimal controlled",
color='orange')
ax3.set_ylabel(r'$L_p(t)$')
plt.tight_layout()
#
fig = mpl.pyplot.gcf()
fig.set_size_inches(5.5, 5.5 / 1.618)
fig.savefig(name_file_1,
# additional_artists=art,
bbox_inches="tight")
| [
"noreply@github.com"
] | AdrianSalcedo.noreply@github.com |
34757da4411d0823effcccd1a88b96cea9eb401a | 1ef536d93c6616f9793e57a9ebc6b44248d50202 | /Unit of Measure/models/res_partner.py | 6d1e384ef6259b088b57ea48681fdce6824c20e1 | [] | no_license | mohamed4185/Express | 157f21f8eba2b76042f4dbe09e4071e4411342ac | 604aa39a68bfb41165549d605d40a27b9251d742 | refs/heads/master | 2022-04-12T17:04:05.407820 | 2020-03-09T14:02:17 | 2020-03-09T14:02:17 | 246,014,712 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | # -*- coding: utf-8 -*-
from odoo import api, fields ,models
from odoo.exceptions import ValidationError
import logging
_logger = logging.getLogger(__name__)
class ResPartner(models.Model):
_inherit="uom.category"
measure_type = fields.Selection([
('unit', 'Units'),
('weight', 'Weight'),
('time', 'Time'),
('length', 'Length'),
('volume', 'Volume'),
], string="نوع وحدة القياس")
| [
"mohamed.abdelrahman@businessborderlines.com"
] | mohamed.abdelrahman@businessborderlines.com |
401502d8d8b40bd00eb29a1d486ea9d4465a8eb3 | a0cc82bef6e39617f88d5b64ad960593615bfa59 | /wsgi.py | ed8a8c9bc5854819e56be7b2f9007832c9f63486 | [] | no_license | RevengeComing/RatSnake | 624b2563234daf83f62667160483bde5eb7e56a0 | e8fbc3b58c2c1fb9e029da979baa51b5fae38a85 | refs/heads/master | 2021-01-11T17:43:46.816389 | 2018-03-05T16:04:24 | 2018-03-05T16:04:24 | 79,827,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | from ratsnake.app import create_app
app = create_app()
if __name__ == "__main__":
app.run(debug=True) | [
"s.hamzelooy@gmail.com"
] | s.hamzelooy@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.