hexsha
stringlengths
40
40
size
int64
3
1.03M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
3
972
max_stars_repo_name
stringlengths
6
130
max_stars_repo_head_hexsha
stringlengths
40
78
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
3
972
max_issues_repo_name
stringlengths
6
130
max_issues_repo_head_hexsha
stringlengths
40
78
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
116k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
3
972
max_forks_repo_name
stringlengths
6
130
max_forks_repo_head_hexsha
stringlengths
40
78
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
3
1.03M
avg_line_length
float64
1.13
941k
max_line_length
int64
2
941k
alphanum_fraction
float64
0
1
3b8068efc0f900211a91c373f17a6e5f967edb31
2,210
py
Python
jobs/models.py
Qlwentt/qually
550efa326532d6fbb154f9d244905e957f30f3ea
[ "MIT" ]
1
2017-06-20T23:18:15.000Z
2017-06-20T23:18:15.000Z
jobs/models.py
Qlwentt/qually
550efa326532d6fbb154f9d244905e957f30f3ea
[ "MIT" ]
8
2017-02-05T05:51:48.000Z
2017-08-27T15:44:05.000Z
jobs/models.py
Qlwentt/qually
550efa326532d6fbb154f9d244905e957f30f3ea
[ "MIT" ]
null
null
null
from __future__ import unicode_literals from django.db import models from django.contrib.auth.models import User from django.db.models.signals import post_save from django.dispatch import receiver import uuid # Create your models here. class Keyword(models.Model): name = models.CharField(max_length=255) category = models.CharField(max_length=255, blank=True) def __unicode__(self): return unicode(self.name) class Resume(models.Model): name = models.CharField(max_length=255) text = models.TextField() def __unicode__(self): return unicode(self.name) class CachedJob(models.Model): keywords = models.ManyToManyField(Keyword, blank=True) key = models.CharField(max_length=255) title = models.CharField(max_length=255) url = models.TextField() snippet = models.TextField() content = models.TextField() exp_req = models.IntegerField(default=0) def __unicode__(self): return unicode(self.title) class SavedJob(models.Model): cached_job = models.ForeignKey(CachedJob, on_delete=models.CASCADE, default=1) key = models.CharField(max_length=255) date = models.CharField(max_length=255) company = models.CharField(max_length=255) location = models.CharField(max_length=255) score = models.IntegerField(blank=True, null=True) def __unicode__(self): return unicode(self.cached_job.title) # Create your models here. 
class Profile(models.Model):
    """Per-user profile, created automatically alongside each User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    jobs = models.ManyToManyField(SavedJob, blank=True)
    yrs_exp = models.IntegerField(blank=True, null=True)
    job_title = models.CharField(max_length=100, blank=True)
    city = models.CharField(max_length=255, blank=True)
    state = models.CharField(max_length=100, blank=True)
    resume = models.TextField(blank=True)

    def __unicode__(self):
        return self.user.username


@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    # Create the matching Profile the first time a User row is saved.
    if created:
        Profile.objects.create(user=instance)


@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    # Persist the Profile every time its User is saved.
    instance.profile.save()
30.273973
79
0.762443
1b32cd00cd9ea96807bc71b44d71ea25fb6dd75c
2,856
py
Python
projects/cats/tests/abstraction_check.py
suvix/cs61a-fa19
53a0ecf29d13fc13113894255b6422eba0f82832
[ "MIT" ]
null
null
null
projects/cats/tests/abstraction_check.py
suvix/cs61a-fa19
53a0ecf29d13fc13113894255b6422eba0f82832
[ "MIT" ]
null
null
null
projects/cats/tests/abstraction_check.py
suvix/cs61a-fa19
53a0ecf29d13fc13113894255b6422eba0f82832
[ "MIT" ]
null
null
null
"""Infrastructure for detecting abstraction barrier violations.""" class AbstractionViolation(Exception): pass def datatype(obj): return type(obj).__name__ # Generic abstract data type class Abstract: def __add__(self, other): raise AbstractionViolation("Can't add {} object to {}".format(datatype(self), datatype(other))) def __radd__(self, other): raise AbstractionViolation("Can't add {} object to {}".format(datatype(self), datatype(other))) def __eq__(self, other): if isinstance(other, type(self)): return other is self raise AbstractionViolation("Can't use == on {} object and {}".format(datatype(self), datatype(other))) def __ne__(self, other): if isinstance(other, type(self)): return other is not self raise AbstractionViolation("Can't use != on {} object and {}".format(datatype(self), datatype(other))) def __bool__(self): raise AbstractionViolation("Can't use {} object as a boolean".format(datatype(self))) def __getitem__(self, index): raise AbstractionViolation("Can't use [] notation on {} object".format(datatype(self))) def __contains__(self, other): raise AbstractionViolation("Can't use contains notation on {} object".format(datatype(self))) def __delitem__(self, other): raise AbstractionViolation("Can't use del notation on {} object".format(datatype(self))) def __iter__(self): raise AbstractionViolation("Can't iterate on {} object".format(datatype(self))) def __len__(self): raise AbstractionViolation("Can't use len notation on {} object".format(datatype(self))) def __setitem__(self, key, item): raise AbstractionViolation("Can't use setitem notation on {} object".format(datatype(self))) def __call__(self, *args, **kwargs): raise AbstractionViolation("Can't call {} object".format(datatype(self))) def __hash__(self): return id(self) class Match(Abstract): def __init__(self, words, times_per_player): self.a, self.b = words, times_per_player def __repr__(self): return '<Match {} {}>'.format(self.a, self.b) match = Match word_at = lambda u, v: u.a[v] get_words = 
lambda u: u.a get_times = lambda u: u.b time = lambda u, v, w: u.b[v][w] old = {} def swap_implementations(impl): # save other implementations old['match'] = impl.match, impl.word_at, impl.get_words, impl.get_times, impl.time # save our implementations new_match = match, word_at, get_words, get_times, time # replace impl's implementations with ours impl.match, impl.word_at, impl.get_words, impl.get_times, impl.time = match, word_at, get_words, get_times, time def restore_implementations(impl): impl.match, impl.word_at, impl.get_words, impl.get_times, impl.time = old['match']
36.151899
116
0.681373
c47a4c8095a3955c4c76893762d1fcea4a571378
130
py
Python
FaceSwap-master/pytorch_stylegan_encoder/InterFaceGAN/models/stylegan_tf_official/training/training_loop.py
CSID-DGU/-2020-1-OSSP1-ninetynine-2
b1824254882eeea0ee44e4e60896b72c51ef1d2c
[ "MIT" ]
1
2020-06-21T13:45:26.000Z
2020-06-21T13:45:26.000Z
FaceSwap-master/pytorch_stylegan_encoder/InterFaceGAN/models/stylegan_tf_official/training/training_loop.py
CSID-DGU/-2020-1-OSSP1-ninetynine-2
b1824254882eeea0ee44e4e60896b72c51ef1d2c
[ "MIT" ]
null
null
null
FaceSwap-master/pytorch_stylegan_encoder/InterFaceGAN/models/stylegan_tf_official/training/training_loop.py
CSID-DGU/-2020-1-OSSP1-ninetynine-2
b1824254882eeea0ee44e4e60896b72c51ef1d2c
[ "MIT" ]
3
2020-09-02T03:18:45.000Z
2021-01-27T08:24:05.000Z
version https://git-lfs.github.com/spec/v1 oid sha256:4d3b31063f0346a76cd3b26d891549ec589c4b0651904fce336e2c7131960cb9 size 15522
32.5
75
0.884615
f08ca577199b5b75ab8087cc511e2b0a602d3808
1,316
py
Python
tests/util/test_streamable.py
nondejus/chia-blockchain
67373400e7f88adff0c86e3bae2ddeadb49429ae
[ "Apache-2.0" ]
null
null
null
tests/util/test_streamable.py
nondejus/chia-blockchain
67373400e7f88adff0c86e3bae2ddeadb49429ae
[ "Apache-2.0" ]
null
null
null
tests/util/test_streamable.py
nondejus/chia-blockchain
67373400e7f88adff0c86e3bae2ddeadb49429ae
[ "Apache-2.0" ]
null
null
null
import unittest
from dataclasses import dataclass
from typing import List, Optional

from src.util.ints import uint32
from src.util.streamable import Streamable, streamable


class TestStreamable(unittest.TestCase):
    def test_basic(self):
        # Round-trip a fully-typed streamable dataclass through bytes.
        @dataclass(frozen=True)
        @streamable
        class TestClass(Streamable):
            a: uint32
            b: uint32
            c: List[uint32]
            d: List[List[uint32]]
            e: Optional[uint32]
            f: Optional[uint32]

        obj = TestClass(24, 352, [1, 2, 4], [[1, 2, 3], [3, 4]], 728, None)  # type: ignore
        encoded: bytes = bytes(obj)
        assert obj == TestClass.from_bytes(encoded)

    def test_variablesize(self):
        # Fields without a fixed serialized size must be rejected.
        @dataclass(frozen=True)
        @streamable
        class TestClass2(Streamable):
            a: uint32
            b: uint32
            c: bytes

        obj2 = TestClass2(uint32(1), uint32(2), b"3")
        try:
            bytes(obj2)
            assert False
        except NotImplementedError:
            pass

        # Plain ints are likewise not serializable.
        @dataclass(frozen=True)
        @streamable
        class TestClass3(Streamable):
            a: int

        obj3 = TestClass3(1)
        try:
            bytes(obj3)
            assert False
        except NotImplementedError:
            pass


if __name__ == "__main__":
    unittest.main()
23.5
89
0.549392
8c5d1dfa43232513f54a92596fc7f5cd93e3f237
69
py
Python
TextOnScreen/__init__.py
RonNofar/TextOnScreen
8101882cd43c264b8500a6dadf516ac7ea455a52
[ "MIT" ]
null
null
null
TextOnScreen/__init__.py
RonNofar/TextOnScreen
8101882cd43c264b8500a6dadf516ac7ea455a52
[ "MIT" ]
null
null
null
TextOnScreen/__init__.py
RonNofar/TextOnScreen
8101882cd43c264b8500a6dadf516ac7ea455a52
[ "MIT" ]
null
null
null
"""TextOnScreen package initializer: re-export the ``tos`` module."""

from TextOnScreen import tos

__all__ = ["tos"]
8.625
28
0.652174
b9f4520c5ef94e8bdab68ce736bcf9b3971b145f
2,374
py
Python
eq_max.py
ArsalanSahab/EA_Equ_Max_Python
74840127c9fc525c5f5e72a18ab17422027aba3f
[ "MIT" ]
null
null
null
eq_max.py
ArsalanSahab/EA_Equ_Max_Python
74840127c9fc525c5f5e72a18ab17422027aba3f
[ "MIT" ]
null
null
null
eq_max.py
ArsalanSahab/EA_Equ_Max_Python
74840127c9fc525c5f5e72a18ab17422027aba3f
[ "MIT" ]
null
null
null
####### IMPORTS ########
import random

import genetic_algorithm as ga

'''
Problem : Maximise the output of the equation x^2 + y^2.

Technique : Evolutionary Algorithm > Genetic Algorithm, using
    1. Parent Selection  > Binary Tournament
    2. Crossover         > x from Parent 1, y from Parent 2
    3. Mutation          > 50% rate
    4. Survivor Selection> Truncation
over 20 generations, starting from the hard-coded initial generation
{1,2}, {-2,3}, {4,-1}, {5,2}, {-3,3}.
'''


def function_init(chromosome):
    """Fitness of a chromosome [x, y]: x^2 + y^2."""
    return ((chromosome[0] * chromosome[0]) + (chromosome[1] * chromosome[1]))


def ga_driver(population):
    """Evolve *population* for 20 generations, printing stats each round."""
    offspring_pool = []
    for generation in range(20):
        for _ in range(3):
            # Draw four random candidate indices for tournament selection.
            idx_a = random.randint(0, 3)
            idx_b = random.randint(0, 3)
            idx_c = random.randint(0, 3)
            idx_d = random.randint(0, 3)
            # Two binary tournaments pick the parents.
            parent_1 = ga.binary_tournament(population[idx_a], population[idx_c])
            parent_2 = ga.binary_tournament(population[idx_d], population[idx_b])
            # Crossover both orderings to produce two offspring.
            offspring_pool.append(ga.crossover(parent_1, parent_2))
            offspring_pool.append(ga.crossover(parent_2, parent_1))
        population = offspring_pool
        ga.truncate(population)
        print("Current Generation Statistics : ")
        print("Generation = " + str((generation + 1)))
        print("Elements : ", population)
        print("Highest Fitness Value = " + str(ga.fitness(ga.function, population[0])))


def main():
    ###### INITIALISATIONS #######
    population = [[1, 2], [-2, 3], [4, -1], [5, 2], [-3, 3]]
    # Rank the initial generation by fitness, best first.
    population.sort(key=function_init, reverse=True)
    print("Current Generation Statistics : ")
    print("Generation = 0")
    print("Elements : ", population)
    print("Highest Fitness Value = " + str(ga.fitness(ga.function, population[0])))
    ga_driver(population)


if __name__ == "__main__":
    main()
18.261538
86
0.598989
af7a9561369062f4bc3b3cc7feed9860838ab850
3,481
py
Python
bindings/python/ensmallen/datasets/string/marinobactersalarius.py
AnacletoLAB/ensmallen_graph
b2c1b18fb1e5801712852bcc239f239e03076f09
[ "MIT" ]
5
2021-02-17T00:44:45.000Z
2021-08-09T16:41:47.000Z
bindings/python/ensmallen/datasets/string/marinobactersalarius.py
AnacletoLAB/ensmallen_graph
b2c1b18fb1e5801712852bcc239f239e03076f09
[ "MIT" ]
18
2021-01-07T16:47:39.000Z
2021-08-12T21:51:32.000Z
bindings/python/ensmallen/datasets/string/marinobactersalarius.py
AnacletoLAB/ensmallen
b2c1b18fb1e5801712852bcc239f239e03076f09
[ "MIT" ]
3
2021-01-14T02:20:59.000Z
2021-08-04T19:09:52.000Z
""" This file offers the methods to automatically retrieve the graph Marinobacter salarius. The graph is automatically retrieved from the STRING repository. References --------------------- Please cite the following if you use the data: ```bib @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } ``` """ from typing import Dict from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph from ...ensmallen import Graph # pylint: disable=import-error def MarinobacterSalarius( directed: bool = False, preprocess: bool = True, load_nodes: bool = True, verbose: int = 2, cache: bool = True, cache_path: str = "graphs/string", version: str = "links.v11.5", **additional_graph_kwargs: Dict ) -> Graph: """Return new instance of the Marinobacter salarius graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False Wether to load the graph as directed or undirected. By default false. preprocess: bool = True Whether to preprocess the graph to be loaded in optimal time and memory. load_nodes: bool = True, Whether to load the nodes vocabulary or treat the nodes simply as a numeric range. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache: bool = True Whether to use cache, i.e. download files only once and preprocess them only once. cache_path: str = "graphs" Where to store the downloaded graphs. version: str = "links.v11.5" The version of the graph to retrieve. 
The available versions are: - homology.v11.5 - physical.links.v11.5 - links.v11.5 additional_graph_kwargs: Dict Additional graph kwargs. Returns ----------------------- Instace of Marinobacter salarius graph. References --------------------- Please cite the following if you use the data: ```bib @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } ``` """ return AutomaticallyRetrievedGraph( graph_name="MarinobacterSalarius", repository="string", version=version, directed=directed, preprocess=preprocess, load_nodes=load_nodes, verbose=verbose, cache=cache, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs )()
33.152381
223
0.678541
82e8bb9aa03890801807fed0468aea44423aa72e
148
py
Python
server/src/tests/samples/import8.py
jhutchings1/pyright
2b8593a58a2aecc95dac49cce92fc16678cd4e14
[ "MIT" ]
1
2020-12-28T16:58:24.000Z
2020-12-28T16:58:24.000Z
server/src/tests/samples/import8.py
jhutchings1/pyright
2b8593a58a2aecc95dac49cce92fc16678cd4e14
[ "MIT" ]
3
2022-03-03T03:03:24.000Z
2022-03-25T14:43:54.000Z
server/src/tests/samples/import8.py
jhutchings1/pyright
2b8593a58a2aecc95dac49cce92fc16678cd4e14
[ "MIT" ]
null
null
null
# This sample is imported by import9.py. # Implement __getattr__ function as described in PEP 562. def __getattr__(name: str): return None
14.8
57
0.736486
237d9c1214d079a549f7def5d35cb43b3a7f2735
372
py
Python
Mycroft/mircetete-indication-skill/__init__.py
wyutong1997/SmartMedicineDispenser
5033b355f4a1fec688a1bd5dd6987611afb8feaa
[ "MIT" ]
5
2020-03-22T22:49:24.000Z
2020-04-08T14:56:23.000Z
Mycroft/mircetete-indication-skill/__init__.py
mcboyd-bu/EC544-Smart-Pill-Dispenser
f1267c5ba2b11c6e0c61ccf3d050dc63cff48091
[ "MIT" ]
1
2021-09-12T21:01:09.000Z
2021-09-12T21:01:09.000Z
Mycroft/mircetete-indication-skill/__init__.py
wyutong1997/SmartMedicineDispenser
5033b355f4a1fec688a1bd5dd6987611afb8feaa
[ "MIT" ]
2
2020-03-28T16:30:23.000Z
2020-04-14T00:03:46.000Z
from mycroft import MycroftSkill, intent_file_handler


class MirceteteIndication(MycroftSkill):
    """Mycroft skill that speaks the mircetete indication dialog."""

    def __init__(self):
        MycroftSkill.__init__(self)

    @intent_file_handler('indication.mircetete.intent')
    def handle_indication_mircetete(self, message):
        # Answer the matched intent by speaking the corresponding dialog.
        self.speak_dialog('indication.mircetete')


def create_skill():
    """Entry point used by Mycroft to instantiate the skill."""
    return MirceteteIndication()
23.25
55
0.760753
3d6c45137feab53cec3191a63356f10877e1dead
169,383
py
Python
languages/prs.py
PeterDaveHello/eden
26174a9dde2f19cd3bc879694f373ad5f765b6ed
[ "MIT" ]
1
2015-01-24T04:31:51.000Z
2015-01-24T04:31:51.000Z
languages/prs.py
PeterDaveHello/eden
26174a9dde2f19cd3bc879694f373ad5f765b6ed
[ "MIT" ]
null
null
null
languages/prs.py
PeterDaveHello/eden
26174a9dde2f19cd3bc879694f373ad5f765b6ed
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- { '# of International Staff': 'کارمندان بین المللی #', '# of National Staff': 'کارمندان ملی #', '# selected': 'انتخاب شده #', '%(app)s not installed. Ask the Server Administrator to install on Server.': '.نصب نشدند. از مدیر سرور تقاضا نمایید تا در سرور نصب کند %(app)s', '%(count)s Roles of the user removed': 'نقش استعمال کننده ها حذف شد %(count)s', '%(count)s Users removed from Role': 'استعمال کننده ها از نقش حذف شدند %(count)s', '%(label)s contains %(values)s': '%(values)s دارای %(label)s هستند', '%(label)s contains any of %(values)s': '%(values)s دارای هر یک از %(label)s هستند', '%(label)s does not contain %(values)s': '%(values)s دارای %(label)s نیستند', '%(label)s is %(values)s': '%(values)s هستند %(label)s', '%(label)s like %(values)s': '%(values)s مشابه با %(label)s', '%(label)s not like %(values)s': '%(values)s باهای غیر مشابه %(label)s', '%(module)s not installed': 'نصب نشده اند %(module)s', '%(pe)s in %(location)s': '%(location)s در %(pe)s', '%(proj4js)s definition': 'تعریف %(proj4js)s', '%(resource)s Filter': ' فلتر %(resource)s', '%(site_label)s Status': '%(site_label)s جایگاه', '%(site_label)s Status added': 'ها اضافه شد %(site_label)s جایگاه', '%(site_label)s Status deleted': 'ها حذف شد %(site_label)s جایگاه', '%(site_label)s Status updated': 'ها تجدید شد %(site_label)s جایگاه', '%(system_name)s - New User Registered': 'استعمال کننده جدید ثبت شد - %(system_name)s', '%(system_name)s - New User Registration Approval Pending': 'ثبت استعمال کننده جدید در حالت تعلیق - %(system_name)s', '%(system_name)s has sent an email to %(email)s to verify your email address.\\nPlease check your email to verify this address. If you do not receive this email please check you junk email or spam filters.': '.برای اینکه آدرس ایمیل شما را تثبیت نماید %(email)s ها یک ایمیل فرستاده است %(system_name)s\\nلطفاً آدرس ایمیل تان را بررسی نمایید تا این آدرس را تثبیت نمایید. 
اگر این ایمیل را دریافت نه نمودید لطفاً سپم یا ایمیل های اضافی تان را بررسی نمایید.', '%s and %s': '%s و %s', '%s or %s': '%s یا %s', '& then click on the map below to adjust the Lat/Lon fields': 'و بعداً بالای نقشه کلیک نمایید تا زمینه Lat/Lon را تغییر دهید', '(filtered from _MAX_ total entries)': '(فلتر شده از _MAX_ مجموع معلومات داخل شده)', '* Required Fields': 'زمینه های لازم *', '...or add a new bin': 'یا یک bin جدید را اضافه نمایید...', '1 location, shorter time, can contain multiple Tasks': 'موقعیت، وقت کمتر، شامل چندین فعالیت بوده می تواند‎ ١', '1. Fill the necessary fields in BLOCK CAPITAL letters.': 'جاهای ضروری را توسط حروف کلان خانه پری نمایید ‎.1', '2. Always use one box per letter and leave one box space to separate words.': 'همیشه برای هر حرف از یک باکس و برای فاصله دادن هر لغت یک باکس را خالی رها کنید ‎.2', '3. Fill in the circles completely.': 'دوایر را کاملاً خانه پری نمایید ‎.3', '3W Report': '3W گزارش', 'A brief description of the group (optional)': 'تشریحات مختصری از گروپ (اختیاری)', 'A file in GPX format taken from a GPS.': 'یک فایل دریافت شده از GPX که توسط GPS گرفته شده است', "A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": "یک موقعیت که منطقه جغرافیایی را برای این ساحه مشخص می کند. 
این یک موقعیت از سلسله موقعیت ها یا 'موقعیت گروپ' یا موقعیت است که دارای سرحد با منطقه می باشد.", 'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'یک نشانگر تطبیق شده در یک موقعیت فردی تعیین می شود، اگر لغو نمودن نشانگر تطبیق شده به طبقه قابلیت لازم باشد.', 'A project milestone marks a significant date in the calendar which shows that progress towards the overall objective is being made.': 'یک مرحله مهم پروژه یک تاریخ مشخص را در جنتری نشان می دهد که جریان آن به طرف هدف کلی باشد.', 'A strict location hierarchy cannot have gaps.': 'یک سلسله موقعیت محکم دارای خلاء بوده نمی تواند', 'A task is a piece of work that an individual or team can do in 1-2 days.': 'یک وظیفه عبارت از مقدار کاری است که یک شخص یا تیم می تواند آن را در مدت 1-2 روز انجام دهد', "A volunteer is defined as active if they've participated in an average of 8 or more hours of Program work or Trainings per month in the last year": 'یک داوطلب فعال گفته می شود اگر حد اوسط در برنامه های کاری یا تربیوی 8 ساعته یا بیشتر از 8 ساعته در یک ماه در سال گذشته اشتراک کرده باشد.', 'Abbreviation': 'اختصار', 'About': 'درمورد', 'About Us': 'در مورد ما', 'Academic': 'اکادمیک', 'Access denied': 'امکان دسترسی وجود ندارد', 'Account Registered - Please Check Your Email': 'حساب ثبت شد. لطفاً ایمیل خود را چک کنید.', 'Acronym': 'سر نام', "Acronym of the organization's name, eg. 
IFRC.": 'سر نام اسم موسسه برای مثال: IFRC', 'ACTION REQUIRED': 'ضرورت به فعالیت', 'Activate': 'فعال کردن', 'activate to sort column ascending': 'فعال نمایید تا ستون را به شکل صعودی ترتیب نمایید', 'activate to sort column descending': 'فعال نمایید تا ستون را به شکل نزولی ترتیب نمایید', 'Active': 'فعال', 'Active Missions': 'عملیات فعال', 'Active?': 'فعال؟', 'Activities': 'فعالیت ها', 'Activities matching Assessments': 'فعالیت های مطابق با ارزیابی ', 'Activity': 'فعالیت', 'Activity Added': 'فعالیت اضافه گردید', 'Activity Deleted': 'فعالیت حذف گردید', 'Activity Details': 'جزئیات فعالیت', 'Activity Organization': 'سازمان فعالیت', 'Activity Organization Added': 'سازمان فعالیت اضافه گردید', 'Activity Organization Deleted': 'سازمان فعالیت حذف گردید', 'Activity Organization Updated': 'سازمان فعالیت تجدید گردید', 'Activity Organizations': 'سازمان های فعالیت', 'Activity Report': 'گزارش فعالیت', 'Activity Type': 'نوع فعالیت', 'Activity Type Added': 'نوع فعالیت اضافه گردید', 'Activity Type added to Activity': 'نوع فعالیت در فعالیت اضافه گردید', 'Activity Type added to Project Location': 'نوع فعالیت در موقعیت پروژه اضافه گردید', 'Activity Type Deleted': 'نوع فعالیت حذف گردید', 'Activity Type removed from Activity': 'نوع فعالیت از فعالیت حذف گردید', 'Activity Type removed from Project Location': 'نوع فعالیت از موقعیت پروژه حذف گردید', 'Activity Type Updated': 'نوع فعالیت تجدید گردید', 'Activity Types': 'انواع فعالیت', 'Activity Updated': 'فعالیت تجدید گردید', 'Add': 'اضافه نمودن', 'Add %(site_label)s Status': '%(site_label)s وضعیت نمودن وضعیت', 'Add a new certificate to the catalog.': 'یک تصدیق نامه جدید را به فهرست اضافه نمایید', 'Add a new competency rating to the catalog.': 'میزان شایسته گی جدید را به فهرست اضافه نمایید', 'Add a new membership type to the catalog.': 'یک نوع عضویت جدید را به فهرست اضافه نمایید', 'Add a new program to the catalog.': 'یک برنامه جدید را به فهرست اضافه نمایید', 'Add a new skill type to the catalog.': 'یک نوع مهارت جدید را به فهرست اضافه نمایید', 
'Add a Person': 'شخص را اضافه نمایید', 'Add Activity Type': 'نوع فعالیت جدید', 'Add Activity Type to Activity': 'نوع فعالیت را در فعالیت اضافه کنید', 'Add Activity Type to Project Location': 'نوع فعالیت را در موقعیت پروژه اضافه کنید', 'Add Address': 'آدرس را اضافه نمایید', 'Add Affiliation': 'ارتباط را اضافه نمایید', 'Add Award': 'جایزه را اضافه نمایید', 'Add all organizations which are involved in different roles in this project': 'تمام موسسه هایی که به نقش های مختلف درین پروژه شامل بوده اند را اضافه نمایید', 'Add Annual Budget': 'بودجه سالیانه جدید', 'Add Appraisal': 'ارزیابی را اضافه نمایید', 'Add Beneficiaries': 'بهره برداران را اضافه نمایید', 'Add Branch Organization': 'نماینده گی موسسه را اضافه نمایید', 'Add Certificate for Course': 'تصدیق نامه دوره را اضافه نمایید', 'Add Certification': 'تصدیق را اضافه نمایید', 'Add Contact': 'تماس را اضافه نمایید', 'Add Contact Information': 'معلومات تماس را اضافه نمایید', 'Add Credential': 'اعتبار نامه را اضافه نمایید', 'Add Data to Theme Layer': 'معلومات را در بخش موضوع اضافه نمایید', 'Add Deployment': 'گسترش را اضافه نمایید', 'Add Education Detail': 'جزئیات آموزشی را اضافه نمایید', 'Add Group Member': 'اعضای گروپ را اضافه نمایید', 'Add Hazard to Project': 'خطر به پروژه را اضافه نمایید', 'Add Hours': 'ساعات را اضافه نمایید', 'Add Identity': 'هوویت را اضافه نمایید', 'Add Image': 'عکس را اضافه نمایید', 'Add Keyword': 'کلمه کلیدی را اضافه نمایید', 'Add Layer from Catalog': 'لایه را از فهرست اضافه نمایید', 'Add Layer to this Profile': 'لایه را درین پروفایل اضافه نمایید', 'Add Line': 'سطر را اضافه نمایید', 'Add Location to Organization': 'موقعیت را به موسسه اضافه نمایید', 'Add Log Entry': 'ورودی را اضافه نمایید', 'Add Member': 'عضو را اضافه نمایید', 'Add Membership': 'عضویت را اضافه نمایید', 'Add New Address': 'آدرس جدید را اضافه نمایید', 'Add New Affiliation': 'پیوسته گی جدید را اضافه نمایید', 'Add New Appraisal': 'تعیین قیمت جدید را اضافه نمایید', 'Add New Award': 'جایزه جدید را اضافه نمایید', 'Add New Beneficiaries': 'فایده 
گیرنده جدید را اضافه نمایید', 'Add New Beneficiary Type': 'نوع فایده گیرنده جدید را اضافه نمایید', 'Add New Branch': 'نماینده گی جدید را اضافه نمایید', 'Add New Branch Organization': 'نماینده گی جدید موسسه را اضافه نمایید', 'Add New Campaign': 'کمپاین جدید را اضافه نمایید', 'Add New Certificate': 'تصدیق نامه جدید را اضافه نمایید', 'Add New Certification': 'تصدیق جدید را اضافه نمایید', 'Add New Cluster': 'گروه جدید را اضافه نمایید', 'Add New Coalition': 'وابسته گی جدید را اضافه نمایید', 'Add New Community': 'انجمن جدید را اضافه نمایید', 'Add New Competency Rating': 'میزان شایسته گی جدید را اضافه نمایید', 'Add New Contact': 'تماس جدید را اضافه نمایید', 'Add New Course': 'دوره جدید را اضافه نمایید', 'Add New Course Certificate': 'تصدیق نامه جدید دوره را اضافه نمایید', 'Add New Credential': 'اعتبار نامه جدید را اضافه نمایید', 'Add New Data to Theme Layer': 'معلومات جدید را درزمینه لایه اضافه نمایید', 'Add New Department': 'اداره جدید را اضافه نمایید', 'Add New Deployment': 'گسترش جدید را اضافه نمایید', 'Add New Donor': 'کمک کننده جدید را اضافه نمایید', 'Add New Entry': 'ورودی جدید را اضافه نمایید', 'Add New Facility': 'تسهیلات جدید را اضافه نمایید', 'Add New Facility Type': 'نوع تسهیلات جدید را اضافه نمایید', 'Add New Feature Layer': 'مشخصات جدید لایه را اضافه نمایید', 'Add New Group': 'گروپ جدید را اضافه نمایید', 'Add New Hazard': 'خطر جدید را اضافه نمایید', 'Add New Hours': 'ساعات جدید را اضافه نمایید', 'Add New Identity': 'هوویت جدید را اضافه نمایید', 'Add New Image': 'عکس جدید را اضافه نمایید', 'Add New Job Title': 'عنوان وظیفه جدید را اضافه نمایید', 'Add New Keyword': 'کلمه کلیدی جدید را اضافه نمایید', 'Add New Layer': 'لایه جدید را اضافه نمایید', 'Add New Layer to Symbology': 'لایه جدید را در سمبول شناسی اضافه نمایید', 'Add New Location': 'موقعیت جدید را اضافه نمایید', 'Add New Location Hierarchy': 'سلسله مراتب موقعیت جدید را ضافه نمایید', 'Add New Log Entry': 'ورودی جدید را اضافه نمایید', 'Add New Mailing List': 'لست ایمیل جدید را اضافه نمایید', 'Add New Map 
Profile': 'تنظیمات جدید نقشه را اضافه نمایید', 'Add New Marker': 'علامت گذار جدید را اضافه نمایید', 'Add New Member': 'عضو جدید را اضافه نمایید', 'Add New Membership': 'عضویت جدید را اضافه نمایید', 'Add New Milestone': 'مرحله مهم جدید را اضافه نمایید', 'Add New Network': 'شبکه جدید را اضافه نمایید', 'Add New Office': 'اداره جدید را اضافه نمایید', 'Add New Office Type': 'نوع اداره جدید را اضافه نمایید', 'Add New Organization': 'موسسه جدید را اضافه نمایید', 'Add New Organization Type': 'نوع موسسه جدید را اضافه نمایید', 'Add New Output': 'خروجی جدید را اضافه نمایید', 'Add New Participant': 'شریک جدید را اضافه نمایید', "Add New Person's Details": 'مشخصات شخص جدید را اضافه نمایید', 'Add New PoI Type': 'نوع POL جدید را اضافه نمایید', 'Add New Point of Interest': 'نقطه مورد نظر جدید را اضافه نمایید', 'Add New Policy or Strategy': 'استراتیژی یا پالیسی جدید را اضافه نمایید', 'Add New Professional Experience': 'تجربه مسلکی جدید را اضافه نمایید', 'Add New Profile Configuration': 'تنظیمات پروفایل جدید را اضافه نمایید', 'Add New Program': 'برنامه جدید را اضافه نمایید', 'Add New Project': 'پروژه جدید را اضافه نمایید', 'Add New Projection': 'طرح جدید را اضافه نمایید', 'Add New Record': 'ثبت جدید را اضافه نمایید', 'Add New Region': 'منطقه جدید را اضافه نمایید', 'Add New Resource': 'منبع جدید را اضافه نمایید', 'Add New Response Summary': 'خلاصه پاسخ جدید را اضافه نمایید', 'Add New Role': 'نقش جدید را اضافه نمایید', 'Add New Room': 'اتاق جدید را اضافه نمایید', 'Add New Sector': 'بخش جدید را اضافه نمایید', 'Add New Service': 'خدمت جدید را اضافه نمایید', 'Add New Skill': 'مهارت جدید را اضافه نمایید', 'Add New Skill Equivalence': 'تعادل مهارت جدید را اضافه نمایید', 'Add New Skill Type': 'نوعیت مهارت را اضافه نمایید', 'Add New Staff Assignment': 'ماموریت کارمند جدید را اضافه نمایید', 'Add New Staff Member': 'عضو کارمند جدید را اضافه نمایید', 'Add New Status': 'حالات جدید را اضافه نمایید', 'Add New Symbology': 'سمبول شناسی جدید را اضافه نمایید', 'Add New Symbology for Layer': 'سمبول 
شناسی جدید را برای لایه اضافه نمایید', 'Add New Task': 'وظیفه جدید را اضافه نمایید', 'Add New Team': 'تیم جدید را اضافه نمایید', 'Add New Team Member': 'عضو جدید تیم را اضافه نمایید', 'Add New Theme': 'موضوع جدید را اضافه نمایید', 'Add New Training': 'آموزش جدید را اضافه نمایید', 'Add New Training Event': 'رویداد آموزشی جدید را اضافه نمایید', 'Add New Volunteer': 'داوطلب جدید را اضافه نمایید', 'Add New Volunteer Cluster': 'گروه داوطلب جدید را اضافه نمایید', 'Add New Volunteer Cluster Position': 'منصب گروه داوطلب جدید را اضافه نمایید', 'Add New Volunteer Cluster Type': 'نوع گروه داوطلب جدید را اضافه نمایید', 'Add New Volunteer Role': 'نقش داوطلب جدید را اضافه نمایید', 'Add Office': 'اداره را اضافه نمایید', 'Add Organization': 'موسسه را اضافه نمایید', 'Add Organization to Activity': 'سازمان فعالیت را اضافه کنید', 'Add Organization to Project': 'پروژه موسسه را اضافه نمایید', 'Add Participant': 'شریک را اضافه نمایید', 'Add Person': 'شخص را اضافه نمایید', "Add Person's Details": 'مشخصات شخص را اضافه نمایید', 'Add PoI Type': 'نوع POL را اضافه نمایید', 'Add Point': 'نقطه را اضافه نمایید', 'Add Point of Interest': 'نقطه مورد نظر را اضافه نمایید', 'Add Policy or Strategy': 'استراتیژی یا پالیسی را اضافه نمایید', 'Add Polygon': 'چند ضلعی را اضافه نمایید', 'Add Professional Experience': 'تجربه مسلکی را اضافه نمایید', 'Add Profile Configuration': 'تنظیمات پروفایل را اضافه نمایید ', 'Add Profile Configuration for this Layer': 'تنظیمات پروفایل را برای این لایه اضافه نمایید', 'Add Project': 'پروژه را اضافه نمایید', 'Add Role': 'نقش را اضافه نمایید', 'Add Room': 'اتاق را اضافه نمایید', 'Add saved search': 'جستجوی ذخیره شده را اضافه نمایید', 'Add search': 'جستجو را اضافه نمایید', 'Add Sector': 'بخش را اضافه نمایید', 'Add Sector to Organization': 'بخش را به موسسه اضافه نمایید', 'Add Sector to Project': 'بخش را به پروژه اضافه نمایید', 'Add Sector to Theme': 'بخش را به موضوع اضافه نمایید', 'Add Service': 'خدمت را اضافه نمایید', 'Add Service to Organization': 'خدمت را به موسسه اضافه 
نمایید', 'Add Skill': 'مهارت را اضافه نمایید', 'Add Skill Equivalence': 'تعادل مهارت را اضافه نمایید', 'Add Skill Type': 'نوع مهارت را اضافه نمایید', 'Add Staff Assignment': 'ماموریت کارمند را اضافه نمایید', 'Add Staff Member to Project': 'عضو کارمند را به پروژه اضافه نمایید', 'Add Status': 'حالات را اضافه نمایید', 'Add Symbology': 'سمبول شناسی را اضافه نمایید', 'Add Symbology for Layer': 'سمبول شناسی را به لایه اضافه نمایید', 'Add Task': 'وظیفه را اضافه نمایید', 'Add Team': 'تیم را اضافه نمایید', 'Add Team Member': 'عضو تیم را اضافه نمایید', 'Add Theme': 'موضوع را اضافه نمایید', 'Add Theme to Activity': 'موضوع را به فعالیت اضافه نمایید', 'Add Theme to Project': 'موضوع را به پروژه اضافه نمایید', 'Add Theme to Project Location': 'موضوع را به موقعیت پروژه اضافه نمایید', 'Add this entry': 'این ورودی را اضافه نمایید', 'Add to a Team': 'به یک تیم اضافه نمایید', 'Add Training': 'آموزش را اضافه نمایید', 'Add Volunteer to Project': 'داوطلب را به پروژه اضافه نمایید', 'Add...': 'اضافه کنید...', 'Address': 'آدرس', 'Address added': 'آدرس اضافه گردید', 'Address deleted': 'آدرس حذف گردید', 'Address Details': 'جزئیات آدرس', 'Address Mapped': 'آدرس نقشه برداری شد', 'Address NOT Mapped': 'آدرس نقشه برداری نشد', "Address of an image to use for this Layer in the Legend. This allows use of a controlled static image rather than querying the server automatically for what it provides (which won't work through GeoWebCache anyway).": 'آدرس یک تصویر برای استفاده این لایه در رهنمای نقشه. 
این استفاده تصویر کنترول شده ثابت را بجای پرسیدن از سرور طور اتومات (که دیگر از طریق GeoWebCache کار نخواهد کرد) اجازه می دهد', 'Address Type': 'نوع آدرس', 'Address updated': 'آدرس تجدید شد', 'Addresses': 'آدرس ها', 'Adjust Stock Levels': 'درجات موجودی را تنظیم کنید', 'Admin': 'معاون', 'Admin Assistant': 'نائب معاون', 'Administrador Database': 'دیتابیس مدیر', 'Adolescent (12-20)': 'بالغ (12-20)', 'Adult (21-50)': 'بالغ (21-50)', 'Advanced Search': 'جستجوی پیشرفته', 'Advocacy': 'وکالت', 'Affiliation added': 'پیوسته گی اضافه گردید', 'Affiliation deleted': 'پیوسته گی حذف گردید', 'Affiliation Details': 'جزئیات پیوسته گی', 'Affiliation updated': 'پیوسته گی تمدید گردید', 'Affiliations': 'پیوسته گی ها', 'Age': 'سن', 'Age Group': 'گروپ سنی', 'Airport': 'میدان هوایی', 'Alerts': 'هشدار ها', 'All': 'تمام', 'All Entities': 'تمام موجودی ها', 'All Open Tasks': 'تمام وظایف را باز کنید', 'All Records': 'تمام ثبت ها', 'All selected': 'همه انتخاب گردید', 'All Tasks': 'تمام وظایف', 'Amount': 'مقدار', 'Amount of the Project Budget spent at this location': 'مقداری از بودجه پروژه درین موقعیت مصرف گردید', 'An error occured, please %(reload)s the page.': 'یک خطا رخ داده است، لطفا %(reload)s این صفحه را.', 'An ESRI Shapefile (zipped)': 'یک فایل شکلی ظیپ شده ESRI', 'an individual/team to do in 1-2 days': 'یک تیم/مفرد برای اجرای 2-1 روز', 'and': 'و ', 'Annual Budget': 'بودجه سالیانه', 'Annual Budget deleted': 'بودجه سالیانه حذف گردید', 'Annual Budget updated': 'بودجه سالیانه تمدید گردید', 'Annual Budgets': 'بودجه های سالیانه', 'Anonymous': 'بی نام', 'anonymous user': 'استفاده کننده بی نام', 'ANY': 'هر', 'Any': 'هر', 'Appeal Code': 'درخواست کود', 'Applicable to projects in Pacific countries only': 'فقط قابل اجرا در پروژه های کشور های صلح جو', 'Application': 'درخواستی', 'Application Permissions': 'اجازه های درخواستی', 'Appraisal added': 'ارزیابی اضافه گردید', 'Appraisal deleted': 'ارزیابی حذف گردید', 'Appraisal Details': 'جزئیات ارزیابی', 'Appraisal updated': 'ارزیابی تمدید گردید', 
'Appraisals': 'ارزیابی ها', 'Approve': 'تصویب کردن', 'Approver': 'تصویب کننده', 'ArcGIS REST Layer': 'ArcGIS لایه باقیمانده', 'Are you sure you want to delete this record?': 'آیا شما مطمئن هستید که می خواهید این ثبت را حذف نمایید؟', 'Assessment and Community/Beneficiary Identification': 'ارزیابی و انجمن/هوویت فایده گیرنده', 'Assessments': 'ارزیابی', 'Asset': 'سرمایه', 'Assets': 'سرمایه ها', 'Assign %(staff)s': '%(staff)s تقرر', 'Assign another Role': 'تعیین یک نقش دیگر', 'Assign Asset': 'تقرر سرمایه', 'Assign Role to a User': 'تقرر یک نقش برای یک استفاده کننده', 'Assign Staff': 'تقرر کارمند', 'Assigned': 'تعیین گردید', 'Assigned To': 'تعیین گردید به', 'Assigned to': 'تعیین گردید به', 'Association': 'انجمن', 'Attachments': 'ضمیمه ها', 'Attributes': 'خواص', 'Attribution': 'تخصیص', 'Australian Dollars': 'دالر استرالیایی', 'Authentication Required': 'به سند ضرورت است', 'Auxiliary Role': 'نقش کمکی', 'Availability': 'موجودیت', 'Available Forms': 'اشکال موجود', 'Available in Viewer?': 'موجو در نمایشگر؟', 'average': 'اوسط', 'Average Rating': 'نرخ گذاری متوسط', 'Award': 'جایزه', 'Award added': 'جایزه اضافه گردید', 'Award deleted': 'جایزه حذف گردید', 'Award updated': 'جایزه تجدید گردید', 'Awards': 'جوایز', 'Awareness Raising': 'بلند بردن سطح اطلاعات', 'Back to Roles List': 'دوباره به لست نقش ها', 'Back to Top': 'دوباره به طرف بالا', 'Back to Users List': 'دوباره به طرف لست استفاده کننده گان', 'Background Color': 'رنگ زمینه', 'Bahai': 'بهایی', 'Baldness': 'کل (بی موی)', 'Base Layer?': 'لایه مرکزی؟', 'Base Layers': 'سطوح مرکزی', 'Base Location': 'موقعیت مرکز', 'Basic Details': 'مشخصات ابتدایی', 'Basic Search': 'جستجوی ابتدایی', 'Bdrt (Branch Disaster Response Teams)': 'Bdrt ( شاخه تیم های مقابله با حادثات)', 'Behaviour Change Communication': 'رفتار ارتباطات را تغییر می آورد', 'Beneficiaries': 'فایده کننده گان', 'Beneficiaries Added': 'فایده کننده گان اضافه گردید', 'Beneficiaries Deleted': 'فایده کننده گان حذف گردید', 'Beneficiaries Details': 'مشخصات فایده کننده گان', 
'Beneficiaries Updated': 'فایده کننده گان تجدید گردید', 'Beneficiary Report': 'گزارش فایده گیرنده', 'Beneficiary Type': 'نوع فایده گیرنده', 'Beneficiary Type Added': 'نوع فایده گیرنده اضافه گردید', 'Beneficiary Type Deleted': 'نوع فایده گیرنده حذف گردید', 'Beneficiary Type Updated': 'نوع فایده گیرنده تجدید گردید', 'Beneficiary Types': 'نوع فایده گیرنده', 'Better Programming Initiative Guidance': 'برنامه ریزی بهتر هدایت ابتکار', 'Bilateral': 'دوجانبه', 'Bing Layer': 'لایه بینگ', 'black': 'سیاه', 'Blocked': 'مسدود گردید', 'blond': 'سفید', 'Blood Banking': 'بانک خون', 'Blood Donor Recruitment': 'جمع آوری کمک های خون', 'Blood Type (AB0)': 'نوع خون (AB0)', 'blue': 'آبی', 'Body': 'بدن', 'Body Hair': 'موی بدن', 'Boq and Cost Estimation': 'برآورد مصارف', 'Both': 'هردو', 'Branch': 'شاخه (نماینده گی)', 'Branch Coordinator': 'هماهنگ کننده نماینده گی', 'Branch Organization added': 'نماینده گی موسسه اضافه گردید', 'Branch Organization deleted': 'نماینده گی موسسه حذف گردید', 'Branch Organization Details': 'مشخصات نماینده گی موسسه', 'Branch Organization updated': 'نماینده گی موسسه تجدید گردید', 'Branch Organizations': 'نماینده گی موسسه ها', 'Branch Planning': 'پلانگذاری نماینده گی ها', 'Branches': 'نماینده گی ها', 'Breakdown': 'شکست', 'brown': 'نصواری', 'Buddhist': 'بودایی', 'Budget': 'بودجه', 'Buffer': 'واسطه', 'Building Name': 'اسم ساختمان', 'by': 'توسط', 'by %(person)s': '%(person)s توسط', 'By selecting this you agree that we may contact you.': 'با انتخاب کردن این شما موافقت می کنید که شاید ما با شما به تماس شویم.', 'Calendar': 'جنتری (تقویم)', 'Camp': 'اردوگاه', 'Campaign': 'کمپاین', 'Campaign Added': 'کمپاین اضافه گردید', 'Campaign Deleted': 'کمپاین حذف گردید', 'Campaign Message': 'پیام کمپاین', 'Campaign Updated': 'کمپاین تجدید گردید', 'Campaigns': 'کمپاین ها', 'Can read PoIs either from an OpenStreetMap file (.osm) or mirror.': 'می تواند Pol ها را از فایل OSM (باز کردن نقش سرک) را بخواند', 'Canadian Dollars': 'دالر کانادایی', 'Cancel': 'لغو کردن', 'Cancel Crop': 'لغو کردن 
قطعه ', 'Cancel editing': 'لغو کردن تصحیح', 'Canceled': 'لغو گردید', 'cannot be deleted.': 'حذف شده نمی تواند', 'Cannot make an Organization a branch of itself!': 'یک موسسه نمی تواند نماینده گی خود را بسازد', 'Cannot open created OSM file!': 'نمی تواند فایل OSM ایجاد شده را باز کند', 'Cannot read from file: %(filename)s': '%(filename)s :نمی تواند از روی فایل بخواند', 'Capacity Building': 'ظرفیت ساختمان', 'Capacity Building of Governance': 'ظرفیت ساختمان نظارت', 'Capacity Building of Management Staff': 'ظرفیت ساختمان کارمندان مدیریت', 'Capacity Building of Staff': 'ظرفیت ساختمان کارمندان', 'Capacity Building of Volunteers': 'ظرفیت ساختمان داوطلبان', 'Catalogs': 'فهرست ها', 'Catchment Protection': 'حفاظت حوزه آبریز', 'caucasoid': 'ککاسویید', 'CDRT (Community Disaster Response Teams)': 'CDRT (تیم واکنش به حادثات انجمن)', 'Cell Tower': 'برج حجره', 'Certificate': 'تصدیق نامه', 'Certificate added': 'تصدیق نامه اضافه گردید', 'Certificate Catalog': 'فهرست تصدیق نامه', 'Certificate deleted': 'تصدیق نامه حذف گردید', 'Certificate Details': 'مشخصات تصدیق نامه', 'Certificate updated': 'تصدیق نامه تجدید گردید', 'Certificates': 'تصدیق نامه ها', 'Certification added': 'تصدیق اضافه گردید', 'Certification deleted': 'تصدیق حذف گردید', 'Certification Details': 'مشخصات تصدیق', 'Certification updated': 'تصدیق تجدید گردید', 'Certifications': 'تصدیق ها', 'Certifying Organization': 'تصدیق موسسه', 'Chairman': 'رئیس', 'Change Password': 'تغییر رمز عبور', 'Chapter': 'فصل', 'Check all': 'انتخاب همه', 'Check this to make your search viewable by others.': 'این اختیار را انتخاب کنید تا جستجوی خود را با دیگران قابل دید کنید', 'Check-In': 'بررسی ورودی', 'Check-Out': 'بررسی خروجی', 'Child (2-11)': 'طفل (2-11)', "Children's Education": 'تحصیلات اطفال', 'Choose Country': 'کشور را انتخاب نمایید', 'Christian': 'مسیحی', 'Clean-Up Campaign': 'کمپاین را پاک نمایید', 'Cleaner': 'پاک کننده', 'Clear': 'پاک', 'clear': 'پاک', 'Clear All': 'همه را پاک نمایید', 'Clear filter': 'فیلتر را پاک نمایید', 'Clear 
selection': 'انتخاب را پاک نمایید', 'Click anywhere on the map for full functionality': 'یک دکمه را بالای نقشه برای کارکرد کامل فشار دهید', 'click here': 'اینجا را کلیک نمایید', 'Click on the link': 'لینک را انتخاب نمایید', 'Click on the slider to choose a value': 'برای انتخاب یک قیمت بالای سلایدر کلیک نمایید', 'Click to edit': 'برای تصحیح نمودن کلیک نمایید', 'Click where you want to open Streetview': 'سرک که می خواهید ببینید را کلیک نمایید', 'Climate Change Adaptation ': 'توافق با تغییر اقلیم', 'Climate Change Mitigation': 'کاهش تغییر اقلیم', 'Close': 'بستن', 'Close map': 'بستن نقشه', 'Closed': 'بسته شد', 'Club 25 / Pledge 25': 'انجمن 25/ وثیقه 25', 'Cluster': 'گروه', 'Cluster added': 'گروه اضافه گردید', 'Cluster Attribute': 'خواص گروه', 'Cluster deleted': 'گروه حذف گردید', 'Cluster Details': 'مشخصات گروه', 'Cluster Distance': 'فاصله گروه', 'Cluster Threshold': 'سرحد گروه', 'Cluster updated': 'گروه تجدید گردید', 'Clusters': 'گروه ها', 'Coalition added': 'پیوسته گی اضافه گردید', 'Coalition Details': 'مشخصات پیوسته گی', 'Coalition removed': 'پیوسته گی حذف گردید', 'Coalition updated': 'پیوسته گی تجدید گردید', 'Coalitions': 'پیوسته گی ها', 'Coastal Conservation ': 'مکالمات حاشیوی', 'Code': 'کود', 'Comment': 'توضیح', 'Comments': 'توضیحات', 'Commitments': 'تعهدات', 'Communication': 'ارتباطات', 'Communication Officer': 'مامور ارتباطات', 'Communities': 'اجتماعات', 'Community': 'اجتماع ', 'Community Action Planning': 'پلانگذاری فعالیت اجتماع', 'Community Added': 'اجتماع اضافه گردید', 'Community Based Health and First Aid (CBHFA)': 'کمک های اولیه و صحت اجتماع (cbhfa)', 'Community Contacts': 'تماس های اجتماع', 'Community Deleted': 'اجتماع حذف گردید', 'Community Details': 'مشخصات اجتماع', 'Community Disaster Awareness': 'اطلاع حادثه اجتماع', 'Community Early Warning Systems': 'سیستم اخطار به موقع اجتماع', 'Community Health Committees': 'هیئت صحت اجتماع', 'Community Health Initiative/Projects': 'ابتکار/پروژه های صحت اجتماع', 'Community Health Risk Assessments': 'ارزیابی خطرات 
صحی اجتماع', 'Community Mobilisation': 'حرکت دادن اجتماع', 'Community Mobilization': 'حرکت دادن اجتماع', 'Community Organisation': 'موسسه اجتماع', 'Community Organization': 'موسسه اجتماع', 'Community Preparedness': 'آماده گی اجتماع', 'Community Updated': 'اجتماع تجدید گردید', 'Company': 'کمپنی', 'Competency': 'شایسته گی', 'Competency Rating': 'رتبه بندی شایسته گی', 'Competency Rating added': 'رتبه بندی شایسته گی اضافه گردید', 'Competency Rating Catalog': 'فهرست رتبه بندی شایسته گی', 'Competency Rating deleted': 'رتبه بندی شایسته گی حذف گردید', 'Competency Rating Details': 'مشخصات رتبه بندی شایسته گی', 'Competency Rating updated': 'رتبه بندی شایسته گی تجدید گردید', 'Completed': 'تکمیل گردید', 'Complexion': 'رنگ', 'Compromised': 'توافق گردید', 'Config not found!': 'تنظیمات پیدا نشد', 'Configuration': 'تنظیمات', 'Configure Layer for this Symbology': 'لایه تنظیمات برای این سمبول شناسی', 'Confirmed': 'تایید گردید', 'Confirming Organization': 'موسسه تایید کننده', 'Construction Activities': 'فعالیت های ساختمانی', 'Construction of Water Supply Systems': 'ساخت سیستم تامین آب', 'Contact': 'تماس', 'Contact added': 'تماس اضافه گردید', 'Contact Added': 'تماس اضافه گردید', 'Contact Data': 'معلومات تماس', 'Contact deleted': 'تماس حذف گردید', 'Contact Deleted': 'تماس حذف گردید', 'Contact Details': 'مشخصات تماس', 'Contact Details updated': 'مشخصات تماس تجدید گردید', 'Contact Info': 'معلومات تماس', 'Contact Information': 'معلومات تماس', 'Contact Information Added': 'معلومات تماس اضافه گردید', 'Contact Information Deleted': 'معلومات تماس حذف گردید', 'Contact Information Updated': 'معلومات تماس تجدید گردید', 'Contact Method': 'شیوه تماس', 'Contact People': 'تماس با مردم', 'Contact Person': 'تماس با شخص', 'Contact Updated': 'تماس تجدید گردید', 'Contact Us': 'تماس با ما', 'Contact us': 'تماس با ما', 'Contacts': 'تماس ها', 'Context': 'مفهوم', 'Contingency/Preparedness Planning': 'واقعه/ پلانگذاری آماده گی', 'Contract End Date': 'تاریخ ختم قرارداد', 'Contractual Agreements 
(Community/Individual)': 'معاهدهای قراردادی (اجتماع/انفرادی)', 'Contractual Agreements (Governmental)': 'معاهده های قراردادی(دولتی)', 'Controller': 'کنترول کننده', 'Cook Islands': 'جزایر آشپزی', 'Coordinate Layer': 'لایه هماهنگی', 'Coordination and Partnerships': 'هماهنگی و مشارکت', 'Coordinator': 'هماهنگ کننده', 'COPY': 'کاپی', 'Corporate Entity': 'موجودیت سازمان', 'Could not add person record': 'ضوابط شخص اضافه نگردید', 'Could not create record.': 'ضوابط ایجاد نگردید', 'Could not generate report': 'گزارش ایجاد نگردید', 'Could not merge records. (Internal Error: %s)': '(%s :خطای داخلی) ضوابط ترکیب نگردید', "Couldn't open %s!": '%s باز کرده نتوانست', 'Country': 'کشور', 'Country Code': 'کود کشور', 'Country is required!': 'به کشور ضرورت است', 'Course': 'دوره', 'Course added': 'دوره اضافه گردید', 'Course Catalog': 'فهرست دوره', 'Course Certificate added': 'تصدیق نامه دوره اضافه گردید', 'Course Certificate deleted': 'تصدیق نامه دوره حذف گردید', 'Course Certificate Details': 'مشخصات تصدیق نامه دوره', 'Course Certificate updated': 'تصدیق نامه دوره تجدید گردید', 'Course Certificates': 'تصدیق نامه های دوره', 'Course deleted': 'دوره حذف گردید', 'Course Details': 'مشخصات دوره', 'Course updated': 'دوره تجدید گردید', 'CREATE': 'ایجاد', 'Create': 'ایجاد', "Create 'More Info'": '"معلومات بیشتر" ایجاد کنید', 'Create a new facility or ensure that you have permissions for an existing facility.': 'یک تسهیلات جدید ایجاد نمایید یا اطمینان حاصل نمایید که برای تسهیلات موجود اجازه دارید.', 'Create a new Group.': 'یک گروپ جدید ایجاد نمایید', 'Create a new organization or ensure that you have permissions for an existing organization.': 'یک موسسه جدید ایجاد نمایید یا اطمینان حاصل نمایید که برای موسسه موجود اجازه دارید.', 'Create a new Team.': 'یک تیم جدید ایجاد نمایید', 'Create Activity': 'فعالیت را اضافه کنید', 'Create Activity Type': 'نوع فعالیت را اضافه کنید', 'Create Award': 'جایزه را اضافه نمایید', 'Create Beneficiary Type': 'نوع بهره بردار را اضافه نمایید', 'Create Campaign': 'کمپاین 
را اضافه نمایید', 'Create Certificate': 'تصدیق نامه را اضافه نمایید', 'Create Cluster': 'گروه را اضافه نمایید', 'Create Coalition': 'پیوسته گی را اضافه نمایید', 'Create Community': 'انجمن را اضافه نمایید', 'Create Competency Rating': 'میزان شایسته گی را اضافه نمایید', 'Create Contact': 'تماس را اضافه نمایید', 'Create Course': 'دوره را اضافه نمایید', 'Create Department': 'اداره را اضافه نمایید', 'Create Facility': 'تسهیلات را اضافه نمایید', 'Create Facility Type': 'نوع تسهیلات را اضافه نمایید', 'Create Feature Layer': 'مشخصه لایه را اضافه نمایید', 'Create Group': 'گروپ ایجاد نمایید', 'Create Hazard': 'خطر را اضافه نمایید', 'Create Job': 'وظیفه را اضافه نمایید', 'Create Job Title': 'عنوان وظیفه را اضافه نمایید', 'Create Layer': 'لایه را اضافه نمایید', 'Create Location': 'موقعیت را اضافه نمایید', 'Create Location Hierarchy': 'سلسله مراتب موقعیت را اضافه نمایید', 'Create Mailing List': 'لست ایمیل ها را اضافه نمایید', 'Create Map Profile': 'تنظیمات نقشه را اضافه نمایید', 'Create Marker': 'علامت گذار را اضافه نمایید', 'Create Member': 'عضو را اضافه نمایید', 'Create Membership Type': 'نوعیت عضویت را اضافه نمایید', 'Create Milestone': 'مرحله مهم را اضافه نمایید', 'Create National Society': 'جامعه ملی را اضافه نمایید', 'Create Network': 'شبکه را اضافه نمایید', 'Create Office Type': 'نوع اداره را اضافه نمایید', 'Create Organization Type': 'نوع موسسه را اضافه نمایید', 'Create Partner Organization': 'همکار موسسه را اضافه نمایید', 'Create Program': 'برنامه را اضافه نمایید', 'Create Project': 'پروژه را اضافه نمایید', 'Create Projection': 'تصویر را اضافه نمایید', 'Create Record': 'ثبت را اضافه نمایید', 'Create Region': 'منطقه را اضافه نمایید', 'Create Resource': 'منبع را اضافه نمایید', 'Create Resource Type': 'نوع منبع را اضافه نمایید', 'Create Response Summary': 'خلاصه پاسخ را اضافه نمایید', 'Create Role': 'نقش جدید ایجاد نمایید', 'Create Sector': 'بخش را اضافه نمایید', 'Create Staff Member': 'عضو کارمند را اضافه نمایید', 'Create Team': 'تیم ایجاد نمایید', 'Create Training 
Event': 'رویداد آموزشی را اضافه نمایید', 'Create User': 'استفاده کننده جدید ایجاد نمایید', 'Create Volunteer': 'داوطلب را اضافه نمایید', 'Create Volunteer Cluster': 'گروه داوطلب را اضافه نمایید', 'Create Volunteer Cluster Position': 'منصب گروه داوطلب را اضافه نمایید', 'Create Volunteer Cluster Type': 'نوع گروه داوطلب را اضافه نمایید', 'Create Volunteer Role': 'نقش داوطلب را اضافه نمایید', 'created': 'ایجاد گردید', 'Created By': 'ایجاد گردید توسط', 'Created on %s': 'ایجاد گردید بالای %s', 'Created on %s by %s': 'ایجاد گردید بالای %s توسط %s', 'Credential': 'اعتبار نامه', 'Credential added': 'اعتبار نامه اضافه گردید', 'Credential deleted': 'اعتبار نامه حذف گردید', 'Credential Details': 'مشخصات اعتبار نامه', 'Credential updated': 'اعتبار نامه تجدید گردید', 'Credentialling Organization': 'اعتبار نامه موسسه', 'Credentials': 'اعتبار نامه ها', 'Crop Image': 'قطع کردن عکس', 'curly': 'حلقه حلقه', 'Currency': 'پول رایج', 'Current': 'فعلی', 'current': 'فعلی', 'Current Home Address': 'آدرس خانه فعلی', 'Current Location': 'موقعیت فعلی', 'Currently no Appraisals entered': 'در حال حاضر هیچ ارزیابی وارد نشده است', 'Currently no Certifications registered': 'در حال حاضر هیچ تصدیق ثبت نشده است', 'Currently no Course Certificates registered': 'در حال حاضر هیچ تصدیق دوره ثبت نشده است', 'Currently no Credentials registered': 'در حال حاضر هیچ اعتبار نامه ثبت نشده است', 'Currently no entries in the catalog': 'در حال حاضر هیچ ورودی در فهرست نیست', 'Currently no hours recorded for this volunteer': 'در حال حاضر هیچ ساعت برای این داوطلب ثبت نشد', 'Currently no Participants registered': 'در حال حاضر هیچ شریک اضافه نشده است', 'Currently no Professional Experience entered': 'در حال حاضر هیچ تجربه مسلکی وارد نشده است', 'Currently no programs registered': 'در حال حاضر هیچ برنامه ثبت نشده است', 'Currently no Skill Equivalences registered': 'در حال حاضر هیچ تعادل مهارت ثبت نشده است', 'Currently no Skills registered': 'در حال حاضر هیچ مهارت ثبت نشده است', 'Currently no staff assigned': 'در حال حاضر 
هیچ کارمند تعیین نشده است', 'Currently no training events registered': 'در حال حاضر هیچ رویداد آموزشی ثبت نشده است', 'Currently no Trainings registered': 'در حال حاضر هیچ آموزشی ثبت نشده است', 'CV': 'خلص سوانح', 'Daily': 'روزمره', 'Daily Work': 'کار روزمره', 'dark': 'تاریک', 'Data': 'معلومات', 'Data added to Theme Layer': 'معلومات به لایه زمینه اضافه گردید', 'Data import error': 'خطا در ورود معلومات', 'Data Type': 'نوع معلومات', 'Data uploaded': 'معلومات اپلود گردید', 'Database': 'دیتابیس', 'Database Development': 'ایجاد دیتابیس', 'Date': 'تاریخ', 'Date Created': 'تاریخ ایجاد گردید', 'Date Due': 'تاریخ پرداخت', 'Date Joined': 'تاریخ وصل گردید', 'Date Modified': 'تاریخ اصلاح گردید', 'Date must be %(max)s or earlier!': 'باید یا زود تر باشد %(max)s تاریخ', 'Date must be %(min)s or later!': 'س یا دیرتر باشد %(min)s تاریخ باید', 'Date must be between %(min)s and %(max)s!': 'تاریخ باید میان %(min)s و %(max)s باشد', 'Date of Birth': 'تاریخ تولد', 'Date Printed': 'تاریخ چاپ گردید', 'Date Received': 'تاریخ دریافت گردید', 'Date resigned': 'تاریخ ثبت گردید', 'Date/Time': 'تاریخ/زمان', 'Day': 'روز', 'De-duplicate': 'المثنی', 'De-duplicate Records': 'المثنی ضوابط', 'Dead Body': 'جسد', 'deceased': 'مرده', 'Deceased': 'مرگ', 'Decision': 'تصمیم', 'Default': 'پیش فرض', 'Default Base layer?': 'لایه مرکزی پیش فرض؟', 'Default Location': 'موقعیت پیش فرض؟', 'Default Marker': 'علامت گذار پیش فرض', 'Default Realm': 'پیش فرض قلمرو', 'Default Realm = All Entities the User is a Staff Member of': 'پیش فرض قلمرو = تمام معلومات داخل شده کارمندان عضو', 'Default?': 'پیش فرض؟', 'Defines the icon used for display of features on handheld GPS.': 'آیکن را توضیح می کند که برای نمایش مشخصه ها بالای GPS دستی استفاده می شود', 'Defines the icon used for display of features on interactive map & KML exports.': 'آیکن را توضیح می کند که برای نمایش مشخصه ها بالای نقشه متقابل و صدور KML استفاده می شود', 'Degrees in a latitude must be between -90 to 90.': 'درجه ها در یک عرض جغرافیایی باید میان -90 الی 90 باشد', 
'Degrees in a longitude must be between -180 to 180.': 'درجه ها در یک طول جغرافیایی باید میان -180 الی 180 باشد', 'Degrees must be a number.': 'درجات باید یک شماره باشد', 'DELETE': 'حذف کردن', 'Delete': 'حذف کردن', 'Delete Affiliation': 'حذف کردن پیوسته گی', 'Delete all data of this type which the user has permission to before upload. This is designed for workflows where the data is maintained in an offline spreadsheet and uploaded just for Reads.': 'حذف کردن تمام این نوع که استفاده کننده اجازه اپلود آن را داشت. این برای جریان کار طراحی شده است تا معلومات در صفحه خارج از شبکه نگهداری شود و فقط برای خواندن اپلود می شود.', 'Delete Appraisal': 'حذف کردن ارزیابی', 'Delete Award': 'حذف کردن جایزه', 'Delete Branch': 'حذف کردن نماینده گی', 'Delete Certificate': 'حذف کردن تصدیق نامه', 'Delete Certification': 'حذف کردن تصدیق', 'Delete Cluster': 'حذف کردن گروه', 'Delete Competency Rating': 'حذف کردن رتبه بندی شایسته گی', 'Delete Contact': 'حذف کردن تماس', 'Delete Contact Information': 'حذف کردن معلومات تماس', 'Delete Course': 'حذف کردن دوره', 'Delete Course Certificate': 'حذف کردن تصدیق نامه دوره', 'Delete Credential': 'حذف کردن اعتبار نامه', 'Delete Data from Theme layer': 'حذف کردن معلومات از لایه زمینه', 'Delete Department': 'حذف کردن اداره', 'Delete Deployment': 'حذف کردن گسترش', 'Delete Donor': 'حذف کردن کمک کننده', 'Delete Facility': 'حذف کردن تسهیلات', 'Delete Facility Type': 'حذف کردن نوع تسهیلات', 'Delete Feature Layer': 'حذف کردن لایه مشخصه', 'Delete Group': 'حذف کردن گروپ', 'Delete Hazard': 'حذف کردن خطر', 'Delete Hours': 'حذف کردن ساعات', 'Delete Image': 'حذف کردن عکس', 'Delete Job Title': 'حذف کردن عنوان کار', 'Delete Layer': 'حذف کردن لایه', 'Delete Location': 'حذف کردن موقعیت', 'Delete Location Hierarchy': 'حذف کردن موقعیت سلسله مراتب', 'Delete Mailing List': 'حذف کردن لست ایمیل ', 'Delete Map Profile': 'حذف کردن تنظیمات نقشه', 'Delete Marker': 'حذف کردن علامت گذار', 'Delete Member': 'حذف کردن عضو', 'Delete Membership': 'حذف کردن عضویت', 'Delete Membership 
Type': 'حذف کردن نوع عضویت', 'Delete National Society': 'حذف کردن اجتماع ملی', 'Delete Office': 'حذف کردن دفتر', 'Delete Office Type': 'حذف کردن نوع دفتر', 'Delete Organization': 'حذف کردن موسسه', 'Delete Organization Type': 'حذف کردن نوع موسسه', 'Delete Participant': 'حذف کردن شریک', 'Delete Partner Organization': 'حذف کردن شریک موسسه', 'Delete Person': 'حذف کردن شخص', 'Delete PoI Type': 'حذف کردن نوع Pol', 'Delete Point of Interest': 'حذف کردن نقطه مرود نظر', 'Delete Professional Experience': 'حذف کردن تجربه مسلکی', 'Delete Program': 'حذف کردن برنامه', 'Delete Project': 'حذف کردن پروژه', 'Delete Projection': 'حذف کردن تصویر', 'Delete Record': 'حذف کردن ضوابط', 'Delete Region': 'حذف کردن ساحه', 'Delete Resource': 'حذف کردن منبع', 'Delete Resource Type': 'حذف کردن نوع منبع', 'Delete Role': 'حذف کردن نقش', 'Delete Room': 'حذف کردن اتاق', 'Delete saved search': 'حذف کردن موارد جستجوی ذخیره شده', 'Delete Sector': 'حذف کردن بخش', 'Delete Service': 'حذف کردن خدمت', 'Delete Skill': 'حذف کردن مهارت', 'Delete Skill Equivalence': 'حذف کردن تعادل مهارت', 'Delete Skill Type': 'حذف کردن نوع مهارت', 'Delete Staff Assignment': 'حذف کردن ماموریت کارمند', 'Delete Staff Member': 'حذف کردن عضو کارمند', 'Delete Status': 'حذف کردن حالات', 'Delete Symbology': 'حذف کردن سمبول شناسی', 'Delete Theme': 'حذف کردن زمینه', 'Delete this Filter': 'حذف کردن فلتر', 'Delete Training': 'حذف کردن آموزش', 'Delete Training Event': 'حذف کردن رویداد آموزشی', 'Delete Volunteer': 'حذف کردن داوطلب', 'Delete Volunteer Cluster': 'حذف کردن گروه داوطلب', 'Delete Volunteer Cluster Position': 'حذف کردن منصب گروه داوطلب', 'Delete Volunteer Cluster Type': 'حذف کردن جایگاه گروه داوطلب', 'Delete Volunteer Role': 'حذف کردن نقش داوطلب', 'deleted': 'حذف گردید', 'Demographics': 'سرشماری', 'Department / Unit': 'اداره/بخش', 'Department added': 'اداره اضافه گردید', 'Department Catalog': 'فهرست اداره', 'Department deleted': 'اداره حذف گردید', 'Department Details': 'مشخصات اداره', 'Department updated': 'اداره تجدید گردید', 
'Deployed': 'گسترش یافت', 'Deploying NS': 'گسترش دادن NS', 'Deployment': 'گسترش دادن', 'Deployment added': 'گسترش اضافه گردید', 'Deployment Alert': 'هشدار گسترش', 'Deployment Date': 'تاریخ گسترش', 'Deployment deleted': 'گسترش حذف گردید', 'Deployment Details': 'مشخصات گسترش', 'Deployment Details updated': 'مشخصات گسترش تجدید گردید', 'Deployment Location': 'موقعیت گسترش', 'Deployments': 'گسترش ها', "Describe the procedure which this record relates to (e.g. 'medical examination')": 'دستورالعملی را که به این ضوابط مربوط می شود شرح دهید', 'Description': 'تشریحات', 'Desluding ': 'دیسلودینگ', 'Destination': 'مقصد', 'Detailed Description/URL': 'تفصیلات مقصد/URL', 'Details': 'مشخصات', 'Disable': 'غیر فعال ساختن', 'Disaster Law': 'قانون حوادث', 'Disaster Management System Officer': 'مامور مدیریت سیستم حوادث', 'Disaster Management Unit Assistant': 'همکار بخش مدیریت حوادث', 'Disaster Risk Reduction': 'کاهش خطرات حوادث', 'Disaster Type': 'نوع حادثه', 'Disease Prevention': 'جلوگیری از حادثه', 'diseased': 'مرض', 'displaced': 'تغییر مکان داده', 'Display Polygons?': 'نمایش چند ضلعی؟', 'Display Routes?': 'نمایش سرک ها؟', 'Display Tracks?': 'نمایش پیگیری ها؟', 'Display Waypoints?': 'نمایش ایستگاه', 'Distribution of Food': 'توزیع غذا', 'Distribution of Non-Food Items': 'توزیع بخش های غیر خوراکه', 'divorced': 'طلاق داده', 'DM / Relief': 'کمک / DM', 'DM Planning': 'پلانگذاری /DM', 'Do you really want to approve this record?': 'آیا شما واقعاٌ می خواهید این ثبت را تصویب نمایید؟', 'Do you really want to delete these records?': 'آیا شما واقعاٌ می خواهید این ثبت را حذف نمایید؟', 'Do you really want to delete this record? 
(This action can not be reversed)': 'آیا شما واقعاٌ می خواهید این ثبت را حذف نمایید؟ (این عمل غیر قابل بازگشت است)', 'Document Scan': 'اسکن سند', 'Documents': 'اسناد', 'Domain': 'حوزه', 'Donor': 'کمک کننده', 'Donor added': 'کمک کننده اضافه گردید', 'Donor deleted': 'کمک کننده حذف گردید', 'Donor Details': 'مشخصات کمک کننده', 'Donor updated': 'کمک کننده تجدید گردید', 'Donors': 'کمک کننده ها', 'Donors Report': 'گزارش کمک کننده ها', 'Download OCR-able PDF Form': 'شکل OCR-able PDF را دانلود نمایید', 'Draft': 'مسوده', 'Draft Features': 'مشخصه های مسوده', 'Drag an image below to crop and scale it before uploading it:': 'قبل از اپلود یک عکس را قطع و اندازه بکشید:', 'Draw a square to limit the results to just those within the square.': 'یک مربع ترسیم نمایید تا نتیجه را فقط به اندازه همان مربع محدود بسازید.', 'Driver': 'راننده', 'Driving License': 'لیسانس راننده گی', 'DRR': 'DRR', 'DRRPP Extensions': 'DDRRP گسترش ها', 'Duplicate': 'المثنی', 'Duplicate label selected': 'علامت مثنی انتخاب شده است', 'Duration': 'مدت', 'Duration (months)': 'مدت (ماه ها)', 'E-mail': 'ایمیل', 'Early Warning Systems': 'سیستم هشدار عاجل', 'Edit': 'تصحیح', 'Edit %(site_label)s Status': '%(site_label)s تصحیح حالات', "Edit 'More Info'": 'تصحیح "معلومات بیشتر"', 'Edit Activity': 'تصحیح فعالیت', 'Edit Activity Organization': 'تصحیح فعالیت موسسه', 'Edit Activity Type': 'تصحیح نوع فعالیت', 'Edit Address': 'تصحیح آدرس', 'Edit Affiliation': 'تصحیح پیوسته گی', 'Edit Annual Budget': 'تصحیح بودجه سالیانه', 'Edit Appraisal': 'تصحیح ارزیابی', 'Edit Award': 'تصحیح جایزه', 'Edit Beneficiaries': 'تصحیح فایده کننده ها', 'Edit Beneficiary Type': 'تصحیح نوع فایده کننده ', 'Edit Branch Organization': 'تصحیح نماینده گی موسسه', 'Edit Campaign': 'تصحیح کمپاین', 'Edit Certificate': 'تصحیح تصدیق نامه', 'Edit Certification': 'تصحیح تصدیق', 'Edit Cluster': 'تصحیح گروه', 'Edit Community Details': 'تصحیح مشخصات انجمن', 'Edit Competency Rating': 'تصحیح رتبه بندی شایسته گی', 'Edit Contact': 'تصحیح تماس', 'Edit Contact Details': 
'تصحیح مشخصات تماس', 'Edit Contact Information': 'تصحیح معلومات تماس', 'Edit Course': 'تصحیح دوره', 'Edit Course Certificate': 'تصحیح تصدیق نامه دوره', 'Edit Credential': 'تصحیح اعتبار نامه', 'Edit Department': 'تصحیح اداره', 'Edit Deployment Details': 'تصحیح مشخصات گسترش', 'Edit Details': 'تصحیح جزئیات', 'Edit Donor': 'تصحیح کمک کننده', 'Edit DRRPP Extensions': 'تصحیح گسترش DRRPP', 'Edit Education Details': 'تصحیح جزئیات تحصیلی', 'Edit Entry': 'تصحیح ورودی', 'Edit Experience': 'تصحیح تجربه', 'Edit Facility': 'تصحیح تسهیلات', 'Edit Facility Type': 'تصحیح نوع تسهیلات', 'Edit Feature Layer': 'تصحیح سحط مشخصه', 'Edit Group': 'تصحیح گروپ', 'Edit Hazard': 'تصحیح خطر', 'Edit Hours': 'تصحیح ساعات', 'Edit Identity': 'تصحیح هوویت', 'Edit Image Details': 'تصحیح جزئیات عکس', 'Edit Job': 'تصحیح وظیفه', 'Edit Job Title': 'تصحیح عنوان وظیفه', 'Edit Keyword': 'تصحیح کلمه کلیدی', 'Edit Layer': 'تصحیح لایه ', 'Edit Level %d Locations?': 'تصحیح درجه %d موقعیت ها؟', 'Edit Location': 'تصحیح موقعیت', 'Edit Location Details': 'تصحیح جزئیات موقعیت', 'Edit Location Hierarchy': 'تصحیح سلسله مراتب موقعیت', 'Edit Log Entry': 'تصحیح ورودی', 'Edit Logged Time': 'تصحیح زمان ورودی', 'Edit Mailing List': 'تصحیح لست ایمیل', 'Edit Map Profile': 'تصحیح تنظیمات نقشه', 'Edit Marker': 'تصحیح علامت گذار', 'Edit Member': 'تصحیح عضو', 'Edit Membership': 'تصحیح عضویت', 'Edit Membership Type': 'تصحیح نوع عضویت', 'Edit Milestone': 'تصحیح مرحله مهم', 'Edit National Society': 'تصحیح اجتماع ملی', 'Edit Network': 'تصحیح شبکه', 'Edit Office': 'تصحیح اداره', 'Edit Office Type': 'تصحیح نوع اداره', 'Edit Organization': 'تصحیح موسسه', 'Edit Organization Type': 'تصحیح نوع موسسه', 'Edit Output': 'تصحیح نتیجه', 'Edit Participant': 'تصحیح شریک', 'Edit Partner Organization': 'تصحیح موسسه همکار', 'Edit Permissions for %(role)s': '%(role)s تصحیح صلاحیت ها برای', 'Edit Person Details': 'تصحیح جزئیات شخص', "Edit Person's Details": 'تصحیح جزئیات شخص', 'Edit PoI Type': 'تصحیح نوع Pol', 'Edit Point of Interest': 'تصحیح نقطه مرود 
نظر', 'Edit Policy or Strategy': 'تصحیح پالیسی یا ستراتیژی', 'Edit Professional Experience': 'تصحیح تجربه مسلکی', 'Edit Profile Configuration': 'تصحیح تنظیمات پروفایل', 'Edit Program': 'تصحیح برنامه', 'Edit Project': 'تصحیح پروژه', 'Edit Project Organization': 'تصحیح پروژه موسسه', 'Edit Projection': 'تصحیح تصویر', 'Edit Record': 'تصحیح ضوابط', 'Edit Region': 'تصحیح ساحه', 'Edit Resource': 'تصحیح منبع', 'Edit Resource Type': 'تصحیح نوع منبع', 'Edit Response Summary': 'تصحیح خلاصه پاسخ', 'Edit Role': 'تصحیح نقش', 'Edit Room': 'تصحیح اتاق', 'Edit saved search': 'تصحیح جستجوی ذخیره شده', 'Edit Sector': 'تصحیح بخش', 'Edit Service': 'تصحیح خدمت', 'Edit Skill': 'تصحیح مهارت', 'Edit Skill Equivalence': 'تصحیح تعادل مهارت', 'Edit Skill Type': 'تصحیح نوع مهارت', 'Edit Staff Assignment': 'تصحیح ماموریت کارمند', 'Edit Staff Member Details': 'تصحیح جزئیات کارمند عضو', 'Edit Status': 'تصحیح حالات', 'Edit Symbology': 'تصحیح سمبول شناسی', 'Edit Task': 'تصحیح وظیفه', 'Edit Team': 'تصحیح تیم', 'Edit the OpenStreetMap data for this area': 'برای این ساحه معلومات باز کردن نقشه سرک ها را تصحیح نمایید', 'Edit Theme': 'تصحیح زمینه', 'Edit Theme Data': 'تصحیح زمینه معلومات', 'Edit this entry': 'این ورودی را تصحیح نمایید', 'Edit Training': 'تصحیح آموزش', 'Edit Training Event': 'تصحیح رویداد آموزشی', 'Edit Volunteer Cluster': 'تصحیح گروه داوطلب', 'Edit Volunteer Cluster Position': 'تصحیح منصب گروه داوطلب', 'Edit Volunteer Cluster Type': 'تصحیح نوع گروه داوطلب', 'Edit Volunteer Details': 'تصحیح جزئیات داوطلب', 'Edit Volunteer Role': 'تصحیح نقش داوطلب', 'Education': 'تحصیلات', 'Education & Advocacy': 'تحصیلات و وکالت', 'Education Details': 'جزئیات تحصیلات', 'Education details added': 'جزئیات تحصیلات اضافه گردید', 'Education details deleted': 'جزئیات تحصیلات حذف گردید', 'Education details updated': 'جزئیات تحصیلات تجدید گردید', 'Effort Report': 'گزارش تلاش', 'Either a shelter or a location must be specified': 'حمایت و موقعیت هر دو باید تعیین شود', 'Either file upload or image URL required.': 
'فایل اپلود شده یا آدرس عکس هردو ضرورت است', 'Email': 'ایمیل', 'Email Address': 'ایمیل آدرس', 'Emergency Contacts': 'تماس های عاجل', 'Emergency Householdwater Treatment and Storage': 'آب خانه معالجه و ذخایر عاجل', 'Emergency Shelter': 'حمایت عاجل', 'Emergency Telecommunications': 'ارتباط از راه دور عاجل', 'Emergency Water Supply': 'تامین آب عاجل', 'Enable': 'فعال', 'Enable in Default Config?': 'فعال سازی به تنظیمات پیش فرض؟', 'End Date': 'تاریخ ختم', "Enter a name to search for. You may use % as wildcard. Press 'Search' without input to list all items.": "یک نام را برای جستجو وارد نمایید. ممکن شما % را منحیث ولدکارت استعمال کنید. 'جستجو' را بدون دخولی فشار دهید تا تمام موارد را لست نمایید.", 'Enter a valid email': 'یک ایمیل معتبر وارد نمایید', 'Enter a valid phone number': 'یک شماره تماس معتبر وارد نمایید', 'enter a value': 'یک قیمت اضافه نمایید', 'Enter a value carefully without spelling mistakes, this field needs to match existing data.': 'یک قیمت را محتاطانه بدون اشتباه املایی وارد نمایید، این بخش باید با معلومات موجود تطابق نماید.', 'enter date and time': 'تاریخ و زمان را وارد نمایید', 'enter date and time in range %(min)s %(max)s': 'تاریخ و زمان را در محدوده های %(min)s %(max)s وارد نمایید', 'enter date and time on or after %(min)s': '%(min)s تاریخ و زمان را وارد نمایید یا بعد', 'enter date and time on or before %(max)s': '%(max)s تاریخ و زمان را وارد نمایید یا قبل', 'Enter some characters to bring up a list of possible matches': 'بعضی از حروف را وارد نمایید که با لست تطابق نماید', 'Enter the same password as above': 'یک رمز عبور مشابه بالا وارد نمایید', 'Enter your first name': 'اسم تان را وارد نمایید', 'Enter your organization': 'موسسه تان را وارد نمایید', 'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'وارد کردن یک شماره تماس اختیاری است، اما با انجام دادن آن شما تصویب کرده اید که پیام SMS دریافت نمایید.', 'Enterprise Development Training ': 'آموزش گسترش سرمایه گذاری', 'Entity': 'موجودی', 'Errors': 
'اخطار ها', 'ESRI Shape File': 'فایل شکل ESRI', 'Essential Staff?': 'کارمند ضروری؟', 'Estimated Reopening Date': 'برآورد معلومات دوباره باز شده', 'Ethnicity': 'قومیت', 'Euros': 'یورو ها', 'Evacuating': 'تخلیه', 'Evacuation Drills': 'تخلیه تمرینات', 'Events': 'رویداد ها', 'Excellent': 'عالی', 'Experience': 'تجربه', 'expired': 'به پایان رسید', 'Expiring Staff Contracts Report': 'گزارش تماس های کارمند سپری شده', 'Expiry (months)': 'انقضاء (ماه ها)', 'Expiry Date': 'تاریخ انقضاء', 'Export as': 'صادر کردن مانند', 'Export in %(format)s format': 'شکل %(format)s صادر کردن در', 'Export in GPX format': 'صدور در شکل GPX', 'Export in KML format': 'صدور به شکل KML', 'Export in OSM format': 'صدور به شکل OSM', 'Eye Color': 'رنگ چشم', 'Facial hair, color': 'رنگ مو های صورت', 'Facial hair, comment': 'نظر مو های صورت', 'Facial hair, length': 'طول مو های صورت', 'Facial hair, type': 'نوعیت مو های صورت', 'Facilities': 'تسهیلات', 'Facility': 'تسهیل', 'Facility added': 'وسیله اضافه شد', 'Facility Contact': 'تماس با تسهیلات', 'Facility deleted': 'وسیله خذف شد', 'Facility Details': 'جزئیات تسهیلات', 'Facility Status': 'حالات تسهیلات', 'Facility Type': 'نوع وسیله', 'Facility Type added': 'نوع وسیله اضافه گردید', 'Facility Type deleted': 'نوع وسیله حذف گردید', 'Facility Type Details': 'جزئیات نوع وسیله', 'Facility Type updated': 'نوع وسیله تجدید گردید', 'Facility Types': 'انواع تسهیلات', 'Facility updated': 'وسیله تجدید شد', 'Fail': 'ناموفق', 'Fair': 'مناسب (منصفانه)', 'Family': 'فامیل', 'fat': 'چاق', 'Fax': 'فکس', 'Feature Info': 'معلومات مشخصه', 'Feature Layer': 'لایه مشخصه', 'Feature Layer added': 'لایه مشخصه اضافه گردید', 'Feature Layer deleted': 'لایه مشخصه حذف گردید', 'Feature Layer Details': 'جزئیات لایه مشخصه', 'Feature Layer updated': 'لایه مشخصه تجدید گردید', 'Feature Layers': 'لایه مشخصه', 'Feature Namespace': 'فضای نام مشخصه', 'Feature Type': 'نوع مشخصه', 'Features Include': 'شامل مشخصه', 'Feedback': 'بازیافت', 'Feeding Programmes': 'برنامه های متغذی', 'female': 'مونث', 'Field': 
'رشته', 'File': 'فایل', 'Files': 'فایل ها', 'fill in order: day(2) month(2) year(4)': 'به ترتیب خانه پری نمایید: روز (2) ماه (2) سال (4)', 'fill in order: hour(2) min(2) day(2) month(2) year(4)': 'به ترتیب خانه پری نمایید: ساعت(2) دقیقه(2) روز (2) ماه (2) سال (4)', 'fill in order: hour(2) min(2) month(2) day(2) year(4)': 'به ترتیب خانه پری نمایید: ساعت(2) دقیقه(2) روز (2) ماه (2) سال (4)', 'fill in order: month(2) day(2) year(4)': 'به ترتیب خانه پری نمایید: ماه (2) روز (2) سال (4)', 'Filter': 'فلتر', 'Filter by Location': 'فلتر توسط موقعیت', 'Filter Options': 'گزینه های فلتر', 'Filter type': 'نوع فلتر', 'Filter type ': 'نوع فلتر', 'Finance / Admin': 'امور مالی / معاون', 'Finance Officer': 'کارمند امور مالی', 'Financial Risk Sharing ': 'اشتراک خطرات مالی', 'Financial Services': 'خدمات مالی', 'Find more': 'بیشتر پیدا کردن', 'Find on Map': 'جستجو در نقشه', 'Fingerprint': 'اثر انگشت', 'First': 'اول', 'First Aid': 'کمک اولیه', 'First Name': 'نام', 'Fleet Manager': 'مدیر سریع', 'Focal Person': 'شخص مرکزی', 'Folder': 'پوشه', 'Food Security': 'امنیت غذا', 'Food Supplementation': 'تکمیل غذا', 'For Entity': 'برای موجودی', 'For live help from the Sahana community on using this application, go to': 'برای کمک زنده از انجمن Sahana جهت استفاده ازین برنامه، برو به', 'For more details on the Sahana Eden system, see the': 'برای جزئیات بیشتر سیستم Sahana Eden ،ببینید', 'forehead': 'پیشانی', 'form data': 'از معلومات', 'Form Settings': 'از تنظیمات', 'Format': 'شکل', 'found': 'پیدا گردید', 'Frequency': 'فریکونسی', 'Full beard': '', 'Fullscreen Map': 'تصویر کامل نقشه', 'Function': 'عمل', 'Function Permissions': 'صلاحیت های عمل', 'Funding': 'سرمایه', 'Funding Report': 'گزارش سرمایه', 'Funds Contributed': 'سرمایه شریک شد', 'Gap Analysis Map': 'تحلیل نقشه فاصله ای', 'Gap Analysis Report': 'تحلیل گزارش فاصله ای', 'Gender': 'جنسیت', 'Generator': 'جنراتور', 'Geocode': 'کود جغرافیایی', 'Geocoder Selection': 'انتخاب کود جغرافیایی', 'GeoJSON Layer': 'لایه GeoJSON', 'Geometry Name': 'نام هندسه', 
'GeoRSS Layer': 'لایه GeoRSS', 'Get Feature Info': 'اخذ معلومات مشخصه', 'getting': 'گرفتن', 'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'در مورد عکس خلص توضیحات را بدهید، مثال: چی چیز بالای عکس قابل دید است (اختیاری)', 'Go': 'برو', "Go to %(url)s, sign up & then register your application. You can put any URL in & you only need to select the 'modify the map' permission.": 'ثبت نام کردن و برنامه خود را ثبت نمایید. شما هر آدرس را گذاشته می توانید و شما فقط باید اجازه "اصلاح نقشه" را انتخاب نمایید %(url)s برو به', 'Go to Functional Map': 'برو به نقشه عملیاتی', 'Goatee': 'گواتی', 'Good': 'خوب', 'Google Layer': 'لایه گوگل', 'Government': 'دولت', 'GPS Marker': 'علامت گذاری GPS', 'GPS Track': 'پیگیری GPS', 'GPS Track File': 'پیگیری فایل توسط GPS', 'GPX Layer': 'لایه GPX', 'Grade': 'رتبه', 'Graph': 'گراف', 'Great British Pounds': 'پون بریتانیای کبیر', 'Greater than 10 matches. Please refine search further': 'بیشتر از 10 تطابق. لطفا جستجو را بیشتر تصحیح نمایید', 'green': 'سبز', 'grey': 'خاکستری', 'Grid': 'شبکه', 'Group': 'گروپ', 'Group added': 'گروپ اضافه گردید', 'Group deleted': 'گروپ حذف گردید', 'Group description': 'توضیحات گروپ', 'Group Description': 'توضیحات گروپ', 'Group Details': 'جزئیات گروپ', 'Group Head': 'رئیس گروپ', 'Group Leader': 'رهبر گروپ', 'Group Member added': 'عضو گروپ اضافه گردید', 'Group Members': 'اعضای گرووپ', 'Group Name': 'اسم گروپ', 'Group Type': 'نوع گروپ', 'Group updated': 'گروپ تجدید گردید', 'Grouped by': 'گروپ بندی گردید توسط', 'Groups': 'گروپ ها', 'Hair Color': 'رنگ مو', 'Hair Comments': 'توضیحات مو', 'Hair Length': 'درازی مو', 'Hair Style': 'ستایل مو', 'Hand Washing Facilities': 'تسهیلات شست شوی دست', 'Hazard': 'خطر', 'Hazard added': 'خطر اضافه گردید', 'Hazard added to Project': 'خطر به پروژه اضافه گردید', 'Hazard deleted': 'خطر حذف گردید', 'Hazard Details': 'جزئیات خطر', 'Hazard removed from Project': 'خطر از پروژه حذف گردید', 'Hazard updated': 'خطر تجدید گردید', 'Hazards': 'خطرات', 
'Headquarters': 'دفتر مرکزی', 'Health': 'صحت', 'Health Awareness, Promotion': 'آگاهی صحی، ترویج', 'Health Facilities - Construction and Operation': 'تسهیلات صحی - ساختمانی و عملیات', 'Health Policy, Strategy Development': 'پالیسی صحی، تشکیل استراتیژی', 'Height': 'ارتفاع', 'Height (cm)': 'ارتفاع (cm)', 'Heliport': 'میدان هلیکوپتر', 'Help': 'کمک', 'HFA': 'HFA', 'HFA Priorities': 'تقدم های HFA', 'HFA1: Ensure that disaster risk reduction is a national and a local priority with a strong institutional basis for implementation.': '1HFA: مطمیین شوید که کاهش خطر آفات یک اولویت ملی و منطقوی همراه با یک اساس تاسیساتی برای اجرای امور.', 'HFA2: Identify, assess and monitor disaster risks and enhance early warning.': '2HFA: از خطرات حادثه آگاهی، تعیین و تعیین هوویت نمایید و هشدار عاجل را بالا ببرید', 'HFA3: Use knowledge, innovation and education to build a culture of safety and resilience at all levels.': '3HFA: برای ساختن فرهنگ محافظوی و ارتجاعی بودن درجات از علم، بدعت و تحصیل استفاده کنید.', 'HFA4: Reduce the underlying risk factors.': '4HFA: عوامل اساسی حادثه را کاهش دهید.', 'HFA5: Strengthen disaster preparedness for effective response at all levels.': '5HFA: آماده گی های خطر را برای پاسخ گویی موثر به تمام درجات محکم کنید. ', 'Hide': 'پنهانی', 'Hide Chart': 'جدول پنهان', 'Hide Pivot Table': 'محور میز پنهان', 'Hide Table': 'میز پنهان', 'Hierarchy': 'سلسله مراتب', 'Hierarchy Level 1 Name (e.g. State or Province)': 'نام سلسله مراتب درجه 1 (مثال: ایالت یا ولایت)', 'Hierarchy Level 2 Name (e.g. District or County)': 'نام سلسله مراتب درجه 2 (مثال: ناحیه یا کشور)', 'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'نام سلسله مراتب درجه 3 (مثال: شهر، شهرک، قریه)', 'Hierarchy Level 4 Name (e.g. 
Neighbourhood)': 'نام سلسله مراتب درجه 4 (مثال: همسایه گی)', 'Hierarchy Level 5 Name': 'نام سلسله مراتب درجه 5', 'High': 'بلند', 'Highest Priority Open Requests': 'باز کردن درخواست های بلندترین برتری', 'Hindu': 'هندو', 'Home Address': 'آدرس منزل', 'Home Country': 'کشور اصلی', 'Home Phone': 'شماره تماس', 'Hospital': 'شفاخانه', 'Hospitals': 'شفاخانه ها', 'Host': 'میزبان', 'Host National Society': 'میزبان انجمن ملی', 'Hour': 'ساعت', 'Hourly': 'مدت کم', 'Hours': 'ساعات', 'hours': 'ساعات', 'Hours added': 'ساعات اضافه گردید', 'Hours by Program Report': 'ساعات با گزارش برنامه', 'Hours by Role Report': 'ساعات با گزارش نقش', 'Hours deleted': 'ساعات حذف گردید', 'Hours Details': 'جزئیات ساعات', 'Hours updated': 'ساعات تجدید گردید', 'House Design': 'دیزاین خانه', 'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'چقدر جزئیات دیده شده است. زوم کردن زیاد جزئیات زیاد را نشان میدهد اما ساحه وسیع را نشان نمیدهد. زوم کردن کم ساحه وسیع را نشان می دهد اما جزئیات زیاد را ارائه نمی کند', 'How often you want to be notified. If there are no changes, no notification will be sent.': 'چقدر شما می خواهید اطلاع پیدا کنید. 
اگر هیچ تغییراتی نباشد، هیچ اطلاعیه ای فرستاده نخواهد شد.', 'How you want to be notified.': 'چگونه شما می خواهید مطلع شوید.', 'HTML': 'HTML', 'Human Resource': 'منبع انسانی', 'Human Resources': 'منابع انسانی', 'I agree to the %(terms_of_service)s': '%(terms_of_service)s من موافق هستم با', 'ICBRR Staff': 'کارمند ICBRR', 'ID': 'هوویت', 'ID Tag Number': 'شماره ضمیمه هوویت', 'ID type': 'نوع هوویت', 'Identities': 'هویت ها', 'Identity': 'هویت', 'Identity added': 'هویت اضافه گردید', 'Identity deleted': 'هویت حذف گردید', 'Identity Details': 'جزئیات هویت', 'Identity updated': 'هویت تجدید گردید', 'IEC Materials': 'مواد IEC', 'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'اگر یک استعمال کننده شناسایی کند که آنها دارای یک ایمیل آدرس با این دومین هستند، زمینه ثابت کننده توسط کسی که ثابت نمودن بیشتر را نیازمند باشد پر می شود.', 'If checked, the notification will contain all modified records. If not checked, a notification will be send for each modified record.': 'در صورتیکه انتخاب شد، اطلاعیه شامل تمام ضوابط توضیح شده می باشد. 
در صورتیکه انتخاب نشد، برای هر ضبط توضیح شده یک اطلاعیه فرستاده خواهد گردید.', 'If it is a URL leading to HTML, then this will downloaded.': 'اگر یک آدرس به طرف HTML می رود، این بارگیری خواهد شد', 'If neither are defined, then the Default Marker is used.': 'اگر هیچ کدام توضیح نشده است، انتخاب پیش فرض استفاده شده است.', 'If not found, you can have a new location created.': 'در صورتیکه پیدا نشد، شما می توانید یک موقعیت جدید انتخاب نمایید', 'If the location is a geographic area, then state at what level here.': 'اگر موقعیت ساحه جغرافیایی است، پس درجه مکان خویش را بگویید', 'If the person counts as essential staff when evacuating all non-essential staff.': 'هنگام خروج کارمندان غیر ضروری اگر شخص منحیث کارمند ضروری است', 'If there are multiple configs for a person, which should be their default?': 'اگر برای یک شخص تنظیمات مختلف موجود است، کدام باید پیش فرض باشد؟', "If this configuration is displayed on the GIS config menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'اگر این تنظیمات در مینوی تنظیمات GIS نمایش داده شد، برای استفاده از آن یک نام بدهید. 
یک نام برای تنظیمات نقشه شخصی مطابق اسم استفاده کننده تنظیم می شود.', "If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'اگر این درین رشته جمعیت زیاد باشد پس استفاده کننده ای که این موسسه را تعیین می کند هنگام ثبت نام کردن منحیث یک کارمند این موسسه مقرر خواهد گردید مگر اینکه دومین آنها با دومین رشته فرق داشته باشد.', 'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'اگر درین بخش جمعیت زیاد است پس یک استفاده کننده با دومین مشخص اتومات به شکل کارمند این موسسه مقرر خواهد شد', 'If this record should be restricted then select which role is required to access the record here.': 'اگر این ضبط باید محدود گردد پس نقشی که برای دسترسی به این ضبط ضرورت است را انتخاب نمایید', 'If this record should be restricted then select which role(s) are permitted to access the record here.': 'اگر این ضبط باید محدود گردد پس نقش هایی که مجوز دارند به این ضبط دسترسی داشته باشند را انتخاب نمایید', "If you don't see the activity in the list, you can add a new one by clicking link 'Create Activity'.": 'اگر شما نوع را در این لست نمی بینید، شما می توانید با فشار دادن لینک "اضافه کردن فعالیت" نوع جدید آن را بسازید.', "If you don't see the beneficiary in the list, you can add a new one by clicking link 'Create Beneficiary'.": 'اگر شما فایده گیرنده را در لست نمی بینید، شما می توانید با فشار دادن لینک "اضافه کردن فایده گیرنده" جدید آن را بسازید.', "If you don't see the campaign in the list, you can add a new one by clicking link 'Create Campaign'.": 'اگر شما کمپاین را در لست نمی بینید، شما می توانید با فشار دادن لینک "اضافه کردن کمپاین" جدید آن را بسازید.', "If you don't see the Cluster in the list, you can add a new one by clicking link 'Create Cluster'.": 'اگر شما گروه را در لست نمی بینید، شما می توانید جدید آن را با فشار دادن لینک "اضافه کردن یک گروه جدید" اضافه نمایید ', "If you 
don't see the community in the list, you can add a new one by clicking link 'Create Community'.": 'اگر شما انجمن را در لست نمی بینید، شما می توانید با فشار دادن لینک "اضافه کردن انجمن" جدید آن را بسازید.', "If you don't see the location in the list, you can add a new one by clicking link 'Create Location'.": 'اگر شما موقعیت را در لست نمی بینید، شما می توانید با فشار دادن لینک " اضافه کردن موقعیت" جدید آن را بسازید.', "If you don't see the milestone in the list, you can add a new one by clicking link 'Create Milestone'.": 'اگر شما مرحله مهم را در لست نمی بینید، شما می توانید با فشار دادن لینک " اضافه کردن مرحله مهم" جدید آن را بسازید.', "If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": 'اگر شما موسسه را در لست نمی بینید، شما می توانید با فشار دادن لینک "اضافه کردن موسسه" یک موسسه جدید بسازید.', "If you don't see the project in the list, you can add a new one by clicking link 'Create Project'.": 'اگر شما لست پروژه را نمی بینید، شما می توانید با فشار دادن لینک "اضافه کردن "پروژه" جدید آن را بسازید.', "If you don't see the Sector in the list, you can add a new one by clicking link 'Create Sector'.": 'اگر شما بخش را در لست نمی بینید، شما می توانید یک بخش جدید را با فشار دادن لینک "اضافه کردن بخش جدید" بسازید.', "If you don't see the type in the list, you can add a new one by clicking link 'Create Activity Type'.": 'اگر شما نوع را در لست نمی بینید، شما می توانید با فشار دادن لینک "اضافه کردن نوع فعالیت" جدید آن را بسازید.', "If you don't see the Type in the list, you can add a new one by clicking link 'Create Facility Type'.": 'اگر شما نوع را در لست نمی بینید، شما می توانید یک نوع جدید را با فشار دادن لینک "اضافه نمودن نوع تسهیلات" بسازید.', "If you don't see the Type in the list, you can add a new one by clicking link 'Create Office Type'.": 'اگر شما نوع را در این لست نمی بینید، می توانید با فشار دادن لینک "اضافه کردن نوع دفتر" یک دفتر جدید بسازید.', "If you don't see the Type in the list, you can add a new 
one by clicking link 'Create Organization Type'.": 'اگر شما نوع را در این لست نمی بینید، شما می توانید با فشار دادن لینک "اضافه کردن نوع موسسه" نوع جدید آن را بسازید.', "If you don't see the Type in the list, you can add a new one by clicking link 'Create Region'.": 'اگر شما نوع را در لست نمی بینید، شما می توانید یک نوع جدید را با فشار دادن لینک "اضافه نمودن ناحیه" بسازید.', "If you enter a foldername then the layer will appear in this folder in the Map's layer switcher. A sub-folder can be created by separating names with a '/'": 'اگر شما یک نام پوشه را وارد می کنید و لایه در این پوشه در تعویض کننده لایه نقشه ظاهر می شود. یک پوشه فرعی با نام های جداگانه a`/` ایجاد شده می تواند.', 'If you have any questions or need support, please see': 'اگر شما سوال یا به کمک ضرورت دارید، لطفاً این را ببینید', 'If you would like to help, then please %(sign_up_now)s': '%(sign_up_now)s اگر شما خواستید کمک نمایید پس لطفاً', 'ignore': 'نادیده گرفتن', 'Ignore Errors?': 'رد کردن خطاها؟', 'Image': 'عکس', 'Image added': 'عکس اضافه گردید', 'Image deleted': 'عکس حذف گردید', 'Image Details': 'جزئیات عکس', 'Image File(s), one image per page': 'فایل (های) عکس، یک عکس در هر صفحه', 'Image Type': 'نوع عکس', 'Image updated': 'عکس تجدید گردید', 'Images': 'عکس ها', 'Immediately': 'فوراً', 'Immunisation Campaigns': 'کمپاین ایمن سازی', 'Import': 'ورود', 'Import Activity Data': 'ورود معلومات فعالیت', 'Import Activity Type data': 'ورود نوع معلومات فعالیت', 'Import Annual Budget data': 'ورود معلومات بودجه سالیانه', 'Import Awards': 'ورود جوایز', 'Import Certificates': 'ورود تصدیق نامه ها', 'Import Community Data': 'ورود معلومات انجمن', 'Import Contacts': 'ورود تماس ها', 'Import Courses': 'ورود دوره ها', 'Import Data': 'ورود معلومات', 'Import Data for Theme Layer': 'ورود معلومات برای لایه زمینه', 'Import Departments': 'ورود اداره ها', 'Import Deployments': 'ورود گسترش ها', 'Import Facilities': 'ورود تسهیلات', 'Import Facility Types': 'ورود انواع تسهیلات', 'Import from CSV': 'ورود از CSV', 'Import from 
OpenStreetMap': 'ورود از باز کردن نقشه سرک', 'Import Hazard data': 'ورود معلومات خطرات', 'Import Hazards': 'ورود خطرات', 'Import Hours': 'ورود ساعات', 'Import Layers': 'ورود سطوح', 'Import Location Data': 'ورود معلومات موقعیت', 'Import Location data': 'ورود معلومات موقعیت', 'Import Locations': 'ورود موقعیت ها', 'Import Logged Time data': 'ورود معلومات زمان وارد شده', 'Import Members': 'ورود اعضاء', 'Import Membership Types': 'ورود انواع عضویت', 'Import Offices': 'ورود دفاتر', 'Import Organizations': 'ورود موسسه ها', 'Import Participant List': 'ورود لست شریک', 'Import Participants': 'ورود شریک ها', 'Import Partner Organizations': 'ورود موسسه های شریک', 'Import PoI Types': 'ورود انواع Pol', 'Import Points of Interest': 'ورود نقطه های مورد نظر', 'Import Policies & Strategies': 'ورود پالیسی ها و استراتیژی ها', 'Import Project Organizations': 'ورود پروژه های موسسه', 'Import Projects': 'ورود پروژه ها', 'Import Red Cross & Red Crescent National Societies': 'ورود انجمن های ملی هلال احمر و صلیب سرخ', 'Import Resource Types': 'ورود انواع منبع', 'Import Resources': 'ورود منابع', 'Import Sector data': 'ورود معلومات بخش', 'Import Service data': 'ورود معلومات خدمت', 'Import Services': 'ورود خدمات', 'Import Staff': 'ورود کارمند', 'Import Tasks': 'ورود وظیفه', 'Import Theme data': 'ورود معلومات زمینه', 'Import Training Events': 'ورود رویداد های آموزشی', 'Import Training Participants': 'ورود اشتراک کننده های آموزشی', 'Import Volunteer Cluster Positions': 'ورود منصب های داوطلب گروه', 'Import Volunteer Cluster Types': 'ورود انواع داوطلب گروه', 'Import Volunteer Clusters': 'ورود گروه ها داوطلب', 'Import Volunteers': 'ورود داوطلبین', 'Improved Production Techniques': 'تکنیک های تولید توسعه یافته', 'In error': 'در خطا', 'In order to be able to edit OpenStreetMap data from within %(name_short)s, you need to register for an account on the OpenStreetMap server.': 'شما باید یک حساب در سرور باز کردن سرک ثبت نام نمایید ، %(name_short)s جهت قابل تصحیح ساختن معلومات باز کردن نقشه سرک از', 
'Inactive': 'غیر فعال', 'InBox': 'صندوق پیام ها', 'Incident': 'حادثه', 'Incident Categories': 'طبقات حادثه', 'Incident Reports': 'گزارشات حادثه', 'Incidents': 'حوادث', 'Incorrect parameters': 'پارامتر های غلط', 'Infant (0-1)': 'طفل (0-1)', 'Infant and Young Child Feeding': 'تغذیه اطفال و جوان ', 'Inherited?': 'میراث گرفته؟', 'Initials': 'نخستین ها', 'injured': 'آسیب دیده', 'input': 'ورودی', 'Installation of Rainwater Harvesting Systems': 'نصب سیستم ذخیره آب باران', 'Instructor': 'آموزگار', 'insufficient number of pages provided': 'شماره های کمبود صفحات تهیه گردید', 'Insufficient Privileges': 'امتیازات نا کافی', 'Insufficient vars: Need module, resource, jresource, instance': 'کمبود های دیگر: به طرح، منبع، j منبع، نمونه ضرورت است', 'Insurance ': 'اطمینان', 'Integrity error: record can not be deleted while it is referenced by other records': 'خطای کلی: زمانی که ثبت در مرجع ثبت دیگری است غیر قابل حذف می باشد', 'Intergovernmental': 'چند دولتی', 'Invalid data: record %(id)s not accessible in table %(table)s': '%(table)s در جدول قابل دسترسی نیست %(id)s معلومات غیر معتبر: ثبت', 'Invalid form (re-opened in another window?)': 'شکل غیر معتبر (در پنجره دیگر نیز باز شده؟)', 'Invalid Location!': 'موقعیت غیر معتبر!', 'Invalid phone number': 'شماره تماس غیر معتبر', 'Invalid phone number!': 'شماره تماس غیر معتبر!', 'Invalid request': 'درخواست غیر معتبر', 'Invalid Site!': 'ساحه غیر معتبر!', 'Invalid source': 'منبع غیر معتبر', 'Inventory': 'فهرست', 'Inventory Items': 'بخش های موجودی', 'Irrigation and Watershed Management': 'مدیریت آبگردان و آبیاری', 'Is editing level L%d locations allowed?': 'آیا تصحیح موقعیت های درجه L%d اجازه دارد؟', 'Is this a strict hierarchy?': 'آیا این نص صریح سلسله مراتب است؟', 'Issuing Authority': 'صدور اختیار', 'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'این تنها مکان ها فعال را تسخیر نمی کند، بلکه معلومات که آنها بالای محدوده هر پروژه در هر ساحه تهیه می 
کنند را نیز تسخیر می کند', 'IT Telecom': 'ارتباطات معلومات تکنالوژی (IT)', 'Item Categories': 'طبقات بخش', 'Items': 'بخش ها', 'Jewish': 'یهودی', 'JNAP Priorities': 'اولویت های JNAP', 'JNAP-1: Strategic Area 1: Governance': 'JANP-1 : ساحه استراتیژی 1: اداره', 'JNAP-2: Strategic Area 2: Monitoring': 'JANP-2: ساحه استراتیژی 2: نظارت', 'JNAP-3: Strategic Area 3: Disaster Management': 'JANP-3: ساحه استراتیژی 3:مدیریت حادثه', 'JNAP-4: Strategic Area 4: Risk Reduction and Climate Change Adaptation': 'JANP-4: ساحه استراتیژی4: کاهش خطر و توافق با تغییرات اقلیم', 'Job added': 'وظیفه اضافه گردید', 'Job deleted': 'وظیفه حذف گردید', 'Job Schedule': 'تقسیم اوقات کار', 'Job Title': 'عنوان وظیفه', 'Job Title added': 'عنوان وظیفه اضافه گردید', 'Job Title Catalog': 'فهرست عنوان وظیفه', 'Job Title deleted': 'عنوان وظیفه حذف گردید', 'Job Title Details': 'جزئیات عنوان وظیفه', 'Job Title updated': 'عنوان وظیفه تجدید گردید', 'Job updated': 'وظیفه تجدید گردید', 'Joint National Action Plan for Disaster Risk Management and Climate Change Adaptation. Applicable to Cook Islands only': 'پلان اعمال مشترک ملی برای اداره خطرات آفات و توافق تغییرات اقلیمی. 
فقط به جزیره کوک قابل تطبیق است', 'Journal': 'روزنامه', 'Journal entry added': 'ورودی روزنامه اضافه گردید', 'Journal entry deleted': 'ورودی روزنامه حذف گردید', 'Journal Entry Details': 'جزئیات ورودی روزنامه', 'Journal entry updated': 'ورودی روزنامه تجدید گردید', 'JS Layer': 'لایه JS', 'Keep Duplicate': 'حفظ المثنی', 'Keep Original': 'حفظ اصلی', 'Key': 'کلید', 'Key Value pairs': 'جوره های قیمت کلیدی', 'Keyword': 'کلمه کلیدی', 'Keyword Added': 'کلمه کلیدی اضافه گردید', 'Keyword Deleted': 'کلمه کلیدی حذف گردید', 'Keyword Updated': 'کلمه کلیدی تجدید گردید', 'Keywords': 'کلمه های کلیدی', 'KML Layer': 'لایه KML', 'Knowledge Management': 'مدیریت علمیت', 'Language': 'لسان', 'Last': 'آخرین', 'Last Checked': 'آخرین بررسی', 'Last Contacted': 'آخرین تماس', 'Last known location': 'آخرین موقعیت شناخته شده', "Last Month's Work": 'کار آخرین ماه', 'Last Name': 'تخلص', 'Last run': 'آخرین حرکت', 'Last status': 'آخرین حالات', 'Last updated': 'آخرین تجدید', "Last Week's Work": 'کار هفته اخیر', 'Latitude': 'عرض جغرافیایی', 'Latitude & Longitude': 'عرض جغرافیایی و طول جغرافیایی', 'Latitude and Longitude are required': 'به عرض جغرافیایی و طول جغرافیایی ضرورت است', 'Latitude is Invalid!': 'عرض جغرافیایی غیر معتبر است!', 'Latitude is North - South (Up-Down).': 'عرض جغرافیایی است شمال - جنوب (بالا - پایین).', 'Latitude is North-South (Up-Down).': 'عرض جغرافیایی است شمال - جنوب (بالا - پایین).', 'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'عرض جغرافیایی در خط استوا صفر و در نیم کره شمالی مثبت و در نیم کرده جنوبی منفی است.', 'Latitude must be between -90 and 90.': 'عرض جغرافیایی باید در میان -90 و 90 باشد.', 'Latitude of far northern end of the region of interest.': 'عرض جغرافیایی آخرین نقطه شمالی ساحه مورد نظر', 'Latitude of far southern end of the region of interest.': 'عرض جغرافیایی آخرین نقطه جنوبی ساحه مورد نظر', 'Latitude of Map Center': 'عرض جغرافیایی مرکز نقشه', 'Latitude should be between': 'عرض جغرافیایی باید 
میان ', 'Latrine Construction': 'ساختن مستراح', 'Layer': 'لایه', 'Layer added': 'لایه اضافه گردید', 'Layer deleted': 'لایه حذف گردید', 'Layer Details': 'جزئیات لایه', 'Layer has been Disabled': 'لایه غیر فعال گردید', 'Layer has been Enabled': 'لایه فعال گردید', 'Layer Name': 'نام لایه', 'Layer Properties': 'مشخصه های لایه', 'Layer removed from Symbology': 'لایه از سمبول شناسی حذف گردید', 'Layer updated': 'لایه تجدید گردید', 'Layers': 'سطوح', 'Lead Implementer': 'هدایت انجام دهنده', 'Lead Implementer for this project is already set, please choose another role.': 'هدایت انجام دهنده برای این پروژه قبلاً تنظیم گردیده است، لطفاً نقش دیگری را انتخاب نمایید.', 'Leader': 'رهبر', 'Left-side is fully transparent (0), right-side is opaque (1.0).': 'سمت-چپ کاملا شفاف است (0)، سمت راست غیر شفاف است (1.0).', 'Legal Approvals': 'تصویبات قانونی', 'Legend': 'افسانه', 'Legend URL': 'آدرس افسانه', 'less': 'کم', 'Less Options': 'گزینه ها کم', 'Level': 'سطح', "Level is higher than parent's": 'سطح از والدین بلندتر است', 'Level of Award': 'سطح جایزه', 'Level of competency this person has with this skill.': 'سطح شایسته گی که این شخص با این مهارت دارد', 'License Number': 'شماره جواز', 'light': 'روشن', 'Link to this result': 'اتصال به این نتیجه', 'List': 'لست', 'List %(site_label)s Status': 'لست %(site_label)s حالات', 'List Activities': 'لست فعالیت ها', 'List Activity Organizations': 'لست فعالیت موسسه ها', 'List Activity Types': 'لست انواع فعالیت', 'List Addresses': 'لست کردن آدرس ها', 'List Affiliations': 'لست پیوسته گی ها', 'List All': 'لست کردن همه', 'List All Community Contacts': 'لست کردن تماس تمام انجمن ها', 'List Annual Budgets': 'لست کردن بودجه سالیانه', 'List Awards': 'لست کردن جوایز', 'List Beneficiaries': 'لست کردن فایده گیرنده ها', 'List Beneficiary Types': 'لست کردن انواع فایده گیرنده ها', 'List Branch Organizations': 'لست کردن نماینده گی موسسه ها', 'List Campaigns': 'لست کردن کمپاین ها', 'List Certificates': 'لست کردن تصدیق نامه ها', 'List Certifications': 'لست کردن تصدیق ها', 
'List Clusters': 'لست کردن گروه ها', 'List Coalitions': 'لست کردن پیوسته گی ها', 'List Communities': 'لست کردن انجمن ها', 'List Competency Ratings': 'لست کردن رتبه های شایسته گی', 'List Contact Information': 'لست کردن معلومات تماس', 'List Contacts': 'لست کردن تماس ها', 'List Course Certificates': 'لست کردن تصدیق نامه های دوره', 'List Courses': 'لست کردن دوره', 'List Credentials': 'لست کردن اعتبار نامه ها', 'List Data in Theme Layer': 'لست کردن معلومات در لایه زمینه', 'List Departments': 'لست کردن اداره ها', 'List Deployments': 'لست کردن گسترش ها', 'List Donors': 'لست کردن کمک کننده ها', 'List Education Details': 'لست کردن جزئیات تحصیلات', 'List Facilities': 'لست کردن تسهیلات', 'List Facility Types': 'لست کردن انواع تسهیلات', 'List Feature Layers': 'لست کردن سطوح مشخصه ', 'List Groups': 'لست کردن گروپ ها', 'List Hazards': 'لست کردن خطرات', 'List Hours': 'لست کردن ساعات', 'List Identities': 'لست کردن هوویت ها', 'List Images': 'لست کردن عکس ها', 'List Job Titles': 'لست کردن عناوین وظیفه', 'List Jobs': 'لست کردن وظایف', 'List Keywords': 'لست کردن کلمات کلیدی', 'List Layers': 'لست کردن سطوح مشخصه ', 'List Layers in Profile': 'لست کردن سطوح در پروفایل', 'List Layers in Symbology': 'لست کردن سطوح در سمبول شناسی', 'List Location Hierarchies': 'لست کردن سلسله های مراتب موقعیت', 'List Locations': 'لست کردن موقعیت ها ', 'List Log Entries': 'لست کردن ورودی ها', 'List Logged Time': 'لست کردن لست ایمیل ها', 'List Mailing Lists': 'لست کردن تنظیمات نقشه', 'List Map Profiles': 'لست کردن علامت گذار ها', 'List Markers': 'لست کردن انواع عضویت', 'List Members': 'لست کردن عضویت', 'List Membership Types': 'لست کردن انواع عضویت', 'List Memberships': 'لست کردن عضویت ها', 'List Milestones': 'لست کردن مراحل مهم', 'List Networks': 'لست کردن شبکه ها', 'List of Appraisals': 'لست کردن ارزیابی ها', 'List of Facilities': 'لست کردن تسهیلات', 'List of Professional Experience': 'لست کردن تجربه مسلکی', 'List of Roles': 'لست کردن نقش ها', 'List Office Types': 'لست کردن انواع دفتر', 'List Offices': 'لست 
کردن دفاتر', 'List Organization Types': 'لست کردن انواع موسسه', 'List Organizations': 'لست کردن موسسه ها', 'List Outputs': 'لست کردن نتایج', 'List Participants': 'لست کردن اشتراک کننده ها', 'List Partner Organizations': 'لست کردن موسسه های همکار', 'List Persons': 'لست کردن اشخاص', "List Persons' Details": 'لست کردن جزئیات شخص', 'List PoI Types': 'لست کردن انواع Pol', 'List Points of Interest': 'لست کردن نقطه های مورد نظر', 'List Policies & Strategies': 'لست کردن استراتیژی ها و پالیسی ها', 'List Profiles configured for this Layer': 'لست کردن پروفایل های که برای این لایه تنظیم گردیده است', 'List Programs': 'لست کردن برنامه ها', 'List Project Organizations': 'لست کردن پروژه موسسه ها', 'List Projections': 'لست کردن تصاویر', 'List Projects': 'لست کردن پروژه ها', 'List Records': 'لست کردن ضوابط', 'List Red Cross & Red Crescent National Societies': 'لست کردن جوامع ملی صلیب سرخ و هلال احمر', 'List Regions': 'لست کردن نواحی', 'List Response Summaries': 'لست کردن خلاصه های پاسخ', 'List Roles': 'لست کردن نقش ها', 'List Rooms': 'لست کردن اتاق ها', 'List saved searches': 'لست کردن جستجو های ذخیره شده', 'List Sectors': 'لست کردن بخش ها', 'List Services': 'لست کردن خدمات', 'List Skill Equivalences': 'لست کردن تعادل مهارت', 'List Skill Types': 'لست کردن انواع مهارت', 'List Skills': 'لست کردن مهارت ها', 'List Staff & Volunteers': 'لست کردن کارمند و داوطلبان', 'List Staff Assignments': 'لست کردن ماموریت های کارمند', 'List Staff Members': 'لست کردن اعضای کارمندان', 'List Statuses': 'لست کردن جایگاه ها', 'List Symbologies': 'لست کردن سمبول شناسی ها', 'List Symbologies for Layer': 'لست کردن سمبول شناسی ها برای لایه', 'List Tasks': 'لست کردن وظایف', 'List Teams': 'لست کردن تیم ها', 'List Themes': 'لست کردن زمینه ها', 'List Training Events': 'لست کردن رویداد های آموزشی', 'List Trainings': 'لست کردن آموزش ها', 'List Volunteer Cluster Positions': 'لست کردن منصب های گروه داوطلب', 'List Volunteer Cluster Types': 'لست کردن انواع گروه داوطلب', 'List Volunteer Clusters': 'لست کردن گروه های 
داوطلب', 'List Volunteer Roles': 'لست کردن نقش های داوطلب', 'List Volunteers': 'لست کردن داوطلبان', 'Live Help': 'کمک زنده', 'Livelihood / CTP': 'معاش / CTP', 'Livelihood Manager': 'مدیر معاشات', 'Livelihoods': 'معاشات', 'Load': 'بارگیری', 'Load Cleaned Data into Database': 'بارگیری کردن معلومات پاک شده در دیتابیس', 'Load Raw File into Grid': 'بارگیری کردن فایل ها خام در شبکه', 'Loading': 'در حال بارگیری', 'Local Currency': 'پول رایج محلی', 'Local Name': 'اسم محلی', 'Local Names': 'اسم ها محلی', 'Location': 'موقعیت', 'Location added': 'موقعیت اضافه گردید', 'Location Added': 'موقعیت اضافه گردید', 'Location added to Organization': 'موقعیت به موسسه اضافه گردید', 'Location deleted': 'موقعیت حذف گردید', 'Location Deleted': 'موقعیت حذف گردید', 'Location Details': 'جزئیات موقعیت', 'Location Found': 'موقعیت پیدا گردید', 'Location Group': 'موقعیت گروپ', 'Location Hierarchies': 'سلسله ها مراتب موقعیت', 'Location Hierarchy': 'سلسله مراتب موقعیت', 'Location Hierarchy added': 'سلسله مراتب موقعیت اضافه گردید', 'Location Hierarchy deleted': 'سلسله مراتب موقعیت حذف گردید', 'Location Hierarchy Level 1 Name': 'نام سلسله مراتب موقعیت درجه 1', 'Location Hierarchy Level 2 Name': 'نام سلسله مراتب موقعیت درجه 2', 'Location Hierarchy Level 3 Name': 'نام سلسله مراتب موقعیت درجه 3', 'Location Hierarchy Level 4 Name': 'نام سلسله مراتب موقعیت درجه 4', 'Location Hierarchy Level 5 Name': 'نام سلسله مراتب موقعیت درجه 5', 'Location Hierarchy updated': 'سلسله مراتب موقعیت تجدید گردید', 'Location is Required!': 'به موقعیت ضرورت است!', 'Location needs to have WKT!': 'موقعیت باید دارای WKT باشد!', 'Location NOT Found': 'موقعیت پیدا نشد', 'Location removed from Organization': 'موقعیت از موسسه حذف گردید', 'Location updated': 'موقعیت تجدید گردید', 'Locations': 'موقعیت ها', 'Locations of this level need to have a parent of level': 'موقعیت های این درجه باید درجه بلند تر داشته باشد', 'Log entry added': 'ورودی اضافه گردید', 'Log Entry Deleted': 'ورودی حذف گردید', 'Log Entry Details': 'جزئیات ورودی', 'Log 
entry updated': 'ورودی تجدید گردید', 'Log New Time': 'زمان ورودی جدید', 'Log Time Spent': 'زمان ورودی مصرف گردید', 'Logged Time': 'زمان ورود', 'Logged Time Details': 'جزئیات زمان ورودی', 'Login': 'وارد شدن', 'login': 'وارد شدن', 'Login using Facebook account': 'وارد شدن توسط حساب facebook', 'Login using Google account': 'وارد شدن توسط حساب Google', 'Login with Facebook': 'وارد شدن توسط facebook', 'Login with Google': 'وارد شدن توسط google', 'Logistics & Warehouses': 'تهیه و توزیع انبار', 'Logo': 'لوگو', 'Logo of the organization. This should be a png or jpeg file and it should be no larger than 400x400': 'لوگوی موسسه. این باید به شکل png یا jpeg باشد و بزرگ تر از 400 x400 نباشد', 'long': 'طویل', 'Long Name': 'نام طویل', 'long>12cm': 'طویل > 12 cm', 'Longitude': 'طول جغرافیایی', 'Longitude is Invalid!': 'طول جغرافیایی غیر معتبر است! ', 'Longitude is West - East (sideways).': 'طول جغرافیایی غرب است - شرق (راه های فرعی)', 'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'طول جغرافیایی در نخست خط نصف النهار صفر است (در گرینویچ، انگلستان) و به طرف شرق سراسر اروپا و آسیا مثبت است. 
طول جغرافیایی به طرف غرب، سراسر اتلانتیک و امریکا منفی است.', 'Longitude must be between -180 and 180.': 'طول جغرافیایی باید میان -180 و 180 باشد.', 'Longitude of far eastern end of the region of interest.': 'طول جغرافیایی انتهای شرق دور منطقه مورد نظر', 'Longitude of far western end of the region of interest.': 'طول جغرافیایی انتهای غرب دور منطقه مورد نظر.', 'Longitude of Map Center': 'طول جغرافیایی مرکز نقشه', 'Longitude should be between': 'طول جغرافیایی باید میان ', 'Lost': 'از دست رفته', 'Lost Password': 'رمز عبور گمشده', 'Low': 'کم ', 'Mailing list': 'لست ایمیل', 'Mailing list added': 'لست ایمیل اضافه گردید', 'Mailing list deleted': 'لست ایمیل حذف گردید', 'Mailing List Details': 'جزئیات لست ایمیل', 'Mailing List Name': 'نام لست ایمیل', 'Mailing list updated': 'لست ایمیل تجدید گردید', 'Mailing Lists': 'لست های ایمیل', 'Main Duties': 'وظایف اساسی', 'Main?': 'اساسی؟', 'Mainstreaming DRR': 'جاری سازی DRR', 'Major': 'عمده', 'male': 'مذکر', 'Manage Layers in Catalog': 'لایه مدیریت در فهرست', 'Manage Your Facilities': 'تسهیلات تان را مدیریت کنید', 'Managing material and human resources together to better prepare for future hazards and vulnerabilities.': 'مدیریت مواد و منابع انسانی برای بهتر آماده شدن خطرات آینده و آسیب پذیری ها.', 'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'اجباری. در GeoServer، این نام لایه است. در داخل WFS getCapabilities، این نام نوعیت مشخصه بعد از (:) است.', 'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wfs?': 'اجباری. URL اساسی برای دسترسی یافتن به خدمات. مثلاً http://host.domain/geoserver/wfs?', 'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wms?': 'اجباری. URL اساسی برای دسترسی یافتن به خدمات. 
مثلاً http://host.domain/geoserver/wms?', 'Map': 'نقشه', 'Map cannot display without prepop data!': 'نقشه بدون معلومات prepop نمایش داده نمی تواند', 'Map Center Latitude': 'عرض جغرافیایی مرکز نقشه', 'Map Center Longitude': 'طول جغرافیایی مرکز نقشه', 'Map Profile': 'تنظیمات نقشه', 'Map Profile added': 'تنظیمات نقشه اضافه گردید', 'Map Profile deleted': 'تنظیمات نقشه حذف گردید', 'Map Profile updated': 'تنظیمات نقشه تجدید گردید', 'Map Profiles': 'تنظیمات نقشه', 'Map has been copied and set as Default': 'نقشه کاپی شد و منحیث پیش فرض تنظیم گردید', 'Map has been set as Default': 'نقشه منحیث پیش فرض تنظیم گردید', 'Map is already your Default': 'نقشه قبلاً منحیث پیش فرض تعیین شده است', 'Map not available: Cannot write projection file - %s': '%s - نقشه موجود نیست: قابلیت نوشتن فایل تصویر نیست', 'Map not available: No Projection configured': 'نقشه موجود نیست: هیچ تصویری تنظیم نشده', 'Map not available: Projection %(projection)s not supported - please add definition to %(path)s': 'نقشه موجود نیست: تصویر %(projection)s تایید نشد - لطفا به %(path)s تعریف اضافه نمایید', 'Map of Communities': 'نقشه انجمن ها', 'Map of Facilities': 'نقشه تسهیلات', 'Map of Offices': 'نقشه دفاتر', 'Map of Projects': 'نقشه پروژه ها', 'Map of Resources': 'نقشه منابع', 'Map Settings': 'ترتیبات نقشه', 'Map Viewing Client': 'مشتری دیدن نقشه', 'Map Zoom': 'زوم نقشه', 'Marital Status': 'حالت مدنی', 'Mark as duplicate': 'علامت گذاری کردن منحیث المثنی', 'Marker': 'علامت گذار ', 'Marker added': 'علامت گذار اضافه گردید', 'Marker deleted': 'علامت گذار حذف گردید', 'Marker Details': 'جزئیات علامت گذار', 'Marker updated': 'علامت گذار تجدید گردید', 'Markers': 'علامت گذار ها', 'Markets/Marketing Analysis, Linkages and Support': 'مارکیت ها/ تحلیل کننده بازار، ارتباطات و حمایت', 'married': 'متاهل', 'Matching Records': 'ضوابط مشابه', 'Max': 'حداکثر', 'Maximum Extent': 'حداکثر وسعت', 'Maximum Location Latitude': 'حداکثر موقعیت عرض جغرافیایی', 'Maximum Location Longitude': 'حداکثر طول جغرافیایی', 'Maximum must be greater 
than minimum': 'حداکثر باید از حد اقل بزرگتر باشد', 'Measure Area: Click the points around the polygon & end with a double-click': 'مقیاس ساحه: نقاط اطراف چند ضلعی را کلیک نمایید و توسط دوبار کلیک نمودن ختم نمایید', 'Measure Length: Click the points along the path & end with a double-click': 'طول اندازه: بالای نقاط که در امتداد مسیر هستند کلیک نمایید و با دوبار کلیک نمودن پایان دهید', 'Medical Conditions': 'حالات صحی', 'Medical Supplies and Equipment': 'تدارکات صحی و تجهیزات', 'medium': 'متوسط', 'Medium': 'متوسط', 'medium<12cm': 'متوسط < 12cm', 'Member': 'عضو', 'Member added': 'عضو اضافه گردید', 'Member deleted': 'عضو حذف گردید', 'Member Details': 'جزئیات عضو', 'Member ID': 'هوویت عضو', 'Member Organizations': 'عضو موسسه ها', 'Member updated': 'عضو تجدید گردید', 'Members': 'اعضا', 'Membership': 'عضویت', 'Membership added': 'عضویت اضافه گردید', 'Membership deleted': 'عضویت حذف گردید', 'Membership Details': 'جزئیات عضویت', 'Membership Fee': 'فیس عضویت', 'Membership Paid': 'عضویت پرداخته شد', 'Membership Type added': 'نوع عضویت اضافه گردید', 'Membership Type deleted': 'نوع عضویت حذف گردید', 'Membership Type Details': 'جزئیات نوع عضویت', 'Membership Type updated': 'نوع عضویت تجدید گردید', 'Membership Types': 'انواع عضویت', 'Membership updated': 'عضویت تجدید گردید', 'Memberships': 'عضویت ها', 'Menu': 'منو', 'Merge': 'یکجا کردن', 'Merge records': 'یکجا کردن ضوابط', 'Message': 'پیام ', 'Method disabled': 'روش غیر فعال گردید', 'MGRS Layer': 'لایه MGRS', 'Middle Name': 'نام وسطی', 'Milestone': 'مرحله مهم', 'Milestone Added': 'مرحله مهم اضافه گردید', 'Milestone Deleted': 'مرحله مهم حذف گردید', 'Milestone Details': 'جزئیات مرحله مهم', 'Milestone Updated': 'مرحله مهم تجدید گردید', 'Milestones': 'مراحل مهم', 'Military': 'نظامی', 'Min': 'حداقل', 'Minimum Location Latitude': 'حداقل موقعیت عرض جغرافیایی', 'Minimum Location Longitude': 'حداقل موقعیت طول جغرافیایی', 'Minute': 'دقیقه', 'Minutes must be a number.': 'دقایق باید یک عدد باشد', 'Minutes must be less than 60.': 'دقایق باید 
کمتر از 60 باشد.', 'Missing': 'مفقود', 'missing': 'گمشده', 'Mission': 'ماموریت', 'Missions': 'ماموریت ها', 'Mobile': 'متحرک', 'Mobile Health Units': 'بخش های صحی متحرک', 'Mobile Phone': 'تیلفون همراه', 'Mobile Phone Number': 'شماره تیلفون همراه', 'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': 'تغییر مشخصه: مشخصه ای که می خواهید بی شکل بسازید را انتخاب نمایید و یکی از نقاط را در نوع مشخصه انتخاب شده بکشانید', 'mongoloid': 'منگولوید', 'Monitoring and Evaluation': 'ارزیاب و نظارت', 'Month': 'ماه', 'Monthly': 'ماهانه', 'more': 'بیشتر', 'More Info': 'معلومات بیشتر', 'More Options': 'گزینه ها بیشتر', 'more...': 'بیشتر ...', 'Morgue': 'مرده خانه', 'Moustache': 'بروت', 'Move Feature: Drag feature to desired location': 'انتقال مشخصه: مشخصه را در موقعیت دلخواه تان بکشانید', 'Multiple': 'چندین', 'Muslim': 'مسلمان', 'Must a location have a parent location?': 'یک موقعیت باید موقعیت کلی داشته باشد؟', 'My Logged Hours': 'ساعات ورود شده من', 'My Maps': 'نقشه ها من', 'My Open Tasks': 'وظایف باز شده من', 'My Profile': 'پروفایل من', 'My Tasks': 'وظایف من', 'Name': 'نام', 'Name and/or ID': 'نام و / یا هوویت', 'Name field is required!': 'اسم رشته ضروری است!', 'Name of a programme or another project which this project is implemented as part of': 'نام یک پروگرام یا یک پروژه دیگر که این پروژه انجام شده باشد منحیث', 'Name of Award': 'نام جایزه', 'Name of Father': 'نام پدر', 'Name of Institute': 'نام انجمن', 'Name of Map': 'نام نقشه', 'Name of Mother': 'نام مادر', 'Name of the person in local language and script (optional).': 'نام شخص به لسان محلی و دستنویس (اختیاری)', 'National': 'ملی', 'National ID Card': 'کارت هوویت ملی', 'National Societies': 'جوامع ملی', 'National Society': 'جامعه ملی', 'National Society / Branch': 'جامعه ملی/ نماینده گی', 'National Society added': 'جامعه ملی اضافه گردید', 'National Society deleted': 'جزئیات ملی حذف گردید', 'National Society Details': 'جزئیات جامعه ملی', 'National Society 
updated': 'جزئیات ملی تجدید گردید', 'Nationality': 'ملیت', 'Nationality of the person.': 'ملیت شخص', 'NDRT (National Disaster Response Teams)': 'NDRT (تیم های مقابله را خطرات ملی)', "Need a 'url' argument!": 'به استدلال یک "آدرس" ضرورت است!', 'Needs': 'ضروریات', 'negroid': 'سیاه پوست', 'Network added': 'شبکه اضافه گردید', 'Network Details': 'جزئیات شبکه', 'Network removed': 'شبکه حذف گردید', 'Network updated': 'شبکه تجدید گردید', 'Networks': 'شبکه ها', 'Never': 'هیچ وقت', 'New': 'جدید', 'new ACL': 'ACL جدید', 'New Annual Budget created': 'بودجه سالیانه جدید ایجاد گردید', 'New Deployment': 'گسترش جدید', 'New Entry': 'ورودی جدید', 'New Hazard': 'خطر جدید', 'New Location': 'موقعیت جدید', 'New Organization': 'موسسه جدید', 'Add Output': 'نتیجه جدید', 'New Post': 'پست جدید', 'New Records': 'ضوابط جدید', 'New Role': 'نقش جدید', 'New Sector': 'بخش جدید ', 'New Service': 'خدمت جدید', 'New Theme': 'زمینه جدید', 'New updates are available.': 'تجدید های جدید موجود است.', 'News': 'تازه ها', 'Next': 'بعدی', 'Next run': 'ردیف بعدی', 'Next View': 'نمایش بعدی', 'NGO': 'NGO', 'no': 'نخیر', 'No': 'نخیر', 'No access to this record!': 'به این ضبط دسترسی موجود نیست!', 'No Activities Found': 'هیچ فعالیت پیدا نشد', 'No Activity Organizations Found': 'هیچ فعالیت موسسه دریافت نشد', 'No Activity Types Found': 'هیچ نوع فعالیت پیدا نشد', 'No Activity Types found for this Activity': 'برای این فعالیت هیچ نوع فعالیت پیدا نشد', 'No Activity Types found for this Project Location': 'برای موقعیت این پروژه هیچ نوع فعایت پیدا نشد', 'No Affiliations defined': 'هیچ پیوسته گی معین نشد', 'No annual budgets found': 'هیچ بودجه سالیانه پیدا نشد', 'No Appraisals found': 'هیچ ارزیابی پیدا نشد', 'No Awards found': 'هیچ جوایز پیدا نشد', 'No Base Layer': 'هیچ لایه مرکزی', 'No Beneficiaries Found': 'هیچ فایده کننده پیدا نشد', 'No Beneficiary Types Found': 'هیچ نوع فایده کننده پیدا نشد', 'No Branch Organizations currently registered': 'هیچ شاخه موسسه فعلا ثبت ننموده است', 'No Campaigns Found': 'هیچ کمپاین پیدا نشد', 
'No Clusters currently registered': 'هیچ گروه فعلاً ثبت نشده است', 'No Coalitions currently recorded': 'هیچ اتحاد فعلاً ثبت نشده است', 'No Communities Found': 'هیچ انجمن پیدا نشد', 'No contact information available': 'هیچ معلومات تماس موجود نیست', 'No contact method found': 'هیچ روش تماس پیدا نشد', 'No Contacts currently registered': 'هیچ تماس تا فعلا ثبت نشده است', 'No Contacts Found': 'هیچ تماس پیدا نشد', 'No data available': 'هیچ معلومات موجود نیست', 'No data available in table': 'در جدول هیچ معلومات موجود نیست', 'No Data currently defined for this Theme Layer': 'برای این لایه زمینه فعلاً هیچ معلومات تعیین نشده است', 'No Deployments currently registered': 'فعلا هیچ گسترش ثبت نگردیده است', 'No Donors currently registered': 'هیچ کمک کننده فعلاً ثبت نشده است', 'No education details currently registered': 'فعلاً هیچ جزئیات تحصیلی ثبت نشده است', 'No entries currently available': 'فعلاً هیچ ورودی در دسترس نیست', 'No entries found': 'هیچ ورودی پیدا نشد', 'No entry available': 'هیچ ورودی موجود نیست', 'No Facilities currently registered': 'هیچ تسهیلات فعلاٌ ثبت نشده است', 'No Facility Types currently registered': 'هیچ نوع تسهیلات فعلاٌ ثبت نشده است', 'No Feature Layers currently defined': 'هیچ لایه مشخصه فعلاً تعیین نشده است', 'No forms to the corresponding resource have been downloaded yet.': 'هنوز هیچ بورم مشابه به منبع دانلود نشده است', 'No further users can be assigned.': 'دیگر استفاده کننده تعیین شده نمی تواند', 'No Groups currently registered': 'فعلاً هیچ گروپ ثبت نشده است', 'No Hazards currently registered': 'فعلاً هیچ خطرات ثبت نشده است', 'No Hazards found for this Project': 'برای این پروژه هیچ خطرات پیدا نشد', 'No Identities currently registered': 'فعلاً هیچ هوویت ثبت نشده است', 'No Images currently registered': 'فعلاً هیچ عکس ثبت نشده است', 'No jobs configured': 'هیچ وظیفه ترتیب نشده است', 'No jobs configured yet': 'هنوز هیچ وظایف ترتیب نشده است', 'No Keywords Found': 'هیچ کلمه کلیدی دریافت نشد', 'No Layers currently configured in this Profile': 'درین پروفایل 
فعلاً هیچ لایه تنظیم نشده است', 'No Layers currently defined': 'فعلاً هیچ لایه تعیین نشده است', 'No Layers currently defined in this Symbology': 'فعلاٌ در این سمبول شناسی هیچ لایه تعیین نشده است', 'No Location Hierarchies currently defined': 'فعلاً هیچ سلسله مراتب موقعیت تعیین نشده است', 'No location information defined!': 'هیچ معلومات موقعیت تعیین نشده است!', 'No Locations currently available': 'فعلاً هیچ موقعیت موجود نیست', 'No Locations Found': 'هیچ موقعیت پیدا نشد', 'No Locations found for this Organization': 'برای این موسسه هیچ موقعیت پیدا نشد', 'No Mailing List currently established': 'فعلاٌ هیچ لست ایمیل ها تاسیس نشده است', 'No Map Profiles currently defined': 'فعلاٌ هیچ تنظیمات نقشه تعیین نشده است', 'No Markers currently available': 'فعلاٌ هیچ علامت گذار تعیین نشده است', 'No match': 'هیچ مشابه موجود نیست', 'No matching element found in the data source': 'در منبع معلومات هیچ عنصر مشابه پیدا نشد', 'No Matching Records': 'هیچ ضبط مشابه دریافت نشده', 'No matching records found': 'هیچ ثبت مشابه پیدا نشد', 'No Members currently registered': 'فعلاً هیچ عضو ثبت نکرده است', 'No members currently registered': 'فعلاٌ هیچ عضو ثبت نشده است', 'No membership types currently registered': 'فعلاً هیچ نوع عضویت ثبت نشده است', 'No Memberships currently registered': 'فعلاً هیچ عضویت ثبت نشده است', 'No Milestones Found': 'هیچ مرحله مهم دریافت نشد', 'No Networks currently recorded': 'فعلاً هیچ شبکه ثبت نشده است', 'No Office Types currently registered': 'فعلاً هیچ نوع دفتر ثبت نشده است', 'No Offices currently registered': 'فعلاً هیچ دفتر ثبت نشده است', 'No Open Tasks for %(project)s': 'هیچ وظایف باز برای %(project)s موجود نیست', 'No options available': 'هیچ اختیار موجود نیست', 'no options available': 'هیچ اختیار موجود نیست', 'No options currently available': 'فعلاً هیچ اختیار موجود نیست', 'No Organization Types currently registered': 'فعلاٌ هیچ انواع موسسه ثبت نشد', 'No Organizations currently registered': 'فعلاً هیچ موسسه ثبت نشده است', 'No Organizations for Project(s)': 'هیچ 
موسسه برای پروژه (ها)', 'No Organizations found for this Policy/Strategy': 'برای این پالیسی/استراتیژی هیچ موسسه پیدا نشد', 'No outputs defined': 'هیچ نتیجه پیدا نشد', 'No Partner Organizations currently registered': 'فعلاً هیچ موسسه همکار ثبت نشده است', 'No Persons currently registered': 'فعلاٌ هیچ شخص ثبت نشده است', 'No PoI Types currently available': 'فعلاٌ هیچ انواع Pol موجود نیست', 'No Points of Interest currently available': 'فعلاٌ هیچ نقطه مورد نظر موجود نیست', 'No PoIs available.': 'Polها موجود نیست', 'No Policies or Strategies found': 'هیچ پالیسی یا استراتیژی پیدا نشد', 'No Presence Log Entries currently registered': 'فعلاٌ هیچ ورودی ثبت نشده است', 'No Professional Experience found': 'هیچ تجربه مسلکی پیدا نشد', 'No Profiles currently have Configurations for this Layer': 'فعلاٌ برای این لایه تنظیمات پروفایل موجود نیست', 'No Projections currently defined': 'فعلاٌ هیچ تصویر تعیین نشده است', 'No Projects currently registered': 'فعلاً هیچ پروژه ثبت نشده است', 'No Ratings for Skill Type': 'هیچ ارزیابی برای نوع مهارت موجود نیست', 'No Records currently available': 'فعلاً هیچ ضبط موجود نیست', 'No records in this resource': 'هیچ ثبت درین منبع موجود نیست', 'No records in this resource. Add one more records manually and then retry.': 'هیچ ثبت درین منبع موجود نیست. 
یک ثبت اضافه نمایید و دوباره امتحان کنید.', 'No records to review': 'هیچ ضبط برای بازدید کردن موجود نیست', 'No Red Cross & Red Crescent National Societies currently registered': 'فعلاً هیچ انجمن ملی هلال احمر و صلیب سرخ ثبت نشده است', 'No Regions currently registered': 'فعلاً هیچ ساحه ثبت نشده است', 'No report specified.': 'هیچ گزارش تعیین نشده است', 'No Resource Types defined': 'هیچ انواع منبع تعیین نشده است', 'No Resources in Inventory': 'هیچ موجودی منبع دریافت نشد', 'No Response': 'بدون پاسخ', 'No Response Summaries Found': 'هیچ خلاصه پاسخ پیدا نشد', 'No Restrictions': 'هیچ محدودیت دریافت نشد', 'No role to delete': 'هیچ نقش برای حذف کردن موجود نیست', 'No roles currently assigned to this user.': 'فعلاً برای این استفاده کننده هیچ نقش تعیین نشده است.', 'No Roles defined': 'هیچ نقش تعیین نشده است', 'No Rooms currently registered': 'فعلاً هیچ اتاق ثبت نشده است', 'No Search saved': 'هیچ جستجو ذخیره نشد', 'No Sectors currently registered': 'فعلاً هیچ بخش ثبت نشده است', 'No Sectors found for this Organization': 'برای این موسسه هیچ بخش پیدا نشد', 'No Sectors found for this Project': 'برای این پروژه هیچ بخش پیدا نشد', 'No Sectors found for this Theme': 'برای این زمینه هیچ بخش پیدا نشد', 'No Services currently registered': 'فعلاً هیچ خدمات ثبت نشده است', 'No Services found for this Organization': 'هیچ خدمت برای این موسسه پیدا نشد', 'No Staff currently registered': 'فعلاً هیچ کارمند ثبت نشده است', 'No staff or volunteers currently registered': 'فعلاً هیچ کارمند یا داوطلب ثبت نشده است', 'No Statuses currently registered': 'فعلاً هیچ حالت ثبت نشده است', 'No Symbologies currently defined': 'فعلاً هیچ سمبول شناسی تعیین نشده است', 'No Symbologies currently defined for this Layer': 'فعلاً برای این لایه هیچ سمبول شناسی تعیین نشده است', 'No Tasks Assigned': 'هیچ وظیفه تعیین نشده است', 'No tasks currently registered': 'فعلاً هیچ وظیفه ثبت نشده است', 'No Teams currently registered': 'فعلاً هیچ تیم ثبت نشده است', 'No Themes currently registered': 'فعلاً هیچ زمینه ثبت نشده است', 'No 
Themes found for this Activity': 'برای این فعالیت هیچ زمینه پیدا نشد', 'No Themes found for this Project': 'برای این پروژه هیچ زمینه پیدا نشد', 'No Themes found for this Project Location': 'برای موقعیت این پروژه هیچ زمینه پیدا نشد', 'No Time Logged': 'هیچ زمان ورود نیست', 'No time stamps found in this resource': 'هیچ مهر زمانی در این منبع پیدا نشد', 'No users with this role at the moment.': 'فعلاً با این نقش هیچ استفاده کننده موجود نیست', "No UTC offset found. Please set UTC offset in your 'User Profile' details. Example: UTC+0530": 'هیچ UTC آفست پیدا نشد. لطفاً UTC آفست رادر جزئیات "استفاده کننده پروفایل" ترتیب نمایید. مثال: UTC+0530', 'No Volunteer Cluster Positions': 'هیچ منصب های بخش داوطلب پیدا نشد', 'No Volunteer Cluster Types': 'هیچ انواع بخش داوطلب پیدا نشد', 'No Volunteer Clusters': 'هیچ بخش های داوطلب موجود نیست', 'No Volunteers currently registered': 'فعلاً هیچ داوطلبان ثبت شده است', 'none': 'هیچ ', 'None': 'هیچ', 'NONE': 'هیچ', 'None (no such record)': 'هیچ (چنین یک ثبت موجود نیست)', 'None of the above': 'هیچ کدام بالایی', 'Nonexistent or invalid resource': 'معدوم یا منبع غیر معتبر', 'Normal': 'عادی', 'Normal Job': 'وظیفه عادی', 'NOT %s AND NOT %s': 'نه %s و نه %s', 'NOT %s OR NOT %s': 'یا نه %s نه %s', 'Not Authorized': 'اجازه داده نشد', 'Not implemented': 'اجرا نشد', 'Not installed or incorrectly configured.': 'نصب نشد یا نادرست تنظیم شد', 'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'به یاد داشته باشید که این لست فقط داوطلبان فعال را نمایش می دهد. 
برای دیدن تمام اشخاص ثبت شده در سیستم، در عوض ازین صفحه استفاده نمایید', 'Note that when using geowebcache, this can be set in the GWC config.': 'به یاد داشته باشید هنگام استفاده کردن از geowebcache، این می تواند در تنظیمات GWC ترتیب شود.', 'Notification frequency': 'فریکونسی اطلاعیه', 'Notification method': 'روش اطلاعیه', 'Notify': 'مطلع', 'Number': 'شماره', 'Number of Activities': 'تعداد فعالیت ها', 'Number of Beneficiaries': 'تعداد فایده کننده ها', 'Number of Countries': 'تعداد کشور ها', 'Number of Deployments': 'تعداد گسترش ها', 'Number of Disaster Types': 'تعداد انواع خطرات', 'Number of Facilities': 'تعداد تسهیلات', 'Number of Missions': 'تعداد ماموریت ها', 'Number of Responses': 'تعداد پاسخ ها', 'Number or Label on the identification tag this person is wearing (if any).': 'شماره یا لیبل هوویت که این شخص به لباس خود دارد (اگر هر کدام)', 'Nutrition': 'تغذیه', 'Nutritional Assessments': 'ارزیابی تغذیه', 'Object': 'شی', 'Objectives': 'اهداف', 'Observer': 'مشاهده کننده', 'obsolete': 'منسوخ', 'Obsolete': 'منسوخ', 'OCR Form Review': 'بازدید فورم ORC', 'OCR module is disabled. Ask the Server Administrator to enable it.': 'طرح ORC غیر فعال گردید. 
از رئیس سرور بخواهید آن را فعال نماید.', 'OCR review data has been stored into the database successfully.': 'باز دید معلومات ORC موفقانه در دیتابیس ذخیره گردید.', 'OD Coordinator': 'هماهنگ کننده OD', 'Office': 'دفتر', 'Office added': 'دفتر اضافه گردید', 'Office Address': 'آدرس دفتر', 'Office deleted': 'دفتر حذف گردید', 'Office Details': 'جزئیات دفتر', 'Office Phone': 'تیلفون دفتر', 'Office Type': 'نوع دفتر', 'Office Type added': 'نوع دفتر اضافه گردید', 'Office Type deleted': 'نوع دفتر حذف گردید', 'Office Type Details': 'جزئیات نوع دفتر', 'Office Type updated': 'نوع دفتر تجدید گردید', 'Office Types': 'انواع دفتر', 'Office updated': 'دفتر تجدید گردید', 'Offices': 'دفاتر', 'OK': 'صحیح است', 'on %(date)s': '%(date)s بالای', 'On by default?': 'به شکل پیش فرض؟', 'On Hold': 'در دست', 'Only showing accessible records!': 'تنها نمایش ضوابط قابل دسترسی!', 'Opacity': 'مبهم', 'Open': 'باز کردن', 'Open Chart': 'باز کردن جدول', 'Open Incidents': 'باز کردن حوادث', 'Open Map': 'باز کردن نقشه', 'Open recent': 'باز کردن تازه ها', 'Open Report': 'باز کردن گزارش', 'Open Table': 'باز کردن جدول', 'Open Tasks for %(project)s': '%(project)s باز کردن وظایف برای', 'Open Tasks for Project': 'باز کردن وظایف برای پروژه', 'Opening Times': 'اوقات باز', 'OpenStreetMap Layer': 'لایه باز کردن نقشه سرک', 'OpenStreetMap OAuth Consumer Key': 'کلید OpenStreetMap Oauth Consumer', 'OpenStreetMap OAuth Consumer Secret': 'رمز OpenStreetMap Oauth Consumer', 'OpenWeatherMap Layer': 'لایه OpenStreetMap', 'Operation not permitted': 'عمیات اجازه داده نشد', 'Optional password for HTTP Basic Authentication.': 'رمز عبور اختیاری برای مجوز ابتدایی HTTP', 'Optional selection of a background color.': 'انتخاب اختیاری یک رنگ زمینه', 'Optional selection of a MapServer map.': 'انتخاب اختیاری نقشه MapServer.', 'Optional selection of an alternate style.': 'انتخاب اختیاری یک استایل تعویضی.', 'Optional username for HTTP Basic Authentication.': 'نام استفاده کننده اختیاری برای اجازه ابتدایی HTTP.', 'Optional. 
In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, the workspace is the FeatureType Name part before the colon(:).': 'اختیاری. در Geoserver، این آدرس محل نام محل کار است.', 'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'اختیاری. اسم عنصر کسی که باید آدرس یک فایل عکس باید در popups باشد.', 'Optional. The name of an element whose contents should be put into Popups.': 'اختیاری. نام عنصر کسی که محتویات باید در داخل popups باشد.', "Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": 'اختیاری. نام ستون هندسی. در postGIS این پیش فرض در "the_geom".', 'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'نام طرح. در GeoServer این شکل نام است http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.', 'or': 'یا', 'Organisational Preparedness - Nhq and Branches': 'آماده گی موسسهی - Nhq و شاخه ها', 'Organization': 'موسسه', 'Organization added': 'موسسه اضافه گردید', 'Organization added to Policy/Strategy': 'موسسه به پالیسی/استراتیژی اضافه گردید', 'Organization added to Project': 'موسسه به پروژه اضافه گردید', 'Organization deleted': 'موسسه حذف گردید', 'Organization Details': 'جزئیات موسسه', 'Organization group': 'گروپ موسسه', 'Organization removed from Policy/Strategy': 'موسسه از پالیسی/استراتیژی حذف گردید', 'Organization removed from Project': 'موسسه از پروژه حذف گردید', 'Organization Type': 'نوع موسسه', 'Organization Type added': 'نوع موسسه اضافه گردید', 'Organization Type deleted': 'نوع موسسه حذف گردید', 'Organization Type Details': 'جزئیات نوع موسسه', 'Organization Type updated': 'نوع موسسه تجدید گردید', 'Organization Types': 'انواع موسسه', 'Organization Units': 'بخش های موسسه', 'Organization updated': 'موسسه تجدید گردید', 'Organization(s)': 'موسسه (ها)', 'Organization/Branch': 
'موسسه/نماینده گی', 'Organizational Development': 'گسترش موسسهی', 'Organizations': 'موسسه ها', 'Organizations / Teams / Facilities': 'موسسه ها / تیم ها / تسهیلات', 'Origin': 'اصل', 'Original': 'اصلی', 'OSM file generation failed!': 'تولید فایل OSM ناکام شد!', 'OSM file generation failed: %s': '%s :تولید فایل OSM ناکام شد', 'Other': 'دیگران', 'other': 'دیگر ', 'Other Address': 'آدرس دیگر', 'Other Details': 'جزئیات دیگر', 'Other Users': 'استفاده کننده های دیگر', 'Others': 'دیگران', 'Outcomes, Impact, Challenges': 'نتیجه، فشردن، چالش ها', 'Output': 'نتیجه', 'Output added': 'نتیجه اضافه گردید', 'Output deleted': 'نتیجه حذف گردید', 'Output updated': 'نتیجه تجدید گردید', 'Outputs': 'نتایج', 'Outreach Staff': 'کارمند برتر', 'overdue': 'دیر آمدن', 'Overlays': 'پوشش ها', 'Owned Records': 'ضوابط شخصی', 'Pacific Islands Framework for Action on Climate Change. Applicable to projects in Pacific countries only': 'ساختمان جزایر آرام برای عملیات تغییرات اقلیمی. فقط در پروژه های کشور های آرام قابل اجرا می باشد', 'Page': 'صفحه', 'paid': 'پرداخته', 'Paid': 'پرداخته شد', 'Pan Map: keep the left mouse button pressed and drag the map': 'نقشه Pan: کلید چپ موس را فشار داده و روی نقشه رها نمایید', 'Parent': 'parent', "Parent level should be higher than this record's level. Parent level is": 'درجه parent باید بالاتر از درجه این ثبت باشد. 
درجه parent عبارت است از', 'Parent needs to be of the correct level': 'Parent باید یک درجه درست باشد', 'Parent needs to be set': 'Parent باید تنظیم شود', 'Parent needs to be set for locations of level': 'برای موقعیت های درجه parent باید تنظیم شود', 'Part of the URL to call to access the Features': 'بخشی از آدرس برای دسترسی به مشخصه ها', 'Participant': 'شریک', 'Participant added': 'شریک اضافه گردید', 'Participant deleted': 'شریک حذف گردید', 'Participant Details': 'جزئیات شریک', 'Participant updated': 'شریک تجدید گردید', 'Participants': 'شرکا', 'Participatory Hygiene Promotion': 'اشتراک در پیشرفت حفظ الصحه', 'Partner': 'همکار', 'Partner National Society': 'همکار اجتماع ملی', 'Partner Organization added': 'موسسه همکار اضافه گردید', 'Partner Organization deleted': 'موسسه همکار حذف گردید', 'Partner Organization Details': 'جزئیات موسسه همکار', 'Partner Organization updated': 'موسسه همکار تجدید گردید', 'Partner Organizations': 'موسسه های همکار', 'Partners': 'همکاران', 'Pass': 'عبور', 'Passport': 'پاسپورت', 'Password': 'رمزعبور', 'PDF File': 'فایل PDF', 'Peer Support': 'حمایت همکاران', 'Pending': 'در انتظار', 'per': 'درهر', 'Percentage': 'فیصدی', 'Performance Rating': 'اجرای درجه بندی ها', 'Permanent Home Address': 'آدرس دائمی', 'Person': 'شخص', 'Person added': 'شخص اضافه گردید', 'Person deleted': 'شخص حذف گردید', 'Person Details': 'جزئیات شخص', 'Person details updated': 'جزئیات شخص تجدید گردید', 'Person Entity': 'موجودی شخص', 'Person must be specified!': 'شخص باید تعیین گردد!', 'Person or OU': 'شخص و یا OU', 'Person Registry': 'فهرست شخص', 'Person who has actually seen the person/group.': 'شخص که واقعاً شخص را دیده باشد/ گروپ', "Person's Details": 'جزئیات شخص', "Person's Details added": 'جزئیات شخص اضافه گردید', "Person's Details deleted": 'جزئیات شخص حذف گردید', "Person's Details updated": 'جزئیات شخص تجدید گردید', 'Personal': 'شخصی', 'Personal Profile': 'پروفایل شخصی', 'Persons': 'اشخاص', "Persons' Details": 'جزئیات شخص', 'Philippine Pesos': 'مطرح کردن فلیپینی', 
'Phone': 'تیلفون', 'Phone #': 'تیلفون#', 'Phone 1': 'تیلفون1', 'Phone 2': 'تیلفون2', 'Phone number is required': 'شماره تماس ضرورت است', 'Photograph': 'عکاس', 'PIFACC Priorities': 'تقدمات PIFACC', 'PIFACC-1: Implementing Tangible, On-Ground Adaptation Measures': 'PIFACC- 1: اجراات محسوس، مقیاس های توافق روی زمین', 'PIFACC-2: Governance and Decision Making': 'PIFACC - 2: کنترول و تصمیم گیری', 'PIFACC-3: Improving our understanding of climate change': 'PIFACC - 3: توسعه دانش تغییر اقلیم خود', 'PIFACC-4: Education, Training and Awareness': 'PIFACC - 4: تحصیلات، آموزش و آگاهی', 'PIFACC-5: Mitigation of Global Greenhouse Gas Emissions': 'PIFACC - 5: کاهش انتشار گاز Greenhouse جهانی', 'PIFACC-6: Partnerships and Cooperation': 'PIFACC - 6: تعاون و همکاری', 'PIL (Python Image Library) not installed': 'PIL () نصب نشد', 'PIL (Python Image Library) not installed, images cannot be embedded in the PDF report': 'PIL () نصب نشد، عکس ها در گزارش PDF جاسازی شده نمی تواند', 'Place of Birth': 'محل تولد', 'Place on Map': 'محل روی نقشه', 'Planning and Construction of Drainage Systems ': 'پلانگذاری و ساختمانی سیستم آبکشی', 'Please choose a type': 'لطفاٌ یک نوع را انتخاب نمایید', 'Please enter a first name': 'لطفا یک نام را اضافه نمایید', 'Please enter a last name': 'لطفاً یک تخلص را اضافه نمایید', 'Please enter a number only': 'لطفاً فقط یک شماره اضافه نمایید', 'Please enter a valid email address': 'لطفاً یک ایمیل آدرس معتبر وارد نمایید', 'Please fill this!': 'لطفاً این را خانه پری نمایید', "Please provide as much detail as you can, including the URL(s) where the bug occurs or you'd like the new feature to go.": 'لطفاً هر قدر جزئیات که می توانید اضافه نمایید، همچنان آدرس مکانی که خطا رخ می دهد یا شما می خواهید مشخصه جدید برود.', 'Please record Beneficiary according to the reporting needs of your project': 'لطفاً فایده گیرنده را مطابق ضروریات گزارش پروژه ثبت نمایید', 'Please Select a Facility': 'لطفاً یک تسهیلات را انتخاب نمایید', 'Please select a valid image!': 'لطفاً یک عکس معتبر 
انتخاب نمایید!', 'Please select exactly two records': 'لطفاً دقیقاً دو ثبت را انتخاب نمایید', 'Please use this field to record any additional information, including a history of the record if it is updated.': 'لطفاً برای معلومات اضافی، و همچنان خلاصه ثبت اگر تجدید شده است ازین بخش استفاده نمایید.', 'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'برای معلومات اضافی مانند Ids Ushahidi ازین بخش استفاده نمایید، خلاصه ثبت اگر تجدید گردیده را نیز شامل بسازید.', 'PMER': 'PMER', 'PoI': 'Pol', 'PoI Type added': 'نوع Pol اضافه گردید', 'PoI Type deleted': 'نوع Pol حذف گردید', 'PoI Type Details': 'جزئیات نوع Pol ', 'PoI Type updated': 'نوع Pol تجدید گردید', 'PoI Types': 'انواع Pol', 'Point of Interest added': 'نقطه مورد نظر اضافه گردید', 'Point of Interest deleted': 'نقطه مورد نظر حذف گردید', 'Point of Interest Details': 'جزئیات نقطه مورد نظر', 'Point of Interest updated': 'نقطه مورد نظر تجدید گردید', 'Points of Interest': 'نقاط مورد نظر', 'PoIs': 'Pol ها', 'PoIs successfully imported.': 'Pol ها موفقانه صادر گردید', 'Policies & Strategies': 'پالیسی ها و استراتیژی ها', 'Policy Development': 'ایجاد پالیسی', 'Policy or Strategy': 'پالیسی یا استراتیژی', 'Policy or Strategy added': 'پالیسی یا استراتیژی اضافه گردید', "Policy or Strategy added, awaiting administrator's approval": 'پالیسی یا استراتیژی اضافه گردید، انتظار تصدیق رئیس', 'Policy or Strategy deleted': 'پالیسی یا استراتیژی حذف گردید', 'Policy or Strategy updated': 'پالیسی یا استراتیژی تجدید گردید', 'Polygon': 'چند ضلعی', 'Poor': 'ضعیف', 'Popup Fields': 'رشته های popup', 'Popup Label': 'نشان popup', 'Position': 'منصب', 'Positions': 'منصب ها', 'Post Harvest Storage and Management': 'ذخیره و مدیریت پست', 'Postcode': 'کود پست', 'Power Supply Type': 'نوع تهیه کننده برق', 'Powered by': 'طراحی شد توسط', 'Powered by Sahana Eden': 'توسط Sahana Eden طراحی شد', 'Preferred Name': 'نام ترجیحی', 'Presence': 'حاضر', 'Presence Condition': 
'حالات فعلی', 'Presence Log': 'ورودی فعلی', 'Previous': 'قبلی ', 'Previous View': 'نمایش قبلی', 'Print': 'چاپ', 'Priority': 'اولویت', 'Priority from 1 to 9. 1 is most preferred.': 'تقدم از 1 الی 9. 1 بیشتر تقدم داده می شود.', 'Privacy': 'خلوت', 'Private': 'شخصی', 'Procedure': 'دستورالعمل', 'Processing': 'جریان', 'Profession': 'مسلک', 'Professional Experience': 'تجربه مسلکی', 'Professional Experience added': 'تجربه مسلکی اضافه گردید', 'Professional Experience deleted': 'تجربه مسلکی حذف گردید', 'Professional Experience Details': 'جزئیات تجربه مسلکی', 'Professional Experience updated': 'تجربه مسلکی تجدید گردید', 'Profile Configuration': 'تنظیمات پروفایل', 'Profile Configuration removed': 'تنظیمات پروفایل حذف گردید', 'Profile Configuration updated': 'تنظیمات پروفایل تجدید گردید', 'Profile Configurations': 'تنظیمات پروفایل', 'Profile Configured': 'پروفایل تنظیم گردید', 'Profile Details': 'جزئیات پروفایل', 'Profile Page': 'صفحه پروفایل', 'Profile Picture': 'عکس پروفایل', 'Profile Picture?': 'عکس پروفایل؟', 'Profiles': 'پروفایل ها', 'Program': 'برنامه', 'Program added': 'برنامه اضافه گردید', 'Program deleted': 'برنامه حذف گردید', 'Program Details': 'جزئیات برنامه', 'Program Hours (Month)': 'ساعات برنامه (ماه)', 'Program Hours (Year)': 'ساعات برنامه (سال)', 'Program updated': 'برنامه تجدید گردید', 'Programme Manager': 'مدیر برنامه', 'Programme Preparation and Action Plan, Budget & Schedule': 'آماده گی برنامه و پلان عمل، بودجه و تقسیم اوقات', 'Programs': 'برنامه ها', 'Project': 'پروژه', 'Project added': 'پروژه اضافه گردید', 'Project Assessments and Planning': 'ارزیابی های پروژه و پلانگذاری', 'Project Calendar': 'تقویم پروژه', 'Project deleted': 'پروژه حذف گردید', 'Project Details': 'جزئیات پروژه', 'Project Name': 'نام پروژه', 'Project not Found': 'پروژه پیدا نشد', 'Project Officer': 'کارمند پروژه', 'Project Organization Details': 'جزئیات پروژه موسسه', 'Project Organization updated': 'پروژه موسسه تجدید گردید', 'Project Organizations': 'پروژه موسسه ها', 'Project Report': 
'گزارش پروژه', 'Project Task': 'وظیفه پروژه', 'Project Time Report': 'گزارش زمان پروژه', 'Project updated': 'پروژه تجدید گردید', 'Projection': 'تصویر', 'Projection added': 'تصویر اضافه گردید', 'Projection deleted': 'تصویر حذف گردید', 'Projection Details': 'جزئیات تصویر', 'Projection Type': 'نوع تصویر', 'Projection updated': 'تصوی تجدید گردید', 'Projections': 'تصاویر', 'Projects': 'پروژه ها', 'Projects Map': 'نقشه پروژه ها', 'Proposed': 'پیشنهاد شد', 'Provide a password': 'یک رمز عبور تهیه نمایید', 'Provision of Inputs': 'تدارکات داخلی', 'Provision of Tools and Equipment': 'تدارکات لوازم و تجهیزات', 'Psychosocial Support': 'حمایت روانی', 'Public': 'عمومی', 'Python GDAL required for Shapefile support!': 'برای حمایه Shapefile به Python GDAL ضرورت است!', 'Python needs the ReportLab module installed for PDF export': 'لابراتوار گزارش نصب نگردید', 'Python needs the xlrd module installed for XLS export': 'خطا: اجرا نمودن Python به مودل xlrd نیاز دارد تا در XLS export نصب باشد', 'Python needs the xlwt module installed for XLS export': 'خطا: اجرا نمودن Python به مودل xlrd نیاز دارد تا در XLS export نصب باشد', 'Quantity': 'کمیت', 'Query': 'پرسش', 'Query Feature': 'پرسش مشخصه', 'Queryable?': 'قابل پرسش', 'Race': 'مسابقه', 'Rangeland, Fisheries and Forest Management': 'RAGELAND، ماهی گیری و مدیریت جنگل', 'Rapid Data Entry': 'ورود معلومات سریع', 'Rating': 'ارزیابی', 'RDRT (Regional Disaster Response Teams)': 'RDRT (تیم مقابله با خطرات ساحوی)', 'RDRT Members': 'اعضای RDRT', 'RDRT Type': 'نوع RDRT', 'READ': 'خواندن', 'Ready': 'آماده', 'Receive %(opt_in)s updates:': 'دریافت %(opt_in)s تجدید ها', 'Receive updates': 'دریافت تازه ها', 'Received Shipments': 'محموله ها دریافت شد', 'Record': 'ضبط', 'Record added': 'ضبط اضافه گردید', 'Record already exists': 'ضبط قبلاً موجود است', 'Record approved': 'ضبط تصدیق گردید', 'Record could not be approved.': 'ضبط تصدیق شده نتوانست', 'Record could not be deleted.': 'ضبط حذف شده نتوانست', 'Record deleted': 'ضبط حذف گردید', 'Record Details': 'جزئیات 
ضبط', 'Record not found': 'ضبط پیدا نشد', 'Record not found!': 'ضبط پیدا نشد!', 'Record updated': 'ضبط تجدید گردید', 'Record Updates': 'تجدید های ضبط', 'Records': 'ضوابط', 'records deleted': 'ضوابط حذف گردید', 'Records merged successfully.': 'ضوابط موفقانه یکجا گردید', 'red': 'سرخ', 'Red Cross & Red Crescent National Societies': 'انجمن های ملی صلیب سرخ و هلال احمر', 'Red Cross / Red Crescent': 'صلیب سرخ/ هلال احمر', 'Referral': 'مراجعه', 'Refresh Rate (seconds)': 'نرخ تازه سازی (ثانیه ها)', 'Region': 'ساحه', 'Region added': 'ساحه اضافه گردید', 'Region deleted': 'ساحه حذف گردید', 'Region Details': 'جزئیات ساحه', 'Region Location': 'موقعیت ساحه', 'Region updated': 'ساحه تجدید گردید', 'Regional': 'ساحوی', 'Regions': 'ساحات', 'Register': 'ثبت کردن', 'Register As': 'ثبت کردن منحیث', 'Register for Account': 'ثبت کردن برای حساب', 'Registered users can %(login)s to access the system': 'استفاده کننده های ثبت شده می توانند %(login)s در دسترسی سیستم', 'Registration not permitted': 'ثبت اجازه داده نشد', 'Reject': 'رد کردن', 'Relationship': 'رابطه', 'Relief Team': 'تیم امداد', 'Religion': 'مذهب', 'reload': 'دوباره بارگیری', 'Reload': 'دوباره بارگیری', 'Remove': 'حذف کردن', 'Remove Coalition': 'حذف کردن پیوسته گی', 'Remove existing data before import': 'معلومات موجود را قبل از صدور حذف نمایید', 'Remove Feature: Select the feature you wish to remove & press the delete key': 'حذف کردن مشخصه: مشخصه ای که می خواهید حذف نمایید را انتخاب نمایید و دکمه حذف را فشار دهید', 'Remove Layer from Profile': 'لایه را از پروفایل حذف نمایید', 'Remove Layer from Symbology': 'لایه را از سمبول شناسی حذف نمایید', 'Remove Network': 'شبکه را حذف نمایید', 'Remove Organization from Project': 'موسسه را از پروژه حذف نمایید', 'Remove Profile Configuration for Layer': 'تنظیمات پروفایل را از لایه حذف نمایید', 'Remove selection': 'انتخاب را حذف نمایید', 'Remove Skill': 'مهارت را حذف نمایید', 'Remove Symbology from Layer': 'سمبول شناسی را از لایه حذف نمایید', 'Remove this entry': 'این موجودی را حذف نمایید', 
'Reopened': 'دوباره باز کردن', 'Repeat': 'تکرار ', 'Repeat your password': 'رمز عبور را تکرارً وارد نمایید', 'Replace': 'تعویض', 'Reply': 'پاسخ', 'Report': 'گزارش', 'Report of': 'گزارش', 'Report on Annual Budgets': 'گزارش بودجه سالیانه', 'Report Options': 'گزینه های گزارش', 'Reports': 'گزارشات', 'representation of the Polygon/Line.': 'نمایش چند ضلعی/ خط', 'Request': 'درخواست', 'Requested By Facility': 'توسط تسهیلات درخواست گردید', 'Requested Items': 'مواد درخواستی', 'Requests': 'درخواست ها', 'Requires Login': 'به وارد شدن ضرورت است', 'Reset': 'دوباره تنظیم کردن', 'Reset all filters': 'دوباره تنظیم کردن تمام فلتر ها', 'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size': 'دوباره اندازه کردن مشخصه: مشخصه ای که می خواهید دوباره اندازه کنید را انتخاب نمایید و به اندازه میل تان در نقطه اتصال بکشانید', 'Resource added': 'منبع اضافه گردید', 'Resource deleted': 'منبع حذف گردید', 'Resource Details': 'جزئیات منبع', 'Resource Inventory': 'موجودی منبع', 'Resource Management System': 'سیستم مدیریت منبع', 'Resource Management System account has been activated': 'حساب سیستم مدیریت منبع فعال گردید', 'Resource Transfers for Acquiring Assets': 'انتقال های منبع برای بدست آوردن سرمایه', 'Resource Transfers for Replacing/ Provisioning Or Consumption': 'انتقال های منبع برای تعویض/ تدارکات مصرف', 'Resource Type': 'نوع منبع', 'Resource Type added': 'نوع منبع اضافه گردید', 'Resource Type deleted': 'نوع منبع حذف گردید', 'Resource Type Details': 'جزئیات نوع منبع', 'Resource Type updated': 'نوع منبع تجدید گردید', 'Resource Types': 'انواع منبع', 'Resource updated': 'منبع تجدید گردید', 'Responded': 'پاسخ داده شد', 'Response': 'پاسخ', 'Response Summaries': 'خلاصه های پاسخ', 'Response Summary Added': 'خلاصه پاسخ اضافه گردید', 'Response Summary Deleted': 'خلاصه پاسخ حذف گردید', 'Response Summary Details': 'جزئیات خلاصه پاسخ', 'Response Summary Report': 'گزارش خلاصه پاسخ', 'Response Summary Updated': 'خلاصه پاسخ تجدید گردید', 'REST Filter': 
'فلتر REST', 'Retrieve Password': 'بازیابی رمز عبور', 'retry': 'تلاش مجدد', 'Revert Entry': 'برگشت به ورودی', 'Review': 'بازدید', 'RFA Priorities': 'تقدم RFA', 'RFA1: Governance-Organisational, Institutional, Policy and Decision Making Framework': 'RFA1: کنترول- موسسهی، رسمی، پالیسی و تصمیم گیری ساحه کاری', 'RFA2: Knowledge, Information, Public Awareness and Education': 'RFA2: دانش، معلومات، آگاهی عمومی و تحصیلات', 'RFA3: Analysis and Evaluation of Hazards, Vulnerabilities and Elements at Risk': 'RFA3: تحلیل و ارزیابی خطرات، آسیب پذیری ها و عناصر در خطر', 'RFA4: Planning for Effective Preparedness, Response and Recovery': 'RFA4: پلانگذاری برای موثریت آماده گی، مقابله و بهبود', 'RFA5: Effective, Integrated and People-Focused Early Warning Systems': 'RFA5: موثریت، تکمیل و مردم دقیقی برای سیستم هشدار به وقت', 'RFA6: Reduction of Underlying Risk Factors': 'RFA6: کاهش عوامل خطرات اساسی', 'Risk Management and Quality Assurance': 'مدیریت خطر و اطمینان کیفیت', 'Risk Transfer': 'انتقال خطر', 'RMS': 'RMS', 'RMS Team': 'تیم RMS', 'Role': 'نقش', 'Role added': 'نقش اضافه گردید', 'Role assigned to User': 'نقش به استفاده کننده تعیین گردید', 'Role deleted': 'نقش حذف گردید', 'Role Details': 'جزئیات نقش', 'Role Name': 'نام نقش', 'Role Required': 'نقش لازمی است', 'Role updated': 'نقش تجدید گردید', 'Roles': 'نقش ها', 'Roles currently assigned': 'فعلاٌ نقش ها تعیین شد', 'Roles of User': 'نقش های استفاده کننده', 'Roles Permitted': 'به نقش ها اجازه داده شد', 'Roles updated': 'نقش ها تجدید گردید', 'Room': 'اتاق', 'Room added': 'اتاق اضافه گردید', 'Room deleted': 'اتاق حذف گردید', 'Room Details': 'جزئیات اتاق', 'Room updated': 'اتاق تجدید گردید', 'Rooms': 'اتاق ها', 'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location': 'مشخصه چرخانیدن: مشخصه ای که می خواهید بچرخانید را انتخاب نمایید و به نقطه اتصال موقعیت دلخواه تان بکشانید', 'Run every': 'هر مسابقه', 'S3PivotTable unresolved dependencies': 'وابستگی حل نا شده 
S3PivotTable', 'Sahana Community Chat': 'حرف زدن انجمن Sahana', 'Sahana Eden Humanitarian Management Platform': 'پلت فارم مدیریت بشردوستانه Sahana Eden', 'Sahana Eden Website': 'وبسایت Sahana Eden', 'Sanitation': 'حفظ الصحه', 'Save': 'ذخیره کردن', 'Save and Continue Editing': 'ذخیره و ادامه به اصلاح', 'Save as New Map?': 'ذخیره کردن منحیث یک نقشه جدید؟', 'Save Map': 'ذخیره کردن نقشه', 'Save search': 'ذخیره کردن جستجو', 'Save this search': 'ذخیره کردن این جستجو', 'Save: Default Lat, Lon & Zoom for the Viewport': 'ذخیره کردن: غرض جغرافیایی پیش فرض، طول جغرافیایی و زوم برای Viewport', 'Saved': 'ذخیره گردید', 'Saved Filters': 'فلتر های ذخیره شده', 'Saved filters': 'فلتر های ذخیره شده', 'Saved Filters...': 'فلتر های ذخیره شده...', 'Saved Maps': 'نقشه های ذخیره شده', 'Saved search added': 'جستجوی ذخیره شده اضافه گردید', 'Saved search deleted': 'جستجوی ذخیره شده حذف گردید', 'Saved search details': 'جزئیات جستجوی ذخیره شده', 'Saved search updated': 'جستجوی ذخیره شده تجدید گردید', 'Saved Searches': 'جستجو های ذخیره شده', 'Saved searches': 'جستجو های ذخیره شده', 'Scanned Copy': 'کاپی سکن شده', 'Scanned Forms Upload': 'انتقال فورم های سکن شده', 'Scheduled Jobs': 'وظایف تقسیم شده', 'Schema': 'طرح', 'School Holidays only': 'فقط رخصتی های مکتب', 'School RC Units Development': 'ایجاد بخش های RC مکتب', 'School Safety and Children Education,': 'محفوظیت مکتب و تحصیلات اطفال', 'Seaport': 'ساحل', 'Search': 'جستجو', 'Search %(site_label)s Status': 'جستجو %(site_label)s حالات', 'Search Activities': 'فعالیت های جستجو', 'Search Activity Types': 'انواع فعالیت جستجو', 'Search Addresses': 'آدرس های جستجو', 'Search Affiliations': 'پیوسته گی های جستجو', 'Search Annual Budgets': 'جستجوی بودجه های سالیانه ', 'Search Appraisals': 'جستجوی ارزیابی', 'Search Awards': 'جستجوی جوایز', 'Search Beneficiaries': 'جستجوی فایده کننده ها', 'Search Beneficiary Types': 'جستجوی انواع فایده کننده ها', 'Search Branch Organizations': 'جستجوی شاخه های موسسه ها', 'Search by skills': 'جستجوی توسط مهارت ها', 'Search 
Campaigns': 'جستجوی کمپاین ها', 'Search Certificates': 'جستجوی تصدیق نامه ها', 'Search Certifications': 'جستجوی تصدیق ها', 'Search Clusters': 'جستجوی گروه ها', 'Search Coalitions': 'جستجوی پیوسته گی ها', 'Search Communities': 'جستجوی انجمن ها', 'Search Community Contacts': 'جستجوی تماس های انجمن', 'Search Competency Ratings': 'جستجوی ارزیابی های شایسته گی ', 'Search Contact Information': 'جستجوی معلومات تماس', 'Search Contacts': 'جستجوی تماس ها', 'Search Course Certificates': 'جستجوی تصدیق نامه های دوره', 'Search Courses': 'جستجوی دوره ها', 'Search Credentials': 'جستجوی اعتبار نامه ها', 'Search Criteria': 'جستجوی معیار ها', 'Search Departments': 'جستجوی اداره ها', 'Search Deployments': 'جستجوی گسترش ها', 'Search Donors': 'جستجوی اهدا کنندگان', 'Search Education Details': 'جستجوی جزئیات تحصیلی', 'Search Entries': 'جستجو ورودی ها', 'Search Facilities': 'جستجوی تسهیلات', 'Search Facility Types': 'جستجوی انواع تسهیلات', 'Search Feature Layers': 'جستجوی سلایه های مشخصه', 'Search for a Person': 'جستجوی یک شخص', 'Search for a Project by name, code, location, or description.': 'جستجوی برای پروژه توسط نام، کود، موقعیت یا توضیحات.', 'Search for a Project by name, code, or description.': 'جستجو برای پروژه توسط نام، کود، یا توضیحات.', 'Search for a Project Community by name.': 'جستجو برای پروژه انجمن توسط نام', 'Search for Activity Organization': 'جستجوی برای فعالیت موسسه', 'Search for Activity Type': 'جستجوی برای نوع فعالیت', 'Search for office by organization or branch.': 'جستجوی برای دفتر توسط موسسه یا شاخه.', 'Search for office by organization.': 'جستجوی برای دفتر توسط موسسه', 'Search Groups': 'جستجوی گروپ ها', 'Search Hazards': 'جستجوی خطرات', 'Search Hours': 'جستجوی ساعات', 'Search Identity': 'جستجوی هوویت', 'Search Images': 'جستجوی عکس ها', 'Search Job Titles': 'جستجوی عناوین وظیفه', 'Search Keywords': 'جستجوی کلمه کلیدی', 'Search Layers': 'جستجوی سطوح', 'Search Location': 'جستجوی موقعیت', 'Search Location Hierarchies': 'جستجوی سلسله مراتب موقعیت', 'Search location in 
Geonames': 'جستجوی موقعیت در Geoname ها', 'Search Locations': 'جستجوی موقعیت ها', 'Search Log Entry': 'جستجوی ورود به سیستم', 'Search Logged Time': 'جستجوی زمان بندی', 'Search Mailing Lists': 'جستجوی لست های ایمیل', 'Search Map Profiles': 'جستجوی تنظیمات نقشه', 'Search Markers': 'جستجوی علامت گذار ها', 'Search Member': 'جستجوی عضو', 'Search Members': 'جستجوی اعضا', 'Search Membership': 'جستجوی عضویت', 'Search Membership Types': 'جستجوی انواع عضویت', 'Search Milestones': 'جستجوی مراحل مهم', 'Search Networks': 'جستجوی شبکه ها', 'Search Office Types': 'جستجوی انواع دفتر', 'Search Offices': 'جستجوی دفاتر', 'Search Open Tasks for %(project)s': '%(project)s جستجوی باز کردن وظیفه برای', 'Search Organization Types': 'جستجوی انواع موسسه ', 'Search Organizations': 'جستجوی موسسه ها', 'Search Participants': 'جستجوی موسسه کننده گان', 'Search Partner Organizations': 'جستجوی موسسه های همکار', "Search Person's Details": 'جستجوی جزئیات شخص', 'Search Persons': 'جستجوی اشخاص', 'Search PoI Types': 'جستجوی انواع Pol', 'Search Points of Interest': 'جستجوی نقاط مورد نظر', 'Search Policies & Strategies': 'جستجوی پالیسی های و استراتیژی ها', 'Search Professional Experience': 'جستجوی تجربه مسلکی', 'Search Programs': 'جستجوی برنامه ها', 'Search Project Organizations': 'جستجوی پروژه موسسه ها', 'Search Projections': 'جستجوی تصاویر', 'Search Projects': 'جستجوی پروژه ها', 'Search Records': 'جستجوی ضبط ها', 'Search Red Cross & Red Crescent National Societies': 'جستجوی جوامع ملی صلیب سرخ و هلال احمر', 'Search Regions': 'جستجوی نواحی', 'Search Resource Types': 'جستجوی انواع منبع', 'Search Resource Inventory': 'جستجوی موجودی منبع', 'Search Response Summaries': 'جستجوی خلاصه های پاسخ', 'Search Results': 'جستجوی نتایج ', 'Search Roles': 'جستجوی نقش ها', 'Search Rooms': 'جستجوی اتاق ها', 'Search saved searches': 'جستجوی جستجو های ذخیره شده', 'Search Sectors': 'جستجوی بخش ها', 'Search Services': 'جستجوی خدمات ', 'Search Shipped Items': 'جستجوی بخش های فرستاده شده', 'Search Skill Equivalences': 'جستجوی 
تعادلات مهارت', 'Search Skill Types': 'جستجوی انواع مهارت', 'Search Skills': 'جستجوی مهارت ها', 'Search Staff': 'جستجوی کارمند', 'Search Staff & Volunteers': 'جستجوی کارمند و داوطلبان', 'Search Staff Assignments': 'جستجوی ماموریت های کارمند', 'Search Symbologies': 'جستجوی سمبول شناسی ها', 'Search Tasks': 'جستجوی وظایف', 'Search Teams': 'جستجوی تیم ها', 'Search Theme Data': 'جستجوی معلومات زمینه', 'Search Themes': 'جستجوی زمینه ها', 'Search Training Events': 'جستجوی رویداد های آموزشی', 'Search Training Participants': 'جستجوی اشتراک کننده های آموزشی', 'Search Volunteer Cluster Positions': 'جستجوی منصب های گروه های داوطلب', 'Search Volunteer Cluster Types': 'جستجوی انواع گروه داوطلب', 'Search Volunteer Clusters': 'جستجوی گروه های داوطلب', 'Search Volunteer Roles': 'جستجوی نقش های داوطلب', 'Search Volunteers': 'جستجوی داوطلبین', 'Secondary Server (Optional)': 'server کمکی (اختیاری)', 'seconds': 'ثانیه ها', 'Seconds must be a number.': 'ثانیه ها باید یک شماره داشته باشد.', 'Seconds must be less than 60.': 'ثانیه ها باید کمتر از از 60 باشد', 'Secretary General': 'مشاور عمومی', 'Sector': 'بخش', 'Sector added': 'بخش اضافه گردید', 'Sector added to Organization': 'بخش به موسسه اضافه گردید', 'Sector added to Project': 'بخش به پروژه اضافه گردید', 'Sector added to Theme': 'بخش به زمینه اضافه گردید', 'Sector deleted': 'بخش حذف گردید', 'Sector Details': 'جزئیات بخش', 'Sector removed from Organization': 'بخش از موسسه حذف گردید', 'Sector removed from Project': 'بخش از پروژه حذف گردید', 'Sector removed from Theme': 'بخش از زمینه حذف گردید', 'Sector updated': 'بخش تجدید گردید', 'Sectors': 'بخش ها', 'Sectors to which this Activity Type can apply': 'بخش هایی که به این نوع فعالیت بکار می رود', 'Sectors to which this Theme can apply': 'بخش هایی که به این زمینه بکار می رود', 'Security': 'امنیت', 'Security Officer': 'مامور امنیت', 'See All Entries': 'تمام ورودی ها را ببینید', 'see comment': 'دیدن توضیحات', 'see more': 'دیدن بیشتر', 'Seen': 'دیده شد', 'Select %(location)s': 'ها را انتخاب 
نمایید %(location)s', "Select 2 records from this list, then click 'Merge'.": 'ازین لست دو ضبط را انتخاب نمایید، بعداً کلید "یکجا سازی" را فشار دهید', "Select a Room from the list or click 'Add Room'": 'یک اتاق را از لست انتخاب نمایید یا "اضافه کردن اتاق" را فشار دهید', 'Select all': 'انتخاب همه', 'Select All': 'انتخاب همه', 'Select an existing bin': 'یک صندوق موجود را انتخاب نمایید', 'Select an image to upload. You can crop this later by opening this record.': 'یک عکس را برای اضافه کردن انتخاب نمایید. بعداً شما می توانید این را بعداً با باز کردن این ضبط قطع نمایید.', 'Select Existing Location': 'انتخاب موقعیت موجود', 'Select from registry': 'انتخاب کردن از دفتر ثبت اسناد', 'Select one or more option(s) that apply': 'برای اجرا کردن یک یا گزینه های بیشتر اضافه نمایید', 'Select resources to import': 'منابع را برای صدور انتخاب نمایید', 'Select the default site.': 'ساحه پیش فرض را انتخاب نمایید.', 'Select the option that applies': 'اختیاری که تطبیق می شود را انتخاب نمایید', 'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'پوشش برای ارزیابی و فعالیت های مربوط به هر یک نیاز به شناسایی خلا را انتخاب کنید.', 'Select the person assigned to this role for this project.': 'فرد را که برای این پروژه به این نقش اختصاص داده شده انتخاب کنید.', "Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "انتخاب کنید که اگر تمام موقعیت های مشخص در عمیق ترین لایه سلسله مراتب یک parent ضرورت داشته باشند. برای مثال، اگر 'ناحیه' کوچکترین بخش در سلسله مراتب باشد، پس تمام موقعیت های مشخص باید یک ناحیه را منحیث parent داشته باشند.", "Select this if all specific locations need a parent location in the location hierarchy. 
This can assist in setting up a 'region' representing an affected area.": "این را انتخاب کنید اگر تمام موقعیت های مشخص به یک موقعیت parent در موقعیت سلسله مراتب نیاز داشته باشد. این می تواند برای تنظیم 'ساحه' نمایانگر یک منطقه متاثر کمک نماید.", 'Select this if you need this resource to be mapped from site_id instead of location_id.': 'این را انتخاب کنید اگر شما ضرورت دارید تا این منبع بجای location_id از site_id نقشه برداری شود.', 'Select This Location': 'انتخاب این موقعیت', 'Selected OCR Form has no pages. Use another revision of create a new revision by downloading a new Form.': 'شکل OCR انتخاب شده هیچ صفحه ندارد. با داونلود نمودن یک فارم جدید یک مرور دیگر را برای ایجاد مرور انتخاب نمایید.', 'Send a message to this person': 'به این شخص یک پیام بفرستید', 'Send a message to this team': 'به این تیم یک پیام بفرستید', 'Send batch': 'فرستادن بسته', 'Send Message': 'فرستادن پیام', 'Send Task Notification': 'اطلاعیه وظیفه را ارسال نمایید', 'Senior (50+)': 'ارشد(50+)', 'Sent Shipments': 'محموله های فرستاده شده', 'separated': 'جدا گردید', 'separated from family': 'از فامیل جدا گردید', 'Service': 'خدمت', 'Service added': 'خدمت اضافه گردید', 'Service added to Organization': 'خدمت به موسسه اضافه گردید', 'Service deleted': 'خدمت حذف گردید', 'Service Details': 'جزئیات خدمت', 'Service Record': 'ضبط خدمت', 'Service removed from Organization': 'خدمت از موسسه حذف گردید', 'Service updated': 'خدمت تجدید گردید', 'Services': 'خدمات', 'Set as my Default': 'منحیث پیش فرض من تنظیم شود', 'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': 'True را به اجازه دادن وارد نمودن تغییرات توسط استعمال کنندگان که مدیر نیستند تعیین نمایید.', 'Settings': 'تنظیمات', 'Sex': 'جنسیت', 'Shapefile Layer': 'لایه Shapefile', 'Share': 'شریک ', 'shaved': 'تراشیدن', 'Shelter': 'محافظت', 'short': 'کوتاه', 'Short Description': 'توضیحات کوتاه', 'Short Title / ID': 'عنوان کوتاه(ID)', 'short<6cm': 'کوتاه> 6cm', 'Show': 'نمایش دادن', 'Show %(number)s entries': 'نمایش 
%(number)s منو', 'Show on Map': 'نمایش دادن روی نقشه', 'Show Pivot Table': 'نمایش جدول اصلی', 'Show Table': 'نمایش دادن جدول', 'Show totals': 'نمایش دادن کلی', 'Showing 0 to 0 of 0 entries': 'نمایش دادن 0 الی 0 ورودی ها', 'Showing _START_ to _END_ of _TOTAL_ entries': 'دخولی های _START_ to _END_ of _TOTAL_ را نشان می دهد', 'sides': 'جوانب', 'sign-up now': 'اشتراک کردن', 'Signature': 'امضا', 'Simple Search': 'جستجوی ساده', 'Simulation ': 'شبیه سازی', 'single': 'واحد', 'Single PDF File': 'فایل PDF واحد', 'Site': 'ساحه', 'Site Name': 'نام ساحه', 'Site Planning': 'پلانگذاری ساحه', 'Site Selection': 'انتخاب ساحه', 'Sitemap': 'نقشه ساحه', 'Situation': 'حالات', 'Situation Monitoring/Community Surveillance': 'نمایش حالات/نظارت انجمن', 'Skeleton Example': 'مثال ساختمان', 'Sketch': 'طرح خلاصه', 'Skill': 'مهارت', 'Skill added': 'مهارت اضافه گردید', 'Skill Catalog': 'فهرست مهارت', 'Skill deleted': 'مهارت حذف گردید', 'Skill Details': 'جزئیات مهارت', 'Skill Equivalence': 'تعادل مهارت', 'Skill Equivalence added': 'تعادل مهارت اضافه گردید', 'Skill Equivalence deleted': 'تعادل مهارت حذف گردید', 'Skill Equivalence Details': 'جزئیات تعادل مهارت', 'Skill Equivalence updated': 'تعادل مهارت تجدید گردید', 'Skill Equivalences': 'تعادلات مهارت', 'Skill removed': 'مهارت حذف گردید', 'Skill Type': 'نونع مهارت', 'Skill Type added': 'نوع مهارت اضافه گردید', 'Skill Type Catalog': 'فهرست نوع مهارت', 'Skill Type deleted': 'نوع مهارت حذف گردید', 'Skill Type updated': 'نوع مهارت تجدید گردید', 'Skill updated': 'مهارت تجدید گردید', 'Skills': 'مهارت ها', 'Skin Marks': 'نشانی های جلد', 'slim': 'لاغر', 'Small Scale Mitigation': 'کاهش مقیاس های کوچک', 'Social Mobilisation': 'تحرک اجتماعی', 'Solid Waste Management': 'مدیریت ضایعات جامد', 'Sops and Guidelines Development': 'گسترش هدایت ها و غذاهای مایع', 'Sorry location %(location)s appears to be outside the area of parent %(parent)s.': 'متاسفانه ظاهرا موقعیت %(location)s ها خارج از ساحه سطح بالا است %(parent)s.', 'Sorry location %(location)s appears to be 
outside the area supported by this deployment.': 'متاسفانه ظاهراً موقعیت %(location)s ها خارج از ساحه تمویل شده این گسترش است.', 'Sorry location appears to be outside the area of parent %(parent)s.': 'ببخشید، به نظر می رسد که موقعیت بیرون از ساحه parent %(parent)s باشد.', 'Sorry location appears to be outside the area supported by this deployment.': 'ببخشید، به نظر می رسد که موقعیت بیرون از ساحه حمایت شده توسط این گسترش باشد.', 'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'ببخشید، فقط استعمال کنندگان همراه با نقش مدیر نقشه اجازه دارند تا این به این موقعیت ها تغییر وارد کنند', 'Sorry, there are no addresses to display': 'متاسفانه هیچ آدرس برای نمایش وجود ندارد', 'source': 'Target', 'Source': 'منبع', 'Source Name': 'نام منبع', 'Source URL': 'آدرس منبع', 'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'منطقه مشخص (مثلاً ساختمان/اتاق) در داخل موقعیت که این شخص/گروپ دیده می شود.', 'Specific locations need to have a parent of level': 'موقعیت های معین باید یک لایه بالا داشته باشند', 'Spherical Mercator (900913) is needed to use OpenStreetMap/Google/Bing base layers.': 'مرکاتور های کره یی (900913) لازم است تا در نقشه باز سرک/گوگل/لایه های بر اساس بنگ استفاده شود.', 'Spraying of Vectors': 'پاشیندن بخش ها', 'Staff': 'کارمند', 'Staff & Volunteers': 'داوطلبان و کارمند', 'Staff & Volunteers (Combined)': 'داوطلبان و کارمند(ترکیب شد)', 'Staff Assigned': 'کارمند تعیین گردید', 'Staff Assignment Details': 'جزئیات ماموریت کارمند', 'Staff Assignment removed': 'ماموریت کارمند حذف گردید', 'Staff Assignment updated': 'ماموریت کارمند تجدید گردید', 'Staff Assignments': 'ماموریت های کارمند', 'Staff ID': 'هوویت کارمند', 'Staff Management': 'مدیریت کارمند', 'Staff Member added': 'عضو کارمند اضافه گردید', 'Staff member added': 'عضو کارمند اضافه گردید', 'Staff Member deleted': 'عضو کارمند حذف گردید', 'Staff Member Details': 'جزئیات عضو کارمند', 'Staff Member Details updated': 'جزئیات عضو کارمند تجدید گردید', 'Staff 
Record': 'ضبط کارمند', 'Staff Report': 'گزارش کارمند', 'Staff with Contracts Expiring in the next Month': 'ختم قرارداد با کارمند در ماه آینده', 'Staff/Volunteer Record': 'کارمند/ ضبط داوطلب', 'Start Date': 'تاریخ شروع', 'Status': 'حالات', "Status 'assigned' requires the %(fieldname)s to not be blank": 'حالات "تعیین شده" درخواست %(fieldname)s خالی نباشد', 'Status added': 'حالات اضافه گردید', 'Status deleted': 'حالات حذف گردید', 'Status Details': 'جزئیات حالات', 'Status updated': 'حالات تجدید گردید', 'Statuses': 'حالات', 'Stockpiling, Prepositioning of Supplies': 'ذخیره کردن، ذخایر اضافی', 'straight': 'مستقیم', 'Strategy Development': 'ایجاد استراتیژی', 'Street Address': 'آدرس سرک', 'Street View': 'نمایش سرک', 'String used to configure Proj4js. Can be found from %(url)s': 'برای ترتیب کردن Proj4js رشته استفاده شد. از %(url)s های پیدا شده می تواند', 'Strong': 'قوی', 'Style': 'روش', 'Style invalid': 'روش غیر معتبر', 'Sub Chapter': 'فصل فرعی', 'Submission successful - please wait': 'اطاعت موفقانه - لطفا منتظر بمانید', 'Submit': 'ارائه کردن', 'suffered financial losses': 'نقص های مالی', 'Supervisor': 'ناظر', 'Supplier': 'حمایه کننده', 'Suppliers': 'حمایه کننده ها', 'Swiss Francs': 'سویدنی فرانسوی ها', 'Switch to 3D': 'تبدیل کردن به 3D', 'Symbologies': 'سمبول شناسی ها', 'Symbology': 'سمبول شناسی', 'Symbology added': 'سمبول شناسی اضافه گردید', 'Symbology deleted': 'سمبول شناسی حذف گردید', 'Symbology Details': 'جزئیات سمبول شناسی', 'Symbology removed from Layer': 'سمبول شناسی از لایه حذف گردید', 'Symbology updated': 'سمبول شناس تجدید گردید', 'Table Permissions': 'اجازه های جدول', 'Tablename': 'نام جدول', 'Tags': 'ضمیمه ها', 'tall': 'دراز', 'Task': 'وظیفه', 'Task added': 'وظیفه اضافه گردید', 'Task deleted': 'وظیفه حذف گردید', 'Task Details': 'جزئیات وظیفه', 'Task updated': 'وظیفه تجدید گردید', 'Tasks': 'وظایف', 'Team': 'تیم', 'Team added': 'تیم اضافه گردید', 'Team deleted': 'تیم حذف گردید', 'Team Description': 'توضیحات تیم', 'Team Details': 'جزئیات تیم', 'Team Leader': 'رهبر 
تیم', 'Team Member added': 'عضو تیم اضافه گردید', 'Team Members': 'عضو های تیم', 'Team Name': 'نام تیم', 'Team Type': 'نوع تیم', 'Team updated': 'تیم تجدید گردید', 'Teams': 'تیم ها', 'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.': 'به Geoserver می گوید تا Metatilinling را انجام دهد که تعداد لیبل های المثنی را کاهش می دهد.', 'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'مدت برای درجه پنجم در بخش ریاست کشور(مثال: رای یا postcode بخش فرعی) این درجه زیاد استفاده نمی شود', 'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'مدت برای درجه چهارم در بخش ریاست کشور (مثال: قریه، همسایه گی یا گرانبهایی)', 'Term for the primary within-country administrative division (e.g. State or Province).': 'مدت برای بخش ریاست ابتدایی کشور (مثال: ایالت یا ولایت)', 'Term for the secondary within-country administrative division (e.g. District or County).': 'مدت برای بخش ریاست فرعی کشور (مثال: ناحیه یا کشور)', 'Term for the third-level within-country administrative division (e.g. City or Town).': 'مدت برای بخش ریاست در درجه سوم کشور(مثال: شهر یا ولایت)', 'Terms of Service': 'مدت های خدمت', 'Tertiary Server (Optional)': 'Server ثالث (اختیاری)', 'Text': 'متن', 'The': 'همان', 'The area is': 'ساحه است', 'The Area which this Site is located within.': 'ساحه ای که این ساحه در آن موقعیت دارد.', 'The attribute used to determine which features to cluster together (optional).': 'مشخصه برای تعیین کردن این که کدام مشخصه برای گروه های یکجا است استفاده می شود', 'The attribute which is used for the title of popups.': 'مشخصه برای عنوان Popups استفاده می شود', 'The attribute within the KML which is used for the title of popups.': 'نشان های که در داخل KML برای عنوان پنجره ها استفاده می شوند', 'The attribute(s) within the KML which are used for the body of popups. 
(Use a space between attributes)': 'نشان های که در داخل KML برای متن پنجره ها استفاده می شوند. (یک فضا را در میان نشانه ها استفاده نمایید)', 'The body height (crown to heel) in cm.': 'ارتفاع متن (سراپا) به سانتی متر.', 'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'موقعیت فعلی شخص/گروپ، که می تواند (برای گزارش دهی) عمومی یا دقیق (برای نمایش دادن روی یک نقشه) باشد. یک چند حروف را داحل نمایید تا موقعیت های موجوده را جستجو نمایید.', 'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'ایمیل آدرس که تقاضا های تاییدی به آن فرستاده می شود (معمولاً می تواند یک گروپ از ایمیل ها یا یک ایمیل باشد). اگر موقعیت خالی باشد پس تقاضا ها طور اتومات تایید می شوند اگر مدیر مطابقت نماید.', 'The facility where this position is based.': 'تسهیلات جایی که رتبه قرار داده شده است.', 'The first or only name of the person (mandatory).': 'فقط نام شخص (الزامی)', 'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'شکل آدرس عبارت است ازhttp://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.', 'The language you wish the site to be displayed in.': 'لسان که شما می خواهید ساحه نمایش داده شود', 'The length is': 'طول عبارت است از', 'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'موقعیت که شخص از آن آمده است، که می تواند (برای گزارش دهی) عمومی یا دقیق (برای نمایش دادن روی یک نقشه) باشد. 
یک چند حروف را داحل نمایید تا موقعیت های موجوده را جستجو نمایید.', 'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'موقعیت که شخص به آن می رود، که می تواند (برای گزارش دهی) عمومی یا دقیق (برای نمایش دادن روی یک نقشه) باشد. یک چند حروف را داحل نمایید تا موقعیت های موجوده را جستجو نمایید.', 'The map will be displayed initially with this latitude at the center.': 'نقشه برای اولین بار درین عرض جغرافیایی نمایش داده خواهد شد', 'The map will be displayed initially with this longitude at the center.': 'نقشه برای اولین بار درین طول جغرافیایی نمایش داده خواهد شد', 'The Maximum valid bounds, in projected coordinates': 'حد اکثر حدود معتبر، در هماهنگی های طرح شده', 'The minimum number of features to form a cluster. 0 to disable.': 'حد اقل تعداد مشخصه ها برای تشکیل دادن این گروه. 0 به غیر فعال شدن', 'The name to be used when calling for or directly addressing the person (optional).': 'نام که برای صدا نمودن یا مستقیماً نام بردن از کسی (اختیاری) استفاده می شود.', 'The number of pixels apart that features need to be before they are clustered.': 'تعداد پکسل های که لازم است تا قبل از گروپ شدن شخص شود.', 'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'تعداد مربع ها در اطراف نقشه برای داونلود نمودن. صفر به آن معنی است که صفحه اول زود تر بارگیری می شود، اعداد بلند تر به آن معنی است که سوژه متعاقب تیز تر است.', 'The Organization Registry keeps track of all the relief organizations working in the area.': 'دفتر ثبت اسناد موسسه تمام چگونگی استراحت موسسه ها را در ساحه کاری نگهداری می کند.', "The Project module can be used to record Project Information and generate Who's Doing What Where reports.": 'مودل پروژه می تواند برای ثبت نمودن معلومات پروژه و اینکه بیان نماید که کی در کجا چی می کند استفاده می شود.', "The provided 'formuuid' is invalid. 
You have selected a Form revision which does not exist on this server.": "'formuuid' غیر مجاز است. شما یک مرور فارم را انتخاب نموده اید که در سرور موجود نیست.", "The provided 'jobuuid' is invalid. The session of Form upload is invalid. You should retry uploading.": "'jobuuid' غیر مجاز است. نشست فارم اپلود شده غیر مجاز است. شما باید دوباره اپلود نمایید.", "The staff member's official job title": 'عنوان وظیفه رسمی عضو کارمند', 'The system supports 2 projections by default:': 'سیستم به شکل پیش فرض دو تصویر را حمایه می تواند', 'The uploaded Form is unreadable, please do manual data entry.': 'شکل دریافت شده غیر قابل خوانش است، لطفاً معلومات را مستقیماً وارد نمایید.', 'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'URL برای صفحه بدست آوردن ظرفیت های خدمات طرح نقشه انترنتی (WMS) لایه کی را می خواهید از طریق صفحه مرورگر روی نقشه موجود باشد.', "The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'آدرس فایل عکس. 
اگر شما یک فایل عکس اپلود ننمایید، پس شما باید موقعیت آن را درینجا تعیین نمایید', 'The URL to access the service.': 'آدرس برای دسترسی به خدمت', "The volunteer's role": 'نقش داوطلب', 'The weight in kg.': 'وزن به کیلوگرام', 'Theme': 'زمینه', 'Theme added': 'زمینه اضافه گردید', 'Theme added to Activity': 'زمینه به فعالیت اضافه گردید', 'Theme added to Project': 'زمینه به پروژه اضافه گردید', 'Theme added to Project Location': 'زمینه به موقعیت پروژه اضافه گردید', 'Theme Data': 'زمینه معلومات', 'Theme Data deleted': 'زمینه معلومات حذف گردید', 'Theme Data updated': 'زمینه معلومات تجدید گردید', 'Theme deleted': 'زمینه حذف گردید', 'Theme Details': 'جزئیات زمینه', 'Theme Layer': 'لایه زمینه', 'Theme removed from Activity': 'زمینه از فعالیت حذف گردید', 'Theme removed from Project': 'زمینه از پروژه حذف گردید', 'Theme removed from Project Location': 'زمینه از موقعیت پروژه حذف گردید', 'Theme updated': 'زمینه تجدید گردید', 'Themes': 'زمینه ها', 'There are multiple records at this location': 'درین موقعیت ضبط های مختلف موجود است', "There are no details for this person yet. Add Person's Details.": 'برای این شخص جزئیات موجود نیست. جزئیات شخص را اضافه نمایید.', 'There are too many features, please Zoom In': 'مشخصه های زیادی موجود است، لطفاً زوم نمایید', 'There is no address for this person yet. Add new address.': 'برای این شخص هنوز هیچ آدرس موجود نیست. یک آدرس جدید اضافه نمایید', 'There is no status for this %(site_label)s yet. Add %(site_label)s Status.': 'تا هنوز هیچ جایگاه برای این %(site_label)s نیست. 
جایگاه %(site_label)s را اضافه نمایید.', 'There was a problem, sorry, please try again later.': 'یک مشکل بوجود آمد، متاسفانه، لطفاً کمی بعد تر دوباره امتحان نمایید', 'These are the filters being used by the search.': 'این ها فلتر های هستند که توسط جستجو استعمال می شوند.', 'These need to be added in Decimal Degrees.': 'این باید به درجات اعشاری اضافه شود', 'This email-address is already registered.': 'این آدرس ایمیل قبلاً راجستر شده است', 'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': 'این مناسب است اگر این سطح تحت ساختمان است. برای جلوگیری نمودن از تعریف تصادفی بعد از تکمیل این سطح، این را می توانید که False بسازید.', 'This is normally edited using the Widget in the Style Tab in the Layer Properties on the Map.': 'این را می توان معمولاً با استفاده از ودجت در استایل در لایه مشخصات در نقشه تغییر دهید.', 'This job has already been finished successfully.': 'این وظیفه قبلاً موفقانه به اتمام رسید', 'This level is not open for editing.': 'این درجه برای اصلاح کردن باز نشده است', 'This role can not be assigned to users.': 'این نقش برای استفاده کننده گان تعیین شده نمی تواند ', 'This should be an export service URL, see': 'این باید یک آدرس خدمت صادر شونده باشد، ببینید', 'Tiled': 'مربع شده', 'Time': 'زمان', 'Time Actual': 'زمان حقیقی', 'Time Estimate': 'زمان تخمینی', 'Time Estimated': 'زمان تخمین شده', 'Time Frame': 'زمان معین', 'Time Log': 'زمان ورودی', 'Time Log Deleted': 'زمان ورودی حذف گردید', 'Time Log Updated': 'زمان ورودی تجدید گردید', 'Time Logged': 'زمان ورودی', 'Time Taken': 'زمان گرفته شده', 'Timeline': 'مسیر زمانی', 'times': 'اوقات', 'times (0 = unlimited)': 'اوقات (0=نا محدود)', 'times and it is still not working. We give in. Sorry.': 'اوقات و هنوز کار نمی کند. ما می دهیم. 
متاسفانه', 'Title': 'عنوان', 'Title to show for the Web Map Service panel in the Tools panel.': 'عنوان برای نمایش خدمت نقشه جهانی در panel لوازم', 'TMS Layer': 'لایه TMS', 'to download a OCR Form.': 'برای اخذ کردن یک فایل OCR', 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in your Map Config': 'برای تغییرات آوردن در OpenStreetMap شما باید تنظیمات OpenStreetMap را در Map Config تغییر دهید.', 'To move the Timeline: use the mouse scroll wheel, the arrow keys or grab and drag the Timeline.': 'برای حذف نمودن مسیر زمانی: از چرخ موس یا کلید های جهت یابی استفاده نمایید و یا مسیر زمانی را کش کنید.', 'To Print or Share the Map you will have to take a screenshot. If you need help taking a screen shot, have a look at these instructions for %(windows)s or %(mac)s': 'برای چاپ نمودن یا شریک ساختن نقشه باید یک تصویر از زمینه کمپیوتر تان را بگیرید. اگر برای این کار به کمک نیاز دارید، به این دستورات برای %(windows)s یا %(mac)s مراجعه نمایید.', 'to reset your password': 'برای دوباره تنظیم کردن رمز عبور', "To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "برای جستجو نمودن یک موقعیت، نام را وارد نمایید. شما باید % را منحیث ولدکارت استفاده نمایید. برای لست نمودن تمام موقعیت ها 'جستجو' را بدون تمام دخولی ها فشار دهید.", "To search for a member, enter any portion of the name of the person or group. You may use % as wildcard. Press 'Search' without input to list all members.": "برای جستجو نمودن یک عضو، هر کدام از قسمت های نام شخص یا گروپ را وارد نمایید. شما می توانید از % منحیث ولدکارت استفاده نمایید. برای لست نمودن تمام اعضا 'جستجو' را بدون دخولی ها فشار دهید.", "To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. 
Press 'Search' without input to list all persons.": 'برای جستجو نمودن یک شخص، هر کدام از قسمت های نام، نام وسطی، تخلص شان یا شماره شناسایی یک شخص را بدون فاصله را وارد نمایید.', 'tonsure': 'تراشیدن', 'Tools and Guidelines Development': 'ایجاد رهنما ها و لوازم', 'total': 'مجموعی', 'Total': 'مجموعی', 'Total Annual Budget': 'مجموع بودجه سالیانه', 'Total Funding (Local Currency)': 'سرمایه موجود(پول رایج فعلی)', 'Total Funding Amount': 'مقدار سرمایه مجموع', 'Total Persons': 'مجموع اشخاص', 'Total Population': 'نفوس مجموع', 'Total Records: %(numrows)s': '%(numrows)s :ضبط مجموع', 'Tourist Group': 'گروپ توریست', 'Trackable': 'قابل دریافت', 'Training': 'آموزشی', 'Training added': 'آموزش اضافه گردید', 'Training Course Catalog': 'فهرست دوره آموزشی', 'Training deleted': 'آموزش حذف گردید', 'Training Details': 'جزئیات آموزش', 'Training Event': 'رویداد های آموزش', 'Training Event added': 'رویداد آموزش اضافه گردید', 'Training Event deleted': 'رویداد آموزش حذف گردید', 'Training Event Details': 'جزئیات رویداد آموزش', 'Training Event updated': 'رویداد آموزش تجدید گردید', 'Training Events': 'رویداد های آموزشی', 'Training Facility': 'تسهیلات آموزش', 'Training Hours (Month)': 'ساعات آموزش(ماه ها)', 'Training Hours (Year)': 'ساعات آموزش(سال)', 'Training of Community/First Responders': 'آموزش انجمن/ اولین پاسخ دهنده ها', 'Training of Master Trainers/Trainers': 'آموزش استادان ماستر/آموزگاران', 'Training Report': 'گزارش آموزش', 'Training updated': 'آموزش تجدید گردید', 'Trainings': 'آموزش ها', 'Transfer': 'انتقال', 'Transit': 'عبور', 'Transparent?': 'شفاف', 'Tree and Mangrove Planting': 'درخت و کشت درخت', 'Type': 'نوع', "Type the first few characters of one of the Participant's names.": 'چند حرف اول نام اشتراک کننده را بنویسید', "Type the first few characters of one of the Person's names.": 'چند حرف اول نام شخص را بنویسید', 'UN agency': 'نمایندگی ملل متحد', 'Unable to parse CSV file or file contains invalid data': 'به فایل CSV دسترسی پیدا نشد یا فایل شامل معلومات غیر معتبر است', 'Uncheck 
all': 'انتخاب نکردن همه', 'United States Dollars': 'دالر های ایالات متحده', 'Units': 'بخش ها', 'Unknown': 'ناشناخته', 'unknown': 'ناشناخته', 'unlimited': 'نا محدود', 'Unmark as duplicate': 'نشانی نکردن منحیث المثنی', 'Unspecified': 'تعیین ناشده', 'Unsupported data format': 'شکل معلومات حمایه نشد', 'Unsupported method': 'روش حمایه نشده', 'UPDATE': 'تجدید', 'Update Coalition': 'پیوسته گی را تجدید نمایید', 'Update Report': 'گزارش را تجدید نمایید', 'Update this entry': 'این ورودی را تجدید نمایید', 'updated': 'تجدید شده', 'Upload an image file (png or jpeg), max. 400x400 pixels!': 'یک فایل عکس را اپلود نمایید (png یا jpeg) حداکثر. 400x400 pixels!', "Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'یکل فایل عکس را اینجا اپلود نمایید. اگر شما یک فایل عکس را اپلود ننمایید، پس شما باید آدرس آن را در بخش آدرس تعیین نمایید.', 'Upload Format': 'شکل اپلود', 'Upload Scanned OCR Form': 'اپلود کردن شکل OCR سکن شده', 'Upload Shapefile': 'شکل فایل اپلود', 'Uploaded file is not a PDF file. Provide a Form in valid PDF Format.': 'فایل اپلود شده یک فایل PDF نمی باشد. یک فایل PDF شکل معتبر را تهیه نمایید.', "Uploaded file(s) are not Image(s). Supported image formats are '.png', '.jpg', '.bmp', '.gif'.": 'فایل های اپلود شده عکس نمی باشد. شکل های عکس معتبر عبارتند از "gif"."bmp"."jpg"."png".', 'Uploaded PDF file has more/less number of page(s) than required. Check if you have provided appropriate revision for your Form as well as check the Form contains appropriate number of pages.': 'فایل های اپلود شده PDF صفحات بیشتر/کمتر از مقدار مورد نیاز را دارا است. 
بررسی نمایید که آیا شما مقدار معین فارم ها را داخل نموده اید و همچنان بررسی نمایید که فارم دارای مقدار معین صفحات است.', 'Urgent': 'عاجل', 'URL': 'URL', 'URL to a Google Calendar to display on the project timeline.': 'آدرس به جنتری Google برای نمایش دادن صفحه پروژه', 'Use decimal': 'از اعشاری استفاده نمایید', 'Use default': 'از پیش فرض استفاده نمایید', 'Use deg, min, sec': 'از درجه، دقیقه، ثانیه استفاده نمایید', 'Use Geocoder for address lookups?': 'برای آدرس های Lookup از Geocoder استفاده نمایید؟', 'Use Site?': 'استفاده سایت؟', 'Use this to set the starting location for the Location Selector.': 'برای انتخاب کننده موقعیت، ازین موقعیت شروع استفاده نمایید', 'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'در onHover Tooltip و گروه Popups برای اختلاف بین انواع استفاده گردید', 'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'در onHover Tooltip استفاده می شد و همچنان در اولین رشته گروه Popups برای اختلاف بین ضبط ها استفاده می شد.', 'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'برای معقولانه بودن موقعیت وارد شده عرض جغرافیایی استفاده می شود. شاید برای فلتر کردن لست های منابع که دارای موقعیت ها هستند استفاده شود.', 'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'برای معقولانه بودن موقعیت وارد شده طول جغرافیایی استفاده می شود. 
شاید برای فلتر کردن لست های منابع که دارای موقعیت ها هستند استفاده شود.', 'Used to populate feature attributes which can be used for Styling.': 'برای مشخصه معروف استفاده شده است که می تواند برای شکل دهیی نیز استفاده شود.', 'User': 'استفاده کننده', 'User Account': 'حساب استفاده کننده', 'User added to Role': 'استفاده کننده به نقش اضافه گردید', 'User Profile': 'پروفایل استفاده کننده', 'User Roles': 'نقش های استفاده کننده', 'User with Role': 'استفاده کننده با نقش', 'Username': 'نام استفاده کننده', 'Users': 'استفاده کننده ها', 'Users in my Organizations': 'استفاده کننده ها در موسسه های من', 'Users with this Role': 'استفاده کننده ها با این نقش', 'Uses the REST Query Format defined in': 'از شکل سوالREST استفاده می کند', 'using default': 'استفاده از پیش فرض', 'Valid From': 'شکل معتبر', 'Valid Until': 'معتبر الی', 'Validation error': 'خطای معتبر', 'Value': 'ارزش', 'VCA (Vulnerability and Capacity Assessment)': 'VCA (آسیب پذیری و ظرفیت ارزیابی)', 'Vector Control': 'کنترول مسیر', 'Verified': 'تایید شد', 'Version': 'نسخه', 'Very Good': 'بسیار خوب', 'Very Strong': 'بسیار قوی', 'Video Tutorials': 'ویدیو های Tutorial', 'View': 'نمایش', 'View full screen': 'نمایش تکمیل صفحه', 'View Fullscreen Map': 'نمایش نقشه در صفحه کامل', 'View Location Details': 'نمایش جزئیات موقعیت', 'View on Map': 'نمایش در نقشه', 'Vocational Training and Employment Skills': 'آموزش های مسلکی و مهارت های استخدام', 'Volunteer': 'داوطلب', 'Volunteer added': 'داوطلب اضافه گردید', 'Volunteer Cluster': 'گروه داوطلب', 'Volunteer Cluster added': 'گروه داوطلب اضافه گردید', 'Volunteer Cluster deleted': 'گروه داوطلب حذف گردید', 'Volunteer Cluster Position': 'منصب گروه داوطلب', 'Volunteer Cluster Position added': 'منصب گروه داوطلب اضافه گردید', 'Volunteer Cluster Position deleted': 'منصب گروه داوطلب حذف گردید', 'Volunteer Cluster Position updated': 'منصب گروه داوطلب تجدید گردید', 'Volunteer Cluster Type': 'نوع گروه داوطلب', 'Volunteer Cluster Type added': 'نوع گروه داوطلب اضافه گردید', 'Volunteer Cluster Type deleted': 
'نوع گروه داوطلب حذف گردید', 'Volunteer Cluster Type updated': 'نوع گروه داوطلب تجدید گردید', 'Volunteer Cluster updated': 'گروه داوطلب تجدید گردید', 'Volunteer deleted': 'داوطلب حذف گردید', 'Volunteer Details': 'جزئیات داوطلب', 'Volunteer Details updated': 'جزئیات داوطلب تجدید گردید', 'Volunteer Hours': 'ساعات داوطلب', 'Volunteer Insurance': 'بیمه داوطلب', 'Volunteer Management': 'مدیریت داوطلب', 'Volunteer Recognition': 'شناخت داوطلب ', 'Volunteer Record': 'ضبط داوطلب', 'Volunteer Recruitment': 'استخدام داوطلب', 'Volunteer Report': 'گزارش داوطلب', 'Volunteer Role': 'نقش داوطلب', 'Volunteer Role added': 'نقش داوطلب اضافه گردید', 'Volunteer Role Catalog': 'فهرست نقش داوطلب', 'Volunteer Role deleted': 'نقش داوطلب حذف گردید', 'Volunteer Role Details': 'جزئیات نقش داوطلب', 'Volunteer Role updated': 'نقش داوطلب تجدید گردید', 'Volunteer Service Record': 'ضبط خدمت داوطلب', 'Volunteer Training': 'آموزش داوطلب', 'Volunteering in Emergencies Guidelines/Toolkit': 'داوطلب شدن در رهنما های عاجل/ Toolkit', 'Volunteering in Pandemic Emergency Situations': 'داوطلب شدن در حالات ضروری وسیع', 'Volunteers': 'داوطلبین', 'Warehouse': 'انبار', 'Warehouse Manager': 'مدیر انبار', 'Warehouse Stock': 'ذخیره انبار', 'Warehouses': 'انبار ها', 'WARNING': 'اخطار', 'Water and Sanitation': 'حفظ الصحه و آب', 'Water Testing': 'آزمایش آب', 'Watsan': 'Watsan', 'Watsan Officer': 'مامور Watsan', 'Watsan Technician': 'تخنیکر Watsan', 'wavy': 'موج دار', 'We have tried': 'ما کوشش نمودیم', 'Weak': 'ضعیف', 'Web Map Service Browser Name': 'نام Web Map Service Browser', 'Web Map Service Browser URL': 'آدرس Web Map Service Browser', 'Website': 'وب سایت', 'Week': 'هفته', 'Weekends only': 'فقط آخر هفته', 'Weekly': 'هفته وار', 'Weight': 'وزن', 'Weight (kg)': 'وزن (کیلوگرام)', 'Well-Known Text': 'متن مشهور', 'WFS Layer': 'لایه WFS', 'WGS84 (EPSG 4236) is required for many WMS servers.': 'WGS84 (EPSG 4236) برای WMS server ها ضرورت است', 'What order to be contacted in.': 'به کدام ترتیب تماس گرفته شود.', 'When this 
search was last checked for changes.': 'کدام این جستجو آخرین تغییرات بررسی شده است', 'Whether calls to this resource should use this configuration as the default one': 'هرچند تماس ها به این منبع باید این تنظیمات را منحیث پیش فرض استفاده کنند', 'Whether the Latitude & Longitude are inherited from a higher level in the location hierarchy rather than being a separately-entered figure.': 'هر چند عرض البلد و طول البلد از یک سطح بالاتر در سلسله مراتب گرفته می شوند، با وجودیکه یک شکل جدا داخل شده است.', 'Whether the resource should be tracked using S3Track rather than just using the Base Location': 'منبع باید با استفاده از S3Track دنباله شود با وجود استفاده نمودن موقعیت اساسی.', 'Whiskers': 'ریش', 'white': 'سفید', 'Who is doing What Where': 'کی اینجا چه می کند', 'wider area, longer term, usually contain multiple Activities': 'ساحه وسیع تر، مدت طویل تر، معمولاً شامل فعالیت های چندگانه است', 'widowed': 'بیوه شد', 'Will create and link your user account to the following records': 'حساب استفاده کننده شما را به ضبط ذیل ایجاد و لینک می کند', 'With best regards': 'به آرزوی موفقیت', 'WKT is Invalid!': 'WKT غیر معتبر است!', 'WMS Layer': 'لایه WMS', 'Work': 'کار', 'Work on Program': 'کار بالای برنامه', 'X-Ray': 'اشعه X', 'XML parse error': 'خطای تعیین XML', 'XSLT stylesheet not found': 'XSLT style sheet دریافت نشد', 'XSLT transformation error': 'خطای تغییر XSLT', 'XYZ Layer': 'لایه XYZ', 'Year': 'سال', 'Year that the organization was founded': 'سال که موسسه بنا نهاده شد', 'yes': 'بلی', 'Yes': 'بلی', 'You can click on the map below to select the Lat/Lon fields': 'شما می توانید برای انتخاب کردن عرض و طول جغرافیایی بالای نقشه تحتانی کلیک نمایید', "You can search by by group name, description or comments and by organization name or acronym. You may use % as wildcard. Press 'Search' without input to list all.": 'شما می توانید با نام گروپ، توضیحات یا نظرات و توسط نام موسسه یا سرنام جستجو نمایید. شما می توانید از % wildcard استفاده نمایید. 
"جستجو" را فشار دهید بدون وارد شدن به لست همه.', "You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events.": 'شما می توانید توسط نام دوره، نام دعوی، یا رویداد نظرات جستجو نمایید. شما می توانید از % wildcard استفاده نمایید. بدون اضافه کردن تمام رویداد های لست "جستجو" را فشار دهید.', "You can search by job title or person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": 'شما می توانید با عنوان وظیفه یا نام شخص جستجو نمایید – هر اسم، وسط یا تخلص که توسط فاصله جدا شده را وارد نمایید.شما می توانید از % wildcard استفاده نمایید. بدون وارد کردن لست تمام اشخاص "جستجو" را فشار دهید.', 'You can search by name, acronym or comments': 'شما می توانید با نام، سرنام، یا هر نظر دیگر جستجو نمایید.', 'You can search by name, acronym, comments or parent name or acronym.': 'شما می توانید با نام سرنام نظرات یا نام بالاتر سرنام جستجو نمایید.', "You can search by person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": 'شما می توانید با نام شخص جستجو نمایید – نام تخلص یا نام وسطی را وارد نمایید، که توسط فاصله جدا شده باشد. شما می توانید از % wildcard استفاده نمایید. بدون وارد کردن لست تمام اشخاص "جستجو" را فشار دهید.', "You can search by trainee name, course name or comments. You may use % as wildcard. Press 'Search' without input to list all trainees.": 'شما می توانید با نام آموزگار نام دوره یا توضیحات جستجو نمایید. شما می توانید از % wildcard استفاده نمایید. 
بدون وارد کردن لست تمام آموزگاران "جستجو" را فشار دهید.', 'You can select an area on the image and save to crop it.': 'شما می توانید یک ساحه را بالای عکس انتخاب نمایید و زمانی که آن را قطع کردید ذخیره نمایید.', 'You can select the Draw tool': 'شما می توانید Draw tool را انتخاب نمایید', 'You can set the modem settings for SMS here.': 'شما می توانید تنظیمات modem را برای پیام اینجا ترتیب نمایید', 'You do not have permission for any facility to perform this action.': 'شما برای هیچ تسهیلات اجازه اجرای این عمل را ندارید.', 'You do not have permission for any organization to perform this action.': 'شما برای هیچ موسسه اجازه اجرای این عمل را ندارید.', "You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": "شما تغییرات ذخیره ناشده دارید. دکمه لغو کردن را فشار دهید، بعداً 'ذخیره کردن' را برای ذخیره سازی فشار دهید. برای دور ساختن آن کلید OK را فشار دهید.", 'You have unsaved changes. You need to press the Save button to save them': 'شما تغییرات ذخیره ناشده دارید برای ذخیره کردن آن شما باید کلید ذخیره کردن را فشار دهید.', 'You must agree to the Terms of Service': 'شما باید با جملات خدمت موافقت نمایید', 'You must enter a minimum of %d characters': 'شما حداقل باید %d حروف اضافه نمایید.', 'You need to have at least 2 records in this list in order to merge them.': 'برای یکجا کردن حد اقل شما باید 2 ضبط را در لست اضافه نمایید', 'Your name for this search. Notifications will use this name.': 'نام شما برای این ضبط. 
اطلاعیه ها ازین نام استفاده خواهند نمود.', 'Your request for Red Cross and Red Crescent Resource Management System (RMS) has been approved and you can now access the system at': 'درخواست شما برای سیستم مدیریت صلیب سرخ و هلال احمر تایید گردید حالا شما می تواید به سیستم دسترسی پیدا نمایید.', 'Youth Leadership Development': 'ایجاد رهبری جوانان', 'Zone': 'حوزه', 'Zoom': 'زوم', 'Zoom In': 'نزدیک آوردن', 'Zoom in closer to Edit OpenStreetMap layer': 'برای اصلاح کردن لایه OpenStreetMap نزدیک تر زوم نمایید', 'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': 'زوم کردن: داخل نقشه کلیک نمایید یا از کلید چپ mouse برای کشانیدن و ایجاد یک مربع استفاده نمایید.', 'Zoom Levels': 'درجه های زوم', 'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': 'از زوم خارج شده: داخل نقشه کلیک نمایید یا از کلید چپ mouse برای کشانیدن و ایجاد یک مربع استفاده نمایید.', 'Zoom to Current Location': 'به موقعیت فعلی زوم نمایید', 'Zoom to maximum map extent': 'به اندازه حداکثر وسعت نقشه زوم نمایید', }
54.745637
476
0.72306
edb3cf751f23883884e3733ae932ea502cffb989
7,523
py
Python
vel/api/model_config.py
cclauss/vel
78a6a20af80ff613898d2983c83fdb223634aaad
[ "MIT" ]
null
null
null
vel/api/model_config.py
cclauss/vel
78a6a20af80ff613898d2983c83fdb223634aaad
[ "MIT" ]
null
null
null
vel/api/model_config.py
cclauss/vel
78a6a20af80ff613898d2983c83fdb223634aaad
[ "MIT" ]
null
null
null
import datetime as dtm
import os.path

import torch

from vel.exceptions import VelInitializationException
from vel.internals.parser import Parser
from vel.internals.provider import Provider


class ModelConfig:
    """
    Read from YAML configuration of a model, specifying all details of the run.
    Is a frontend for the provider, resolving all dependency-injection requests.
    """

    # Marker file whose presence identifies the project's top-level directory.
    PROJECT_FILE_NAME = '.velproject.yaml'

    @staticmethod
    def find_project_directory(start_path) -> str:
        """
        Locate the top-level project directory.

        Walks upward from ``start_path`` until a directory containing the
        project marker file (``.velproject.yaml``) is found.

        Raises:
            RuntimeError: if the filesystem root is reached without finding
                the project file.
        """
        start_path = os.path.realpath(start_path)
        possible_name = os.path.join(start_path, ModelConfig.PROJECT_FILE_NAME)

        if os.path.exists(possible_name):
            return start_path

        up_path = os.path.realpath(os.path.join(start_path, '..'))

        # start_path is already canonical; at the filesystem root going up
        # is a no-op, which is our stop condition.
        if start_path == up_path:
            raise RuntimeError(f"Couldn't find project file starting from {start_path}")

        return ModelConfig.find_project_directory(up_path)

    @classmethod
    def from_file(cls, filename: str, run_number: int, reset=False, seed: int = None, device: str = 'cuda',
                  params=None):
        """
        Create model config from a YAML file.

        The model config is merged over the project-level config found via
        :meth:`find_project_directory`; model-level keys win on conflict.

        Raises:
            VelInitializationException: if the model config lacks a 'name' key.
        """
        with open(filename, 'r') as fp:
            model_config_contents = Parser.parse(fp)

        project_config_path = ModelConfig.find_project_directory(os.path.dirname(os.path.abspath(filename)))

        with open(os.path.join(project_config_path, cls.PROJECT_FILE_NAME), 'r') as fp:
            project_config_contents = Parser.parse(fp)

        # Model-level settings override project-level settings.
        aggregate_dictionary = {
            **project_config_contents,
            **model_config_contents
        }

        # Options that should exist for every config
        try:
            model_name = model_config_contents['name']
        except KeyError as e:
            raise VelInitializationException("Model configuration must have a 'name' key") from e

        return ModelConfig(
            model_name=model_name,
            filename=filename,
            configuration=aggregate_dictionary,
            run_number=run_number,
            project_dir=project_config_path,
            reset=reset,
            seed=seed,
            device=device,
            parameters=params
        )

    @classmethod
    def from_memory(cls, model_name: str, model_data: dict, run_number: int, project_dir: str,
                    reset=False, seed: int = None, device: str = 'cuda', params=None):
        """ Create model config from supplied data (no files are read). """
        return ModelConfig(
            model_name=model_name,
            filename="[memory]",
            configuration=model_data,
            run_number=run_number,
            project_dir=project_dir,
            reset=reset,
            seed=seed,
            device=device,
            parameters=params
        )

    def __init__(self, model_name: str, filename: str, configuration: dict, run_number: int, project_dir: str,
                 reset=False, seed: int = None, device: str = 'cuda', parameters=None):
        self._model_name = model_name
        self.filename = filename
        self.device = device
        self.reset = reset
        self.run_number = run_number

        # Deterministic per-run default seed when none is given.
        self.seed = seed if seed is not None else (dtm.date.today().year + self.run_number)

        self.contents = configuration
        self.project_dir = project_dir

        # Command descriptors are keyed by command name (see get_command),
        # so the default must be a dict - a missing 'commands' key then
        # surfaces as a KeyError rather than a TypeError on lookup.
        self.command_descriptors = self.contents.get('commands', {})

        # This one is special and needs to get removed from the injection
        # environment - commands are resolved explicitly, not injected.
        if 'commands' in self.contents:
            del self.contents['commands']

        self.provider = Provider(self._prepare_environment(), {'model_config': self}, parameters=parameters)

    def _prepare_environment(self) -> dict:
        """ Return full environment for dependency injection """
        return {**self.contents, 'run_number': self.run_number}

    ####################################################################################################################
    # COMMAND UTILITIES
    def get_command(self, command_name):
        """ Return object for given command """
        return self.provider.instantiate_from_data(self.command_descriptors[command_name])

    def run_command(self, command_name, varargs):
        """ Instantiate and run the command, forwarding positional varargs. """
        command_descriptor = self.get_command(command_name)
        return command_descriptor.run(*varargs)

    ####################################################################################################################
    # MODEL DIRECTORIES
    def checkpoint_dir(self, *args) -> str:
        """ Return checkpoint directory for this model """
        return self.output_dir('checkpoints', self.run_name, *args)

    def data_dir(self, *args) -> str:
        """ Return data directory for given dataset """
        return self.project_data_dir(*args)

    def openai_dir(self) -> str:
        """ Return directory for openai output files for this model """
        return self.output_dir('openai', self.run_name)

    def project_data_dir(self, *args) -> str:
        """ Directory where project data files are stored """
        return os.path.join(self.project_dir, 'data', *args)

    def output_dir(self, *args) -> str:
        """ Directory where run output files are stored """
        return os.path.join(self.project_dir, 'output', *args)

    def project_top_dir(self, *args) -> str:
        """ Top-level project directory """
        return os.path.join(self.project_dir, *args)

    ####################################################################################################################
    # NAME UTILITIES
    @property
    def run_name(self) -> str:
        """ Return name of the run (model name plus run number). """
        return "{}/{}".format(self._model_name, self.run_number)

    @property
    def name(self) -> str:
        """ Return name of the model. """
        return self._model_name

    ####################################################################################################################
    # PROVIDER API
    def provide(self, name):
        """ Return a dependency-injected instance """
        return self.provider.instantiate_by_name(name)

    ####################################################################################################################
    # BANNERS - Maybe shouldn't be here, but they are for now
    def banner(self, command_name) -> None:
        """ Print a banner for running the system """
        device = torch.device(self.device)
        print("=" * 80)
        print(f"Pytorch version: {torch.__version__} cuda version {torch.version.cuda} "
              f"cudnn version {torch.backends.cudnn.version()}")
        print("Running model {}, run {} -- command {} -- device {}".format(
            self._model_name, self.run_number, command_name, self.device
        ))
        if device.type == 'cuda':
            device_idx = 0 if device.index is None else device.index
            print(f"CUDA Device name {torch.cuda.get_device_name(device_idx)}")
        print(dtm.datetime.now().strftime("%Y/%m/%d - %H:%M:%S"))
        print("=" * 80)

    def quit_banner(self) -> None:
        """ Print a banner after the system has finished running """
        print("=" * 80)
        print("Done.")
        print(dtm.datetime.now().strftime("%Y/%m/%d - %H:%M:%S"))
        print("=" * 80)

    ####################################################################################################################
    # Small UI utils
    def __repr__(self):
        return f"<ModelConfig at {self.filename}>"
40.229947
137
0.569852
ebd53a3d950587dff683ef17de0e89d30c337498
4,115
py
Python
configs/wider_face/ttfnet_d53_face.py
ellery92/mmdetection
5b1dfa2c97aac50458feb18ac3da11a68866f63c
[ "Apache-2.0" ]
null
null
null
configs/wider_face/ttfnet_d53_face.py
ellery92/mmdetection
5b1dfa2c97aac50458feb18ac3da11a68866f63c
[ "Apache-2.0" ]
null
null
null
configs/wider_face/ttfnet_d53_face.py
ellery92/mmdetection
5b1dfa2c97aac50458feb18ac3da11a68866f63c
[ "Apache-2.0" ]
null
null
null
# mmdetection config: TTFNet with a Darknet-53 backbone for WIDER FACE detection.
# model settings
input_size = 512
model = dict(
    type='TTFNet',
    pretrained='./pretrain/darknet53.pth',
    backbone=dict(
        type='DarknetV3',
        layers=[1, 2, 8, 8, 4],
        inplanes=[3, 32, 64, 128, 256, 512],
        planes=[32, 64, 128, 256, 512, 1024],
        norm_cfg=dict(type='BN'),
        out_indices=(1, 2, 3, 4),
        frozen_stages=1,
        norm_eval=False),
    neck=None,
    bbox_head=dict(
        type='TTFHead',
        inplanes=(128, 256, 512, 1024),
        head_conv=128,
        wh_conv=64,
        hm_head_conv_num=2,
        wh_head_conv_num=2,
        num_classes=2,  # background + face
        wh_offset_base=16,
        wh_agnostic=True,
        wh_gaussian=True,
        shortcut_cfg=(1, 2, 3),
        norm_cfg=dict(type='BN'),
        alpha=0.54,
        hm_weight=1.,
        wh_weight=5.))
cudnn_benchmark = True
# model training and testing settings
train_cfg = dict(
    assigner=dict(
        type='MaxIoUAssigner',
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        min_pos_iou=0.,
        ignore_iof_thr=-1,
        gt_max_assign_all=False),
    smoothl1_beta=1.,
    allowed_border=-1,
    pos_weight=-1,
    neg_pos_ratio=3,
    debug=False)
test_cfg = dict(
    nms=dict(type='nms', iou_thr=0.45),
    min_bbox_size=0,
    score_thr=0.02,
    max_per_img=200)
# dataset settings
dataset_type = 'WIDERFaceDataset'
data_root = 'data/widerface/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_label=True),
    dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    imgs_per_gpu=16,
    workers_per_gpu=2,
    train=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'WIDER_train/images/Annotations/train.txt',
            img_prefix=data_root + 'WIDER_train/images',
            min_size=17,  # drop tiny ground-truth faces below this pixel size
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'WIDER_val/images/Annotations/val.txt',
        img_prefix=data_root + 'WIDER_val/images',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'WIDER_val/images/Annotations/val.txt',
        img_prefix=data_root + 'WIDER_val/images',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.015, momentum=0.9, weight_decay=0.0004,
                 paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 10,
    step=[18, 22])
checkpoint_config = dict(interval=1)
bbox_head_hist_config = dict(
    model_type=['ConvModule', 'DeformConvPack'],
    sub_modules=['bbox_head'],
    save_every_n_steps=500)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 24
device_ids = range(2)
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'work_dirs/ttfnet_d53_face'
load_from = None
# Removed a dead assignment that pointed resume_from at
# "work_dirs/ttfnet_d53_face/latest.pth" only to overwrite it with None on
# the next line. Set resume_from to that checkpoint path to resume training.
resume_from = None
workflow = [('train', 1)]
29.818841
77
0.622114
5921c493b179a141674fe44fdde02451be85e99c
7,059
py
Python
intersight/models/iam_resource_permission_all_of.py
sdnit-se/intersight-python
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
[ "Apache-2.0" ]
21
2018-03-29T14:20:35.000Z
2021-10-13T05:11:41.000Z
intersight/models/iam_resource_permission_all_of.py
sdnit-se/intersight-python
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
[ "Apache-2.0" ]
14
2018-01-30T15:45:46.000Z
2022-02-23T14:23:21.000Z
intersight/models/iam_resource_permission_all_of.py
sdnit-se/intersight-python
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
[ "Apache-2.0" ]
18
2018-01-03T15:09:56.000Z
2021-07-16T02:21:54.000Z
# coding: utf-8 """ Cisco Intersight Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501 The version of the OpenAPI document: 1.0.9-1295 Contact: intersight@cisco.com Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from intersight.configuration import Configuration class IamResourcePermissionAllOf(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" openapi_types = { 'permission_roles': 'list[IamPermissionToRoles]', 'target_app': 'str', 'holder': 'IamSecurityHolder', 'resource': 'MoBaseMo' } attribute_map = { 'permission_roles': 'PermissionRoles', 'target_app': 'TargetApp', 'holder': 'Holder', 'resource': 'Resource' } def __init__(self, permission_roles=None, target_app=None, holder=None, resource=None, local_vars_configuration=None): # noqa: E501 """IamResourcePermissionAllOf - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._permission_roles = None self._target_app = None self._holder = None self._resource = None self.discriminator = None if permission_roles is not None: self.permission_roles = permission_roles if target_app is not None: self.target_app = target_app if holder is not None: self.holder = holder if resource is not None: self.resource = resource @property def permission_roles(self): """Gets the permission_roles of this IamResourcePermissionAllOf. # noqa: E501 :return: The permission_roles of this IamResourcePermissionAllOf. # noqa: E501 :rtype: list[IamPermissionToRoles] """ return self._permission_roles @permission_roles.setter def permission_roles(self, permission_roles): """Sets the permission_roles of this IamResourcePermissionAllOf. :param permission_roles: The permission_roles of this IamResourcePermissionAllOf. # noqa: E501 :type: list[IamPermissionToRoles] """ self._permission_roles = permission_roles @property def target_app(self): """Gets the target_app of this IamResourcePermissionAllOf. # noqa: E501 Name of the service owning the resource. # noqa: E501 :return: The target_app of this IamResourcePermissionAllOf. # noqa: E501 :rtype: str """ return self._target_app @target_app.setter def target_app(self, target_app): """Sets the target_app of this IamResourcePermissionAllOf. Name of the service owning the resource. 
# noqa: E501 :param target_app: The target_app of this IamResourcePermissionAllOf. # noqa: E501 :type: str """ self._target_app = target_app @property def holder(self): """Gets the holder of this IamResourcePermissionAllOf. # noqa: E501 :return: The holder of this IamResourcePermissionAllOf. # noqa: E501 :rtype: IamSecurityHolder """ return self._holder @holder.setter def holder(self, holder): """Sets the holder of this IamResourcePermissionAllOf. :param holder: The holder of this IamResourcePermissionAllOf. # noqa: E501 :type: IamSecurityHolder """ self._holder = holder @property def resource(self): """Gets the resource of this IamResourcePermissionAllOf. # noqa: E501 :return: The resource of this IamResourcePermissionAllOf. # noqa: E501 :rtype: MoBaseMo """ return self._resource @resource.setter def resource(self, resource): """Sets the resource of this IamResourcePermissionAllOf. :param resource: The resource of this IamResourcePermissionAllOf. # noqa: E501 :type: MoBaseMo """ self._resource = resource def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list( map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict( map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items())) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, IamResourcePermissionAllOf): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not 
isinstance(other, IamResourcePermissionAllOf): return True return self.to_dict() != other.to_dict()
34.773399
1,052
0.638617
a912b9dce62a105914574cfb241027366b1a6341
4,128
py
Python
examples/display_text_background_color_padding.py
lesamouraipourpre/Adafruit_CircuitPython_Display_Text
c31c0ef2da48bfed7da2188039b59251f02110ea
[ "Unlicense", "MIT-0", "MIT" ]
37
2019-01-11T16:31:15.000Z
2021-11-09T10:43:38.000Z
examples/display_text_background_color_padding.py
tekktrik/Adafruit_CircuitPython_Display_Text
f60a4095da77710c0b81d66ada148d09ad25dbd4
[ "MIT" ]
123
2019-01-15T21:49:49.000Z
2022-03-13T00:16:52.000Z
examples/display_text_background_color_padding.py
tekktrik/Adafruit_CircuitPython_Display_Text
f60a4095da77710c0b81d66ada148d09ad25dbd4
[ "MIT" ]
41
2019-02-12T01:54:41.000Z
2022-03-12T22:34:31.000Z
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries # SPDX-License-Identifier: MIT """ This example shows the use color and background_color """ import time import board import displayio from adafruit_bitmap_font import bitmap_font from adafruit_display_text import label # Setup the SPI display if "DISPLAY" in dir(board): # use built in display (PyPortal, PyGamer, PyBadge, CLUE, etc.) # see guide for setting up external displays (TFT / OLED breakouts, RGB matrices, etc.) # https://learn.adafruit.com/circuitpython-display-support-using-displayio/display-and-display-bus display = board.DISPLAY else: print("Starting external display") # goes to serial only # Setup the LCD display with driver # You may need to change this to match the display driver for the chipset # used on your display from adafruit_ili9341 import ILI9341 # from adafruit_st7789 import ST7789 displayio.release_displays() # setup the SPI bus spi = board.SPI() tft_cs = board.D9 # arbitrary, pin not used tft_dc = board.D10 tft_backlight = board.D12 tft_reset = board.D11 while not spi.try_lock(): spi.configure(baudrate=32000000) spi.unlock() display_bus = displayio.FourWire( spi, command=tft_dc, chip_select=tft_cs, reset=tft_reset, baudrate=32000000, polarity=1, phase=1, ) # Number of pixels in the display DISPLAY_WIDTH = 320 DISPLAY_HEIGHT = 240 # display = ST7789(display_bus, width=240, height=240, rotation=0, rowstart=80, colstart=0) # create the display display = ILI9341( display_bus, width=DISPLAY_WIDTH, height=DISPLAY_HEIGHT, rotation=180, # The rotation can be adjusted to match your configuration. 
auto_refresh=True, native_frames_per_second=90, ) display.show(None) # font=terminalio.FONT # this is the Builtin fixed dimension font font = bitmap_font.load_font("fonts/LeagueSpartan-Bold-16.bdf") text = [] text.append("none") # no ascenders or descenders text.append("pop quops") # only descenders text.append("MONSTERs are tall") # only ascenders text.append("MONSTERs ate pop quops") # both ascenders and descenders text.append("MONSTER quops\nnewline quops") # with newline display.auto_refresh = True myGroup = displayio.Group() display.show(myGroup) text_area = [] myPadding = 4 for i, thisText in enumerate(text): text_area.append( label.Label( font, text=thisText, color=0xFFFFFF, background_color=None, background_tight=False, padding_top=myPadding, padding_bottom=myPadding, padding_left=myPadding, padding_right=myPadding, ) ) this_x = 10 this_y = 10 + i * 40 text_area[i].x = 10 text_area[i].y = 3 + i * 50 text_area[i].anchor_point = (0, 0) text_area[i].anchored_position = (this_x, this_y) myGroup.append(text_area[i]) print("background color is {}".format(text_area[0].background_color)) while True: time.sleep(2) text_area[0].text = "text" # change some text in an existing text box # Note: changed text must fit within existing number of characters # when the Label was created for area in text_area: area.background_color = 0xFF0000 print("background color is {:06x}".format(text_area[0].background_color)) time.sleep(2) for area in text_area: area.background_color = 0x000088 print("background color is {:06x}".format(text_area[0].background_color)) time.sleep(2) for area in text_area: area.background_color = 0x00FF00 print("background color is {:06x}".format(text_area[0].background_color)) time.sleep(2) for area in text_area: area.background_color = 0xFF0000 print("background color is {:06x}".format(text_area[0].background_color)) time.sleep(2) for area in text_area: area.background_color = None print("background color is {}".format(text_area[0].background_color))
29.276596
102
0.679506
9ded438115563294e79cbac2450a0b153011e746
387
py
Python
client_side/enums.py
botexpert/Chatter
8cd3262e38c58eae0fcbc49b3973f883a3185a48
[ "MIT" ]
null
null
null
client_side/enums.py
botexpert/Chatter
8cd3262e38c58eae0fcbc49b3973f883a3185a48
[ "MIT" ]
null
null
null
client_side/enums.py
botexpert/Chatter
8cd3262e38c58eae0fcbc49b3973f883a3185a48
[ "MIT" ]
2
2019-07-18T08:35:23.000Z
2019-07-18T09:24:00.000Z
import os class Intervals: POLL_REFRESH_INTERVAL = 1 # Receiving message check frequency HEARTBEAT_INTERVAL = 30 # Sent heartbeat frequency LOGIN_POLL_INTERVAL = 5000 # Wait time at login request class Host: LOGIN_PORT = os.getenv('HOST', '5557') # Login server port PORT = os.getenv('HOST', '5555') # Main server port # TODO make it an actual system value
27.642857
65
0.70801
089dd16719afbe9729c1ada777b1780015f91177
21,032
py
Python
python/ray/tests/test_advanced_3.py
bynoud/ray
8204717eed71923f1e14fa6bd17ca4588c140c09
[ "Apache-2.0" ]
1
2020-09-10T06:33:46.000Z
2020-09-10T06:33:46.000Z
python/ray/tests/test_advanced_3.py
Nada-Bu/ray
bfa06052828f83ab790e6b6bbfa5b56edb42b45e
[ "Apache-2.0" ]
null
null
null
python/ray/tests/test_advanced_3.py
Nada-Bu/ray
bfa06052828f83ab790e6b6bbfa5b56edb42b45e
[ "Apache-2.0" ]
1
2021-02-02T02:24:12.000Z
2021-02-02T02:24:12.000Z
# coding: utf-8 import glob import logging import os import shutil import json import sys import socket import tempfile import time import numpy as np import pickle import pytest import ray import ray.ray_constants as ray_constants import ray.cluster_utils import ray.test_utils import setproctitle from ray.test_utils import (check_call_ray, RayTestTimeoutException, wait_for_num_actors) logger = logging.getLogger(__name__) def attempt_to_load_balance(remote_function, args, total_tasks, num_nodes, minimum_count, num_attempts=100): attempts = 0 while attempts < num_attempts: locations = ray.get( [remote_function.remote(*args) for _ in range(total_tasks)]) names = set(locations) counts = [locations.count(name) for name in names] logger.info("Counts are {}.".format(counts)) if (len(names) == num_nodes and all(count >= minimum_count for count in counts)): break attempts += 1 assert attempts < num_attempts def test_load_balancing(ray_start_cluster): # This test ensures that tasks are being assigned to all raylets # in a roughly equal manner. cluster = ray_start_cluster num_nodes = 3 num_cpus = 7 for _ in range(num_nodes): cluster.add_node(num_cpus=num_cpus) ray.init(address=cluster.address) @ray.remote def f(): time.sleep(0.01) return ray.worker.global_worker.node.unique_id attempt_to_load_balance(f, [], 100, num_nodes, 10) attempt_to_load_balance(f, [], 1000, num_nodes, 100) def test_load_balancing_with_dependencies(ray_start_cluster): # This test ensures that tasks are being assigned to all raylets in a # roughly equal manner even when the tasks have dependencies. cluster = ray_start_cluster num_nodes = 3 for _ in range(num_nodes): cluster.add_node(num_cpus=1) ray.init(address=cluster.address) @ray.remote def f(x): time.sleep(0.010) return ray.worker.global_worker.node.unique_id # This object will be local to one of the raylets. Make sure # this doesn't prevent tasks from being scheduled on other raylets. 
x = ray.put(np.zeros(1000000)) attempt_to_load_balance(f, [x], 100, num_nodes, 25) def wait_for_num_objects(num_objects, timeout=10): start_time = time.time() while time.time() - start_time < timeout: if len(ray.objects()) >= num_objects: return time.sleep(0.1) raise RayTestTimeoutException("Timed out while waiting for global state.") def test_global_state_api(shutdown_only): error_message = ("The ray global state API cannot be used " "before ray.init has been called.") with pytest.raises(Exception, match=error_message): ray.objects() with pytest.raises(Exception, match=error_message): ray.actors() with pytest.raises(Exception, match=error_message): ray.nodes() with pytest.raises(Exception, match=error_message): ray.jobs() ray.init(num_cpus=5, num_gpus=3, resources={"CustomResource": 1}) assert ray.cluster_resources()["CPU"] == 5 assert ray.cluster_resources()["GPU"] == 3 assert ray.cluster_resources()["CustomResource"] == 1 # A driver/worker creates a temporary object during startup. Although the # temporary object is freed immediately, in a rare case, we can still find # the object ref in GCS because Raylet removes the object ref from GCS # asynchronously. # Because we can't control when workers create the temporary objects, so # We can't assert that `ray.objects()` returns an empty dict. Here we just # make sure `ray.objects()` succeeds. 
assert len(ray.objects()) >= 0 job_id = ray.utils.compute_job_id_from_driver( ray.WorkerID(ray.worker.global_worker.worker_id)) client_table = ray.nodes() node_ip_address = ray.worker.global_worker.node_ip_address assert len(client_table) == 1 assert client_table[0]["NodeManagerAddress"] == node_ip_address @ray.remote class Actor: def __init__(self): pass _ = Actor.remote() # noqa: F841 # Wait for actor to be created wait_for_num_actors(1) actor_table = ray.actors() assert len(actor_table) == 1 actor_info, = actor_table.values() assert actor_info["JobID"] == job_id.hex() assert "IPAddress" in actor_info["Address"] assert "IPAddress" in actor_info["OwnerAddress"] assert actor_info["Address"]["Port"] != actor_info["OwnerAddress"]["Port"] job_table = ray.jobs() assert len(job_table) == 1 assert job_table[0]["JobID"] == job_id.hex() assert job_table[0]["DriverIPAddress"] == node_ip_address # TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we # should use those, but they seem to conflict with Ray's use of faulthandler. class CaptureOutputAndError: """Capture stdout and stderr of some span. This can be used as follows. captured = {} with CaptureOutputAndError(captured): # Do stuff. # Access captured["out"] and captured["err"]. 
""" def __init__(self, captured_output_and_error): import io self.output_buffer = io.StringIO() self.error_buffer = io.StringIO() self.captured_output_and_error = captured_output_and_error def __enter__(self): sys.stdout.flush() sys.stderr.flush() self.old_stdout = sys.stdout self.old_stderr = sys.stderr sys.stdout = self.output_buffer sys.stderr = self.error_buffer def __exit__(self, exc_type, exc_value, traceback): sys.stdout.flush() sys.stderr.flush() sys.stdout = self.old_stdout sys.stderr = self.old_stderr self.captured_output_and_error["out"] = self.output_buffer.getvalue() self.captured_output_and_error["err"] = self.error_buffer.getvalue() def test_logging_to_driver(shutdown_only): ray.init(num_cpus=1, log_to_driver=True) @ray.remote def f(): # It's important to make sure that these print statements occur even # without calling sys.stdout.flush() and sys.stderr.flush(). for i in range(100): print(i) print(100 + i, file=sys.stderr) captured = {} with CaptureOutputAndError(captured): ray.get(f.remote()) time.sleep(1) output_lines = captured["out"] for i in range(200): assert str(i) in output_lines # TODO(rkn): Check that no additional logs appear beyond what we expect # and that there are no duplicate logs. Once we address the issue # described in https://github.com/ray-project/ray/pull/5462, we should # also check that nothing is logged to stderr. def test_not_logging_to_driver(shutdown_only): ray.init(num_cpus=1, log_to_driver=False) @ray.remote def f(): for i in range(100): print(i) print(100 + i, file=sys.stderr) sys.stdout.flush() sys.stderr.flush() captured = {} with CaptureOutputAndError(captured): ray.get(f.remote()) time.sleep(1) output_lines = captured["out"] assert len(output_lines) == 0 # TODO(rkn): Check that no additional logs appear beyond what we expect # and that there are no duplicate logs. Once we address the issue # described in https://github.com/ray-project/ray/pull/5462, we should # also check that nothing is logged to stderr. 
@pytest.mark.skipif( os.environ.get("RAY_USE_NEW_GCS") == "on", reason="New GCS API doesn't have a Python API yet.") def test_workers(shutdown_only): num_workers = 3 ray.init(num_cpus=num_workers) @ray.remote def f(): return id(ray.worker.global_worker), os.getpid() # Wait until all of the workers have started. worker_ids = set() while len(worker_ids) != num_workers: worker_ids = set(ray.get([f.remote() for _ in range(10)])) def test_specific_job_id(): dummy_driver_id = ray.JobID.from_int(1) ray.init(num_cpus=1, job_id=dummy_driver_id) # in driver assert dummy_driver_id == ray.worker.global_worker.current_job_id # in worker @ray.remote def f(): return ray.worker.global_worker.current_job_id assert dummy_driver_id == ray.get(f.remote()) ray.shutdown() def test_object_ref_properties(): id_bytes = b"00112233445566778899" object_ref = ray.ObjectRef(id_bytes) assert object_ref.binary() == id_bytes object_ref = ray.ObjectRef.nil() assert object_ref.is_nil() with pytest.raises(ValueError, match=r".*needs to have length 20.*"): ray.ObjectRef(id_bytes + b"1234") with pytest.raises(ValueError, match=r".*needs to have length 20.*"): ray.ObjectRef(b"0123456789") object_ref = ray.ObjectRef.from_random() assert not object_ref.is_nil() assert object_ref.binary() != id_bytes id_dumps = pickle.dumps(object_ref) id_from_dumps = pickle.loads(id_dumps) assert id_from_dumps == object_ref @pytest.fixture def shutdown_only_with_initialization_check(): yield None # The code after the yield will run as teardown code. 
ray.shutdown() assert not ray.is_initialized() def test_initialized(shutdown_only_with_initialization_check): assert not ray.is_initialized() ray.init(num_cpus=0) assert ray.is_initialized() def test_initialized_local_mode(shutdown_only_with_initialization_check): assert not ray.is_initialized() ray.init(num_cpus=0, local_mode=True) assert ray.is_initialized() def test_wait_reconstruction(shutdown_only): ray.init( num_cpus=1, object_store_memory=int(10**8), _internal_config=json.dumps({ "object_pinning_enabled": 0 })) @ray.remote def f(): return np.zeros(6 * 10**7, dtype=np.uint8) x_id = f.remote() ray.wait([x_id]) ray.wait([f.remote()]) assert not ray.worker.global_worker.core_worker.object_exists(x_id) ready_ids, _ = ray.wait([x_id]) assert len(ready_ids) == 1 def test_ray_setproctitle(ray_start_2_cpus): @ray.remote class UniqueName: def __init__(self): assert setproctitle.getproctitle() == "ray::UniqueName.__init__()" def f(self): assert setproctitle.getproctitle() == "ray::UniqueName.f()" @ray.remote def unique_1(): assert "unique_1" in setproctitle.getproctitle() actor = UniqueName.remote() ray.get(actor.f.remote()) ray.get(unique_1.remote()) def test_duplicate_error_messages(shutdown_only): ray.init(num_cpus=0) driver_id = ray.WorkerID.nil() error_data = ray.gcs_utils.construct_error_message(driver_id, "test", "message", 0) # Push the same message to the GCS twice (they are the same because we # do not include a timestamp). 
r = ray.worker.global_worker.redis_client r.execute_command("RAY.TABLE_APPEND", ray.gcs_utils.TablePrefix.Value("ERROR_INFO"), ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB"), driver_id.binary(), error_data) # Before https://github.com/ray-project/ray/pull/3316 this would # give an error r.execute_command("RAY.TABLE_APPEND", ray.gcs_utils.TablePrefix.Value("ERROR_INFO"), ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB"), driver_id.binary(), error_data) @pytest.mark.skipif( os.getenv("TRAVIS") is None, reason="This test should only be run on Travis.") def test_ray_stack(ray_start_2_cpus): def unique_name_1(): time.sleep(1000) @ray.remote def unique_name_2(): time.sleep(1000) @ray.remote def unique_name_3(): unique_name_1() unique_name_2.remote() unique_name_3.remote() success = False start_time = time.time() while time.time() - start_time < 30: # Attempt to parse the "ray stack" call. output = ray.utils.decode( check_call_ray(["stack"], capture_stdout=True)) if ("unique_name_1" in output and "unique_name_2" in output and "unique_name_3" in output): success = True break if not success: raise Exception("Failed to find necessary information with " "'ray stack'") def test_pandas_parquet_serialization(): # Only test this if pandas is installed pytest.importorskip("pandas") import pandas as pd import pyarrow as pa import pyarrow.parquet as pq tempdir = tempfile.mkdtemp() filename = os.path.join(tempdir, "parquet-test") pd.DataFrame({"col1": [0, 1], "col2": [0, 1]}).to_parquet(filename) with open(os.path.join(tempdir, "parquet-compression"), "wb") as f: table = pa.Table.from_arrays([pa.array([1, 2, 3])], ["hello"]) pq.write_table(table, f, compression="lz4") # Clean up shutil.rmtree(tempdir) def test_socket_dir_not_existing(shutdown_only): if sys.platform != "win32": random_name = ray.ObjectRef.from_random().hex() temp_raylet_socket_dir = os.path.join(ray.utils.get_ray_temp_dir(), "tests", random_name) temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir, 
"raylet_socket") ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name) def test_raylet_is_robust_to_random_messages(ray_start_regular): node_manager_address = None node_manager_port = None for client in ray.nodes(): if "NodeManagerAddress" in client: node_manager_address = client["NodeManagerAddress"] node_manager_port = client["NodeManagerPort"] assert node_manager_address assert node_manager_port # Try to bring down the node manager: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((node_manager_address, node_manager_port)) s.send(1000 * b"asdf") @ray.remote def f(): return 1 assert ray.get(f.remote()) == 1 def test_non_ascii_comment(ray_start_regular): @ray.remote def f(): # 日本語 Japanese comment return 1 assert ray.get(f.remote()) == 1 def test_shutdown_disconnect_global_state(): ray.init(num_cpus=0) ray.shutdown() with pytest.raises(Exception) as e: ray.objects() assert str(e.value).endswith("ray.init has been called.") @pytest.mark.parametrize( "ray_start_object_store_memory", [150 * 1024 * 1024], indirect=True) def test_put_pins_object(ray_start_object_store_memory): obj = np.ones(200 * 1024, dtype=np.uint8) x_id = ray.put(obj) x_binary = x_id.binary() assert (ray.get(ray.ObjectRef(x_binary)) == obj).all() # x cannot be evicted since x_id pins it for _ in range(10): ray.put(np.zeros(10 * 1024 * 1024)) assert (ray.get(x_id) == obj).all() assert (ray.get(ray.ObjectRef(x_binary)) == obj).all() # now it can be evicted since x_id pins it but x_binary does not del x_id for _ in range(10): ray.put(np.zeros(10 * 1024 * 1024)) assert not ray.worker.global_worker.core_worker.object_exists( ray.ObjectRef(x_binary)) # weakref put y_id = ray.put(obj, weakref=True) for _ in range(10): ray.put(np.zeros(10 * 1024 * 1024)) with pytest.raises(ray.exceptions.UnreconstructableError): ray.get(y_id) def test_decorated_function(ray_start_regular): def function_invocation_decorator(f): def new_f(args, kwargs): # Reverse the arguments. 
return f(args[::-1], {"d": 5}), kwargs return new_f def f(a, b, c, d=None): return a, b, c, d f.__ray_invocation_decorator__ = function_invocation_decorator f = ray.remote(f) result_id, kwargs = f.remote(1, 2, 3, d=4) assert kwargs == {"d": 4} assert ray.get(result_id) == (3, 2, 1, 5) def test_get_postprocess(ray_start_regular): def get_postprocessor(object_refs, values): return [value for value in values if value > 0] ray.worker.global_worker._post_get_hooks.append(get_postprocessor) assert ray.get( [ray.put(i) for i in [0, 1, 3, 5, -1, -3, 4]]) == [1, 3, 5, 4] def test_export_after_shutdown(ray_start_regular): # This test checks that we can use actor and remote function definitions # across multiple Ray sessions. @ray.remote def f(): pass @ray.remote class Actor: def method(self): pass ray.get(f.remote()) a = Actor.remote() ray.get(a.method.remote()) ray.shutdown() # Start Ray and use the remote function and actor again. ray.init(num_cpus=1) ray.get(f.remote()) a = Actor.remote() ray.get(a.method.remote()) ray.shutdown() # Start Ray again and make sure that these definitions can be exported from # workers. ray.init(num_cpus=2) @ray.remote def export_definitions_from_worker(remote_function, actor_class): ray.get(remote_function.remote()) actor_handle = actor_class.remote() ray.get(actor_handle.method.remote()) ray.get(export_definitions_from_worker.remote(f, Actor)) def test_invalid_unicode_in_worker_log(shutdown_only): info = ray.init(num_cpus=1) logs_dir = os.path.join(info["session_dir"], "logs") # Wait till first worker log file is created. while True: log_file_paths = glob.glob("{}/worker*.out".format(logs_dir)) if len(log_file_paths) == 0: time.sleep(0.2) else: break with open(log_file_paths[0], "wb") as f: f.write(b"\xe5abc\nline2\nline3\n") f.write(b"\xe5abc\nline2\nline3\n") f.write(b"\xe5abc\nline2\nline3\n") f.flush() # Wait till the log monitor reads the file. time.sleep(1.0) # Make sure that nothing has died. 
assert ray.services.remaining_processes_alive() @pytest.mark.skip(reason="This test is too expensive to run.") def test_move_log_files_to_old(shutdown_only): info = ray.init(num_cpus=1) logs_dir = os.path.join(info["session_dir"], "logs") @ray.remote class Actor: def f(self): print("function f finished") # First create a temporary actor. actors = [ Actor.remote() for i in range(ray_constants.LOG_MONITOR_MAX_OPEN_FILES) ] ray.get([a.f.remote() for a in actors]) # Make sure no log files are in the "old" directory before the actors # are killed. assert len(glob.glob("{}/old/worker*.out".format(logs_dir))) == 0 # Now kill the actors so the files get moved to logs/old/. [a.__ray_terminate__.remote() for a in actors] while True: log_file_paths = glob.glob("{}/old/worker*.out".format(logs_dir)) if len(log_file_paths) > 0: with open(log_file_paths[0], "r") as f: assert "function f finished\n" in f.readlines() break # Make sure that nothing has died. assert ray.services.remaining_processes_alive() def test_lease_request_leak(shutdown_only): ray.init( num_cpus=1, _internal_config=json.dumps({ "initial_reconstruction_timeout_milliseconds": 200 })) assert len(ray.objects()) == 0 @ray.remote def f(x): time.sleep(0.1) return # Submit pairs of tasks. Tasks in a pair can reuse the same worker leased # from the raylet. tasks = [] for _ in range(10): obj_ref = ray.put(1) for _ in range(2): tasks.append(f.remote(obj_ref)) del obj_ref ray.get(tasks) time.sleep( 1) # Sleep for an amount longer than the reconstruction timeout. assert len(ray.objects()) == 0, ray.objects() @pytest.mark.parametrize( "ray_start_cluster", [{ "num_cpus": 0, "num_nodes": 1, "do_init": False, }], indirect=True) def test_ray_address_environment_variable(ray_start_cluster): address = ray_start_cluster.address # In this test we use zero CPUs to distinguish between starting a local # ray cluster and connecting to an existing one. # Make sure we connect to an existing cluster if # RAY_ADDRESS is set. 
os.environ["RAY_ADDRESS"] = address ray.init() assert "CPU" not in ray.state.cluster_resources() del os.environ["RAY_ADDRESS"] ray.shutdown() # Make sure we start a new cluster if RAY_ADDRESS is not set. ray.init() assert "CPU" in ray.state.cluster_resources() ray.shutdown() if __name__ == "__main__": import pytest sys.exit(pytest.main(["-v", __file__]))
29.960114
79
0.651483
9d104b86ccc0993346d20247efc7184542499b75
6,480
py
Python
train.py
Abastro/PointGroup
4949b81c423cb2c93755621c79ce3bc7f9b0a92a
[ "Apache-2.0" ]
null
null
null
train.py
Abastro/PointGroup
4949b81c423cb2c93755621c79ce3bc7f9b0a92a
[ "Apache-2.0" ]
null
null
null
train.py
Abastro/PointGroup
4949b81c423cb2c93755621c79ce3bc7f9b0a92a
[ "Apache-2.0" ]
null
null
null
''' PointGroup train.py Written by Li Jiang ''' import torch import torch.optim as optim import time, sys, os, random from tensorboardX import SummaryWriter import numpy as np from util.config import cfg from util.log import logger import util.utils as utils def init(): # copy important files to backup backup_dir = os.path.join(cfg.exp_path, 'backup_files') os.makedirs(backup_dir, exist_ok=True) os.system('cp train.py {}'.format(backup_dir)) os.system('cp {} {}'.format(cfg.model_dir, backup_dir)) os.system('cp {} {}'.format(cfg.dataset_dir, backup_dir)) os.system('cp {} {}'.format(cfg.config, backup_dir)) # log the config logger.info(cfg) # summary writer global writer writer = SummaryWriter(cfg.exp_path) # random seed random.seed(cfg.manual_seed) np.random.seed(cfg.manual_seed) torch.manual_seed(cfg.manual_seed) torch.cuda.manual_seed_all(cfg.manual_seed) def train_epoch(train_loader, model, model_fn, optimizer, epoch): iter_time = utils.AverageMeter() data_time = utils.AverageMeter() am_dict = {} model.train() start_epoch = time.time() end = time.time() for i, batch in enumerate(train_loader): data_time.update(time.time() - end) torch.cuda.empty_cache() ##### adjust learning rate utils.step_learning_rate(optimizer, cfg.lr, epoch - 1, cfg.step_epoch, cfg.multiplier) ##### prepare input and forward loss, _, visual_dict, meter_dict = model_fn(batch, model, epoch) ##### meter_dict for k, v in meter_dict.items(): if k not in am_dict.keys(): am_dict[k] = utils.AverageMeter() am_dict[k].update(v[0], v[1]) ##### backward optimizer.zero_grad() loss.backward() optimizer.step() ##### time and print current_iter = (epoch - 1) * len(train_loader) + i + 1 max_iter = cfg.epochs * len(train_loader) remain_iter = max_iter - current_iter iter_time.update(time.time() - end) end = time.time() remain_time = remain_iter * iter_time.avg t_m, t_s = divmod(remain_time, 60) t_h, t_m = divmod(t_m, 60) remain_time = '{:02d}:{:02d}:{:02d}'.format(int(t_h), int(t_m), int(t_s)) 
sys.stdout.write( "epoch: {}/{} iter: {}/{} loss: {:.4f}({:.4f}) data_time: {:.2f}({:.2f}) iter_time: {:.2f}({:.2f}) remain_time: {remain_time}\n".format (epoch, cfg.epochs, i + 1, len(train_loader), am_dict['loss'].val, am_dict['loss'].avg, data_time.val, data_time.avg, iter_time.val, iter_time.avg, remain_time=remain_time)) if (i == len(train_loader) - 1): print() logger.info("epoch: {}/{}, train loss: {:.4f}, time: {}s".format(epoch, cfg.epochs, am_dict['loss'].avg, time.time() - start_epoch)) utils.checkpoint_save(model, cfg.exp_path, cfg.config.split('/')[-1][:-5], epoch, cfg.save_freq, use_cuda) for k in am_dict.keys(): if k in visual_dict.keys(): writer.add_scalar(k+'_train', am_dict[k].avg, epoch) def eval_epoch(val_loader, model, model_fn, epoch): logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>') am_dict = {} with torch.no_grad(): model.eval() start_epoch = time.time() for i, batch in enumerate(val_loader): ##### prepare input and forward loss, preds, visual_dict, meter_dict = model_fn(batch, model, epoch) ##### meter_dict for k, v in meter_dict.items(): if k not in am_dict.keys(): am_dict[k] = utils.AverageMeter() am_dict[k].update(v[0], v[1]) ##### print sys.stdout.write("\riter: {}/{} loss: {:.4f}({:.4f})".format(i + 1, len(val_loader), am_dict['loss'].val, am_dict['loss'].avg)) if (i == len(val_loader) - 1): print() logger.info("epoch: {}/{}, val loss: {:.4f}, time: {}s".format(epoch, cfg.epochs, am_dict['loss'].avg, time.time() - start_epoch)) for k in am_dict.keys(): if k in visual_dict.keys(): writer.add_scalar(k + '_eval', am_dict[k].avg, epoch) if __name__ == '__main__': ##### init init() ##### get model version and data version exp_name = cfg.config.split('/')[-1][:-5] model_name = exp_name.split('_')[0] data_name = exp_name.split('_')[-1] ##### model logger.info('=> creating model ...') if model_name == 'pointgroup': from model.pointgroup.pointgroup import PointGroup as Network from model.pointgroup.pointgroup import 
model_fn_decorator elif model_name == 'pointgroup_alt': from model.pointgroup.pointgroup_alt import PointGroup as Network from model.pointgroup.pointgroup_alt import model_fn_decorator else: print("Error: no model - " + model_name) exit(0) model = Network(cfg) use_cuda = torch.cuda.is_available() logger.info('cuda available: {}'.format(use_cuda)) assert use_cuda model = model.cuda() # logger.info(model) logger.info('#classifier parameters: {}'.format(sum([x.nelement() for x in model.parameters()]))) ##### optimizer if cfg.optim == 'Adam': optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=cfg.lr) elif cfg.optim == 'SGD': optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=cfg.lr, momentum=cfg.momentum, weight_decay=cfg.weight_decay) ##### model_fn (criterion) model_fn = model_fn_decorator() ##### dataset if cfg.dataset == 'scannetv2': if data_name == 'scannet': import data.scannetv2_inst dataset = data.scannetv2_inst.Dataset() dataset.trainLoader() dataset.valLoader() else: print("Error: no data loader - " + data_name) exit(0) ##### resume start_epoch = utils.checkpoint_restore(model, cfg.exp_path, cfg.config.split('/')[-1][:-5], use_cuda) # resume from the latest epoch, or specify the epoch to restore ##### train and val for epoch in range(start_epoch, cfg.epochs + 1): train_epoch(dataset.train_data_loader, model, model_fn, optimizer, epoch) if utils.is_multiple(epoch, cfg.save_freq) or utils.is_power2(epoch): eval_epoch(dataset.val_data_loader, model, model_fn, epoch)
34.83871
174
0.610802
69329637575842a17e8218497b44874986876049
6,756
py
Python
src/visualization/single_participant/visualize_default.py
oesst/HRTF_Model
c60b5e91c6c181a9df6655eabb82183ca8f4d503
[ "MIT" ]
null
null
null
src/visualization/single_participant/visualize_default.py
oesst/HRTF_Model
c60b5e91c6c181a9df6655eabb82183ca8f4d503
[ "MIT" ]
4
2021-06-02T04:10:35.000Z
2022-03-12T00:55:01.000Z
src/visualization/single_participant/visualize_default.py
oesst/HRTF_Model
c60b5e91c6c181a9df6655eabb82183ca8f4d503
[ "MIT" ]
null
null
null
import matplotlib.pyplot as plt import src.features.helpers_vis as hp_vis import src.features.helpers as hp import logging import pickle from pathlib import Path import numpy as np import click hp_vis.set_layout(15) ROOT = Path(__file__).resolve().parents[3] SOUND_FILES = ROOT / 'data/raw/sound_samples/' # create a list of the sound files SOUND_FILES = list(SOUND_FILES.glob('**/*.wav')) # Define whether figures should be saved @click.command() @click.option('--save_figs', type=click.BOOL, default=False, help='Save figures') @click.option('--save_type', default='svg', help='Define the format figures are saved.') @click.option('--model_name', default='single_participant', help='Defines the model name.') @click.option('--exp_name', default='single_participant_default', help='Defines the experiment name') @click.option('--azimuth', default=12, help='Azimuth for which localization is done. Default is 12') @click.option('--participant_number', default=9, help='CIPIC participant number. Default is 9') @click.option('--snr', default=0.2, help='Signal to noise ration to use. Default is 0.2') @click.option('--freq_bands', default=128, help='Amount of frequency bands to use. Default is 128') @click.option('--max_freq', default=20000, help='Max frequency to use. Default is 20000') @click.option('--elevations', default=25, help='Number of elevations to use 0-n. Default is 25 which equals 0-90 deg') @click.option('--mean_subtracted_map', default=True, help='Should the learned map be mean subtracted. Default is True') @click.option('--ear', default='contra', help='Which ear should be used, contra or ipsi. Default is contra') @click.option('--normalization_type', default='sum_1', help='Which normalization type should be used sum_1, l1, l2. Default is sum_1') @click.option('--sigma_smoothing', default=0.0, help='Sigma for smoothing kernel. 0 is off. Default is 0.') @click.option('--sigma_gauss_norm', default=1.0, help='Sigma for gauss normalization. 0 is off. 
Default is 1.') def main(save_figs=False, save_type='svg', model_name='single_participant', exp_name='single_participant_default', azimuth=12, participant_number=9, snr=0.2, freq_bands=24, max_freq=20000, elevations=25, mean_subtracted_map=True, ear='ipsi', normalization_type='sum_1', sigma_smoothing=0, sigma_gauss_norm=1): logger = logging.getLogger(__name__) logger.info('Showing localization results for a single participant') # make sure save type is given if not save_type or len(save_type) == 0: save_type = 'svg' ######################################################################## ######################## Set parameters ################################ ######################################################################## normalize = False time_window = 0.1 # time window in sec elevations = np.arange(0, elevations, 1) ######################################################################## ######################################################################## exp_name_str = hp.create_exp_name([exp_name, normalization_type, sigma_smoothing, sigma_gauss_norm, mean_subtracted_map, time_window, int( snr * 100), freq_bands, max_freq, participant_number, (azimuth - 12) * 10, normalize, len(elevations), ear]) exp_path = ROOT / 'models' / model_name exp_file = exp_path / exp_name_str # check if model results exist already and load if exp_path.exists() and exp_file.is_file(): # try to load the model files with open(exp_file.as_posix(), 'rb') as f: logger.info('Reading model data from file') [x_mono, y_mono, x_mono_mean, y_mono_mean, x_bin, y_bin, x_bin_mean, y_bin_mean] = pickle.load(f) # define which elevations should be used print(y_mono.shape) x_mono = x_mono[:, elevations, :] y_mono = y_mono[:, elevations] x_mono_mean = x_mono_mean[:, elevations, :] y_mono_mean = y_mono_mean[:, elevations] x_bin = x_bin[:, elevations, :] y_bin = y_bin[:, elevations] x_bin_mean = x_bin_mean[:, elevations, :] y_bin_mean = y_bin_mean[:, elevations] fig = plt.figure(figsize=(20, 
5)) # plt.suptitle('Single Participant') # Monoaural Data (Ipsilateral), No Mean Subtracted ax = fig.add_subplot(1, 4, 1) hp_vis.plot_localization_result( x_mono, y_mono, ax, SOUND_FILES, scale_values=True, linear_reg=True, disp_values=True) ax.set_title('Monoaural') hp_vis.set_axis(ax, len(elevations)) ax.set_ylabel('Estimated Elevation [deg]') ax.set_xlabel('True Elevation [deg]') # Monoaural Data (Ipsilateral),Mean Subtracted ax = fig.add_subplot(1, 4, 2) hp_vis.plot_localization_result( x_mono_mean, y_mono_mean, ax, SOUND_FILES, scale_values=True, linear_reg=True, disp_values=True) ax.set_title('Mono - Prior') hp_vis.set_axis(ax, len(elevations)) ax.set_xlabel('True Elevation [deg]') # Binaural Data (Ipsilateral), No Mean Subtracted ax = fig.add_subplot(1, 4, 3) hp_vis.plot_localization_result( x_bin, y_bin, ax, SOUND_FILES, scale_values=True, linear_reg=True, disp_values=True) ax.set_title('Binaural') hp_vis.set_axis(ax, len(elevations)) ax.set_xlabel('True Elevation [deg]') # Binaural Data (Ipsilateral), Mean Subtracted ax = fig.add_subplot(1, 4, 4) hp_vis.plot_localization_result( x_bin_mean, y_bin_mean, ax, SOUND_FILES, scale_values=True, linear_reg=True, disp_values=True) ax.set_title('Bin - Prior') hp_vis.set_axis(ax, len(elevations)) ax.set_xlabel('True Elevation [deg]') plt.tight_layout() if save_figs: exp_name_str = hp.create_exp_name([exp_name, normalization_type, sigma_smoothing, sigma_gauss_norm, mean_subtracted_map, time_window, int( snr * 100), freq_bands, max_freq, (azimuth - 12) * 10, normalize, len(elevations), ear]) fig_save_path = ROOT / 'reports' / 'figures' / exp_name_str / model_name if not fig_save_path.exists(): fig_save_path.mkdir(parents=True, exist_ok=True) plt.savefig((fig_save_path / (model_name + '_' + exp_name + '_participant_'+ str(participant_number)+'_localization.' + save_type)).as_posix(), dpi=300) else: plt.show() else: logger.error('No data set found. 
Run model first!') logger.error(exp_file) if __name__ == '__main__': log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' logging.basicConfig(level=logging.INFO, format=log_fmt) main()
48.956522
309
0.647128
6525105406048485797da8d096dca4b8ecb45181
6,420
py
Python
homeassistant/components/uvc/camera.py
VirtualL/home-assistant
301829d02be8d865ab46c8901ac046d060849320
[ "Apache-2.0" ]
null
null
null
homeassistant/components/uvc/camera.py
VirtualL/home-assistant
301829d02be8d865ab46c8901ac046d060849320
[ "Apache-2.0" ]
3
2021-09-08T03:34:57.000Z
2022-03-12T00:59:48.000Z
homeassistant/components/uvc/camera.py
VirtualL/home-assistant
301829d02be8d865ab46c8901ac046d060849320
[ "Apache-2.0" ]
null
null
null
"""Support for Ubiquiti's UVC cameras.""" import logging import socket import requests import voluptuous as vol from homeassistant.const import CONF_PORT, CONF_SSL from homeassistant.components.camera import Camera, PLATFORM_SCHEMA import homeassistant.helpers.config_validation as cv from homeassistant.exceptions import PlatformNotReady REQUIREMENTS = ['uvcclient==0.11.0'] _LOGGER = logging.getLogger(__name__) CONF_NVR = 'nvr' CONF_KEY = 'key' CONF_PASSWORD = 'password' DEFAULT_PASSWORD = 'ubnt' DEFAULT_PORT = 7080 DEFAULT_SSL = False PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_NVR): cv.string, vol.Required(CONF_KEY): cv.string, vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean, }) def setup_platform(hass, config, add_entities, discovery_info=None): """Discover cameras on a Unifi NVR.""" addr = config[CONF_NVR] key = config[CONF_KEY] password = config[CONF_PASSWORD] port = config[CONF_PORT] ssl = config[CONF_SSL] from uvcclient import nvr try: # Exceptions may be raised in all method calls to the nvr library. 
nvrconn = nvr.UVCRemote(addr, port, key, ssl=ssl) cameras = nvrconn.index() identifier = 'id' if nvrconn.server_version >= (3, 2, 0) else 'uuid' # Filter out airCam models, which are not supported in the latest # version of UnifiVideo and which are EOL by Ubiquiti cameras = [ camera for camera in cameras if 'airCam' not in nvrconn.get_camera(camera[identifier])['model']] except nvr.NotAuthorized: _LOGGER.error("Authorization failure while connecting to NVR") return False except nvr.NvrError as ex: _LOGGER.error("NVR refuses to talk to me: %s", str(ex)) raise PlatformNotReady except requests.exceptions.ConnectionError as ex: _LOGGER.error("Unable to connect to NVR: %s", str(ex)) raise PlatformNotReady add_entities([UnifiVideoCamera(nvrconn, camera[identifier], camera['name'], password) for camera in cameras]) return True class UnifiVideoCamera(Camera): """A Ubiquiti Unifi Video Camera.""" def __init__(self, nvr, uuid, name, password): """Initialize an Unifi camera.""" super(UnifiVideoCamera, self).__init__() self._nvr = nvr self._uuid = uuid self._name = name self._password = password self.is_streaming = False self._connect_addr = None self._camera = None self._motion_status = False @property def name(self): """Return the name of this camera.""" return self._name @property def is_recording(self): """Return true if the camera is recording.""" caminfo = self._nvr.get_camera(self._uuid) return caminfo['recordingSettings']['fullTimeRecordEnabled'] @property def motion_detection_enabled(self): """Camera Motion Detection Status.""" caminfo = self._nvr.get_camera(self._uuid) return caminfo['recordingSettings']['motionRecordEnabled'] @property def brand(self): """Return the brand of this camera.""" return 'Ubiquiti' @property def model(self): """Return the model of this camera.""" caminfo = self._nvr.get_camera(self._uuid) return caminfo['model'] def _login(self): """Login to the camera.""" from uvcclient import camera as uvc_camera caminfo = 
self._nvr.get_camera(self._uuid) if self._connect_addr: addrs = [self._connect_addr] else: addrs = [caminfo['host'], caminfo['internalHost']] if self._nvr.server_version >= (3, 2, 0): client_cls = uvc_camera.UVCCameraClientV320 else: client_cls = uvc_camera.UVCCameraClient if caminfo['username'] is None: caminfo['username'] = 'ubnt' camera = None for addr in addrs: try: camera = client_cls( addr, caminfo['username'], self._password) camera.login() _LOGGER.debug("Logged into UVC camera %(name)s via %(addr)s", dict(name=self._name, addr=addr)) self._connect_addr = addr break except socket.error: pass except uvc_camera.CameraConnectError: pass except uvc_camera.CameraAuthError: pass if not self._connect_addr: _LOGGER.error("Unable to login to camera") return None self._camera = camera return True def camera_image(self): """Return the image of this camera.""" from uvcclient import camera as uvc_camera if not self._camera: if not self._login(): return def _get_image(retry=True): try: return self._camera.get_snapshot() except uvc_camera.CameraConnectError: _LOGGER.error("Unable to contact camera") except uvc_camera.CameraAuthError: if retry: self._login() return _get_image(retry=False) _LOGGER.error( "Unable to log into camera, unable to get snapshot") raise return _get_image() def set_motion_detection(self, mode): """Set motion detection on or off.""" from uvcclient.nvr import NvrError if mode is True: set_mode = 'motion' else: set_mode = 'none' try: self._nvr.set_recordmode(self._uuid, set_mode) self._motion_status = mode except NvrError as err: _LOGGER.error("Unable to set recordmode to %s", set_mode) _LOGGER.debug(err) def enable_motion_detection(self): """Enable motion detection in camera.""" self.set_motion_detection(True) def disable_motion_detection(self): """Disable motion detection in camera.""" self.set_motion_detection(False)
32.1
79
0.606698
06eb6a47b1200e9fc67e7795fa62e1052a700881
1,230
py
Python
src/parameters/parameterparser.py
Marco9412/PyMusicServer3
fa264c4344a56ff15d08ec5ffdc9b24cc61db29e
[ "Apache-2.0" ]
null
null
null
src/parameters/parameterparser.py
Marco9412/PyMusicServer3
fa264c4344a56ff15d08ec5ffdc9b24cc61db29e
[ "Apache-2.0" ]
null
null
null
src/parameters/parameterparser.py
Marco9412/PyMusicServer3
fa264c4344a56ff15d08ec5ffdc9b24cc61db29e
[ "Apache-2.0" ]
null
null
null
import argparse import logging import globals from settings.settingsprovider import new_settings_file from utils.logging_utils import init_logging # from utils.printer import redirect_output # from utils.debugUtils import enable_debug def parse_arguments(): parser = argparse.ArgumentParser() parser.add_argument('-s', '--settingsfile', type=str, help='the settings file to use', default='settings.cfg') # parser.add_argument('-d', '--debug', action='store_true', help='enable debug mode') # parser.add_argument('-r', '--redirect', type=str, metavar='F', help='redirect output to F') parser.add_argument('-l', '--log_level', type=str, default='info', choices=['info', 'debug', 'warning'], help='The level of logging required, default info') parser.add_argument('-v', '--version', action='store_true', help='show version') params = parser.parse_args() if params.version: print('Version: {}, Build date: {}'.format(globals.REVISION, globals.DATE)) exit(0) init_logging(params.log_level) if params.settingsfile: new_settings_file(params.settingsfile) logging.info('[PARAMETERPARSER] New settings file is %s' % params.settingsfile)
41
114
0.697561
a98cfce8d547b3d964e72ac0d5a67726b6d26167
6,878
py
Python
client/watchdog/core/cloud.py
cispa/bitahoy
ffc2004930a033cfb94d13671bc6068b473ce226
[ "MIT" ]
null
null
null
client/watchdog/core/cloud.py
cispa/bitahoy
ffc2004930a033cfb94d13671bc6068b473ce226
[ "MIT" ]
null
null
null
client/watchdog/core/cloud.py
cispa/bitahoy
ffc2004930a033cfb94d13671bc6068b473ce226
[ "MIT" ]
2
2021-12-30T16:48:15.000Z
2022-01-14T14:21:15.000Z
import asyncio import time import aiohttp from bitahoy_sdk.backend import BackendWS requests_exceptions = (aiohttp.ClientError,) default_backends = { "auth": "https://auth.bitahoy.cloud", "control": "https://control.bitahoy.cloud", "monitoring": "https://monitoring.bitahoy.cloud", "update": "https://update.bitahoy.cloud", "addon": "https://addon.bitahoy.cloud", "ai": "https://ml.bitahoy.cloud", } class BaseBackend: # noqa: SIM119 def __init__(self): self.s = None async def session(self): if not self.s: self.s = aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=5)) return self.s class AuthBackend(BaseBackend): def __init__(self, config=None, logger=None): self.url = default_backends["auth"] self.wdcode = "" try: with open("/sys/firmware/devicetree/base/serial-number", "rb") as f: self.wdcode = f.read().replace(b"\x00", b"").decode().strip() assert len(self.wdcode) == 16 except FileNotFoundError: if config and "wdcode" in config: self.wdcode = config["wdcode"] try: with open("./secret.txt", "r") as f: self.secret = f.read().strip() except FileNotFoundError: self.secret = "" if len(self.secret) != 16: with open("./secret.txt", "w") as f: import secrets self.secret = secrets.token_hex(8) assert len(self.secret) == 16 f.write(self.secret) if config and "url" in config: self.url = config["url"] if config and "secret" in config: self.secret = config["secret"] self.__cached_token = (0, b"") super().__init__() async def request_token(self, nocache=False): if not nocache and self.__cached_token[0] > time.time(): return self.__cached_token[1] for i in range(3): try: async with (await self.session()).get( self.url + "/authenticateWatchdog", json={"wdcode": self.wdcode, "secret": self.secret} ) as response: try: assert response.status == 200 res = await response.json() assert res["success"] is True self.__cached_token = (time.time() + 60, res["token"]) return res["token"] except Exception: raise Exception( (await response.text()) + " " + repr({"wdcode": self.wdcode, 
"secret": self.secret}), response.status ) except (aiohttp.ClientError, asyncio.exceptions.TimeoutError) as e: await asyncio.sleep(0.5 * (i + 1)) if i >= 2: raise e class ControlBackend(BaseBackend): def __init__(self, config, auth_backend: AuthBackend, logger=None): self.url = default_backends["control"] if config and "url" in config: self.url = config["url"] self.request_token = auth_backend.request_token self.logger = logger.asyncio() self.ws = BackendWS(self.url + "/ws", self.request_token, logger=logger) super().__init__() async def ping(self): await self.logger.info(await self.ws.request({"action": "ping"})) async def register_device(self, deviceid, devicetype): resp = await self.ws.request({"action": "registerDevice", "deviceid": deviceid, "devicetype": devicetype}) await self.logger.info("register_device", deviceid, devicetype, resp) return resp async def update_option(self, key, value, deviceid=0): return await self.ws.request({"action": "updateOption", "key": key, "value": value, "deviceid": deviceid}) async def request_info(self): # await self.logger.error(await self.request_token()) return (await self.ws.request({"action": "requestInfo"}))["data"] class MonitoringBackend(BaseBackend): def __init__(self, config, auth_backend: AuthBackend, logger=None): self.url = default_backends["monitoring"] if config and "url" in config: self.url = config["url"] self.request_token = auth_backend.request_token self.ws = BackendWS(self.url + "/ws", self.request_token, logger=logger) self.logger = logger.asyncio() super().__init__() async def ping(self): await self.logger.info(await self.ws.request({"action": "ping"})) async def uploadLogs(self, logs): resp = await self.ws.request({"action": "uploadLogs", "logs": logs}) await self.logger.info("uploadLogs", resp) return resp async def uploadNotifications(self, notifications): await self.logger.info("uploadNotifications", notifications) resp = await self.ws.request({"action": "uploadNotifications", "notifications": 
notifications}) await self.logger.info("uploadNotifications", resp) return resp class AddonBackend(BaseBackend): def __init__(self, config, auth_backend: AuthBackend, logger=None): self.url = default_backends["addon"] if config and "url" in config: self.url = config["url"] self.request_token = auth_backend.request_token self.ws = BackendWS(self.url + "/ws", self.request_token, logger=logger) self.logger = logger.asyncio() self.typesList = None super().__init__() async def get_installed_addons(self): resp = await self.ws.request({"action": "getAddon", "info": {"timestamp": time.time()}}) if resp["success"]: return resp else: return None async def get_config(self, addonName, deviceName): resp = await self.ws.request( {"action": "getAddon", "info": {"addonName": addonName.lower(), "deviceName": deviceName, "timestamp": time.time()}} ) if resp["success"]: return resp["addon"]["config"] else: return None async def ping(self): await self.logger.info(await self.ws.request({"action": "ping"})) class AIBackend(BaseBackend): def __init__(self, config, auth_backend: AuthBackend, logger=None): self.url = default_backends["ai"] if config and "url" in config: self.url = config["url"] self.request_token = auth_backend.request_token self.ws = BackendWS(self.url + "/ws", self.request_token, logger=logger) self.logger = logger.asyncio() super().__init__() async def ping(self): await self.logger.info("ping now") await self.logger.info(await self.ws.request({"action": "ping"})) await self.logger.info("done")
38
129
0.590869
1d4e99d5e669b240519143e3d141a9ac9f45ea5b
6,654
py
Python
bindings/python/ensmallen_graph/datasets/string/oryzabrachyantha.py
caufieldjh/ensmallen_graph
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
[ "MIT" ]
null
null
null
bindings/python/ensmallen_graph/datasets/string/oryzabrachyantha.py
caufieldjh/ensmallen_graph
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
[ "MIT" ]
null
null
null
bindings/python/ensmallen_graph/datasets/string/oryzabrachyantha.py
caufieldjh/ensmallen_graph
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
[ "MIT" ]
null
null
null
""" This file offers the methods to automatically retrieve the graph Oryza brachyantha. The graph is automatically retrieved from the STRING repository. Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-02 16:44:08.829185 The undirected graph Oryza brachyantha has 21000 nodes and 3433756 weighted edges, of which none are self-loops. The graph is dense as it has a density of 0.01557 and has 9 connected components, where the component with most nodes has 20969 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 151, the mean node degree is 327.02, and the node degree mode is 5. The top 5 most central nodes are 4533.OB02G20940.1 (degree 6064), 4533.OB01G52720.1 (degree 5937), 4533.OB05G25810.1 (degree 4793), 4533.OB12G19370.1 (degree 4578) and 4533.OB06G26320.1 (degree 4164). References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. 
code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import OryzaBrachyantha # Then load the graph graph = OryzaBrachyantha() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks. """ from typing import Dict from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error def OryzaBrachyantha( directed: bool = False, verbose: int = 2, cache_path: str = "graphs/string", **additional_graph_kwargs: Dict ) -> EnsmallenGraph: """Return new instance of the Oryza brachyantha graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False, Wether to load the graph as directed or undirected. By default false. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache_path: str = "graphs", Where to store the downloaded graphs. additional_graph_kwargs: Dict, Additional graph kwargs. Returns ----------------------- Instace of Oryza brachyantha graph. 
Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-02 16:44:08.829185 The undirected graph Oryza brachyantha has 21000 nodes and 3433756 weighted edges, of which none are self-loops. The graph is dense as it has a density of 0.01557 and has 9 connected components, where the component with most nodes has 20969 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 151, the mean node degree is 327.02, and the node degree mode is 5. The top 5 most central nodes are 4533.OB02G20940.1 (degree 6064), 4533.OB01G52720.1 (degree 5937), 4533.OB05G25810.1 (degree 4793), 4533.OB12G19370.1 (degree 4578) and 4533.OB06G26320.1 (degree 4164). References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. 
code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import OryzaBrachyantha # Then load the graph graph = OryzaBrachyantha() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks. """ return AutomaticallyRetrievedGraph( graph_name="OryzaBrachyantha", dataset="string", directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs )()
35.206349
223
0.700781
5521ba3f86a362306d3ee925ec75414b985d4892
9,459
py
Python
qiskit/opflow/evolutions/pauli_trotter_evolution.py
Roshan-Thomas/qiskit-terra
77219b5c7b7146b1545c5e5190739b36f4064b2f
[ "Apache-2.0" ]
1,599
2018-07-10T10:59:12.000Z
2022-03-31T23:56:25.000Z
qiskit/opflow/evolutions/pauli_trotter_evolution.py
Roshan-Thomas/qiskit-terra
77219b5c7b7146b1545c5e5190739b36f4064b2f
[ "Apache-2.0" ]
5,244
2018-07-10T06:20:13.000Z
2022-03-31T22:18:48.000Z
qiskit/opflow/evolutions/pauli_trotter_evolution.py
Roshan-Thomas/qiskit-terra
77219b5c7b7146b1545c5e5190739b36f4064b2f
[ "Apache-2.0" ]
1,409
2018-07-10T02:16:12.000Z
2022-03-31T09:01:32.000Z
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

""" PauliTrotterEvolution Class """

import logging
from typing import Optional, Union, cast

import numpy as np

from qiskit.circuit.library import PauliEvolutionGate
from qiskit.synthesis import LieTrotter, SuzukiTrotter
from qiskit.opflow.converters.pauli_basis_change import PauliBasisChange
from qiskit.opflow.evolutions.evolution_base import EvolutionBase
from qiskit.opflow.evolutions.evolved_op import EvolvedOp
from qiskit.opflow.evolutions.trotterizations.trotterization_base import TrotterizationBase
from qiskit.opflow.evolutions.trotterizations.trotterization_factory import TrotterizationFactory
from qiskit.opflow.list_ops.list_op import ListOp
from qiskit.opflow.list_ops.summed_op import SummedOp
from qiskit.opflow.operator_base import OperatorBase
from qiskit.opflow.operator_globals import I, Z
from qiskit.opflow.primitive_ops.pauli_op import PauliOp
from qiskit.opflow.primitive_ops.circuit_op import CircuitOp
from qiskit.opflow.primitive_ops.pauli_sum_op import PauliSumOp
from qiskit.opflow.primitive_ops.primitive_op import PrimitiveOp

# TODO uncomment when we implement Abelian grouped evolution.
# from qiskit.opflow.converters.abelian_grouper import AbelianGrouper

logger = logging.getLogger(__name__)


class PauliTrotterEvolution(EvolutionBase):
    r"""
    An Evolution algorithm replacing exponentiated sums of Paulis by changing them each to the
    Z basis, rotating with an rZ, changing back, and Trotterizing.

    More specifically, we compute basis change circuits for each Pauli into a single-qubit Z,
    evolve the Z by the desired evolution time with an rZ gate, and change the basis back using
    the adjoint of the original basis change circuit. For sums of Paulis, the individual Pauli
    evolution circuits are composed together by Trotterization scheme.
    """

    def __init__(
        self,
        trotter_mode: Optional[Union[str, TrotterizationBase]] = "trotter",
        reps: Optional[int] = 1,
        # TODO uncomment when we implement Abelian grouped evolution.
        # group_paulis: Optional[bool] = False
    ) -> None:
        """
        Args:
            trotter_mode: A string ('trotter', 'suzuki', or 'qdrift') to pass to the
                TrotterizationFactory, or a TrotterizationBase, indicating how to combine
                individual Pauli evolution circuits to equal the exponentiation of the Pauli sum.
            reps: How many Trotterization repetitions to make, to improve the approximation
                accuracy.
            # TODO uncomment when we implement Abelian grouped evolution.
            # group_paulis: Whether to group Pauli sums into Abelian
            #     sub-groups, so a single diagonalization circuit can be used for each group
            #     rather than each Pauli.
        """
        if isinstance(trotter_mode, TrotterizationBase):
            self._trotter = trotter_mode
        else:
            self._trotter = TrotterizationFactory.build(mode=trotter_mode, reps=reps)

        # TODO uncomment when we implement Abelian grouped evolution.
        # self._grouper = AbelianGrouper() if group_paulis else None

    @property
    def trotter(self) -> TrotterizationBase:
        """TrotterizationBase used to evolve SummedOps."""
        return self._trotter

    @trotter.setter
    def trotter(self, trotter: TrotterizationBase) -> None:
        """Set TrotterizationBase used to evolve SummedOps."""
        self._trotter = trotter

    def convert(self, operator: OperatorBase) -> OperatorBase:
        r"""
        Traverse the operator, replacing ``EvolvedOps`` with ``CircuitOps`` containing
        Trotterized evolutions equalling the exponentiation of -i * operator.

        Args:
            operator: The Operator to convert.

        Returns:
            The converted operator.
        """
        # TODO uncomment when we implement Abelian grouped evolution.
        # if self._grouper:
        #     # Sort into commuting groups
        #     operator = self._grouper.convert(operator).reduce()
        return self._recursive_convert(operator)

    def _get_evolution_synthesis(self):
        """Return the ``EvolutionSynthesis`` corresponding to this Trotterization."""
        if self.trotter.order == 1:
            return LieTrotter(reps=self.trotter.reps)
        return SuzukiTrotter(reps=self.trotter.reps, order=self.trotter.order)

    def _recursive_convert(self, operator: OperatorBase) -> OperatorBase:
        # Walk the operator tree, replacing each EvolvedOp with an equivalent circuit.
        if isinstance(operator, EvolvedOp):
            if isinstance(operator.primitive, (PauliOp, PauliSumOp)):
                pauli = operator.primitive.primitive
                time = operator.coeff * operator.primitive.coeff
                evo = PauliEvolutionGate(
                    pauli, time=time, synthesis=self._get_evolution_synthesis()
                )
                return CircuitOp(evo)
                # operator = EvolvedOp(operator.primitive.to_pauli_op(), coeff=operator.coeff)
            if not {"Pauli"} == operator.primitive_strings():
                logger.warning(
                    "Evolved Hamiltonian is not composed of only Paulis, converting to "
                    "Pauli representation, which can be expensive."
                )
                # Setting massive=False because this conversion is implicit. User can perform this
                # action on the Hamiltonian with massive=True explicitly if they so choose.
                # TODO explore performance to see whether we should avoid doing this repeatedly
                pauli_ham = operator.primitive.to_pauli_op(massive=False)
                operator = EvolvedOp(pauli_ham, coeff=operator.coeff)

            if isinstance(operator.primitive, SummedOp):
                # TODO uncomment when we implement Abelian grouped evolution.
                # if operator.primitive.abelian:
                #     return self.evolution_for_abelian_paulisum(operator.primitive)
                # else:
                # Collect terms that are not the identity.
                oplist = [
                    x
                    for x in operator.primitive
                    if not isinstance(x, PauliOp) or sum(x.primitive.x + x.primitive.z) != 0
                ]
                # Collect the coefficients of any identity terms,
                # which become global phases when exponentiated.
                identity_phases = [
                    x.coeff
                    for x in operator.primitive
                    if isinstance(x, PauliOp) and sum(x.primitive.x + x.primitive.z) == 0
                ]
                # Construct sum without the identity operators.
                new_primitive = SummedOp(oplist, coeff=operator.primitive.coeff)

                trotterized = self.trotter.convert(new_primitive)
                circuit_no_identities = self._recursive_convert(trotterized)
                # Set the global phase of the QuantumCircuit to account for removed identity terms.
                global_phase = -sum(identity_phases) * operator.primitive.coeff
                circuit_no_identities.primitive.global_phase = global_phase
                return circuit_no_identities
            # Covers ListOp, ComposedOp, TensoredOp
            elif isinstance(operator.primitive, ListOp):
                converted_ops = [self._recursive_convert(op) for op in operator.primitive.oplist]
                return operator.primitive.__class__(converted_ops, coeff=operator.coeff)
        elif isinstance(operator, ListOp):
            return operator.traverse(self.convert).reduce()

        return operator

    def evolution_for_pauli(self, pauli_op: PauliOp) -> PrimitiveOp:
        r"""
        Compute evolution Operator for a single Pauli using a ``PauliBasisChange``.

        Args:
            pauli_op: The ``PauliOp`` to evolve.

        Returns:
            A ``PrimitiveOp``, either the evolution ``CircuitOp`` or a ``PauliOp`` equal to the
            identity if pauli_op is the identity.
        """

        def replacement_fn(cob_instr_op, dest_pauli_op):
            z_evolution = dest_pauli_op.exp_i()
            # Remember, circuit composition order is mirrored operator composition order.
            return cob_instr_op.adjoint().compose(z_evolution).compose(cob_instr_op)

        # Note: PauliBasisChange will pad destination with identities
        # to produce correct CoB circuit
        sig_bits = np.logical_or(pauli_op.primitive.z, pauli_op.primitive.x)
        a_sig_bit = int(max(np.extract(sig_bits, np.arange(pauli_op.num_qubits)[::-1])))
        destination = (I.tensorpower(a_sig_bit)) ^ (Z * pauli_op.coeff)
        cob = PauliBasisChange(destination_basis=destination, replacement_fn=replacement_fn)
        return cast(PrimitiveOp, cob.convert(pauli_op))

    # TODO implement Abelian grouped evolution.
    def evolution_for_abelian_paulisum(self, op_sum: SummedOp) -> PrimitiveOp:
        """Evolution for abelian pauli sum"""
        raise NotImplementedError
47.532663
99
0.674596
e34d310d846d1f44ff16eee48cc888c3888263cf
383
py
Python
HackerRank Solutions/Python/Strings/Find a String.py
DevashishPathrabe/Competetive-Coding
91049459359854b7834cbfb31415682600dc9c57
[ "MIT" ]
13
2021-09-02T07:30:02.000Z
2022-03-22T19:32:03.000Z
HackerRank Solutions/Python/Strings/Find a String.py
DevashishPathrabe/Competetive-Coding
91049459359854b7834cbfb31415682600dc9c57
[ "MIT" ]
null
null
null
HackerRank Solutions/Python/Strings/Find a String.py
DevashishPathrabe/Competetive-Coding
91049459359854b7834cbfb31415682600dc9c57
[ "MIT" ]
3
2021-08-24T16:06:22.000Z
2021-09-17T15:39:53.000Z
def count_substring(string, sub_string): noOfOcurrence = 0 for i in range(len(string)-len(sub_string)+1): if string[i:i+len(sub_string)] == sub_string: noOfOcurrence += 1 return noOfOcurrence if __name__ == '__main__': string = input().strip() sub_string = input().strip() count = count_substring(string, sub_string) print(count)
29.461538
53
0.64752
f4562187dff299f969f74c37225884c8633126ad
906
py
Python
downloader/markdown_model.py
wmpscc/FocusAX
7bd1d10b4bdb84dc12b00d3fe3ec3a72a6d7fe48
[ "Apache-2.0" ]
2
2022-01-09T12:32:31.000Z
2022-03-03T12:33:24.000Z
downloader/markdown_model.py
wmpscc/FocusAX
7bd1d10b4bdb84dc12b00d3fe3ec3a72a6d7fe48
[ "Apache-2.0" ]
null
null
null
downloader/markdown_model.py
wmpscc/FocusAX
7bd1d10b4bdb84dc12b00d3fe3ec3a72a6d7fe48
[ "Apache-2.0" ]
null
null
null
import os def set_detail(summary, content): content = content.strip() s = f''' <details> <summary>{summary}</summary> {content} </details> ''' return s def set_img(path_img_dir, is_all_typing): files = os.listdir(path_img_dir) if len(files) == 0: return '' s = '' if is_all_typing: path_img_dir = path_img_dir.replace('\\', '/') prefix = path_img_dir.split('/')[-2] + '/' else: prefix = '' for f in files: s = s + f'<img src="./{prefix}img/{f}" align="middle">\n' return set_detail('论文截图', s) def typing(title_en, abs_en, url, title_cn, abs_cn, img_dir, is_all_typing=False): if abs_cn != '': cn = set_detail("中文摘要", abs_cn) else: cn = '' img = set_img(img_dir, is_all_typing) dsc = f''' ### {title_en} **{title_cn}** {abs_en} {cn} [download]({url}) {img} ''' return dsc
18.875
82
0.56181
3dc8bff9202f36afd0ba4bd7762cf74c3b8e6219
15,797
py
Python
python/pyarrow/tests/test_schema.py
bob-skowron/arrow
6d23cccef064a3fc99d4e9286f62058cba0ee6b0
[ "Apache-2.0" ]
3
2021-09-10T09:06:22.000Z
2021-09-26T02:07:23.000Z
python/pyarrow/tests/test_schema.py
bob-skowron/arrow
6d23cccef064a3fc99d4e9286f62058cba0ee6b0
[ "Apache-2.0" ]
1
2020-01-03T07:39:04.000Z
2020-01-03T07:39:04.000Z
python/pyarrow/tests/test_schema.py
bob-skowron/arrow
6d23cccef064a3fc99d4e9286f62058cba0ee6b0
[ "Apache-2.0" ]
null
null
null
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

"""Tests for pyarrow type factories, Field and Schema behaviour."""

from collections import OrderedDict
import pickle
import sys

import pytest

import numpy as np
import pyarrow as pa


def test_schema_constructor_errors():
    msg = ("Do not call Schema's constructor directly, use `pyarrow.schema` "
           "instead")
    with pytest.raises(TypeError, match=msg):
        pa.Schema()


def test_type_integers():
    dtypes = ['int8', 'int16', 'int32', 'int64',
              'uint8', 'uint16', 'uint32', 'uint64']

    for name in dtypes:
        factory = getattr(pa, name)
        t = factory()
        assert str(t) == name


def test_type_to_pandas_dtype():
    M8_ns = np.dtype('datetime64[ns]')
    cases = [
        (pa.null(), np.float64),
        (pa.bool_(), np.bool_),
        (pa.int8(), np.int8),
        (pa.int16(), np.int16),
        (pa.int32(), np.int32),
        (pa.int64(), np.int64),
        (pa.uint8(), np.uint8),
        (pa.uint16(), np.uint16),
        (pa.uint32(), np.uint32),
        (pa.uint64(), np.uint64),
        (pa.float16(), np.float16),
        (pa.float32(), np.float32),
        (pa.float64(), np.float64),
        (pa.date32(), M8_ns),
        (pa.date64(), M8_ns),
        (pa.timestamp('ms'), M8_ns),
        (pa.binary(), np.object_),
        (pa.binary(12), np.object_),
        (pa.string(), np.object_),
        (pa.list_(pa.int8()), np.object_),
        # (pa.list_(pa.int8(), 2), np.object_),   # TODO needs pandas conversion
    ]
    for arrow_type, numpy_type in cases:
        assert arrow_type.to_pandas_dtype() == numpy_type


def test_type_list():
    value_type = pa.int32()
    list_type = pa.list_(value_type)
    assert str(list_type) == 'list<item: int32>'

    field = pa.field('my_item', pa.string())
    l2 = pa.list_(field)
    assert str(l2) == 'list<my_item: string>'


def test_type_comparisons():
    val = pa.int32()
    assert val == pa.int32()
    assert val == 'int32'
    assert val != 5


def test_type_for_alias():
    cases = [
        ('i1', pa.int8()),
        ('int8', pa.int8()),
        ('i2', pa.int16()),
        ('int16', pa.int16()),
        ('i4', pa.int32()),
        ('int32', pa.int32()),
        ('i8', pa.int64()),
        ('int64', pa.int64()),
        ('u1', pa.uint8()),
        ('uint8', pa.uint8()),
        ('u2', pa.uint16()),
        ('uint16', pa.uint16()),
        ('u4', pa.uint32()),
        ('uint32', pa.uint32()),
        ('u8', pa.uint64()),
        ('uint64', pa.uint64()),
        ('f4', pa.float32()),
        ('float32', pa.float32()),
        ('f8', pa.float64()),
        ('float64', pa.float64()),
        ('date32', pa.date32()),
        ('date64', pa.date64()),
        ('string', pa.string()),
        ('str', pa.string()),
        ('binary', pa.binary()),
        ('time32[s]', pa.time32('s')),
        ('time32[ms]', pa.time32('ms')),
        ('time64[us]', pa.time64('us')),
        ('time64[ns]', pa.time64('ns')),
        ('timestamp[s]', pa.timestamp('s')),
        ('timestamp[ms]', pa.timestamp('ms')),
        ('timestamp[us]', pa.timestamp('us')),
        ('timestamp[ns]', pa.timestamp('ns')),
        ('duration[s]', pa.duration('s')),
        ('duration[ms]', pa.duration('ms')),
        ('duration[us]', pa.duration('us')),
        ('duration[ns]', pa.duration('ns')),
    ]
    for val, expected in cases:
        assert pa.type_for_alias(val) == expected


def test_type_string():
    t = pa.string()
    assert str(t) == 'string'


def test_type_timestamp_with_tz():
    tz = 'America/Los_Angeles'
    t = pa.timestamp('ns', tz=tz)
    assert t.unit == 'ns'
    assert t.tz == tz


def test_time_types():
    t1 = pa.time32('s')
    t2 = pa.time32('ms')
    t3 = pa.time64('us')
    t4 = pa.time64('ns')

    assert t1.unit == 's'
    assert t2.unit == 'ms'
    assert t3.unit == 'us'
    assert t4.unit == 'ns'

    assert str(t1) == 'time32[s]'
    assert str(t4) == 'time64[ns]'

    with pytest.raises(ValueError):
        pa.time32('us')

    with pytest.raises(ValueError):
        pa.time64('s')


def test_from_numpy_dtype():
    cases = [
        (np.dtype('bool'), pa.bool_()),
        (np.dtype('int8'), pa.int8()),
        (np.dtype('int16'), pa.int16()),
        (np.dtype('int32'), pa.int32()),
        (np.dtype('int64'), pa.int64()),
        (np.dtype('uint8'), pa.uint8()),
        (np.dtype('uint16'), pa.uint16()),
        (np.dtype('uint32'), pa.uint32()),
        (np.dtype('float16'), pa.float16()),
        (np.dtype('float32'), pa.float32()),
        (np.dtype('float64'), pa.float64()),
        (np.dtype('U'), pa.string()),
        (np.dtype('S'), pa.binary()),
        (np.dtype('datetime64[s]'), pa.timestamp('s')),
        (np.dtype('datetime64[ms]'), pa.timestamp('ms')),
        (np.dtype('datetime64[us]'), pa.timestamp('us')),
        (np.dtype('datetime64[ns]'), pa.timestamp('ns')),
        (np.dtype('timedelta64[s]'), pa.duration('s')),
        (np.dtype('timedelta64[ms]'), pa.duration('ms')),
        (np.dtype('timedelta64[us]'), pa.duration('us')),
        (np.dtype('timedelta64[ns]'), pa.duration('ns')),
    ]
    for dt, pt in cases:
        result = pa.from_numpy_dtype(dt)
        assert result == pt

    # Things convertible to numpy dtypes work
    assert pa.from_numpy_dtype('U') == pa.string()
    assert pa.from_numpy_dtype(np.unicode) == pa.string()
    assert pa.from_numpy_dtype('int32') == pa.int32()
    assert pa.from_numpy_dtype(bool) == pa.bool_()

    with pytest.raises(NotImplementedError):
        pa.from_numpy_dtype(np.dtype('O'))

    with pytest.raises(TypeError):
        pa.from_numpy_dtype('not_convertible_to_dtype')


def test_schema():
    fields = [
        pa.field('foo', pa.int32()),
        pa.field('bar', pa.string()),
        pa.field('baz', pa.list_(pa.int8()))
    ]
    sch = pa.schema(fields)

    assert sch.names == ['foo', 'bar', 'baz']
    assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())]

    assert len(sch) == 3
    assert sch[0].name == 'foo'
    assert sch[0].type == fields[0].type
    assert sch.field('foo').name == 'foo'
    assert sch.field('foo').type == fields[0].type

    assert repr(sch) == """\
foo: int32
bar: string
baz: list<item: int8>
  child 0, item: int8"""

    with pytest.raises(TypeError):
        pa.schema([None])


def test_schema_from_tuples():
    fields = [
        ('foo', pa.int32()),
        ('bar', pa.string()),
        ('baz', pa.list_(pa.int8())),
    ]
    sch = pa.schema(fields)
    assert sch.names == ['foo', 'bar', 'baz']
    assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())]
    assert len(sch) == 3
    assert repr(sch) == """\
foo: int32
bar: string
baz: list<item: int8>
  child 0, item: int8"""

    with pytest.raises(TypeError):
        pa.schema([('foo', None)])


def test_schema_from_mapping():
    fields = OrderedDict([
        ('foo', pa.int32()),
        ('bar', pa.string()),
        ('baz', pa.list_(pa.int8())),
    ])
    sch = pa.schema(fields)
    assert sch.names == ['foo', 'bar', 'baz']
    assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())]
    assert len(sch) == 3
    assert repr(sch) == """\
foo: int32
bar: string
baz: list<item: int8>
  child 0, item: int8"""

    fields = OrderedDict([('foo', None)])
    with pytest.raises(TypeError):
        pa.schema(fields)


def test_schema_duplicate_fields():
    fields = [
        pa.field('foo', pa.int32()),
        pa.field('bar', pa.string()),
        pa.field('foo', pa.list_(pa.int8())),
    ]
    sch = pa.schema(fields)
    assert sch.names == ['foo', 'bar', 'foo']
    assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())]
    assert len(sch) == 3
    assert repr(sch) == """\
foo: int32
bar: string
foo: list<item: int8>
  child 0, item: int8"""

    assert sch[0].name == 'foo'
    assert sch[0].type == fields[0].type
    with pytest.warns(FutureWarning):
        assert sch.field_by_name('bar') == fields[1]
    with pytest.warns(FutureWarning):
        assert sch.field_by_name('xxx') is None
    with pytest.warns((UserWarning, FutureWarning)):
        assert sch.field_by_name('foo') is None


def test_field_flatten():
    f0 = pa.field('foo', pa.int32()).with_metadata({b'foo': b'bar'})
    assert f0.flatten() == [f0]

    f1 = pa.field('bar', pa.float64(), nullable=False)
    ff = pa.field('ff', pa.struct([f0, f1]), nullable=False)
    assert ff.flatten() == [
        pa.field('ff.foo', pa.int32()).with_metadata({b'foo': b'bar'}),
        pa.field('ff.bar', pa.float64(), nullable=False)]  # XXX

    # Nullable parent makes flattened child nullable
    ff = pa.field('ff', pa.struct([f0, f1]))
    assert ff.flatten() == [
        pa.field('ff.foo', pa.int32()).with_metadata({b'foo': b'bar'}),
        pa.field('ff.bar', pa.float64())]

    fff = pa.field('fff', pa.struct([ff]))
    assert fff.flatten() == [pa.field('fff.ff', pa.struct([f0, f1]))]


def test_schema_add_remove_metadata():
    fields = [
        pa.field('foo', pa.int32()),
        pa.field('bar', pa.string()),
        pa.field('baz', pa.list_(pa.int8()))
    ]

    s1 = pa.schema(fields)

    assert s1.metadata is None

    metadata = {b'foo': b'bar', b'pandas': b'badger'}

    s2 = s1.with_metadata(metadata)
    assert s2.metadata == metadata

    s3 = s2.remove_metadata()
    assert s3.metadata is None

    # idempotent
    s4 = s3.remove_metadata()
    assert s4.metadata is None


def test_schema_equals():
    fields = [
        pa.field('foo', pa.int32()),
        pa.field('bar', pa.string()),
        pa.field('baz', pa.list_(pa.int8()))
    ]
    metadata = {b'foo': b'bar', b'pandas': b'badger'}

    sch1 = pa.schema(fields)
    sch2 = pa.schema(fields)
    sch3 = pa.schema(fields, metadata=metadata)
    sch4 = pa.schema(fields, metadata=metadata)

    assert sch1.equals(sch2)
    assert sch3.equals(sch4)
    assert sch1.equals(sch3, check_metadata=False)
    assert not sch1.equals(sch3, check_metadata=True)
    assert not sch1.equals(sch3)

    del fields[-1]
    sch3 = pa.schema(fields)
    assert not sch1.equals(sch3)


def test_schema_equals_propagates_check_metadata():
    # ARROW-4088
    schema1 = pa.schema([
        pa.field('foo', pa.int32()),
        pa.field('bar', pa.string())
    ])
    schema2 = pa.schema([
        pa.field('foo', pa.int32()),
        pa.field('bar', pa.string(), metadata={'a': 'alpha'}),
    ])
    assert not schema1.equals(schema2)
    assert schema1.equals(schema2, check_metadata=False)


def test_schema_equals_invalid_type():
    # ARROW-5873
    schema = pa.schema([pa.field("a", pa.int64())])

    for val in [None, 'string', pa.array([1, 2])]:
        with pytest.raises(TypeError):
            schema.equals(val)


def test_schema_equality_operators():
    fields = [
        pa.field('foo', pa.int32()),
        pa.field('bar', pa.string()),
        pa.field('baz', pa.list_(pa.int8()))
    ]
    metadata = {b'foo': b'bar', b'pandas': b'badger'}

    sch1 = pa.schema(fields)
    sch2 = pa.schema(fields)
    sch3 = pa.schema(fields, metadata=metadata)
    sch4 = pa.schema(fields, metadata=metadata)

    assert sch1 == sch2
    assert sch3 == sch4

    assert sch1 != sch3
    assert sch2 != sch4

    # comparison with other types doesn't raise
    assert sch1 != []
    assert sch3 != 'foo'


def test_schema_get_fields():
    fields = [
        pa.field('foo', pa.int32()),
        pa.field('bar', pa.string()),
        pa.field('baz', pa.list_(pa.int8()))
    ]

    schema = pa.schema(fields)

    assert schema.field('foo').name == 'foo'
    assert schema.field(0).name == 'foo'
    assert schema.field(-1).name == 'baz'

    with pytest.raises(KeyError):
        schema.field('other')
    with pytest.raises(TypeError):
        schema.field(0.0)
    with pytest.raises(IndexError):
        schema.field(4)


def test_schema_negative_indexing():
    fields = [
        pa.field('foo', pa.int32()),
        pa.field('bar', pa.string()),
        pa.field('baz', pa.list_(pa.int8()))
    ]

    schema = pa.schema(fields)

    assert schema[-1].equals(schema[2])
    assert schema[-2].equals(schema[1])
    assert schema[-3].equals(schema[0])

    with pytest.raises(IndexError):
        schema[-4]

    with pytest.raises(IndexError):
        schema[3]


def test_schema_repr_with_dictionaries():
    fields = [
        pa.field('one', pa.dictionary(pa.int16(), pa.string())),
        pa.field('two', pa.int32())
    ]
    sch = pa.schema(fields)

    expected = (
        """\
one: dictionary<values=string, indices=int16, ordered=0>
two: int32""")

    assert repr(sch) == expected


def test_type_schema_pickling():
    cases = [
        pa.int8(),
        pa.string(),
        pa.binary(),
        pa.binary(10),
        pa.list_(pa.string()),
        pa.map_(pa.string(), pa.int8()),
        pa.struct([
            pa.field('a', 'int8'),
            pa.field('b', 'string')
        ]),
        pa.union([
            pa.field('a', pa.int8()),
            pa.field('b', pa.int16())
        ], pa.lib.UnionMode_SPARSE),
        pa.union([
            pa.field('a', pa.int8()),
            pa.field('b', pa.int16())
        ], pa.lib.UnionMode_DENSE),
        pa.time32('s'),
        pa.time64('us'),
        pa.date32(),
        pa.date64(),
        pa.timestamp('ms'),
        pa.timestamp('ns'),
        pa.decimal128(12, 2),
        pa.field('a', 'string', metadata={b'foo': b'bar'})
    ]

    for val in cases:
        roundtripped = pickle.loads(pickle.dumps(val))
        assert val == roundtripped

    fields = []
    for i, f in enumerate(cases):
        if isinstance(f, pa.Field):
            fields.append(f)
        else:
            fields.append(pa.field('_f{}'.format(i), f))

    schema = pa.schema(fields, metadata={b'foo': b'bar'})
    roundtripped = pickle.loads(pickle.dumps(schema))
    assert schema == roundtripped


def test_empty_table():
    schema = pa.schema([
        pa.field('oneField', pa.int64())
    ])
    table = schema.empty_table()
    assert isinstance(table, pa.Table)
    assert table.num_rows == 0
    assert table.schema == schema


@pytest.mark.pandas
def test_schema_from_pandas():
    import pandas as pd
    inputs = [
        list(range(10)),
        pd.Categorical(list(range(10))),
        ['foo', 'bar', None, 'baz', 'qux'],
        np.array([
            '2007-07-13T01:23:34.123456789',
            '2006-01-13T12:34:56.432539784',
            '2010-08-13T05:46:57.437699912'
        ], dtype='datetime64[ns]')
    ]
    for data in inputs:
        df = pd.DataFrame({'a': data})
        schema = pa.Schema.from_pandas(df)
        expected = pa.Table.from_pandas(df).schema
        assert schema == expected


def test_schema_sizeof():
    schema = pa.schema([
        pa.field('foo', pa.int32()),
        pa.field('bar', pa.string()),
    ])

    assert sys.getsizeof(schema) > 30

    schema2 = schema.with_metadata({"key": "some metadata"})
    assert sys.getsizeof(schema2) > sys.getsizeof(schema)
    schema3 = schema.with_metadata({"key": "some more metadata"})
    assert sys.getsizeof(schema3) > sys.getsizeof(schema2)
28.108541
79
0.569412
108ff0d8d3e02c2ac96101e0a29dcd785154950f
3,840
py
Python
getlogin/views.py
chungyan5/MigrateEmailServer
0f1452ddc7e7ec93d18a49c013973b3476138e04
[ "Apache-2.0" ]
null
null
null
getlogin/views.py
chungyan5/MigrateEmailServer
0f1452ddc7e7ec93d18a49c013973b3476138e04
[ "Apache-2.0" ]
null
null
null
getlogin/views.py
chungyan5/MigrateEmailServer
0f1452ddc7e7ec93d18a49c013973b3476138e04
[ "Apache-2.0" ]
null
null
null
from django.shortcuts import render
from .forms import LoginPairForm
import subprocess
from .models import LoginPair
import shlex
import StringIO  # NOTE(review): Python-2-only module; this app targets Python 2.


def home(request):
    """Handle the migration page: on POST, run imapsync for every saved login
    pair until each account syncs without errors; on GET, show the login form.
    """
    ##############################
    # user already login and save its login info.
    if request.method == "POST":
        form = LoginPairForm(request.POST)

        ##############################
        # get all accounts one by one to sync
        testCnt = 0
        for thisLoginUser in LoginPair.objects.all():

            ##############################
            # get this account info.

            ##############################
            # imapsync and its parameters
            oldHost = "127.0.0.1"
            oldUserName = thisLoginUser.email
            oldPw = thisLoginUser.newPw
            newHost = "amonicscom.securemail.hk"
            newUserName = thisLoginUser.email
            newPw = thisLoginUser.newPw
            # NOTE(review): the password is interpolated into a shell-style string
            # before shlex.split; passwords containing spaces/quotes would break
            # the argument list — confirm upstream validation.
            cmd = "imapsync --syncinternaldates --sep1 / --prefix1 / --nofoldersizes --skipsize" \
                + " --useuid --usecache --subscribe_all" \
                + " --host1 " + oldHost + " --authmech1 LOGIN --user1 " + oldUserName + " --password1 " + oldPw \
                + " --host2 " + newHost + " --authmech2 LOGIN --user2 " + newUserName + " --password2 " + newPw
            argu = shlex.split(cmd)

            ##############################
            # monitor tx error or success
            oneAccSyncErrFlag = True
            while (oneAccSyncErrFlag):

                ##############################
                # do a imapsync dry connection and chk how many msg to sync
                proc = subprocess.Popen(argu, stdout=subprocess.PIPE,
                                        cwd="/opt/MigrateEmailServer/")
                stdoutdata = proc.communicate()[0]

                ##############################
                # find err.
                loginAuthFailure = "Failure: error login on"
                if loginAuthFailure in stdoutdata:
                    return render(request, 'getlogin/main.html', {
                        'form': form,
                        'statusMsg': "your login email or password wrong, please try again.",
                    })

                ##############################
                # find sucessful tx or not
                detectStr = "Detected"
                buf = StringIO.StringIO(stdoutdata)
                for oneLine in buf.readlines():
                    if detectStr in oneLine:
                        txCntArr = oneLine.split( );
                        if txCntArr[2] == "errors":
                            # "Detected N errors" with N == 0 means this
                            # account finished syncing cleanly.
                            if txCntArr[1] == '0':
                                oneAccSyncErrFlag = False

            #testCnt += 1
            #if testCnt >=3:
            #    break

        txCnt = "all done"
        return render(request, 'getlogin/progress.html', {
            'txCnt' : txCnt,
        })

    ##############################
    # fresh load this page, show the form to let the user to input his/her login info.
    else:
        form = LoginPairForm()
        return render(request, 'getlogin/main.html', {
            'form': form,
            'statusMsg': "",
        })


def listUsers(request):
    """List login pairs whose password is the literal string "null"."""
    lp = LoginPair.objects.filter(pw="null")
    return render(request, 'getlogin/list.html', {'unDidloginPairs':lp})
43.146067
135
0.409115
d997d5cddad18c1d2db98b4f8970a43d6d4a0a7e
1,248
py
Python
cosmpy/crypto/interface.py
evsmithx/cosmpy
7dfc81528b287f90190d6d4387942340f8ab88cf
[ "Apache-2.0" ]
15
2021-09-08T05:27:14.000Z
2022-03-29T06:48:08.000Z
cosmpy/crypto/interface.py
evsmithx/cosmpy
7dfc81528b287f90190d6d4387942340f8ab88cf
[ "Apache-2.0" ]
36
2021-09-01T08:58:33.000Z
2022-03-30T11:40:56.000Z
cosmpy/crypto/interface.py
evsmithx/cosmpy
7dfc81528b287f90190d6d4387942340f8ab88cf
[ "Apache-2.0" ]
4
2021-10-04T09:29:56.000Z
2022-03-18T15:43:06.000Z
# -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2018-2021 Fetch.AI Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------------ """Interface for a Signer.""" from abc import ABC, abstractmethod class Signer(ABC): """Signer abstract class.""" @abstractmethod def sign( self, message: bytes, deterministic=False, canonicalise: bool = True ) -> bytes: """Perform signing.""" @abstractmethod def sign_digest( self, digest: bytes, deterministic=False, canonicalise: bool = True ) -> bytes: """Perform digest signing."""
32
80
0.592147
f1db4cfbd59bf9326fac2eef8175c230d9c067df
8,326
py
Python
content/test/gpu/unexpected_passes/builders.py
iridium-browser/iridium-browser
907e31cf5ce5ad14d832796e3a7c11e496828959
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
575
2015-06-18T23:58:20.000Z
2022-03-23T09:32:39.000Z
content/test/gpu/unexpected_passes/builders.py
iridium-browser/iridium-browser
907e31cf5ce5ad14d832796e3a7c11e496828959
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
113
2015-05-04T09:58:14.000Z
2022-01-31T19:35:03.000Z
content/test/gpu/unexpected_passes/builders.py
iridium-browser/iridium-browser
907e31cf5ce5ad14d832796e3a7c11e496828959
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
52
2015-07-14T10:40:50.000Z
2022-03-15T01:11:49.000Z
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Methods related to querying builder information from Buildbucket."""

import fnmatch
import json
import logging
import os
import subprocess

from unexpected_passes import multiprocessing_utils

TESTING_BUILDBOT_DIR = os.path.realpath(
    os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', 'testing',
                 'buildbot'))
AUTOGENERATED_JSON_KEY = 'AAAAA1 AUTOGENERATED FILE DO NOT EDIT'
GPU_TELEMETRY_ISOLATES = {
    'fuchsia_telemetry_gpu_integration_test',
    'telemetry_gpu_integration_test',
}

# There are a few CI builders that don't actually exist, but have trybot
# mirrors. So, define a manual mapping here.
# Go from try -> CI then reverse the mapping so that there's less of a chance
# of typos being introduced in the repeated trybot names.
FAKE_TRY_BUILDERS = {
    # chromium.gpu.fyi
    'android_angle_rel_ng': [
        'ANGLE GPU Android Release (Nexus 5X)',
    ],
    'android_optional_gpu_tests_rel': [
        'Optional Android Release (Nexus 5X)',
    ],
    'linux-angle-rel': [
        'ANGLE GPU Linux Release (Intel HD 630)',
        'ANGLE GPU Linux Release (NVIDIA)',
    ],
    'linux_optional_gpu_tests_rel': [
        'Optional Linux Release (Intel HD 630)',
        'Optional Linux Release (NVIDIA)',
    ],
    'mac_optional_gpu_tests_rel': [
        'Optional Mac Release (Intel)',
        'Optional Mac Retina Release (AMD)',
        'Optional Mac Retina Release (NVIDIA)',
    ],
    'win_optional_gpu_tests_rel': [
        'Optional Win10 x64 Release (Intel HD 630)',
        'Optional Win10 x64 Release (NVIDIA)',
    ],
}

FAKE_CI_BUILDERS = {}
# Fix: dict.iteritems() is Python-2-only; dict.items() behaves identically
# here and works on both Python 2 and 3.
for try_builder, ci_builder_list in FAKE_TRY_BUILDERS.items():
  for ci in ci_builder_list:
    FAKE_CI_BUILDERS[ci] = try_builder

# There are some builders that aren't under the Chromium Buildbucket project
# but are listed in the Chromium //testing/buildbot files. These don't use the
# same recipes as Chromium builders, and thus don't have the list of trybot
# mirrors.
NON_CHROMIUM_BUILDERS = {
    'Win V8 FYI Release (NVIDIA)',
    'Mac V8 FYI Release (Intel)',
    'Linux V8 FYI Release - pointer compression (NVIDIA)',
    'Linux V8 FYI Release (NVIDIA)',
    'Android V8 FYI Release (Nexus 5X)',
}


def GetCiBuilders(suite):
  """Gets the set of CI builders to query.

  Args:
    suite: A string containing the suite (as known by Telemetry) that will be
        queried. Used to filter out builders that don't actually run the suite
        in question.

  Returns:
    A set of strings, each element being the name of a Chromium CI builder to
    query results from.
  """
  logging.info('Getting CI builders')
  ci_builders = set()
  for buildbot_file in os.listdir(TESTING_BUILDBOT_DIR):
    if not buildbot_file.endswith('.json'):
      continue
    filepath = os.path.join(TESTING_BUILDBOT_DIR, buildbot_file)
    with open(filepath) as f:
      buildbot_json = json.load(f)
    # Skip any JSON files that don't contain builder information.
    if AUTOGENERATED_JSON_KEY not in buildbot_json:
      continue

    # Fix: iteritems() -> items() for Python 2/3 compatibility.
    for builder, test_map in buildbot_json.items():
      # Remove compile-only builders and the auto-generated comments.
      if 'Builder' in builder or 'AAAA' in builder:
        continue
      # Filter out any builders that don't run the suite in question.
      if not _SuiteInTests(suite, test_map.get('isolated_scripts', [])):
        continue
      ci_builders.add(builder)
  logging.debug('Got %d CI builders after trimming: %s', len(ci_builders),
                ci_builders)
  return ci_builders


def _SuiteInTests(suite, tests):
  """Determines if |suite| is run as part of |tests|.

  Args:
    suite: A string containing the suite (as known by Telemetry).
    tests: A list of dictionaries, each dictionary containing a test
        definition as found in the //testing/buildbot JSON files.

  Returns:
    True if |suite| is run as part of |tests|, else False.
  """
  for t in tests:
    if t.get('isolate_name') not in GPU_TELEMETRY_ISOLATES:
      continue
    if suite in t.get('args', []):
      return True
  return False


def GetTryBuilders(ci_builders):
  """Gets the set of try builders to query.

  A try builder is of interest if it mirrors a builder in |ci_builders|.

  Args:
    ci_builders: An iterable of strings, each element being the name of a
        Chromium CI builder that results will be/were queried from.

  Returns:
    A set of strings, each element being the name of a Chromium try builder to
    query results from.
  """
  logging.info('Getting try builders')
  mirrored_builders = set()
  no_output_builders = set()

  pool = multiprocessing_utils.GetProcessPool()
  results = pool.map(_GetMirroredBuildersForCiBuilder, ci_builders)
  for (builders, found_mirror) in results:
    if found_mirror:
      mirrored_builders |= builders
    else:
      no_output_builders |= builders

  if no_output_builders:
    raise RuntimeError(
        'Did not get Buildbucket output for the following builders. They may '
        'need to be added to the FAKE_TRY_BUILDERS or NON_CHROMIUM_BUILDERS '
        'mappings.\n%s' % '\n'.join(no_output_builders))
  logging.debug('Got %d try builders: %s', len(mirrored_builders),
                mirrored_builders)
  return mirrored_builders


def _GetMirroredBuildersForCiBuilder(ci_builder):
  """Gets the set of try builders that mirror a CI builder.

  Args:
    ci_builder: A string containing the name of a Chromium CI builder.

  Returns:
    A tuple (builders, found_mirror). |builders| is a set of strings, either
    the set of try builders that mirror |ci_builder| or |ci_builder|,
    depending on the value of |found_mirror|. |found_mirror| is True if
    mirrors were actually found, in which case |builders| contains the try
    builders. Otherwise, |found_mirror| is False and |builders| contains
    |ci_builder|.
  """
  mirrored_builders = set()
  if ci_builder in NON_CHROMIUM_BUILDERS:
    logging.debug('%s is a non-Chromium CI builder', ci_builder)
    return mirrored_builders, True

  if ci_builder in FAKE_CI_BUILDERS:
    mirrored_builders.add(FAKE_CI_BUILDERS[ci_builder])
    logging.debug('%s is a fake CI builder mirrored by %s', ci_builder,
                  FAKE_CI_BUILDERS[ci_builder])
    return mirrored_builders, True

  bb_output = _GetBuildbucketOutputForCiBuilder(ci_builder)
  if not bb_output:
    mirrored_builders.add(ci_builder)
    logging.debug('Did not get Buildbucket output for builder %s', ci_builder)
    return mirrored_builders, False

  bb_json = json.loads(bb_output)
  mirrored = bb_json.get('output', {}).get('properties',
                                           {}).get('mirrored_builders', [])
  # The mirror names from Buildbucket include the group separated by :, e.g.
  # tryserver.chromium.android:gpu-fyi-try-android-m-nexus-5x-64, so only grab
  # the builder name.
  for mirror in mirrored:
    split = mirror.split(':')
    assert len(split) == 2
    logging.debug('Got mirrored builder for %s: %s', ci_builder, split[1])
    mirrored_builders.add(split[1])
  return mirrored_builders, True


def _GetBuildbucketOutputForCiBuilder(ci_builder):
  """Fetches the JSON for the most recent completed build of |ci_builder|."""
  # Ensure the user is logged in to bb.
  if not _GetBuildbucketOutputForCiBuilder.authenticated:
    try:
      with open(os.devnull, 'w') as devnull:
        subprocess.check_call(['bb', 'auth-info'],
                              stdout=devnull,
                              stderr=devnull)
    except:
      raise RuntimeError('You are not logged into bb - run `bb auth-login`.')
    _GetBuildbucketOutputForCiBuilder.authenticated = True
  # Split out for ease of testing.
  # Get the Buildbucket ID for the most recent completed build for a builder.
  p = subprocess.Popen([
      'bb',
      'ls',
      '-id',
      '-1',
      '-status',
      'ended',
      'chromium/ci/%s' % ci_builder,
  ],
                       stdout=subprocess.PIPE)
  # Use the ID to get the most recent build.
  bb_output = subprocess.check_output([
      'bb',
      'get',
      '-A',
      '-json',
  ],
                                      stdin=p.stdout)
  return bb_output


_GetBuildbucketOutputForCiBuilder.authenticated = False
34.263374
80
0.684602
e54fc615874b9d4bbc71175fb9aba5ff41d2c7f1
6,589
py
Python
second/pytorch/builder/second_builder.py
jerry99s/second.pytorch
80143908a349b9f3ff1642d21dacaf23455b3cf8
[ "MIT" ]
1,541
2018-10-04T00:32:01.000Z
2022-03-30T17:54:59.000Z
second/pytorch/builder/second_builder.py
jerry99s/second.pytorch
80143908a349b9f3ff1642d21dacaf23455b3cf8
[ "MIT" ]
466
2018-10-06T01:05:28.000Z
2022-03-31T08:49:44.000Z
second/pytorch/builder/second_builder.py
jerry99s/second.pytorch
80143908a349b9f3ff1642d21dacaf23455b3cf8
[ "MIT" ]
537
2018-10-04T07:36:13.000Z
2022-03-27T10:12:02.000Z
# Copyright 2017 yanyan. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""VoxelNet builder.
"""

from second.protos import second_pb2
from second.pytorch.builder import losses_builder
from second.pytorch.models.voxelnet import LossNormType, get_voxelnet_class


def _per_class_nms_param(shared_values, per_class_values, num_class):
    """Resolve one NMS parameter that may be set globally or per class.

    Args:
        shared_values: repeated field on the target assigner config. When
            non-empty it overrides the per-class values and must contain
            exactly one entry per class.
        per_class_values: fallback values, one per class setting.
        num_class: number of classes, used to validate shared_values.

    Returns:
        A list with one value per class.
    """
    if len(shared_values) != 0:
        values = list(shared_values)
        assert len(values) == num_class
        return values
    return per_class_values


def build(model_cfg: second_pb2.VoxelNet, voxel_generator, target_assigner, measure_time=False):
    """Build a SECOND (VoxelNet) pytorch network instance from its config.

    Args:
        model_cfg: a second_pb2.VoxelNet protobuf message describing the model.
        voxel_generator: voxel generator whose grid_size determines the dense
            input shape of the middle feature extractor.
        target_assigner: target assigner, passed through to the network.
        measure_time: when True, the network records timing information.

    Returns:
        An instance of the network class named by model_cfg.network_class_name.

    Raises:
        ValueError: if model_cfg is not a second_pb2.VoxelNet message.
    """
    if not isinstance(model_cfg, second_pb2.VoxelNet):
        raise ValueError('model_cfg not of type ' 'second_pb2.VoxelNet.')
    vfe_num_filters = list(model_cfg.voxel_feature_extractor.num_filters)
    vfe_with_distance = model_cfg.voxel_feature_extractor.with_distance
    grid_size = voxel_generator.grid_size
    # grid_size is (W, H, D); the dense middle-extractor input is
    # [batch, D, H, W, C], hence the reversal.
    dense_shape = [1] + grid_size[::-1].tolist() + [vfe_num_filters[-1]]
    classes_cfg = model_cfg.target_assigner.class_settings
    num_class = len(classes_cfg)
    use_mcnms = [c.use_multi_class_nms for c in classes_cfg]
    use_rotate_nms = [c.use_rotate_nms for c in classes_cfg]
    # Each NMS parameter may be specified once on the target assigner
    # (overriding) or individually on every class setting.
    nms_pre_max_sizes = _per_class_nms_param(
        model_cfg.target_assigner.nms_pre_max_sizes,
        [c.nms_pre_max_size for c in classes_cfg], num_class)
    nms_post_max_sizes = _per_class_nms_param(
        model_cfg.target_assigner.nms_post_max_sizes,
        [c.nms_post_max_size for c in classes_cfg], num_class)
    nms_score_thresholds = _per_class_nms_param(
        model_cfg.target_assigner.nms_score_thresholds,
        [c.nms_score_threshold for c in classes_cfg], num_class)
    nms_iou_thresholds = _per_class_nms_param(
        model_cfg.target_assigner.nms_iou_thresholds,
        [c.nms_iou_threshold for c in classes_cfg], num_class)
    # Mixing multi-class/single-class NMS (or rotate/nearest NMS) across
    # classes is not supported: it must be all or none.
    assert all(use_mcnms) or not any(use_mcnms), "not implemented"
    assert all(use_rotate_nms) or not any(use_rotate_nms), "not implemented"
    if not any(use_mcnms):
        # Single-class NMS shares one parameter set, so all per-class values
        # must agree.
        assert all(e == nms_pre_max_sizes[0] for e in nms_pre_max_sizes)
        assert all(e == nms_post_max_sizes[0] for e in nms_post_max_sizes)
        assert all(e == nms_score_thresholds[0] for e in nms_score_thresholds)
        assert all(e == nms_iou_thresholds[0] for e in nms_iou_thresholds)
    num_input_features = model_cfg.num_point_features
    # Maps the proto enum value to the corresponding LossNormType.
    loss_norm_type_dict = {
        0: LossNormType.NormByNumExamples,
        1: LossNormType.NormByNumPositives,
        2: LossNormType.NormByNumPosNeg,
        3: LossNormType.DontNorm,
    }
    loss_norm_type = loss_norm_type_dict[model_cfg.loss_norm_type]

    losses = losses_builder.build(model_cfg.loss)
    encode_rad_error_by_sin = model_cfg.encode_rad_error_by_sin
    cls_loss_ftor, loc_loss_ftor, cls_weight, loc_weight, _ = losses
    pos_cls_weight = model_cfg.pos_class_weight
    neg_cls_weight = model_cfg.neg_class_weight
    direction_loss_weight = model_cfg.direction_loss_weight
    sin_error_factor = model_cfg.sin_error_factor
    if sin_error_factor == 0:
        # Proto default (field unset) means no scaling.
        sin_error_factor = 1.0

    net = get_voxelnet_class(model_cfg.network_class_name)(
        dense_shape,
        num_class=num_class,
        vfe_class_name=model_cfg.voxel_feature_extractor.module_class_name,
        vfe_num_filters=vfe_num_filters,
        middle_class_name=model_cfg.middle_feature_extractor.module_class_name,
        middle_num_input_features=model_cfg.middle_feature_extractor.num_input_features,
        middle_num_filters_d1=list(
            model_cfg.middle_feature_extractor.num_filters_down1),
        middle_num_filters_d2=list(
            model_cfg.middle_feature_extractor.num_filters_down2),
        rpn_class_name=model_cfg.rpn.module_class_name,
        rpn_num_input_features=model_cfg.rpn.num_input_features,
        rpn_layer_nums=list(model_cfg.rpn.layer_nums),
        rpn_layer_strides=list(model_cfg.rpn.layer_strides),
        rpn_num_filters=list(model_cfg.rpn.num_filters),
        rpn_upsample_strides=list(model_cfg.rpn.upsample_strides),
        rpn_num_upsample_filters=list(model_cfg.rpn.num_upsample_filters),
        use_norm=True,
        use_rotate_nms=all(use_rotate_nms),
        multiclass_nms=all(use_mcnms),
        nms_score_thresholds=nms_score_thresholds,
        nms_pre_max_sizes=nms_pre_max_sizes,
        nms_post_max_sizes=nms_post_max_sizes,
        nms_iou_thresholds=nms_iou_thresholds,
        use_sigmoid_score=model_cfg.use_sigmoid_score,
        encode_background_as_zeros=model_cfg.encode_background_as_zeros,
        use_direction_classifier=model_cfg.use_direction_classifier,
        num_input_features=num_input_features,
        num_groups=model_cfg.rpn.num_groups,
        use_groupnorm=model_cfg.rpn.use_groupnorm,
        with_distance=vfe_with_distance,
        cls_loss_weight=cls_weight,
        loc_loss_weight=loc_weight,
        pos_cls_weight=pos_cls_weight,
        neg_cls_weight=neg_cls_weight,
        direction_loss_weight=direction_loss_weight,
        loss_norm_type=loss_norm_type,
        encode_rad_error_by_sin=encode_rad_error_by_sin,
        loc_loss_ftor=loc_loss_ftor,
        cls_loss_ftor=cls_loss_ftor,
        target_assigner=target_assigner,
        measure_time=measure_time,
        voxel_generator=voxel_generator,
        post_center_range=list(model_cfg.post_center_limit_range),
        dir_offset=model_cfg.direction_offset,
        sin_error_factor=sin_error_factor,
        nms_class_agnostic=model_cfg.nms_class_agnostic,
        num_direction_bins=model_cfg.num_direction_bins,
        direction_limit_offset=model_cfg.direction_limit_offset,
    )
    return net
49.171642
89
0.741387
d9e35077e422b38f69c6e8dc862306606d98d2f0
938
py
Python
imma/image_manipulation.py
mjirik/imma
c666e8280028a672a0816cadd4d0ca9fe57489c5
[ "MIT" ]
null
null
null
imma/image_manipulation.py
mjirik/imma
c666e8280028a672a0816cadd4d0ca9fe57489c5
[ "MIT" ]
null
null
null
imma/image_manipulation.py
mjirik/imma
c666e8280028a672a0816cadd4d0ca9fe57489c5
[ "MIT" ]
1
2019-05-20T05:46:02.000Z
2019-05-20T05:46:02.000Z
#! /usr/bin/python # -*- coding: utf-8 -*- from loguru import logger import os.path import sys import numpy as np import scipy import scipy.ndimage from . import dili from .image import ( as_seeds_inds, fit_to_shape, combinecrinfo, crop, fix_crinfo, extend_crinfo, manualcrop, resize_to_shape, random_rotate_paramteres, resize_to_mm, rotate, uncrop, ) from .labeled import ( select_labels, squeeze_labels, select_objects_by_seeds, crinfo_from_specific_data, distance_segmentation, get_one_biggest_object, max_area_index, ) from .segmentation_labels import ( get_nlabel, add_missing_labels, add_slab_label_carefully, get_nlabels, update_slab, ) from .sparse import isSparseMatrix, SparseMatrix # path_to_script = os.path.dirname(os.path.abspath(__file__)) # sys.path.append(os.path.join(path_to_script, "../extern/sed3")) # import sed3
18.392157
65
0.719616
ece85262446d899a425ac62a0bb1d7a8ff754a50
3,587
py
Python
deploy/utils/logger.py
TxT1212/PaddleClas
5a24c8700f738f036bf27f80ca12dbe8471a11b0
[ "Apache-2.0" ]
3,763
2020-04-10T04:48:11.000Z
2022-03-31T13:24:37.000Z
deploy/utils/logger.py
TxT1212/PaddleClas
5a24c8700f738f036bf27f80ca12dbe8471a11b0
[ "Apache-2.0" ]
633
2020-04-08T18:27:31.000Z
2022-03-31T01:09:43.000Z
deploy/utils/logger.py
TxT1212/PaddleClas
5a24c8700f738f036bf27f80ca12dbe8471a11b0
[ "Apache-2.0" ]
846
2020-04-08T08:13:18.000Z
2022-03-31T12:28:37.000Z
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import functools
import logging
import os

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S")


def time_zone(sec, fmt):
    """Converter for logging.Formatter timestamps.

    The (sec, fmt) arguments are required by the logging converter interface
    but are intentionally ignored: the local wall-clock time is always used.
    """
    real_time = datetime.datetime.now()
    return real_time.timetuple()


# Route every formatter timestamp through time_zone().
logging.Formatter.converter = time_zone

_logger = logging.getLogger(__name__)

# ANSI terminal escape sequences used by coloring().
Color = {
    'RED': '\033[31m',
    'HEADER': '\033[35m',  # deep purple
    'PURPLE': '\033[95m',  # purple
    'OKBLUE': '\033[94m',
    'OKGREEN': '\033[92m',
    'WARNING': '\033[93m',
    'FAIL': '\033[91m',
    'ENDC': '\033[0m'
}


def coloring(message, color="OKGREEN"):
    """Wrap message in ANSI color codes when coloring is enabled.

    Coloring is enabled when the PADDLECLAS_COLORING environment variable is
    set to any non-empty string (note: even "0" enables it, since only the
    string's truthiness is checked).

    Args:
        message: object to colorize; converted with str().
        color: key into the Color table; must be a valid key.

    Returns:
        The (possibly color-wrapped) message string.
    """
    assert color in Color.keys()
    if os.environ.get('PADDLECLAS_COLORING', False):
        return Color[color] + str(message) + Color["ENDC"]
    else:
        return message


def anti_fleet(log):
    """
    logs will print multi-times when calling Fleet API.
    Only display single log and ignore the others.
    """

    # functools.wraps preserves the decorated function's name/docstring.
    @functools.wraps(log)
    def wrapper(fmt, *args):
        # Only the first (rank 0) trainer emits logs; other ranks stay silent.
        if int(os.getenv("PADDLE_TRAINER_ID", 0)) == 0:
            log(fmt, *args)

    return wrapper


@anti_fleet
def info(fmt, *args):
    """Log an info-level message (rank 0 only)."""
    _logger.info(fmt, *args)


@anti_fleet
def warning(fmt, *args):
    """Log a warning-level message in red (rank 0 only)."""
    _logger.warning(coloring(fmt, "RED"), *args)


@anti_fleet
def error(fmt, *args):
    """Log an error-level message in FAIL color (rank 0 only)."""
    _logger.error(coloring(fmt, "FAIL"), *args)


def scaler(name, value, step, writer):
    """
    This function will draw a scalar curve generated by the visualdl.
    Usage: Install visualdl: pip3 install visualdl==2.0.0b4 and then:
    visualdl --logdir ./scalar --host 0.0.0.0 --port 8830
    to preview loss curve in real time.
    """
    writer.add_scalar(tag=name, step=step, value=value)


def advertise():
    """
    Show the advertising message like the following:

    ===========================================================
    ==        PaddleClas is powered by PaddlePaddle !        ==
    ===========================================================
    ==                                                       ==
    ==   For more info please go to the following website.   ==
    ==                                                       ==
    ==       https://github.com/PaddlePaddle/PaddleClas      ==
    ===========================================================

    """
    # Named copyright_msg to avoid shadowing the `copyright` builtin.
    copyright_msg = "PaddleClas is powered by PaddlePaddle !"
    ad = "For more info please go to the following website."
    website = "https://github.com/PaddlePaddle/PaddleClas"
    # Box is sized to the longest line plus padding.
    AD_LEN = 6 + len(max([copyright_msg, ad, website], key=len))

    info(
        coloring("\n{0}\n{1}\n{2}\n{3}\n{4}\n{5}\n{6}\n{7}\n".format(
            "=" * (AD_LEN + 4),
            "=={}==".format(copyright_msg.center(AD_LEN)),
            "=" * (AD_LEN + 4),
            "=={}==".format(' ' * AD_LEN),
            "=={}==".format(ad.center(AD_LEN)),
            "=={}==".format(' ' * AD_LEN),
            "=={}==".format(website.center(AD_LEN)),
            "=" * (AD_LEN + 4), ), "RED"))
29.644628
74
0.560636
a43b6204278e3e416b873ebef1c838f188779aa4
8,183
py
Python
numba/tests/test_conversion.py
gdementen/numba
78486e86ff9fbd343cac3dadbc63ec3bc66c75aa
[ "BSD-2-Clause" ]
null
null
null
numba/tests/test_conversion.py
gdementen/numba
78486e86ff9fbd343cac3dadbc63ec3bc66c75aa
[ "BSD-2-Clause" ]
null
null
null
numba/tests/test_conversion.py
gdementen/numba
78486e86ff9fbd343cac3dadbc63ec3bc66c75aa
[ "BSD-2-Clause" ]
null
null
null
from __future__ import print_function import array import gc import itertools import sys import numpy as np import numba.unittest_support as unittest from numba.compiler import compile_isolated, Flags from numba import types, jit, numpy_support def identity(x): return x def addition(x, y): return x + y def equality(x, y): return x == y def foobar(x, y, z): return x class TestConversion(unittest.TestCase): """ Testing Python to Native conversion """ def test_complex_identity(self): pyfunc = identity cres = compile_isolated(pyfunc, [types.complex64], return_type=types.complex64) xs = [1.0j, (1+1j), (-1-1j), (1+0j)] for x in xs: self.assertEqual(cres.entry_point(x), x) for x in np.complex64(xs): self.assertEqual(cres.entry_point(x), x) cres = compile_isolated(pyfunc, [types.complex128], return_type=types.complex128) xs = [1.0j, (1+1j), (-1-1j), (1+0j)] for x in xs: self.assertEqual(cres.entry_point(x), x) for x in np.complex128(xs): self.assertEqual(cres.entry_point(x), x) def test_complex_addition(self): pyfunc = addition cres = compile_isolated(pyfunc, [types.complex64, types.complex64], return_type=types.complex64) xs = [1.0j, (1+1j), (-1-1j), (1+0j)] for x in xs: y = x self.assertEqual(cres.entry_point(x, y), x + y) for x in np.complex64(xs): y = x self.assertEqual(cres.entry_point(x, y), x + y) cres = compile_isolated(pyfunc, [types.complex128, types.complex128], return_type=types.complex128) xs = [1.0j, (1+1j), (-1-1j), (1+0j)] for x in xs: y = x self.assertEqual(cres.entry_point(x, y), x + y) for x in np.complex128(xs): y = x self.assertEqual(cres.entry_point(x, y), x + y) def test_boolean_as_int(self): pyfunc = equality cres = compile_isolated(pyfunc, [types.boolean, types.intp]) cfunc = cres.entry_point xs = True, False ys = -1, 0, 1 for xs, ys in itertools.product(xs, ys): self.assertEqual(pyfunc(xs, ys), cfunc(xs, ys)) def test_boolean_as_float(self): pyfunc = equality cres = compile_isolated(pyfunc, [types.boolean, types.float64]) cfunc = cres.entry_point 
xs = True, False ys = -1, 0, 1 for xs, ys in itertools.product(xs, ys): self.assertEqual(pyfunc(xs, ys), cfunc(xs, ys)) def test_boolean_eq_boolean(self): pyfunc = equality cres = compile_isolated(pyfunc, [types.boolean, types.boolean]) cfunc = cres.entry_point xs = True, False ys = True, False for xs, ys in itertools.product(xs, ys): self.assertEqual(pyfunc(xs, ys), cfunc(xs, ys)) # test when a function parameters are jitted as unsigned types # the function is called with negative parameters the Python error # that it generates is correctly handled -- a Python error is returned to the user # For more info, see the comment in Include/longobject.h for _PyArray_AsByteArray # which PyLong_AsUnsignedLongLong calls def test_negative_to_unsigned(self): def f(x): return x # TypeError is for 2.6 if sys.version_info >= (2, 7): with self.assertRaises(OverflowError): jit('uintp(uintp)', nopython=True)(f)(-5) else: with self.assertRaises(TypeError): jit('uintp(uintp)', nopython=True)(f)(-5) # test the switch logic in callwraper.py:build_wrapper() works for more than one argument # and where the error occurs def test_multiple_args_negative_to_unsigned(self): pyfunc = foobar cres = compile_isolated(pyfunc, [types.uint64, types.uint64, types.uint64], return_type=types.uint64) cfunc = cres.entry_point test_fail_args = ((-1, 0, 1), (0, -1, 1), (0, 1, -1)) # TypeError is for 2.6 if sys.version_info >= (2, 7): with self.assertRaises(OverflowError): for a, b, c in test_fail_args: cfunc(a, b, c) else: with self.assertRaises(TypeError): for a, b, c in test_fail_args: cfunc(a, b, c) # test switch logic of callwraper.py:build_wrapper() with records as function parameters def test_multiple_args_records(self): pyfunc = foobar mystruct_dt = np.dtype([('p', np.float64), ('row', np.float64), ('col', np.float64)]) mystruct = numpy_support.from_dtype(mystruct_dt) cres = compile_isolated(pyfunc, [mystruct[:], types.uint64, types.uint64], return_type=mystruct[:]) cfunc = cres.entry_point st1 = 
np.recarray(3, dtype=mystruct_dt) st1.p = np.arange(st1.size) + 1 st1.row = np.arange(st1.size) + 1 st1.col = np.arange(st1.size) + 1 old_refcnt_st1 = sys.getrefcount(st1) test_fail_args = ((st1, -1, 1), (st1, 1, -1)) # TypeError is for 2.6 exc_type = OverflowError if sys.version_info >= (2, 7) else TypeError for a, b, c in test_fail_args: with self.assertRaises(exc_type): cfunc(a, b, c) del test_fail_args, a, b, c gc.collect() self.assertEqual(sys.getrefcount(st1), old_refcnt_st1) # test switch logic of callwraper.py:build_wrapper() with no function parameters def test_with_no_parameters(self): def f(): pass self.assertEqual(f(), jit('()', nopython=True)(f)()) def check_argument_cleanup(self, typ, obj): """ Check that argument cleanup doesn't leak references. """ def f(x, y): pass # The exception raised when passing a negative number # to PyLong_AsUnsignedLongLong exc_type = OverflowError if sys.version_info >= (2, 7) else TypeError def _refcounts(obj): refs = [sys.getrefcount(obj)] if isinstance(obj, tuple): refs += [_refcounts(v) for v in obj] return refs cres = compile_isolated(f, (typ, types.uint32)) old_refcnt = _refcounts(obj) cres.entry_point(obj, 1) self.assertEqual(_refcounts(obj), old_refcnt) with self.assertRaises(exc_type): cres.entry_point(obj, -1) self.assertEqual(_refcounts(obj), old_refcnt) cres = compile_isolated(f, (types.uint32, typ)) old_refcnt = _refcounts(obj) cres.entry_point(1, obj) self.assertEqual(_refcounts(obj), old_refcnt) with self.assertRaises(exc_type): cres.entry_point(-1, obj) self.assertEqual(_refcounts(obj), old_refcnt) @unittest.skipUnless(sys.version_info >= (2, 7), "test uses memoryview") def test_cleanup_buffer(self): mem = memoryview(bytearray(b"xyz")) self.check_argument_cleanup(types.Buffer(types.intc, 1, 'C'), mem) def test_cleanup_record(self): dtype = np.dtype([('x', np.float64), ('y', np.float64)]) recarr = np.zeros(1, dtype=dtype) self.check_argument_cleanup(numpy_support.from_dtype(dtype), recarr[0]) 
@unittest.skipUnless(sys.version_info >= (2, 7), "test uses memoryview") def test_cleanup_tuple(self): mem = memoryview(bytearray(b"xyz")) tp = types.UniTuple(types.Buffer(types.intc, 1, 'C'), 2) self.check_argument_cleanup(tp, (mem, mem)) @unittest.skipUnless(sys.version_info >= (2, 7), "test uses memoryview") def test_cleanup_optional(self): mem = memoryview(bytearray(b"xyz")) tp = types.Optional(types.Buffer(types.intc, 1, 'C')) self.check_argument_cleanup(tp, mem) if __name__ == '__main__': unittest.main()
33.954357
93
0.590492
7009551f64bdb8f5e39a86ef30079de3dbcc6eb7
32,912
py
Python
spikeinterface/core/base.py
vncntprvst/spikeinterface
dd5ae94f85fe5d9082b45321d2c96ba316eb4b77
[ "MIT" ]
null
null
null
spikeinterface/core/base.py
vncntprvst/spikeinterface
dd5ae94f85fe5d9082b45321d2c96ba316eb4b77
[ "MIT" ]
null
null
null
spikeinterface/core/base.py
vncntprvst/spikeinterface
dd5ae94f85fe5d9082b45321d2c96ba316eb4b77
[ "MIT" ]
null
null
null
from pathlib import Path import importlib from copy import deepcopy import weakref import json import pickle import os import random import string from packaging.version import parse import numpy as np from .default_folders import get_global_tmp_folder, is_set_global_tmp_folder from .core_tools import check_json, is_dict_extractor, recursive_path_modifier from .job_tools import _shared_job_kwargs_doc class BaseExtractor: """ Base class for Recording/Sorting Handle serialization save/load to/from a folder. """ # This replaces the old key_properties # These are annotations/properties/features that always need to be # dumped (for instance locations, groups, is_fileterd, etc.) _main_annotations = [] _main_properties = [] _main_features = [] def __init__(self, main_ids): # store init kwargs for nested serialisation self._kwargs = {} # 'main_ids' will either be channel_ids or units_ids # They is used for properties and features self._main_ids = np.array(main_ids) # dict at object level self._annotations = {} # properties is a dict of arrays # array length is : # * number of channel for recording # * number of units for sorting self._properties = {} # features is a dict of arrays (at spike level) self._features = {} self.is_dumpable = True # Extractor specific list of pip extra requirements self.extra_requirements = [] def get_num_segments(self): # This is implemented in BaseRecording or BaseSorting raise NotImplementedError def _check_segment_index(self, segment_index=None): if segment_index is None: if self.get_num_segments() == 1: return 0 else: raise ValueError("Multi-segment object. Provide 'segment_index'") else: return segment_index def ids_to_indices(self, ids, prefer_slice=False): """ Transform a ids list (aka channel_ids or unit_ids) into a indices array. Useful to manipulate: * data * properties * features 'prefer_slice' is an efficient option that tries to make a slice object when indices are consecutive. 
""" if ids is None: if prefer_slice: indices = slice(None) else: indices = self._main_ids else: _main_ids = self._main_ids.tolist() indices = np.array([_main_ids.index(id) for id in ids], dtype=int) if prefer_slice: if np.all(np.diff(indices) == 1): indices = slice(indices[0], indices[-1] + 1) return indices def id_to_index(self, id): ind = list(self._main_ids).index(id) return ind def annotate(self, **new_annotations): self._annotations.update(new_annotations) def set_annotation(self, annotation_key, value, overwrite=False): """This function adds an entry to the annotations dictionary. Parameters ---------- annotation_key: str An annotation stored by the Extractor value: The data associated with the given property name. Could be many formats as specified by the user overwrite: bool If True and the annotation already exists, it is overwritten """ if annotation_key not in self._annotations.keys(): self._annotations[annotation_key] = value else: if overwrite: self._annotations[annotation_key] = value else: raise ValueError(f"{annotation_key} is already an annotation key. Use 'overwrite=True' to overwrite it") def get_annotation(self, key, copy=True): """ Get a annotation. Return a copy by default """ v = self._annotations.get(key, None) if copy: v = deepcopy(v) return v def get_annotation_keys(self): return list(self._annotations.keys()) def set_property(self, key, values, ids=None, missing_value=None): """ Set property vector for main ids. If ids is given AND property already exists, then it is modified only on a subset of channels/units. 
missing_values allows to specify the values of unset properties if ids is used Parameters ---------- key : str The property name values : np.array Array of values for the property ids : list/np.array, optional List of subset of ids to set the values, by default None missing_value : object, optional In case the property is set on a subset of values ('ids' not None), it specifies the how the missing values should be filled, by default None. The missing_value has to be specified for types int and unsigned int. """ default_missing_values = {"f": np.nan, "S": "", "U": ""} if values is None: if key in self._properties: self._properties.pop(key) return size = self._main_ids.size values = np.asarray(values) dtype = values.dtype dtype_kind = dtype.kind if ids is None: assert values.shape[0] == size self._properties[key] = values else: ids = np.array(ids) assert np.unique(ids).size == ids.size, "'ids' are not unique!" if ids.size < size: if key not in self._properties: # create the property with nan or empty shape = (size,) + values.shape[1:] if missing_value is None: if dtype_kind not in default_missing_values.keys(): raise Exception("For values dtypes other than float, string or unicode, the missing value " "cannot be automatically inferred. Please specify it with the 'missing_value' " "argument.") else: missing_value = default_missing_values[dtype_kind] else: assert dtype_kind == np.array(missing_value).dtype.kind, ("Mismatch between values and " "missing_value types. 
Provide a " "missing_value with the same type " "as the values.") empty_values = np.zeros(shape, dtype=dtype) empty_values[:] = missing_value self._properties[key] = empty_values if ids.size==0: return else: assert dtype_kind == self._properties[key].dtype.kind, ("Mismatch between existing property dtype " "values dtype.") indices = self.ids_to_indices(ids) self._properties[key][indices] = values else: indices = self.ids_to_indices(ids) self._properties[key] = np.zeros_like(values, dtype=values.dtype) self._properties[key][indices] = values def get_property(self, key, ids=None): values = self._properties.get(key, None) if ids is not None and values is not None: inds = self.ids_to_indices(ids) values = values[inds] return values def get_property_keys(self): return list(self._properties.keys()) def copy_metadata(self, other, only_main=False, ids=None): """ Copy annotations/properties/features to another extractor. If 'only main' is True, then only "main" annotations/properties/features one are copied. """ if ids is None: inds = slice(None) elif len(ids) == 0: inds = slice(0, 0) else: inds = self.ids_to_indices(ids) if only_main: ann_keys = BaseExtractor._main_annotations prop_keys = BaseExtractor._main_properties # feat_keys = BaseExtractor._main_features else: ann_keys = self._annotations.keys() prop_keys = self._properties.keys() # TODO include features # feat_keys = ExtractorBase._features.keys() other._annotations = deepcopy({k: self._annotations[k] for k in ann_keys}) for k in prop_keys: values = self._properties[k] if values is not None: other.set_property(k, values[inds]) # TODO: copy features also other.extra_requirements.extend(self.extra_requirements) def to_dict(self, include_annotations=False, include_properties=False, include_features=False, relative_to=None, folder_metadata=None): """ Make a nested serialized dictionary out of the extractor. 
The dictionary be used to re-initialize an extractor with load_extractor_from_dict(dump_dict) Parameters ---------- include_annotations: bool If True, all annotations are added to the dict include_properties: bool If True, all properties are added to the dict include_features: bool If True, all features are added to the dict relative_to: str, Path, or None If not None, file_paths are serialized relative to this path Returns ------- dump_dict: dict Serialized dictionary """ class_name = str(type(self)).replace("<class '", "").replace("'>", '') module = class_name.split('.')[0] imported_module = importlib.import_module(module) try: version = imported_module.__version__ except AttributeError: version = 'unknown' dump_dict = { 'class': class_name, 'module': module, 'kwargs': self._kwargs, 'dumpable': self.is_dumpable, 'version': version, 'relative_paths': (relative_to is not None), } try: dump_dict['version'] = imported_module.__version__ except AttributeError: dump_dict['version'] = 'unknown' if include_annotations: dump_dict['annotations'] = self._annotations else: # include only main annotations dump_dict['annotations'] = {k: self._annotations.get(k, None) for k in self._main_annotations} if include_properties: dump_dict['properties'] = self._properties else: # include only main properties dump_dict['properties'] = {k: self._properties.get(k, None) for k in self._main_properties} if relative_to is not None: relative_to = Path(relative_to).absolute() assert relative_to.is_dir(), "'relative_to' must be an existing directory" dump_dict = _make_paths_relative(dump_dict, relative_to) if folder_metadata is not None: if relative_to is not None: folder_metadata = Path(folder_metadata).absolute().relative_to(relative_to) dump_dict['folder_metadata'] = str(folder_metadata) return dump_dict @staticmethod def from_dict(d, base_folder=None): """ Instantiate extractor from dictionary Parameters ---------- d: dictionary Python dictionary base_folder: str, Path, or None If given, 
the parent folder of the file and folder paths Returns ------- extractor: RecordingExtractor or SortingExtractor The loaded extractor object """ if d['relative_paths']: assert base_folder is not None, 'When relative_paths=True, need to provide base_folder' d = _make_paths_absolute(d, base_folder) extractor = _load_extractor_from_dict(d) folder_metadata = d.get('folder_metadata', None) if folder_metadata is not None: folder_metadata = Path(folder_metadata) if d['relative_paths']: folder_metadata = base_folder / folder_metadata extractor.load_metadata_from_folder(folder_metadata) return extractor def load_metadata_from_folder(self, folder_metadata): # hack to load probe for recording folder_metadata = Path(folder_metadata) self._extra_metadata_from_folder(folder_metadata) # load properties prop_folder = folder_metadata / 'properties' for prop_file in prop_folder.iterdir(): if prop_file.suffix == '.npy': values = np.load(prop_file, allow_pickle=True) key = prop_file.stem self.set_property(key, values) def save_metadata_to_folder(self, folder_metadata): self._extra_metadata_to_folder(folder_metadata) # save properties prop_folder = folder_metadata / 'properties' prop_folder.mkdir(parents=True, exist_ok=False) for key in self.get_property_keys(): values = self.get_property(key) np.save(prop_folder / (key + '.npy'), values) def clone(self): """ Clones an existing extractor into a new instance. """ d = self.to_dict(include_annotations=True, include_properties=True, include_features=True) clone = BaseExtractor.from_dict(d) return clone def check_if_dumpable(self): return _check_if_dumpable(self.to_dict()) @staticmethod def _get_file_path(file_path, extensions): """ Helper function to be used by various dump_to_file utilities. Returns default file_path (if not specified), makes sure that target directory exists, adds correct file extension if none, and checks that the provided file extension is allowed. 
Parameters ---------- file_path: str or None extensions: list or tuple List of possible extensions. The first one provided is used as an extension for the default file_path. Returns ------- Path Path object with file path to the file Raises ------ NotDumpableExtractorError """ ext = extensions[0] file_path = Path(file_path) file_path.parent.mkdir(parents=True, exist_ok=True) folder_path = file_path.parent if Path(file_path).suffix == '': file_path = folder_path / (str(file_path) + ext) assert file_path.suffix in extensions, \ "'file_path' should have one of the following extensions:" \ " %s" % (', '.join(extensions)) return file_path def dump(self, file_path, relative_to=None, folder_metadata=None): """ Dumps extractor to json or pickle Parameters ---------- file_path: str or Path The output file (either .json or .pkl/.pickle) relative_to: str, Path, or None If not None, file_paths are serialized relative to this path """ if str(file_path).endswith('.json'): self.dump_to_json(file_path, relative_to=relative_to, folder_metadata=folder_metadata) elif str(file_path).endswith('.pkl') or str(file_path).endswith('.pickle'): self.dump_to_pickle(file_path, relative_to=relative_to, folder_metadata=folder_metadata) else: raise ValueError('Dump: file must .json or .pkl') def dump_to_json(self, file_path=None, relative_to=None, folder_metadata=None): """ Dump recording extractor to json file. 
The extractor can be re-loaded with load_extractor_from_json(json_file) Parameters ---------- file_path: str Path of the json file relative_to: str, Path, or None If not None, file_paths are serialized relative to this path """ assert self.check_if_dumpable() dump_dict = self.to_dict(include_annotations=True, include_properties=False, include_features=False, relative_to=relative_to, folder_metadata=folder_metadata) file_path = self._get_file_path(file_path, ['.json']) file_path.write_text( json.dumps(check_json(dump_dict), indent=4), encoding='utf8' ) def dump_to_pickle(self, file_path=None, include_properties=True, include_features=True, relative_to=None, folder_metadata=None): """ Dump recording extractor to a pickle file. The extractor can be re-loaded with load_extractor_from_json(json_file) Parameters ---------- file_path: str Path of the json file include_properties: bool If True, all properties are dumped include_features: bool If True, all features are dumped relative_to: str, Path, or None If not None, file_paths are serialized relative to this path """ assert self.check_if_dumpable() dump_dict = self.to_dict(include_annotations=True, include_properties=False, include_features=False, relative_to=relative_to, folder_metadata=folder_metadata) file_path = self._get_file_path(file_path, ['.pkl', '.pickle']) file_path.write_bytes(pickle.dumps(dump_dict)) @staticmethod def load(file_path, base_folder=None): """ Load extractor from file path (.json or .pkl) Used both after: * dump(...) json or pickle file * save (...) a folder which contain data + json (or pickle) + metadata. 
""" file_path = Path(file_path) if file_path.is_file(): # standard case based on a file (json or pickle) if str(file_path).endswith('.json'): with open(str(file_path), 'r') as f: d = json.load(f) elif str(file_path).endswith('.pkl') or str(file_path).endswith('.pickle'): with open(str(file_path), 'rb') as f: d = pickle.load(f) else: raise ValueError(f'Impossible to load {file_path}') if 'warning' in d and 'not dumpable' in d['warning']: print('The extractor was not dumpable') return None extractor = BaseExtractor.from_dict(d, base_folder=base_folder) return extractor elif file_path.is_dir(): # case from a folder after a calling extractor.save(...) folder = file_path file = None for dump_ext in ('json', 'pkl', 'pickle'): f = folder / f'cached.{dump_ext}' if f.is_file(): file = f if file is None: raise ValueError(f'This folder is not a cached folder {file_path}') extractor = BaseExtractor.load(file, base_folder=folder) return extractor else: raise ValueError('spikeinterface.Base.load() file_path must be an existing folder or file') @staticmethod def load_from_folder(folder): return BaseExtractor.load(folder) def _save(self, folder, **save_kwargs): # This implemented in BaseRecording or baseSorting # this is internally call by cache(...) main function raise NotImplementedError def _extra_metadata_from_folder(self, folder): # This implemented in BaseRecording for probe pass def _extra_metadata_to_folder(self, folder): # This implemented in BaseRecording for probe pass def save(self, **kwargs): """ Save a SpikeInterface object. Parameters ---------- kwargs: Keyword arguments for saving. * format: "memory", "zarr", or "binary" (for recording) / "memory" or "npz" for sorting. In case format is not memory, the recording is saved to a folder. 
See format specific functions for more info (`save_to_memory()`, `save_to_folder()`, `save_to_zarr()`) * folder: if provided, the folder path where the object is saved * name: if provided and folder is not given, the name of the folder in the global temporary folder (use set_global_tmp_folder() to change this folder) where the object is saved. If folder and name are not given, the object is saved in the global temporary folder with a random string * dump_ext: 'json' or 'pkl', default 'json' (if format is "folder") * verbose: if True output is verbose * **save_kwargs: additional kwargs format-dependent and job kwargs for recording {} Returns ------- loaded_extractor: BaseRecording or BaseSorting The reference to the saved object after it is loaded back """ format = kwargs.get('format', None) if format == 'memory': loaded_extractor = self.save_to_memory(**kwargs) elif format == 'zarr': loaded_extractor = self.save_to_zarr(**kwargs) else: loaded_extractor = self.save_to_folder(**kwargs) return loaded_extractor save.__doc__ = save.__doc__.format(_shared_job_kwargs_doc) def save_to_memory(self, **kwargs): # used only by recording at the moment cached = self._save(**kwargs) self.copy_metadata(cached) return cached # TODO rename to saveto_binary_folder def save_to_folder(self, name=None, folder=None, dump_ext='json', verbose=True, **save_kwargs): """ Save extractor to folder. The save consist of: * extracting traces by calling get_trace() method in chunks * saving data into file (memmap with BinaryRecordingExtractor) * dumping to json/pickle the original extractor for provenance * dumping to json/pickle the cached extractor (memmap with BinaryRecordingExtractor) This replaces the use of the old CacheRecordingExtractor and CacheSortingExtractor. 
There are 2 option for the 'folder' argument: * explicit folder: `extractor.save(folder="/path-for-saving/")` * explicit sub-folder, implicit base-folder : `extractor.save(name="extarctor_name")` * generated: `extractor.save()` The second option saves to subfolder "extarctor_name" in "get_global_tmp_folder()". You can set the global tmp folder with: "set_global_tmp_folder("path-to-global-folder")" The folder must not exist. If it exists, remove it before. Parameters ---------- name: None str or Path Name of the subfolder in get_global_tmp_folder() If 'name' is given, 'folder' must be None. folder: None str or Path Name of the folder. If 'folder' is given, 'name' must be None. Returns ------- cached: saved copy of the extractor. """ if folder is None: cache_folder = get_global_tmp_folder() if name is None: name = ''.join(random.choices(string.ascii_uppercase + string.digits, k=8)) folder = cache_folder / name if verbose: print(f'Use cache_folder={folder}') else: folder = cache_folder / name if not is_set_global_tmp_folder(): if verbose: print(f'Use cache_folder={folder}') else: folder = Path(folder) assert not folder.exists(), f'folder {folder} already exists, choose another name' folder.mkdir(parents=True, exist_ok=False) # dump provenance provenance_file = folder / f'provenance.{dump_ext}' if self.check_if_dumpable(): self.dump(provenance_file) else: provenance_file.write_text( json.dumps({'warning': 'the provenace is not dumpable!!!'}), encoding='utf8' ) # save data (done the subclass) cached = self._save(folder=folder, verbose=verbose, **save_kwargs) self.save_metadata_to_folder(folder) # copy properties/ self.copy_metadata(cached) # dump cached.dump(folder / f'cached.{dump_ext}', relative_to=folder, folder_metadata=folder) return cached def save_to_zarr(self, name=None, zarr_path=None, storage_options=None, channel_chunk_size=None, verbose=True, **save_kwargs): """ Save extractor to zarr. 
The save consist of: * extracting traces by calling get_trace() method in chunks * saving data into a zarr file * dumping the original extractor for provenance in attributes Parameters ---------- name: str or None Name of the subfolder in get_global_tmp_folder() If 'name' is given, 'folder' must be None. zarr_path: str, Path, or None Name of the zarr folder (.zarr). storage_options: dict or None Storage options for zarr `store`. E.g., if "s3://" or "gcs://" they can provide authentication methods, etc. For cloud storage locations, this should not be None (in case of default values, use an empty dict) channel_chunk_size: int or None Channels per chunk. Default None (chunking in time only) Returns ------- cached: saved copy of the extractor. """ import zarr if zarr_path is None: cache_folder = get_global_tmp_folder() if name is None: name = ''.join(random.choices( string.ascii_uppercase + string.digits, k=8)) zarr_path = cache_folder / f"{name}.zarr" if verbose: print(f'Use zarr_path={zarr_path}') else: zarr_path = cache_folder / f"{name}.zarr" if not is_set_global_tmp_folder(): if verbose: print(f'Use zarr_path={zarr_path}') else: if storage_options is None: if isinstance(zarr_path, str): zarr_path_init = zarr_path zarr_path = Path(zarr_path) else: zarr_path_init = str(zarr_path) else: zarr_path_init = zarr_path if isinstance(zarr_path, Path): assert not zarr_path.exists(), f'Path {zarr_path} already exists, choose another name' zarr_root = zarr.open(zarr_path_init, mode="w", storage_options=storage_options) if self.check_if_dumpable(): zarr_root.attrs["provenance"] = check_json(self.to_dict()) else: zarr_root.attrs["provenance"] = None # save data (done the subclass) save_kwargs['zarr_root'] = zarr_root save_kwargs['zarr_path'] = zarr_path save_kwargs['storage_options'] = storage_options save_kwargs['channel_chunk_size'] = channel_chunk_size cached = self._save(folder=None, verbose=verbose, **save_kwargs) cached_annotations = deepcopy(cached._annotations) # save 
properties prop_group = zarr_root.create_group('properties') for key in self.get_property_keys(): values = self.get_property(key) prop_group.create_dataset(name=key, data=values, compressor=None) # save annotations zarr_root.attrs["annotations"] = check_json(self._annotations) # copy properties/ self.copy_metadata(cached) # append annotations on compression cached._annotations.update(cached_annotations) return cached def _make_paths_relative(d, relative): relative = str(Path(relative).absolute()) func = lambda p: os.path.relpath(str(p), start=relative) return recursive_path_modifier(d, func, target='path', copy=True) def _make_paths_absolute(d, base): base = Path(base) func = lambda p: str((base / p).resolve().absolute()) return recursive_path_modifier(d, func, target='path', copy=True) def _check_if_dumpable(d): kwargs = d['kwargs'] if np.any([isinstance(v, dict) and 'dumpable' in v.keys() for (k, v) in kwargs.items()]): # check nested for k, v in kwargs.items(): if 'dumpable' in v.keys(): return _check_if_dumpable(v) else: return d['dumpable'] def _load_extractor_from_dict(dic): cls = None class_name = None if 'kwargs' not in dic: raise Exception(f'This dict cannot be load into extractor {dic}') kwargs = deepcopy(dic['kwargs']) # handle nested for k, v in kwargs.items(): if isinstance(v, dict) and is_dict_extractor(v): kwargs[k] = _load_extractor_from_dict(v) # handle list of extractors list for k, v in kwargs.items(): if isinstance(v, list): if all(is_dict_extractor(e) for e in v): kwargs[k] = [_load_extractor_from_dict(e) for e in v] class_name = dic['class'] cls = _get_class_from_string(class_name) assert cls is not None and class_name is not None, "Could not load spikeinterface class" if not _check_same_version(class_name, dic['version']): print('Versions are not the same. This might lead to errors. 
Use ', class_name.split('.')[0], 'version', dic['version']) # instantiate extrator object extractor = cls(**kwargs) extractor._annotations.update(dic['annotations']) for k, v in dic['properties'].items(): # print(k, v) extractor.set_property(k, v) # TODO features return extractor def _get_class_from_string(class_string): class_name = class_string.split('.')[-1] module = '.'.join(class_string.split('.')[:-1]) imported_module = importlib.import_module(module) try: imported_class = getattr(imported_module, class_name) except: imported_class = None return imported_class def _check_same_version(class_string, version): module = class_string.split('.')[0] imported_module = importlib.import_module(module) current_version = parse(imported_module.__version__) saved_version = parse(version) try: return current_version.major == saved_version.major and current_version.minor == saved_version.minor except AttributeError: return 'unknown' def load_extractor(file_or_folder_or_dict, base_folder=None): """ Instantiate extractor from: * a dict * a json file * a pickle file * folder (after save) Parameters ---------- file_or_folder_or_dict: dictionary or folder or file (json, pickle) Returns ------- extractor: Recording or Sorting The loaded extractor object """ if isinstance(file_or_folder_or_dict, dict): return BaseExtractor.from_dict(file_or_folder_or_dict, base_folder=base_folder) else: return BaseExtractor.load(file_or_folder_or_dict, base_folder=base_folder) def load_extractor_from_dict(d, base_folder=None): print('Use load_extractor(..) instead') return BaseExtractor.from_dict(d, base_folder=base_folder) def load_extractor_from_json(json_file, base_folder=None): print('Use load_extractor(..) instead') return BaseExtractor.load(json_file, base_folder=base_folder) def load_extractor_from_pickle(pkl_file, base_folder=None): print('Use load_extractor(..) 
instead') return BaseExtractor.load(pkl_file, base_folder=base_folder) class BaseSegment: def __init__(self): self._parent_extractor = None @property def parent_extractor(self): return self._parent_extractor() def set_parent_extractor(self, parent_extractor): self._parent_extractor = weakref.ref(parent_extractor)
36.528302
123
0.597047
d37309a4d1c56038f4144022f85fdc73eaf3d1f6
8,122
py
Python
make_colors/make_colors.py
cumulus13/make_colors
772e77aeaaa329dc2b34085423046957449f9773
[ "BSD-3-Clause" ]
null
null
null
make_colors/make_colors.py
cumulus13/make_colors
772e77aeaaa329dc2b34085423046957449f9773
[ "BSD-3-Clause" ]
null
null
null
make_colors/make_colors.py
cumulus13/make_colors
772e77aeaaa329dc2b34085423046957449f9773
[ "BSD-3-Clause" ]
null
null
null
from __future__ import print_function import os import sys import re MODE = 0 if sys.platform == 'win32': import ctypes kernel32 = ctypes.WinDLL('kernel32') hStdOut = kernel32.GetStdHandle(-11) mode = ctypes.c_ulong() MODE = mode if not mode.value == 7: kernel32.GetConsoleMode(hStdOut, ctypes.byref(mode)) mode.value |= 4 kernel32.SetConsoleMode(hStdOut, mode) class Win10Colors(object): """docstring for Win10Colors""" def __init__(self): super(Win10Colors, self).__init__() @classmethod def supports_color(cls): plat = sys.platform supported_platform = plat != 'Pocket PC' and (plat != 'win32' or 'ANSICON' in os.environ) # isatty is not always implemented, #6223. is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty() global MODE if plat == 'win32' and int(MODE.value) == 7: supported_platform = True return supported_platform and is_a_tty def colored(self, string, foreground, background, attrs=[]): '''win10color print out coloring for windows >= 10 Keyword Arguments: foreground {str} -- string fore color (default: {''}) background {str} -- background color (default: {''}) attrs {list} -- attribute support: reset, bold, underline, inverse ''' # attrs_bank = {} # reset = '' # bold = '' # underline = '' # inverse = '' # if attrs: # for i in attrs: # if i == 'reset': # reset = '0m' # elif i == 'bold': # bold = '1m' # elif i == 'underline': # underline = '4m' # elif i == 'inverse': # inverse = '7m' #print("foreground =", foreground) #print("background =", background) fore_color_bank = { 'black': '30m', 'red': '31m', 'green': '32m', 'yellow': '33m', 'blue': '34m', 'magenta': '35m', 'cyan': '36m', 'white': '37m', 'lightblack': '90m', 'lightgrey': '90m', 'lightred': '91m', 'lightgreen': '92m', 'lightyellow': '93m', 'lightblue': '94m', 'lightmagenta': '95m', 'lightcyan': '96m', 'lightwhite': '97m', } back_color_bank = { 'black': '40m', 'red': '41m', 'green': '42m', 'yellow': '43m', 'blue': '44m', 'magenta': '45m', 'cyan': '46m', 'white': '47m', 'on_black': '40m', 
'on_red': '41m', 'on_green': '42m', 'on_yellow': '43m', 'on_blue': '44m', 'on_magenta': '45m', 'on_cyan': '46m', 'on_white': '47m', 'lightblack': '100m', 'lightgrey': '100m', 'lightred': '101m', 'lightgreen': '102m', 'lightyellow': '103m', 'lightblue': '104m', 'lightmagenta': '105m', 'lightcyan': '106m', 'lightwhite': '107m', 'on_lightblack': '100m', 'on_lightgrey': '100m', 'on_lightred': '101m', 'on_lightgreen': '102m', 'on_lightyellow': '103m', 'on_lightblue': '104m', 'on_lightmagenta': '105m', 'on_lightcyan': '106m', 'on_lightwhite': '107m', } background = back_color_bank.get(background) foreground = fore_color_bank.get(foreground) if not background: background = '40m' if not foreground: foreground = '37m' return "[%s;%s%s" % (background[:-1], foreground, string) # return "\033[%s;%s%s\033[0m" % (background[:-1], foreground, string) def getSort(data=None, foreground='', background=''): if data: if "-" in data or "_" in data: foreground, background = re.split("-|_", data) # 'black': '40m', # 'red': '41m', # 'green': '42m', # 'yellow': '43m', # 'blue': '44m', # 'magenta': '45m', # 'cyan': '46m', # 'white': '47m', if foreground or background: if foreground == 'b': foreground = 'black' elif foreground == 'bl': foreground = 'blue' elif foreground == 'r': foreground = 'red' elif foreground == 'g': foreground = 'green' elif foreground == 'y': foreground = 'yellow' elif foreground == 'm': foreground = 'magenta' elif foreground == 'c': foreground = 'cyan' elif foreground == 'w': foreground = 'white' elif foreground == 'lb': foreground = 'lightblue' elif foreground == 'lr': foreground = 'lightred' elif foreground == 'lg': foreground = 'lightgreen' elif foreground == 'ly': foreground = 'lightyellow' elif foreground == 'lm': foreground = 'lightmagenta' elif foreground == 'lc': foreground = 'lightcyan' elif foreground == 'lw': foreground = 'lightwhite' else: foreground = 'lightwhite' if background == 'b': background = 'black' elif background == 'bl': background = 'blue' elif 
background == 'r': background = 'red' elif background == 'g': background = 'green' elif background == 'y': background = 'yellow' elif background == 'm': background = 'magenta' elif background == 'c': background = 'cyan' elif background == 'w': background = 'white' elif background == 'lb': background = 'lightblue' elif background == 'lr': background = 'lightred' elif background == 'lg': background = 'lightgreen' elif background == 'ly': background = 'lightyellow' elif background == 'lm': background = 'lightmagenta' elif background == 'lc': background = 'lightcyan' elif background == 'lw': background = 'lightwhite' else: background = 'black' return foreground, background def make_colors(string, foreground = 'white', background=None, attrs=[]): # if not Win10Colors.supports_color() or os.getenv('MAKE_COLORS') == '0': # return string if "-" in foreground or "_" in foreground: foreground, background = getSort(foreground) elif foreground and len(foreground) < 3: if background and not len(background) < 3: foreground, background_temp = getSort(foreground=foreground, background=background) else: foreground, background = getSort(foreground=foreground, background=background) win10color = Win10Colors() if not win10color.supports_color() or os.getenv('MAKE_COLORS') == '0': return string elif os.getenv('MAKE_COLORS') == '1': return win10color.colored(string, foreground, background, attrs) else: return win10color.colored(string, foreground, background, attrs) if __name__ == '__main__': print(Win10Colors.supports_color()) print(make_colors("This is Red", 'lw', 'lr'))
33.561983
98
0.468727
a901068c8ffe230bc1f953dbfc586c1c842bc1f9
578
py
Python
tutorials/t1_sugared.py
nkoep/python-libjit
6ca42dc44432cd54c61e6066d382b7f5a9d58f38
[ "Apache-2.0" ]
4
2021-06-07T12:16:23.000Z
2021-06-07T15:05:09.000Z
tutorials/t1_sugared.py
nkoep/python-libjit
6ca42dc44432cd54c61e6066d382b7f5a9d58f38
[ "Apache-2.0" ]
null
null
null
tutorials/t1_sugared.py
nkoep/python-libjit
6ca42dc44432cd54c61e6066d382b7f5a9d58f38
[ "Apache-2.0" ]
null
null
null
"""Translation of t1.c with syntactic sugar""" import jit def run(): with jit.Context() as context: # Build the function signature. signature = jit.Type.create_signature( jit.ABI_CDECL, jit.Type.INT, [jit.Type.INT] * 3) # Create the function object. func = jit.Function(context, signature) # Construct the function body. x = func.value_get_param(0) y = func.value_get_param(1) z = func.value_get_param(2) func.insn_return(x * y + z) print "mul_add(3, 5, 2) = %d" % func(3, 5, 2)
25.130435
60
0.595156
668753b56e8f6cef9463c3beb8ebf35625b8aee7
2,500
py
Python
house_code/tutorials_altered/modules/property_reading.py
mukobi/Pozyx-Gabe
a8b444c2013b1df5043cd25106b72562409b5130
[ "MIT" ]
1
2020-06-12T07:21:56.000Z
2020-06-12T07:21:56.000Z
house_code/tutorials_altered/modules/property_reading.py
mukobi/Pozyx-Gabe
a8b444c2013b1df5043cd25106b72562409b5130
[ "MIT" ]
null
null
null
house_code/tutorials_altered/modules/property_reading.py
mukobi/Pozyx-Gabe
a8b444c2013b1df5043cd25106b72562409b5130
[ "MIT" ]
null
null
null
from pypozyx import * MASTER_PROPS_LOCATION = "../../original_programs/PSUPozyx/Configuration/MASTER_ACTIVE_CONFIG.properties" class PropertyReading: @staticmethod def getProperties(): P = dict(line.strip().split('=') for line in open(MASTER_PROPS_LOCATION) if not line.startswith('#') and not line.startswith('\n')) use_remote = P["use_remote"] == "true" remote_id = int(P["remote_id"], 16) anchor_1_id = int(P["anchor_1_id"], 16) anchor_1_x = int(P["anchor_1_x"], 16) anchor_1_y = int(P["anchor_1_y"], 16) anchor_1_z = int(P["anchor_1_z"], 16) anchor_2_id = int(P["anchor_2_id"], 16) anchor_2_x = int(P["anchor_2_x"], 16) anchor_2_y = int(P["anchor_2_y"], 16) anchor_2_z = int(P["anchor_2_z"], 16) anchor_3_id = int(P["anchor_3_id"], 16) anchor_3_x = int(P["anchor_3_x"], 16) anchor_3_y = int(P["anchor_3_y"], 16) anchor_3_z = int(P["anchor_3_z"], 16) anchor_4_id = int(P["anchor_4_id"], 16) anchor_4_x = int(P["anchor_4_x"], 16) anchor_4_y = int(P["anchor_4_y"], 16) anchor_4_z = int(P["anchor_4_z"], 16) attributes_to_log = [] if P["log_pressure"] == "true": attributes_to_log += "pressure" if P["log_acceleration"] == "true": attributes_to_log += "acceleration" if P["log_magnetic"] == "true": attributes_to_log += "magnetic" if P["log_angular_velocity"] == "true": attributes_to_log += "angular velocity" if P["log_euler_angles"] == "true": attributes_to_log += "euler angles" if P["log_quaternion"] == "true": attributes_to_log += "quaternion" if P["log_linear_acceleration"] == "true": attributes_to_log += "linear acceleration" if P["log_gravity"] == "true": attributes_to_log += "gravity" use_file = P["use_file"] == "true" filename = P["filename"] use_processing = P["use_processing"] == "true" anchors = [DeviceCoordinates(anchor_1_id, 1, Coordinates(anchor_1_x, anchor_1_y, anchor_1_z)), DeviceCoordinates(anchor_2_id, 1, Coordinates(anchor_2_x, anchor_2_y, anchor_2_z)), DeviceCoordinates(anchor_3_id, 1, Coordinates(anchor_3_x, anchor_3_y, anchor_3_z)), DeviceCoordinates(anchor_4_id, 
1, Coordinates(anchor_4_x, anchor_4_y, anchor_4_z)),] return use_remote, remote_id, anchors, attributes_to_log, use_file, filename, use_processing
50
104
0.6316
c2fc9e9a8ddd6f3a410e1f413b5d3ef7bb63c4d7
6,483
bzl
Python
haskell/private/actions/info.bzl
dasormeter/rules_haskell
631531550468eca22cce0cd4619009c725e3be92
[ "Apache-2.0" ]
null
null
null
haskell/private/actions/info.bzl
dasormeter/rules_haskell
631531550468eca22cce0cd4619009c725e3be92
[ "Apache-2.0" ]
null
null
null
haskell/private/actions/info.bzl
dasormeter/rules_haskell
631531550468eca22cce0cd4619009c725e3be92
[ "Apache-2.0" ]
null
null
null
"""Defines output groups that are consumed by tools such as 'hrepl'.""" load("@bazel_skylib//lib:dicts.bzl", "dicts") load("@bazel_skylib//lib:paths.bzl", "paths") load(":providers.bzl", "all_package_ids") load(":private/cc_libraries.bzl", "get_ghci_extra_libs") load( ":private/path_utils.bzl", "get_lib_name", "is_hs_library", ) def write_proto_file(hs, output_name, proto_type, content): """Write an encoded .proto file. Writes a file with the text format encoding, and then runs "protoc" to convert it to the wire encoding. The wire encoding allows us to use released versions of tools with different versions of the Haskell rules (within reason). Args: hs: The current rule context. output_name: The output filename. The text-encoded file will be named {output_name}.txt, and the encoded file will be named {output_name}.pb. proto_type: The type of the proto (e.g. foo.Bar). It must be defined in rule_info.proto. content: The contents of the text file, as a Bazel struct. Returns: A File containing the encoded proto message, named {file_name}.pb. """ proto_txt = hs.actions.declare_file(output_name + ".txt") proto_pb = hs.actions.declare_file(output_name + ".pb") hs.actions.write(output = proto_txt, content = content.to_proto()) protoc = hs.toolchain.protoc rule_info_protos = hs.toolchain.rule_info_proto[ProtoInfo].direct_sources hs.actions.run_shell( outputs = [proto_pb], inputs = depset([proto_txt] + rule_info_protos), tools = [protoc], command = "{protoc} {rule_info_proto} --encode {proto_type} < {proto_txt} > {proto_pb}" .format( protoc = protoc.path, proto_type = proto_type, proto_txt = proto_txt.path, proto_pb = proto_pb.path, rule_info_proto = " ".join([p.path for p in rule_info_protos]), ), ) return proto_pb def _filter_package_env(flags): # Strips out -package-env from the command-line flags. Consumers of these output # groups will be responsible for setting the right GHC flags themselves, # based on the fields of haskell.LibraryInfo. 
result = [] for i in flags: if not flags: break if flags[0] == "-package-env": flags = flags[2:] else: result.append(flags[0]) flags = flags[1:] return result def _write_haskell_compile_info( workspace_name, name, hs, c, cc_libs, runfiles): return write_proto_file( output_name = name, hs = hs, proto_type = "haskell.CompileInfo", content = struct( # Calling to_list on c.source_files shouldn't be a performance penalty. # Despite being a depset, it only contains sources for the current rule. source_files = [f.path for f in c.source_files.to_list()], # TODO: currently, this will duplicate the common, target-independent options for # each build target. We should instead move them into GhcConfig.common_options. options = _filter_package_env(c.compile_flags), transitive_cc_libs = [lib.path for lib in cc_libs], # Follows the new runfiles tree organization of: # https://github.com/bazelbuild/bazel/wiki/Updating-the-runfiles-tree-structure runfiles = [ struct( full_path = f.path, short_path = paths.join( f.owner.workspace_name or workspace_name, f.short_path, ), ) for f in runfiles.to_list() ], ), ) def library_info_output_groups( name, hs, hs_info, lib_info): """Output groups for depending on a Haskell target. Args: name: A string; the name of the current target. hs: The Haskell context. hs_info: A HaskellInfo provider. lib_info: A HaskellLibraryInfo provider. Returns: A dict whose keys are output groups and values are depsets of Files. """ proto_file = write_proto_file( hs = hs, output_name = name + ".HaskellLibrary", proto_type = "haskell.LibraryInfo", content = struct( # TODO(google/hrepl#4): currently, we only expose the immediate dependencies. 
transitive_package_ids = [lib_info.package_id], transitive_package_dbs = [db.dirname for db in hs_info.package_databases.to_list()], ), ) return { "haskell_transitive_deps": depset( transitive = [ hs_info.package_databases, hs_info.interface_dirs, hs_info.dynamic_libraries, ], ), "haskell_library_info": depset([proto_file]), } def compile_info_output_groups( name, workspace_name, hs, c, posix, cc_libraries_info, cc_info, runfiles): """Output groups for compiling a Haskell target. Args: name: A string; the name of the current target. workspace_name: The workspace this target was defined in. Used for organizing its runfiles. hs: The Haskell context. c: A struct with information about the compilation step. posix: The posix toolchain. cc_info: A CcInfo provider. runfiles: A depset of Files. Returns: A dict whose keys are output groups and values are depsets of Files. """ (ghci_extra_libs, ghc_env) = get_ghci_extra_libs(hs, posix, cc_libraries_info, cc_info) cc_libs = [ lib for lib in ghci_extra_libs.to_list() if not is_hs_library(lib) and get_lib_name(lib) != "ffi" ] return { "haskell_cdep_libs": depset(cc_libs), "haskell_runfiles": runfiles, "haskell_source_files": depset(transitive = [c.source_files, c.extra_source_files]), "haskell_compile_info": depset([_write_haskell_compile_info( workspace_name = workspace_name, hs = hs, name = name + ".HaskellCompile", c = c, cc_libs = cc_libs, runfiles = runfiles, )]), }
34.668449
93
0.600339
78220406eb69bd736d760379d5b396bc8735e1e0
116,357
py
Python
tests/test_asyncio/test_commands.py
WisdomPill/redis-py
e5e265de87dfe7ef8cc5cad8c247d067c74fa44d
[ "MIT" ]
null
null
null
tests/test_asyncio/test_commands.py
WisdomPill/redis-py
e5e265de87dfe7ef8cc5cad8c247d067c74fa44d
[ "MIT" ]
null
null
null
tests/test_asyncio/test_commands.py
WisdomPill/redis-py
e5e265de87dfe7ef8cc5cad8c247d067c74fa44d
[ "MIT" ]
null
null
null
""" Tests async overrides of commands from their mixins """ import binascii import datetime import re import time from string import ascii_letters import pytest import redis from redis import exceptions from redis.client import parse_info from tests.conftest import ( skip_if_server_version_gte, skip_if_server_version_lt, skip_unless_arch_bits, ) REDIS_6_VERSION = "5.9.0" pytestmark = [pytest.mark.asyncio, pytest.mark.onlynoncluster] @pytest.fixture() async def slowlog(r: redis.Redis, event_loop): current_config = await r.config_get() old_slower_than_value = current_config["slowlog-log-slower-than"] old_max_legnth_value = current_config["slowlog-max-len"] await r.config_set("slowlog-log-slower-than", 0) await r.config_set("slowlog-max-len", 128) yield await r.config_set("slowlog-log-slower-than", old_slower_than_value) await r.config_set("slowlog-max-len", old_max_legnth_value) async def redis_server_time(client: redis.Redis): seconds, milliseconds = await client.time() timestamp = float(f"{seconds}.{milliseconds}") return datetime.datetime.fromtimestamp(timestamp) async def get_stream_message(client: redis.Redis, stream: str, message_id: str): """Fetch a stream message and format it as a (message_id, fields) pair""" response = await client.xrange(stream, min=message_id, max=message_id) assert len(response) == 1 return response[0] # RESPONSE CALLBACKS class TestResponseCallbacks: """Tests for the response callback system""" async def test_response_callbacks(self, r: redis.Redis): assert r.response_callbacks == redis.Redis.RESPONSE_CALLBACKS assert id(r.response_callbacks) != id(redis.Redis.RESPONSE_CALLBACKS) r.set_response_callback("GET", lambda x: "static") await r.set("a", "foo") assert await r.get("a") == "static" async def test_case_insensitive_command_names(self, r: redis.Redis): assert r.response_callbacks["del"] == r.response_callbacks["DEL"] class TestRedisCommands: async def test_command_on_invalid_key_type(self, r: redis.Redis): await r.lpush("a", "1") 
with pytest.raises(redis.ResponseError): await r.get("a") # SERVER INFORMATION @skip_if_server_version_lt(REDIS_6_VERSION) async def test_acl_cat_no_category(self, r: redis.Redis): categories = await r.acl_cat() assert isinstance(categories, list) assert "read" in categories @skip_if_server_version_lt(REDIS_6_VERSION) async def test_acl_cat_with_category(self, r: redis.Redis): commands = await r.acl_cat("read") assert isinstance(commands, list) assert "get" in commands @skip_if_server_version_lt(REDIS_6_VERSION) async def test_acl_deluser(self, r: redis.Redis, request, event_loop): username = "redis-py-user" def teardown(): coro = r.acl_deluser(username) if event_loop.is_running(): event_loop.create_task(coro) else: event_loop.run_until_complete(coro) request.addfinalizer(teardown) assert await r.acl_deluser(username) == 0 assert await r.acl_setuser(username, enabled=False, reset=True) assert await r.acl_deluser(username) == 1 @skip_if_server_version_lt(REDIS_6_VERSION) async def test_acl_genpass(self, r: redis.Redis): password = await r.acl_genpass() assert isinstance(password, str) @skip_if_server_version_lt(REDIS_6_VERSION) async def test_acl_getuser_setuser(self, r: redis.Redis, request, event_loop): username = "redis-py-user" def teardown(): coro = r.acl_deluser(username) if event_loop.is_running(): event_loop.create_task(coro) else: event_loop.run_until_complete(coro) request.addfinalizer(teardown) # test enabled=False assert await r.acl_setuser(username, enabled=False, reset=True) assert await r.acl_getuser(username) == { "categories": ["-@all"], "commands": [], "channels": [b"*"], "enabled": False, "flags": ["off", "allchannels", "sanitize-payload"], "keys": [], "passwords": [], } # test nopass=True assert await r.acl_setuser(username, enabled=True, reset=True, nopass=True) assert await r.acl_getuser(username) == { "categories": ["-@all"], "commands": [], "channels": [b"*"], "enabled": True, "flags": ["on", "allchannels", "nopass", "sanitize-payload"], 
"keys": [], "passwords": [], } # test all args assert await r.acl_setuser( username, enabled=True, reset=True, passwords=["+pass1", "+pass2"], categories=["+set", "+@hash", "-geo"], commands=["+get", "+mget", "-hset"], keys=["cache:*", "objects:*"], ) acl = await r.acl_getuser(username) assert set(acl["categories"]) == {"-@all", "+@set", "+@hash"} assert set(acl["commands"]) == {"+get", "+mget", "-hset"} assert acl["enabled"] is True assert acl["channels"] == [b"*"] assert acl["flags"] == ["on", "allchannels", "sanitize-payload"] assert set(acl["keys"]) == {b"cache:*", b"objects:*"} assert len(acl["passwords"]) == 2 # test reset=False keeps existing ACL and applies new ACL on top assert await r.acl_setuser( username, enabled=True, reset=True, passwords=["+pass1"], categories=["+@set"], commands=["+get"], keys=["cache:*"], ) assert await r.acl_setuser( username, enabled=True, passwords=["+pass2"], categories=["+@hash"], commands=["+mget"], keys=["objects:*"], ) acl = await r.acl_getuser(username) assert set(acl["categories"]) == {"-@all", "+@set", "+@hash"} assert set(acl["commands"]) == {"+get", "+mget"} assert acl["enabled"] is True assert acl["channels"] == [b"*"] assert acl["flags"] == ["on", "allchannels", "sanitize-payload"] assert set(acl["keys"]) == {b"cache:*", b"objects:*"} assert len(acl["passwords"]) == 2 # test removal of passwords assert await r.acl_setuser( username, enabled=True, reset=True, passwords=["+pass1", "+pass2"] ) assert len((await r.acl_getuser(username))["passwords"]) == 2 assert await r.acl_setuser(username, enabled=True, passwords=["-pass2"]) assert len((await r.acl_getuser(username))["passwords"]) == 1 # Resets and tests that hashed passwords are set properly. 
hashed_password = ( "5e884898da28047151d0e56f8dc629" "2773603d0d6aabbdd62a11ef721d1542d8" ) assert await r.acl_setuser( username, enabled=True, reset=True, hashed_passwords=["+" + hashed_password] ) acl = await r.acl_getuser(username) assert acl["passwords"] == [hashed_password] # test removal of hashed passwords assert await r.acl_setuser( username, enabled=True, reset=True, hashed_passwords=["+" + hashed_password], passwords=["+pass1"], ) assert len((await r.acl_getuser(username))["passwords"]) == 2 assert await r.acl_setuser( username, enabled=True, hashed_passwords=["-" + hashed_password] ) assert len((await r.acl_getuser(username))["passwords"]) == 1 @skip_if_server_version_lt(REDIS_6_VERSION) async def test_acl_list(self, r: redis.Redis, request, event_loop): username = "redis-py-user" def teardown(): coro = r.acl_deluser(username) if event_loop.is_running(): event_loop.create_task(coro) else: event_loop.run_until_complete(coro) request.addfinalizer(teardown) assert await r.acl_setuser(username, enabled=False, reset=True) users = await r.acl_list() assert f"user {username} off sanitize-payload &* -@all" in users @skip_if_server_version_lt(REDIS_6_VERSION) async def test_acl_log(self, r: redis.Redis, request, event_loop, create_redis): username = "redis-py-user" def teardown(): coro = r.acl_deluser(username) if event_loop.is_running(): event_loop.create_task(coro) else: event_loop.run_until_complete(coro) request.addfinalizer(teardown) await r.acl_setuser( username, enabled=True, reset=True, commands=["+get", "+set", "+select"], keys=["cache:*"], nopass=True, ) await r.acl_log_reset() user_client = await create_redis(username=username) # Valid operation and key assert await user_client.set("cache:0", 1) assert await user_client.get("cache:0") == b"1" # Invalid key with pytest.raises(exceptions.NoPermissionError): await user_client.get("violated_cache:0") # Invalid operation with pytest.raises(exceptions.NoPermissionError): await user_client.hset("cache:0", 
"hkey", "hval") assert isinstance(await r.acl_log(), list) assert len(await r.acl_log()) == 2 assert len(await r.acl_log(count=1)) == 1 assert isinstance((await r.acl_log())[0], dict) assert "client-info" in (await r.acl_log(count=1))[0] assert await r.acl_log_reset() @skip_if_server_version_lt(REDIS_6_VERSION) async def test_acl_setuser_categories_without_prefix_fails( self, r: redis.Redis, request, event_loop ): username = "redis-py-user" def teardown(): coro = r.acl_deluser(username) if event_loop.is_running(): event_loop.create_task(coro) else: event_loop.run_until_complete(coro) request.addfinalizer(teardown) with pytest.raises(exceptions.DataError): await r.acl_setuser(username, categories=["list"]) @skip_if_server_version_lt(REDIS_6_VERSION) async def test_acl_setuser_commands_without_prefix_fails( self, r: redis.Redis, request, event_loop ): username = "redis-py-user" def teardown(): coro = r.acl_deluser(username) if event_loop.is_running(): event_loop.create_task(coro) else: event_loop.run_until_complete(coro) request.addfinalizer(teardown) with pytest.raises(exceptions.DataError): await r.acl_setuser(username, commands=["get"]) @skip_if_server_version_lt(REDIS_6_VERSION) async def test_acl_setuser_add_passwords_and_nopass_fails( self, r: redis.Redis, request, event_loop ): username = "redis-py-user" def teardown(): coro = r.acl_deluser(username) if event_loop.is_running(): event_loop.create_task(coro) else: event_loop.run_until_complete(coro) request.addfinalizer(teardown) with pytest.raises(exceptions.DataError): await r.acl_setuser(username, passwords="+mypass", nopass=True) @skip_if_server_version_lt(REDIS_6_VERSION) async def test_acl_users(self, r: redis.Redis): users = await r.acl_users() assert isinstance(users, list) assert len(users) > 0 @skip_if_server_version_lt(REDIS_6_VERSION) async def test_acl_whoami(self, r: redis.Redis): username = await r.acl_whoami() assert isinstance(username, str) async def test_client_list(self, r: redis.Redis): 
clients = await r.client_list() assert isinstance(clients[0], dict) assert "addr" in clients[0] @skip_if_server_version_lt("5.0.0") async def test_client_list_type(self, r: redis.Redis): with pytest.raises(exceptions.RedisError): await r.client_list(_type="not a client type") for client_type in ["normal", "master", "replica", "pubsub"]: clients = await r.client_list(_type=client_type) assert isinstance(clients, list) @skip_if_server_version_lt("5.0.0") async def test_client_id(self, r: redis.Redis): assert await r.client_id() > 0 @skip_if_server_version_lt("5.0.0") async def test_client_unblock(self, r: redis.Redis): myid = await r.client_id() assert not await r.client_unblock(myid) assert not await r.client_unblock(myid, error=True) assert not await r.client_unblock(myid, error=False) @skip_if_server_version_lt("2.6.9") async def test_client_getname(self, r: redis.Redis): assert await r.client_getname() is None @skip_if_server_version_lt("2.6.9") async def test_client_setname(self, r: redis.Redis): assert await r.client_setname("redis_py_test") assert await r.client_getname() == "redis_py_test" @skip_if_server_version_lt("2.6.9") async def test_client_kill(self, r: redis.Redis, r2): await r.client_setname("redis-py-c1") await r2.client_setname("redis-py-c2") clients = [ client for client in await r.client_list() if client.get("name") in ["redis-py-c1", "redis-py-c2"] ] assert len(clients) == 2 clients_by_name = {client.get("name"): client for client in clients} client_addr = clients_by_name["redis-py-c2"].get("addr") assert await r.client_kill(client_addr) is True clients = [ client for client in await r.client_list() if client.get("name") in ["redis-py-c1", "redis-py-c2"] ] assert len(clients) == 1 assert clients[0].get("name") == "redis-py-c1" @skip_if_server_version_lt("2.8.12") async def test_client_kill_filter_invalid_params(self, r: redis.Redis): # empty with pytest.raises(exceptions.DataError): await r.client_kill_filter() # invalid skipme with 
pytest.raises(exceptions.DataError): await r.client_kill_filter(skipme="yeah") # type: ignore # invalid type with pytest.raises(exceptions.DataError): await r.client_kill_filter(_type="caster") # type: ignore @skip_if_server_version_lt("2.8.12") async def test_client_kill_filter_by_id(self, r: redis.Redis, r2): await r.client_setname("redis-py-c1") await r2.client_setname("redis-py-c2") clients = [ client for client in await r.client_list() if client.get("name") in ["redis-py-c1", "redis-py-c2"] ] assert len(clients) == 2 clients_by_name = {client.get("name"): client for client in clients} client_2_id = clients_by_name["redis-py-c2"].get("id") resp = await r.client_kill_filter(_id=client_2_id) assert resp == 1 clients = [ client for client in await r.client_list() if client.get("name") in ["redis-py-c1", "redis-py-c2"] ] assert len(clients) == 1 assert clients[0].get("name") == "redis-py-c1" @skip_if_server_version_lt("2.8.12") async def test_client_kill_filter_by_addr(self, r: redis.Redis, r2): await r.client_setname("redis-py-c1") await r2.client_setname("redis-py-c2") clients = [ client for client in await r.client_list() if client.get("name") in ["redis-py-c1", "redis-py-c2"] ] assert len(clients) == 2 clients_by_name = {client.get("name"): client for client in clients} client_2_addr = clients_by_name["redis-py-c2"].get("addr") resp = await r.client_kill_filter(addr=client_2_addr) assert resp == 1 clients = [ client for client in await r.client_list() if client.get("name") in ["redis-py-c1", "redis-py-c2"] ] assert len(clients) == 1 assert clients[0].get("name") == "redis-py-c1" @skip_if_server_version_lt("2.6.9") async def test_client_list_after_client_setname(self, r: redis.Redis): await r.client_setname("redis_py_test") clients = await r.client_list() # we don't know which client ours will be assert "redis_py_test" in [c["name"] for c in clients] @skip_if_server_version_lt("2.9.50") async def test_client_pause(self, r: redis.Redis): assert await 
r.client_pause(1) assert await r.client_pause(timeout=1) with pytest.raises(exceptions.RedisError): await r.client_pause(timeout="not an integer") async def test_config_get(self, r: redis.Redis): data = await r.config_get() assert "maxmemory" in data assert data["maxmemory"].isdigit() async def test_config_resetstat(self, r: redis.Redis): await r.ping() prior_commands_processed = int((await r.info())["total_commands_processed"]) assert prior_commands_processed >= 1 await r.config_resetstat() reset_commands_processed = int((await r.info())["total_commands_processed"]) assert reset_commands_processed < prior_commands_processed async def test_config_set(self, r: redis.Redis): data = await r.config_get() rdbname = data["dbfilename"] try: assert await r.config_set("dbfilename", "redis_py_test.rdb") assert (await r.config_get())["dbfilename"] == "redis_py_test.rdb" finally: assert await r.config_set("dbfilename", rdbname) async def test_dbsize(self, r: redis.Redis): await r.set("a", "foo") await r.set("b", "bar") assert await r.dbsize() == 2 async def test_echo(self, r: redis.Redis): assert await r.echo("foo bar") == b"foo bar" async def test_info(self, r: redis.Redis): await r.set("a", "foo") await r.set("b", "bar") info = await r.info() assert isinstance(info, dict) assert info["db9"]["keys"] == 2 async def test_lastsave(self, r: redis.Redis): assert isinstance(await r.lastsave(), datetime.datetime) async def test_object(self, r: redis.Redis): await r.set("a", "foo") assert isinstance(await r.object("refcount", "a"), int) assert isinstance(await r.object("idletime", "a"), int) assert await r.object("encoding", "a") in (b"raw", b"embstr") assert await r.object("idletime", "invalid-key") is None async def test_ping(self, r: redis.Redis): assert await r.ping() async def test_slowlog_get(self, r: redis.Redis, slowlog): assert await r.slowlog_reset() unicode_string = chr(3456) + "abcd" + chr(3421) await r.get(unicode_string) slowlog = await r.slowlog_get() assert 
isinstance(slowlog, list) commands = [log["command"] for log in slowlog] get_command = b" ".join((b"GET", unicode_string.encode("utf-8"))) assert get_command in commands assert b"SLOWLOG RESET" in commands # the order should be ['GET <uni string>', 'SLOWLOG RESET'], # but if other clients are executing commands at the same time, there # could be commands, before, between, or after, so just check that # the two we care about are in the appropriate order. assert commands.index(get_command) < commands.index(b"SLOWLOG RESET") # make sure other attributes are typed correctly assert isinstance(slowlog[0]["start_time"], int) assert isinstance(slowlog[0]["duration"], int) async def test_slowlog_get_limit(self, r: redis.Redis, slowlog): assert await r.slowlog_reset() await r.get("foo") slowlog = await r.slowlog_get(1) assert isinstance(slowlog, list) # only one command, based on the number we passed to slowlog_get() assert len(slowlog) == 1 async def test_slowlog_length(self, r: redis.Redis, slowlog): await r.get("foo") assert isinstance(await r.slowlog_len(), int) @skip_if_server_version_lt("2.6.0") async def test_time(self, r: redis.Redis): t = await r.time() assert len(t) == 2 assert isinstance(t[0], int) assert isinstance(t[1], int) # BASIC KEY COMMANDS async def test_append(self, r: redis.Redis): assert await r.append("a", "a1") == 2 assert await r.get("a") == b"a1" assert await r.append("a", "a2") == 4 assert await r.get("a") == b"a1a2" @skip_if_server_version_lt("2.6.0") async def test_bitcount(self, r: redis.Redis): await r.setbit("a", 5, True) assert await r.bitcount("a") == 1 await r.setbit("a", 6, True) assert await r.bitcount("a") == 2 await r.setbit("a", 5, False) assert await r.bitcount("a") == 1 await r.setbit("a", 9, True) await r.setbit("a", 17, True) await r.setbit("a", 25, True) await r.setbit("a", 33, True) assert await r.bitcount("a") == 5 assert await r.bitcount("a", 0, -1) == 5 assert await r.bitcount("a", 2, 3) == 2 assert await r.bitcount("a", 2, 
-1) == 3 assert await r.bitcount("a", -2, -1) == 2 assert await r.bitcount("a", 1, 1) == 1 @skip_if_server_version_lt("2.6.0") async def test_bitop_not_empty_string(self, r: redis.Redis): await r.set("a", "") await r.bitop("not", "r", "a") assert await r.get("r") is None @skip_if_server_version_lt("2.6.0") async def test_bitop_not(self, r: redis.Redis): test_str = b"\xAA\x00\xFF\x55" correct = ~0xAA00FF55 & 0xFFFFFFFF await r.set("a", test_str) await r.bitop("not", "r", "a") assert int(binascii.hexlify(await r.get("r")), 16) == correct @skip_if_server_version_lt("2.6.0") async def test_bitop_not_in_place(self, r: redis.Redis): test_str = b"\xAA\x00\xFF\x55" correct = ~0xAA00FF55 & 0xFFFFFFFF await r.set("a", test_str) await r.bitop("not", "a", "a") assert int(binascii.hexlify(await r.get("a")), 16) == correct @skip_if_server_version_lt("2.6.0") async def test_bitop_single_string(self, r: redis.Redis): test_str = b"\x01\x02\xFF" await r.set("a", test_str) await r.bitop("and", "res1", "a") await r.bitop("or", "res2", "a") await r.bitop("xor", "res3", "a") assert await r.get("res1") == test_str assert await r.get("res2") == test_str assert await r.get("res3") == test_str @skip_if_server_version_lt("2.6.0") async def test_bitop_string_operands(self, r: redis.Redis): await r.set("a", b"\x01\x02\xFF\xFF") await r.set("b", b"\x01\x02\xFF") await r.bitop("and", "res1", "a", "b") await r.bitop("or", "res2", "a", "b") await r.bitop("xor", "res3", "a", "b") assert int(binascii.hexlify(await r.get("res1")), 16) == 0x0102FF00 assert int(binascii.hexlify(await r.get("res2")), 16) == 0x0102FFFF assert int(binascii.hexlify(await r.get("res3")), 16) == 0x000000FF @skip_if_server_version_lt("2.8.7") async def test_bitpos(self, r: redis.Redis): key = "key:bitpos" await r.set(key, b"\xff\xf0\x00") assert await r.bitpos(key, 0) == 12 assert await r.bitpos(key, 0, 2, -1) == 16 assert await r.bitpos(key, 0, -2, -1) == 12 await r.set(key, b"\x00\xff\xf0") assert await r.bitpos(key, 1, 0) 
== 8 assert await r.bitpos(key, 1, 1) == 8 await r.set(key, b"\x00\x00\x00") assert await r.bitpos(key, 1) == -1 @skip_if_server_version_lt("2.8.7") async def test_bitpos_wrong_arguments(self, r: redis.Redis): key = "key:bitpos:wrong:args" await r.set(key, b"\xff\xf0\x00") with pytest.raises(exceptions.RedisError): await r.bitpos(key, 0, end=1) == 12 with pytest.raises(exceptions.RedisError): await r.bitpos(key, 7) == 12 async def test_decr(self, r: redis.Redis): assert await r.decr("a") == -1 assert await r.get("a") == b"-1" assert await r.decr("a") == -2 assert await r.get("a") == b"-2" assert await r.decr("a", amount=5) == -7 assert await r.get("a") == b"-7" async def test_decrby(self, r: redis.Redis): assert await r.decrby("a", amount=2) == -2 assert await r.decrby("a", amount=3) == -5 assert await r.get("a") == b"-5" async def test_delete(self, r: redis.Redis): assert await r.delete("a") == 0 await r.set("a", "foo") assert await r.delete("a") == 1 async def test_delete_with_multiple_keys(self, r: redis.Redis): await r.set("a", "foo") await r.set("b", "bar") assert await r.delete("a", "b") == 2 assert await r.get("a") is None assert await r.get("b") is None async def test_delitem(self, r: redis.Redis): await r.set("a", "foo") await r.delete("a") assert await r.get("a") is None @skip_if_server_version_lt("4.0.0") async def test_unlink(self, r: redis.Redis): assert await r.unlink("a") == 0 await r.set("a", "foo") assert await r.unlink("a") == 1 assert await r.get("a") is None @skip_if_server_version_lt("4.0.0") async def test_unlink_with_multiple_keys(self, r: redis.Redis): await r.set("a", "foo") await r.set("b", "bar") assert await r.unlink("a", "b") == 2 assert await r.get("a") is None assert await r.get("b") is None @skip_if_server_version_lt("2.6.0") async def test_dump_and_restore(self, r: redis.Redis): await r.set("a", "foo") dumped = await r.dump("a") await r.delete("a") await r.restore("a", 0, dumped) assert await r.get("a") == b"foo" 
@skip_if_server_version_lt("3.0.0") async def test_dump_and_restore_and_replace(self, r: redis.Redis): await r.set("a", "bar") dumped = await r.dump("a") with pytest.raises(redis.ResponseError): await r.restore("a", 0, dumped) await r.restore("a", 0, dumped, replace=True) assert await r.get("a") == b"bar" @skip_if_server_version_lt("5.0.0") async def test_dump_and_restore_absttl(self, r: redis.Redis): await r.set("a", "foo") dumped = await r.dump("a") await r.delete("a") ttl = int( (await redis_server_time(r) + datetime.timedelta(minutes=1)).timestamp() * 1000 ) await r.restore("a", ttl, dumped, absttl=True) assert await r.get("a") == b"foo" assert 0 < await r.ttl("a") <= 61 async def test_exists(self, r: redis.Redis): assert await r.exists("a") == 0 await r.set("a", "foo") await r.set("b", "bar") assert await r.exists("a") == 1 assert await r.exists("a", "b") == 2 async def test_exists_contains(self, r: redis.Redis): assert not await r.exists("a") await r.set("a", "foo") assert await r.exists("a") async def test_expire(self, r: redis.Redis): assert not await r.expire("a", 10) await r.set("a", "foo") assert await r.expire("a", 10) assert 0 < await r.ttl("a") <= 10 assert await r.persist("a") assert await r.ttl("a") == -1 async def test_expireat_datetime(self, r: redis.Redis): expire_at = await redis_server_time(r) + datetime.timedelta(minutes=1) await r.set("a", "foo") assert await r.expireat("a", expire_at) assert 0 < await r.ttl("a") <= 61 async def test_expireat_no_key(self, r: redis.Redis): expire_at = await redis_server_time(r) + datetime.timedelta(minutes=1) assert not await r.expireat("a", expire_at) async def test_expireat_unixtime(self, r: redis.Redis): expire_at = await redis_server_time(r) + datetime.timedelta(minutes=1) await r.set("a", "foo") expire_at_seconds = int(time.mktime(expire_at.timetuple())) assert await r.expireat("a", expire_at_seconds) assert 0 < await r.ttl("a") <= 61 async def test_get_and_set(self, r: redis.Redis): # get and set can't 
be tested independently of each other assert await r.get("a") is None byte_string = b"value" integer = 5 unicode_string = chr(3456) + "abcd" + chr(3421) assert await r.set("byte_string", byte_string) assert await r.set("integer", 5) assert await r.set("unicode_string", unicode_string) assert await r.get("byte_string") == byte_string assert await r.get("integer") == str(integer).encode() assert (await r.get("unicode_string")).decode("utf-8") == unicode_string async def test_get_set_bit(self, r: redis.Redis): # no value assert not await r.getbit("a", 5) # set bit 5 assert not await r.setbit("a", 5, True) assert await r.getbit("a", 5) # unset bit 4 assert not await r.setbit("a", 4, False) assert not await r.getbit("a", 4) # set bit 4 assert not await r.setbit("a", 4, True) assert await r.getbit("a", 4) # set bit 5 again assert await r.setbit("a", 5, True) assert await r.getbit("a", 5) async def test_getrange(self, r: redis.Redis): await r.set("a", "foo") assert await r.getrange("a", 0, 0) == b"f" assert await r.getrange("a", 0, 2) == b"foo" assert await r.getrange("a", 3, 4) == b"" async def test_getset(self, r: redis.Redis): assert await r.getset("a", "foo") is None assert await r.getset("a", "bar") == b"foo" assert await r.get("a") == b"bar" async def test_incr(self, r: redis.Redis): assert await r.incr("a") == 1 assert await r.get("a") == b"1" assert await r.incr("a") == 2 assert await r.get("a") == b"2" assert await r.incr("a", amount=5) == 7 assert await r.get("a") == b"7" async def test_incrby(self, r: redis.Redis): assert await r.incrby("a") == 1 assert await r.incrby("a", 4) == 5 assert await r.get("a") == b"5" @skip_if_server_version_lt("2.6.0") async def test_incrbyfloat(self, r: redis.Redis): assert await r.incrbyfloat("a") == 1.0 assert await r.get("a") == b"1" assert await r.incrbyfloat("a", 1.1) == 2.1 assert float(await r.get("a")) == float(2.1) async def test_keys(self, r: redis.Redis): assert await r.keys() == [] keys_with_underscores = {b"test_a", 
b"test_b"} keys = keys_with_underscores.union({b"testc"}) for key in keys: await r.set(key, 1) assert set(await r.keys(pattern="test_*")) == keys_with_underscores assert set(await r.keys(pattern="test*")) == keys async def test_mget(self, r: redis.Redis): assert await r.mget([]) == [] assert await r.mget(["a", "b"]) == [None, None] await r.set("a", "1") await r.set("b", "2") await r.set("c", "3") assert await r.mget("a", "other", "b", "c") == [b"1", None, b"2", b"3"] async def test_mset(self, r: redis.Redis): d = {"a": b"1", "b": b"2", "c": b"3"} assert await r.mset(d) for k, v in d.items(): assert await r.get(k) == v async def test_msetnx(self, r: redis.Redis): d = {"a": b"1", "b": b"2", "c": b"3"} assert await r.msetnx(d) d2 = {"a": b"x", "d": b"4"} assert not await r.msetnx(d2) for k, v in d.items(): assert await r.get(k) == v assert await r.get("d") is None @skip_if_server_version_lt("2.6.0") async def test_pexpire(self, r: redis.Redis): assert not await r.pexpire("a", 60000) await r.set("a", "foo") assert await r.pexpire("a", 60000) assert 0 < await r.pttl("a") <= 60000 assert await r.persist("a") assert await r.pttl("a") == -1 @skip_if_server_version_lt("2.6.0") async def test_pexpireat_datetime(self, r: redis.Redis): expire_at = await redis_server_time(r) + datetime.timedelta(minutes=1) await r.set("a", "foo") assert await r.pexpireat("a", expire_at) assert 0 < await r.pttl("a") <= 61000 @skip_if_server_version_lt("2.6.0") async def test_pexpireat_no_key(self, r: redis.Redis): expire_at = await redis_server_time(r) + datetime.timedelta(minutes=1) assert not await r.pexpireat("a", expire_at) @skip_if_server_version_lt("2.6.0") async def test_pexpireat_unixtime(self, r: redis.Redis): expire_at = await redis_server_time(r) + datetime.timedelta(minutes=1) await r.set("a", "foo") expire_at_seconds = int(time.mktime(expire_at.timetuple())) * 1000 assert await r.pexpireat("a", expire_at_seconds) assert 0 < await r.pttl("a") <= 61000 
@skip_if_server_version_lt("2.6.0") async def test_psetex(self, r: redis.Redis): assert await r.psetex("a", 1000, "value") assert await r.get("a") == b"value" assert 0 < await r.pttl("a") <= 1000 @skip_if_server_version_lt("2.6.0") async def test_psetex_timedelta(self, r: redis.Redis): expire_at = datetime.timedelta(milliseconds=1000) assert await r.psetex("a", expire_at, "value") assert await r.get("a") == b"value" assert 0 < await r.pttl("a") <= 1000 @skip_if_server_version_lt("2.6.0") async def test_pttl(self, r: redis.Redis): assert not await r.pexpire("a", 10000) await r.set("a", "1") assert await r.pexpire("a", 10000) assert 0 < await r.pttl("a") <= 10000 assert await r.persist("a") assert await r.pttl("a") == -1 @skip_if_server_version_lt("2.8.0") async def test_pttl_no_key(self, r: redis.Redis): """PTTL on servers 2.8 and after return -2 when the key doesn't exist""" assert await r.pttl("a") == -2 async def test_randomkey(self, r: redis.Redis): assert await r.randomkey() is None for key in ("a", "b", "c"): await r.set(key, 1) assert await r.randomkey() in (b"a", b"b", b"c") async def test_rename(self, r: redis.Redis): await r.set("a", "1") assert await r.rename("a", "b") assert await r.get("a") is None assert await r.get("b") == b"1" async def test_renamenx(self, r: redis.Redis): await r.set("a", "1") await r.set("b", "2") assert not await r.renamenx("a", "b") assert await r.get("a") == b"1" assert await r.get("b") == b"2" @skip_if_server_version_lt("2.6.0") async def test_set_nx(self, r: redis.Redis): assert await r.set("a", "1", nx=True) assert not await r.set("a", "2", nx=True) assert await r.get("a") == b"1" @skip_if_server_version_lt("2.6.0") async def test_set_xx(self, r: redis.Redis): assert not await r.set("a", "1", xx=True) assert await r.get("a") is None await r.set("a", "bar") assert await r.set("a", "2", xx=True) assert await r.get("a") == b"2" @skip_if_server_version_lt("2.6.0") async def test_set_px(self, r: redis.Redis): assert await 
r.set("a", "1", px=10000) assert await r.get("a") == b"1" assert 0 < await r.pttl("a") <= 10000 assert 0 < await r.ttl("a") <= 10 @skip_if_server_version_lt("2.6.0") async def test_set_px_timedelta(self, r: redis.Redis): expire_at = datetime.timedelta(milliseconds=1000) assert await r.set("a", "1", px=expire_at) assert 0 < await r.pttl("a") <= 1000 assert 0 < await r.ttl("a") <= 1 @skip_if_server_version_lt("2.6.0") async def test_set_ex(self, r: redis.Redis): assert await r.set("a", "1", ex=10) assert 0 < await r.ttl("a") <= 10 @skip_if_server_version_lt("2.6.0") async def test_set_ex_timedelta(self, r: redis.Redis): expire_at = datetime.timedelta(seconds=60) assert await r.set("a", "1", ex=expire_at) assert 0 < await r.ttl("a") <= 60 @skip_if_server_version_lt("2.6.0") async def test_set_multipleoptions(self, r: redis.Redis): await r.set("a", "val") assert await r.set("a", "1", xx=True, px=10000) assert 0 < await r.ttl("a") <= 10 @skip_if_server_version_lt(REDIS_6_VERSION) async def test_set_keepttl(self, r: redis.Redis): await r.set("a", "val") assert await r.set("a", "1", xx=True, px=10000) assert 0 < await r.ttl("a") <= 10 await r.set("a", "2", keepttl=True) assert await r.get("a") == b"2" assert 0 < await r.ttl("a") <= 10 async def test_setex(self, r: redis.Redis): assert await r.setex("a", 60, "1") assert await r.get("a") == b"1" assert 0 < await r.ttl("a") <= 60 async def test_setnx(self, r: redis.Redis): assert await r.setnx("a", "1") assert await r.get("a") == b"1" assert not await r.setnx("a", "2") assert await r.get("a") == b"1" async def test_setrange(self, r: redis.Redis): assert await r.setrange("a", 5, "foo") == 8 assert await r.get("a") == b"\0\0\0\0\0foo" await r.set("a", "abcdefghijh") assert await r.setrange("a", 6, "12345") == 11 assert await r.get("a") == b"abcdef12345" async def test_strlen(self, r: redis.Redis): await r.set("a", "foo") assert await r.strlen("a") == 3 async def test_substr(self, r: redis.Redis): await r.set("a", "0123456789") 
assert await r.substr("a", 0) == b"0123456789" assert await r.substr("a", 2) == b"23456789" assert await r.substr("a", 3, 5) == b"345" assert await r.substr("a", 3, -2) == b"345678" async def test_ttl(self, r: redis.Redis): await r.set("a", "1") assert await r.expire("a", 10) assert 0 < await r.ttl("a") <= 10 assert await r.persist("a") assert await r.ttl("a") == -1 @skip_if_server_version_lt("2.8.0") async def test_ttl_nokey(self, r: redis.Redis): """TTL on servers 2.8 and after return -2 when the key doesn't exist""" assert await r.ttl("a") == -2 async def test_type(self, r: redis.Redis): assert await r.type("a") == b"none" await r.set("a", "1") assert await r.type("a") == b"string" await r.delete("a") await r.lpush("a", "1") assert await r.type("a") == b"list" await r.delete("a") await r.sadd("a", "1") assert await r.type("a") == b"set" await r.delete("a") await r.zadd("a", {"1": 1}) assert await r.type("a") == b"zset" # LIST COMMANDS async def test_blpop(self, r: redis.Redis): await r.rpush("a", "1", "2") await r.rpush("b", "3", "4") assert await r.blpop(["b", "a"], timeout=1) == (b"b", b"3") assert await r.blpop(["b", "a"], timeout=1) == (b"b", b"4") assert await r.blpop(["b", "a"], timeout=1) == (b"a", b"1") assert await r.blpop(["b", "a"], timeout=1) == (b"a", b"2") assert await r.blpop(["b", "a"], timeout=1) is None await r.rpush("c", "1") assert await r.blpop("c", timeout=1) == (b"c", b"1") async def test_brpop(self, r: redis.Redis): await r.rpush("a", "1", "2") await r.rpush("b", "3", "4") assert await r.brpop(["b", "a"], timeout=1) == (b"b", b"4") assert await r.brpop(["b", "a"], timeout=1) == (b"b", b"3") assert await r.brpop(["b", "a"], timeout=1) == (b"a", b"2") assert await r.brpop(["b", "a"], timeout=1) == (b"a", b"1") assert await r.brpop(["b", "a"], timeout=1) is None await r.rpush("c", "1") assert await r.brpop("c", timeout=1) == (b"c", b"1") async def test_brpoplpush(self, r: redis.Redis): await r.rpush("a", "1", "2") await r.rpush("b", "3", 
"4") assert await r.brpoplpush("a", "b") == b"2" assert await r.brpoplpush("a", "b") == b"1" assert await r.brpoplpush("a", "b", timeout=1) is None assert await r.lrange("a", 0, -1) == [] assert await r.lrange("b", 0, -1) == [b"1", b"2", b"3", b"4"] async def test_brpoplpush_empty_string(self, r: redis.Redis): await r.rpush("a", "") assert await r.brpoplpush("a", "b") == b"" async def test_lindex(self, r: redis.Redis): await r.rpush("a", "1", "2", "3") assert await r.lindex("a", "0") == b"1" assert await r.lindex("a", "1") == b"2" assert await r.lindex("a", "2") == b"3" async def test_linsert(self, r: redis.Redis): await r.rpush("a", "1", "2", "3") assert await r.linsert("a", "after", "2", "2.5") == 4 assert await r.lrange("a", 0, -1) == [b"1", b"2", b"2.5", b"3"] assert await r.linsert("a", "before", "2", "1.5") == 5 assert await r.lrange("a", 0, -1) == [b"1", b"1.5", b"2", b"2.5", b"3"] async def test_llen(self, r: redis.Redis): await r.rpush("a", "1", "2", "3") assert await r.llen("a") == 3 async def test_lpop(self, r: redis.Redis): await r.rpush("a", "1", "2", "3") assert await r.lpop("a") == b"1" assert await r.lpop("a") == b"2" assert await r.lpop("a") == b"3" assert await r.lpop("a") is None async def test_lpush(self, r: redis.Redis): assert await r.lpush("a", "1") == 1 assert await r.lpush("a", "2") == 2 assert await r.lpush("a", "3", "4") == 4 assert await r.lrange("a", 0, -1) == [b"4", b"3", b"2", b"1"] async def test_lpushx(self, r: redis.Redis): assert await r.lpushx("a", "1") == 0 assert await r.lrange("a", 0, -1) == [] await r.rpush("a", "1", "2", "3") assert await r.lpushx("a", "4") == 4 assert await r.lrange("a", 0, -1) == [b"4", b"1", b"2", b"3"] async def test_lrange(self, r: redis.Redis): await r.rpush("a", "1", "2", "3", "4", "5") assert await r.lrange("a", 0, 2) == [b"1", b"2", b"3"] assert await r.lrange("a", 2, 10) == [b"3", b"4", b"5"] assert await r.lrange("a", 0, -1) == [b"1", b"2", b"3", b"4", b"5"] async def test_lrem(self, r: 
redis.Redis): await r.rpush("a", "Z", "b", "Z", "Z", "c", "Z", "Z") # remove the first 'Z' item assert await r.lrem("a", 1, "Z") == 1 assert await r.lrange("a", 0, -1) == [b"b", b"Z", b"Z", b"c", b"Z", b"Z"] # remove the last 2 'Z' items assert await r.lrem("a", -2, "Z") == 2 assert await r.lrange("a", 0, -1) == [b"b", b"Z", b"Z", b"c"] # remove all 'Z' items assert await r.lrem("a", 0, "Z") == 2 assert await r.lrange("a", 0, -1) == [b"b", b"c"] async def test_lset(self, r: redis.Redis): await r.rpush("a", "1", "2", "3") assert await r.lrange("a", 0, -1) == [b"1", b"2", b"3"] assert await r.lset("a", 1, "4") assert await r.lrange("a", 0, 2) == [b"1", b"4", b"3"] async def test_ltrim(self, r: redis.Redis): await r.rpush("a", "1", "2", "3") assert await r.ltrim("a", 0, 1) assert await r.lrange("a", 0, -1) == [b"1", b"2"] async def test_rpop(self, r: redis.Redis): await r.rpush("a", "1", "2", "3") assert await r.rpop("a") == b"3" assert await r.rpop("a") == b"2" assert await r.rpop("a") == b"1" assert await r.rpop("a") is None async def test_rpoplpush(self, r: redis.Redis): await r.rpush("a", "a1", "a2", "a3") await r.rpush("b", "b1", "b2", "b3") assert await r.rpoplpush("a", "b") == b"a3" assert await r.lrange("a", 0, -1) == [b"a1", b"a2"] assert await r.lrange("b", 0, -1) == [b"a3", b"b1", b"b2", b"b3"] async def test_rpush(self, r: redis.Redis): assert await r.rpush("a", "1") == 1 assert await r.rpush("a", "2") == 2 assert await r.rpush("a", "3", "4") == 4 assert await r.lrange("a", 0, -1) == [b"1", b"2", b"3", b"4"] @skip_if_server_version_lt("6.0.6") async def test_lpos(self, r: redis.Redis): assert await r.rpush("a", "a", "b", "c", "1", "2", "3", "c", "c") == 8 assert await r.lpos("a", "a") == 0 assert await r.lpos("a", "c") == 2 assert await r.lpos("a", "c", rank=1) == 2 assert await r.lpos("a", "c", rank=2) == 6 assert await r.lpos("a", "c", rank=4) is None assert await r.lpos("a", "c", rank=-1) == 7 assert await r.lpos("a", "c", rank=-2) == 6 assert await 
r.lpos("a", "c", count=0) == [2, 6, 7] assert await r.lpos("a", "c", count=1) == [2] assert await r.lpos("a", "c", count=2) == [2, 6] assert await r.lpos("a", "c", count=100) == [2, 6, 7] assert await r.lpos("a", "c", count=0, rank=2) == [6, 7] assert await r.lpos("a", "c", count=2, rank=-1) == [7, 6] assert await r.lpos("axxx", "c", count=0, rank=2) == [] assert await r.lpos("axxx", "c") is None assert await r.lpos("a", "x", count=2) == [] assert await r.lpos("a", "x") is None assert await r.lpos("a", "a", count=0, maxlen=1) == [0] assert await r.lpos("a", "c", count=0, maxlen=1) == [] assert await r.lpos("a", "c", count=0, maxlen=3) == [2] assert await r.lpos("a", "c", count=0, maxlen=3, rank=-1) == [7, 6] assert await r.lpos("a", "c", count=0, maxlen=7, rank=2) == [6] async def test_rpushx(self, r: redis.Redis): assert await r.rpushx("a", "b") == 0 assert await r.lrange("a", 0, -1) == [] await r.rpush("a", "1", "2", "3") assert await r.rpushx("a", "4") == 4 assert await r.lrange("a", 0, -1) == [b"1", b"2", b"3", b"4"] # SCAN COMMANDS @skip_if_server_version_lt("2.8.0") async def test_scan(self, r: redis.Redis): await r.set("a", 1) await r.set("b", 2) await r.set("c", 3) cursor, keys = await r.scan() assert cursor == 0 assert set(keys) == {b"a", b"b", b"c"} _, keys = await r.scan(match="a") assert set(keys) == {b"a"} @skip_if_server_version_lt(REDIS_6_VERSION) async def test_scan_type(self, r: redis.Redis): await r.sadd("a-set", 1) await r.hset("a-hash", "foo", 2) await r.lpush("a-list", "aux", 3) _, keys = await r.scan(match="a*", _type="SET") assert set(keys) == {b"a-set"} @skip_if_server_version_lt("2.8.0") async def test_scan_iter(self, r: redis.Redis): await r.set("a", 1) await r.set("b", 2) await r.set("c", 3) keys = [k async for k in r.scan_iter()] assert set(keys) == {b"a", b"b", b"c"} keys = [k async for k in r.scan_iter(match="a")] assert set(keys) == {b"a"} @skip_if_server_version_lt("2.8.0") async def test_sscan(self, r: redis.Redis): await 
r.sadd("a", 1, 2, 3) cursor, members = await r.sscan("a") assert cursor == 0 assert set(members) == {b"1", b"2", b"3"} _, members = await r.sscan("a", match=b"1") assert set(members) == {b"1"} @skip_if_server_version_lt("2.8.0") async def test_sscan_iter(self, r: redis.Redis): await r.sadd("a", 1, 2, 3) members = [k async for k in r.sscan_iter("a")] assert set(members) == {b"1", b"2", b"3"} members = [k async for k in r.sscan_iter("a", match=b"1")] assert set(members) == {b"1"} @skip_if_server_version_lt("2.8.0") async def test_hscan(self, r: redis.Redis): await r.hset("a", mapping={"a": 1, "b": 2, "c": 3}) cursor, dic = await r.hscan("a") assert cursor == 0 assert dic == {b"a": b"1", b"b": b"2", b"c": b"3"} _, dic = await r.hscan("a", match="a") assert dic == {b"a": b"1"} @skip_if_server_version_lt("2.8.0") async def test_hscan_iter(self, r: redis.Redis): await r.hset("a", mapping={"a": 1, "b": 2, "c": 3}) dic = {k: v async for k, v in r.hscan_iter("a")} assert dic == {b"a": b"1", b"b": b"2", b"c": b"3"} dic = {k: v async for k, v in r.hscan_iter("a", match="a")} assert dic == {b"a": b"1"} @skip_if_server_version_lt("2.8.0") async def test_zscan(self, r: redis.Redis): await r.zadd("a", {"a": 1, "b": 2, "c": 3}) cursor, pairs = await r.zscan("a") assert cursor == 0 assert set(pairs) == {(b"a", 1), (b"b", 2), (b"c", 3)} _, pairs = await r.zscan("a", match="a") assert set(pairs) == {(b"a", 1)} @skip_if_server_version_lt("2.8.0") async def test_zscan_iter(self, r: redis.Redis): await r.zadd("a", {"a": 1, "b": 2, "c": 3}) pairs = [k async for k in r.zscan_iter("a")] assert set(pairs) == {(b"a", 1), (b"b", 2), (b"c", 3)} pairs = [k async for k in r.zscan_iter("a", match="a")] assert set(pairs) == {(b"a", 1)} # SET COMMANDS async def test_sadd(self, r: redis.Redis): members = {b"1", b"2", b"3"} await r.sadd("a", *members) assert await r.smembers("a") == members async def test_scard(self, r: redis.Redis): await r.sadd("a", "1", "2", "3") assert await r.scard("a") == 3 
async def test_sdiff(self, r: redis.Redis): await r.sadd("a", "1", "2", "3") assert await r.sdiff("a", "b") == {b"1", b"2", b"3"} await r.sadd("b", "2", "3") assert await r.sdiff("a", "b") == {b"1"} async def test_sdiffstore(self, r: redis.Redis): await r.sadd("a", "1", "2", "3") assert await r.sdiffstore("c", "a", "b") == 3 assert await r.smembers("c") == {b"1", b"2", b"3"} await r.sadd("b", "2", "3") assert await r.sdiffstore("c", "a", "b") == 1 assert await r.smembers("c") == {b"1"} async def test_sinter(self, r: redis.Redis): await r.sadd("a", "1", "2", "3") assert await r.sinter("a", "b") == set() await r.sadd("b", "2", "3") assert await r.sinter("a", "b") == {b"2", b"3"} async def test_sinterstore(self, r: redis.Redis): await r.sadd("a", "1", "2", "3") assert await r.sinterstore("c", "a", "b") == 0 assert await r.smembers("c") == set() await r.sadd("b", "2", "3") assert await r.sinterstore("c", "a", "b") == 2 assert await r.smembers("c") == {b"2", b"3"} async def test_sismember(self, r: redis.Redis): await r.sadd("a", "1", "2", "3") assert await r.sismember("a", "1") assert await r.sismember("a", "2") assert await r.sismember("a", "3") assert not await r.sismember("a", "4") async def test_smembers(self, r: redis.Redis): await r.sadd("a", "1", "2", "3") assert await r.smembers("a") == {b"1", b"2", b"3"} async def test_smove(self, r: redis.Redis): await r.sadd("a", "a1", "a2") await r.sadd("b", "b1", "b2") assert await r.smove("a", "b", "a1") assert await r.smembers("a") == {b"a2"} assert await r.smembers("b") == {b"b1", b"b2", b"a1"} async def test_spop(self, r: redis.Redis): s = [b"1", b"2", b"3"] await r.sadd("a", *s) value = await r.spop("a") assert value in s assert await r.smembers("a") == set(s) - {value} @skip_if_server_version_lt("3.2.0") async def test_spop_multi_value(self, r: redis.Redis): s = [b"1", b"2", b"3"] await r.sadd("a", *s) values = await r.spop("a", 2) assert len(values) == 2 for value in values: assert value in s assert await r.spop("a", 
1) == list(set(s) - set(values)) async def test_srandmember(self, r: redis.Redis): s = [b"1", b"2", b"3"] await r.sadd("a", *s) assert await r.srandmember("a") in s @skip_if_server_version_lt("2.6.0") async def test_srandmember_multi_value(self, r: redis.Redis): s = [b"1", b"2", b"3"] await r.sadd("a", *s) randoms = await r.srandmember("a", number=2) assert len(randoms) == 2 assert set(randoms).intersection(s) == set(randoms) async def test_srem(self, r: redis.Redis): await r.sadd("a", "1", "2", "3", "4") assert await r.srem("a", "5") == 0 assert await r.srem("a", "2", "4") == 2 assert await r.smembers("a") == {b"1", b"3"} async def test_sunion(self, r: redis.Redis): await r.sadd("a", "1", "2") await r.sadd("b", "2", "3") assert await r.sunion("a", "b") == {b"1", b"2", b"3"} async def test_sunionstore(self, r: redis.Redis): await r.sadd("a", "1", "2") await r.sadd("b", "2", "3") assert await r.sunionstore("c", "a", "b") == 3 assert await r.smembers("c") == {b"1", b"2", b"3"} # SORTED SET COMMANDS async def test_zadd(self, r: redis.Redis): mapping = {"a1": 1.0, "a2": 2.0, "a3": 3.0} await r.zadd("a", mapping) assert await r.zrange("a", 0, -1, withscores=True) == [ (b"a1", 1.0), (b"a2", 2.0), (b"a3", 3.0), ] # error cases with pytest.raises(exceptions.DataError): await r.zadd("a", {}) # cannot use both nx and xx options with pytest.raises(exceptions.DataError): await r.zadd("a", mapping, nx=True, xx=True) # cannot use the incr options with more than one value with pytest.raises(exceptions.DataError): await r.zadd("a", mapping, incr=True) async def test_zadd_nx(self, r: redis.Redis): assert await r.zadd("a", {"a1": 1}) == 1 assert await r.zadd("a", {"a1": 99, "a2": 2}, nx=True) == 1 assert await r.zrange("a", 0, -1, withscores=True) == [ (b"a1", 1.0), (b"a2", 2.0), ] async def test_zadd_xx(self, r: redis.Redis): assert await r.zadd("a", {"a1": 1}) == 1 assert await r.zadd("a", {"a1": 99, "a2": 2}, xx=True) == 0 assert await r.zrange("a", 0, -1, withscores=True) == 
[(b"a1", 99.0)] async def test_zadd_ch(self, r: redis.Redis): assert await r.zadd("a", {"a1": 1}) == 1 assert await r.zadd("a", {"a1": 99, "a2": 2}, ch=True) == 2 assert await r.zrange("a", 0, -1, withscores=True) == [ (b"a2", 2.0), (b"a1", 99.0), ] async def test_zadd_incr(self, r: redis.Redis): assert await r.zadd("a", {"a1": 1}) == 1 assert await r.zadd("a", {"a1": 4.5}, incr=True) == 5.5 async def test_zadd_incr_with_xx(self, r: redis.Redis): # this asks zadd to incr 'a1' only if it exists, but it clearly # doesn't. Redis returns a null value in this case and so should # redis-py assert await r.zadd("a", {"a1": 1}, xx=True, incr=True) is None async def test_zcard(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zcard("a") == 3 async def test_zcount(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zcount("a", "-inf", "+inf") == 3 assert await r.zcount("a", 1, 2) == 2 assert await r.zcount("a", "(" + str(1), 2) == 1 assert await r.zcount("a", 1, "(" + str(2)) == 1 assert await r.zcount("a", 10, 20) == 0 async def test_zincrby(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zincrby("a", 1, "a2") == 3.0 assert await r.zincrby("a", 5, "a3") == 8.0 assert await r.zscore("a", "a2") == 3.0 assert await r.zscore("a", "a3") == 8.0 @skip_if_server_version_lt("2.8.9") async def test_zlexcount(self, r: redis.Redis): await r.zadd("a", {"a": 0, "b": 0, "c": 0, "d": 0, "e": 0, "f": 0, "g": 0}) assert await r.zlexcount("a", "-", "+") == 7 assert await r.zlexcount("a", "[b", "[f") == 5 async def test_zinterstore_sum(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("c", {"a1": 6, "a3": 5, "a4": 4}) assert await r.zinterstore("d", ["a", "b", "c"]) == 2 assert await r.zrange("d", 0, -1, withscores=True) == [(b"a3", 8), (b"a1", 9)] async def test_zinterstore_max(self, r: redis.Redis): await 
r.zadd("a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("c", {"a1": 6, "a3": 5, "a4": 4}) assert await r.zinterstore("d", ["a", "b", "c"], aggregate="MAX") == 2 assert await r.zrange("d", 0, -1, withscores=True) == [(b"a3", 5), (b"a1", 6)] async def test_zinterstore_min(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) await r.zadd("b", {"a1": 2, "a2": 3, "a3": 5}) await r.zadd("c", {"a1": 6, "a3": 5, "a4": 4}) assert await r.zinterstore("d", ["a", "b", "c"], aggregate="MIN") == 2 assert await r.zrange("d", 0, -1, withscores=True) == [(b"a1", 1), (b"a3", 3)] async def test_zinterstore_with_weight(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("c", {"a1": 6, "a3": 5, "a4": 4}) assert await r.zinterstore("d", {"a": 1, "b": 2, "c": 3}) == 2 assert await r.zrange("d", 0, -1, withscores=True) == [(b"a3", 20), (b"a1", 23)] @skip_if_server_version_lt("4.9.0") async def test_zpopmax(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zpopmax("a") == [(b"a3", 3)] # with count assert await r.zpopmax("a", count=2) == [(b"a2", 2), (b"a1", 1)] @skip_if_server_version_lt("4.9.0") async def test_zpopmin(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zpopmin("a") == [(b"a1", 1)] # with count assert await r.zpopmin("a", count=2) == [(b"a2", 2), (b"a3", 3)] @skip_if_server_version_lt("4.9.0") async def test_bzpopmax(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2}) await r.zadd("b", {"b1": 10, "b2": 20}) assert await r.bzpopmax(["b", "a"], timeout=1) == (b"b", b"b2", 20) assert await r.bzpopmax(["b", "a"], timeout=1) == (b"b", b"b1", 10) assert await r.bzpopmax(["b", "a"], timeout=1) == (b"a", b"a2", 2) assert await r.bzpopmax(["b", "a"], timeout=1) == (b"a", b"a1", 1) assert await r.bzpopmax(["b", "a"], timeout=1) is None await r.zadd("c", {"c1": 100}) 
assert await r.bzpopmax("c", timeout=1) == (b"c", b"c1", 100) @skip_if_server_version_lt("4.9.0") async def test_bzpopmin(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2}) await r.zadd("b", {"b1": 10, "b2": 20}) assert await r.bzpopmin(["b", "a"], timeout=1) == (b"b", b"b1", 10) assert await r.bzpopmin(["b", "a"], timeout=1) == (b"b", b"b2", 20) assert await r.bzpopmin(["b", "a"], timeout=1) == (b"a", b"a1", 1) assert await r.bzpopmin(["b", "a"], timeout=1) == (b"a", b"a2", 2) assert await r.bzpopmin(["b", "a"], timeout=1) is None await r.zadd("c", {"c1": 100}) assert await r.bzpopmin("c", timeout=1) == (b"c", b"c1", 100) async def test_zrange(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zrange("a", 0, 1) == [b"a1", b"a2"] assert await r.zrange("a", 1, 2) == [b"a2", b"a3"] # withscores assert await r.zrange("a", 0, 1, withscores=True) == [ (b"a1", 1.0), (b"a2", 2.0), ] assert await r.zrange("a", 1, 2, withscores=True) == [ (b"a2", 2.0), (b"a3", 3.0), ] # custom score function assert await r.zrange("a", 0, 1, withscores=True, score_cast_func=int) == [ (b"a1", 1), (b"a2", 2), ] @skip_if_server_version_lt("2.8.9") async def test_zrangebylex(self, r: redis.Redis): await r.zadd("a", {"a": 0, "b": 0, "c": 0, "d": 0, "e": 0, "f": 0, "g": 0}) assert await r.zrangebylex("a", "-", "[c") == [b"a", b"b", b"c"] assert await r.zrangebylex("a", "-", "(c") == [b"a", b"b"] assert await r.zrangebylex("a", "[aaa", "(g") == [b"b", b"c", b"d", b"e", b"f"] assert await r.zrangebylex("a", "[f", "+") == [b"f", b"g"] assert await r.zrangebylex("a", "-", "+", start=3, num=2) == [b"d", b"e"] @skip_if_server_version_lt("2.9.9") async def test_zrevrangebylex(self, r: redis.Redis): await r.zadd("a", {"a": 0, "b": 0, "c": 0, "d": 0, "e": 0, "f": 0, "g": 0}) assert await r.zrevrangebylex("a", "[c", "-") == [b"c", b"b", b"a"] assert await r.zrevrangebylex("a", "(c", "-") == [b"b", b"a"] assert await r.zrevrangebylex("a", "(g", "[aaa") == [ b"f", 
b"e", b"d", b"c", b"b", ] assert await r.zrevrangebylex("a", "+", "[f") == [b"g", b"f"] assert await r.zrevrangebylex("a", "+", "-", start=3, num=2) == [b"d", b"c"] async def test_zrangebyscore(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5}) assert await r.zrangebyscore("a", 2, 4) == [b"a2", b"a3", b"a4"] # slicing with start/num assert await r.zrangebyscore("a", 2, 4, start=1, num=2) == [b"a3", b"a4"] # withscores assert await r.zrangebyscore("a", 2, 4, withscores=True) == [ (b"a2", 2.0), (b"a3", 3.0), (b"a4", 4.0), ] # custom score function assert await r.zrangebyscore( "a", 2, 4, withscores=True, score_cast_func=int ) == [(b"a2", 2), (b"a3", 3), (b"a4", 4)] async def test_zrank(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5}) assert await r.zrank("a", "a1") == 0 assert await r.zrank("a", "a2") == 1 assert await r.zrank("a", "a6") is None async def test_zrem(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zrem("a", "a2") == 1 assert await r.zrange("a", 0, -1) == [b"a1", b"a3"] assert await r.zrem("a", "b") == 0 assert await r.zrange("a", 0, -1) == [b"a1", b"a3"] async def test_zrem_multiple_keys(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zrem("a", "a1", "a2") == 2 assert await r.zrange("a", 0, 5) == [b"a3"] @skip_if_server_version_lt("2.8.9") async def test_zremrangebylex(self, r: redis.Redis): await r.zadd("a", {"a": 0, "b": 0, "c": 0, "d": 0, "e": 0, "f": 0, "g": 0}) assert await r.zremrangebylex("a", "-", "[c") == 3 assert await r.zrange("a", 0, -1) == [b"d", b"e", b"f", b"g"] assert await r.zremrangebylex("a", "[f", "+") == 2 assert await r.zrange("a", 0, -1) == [b"d", b"e"] assert await r.zremrangebylex("a", "[h", "+") == 0 assert await r.zrange("a", 0, -1) == [b"d", b"e"] async def test_zremrangebyrank(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5}) assert await 
r.zremrangebyrank("a", 1, 3) == 3 assert await r.zrange("a", 0, 5) == [b"a1", b"a5"] async def test_zremrangebyscore(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5}) assert await r.zremrangebyscore("a", 2, 4) == 3 assert await r.zrange("a", 0, -1) == [b"a1", b"a5"] assert await r.zremrangebyscore("a", 2, 4) == 0 assert await r.zrange("a", 0, -1) == [b"a1", b"a5"] async def test_zrevrange(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zrevrange("a", 0, 1) == [b"a3", b"a2"] assert await r.zrevrange("a", 1, 2) == [b"a2", b"a1"] # withscores assert await r.zrevrange("a", 0, 1, withscores=True) == [ (b"a3", 3.0), (b"a2", 2.0), ] assert await r.zrevrange("a", 1, 2, withscores=True) == [ (b"a2", 2.0), (b"a1", 1.0), ] # custom score function assert await r.zrevrange("a", 0, 1, withscores=True, score_cast_func=int) == [ (b"a3", 3.0), (b"a2", 2.0), ] async def test_zrevrangebyscore(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5}) assert await r.zrevrangebyscore("a", 4, 2) == [b"a4", b"a3", b"a2"] # slicing with start/num assert await r.zrevrangebyscore("a", 4, 2, start=1, num=2) == [b"a3", b"a2"] # withscores assert await r.zrevrangebyscore("a", 4, 2, withscores=True) == [ (b"a4", 4.0), (b"a3", 3.0), (b"a2", 2.0), ] # custom score function assert await r.zrevrangebyscore( "a", 4, 2, withscores=True, score_cast_func=int ) == [(b"a4", 4), (b"a3", 3), (b"a2", 2)] async def test_zrevrank(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3, "a4": 4, "a5": 5}) assert await r.zrevrank("a", "a1") == 4 assert await r.zrevrank("a", "a2") == 3 assert await r.zrevrank("a", "a6") is None async def test_zscore(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) assert await r.zscore("a", "a1") == 1.0 assert await r.zscore("a", "a2") == 2.0 assert await r.zscore("a", "a4") is None async def test_zunionstore_sum(self, r: redis.Redis): await 
r.zadd("a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("c", {"a1": 6, "a3": 5, "a4": 4}) assert await r.zunionstore("d", ["a", "b", "c"]) == 4 assert await r.zrange("d", 0, -1, withscores=True) == [ (b"a2", 3), (b"a4", 4), (b"a3", 8), (b"a1", 9), ] async def test_zunionstore_max(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("c", {"a1": 6, "a3": 5, "a4": 4}) assert await r.zunionstore("d", ["a", "b", "c"], aggregate="MAX") == 4 assert await r.zrange("d", 0, -1, withscores=True) == [ (b"a2", 2), (b"a4", 4), (b"a3", 5), (b"a1", 6), ] async def test_zunionstore_min(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 2, "a3": 3}) await r.zadd("b", {"a1": 2, "a2": 2, "a3": 4}) await r.zadd("c", {"a1": 6, "a3": 5, "a4": 4}) assert await r.zunionstore("d", ["a", "b", "c"], aggregate="MIN") == 4 assert await r.zrange("d", 0, -1, withscores=True) == [ (b"a1", 1), (b"a2", 2), (b"a3", 3), (b"a4", 4), ] async def test_zunionstore_with_weight(self, r: redis.Redis): await r.zadd("a", {"a1": 1, "a2": 1, "a3": 1}) await r.zadd("b", {"a1": 2, "a2": 2, "a3": 2}) await r.zadd("c", {"a1": 6, "a3": 5, "a4": 4}) assert await r.zunionstore("d", {"a": 1, "b": 2, "c": 3}) == 4 assert await r.zrange("d", 0, -1, withscores=True) == [ (b"a2", 5), (b"a4", 12), (b"a3", 20), (b"a1", 23), ] # HYPERLOGLOG TESTS @skip_if_server_version_lt("2.8.9") async def test_pfadd(self, r: redis.Redis): members = {b"1", b"2", b"3"} assert await r.pfadd("a", *members) == 1 assert await r.pfadd("a", *members) == 0 assert await r.pfcount("a") == len(members) @skip_if_server_version_lt("2.8.9") async def test_pfcount(self, r: redis.Redis): members = {b"1", b"2", b"3"} await r.pfadd("a", *members) assert await r.pfcount("a") == len(members) members_b = {b"2", b"3", b"4"} await r.pfadd("b", *members_b) assert await r.pfcount("b") == len(members_b) assert await r.pfcount("a", "b") == 
len(members_b.union(members)) @skip_if_server_version_lt("2.8.9") async def test_pfmerge(self, r: redis.Redis): mema = {b"1", b"2", b"3"} memb = {b"2", b"3", b"4"} memc = {b"5", b"6", b"7"} await r.pfadd("a", *mema) await r.pfadd("b", *memb) await r.pfadd("c", *memc) await r.pfmerge("d", "c", "a") assert await r.pfcount("d") == 6 await r.pfmerge("d", "b") assert await r.pfcount("d") == 7 # HASH COMMANDS async def test_hget_and_hset(self, r: redis.Redis): await r.hset("a", mapping={"1": 1, "2": 2, "3": 3}) assert await r.hget("a", "1") == b"1" assert await r.hget("a", "2") == b"2" assert await r.hget("a", "3") == b"3" # field was updated, redis returns 0 assert await r.hset("a", "2", 5) == 0 assert await r.hget("a", "2") == b"5" # field is new, redis returns 1 assert await r.hset("a", "4", 4) == 1 assert await r.hget("a", "4") == b"4" # key inside of hash that doesn't exist returns null value assert await r.hget("a", "b") is None # keys with bool(key) == False assert await r.hset("a", 0, 10) == 1 assert await r.hset("a", "", 10) == 1 async def test_hset_with_multi_key_values(self, r: redis.Redis): await r.hset("a", mapping={"1": 1, "2": 2, "3": 3}) assert await r.hget("a", "1") == b"1" assert await r.hget("a", "2") == b"2" assert await r.hget("a", "3") == b"3" await r.hset("b", "foo", "bar", mapping={"1": 1, "2": 2}) assert await r.hget("b", "1") == b"1" assert await r.hget("b", "2") == b"2" assert await r.hget("b", "foo") == b"bar" async def test_hset_without_data(self, r: redis.Redis): with pytest.raises(exceptions.DataError): await r.hset("x") async def test_hdel(self, r: redis.Redis): await r.hset("a", mapping={"1": 1, "2": 2, "3": 3}) assert await r.hdel("a", "2") == 1 assert await r.hget("a", "2") is None assert await r.hdel("a", "1", "3") == 2 assert await r.hlen("a") == 0 async def test_hexists(self, r: redis.Redis): await r.hset("a", mapping={"1": 1, "2": 2, "3": 3}) assert await r.hexists("a", "1") assert not await r.hexists("a", "4") async def 
test_hgetall(self, r: redis.Redis): h = {b"a1": b"1", b"a2": b"2", b"a3": b"3"} await r.hset("a", mapping=h) assert await r.hgetall("a") == h async def test_hincrby(self, r: redis.Redis): assert await r.hincrby("a", "1") == 1 assert await r.hincrby("a", "1", amount=2) == 3 assert await r.hincrby("a", "1", amount=-2) == 1 @skip_if_server_version_lt("2.6.0") async def test_hincrbyfloat(self, r: redis.Redis): assert await r.hincrbyfloat("a", "1") == 1.0 assert await r.hincrbyfloat("a", "1") == 2.0 assert await r.hincrbyfloat("a", "1", 1.2) == 3.2 async def test_hkeys(self, r: redis.Redis): h = {b"a1": b"1", b"a2": b"2", b"a3": b"3"} await r.hset("a", mapping=h) local_keys = list(h.keys()) remote_keys = await r.hkeys("a") assert sorted(local_keys) == sorted(remote_keys) async def test_hlen(self, r: redis.Redis): await r.hset("a", mapping={"1": 1, "2": 2, "3": 3}) assert await r.hlen("a") == 3 async def test_hmget(self, r: redis.Redis): assert await r.hset("a", mapping={"a": 1, "b": 2, "c": 3}) assert await r.hmget("a", "a", "b", "c") == [b"1", b"2", b"3"] async def test_hmset(self, r: redis.Redis): warning_message = ( r"^Redis\.hmset\(\) is deprecated\. 
" r"Use Redis\.hset\(\) instead\.$" ) h = {b"a": b"1", b"b": b"2", b"c": b"3"} with pytest.warns(DeprecationWarning, match=warning_message): assert await r.hmset("a", h) assert await r.hgetall("a") == h async def test_hsetnx(self, r: redis.Redis): # Initially set the hash field assert await r.hsetnx("a", "1", 1) assert await r.hget("a", "1") == b"1" assert not await r.hsetnx("a", "1", 2) assert await r.hget("a", "1") == b"1" async def test_hvals(self, r: redis.Redis): h = {b"a1": b"1", b"a2": b"2", b"a3": b"3"} await r.hset("a", mapping=h) local_vals = list(h.values()) remote_vals = await r.hvals("a") assert sorted(local_vals) == sorted(remote_vals) @skip_if_server_version_lt("3.2.0") async def test_hstrlen(self, r: redis.Redis): await r.hset("a", mapping={"1": "22", "2": "333"}) assert await r.hstrlen("a", "1") == 2 assert await r.hstrlen("a", "2") == 3 # SORT async def test_sort_basic(self, r: redis.Redis): await r.rpush("a", "3", "2", "1", "4") assert await r.sort("a") == [b"1", b"2", b"3", b"4"] async def test_sort_limited(self, r: redis.Redis): await r.rpush("a", "3", "2", "1", "4") assert await r.sort("a", start=1, num=2) == [b"2", b"3"] async def test_sort_by(self, r: redis.Redis): await r.set("score:1", 8) await r.set("score:2", 3) await r.set("score:3", 5) await r.rpush("a", "3", "2", "1") assert await r.sort("a", by="score:*") == [b"2", b"3", b"1"] async def test_sort_get(self, r: redis.Redis): await r.set("user:1", "u1") await r.set("user:2", "u2") await r.set("user:3", "u3") await r.rpush("a", "2", "3", "1") assert await r.sort("a", get="user:*") == [b"u1", b"u2", b"u3"] async def test_sort_get_multi(self, r: redis.Redis): await r.set("user:1", "u1") await r.set("user:2", "u2") await r.set("user:3", "u3") await r.rpush("a", "2", "3", "1") assert await r.sort("a", get=("user:*", "#")) == [ b"u1", b"1", b"u2", b"2", b"u3", b"3", ] async def test_sort_get_groups_two(self, r: redis.Redis): await r.set("user:1", "u1") await r.set("user:2", "u2") await 
r.set("user:3", "u3") await r.rpush("a", "2", "3", "1") assert await r.sort("a", get=("user:*", "#"), groups=True) == [ (b"u1", b"1"), (b"u2", b"2"), (b"u3", b"3"), ] async def test_sort_groups_string_get(self, r: redis.Redis): await r.set("user:1", "u1") await r.set("user:2", "u2") await r.set("user:3", "u3") await r.rpush("a", "2", "3", "1") with pytest.raises(exceptions.DataError): await r.sort("a", get="user:*", groups=True) async def test_sort_groups_just_one_get(self, r: redis.Redis): await r.set("user:1", "u1") await r.set("user:2", "u2") await r.set("user:3", "u3") await r.rpush("a", "2", "3", "1") with pytest.raises(exceptions.DataError): await r.sort("a", get=["user:*"], groups=True) async def test_sort_groups_no_get(self, r: redis.Redis): await r.set("user:1", "u1") await r.set("user:2", "u2") await r.set("user:3", "u3") await r.rpush("a", "2", "3", "1") with pytest.raises(exceptions.DataError): await r.sort("a", groups=True) async def test_sort_groups_three_gets(self, r: redis.Redis): await r.set("user:1", "u1") await r.set("user:2", "u2") await r.set("user:3", "u3") await r.set("door:1", "d1") await r.set("door:2", "d2") await r.set("door:3", "d3") await r.rpush("a", "2", "3", "1") assert await r.sort("a", get=("user:*", "door:*", "#"), groups=True) == [ (b"u1", b"d1", b"1"), (b"u2", b"d2", b"2"), (b"u3", b"d3", b"3"), ] async def test_sort_desc(self, r: redis.Redis): await r.rpush("a", "2", "3", "1") assert await r.sort("a", desc=True) == [b"3", b"2", b"1"] async def test_sort_alpha(self, r: redis.Redis): await r.rpush("a", "e", "c", "b", "d", "a") assert await r.sort("a", alpha=True) == [b"a", b"b", b"c", b"d", b"e"] async def test_sort_store(self, r: redis.Redis): await r.rpush("a", "2", "3", "1") assert await r.sort("a", store="sorted_values") == 3 assert await r.lrange("sorted_values", 0, -1) == [b"1", b"2", b"3"] async def test_sort_all_options(self, r: redis.Redis): await r.set("user:1:username", "zeus") await r.set("user:2:username", "titan") 
await r.set("user:3:username", "hermes") await r.set("user:4:username", "hercules") await r.set("user:5:username", "apollo") await r.set("user:6:username", "athena") await r.set("user:7:username", "hades") await r.set("user:8:username", "dionysus") await r.set("user:1:favorite_drink", "yuengling") await r.set("user:2:favorite_drink", "rum") await r.set("user:3:favorite_drink", "vodka") await r.set("user:4:favorite_drink", "milk") await r.set("user:5:favorite_drink", "pinot noir") await r.set("user:6:favorite_drink", "water") await r.set("user:7:favorite_drink", "gin") await r.set("user:8:favorite_drink", "apple juice") await r.rpush("gods", "5", "8", "3", "1", "2", "7", "6", "4") num = await r.sort( "gods", start=2, num=4, by="user:*:username", get="user:*:favorite_drink", desc=True, alpha=True, store="sorted", ) assert num == 4 assert await r.lrange("sorted", 0, 10) == [ b"vodka", b"milk", b"gin", b"apple juice", ] async def test_sort_issue_924(self, r: redis.Redis): # Tests for issue https://github.com/andymccurdy/redis-py/issues/924 await r.execute_command("SADD", "issue#924", 1) await r.execute_command("SORT", "issue#924") async def test_cluster_addslots(self, mock_cluster_resp_ok): assert await mock_cluster_resp_ok.cluster("ADDSLOTS", 1) is True async def test_cluster_count_failure_reports(self, mock_cluster_resp_int): assert isinstance( await mock_cluster_resp_int.cluster("COUNT-FAILURE-REPORTS", "node"), int ) async def test_cluster_countkeysinslot(self, mock_cluster_resp_int): assert isinstance( await mock_cluster_resp_int.cluster("COUNTKEYSINSLOT", 2), int ) async def test_cluster_delslots(self, mock_cluster_resp_ok): assert await mock_cluster_resp_ok.cluster("DELSLOTS", 1) is True async def test_cluster_failover(self, mock_cluster_resp_ok): assert await mock_cluster_resp_ok.cluster("FAILOVER", 1) is True async def test_cluster_forget(self, mock_cluster_resp_ok): assert await mock_cluster_resp_ok.cluster("FORGET", 1) is True async def 
test_cluster_info(self, mock_cluster_resp_info): assert isinstance(await mock_cluster_resp_info.cluster("info"), dict) async def test_cluster_keyslot(self, mock_cluster_resp_int): assert isinstance(await mock_cluster_resp_int.cluster("keyslot", "asdf"), int) async def test_cluster_meet(self, mock_cluster_resp_ok): assert await mock_cluster_resp_ok.cluster("meet", "ip", "port", 1) is True async def test_cluster_nodes(self, mock_cluster_resp_nodes): assert isinstance(await mock_cluster_resp_nodes.cluster("nodes"), dict) async def test_cluster_replicate(self, mock_cluster_resp_ok): assert await mock_cluster_resp_ok.cluster("replicate", "nodeid") is True async def test_cluster_reset(self, mock_cluster_resp_ok): assert await mock_cluster_resp_ok.cluster("reset", "hard") is True async def test_cluster_saveconfig(self, mock_cluster_resp_ok): assert await mock_cluster_resp_ok.cluster("saveconfig") is True async def test_cluster_setslot(self, mock_cluster_resp_ok): assert ( await mock_cluster_resp_ok.cluster("setslot", 1, "IMPORTING", "nodeid") is True ) async def test_cluster_slaves(self, mock_cluster_resp_slaves): assert isinstance( await mock_cluster_resp_slaves.cluster("slaves", "nodeid"), dict ) @skip_if_server_version_lt("3.0.0") async def test_readwrite(self, r: redis.Redis): assert await r.readwrite() @skip_if_server_version_lt("3.0.0") async def test_readonly_invalid_cluster_state(self, r: redis.Redis): with pytest.raises(exceptions.RedisError): await r.readonly() @skip_if_server_version_lt("3.0.0") async def test_readonly(self, mock_cluster_resp_ok): assert await mock_cluster_resp_ok.readonly() is True # GEO COMMANDS @skip_if_server_version_lt("3.2.0") async def test_geoadd(self, r: redis.Redis): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, "place2", ) assert await r.geoadd("barcelona", values) == 2 assert await r.zcard("barcelona") == 2 @skip_if_server_version_lt("3.2.0") async def test_geoadd_invalid_params(self, r: 
redis.Redis): with pytest.raises(exceptions.RedisError): await r.geoadd("barcelona", (1, 2)) @skip_if_server_version_lt("3.2.0") async def test_geodist(self, r: redis.Redis): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, "place2", ) assert await r.geoadd("barcelona", values) == 2 assert await r.geodist("barcelona", "place1", "place2") == 3067.4157 @skip_if_server_version_lt("3.2.0") async def test_geodist_units(self, r: redis.Redis): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, "place2", ) await r.geoadd("barcelona", values) assert await r.geodist("barcelona", "place1", "place2", "km") == 3.0674 @skip_if_server_version_lt("3.2.0") async def test_geodist_missing_one_member(self, r: redis.Redis): values = (2.1909389952632, 41.433791470673, "place1") await r.geoadd("barcelona", values) assert await r.geodist("barcelona", "place1", "missing_member", "km") is None @skip_if_server_version_lt("3.2.0") async def test_geodist_invalid_units(self, r: redis.Redis): with pytest.raises(exceptions.RedisError): assert await r.geodist("x", "y", "z", "inches") @skip_if_server_version_lt("3.2.0") async def test_geohash(self, r: redis.Redis): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, "place2", ) await r.geoadd("barcelona", values) assert await r.geohash("barcelona", "place1", "place2", "place3") == [ "sp3e9yg3kd0", "sp3e9cbc3t0", None, ] @skip_if_server_version_lt("3.2.0") async def test_geopos(self, r: redis.Redis): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, "place2", ) await r.geoadd("barcelona", values) # redis uses 52 bits precision, hereby small errors may be introduced. 
assert await r.geopos("barcelona", "place1", "place2") == [ (2.19093829393386841, 41.43379028184083523), (2.18737632036209106, 41.40634178640635099), ] @skip_if_server_version_lt("4.0.0") async def test_geopos_no_value(self, r: redis.Redis): assert await r.geopos("barcelona", "place1", "place2") == [None, None] @skip_if_server_version_lt("3.2.0") @skip_if_server_version_gte("4.0.0") async def test_old_geopos_no_value(self, r: redis.Redis): assert await r.geopos("barcelona", "place1", "place2") == [] @skip_if_server_version_lt("3.2.0") async def test_georadius(self, r: redis.Redis): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, b"\x80place2", ) await r.geoadd("barcelona", values) assert await r.georadius("barcelona", 2.191, 41.433, 1000) == [b"place1"] assert await r.georadius("barcelona", 2.187, 41.406, 1000) == [b"\x80place2"] @skip_if_server_version_lt("3.2.0") async def test_georadius_no_values(self, r: redis.Redis): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, "place2", ) await r.geoadd("barcelona", values) assert await r.georadius("barcelona", 1, 2, 1000) == [] @skip_if_server_version_lt("3.2.0") async def test_georadius_units(self, r: redis.Redis): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, "place2", ) await r.geoadd("barcelona", values) assert await r.georadius("barcelona", 2.191, 41.433, 1, unit="km") == [ b"place1" ] @skip_unless_arch_bits(64) @skip_if_server_version_lt("3.2.0") async def test_georadius_with(self, r: redis.Redis): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, "place2", ) await r.geoadd("barcelona", values) # test a bunch of combinations to test the parse response # function. 
assert await r.georadius( "barcelona", 2.191, 41.433, 1, unit="km", withdist=True, withcoord=True, withhash=True, ) == [ [ b"place1", 0.0881, 3471609698139488, (2.19093829393386841, 41.43379028184083523), ] ] assert await r.georadius( "barcelona", 2.191, 41.433, 1, unit="km", withdist=True, withcoord=True ) == [[b"place1", 0.0881, (2.19093829393386841, 41.43379028184083523)]] assert await r.georadius( "barcelona", 2.191, 41.433, 1, unit="km", withhash=True, withcoord=True ) == [ [b"place1", 3471609698139488, (2.19093829393386841, 41.43379028184083523)] ] # test no values. assert ( await r.georadius( "barcelona", 2, 1, 1, unit="km", withdist=True, withcoord=True, withhash=True, ) == [] ) @skip_if_server_version_lt("3.2.0") async def test_georadius_count(self, r: redis.Redis): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, "place2", ) await r.geoadd("barcelona", values) assert await r.georadius("barcelona", 2.191, 41.433, 3000, count=1) == [ b"place1" ] @skip_if_server_version_lt("3.2.0") async def test_georadius_sort(self, r: redis.Redis): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, "place2", ) await r.geoadd("barcelona", values) assert await r.georadius("barcelona", 2.191, 41.433, 3000, sort="ASC") == [ b"place1", b"place2", ] assert await r.georadius("barcelona", 2.191, 41.433, 3000, sort="DESC") == [ b"place2", b"place1", ] @skip_if_server_version_lt("3.2.0") async def test_georadius_store(self, r: redis.Redis): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, "place2", ) await r.geoadd("barcelona", values) await r.georadius("barcelona", 2.191, 41.433, 1000, store="places_barcelona") assert await r.zrange("places_barcelona", 0, -1) == [b"place1"] @skip_unless_arch_bits(64) @skip_if_server_version_lt("3.2.0") async def test_georadius_store_dist(self, r: redis.Redis): values = (2.1909389952632, 41.433791470673, "place1") + ( 
2.1873744593677, 41.406342043777, "place2", ) await r.geoadd("barcelona", values) await r.georadius( "barcelona", 2.191, 41.433, 1000, store_dist="places_barcelona" ) # instead of save the geo score, the distance is saved. assert await r.zscore("places_barcelona", "place1") == 88.05060698409301 @skip_unless_arch_bits(64) @skip_if_server_version_lt("3.2.0") async def test_georadiusmember(self, r: redis.Redis): values = (2.1909389952632, 41.433791470673, "place1") + ( 2.1873744593677, 41.406342043777, b"\x80place2", ) await r.geoadd("barcelona", values) assert await r.georadiusbymember("barcelona", "place1", 4000) == [ b"\x80place2", b"place1", ] assert await r.georadiusbymember("barcelona", "place1", 10) == [b"place1"] assert await r.georadiusbymember( "barcelona", "place1", 4000, withdist=True, withcoord=True, withhash=True ) == [ [ b"\x80place2", 3067.4157, 3471609625421029, (2.187376320362091, 41.40634178640635), ], [ b"place1", 0.0, 3471609698139488, (2.1909382939338684, 41.433790281840835), ], ] @skip_if_server_version_lt("5.0.0") async def test_xack(self, r: redis.Redis): stream = "stream" group = "group" consumer = "consumer" # xack on a stream that doesn't exist assert await r.xack(stream, group, "0-0") == 0 m1 = await r.xadd(stream, {"one": "one"}) m2 = await r.xadd(stream, {"two": "two"}) m3 = await r.xadd(stream, {"three": "three"}) # xack on a group that doesn't exist assert await r.xack(stream, group, m1) == 0 await r.xgroup_create(stream, group, 0) await r.xreadgroup(group, consumer, streams={stream: ">"}) # xack returns the number of ack'd elements assert await r.xack(stream, group, m1) == 1 assert await r.xack(stream, group, m2, m3) == 2 @skip_if_server_version_lt("5.0.0") async def test_xadd(self, r: redis.Redis): stream = "stream" message_id = await r.xadd(stream, {"foo": "bar"}) assert re.match(rb"[0-9]+\-[0-9]+", message_id) # explicit message id message_id = b"9999999999999999999-0" assert message_id == await r.xadd(stream, {"foo": "bar"}, 
id=message_id) # with maxlen, the list evicts the first message await r.xadd(stream, {"foo": "bar"}, maxlen=2, approximate=False) assert await r.xlen(stream) == 2 @skip_if_server_version_lt("5.0.0") async def test_xclaim(self, r: redis.Redis): stream = "stream" group = "group" consumer1 = "consumer1" consumer2 = "consumer2" message_id = await r.xadd(stream, {"john": "wick"}) message = await get_stream_message(r, stream, message_id) await r.xgroup_create(stream, group, 0) # trying to claim a message that isn't already pending doesn't # do anything response = await r.xclaim( stream, group, consumer2, min_idle_time=0, message_ids=(message_id,) ) assert response == [] # read the group as consumer1 to initially claim the messages await r.xreadgroup(group, consumer1, streams={stream: ">"}) # claim the message as consumer2 response = await r.xclaim( stream, group, consumer2, min_idle_time=0, message_ids=(message_id,) ) assert response[0] == message # reclaim the message as consumer1, but use the justid argument # which only returns message ids assert ( await r.xclaim( stream, group, consumer1, min_idle_time=0, message_ids=(message_id,), justid=True, ) == [message_id] ) @skip_if_server_version_lt("5.0.0") async def test_xclaim_trimmed(self, r: redis.Redis): # xclaim should not raise an exception if the item is not there stream = "stream" group = "group" await r.xgroup_create(stream, group, id="$", mkstream=True) # add a couple of new items sid1 = await r.xadd(stream, {"item": 0}) sid2 = await r.xadd(stream, {"item": 0}) # read them from consumer1 await r.xreadgroup(group, "consumer1", {stream: ">"}) # add a 3rd and trim the stream down to 2 items await r.xadd(stream, {"item": 3}, maxlen=2, approximate=False) # xclaim them from consumer2 # the item that is still in the stream should be returned item = await r.xclaim(stream, group, "consumer2", 0, [sid1, sid2]) assert len(item) == 2 assert item[0] == (None, None) assert item[1][0] == sid2 @skip_if_server_version_lt("5.0.0") 
    async def test_xdel(self, r: redis.Redis):
        """XDEL removes entries by id and returns the number deleted."""
        stream = "stream"
        # deleting from an empty stream doesn't do anything
        assert await r.xdel(stream, 1) == 0

        m1 = await r.xadd(stream, {"foo": "bar"})
        m2 = await r.xadd(stream, {"foo": "bar"})
        m3 = await r.xadd(stream, {"foo": "bar"})

        # xdel returns the number of deleted elements
        assert await r.xdel(stream, m1) == 1
        assert await r.xdel(stream, m2, m3) == 2

    @skip_if_server_version_lt("5.0.0")
    async def test_xgroup_create(self, r: redis.Redis):
        """XGROUP CREATE on an existing stream, observed via XINFO GROUPS."""
        # tests xgroup_create and xinfo_groups
        stream = "stream"
        group = "group"
        await r.xadd(stream, {"foo": "bar"})

        # no group is setup yet, no info to obtain
        assert await r.xinfo_groups(stream) == []

        assert await r.xgroup_create(stream, group, 0)
        expected = [
            {
                "name": group.encode(),
                "consumers": 0,
                "pending": 0,
                "last-delivered-id": b"0-0",
            }
        ]
        assert await r.xinfo_groups(stream) == expected

    @skip_if_server_version_lt("5.0.0")
    async def test_xgroup_create_mkstream(self, r: redis.Redis):
        """XGROUP CREATE with mkstream=True creates the missing stream."""
        # tests xgroup_create and xinfo_groups
        stream = "stream"
        group = "group"

        # an error is raised if a group is created on a stream that
        # doesn't already exist
        with pytest.raises(exceptions.ResponseError):
            await r.xgroup_create(stream, group, 0)

        # however, with mkstream=True, the underlying stream is created
        # automatically
        assert await r.xgroup_create(stream, group, 0, mkstream=True)
        expected = [
            {
                "name": group.encode(),
                "consumers": 0,
                "pending": 0,
                "last-delivered-id": b"0-0",
            }
        ]
        assert await r.xinfo_groups(stream) == expected

    @skip_if_server_version_lt("5.0.0")
    async def test_xgroup_delconsumer(self, r: redis.Redis):
        """XGROUP DELCONSUMER returns the consumer's pending-message count."""
        stream = "stream"
        group = "group"
        consumer = "consumer"
        await r.xadd(stream, {"foo": "bar"})
        await r.xadd(stream, {"foo": "bar"})
        await r.xgroup_create(stream, group, 0)

        # a consumer that hasn't yet read any messages doesn't do anything
        assert await r.xgroup_delconsumer(stream, group, consumer) == 0

        # read all messages from the group
        await r.xreadgroup(group, consumer, streams={stream: ">"})

        # deleting the consumer should return 2 pending messages
        assert await r.xgroup_delconsumer(stream, group, consumer) == 2

    @skip_if_server_version_lt("5.0.0")
    async def test_xgroup_destroy(self, r: redis.Redis):
        """XGROUP DESTROY is falsy for a missing group, truthy otherwise."""
        stream = "stream"
        group = "group"
        await r.xadd(stream, {"foo": "bar"})

        # destroying a nonexistent group returns False
        assert not await r.xgroup_destroy(stream, group)

        await r.xgroup_create(stream, group, 0)
        assert await r.xgroup_destroy(stream, group)

    @skip_if_server_version_lt("5.0.0")
    async def test_xgroup_setid(self, r: redis.Redis):
        """XGROUP SETID moves the group's last-delivered-id."""
        stream = "stream"
        group = "group"
        message_id = await r.xadd(stream, {"foo": "bar"})

        await r.xgroup_create(stream, group, 0)
        # advance the last_delivered_id to the message_id
        await r.xgroup_setid(stream, group, message_id)
        expected = [
            {
                "name": group.encode(),
                "consumers": 0,
                "pending": 0,
                "last-delivered-id": message_id,
            }
        ]
        assert await r.xinfo_groups(stream) == expected

    @skip_if_server_version_lt("5.0.0")
    async def test_xinfo_consumers(self, r: redis.Redis):
        """XINFO CONSUMERS reports per-consumer pending counts."""
        stream = "stream"
        group = "group"
        consumer1 = "consumer1"
        consumer2 = "consumer2"
        await r.xadd(stream, {"foo": "bar"})
        await r.xadd(stream, {"foo": "bar"})
        await r.xadd(stream, {"foo": "bar"})

        await r.xgroup_create(stream, group, 0)
        await r.xreadgroup(group, consumer1, streams={stream: ">"}, count=1)
        await r.xreadgroup(group, consumer2, streams={stream: ">"})
        info = await r.xinfo_consumers(stream, group)
        assert len(info) == 2
        expected = [
            {"name": consumer1.encode(), "pending": 1},
            {"name": consumer2.encode(), "pending": 2},
        ]

        # we can't determine the idle time, so just make sure it's an int
        assert isinstance(info[0].pop("idle"), int)
        assert isinstance(info[1].pop("idle"), int)
        assert info == expected

    @skip_if_server_version_lt("5.0.0")
    async def test_xinfo_stream(self, r: redis.Redis):
        """XINFO STREAM exposes length and first/last entries."""
        stream = "stream"
        m1 = await r.xadd(stream, {"foo": "bar"})
        m2 = await r.xadd(stream, {"foo": "bar"})
        info = await r.xinfo_stream(stream)

        assert info["length"] == 2
        assert info["first-entry"] == await get_stream_message(r, stream, m1)
        assert info["last-entry"] == await get_stream_message(r, stream, m2)

    @skip_if_server_version_lt("5.0.0")
    async def test_xlen(self, r: redis.Redis):
        """XLEN is 0 for a missing stream and counts added entries."""
        stream = "stream"
        assert await r.xlen(stream) == 0
        await r.xadd(stream, {"foo": "bar"})
        await r.xadd(stream, {"foo": "bar"})
        assert await r.xlen(stream) == 2

    @skip_if_server_version_lt("5.0.0")
    async def test_xpending(self, r: redis.Redis):
        """XPENDING summary: totals, min/max ids, per-consumer counts."""
        stream = "stream"
        group = "group"
        consumer1 = "consumer1"
        consumer2 = "consumer2"
        m1 = await r.xadd(stream, {"foo": "bar"})
        m2 = await r.xadd(stream, {"foo": "bar"})
        await r.xgroup_create(stream, group, 0)

        # xpending on a group that has no consumers yet
        expected = {"pending": 0, "min": None, "max": None, "consumers": []}
        assert await r.xpending(stream, group) == expected

        # read 1 message from the group with each consumer
        await r.xreadgroup(group, consumer1, streams={stream: ">"}, count=1)
        await r.xreadgroup(group, consumer2, streams={stream: ">"}, count=1)

        expected = {
            "pending": 2,
            "min": m1,
            "max": m2,
            "consumers": [
                {"name": consumer1.encode(), "pending": 1},
                {"name": consumer2.encode(), "pending": 1},
            ],
        }
        assert await r.xpending(stream, group) == expected

    @skip_if_server_version_lt("5.0.0")
    async def test_xpending_range(self, r: redis.Redis):
        """XPENDING with a range returns one record per pending message."""
        stream = "stream"
        group = "group"
        consumer1 = "consumer1"
        consumer2 = "consumer2"
        m1 = await r.xadd(stream, {"foo": "bar"})
        m2 = await r.xadd(stream, {"foo": "bar"})
        await r.xgroup_create(stream, group, 0)

        # xpending range on a group that has no consumers yet
        assert await r.xpending_range(stream, group, min="-", max="+", count=5) == []

        # read 1 message from the group with each consumer
        await r.xreadgroup(group, consumer1, streams={stream: ">"}, count=1)
        await r.xreadgroup(group, consumer2, streams={stream: ">"}, count=1)

        response = await r.xpending_range(stream, group, min="-", max="+", count=5)
        assert len(response) == 2
        assert response[0]["message_id"] == m1
        assert response[0]["consumer"] == consumer1.encode()
        assert response[1]["message_id"] == m2
        assert response[1]["consumer"] == consumer2.encode()

    @skip_if_server_version_lt("5.0.0")
    async def test_xrange(self, r: redis.Redis):
        """XRANGE respects min/max bounds and count."""
        stream = "stream"
        m1 = await r.xadd(stream, {"foo": "bar"})
        m2 = await r.xadd(stream, {"foo": "bar"})
        m3 = await r.xadd(stream, {"foo": "bar"})
        m4 = await r.xadd(stream, {"foo": "bar"})

        def get_ids(results):
            # each result is a (message_id, fields) pair
            return [result[0] for result in results]

        results = await r.xrange(stream, min=m1)
        assert get_ids(results) == [m1, m2, m3, m4]

        results = await r.xrange(stream, min=m2, max=m3)
        assert get_ids(results) == [m2, m3]

        results = await r.xrange(stream, max=m3)
        assert get_ids(results) == [m1, m2, m3]

        results = await r.xrange(stream, max=m2, count=1)
        assert get_ids(results) == [m1]

    @skip_if_server_version_lt("5.0.0")
    async def test_xread(self, r: redis.Redis):
        """XREAD returns entries strictly after the supplied id."""
        stream = "stream"
        m1 = await r.xadd(stream, {"foo": "bar"})
        m2 = await r.xadd(stream, {"bing": "baz"})

        expected = [
            [
                stream.encode(),
                [
                    await get_stream_message(r, stream, m1),
                    await get_stream_message(r, stream, m2),
                ],
            ]
        ]
        # xread starting at 0 returns both messages
        assert await r.xread(streams={stream: 0}) == expected

        expected = [
            [
                stream.encode(),
                [
                    await get_stream_message(r, stream, m1),
                ],
            ]
        ]
        # xread starting at 0 and count=1 returns only the first message
        assert await r.xread(streams={stream: 0}, count=1) == expected

        expected = [
            [
                stream.encode(),
                [
                    await get_stream_message(r, stream, m2),
                ],
            ]
        ]
        # xread starting at m1 returns only the second message
        assert await r.xread(streams={stream: m1}) == expected

        # xread starting at the last message returns an empty list
        assert await r.xread(streams={stream: m2}) == []

    @skip_if_server_version_lt("5.0.0")
    async def test_xreadgroup(self, r: redis.Redis):
        """XREADGROUP delivery with '>', count, '$', noack and trimmed PEL."""
        stream = "stream"
        group = "group"
        consumer = "consumer"
        m1 = await r.xadd(stream, {"foo": "bar"})
        m2 = await r.xadd(stream, {"bing": "baz"})
        await r.xgroup_create(stream, group, 0)

        expected = [
            [
                stream.encode(),
                [
                    await get_stream_message(r, stream, m1),
                    await get_stream_message(r, stream, m2),
                ],
            ]
        ]
        # xreadgroup starting at 0 returns both messages
        assert await r.xreadgroup(group, consumer, streams={stream: ">"}) == expected

        await r.xgroup_destroy(stream, group)
        await r.xgroup_create(stream, group, 0)
        expected = [
            [
                stream.encode(),
                [
                    await get_stream_message(r, stream, m1),
                ],
            ]
        ]
        # xreadgroup with count=1 returns only the first message
        assert (
            await r.xreadgroup(group, consumer, streams={stream: ">"}, count=1)
            == expected
        )

        await r.xgroup_destroy(stream, group)

        # create the group using $ as the last id meaning subsequent reads
        # will only find messages added after this
        await r.xgroup_create(stream, group, "$")

        expected = []
        # xreadgroup starting after the last message returns an empty message list
        assert await r.xreadgroup(group, consumer, streams={stream: ">"}) == expected

        # xreadgroup with noack does not have any items in the PEL
        await r.xgroup_destroy(stream, group)
        await r.xgroup_create(stream, group, "0")
        assert (
            len(
                (
                    await r.xreadgroup(
                        group, consumer, streams={stream: ">"}, noack=True
                    )
                )[0][1]
            )
            == 2
        )
        # now there should be nothing pending
        assert (
            len((await r.xreadgroup(group, consumer, streams={stream: "0"}))[0][1]) == 0
        )

        await r.xgroup_destroy(stream, group)
        await r.xgroup_create(stream, group, "0")
        # delete all the messages in the stream
        expected = [
            [
                stream.encode(),
                [
                    (m1, {}),
                    (m2, {}),
                ],
            ]
        ]
        await r.xreadgroup(group, consumer, streams={stream: ">"})
        await r.xtrim(stream, 0)
        assert await r.xreadgroup(group, consumer, streams={stream: "0"}) == expected

    @skip_if_server_version_lt("5.0.0")
    async def test_xrevrange(self, r: redis.Redis):
        """XREVRANGE returns entries newest-first within max/min bounds."""
        stream = "stream"
        m1 = await r.xadd(stream, {"foo": "bar"})
        m2 = await r.xadd(stream, {"foo": "bar"})
        m3 = await r.xadd(stream, {"foo": "bar"})
        m4 = await r.xadd(stream, {"foo": "bar"})

        def get_ids(results):
            # each result is a (message_id, fields) pair
            return [result[0] for result in results]

        results = await r.xrevrange(stream, max=m4)
        assert get_ids(results) == [m4, m3, m2, m1]

        results = await r.xrevrange(stream, max=m3, min=m2)
        assert get_ids(results) == [m3, m2]

        results = await r.xrevrange(stream, min=m3)
        assert get_ids(results) == [m4, m3]

        results = await r.xrevrange(stream, min=m2, count=1)
        assert get_ids(results) == [m4]

    @skip_if_server_version_lt("5.0.0")
    async def test_xtrim(self, r: redis.Redis):
        """XTRIM (exact) returns the number of evicted entries."""
        stream = "stream"

        # trimming an empty key doesn't do anything
        assert await r.xtrim(stream, 1000) == 0

        await r.xadd(stream, {"foo": "bar"})
        await r.xadd(stream, {"foo": "bar"})
        await r.xadd(stream, {"foo": "bar"})
        await r.xadd(stream, {"foo": "bar"})

        # trimming an amount larger than the number of messages
        # doesn't do anything
        assert await r.xtrim(stream, 5, approximate=False) == 0

        # 1 message is trimmed
        assert await r.xtrim(stream, 3, approximate=False) == 1

    async def test_bitfield_operations(self, r: redis.Redis):
        """BITFIELD set/get/incrby pipelines and OVERFLOW handling."""
        # comments show affected bits
        # NOTE(review): selects a non-default logical db before the bitfield
        # ops — presumably to exercise BITFIELD outside db 0; confirm intent.
        await r.execute_command("SELECT", 10)
        bf = r.bitfield("a")
        resp = await (
            bf.set("u8", 8, 255)  # 00000000 11111111
            .get("u8", 0)  # 00000000
            .get("u4", 8)  # 1111
            .get("u4", 12)  # 1111
            .get("u4", 13)  # 111 0
            .execute()
        )
        assert resp == [0, 0, 15, 15, 14]

        # .set() returns the previous value...
        resp = await (
            bf.set("u8", 4, 1)  # 0000 0001
            .get("u16", 0)  # 00000000 00011111
            .set("u16", 0, 0)  # 00000000 00000000
            .execute()
        )
        assert resp == [15, 31, 31]

        # incrby adds to the value
        resp = await (
            bf.incrby("u8", 8, 254)  # 00000000 11111110
            .incrby("u8", 8, 1)  # 00000000 11111111
            .get("u16", 0)  # 00000000 11111111
            .execute()
        )
        assert resp == [254, 255, 255]

        # Verify overflow protection works as a method:
        await r.delete("a")
        resp = await (
            bf.set("u8", 8, 254)  # 00000000 11111110
            .overflow("fail")
            .incrby("u8", 8, 2)  # incrby 2 would overflow, None returned
            .incrby("u8", 8, 1)  # 00000000 11111111
            .incrby("u8", 8, 1)  # incrby 1 would overflow, None returned
            .get("u16", 0)  # 00000000 11111111
            .execute()
        )
        assert resp == [0, None, 255, None, 255]

        # Verify overflow protection works as arg to incrby:
        await r.delete("a")
        resp = await (
            bf.set("u8", 8, 255)  # 00000000 11111111
            .incrby("u8", 8, 1)  # 00000000 00000000  wrap default
            .set("u8", 8, 255)  # 00000000 11111111
            .incrby("u8", 8, 1, "FAIL")  # 00000000 11111111  fail
            .incrby("u8", 8, 1)  # 00000000 11111111  still fail
            .get("u16", 0)  # 00000000 11111111
            .execute()
        )
        assert resp == [0, 0, 0, None, None, 255]

        # test default default_overflow
        await r.delete("a")
        bf = r.bitfield("a", default_overflow="FAIL")
        resp = await (
            bf.set("u8", 8, 255)  # 00000000 11111111
            .incrby("u8", 8, 1)  # 00000000 11111111  fail default
            .get("u16", 0)  # 00000000 11111111
            .execute()
        )
        assert resp == [0, None, 255]

    @skip_if_server_version_lt("4.0.0")
    async def test_memory_stats(self, r: redis.Redis):
        """MEMORY STATS parses into a dict with nested per-db dicts."""
        # put a key into the current db to make sure that "db.<current-db>"
        # has data
        await r.set("foo", "bar")
        stats = await r.memory_stats()
        assert isinstance(stats, dict)
        for key, value in stats.items():
            if key.startswith("db."):
                assert isinstance(value, dict)

    @skip_if_server_version_lt("4.0.0")
    async def test_memory_usage(self, r: redis.Redis):
        """MEMORY USAGE returns an integer byte count for an existing key."""
        await r.set("foo", "bar")
        assert isinstance(await r.memory_usage("foo"), int)
@skip_if_server_version_lt("4.0.0") async def test_module_list(self, r: redis.Redis): assert isinstance(await r.module_list(), list) assert not await r.module_list() class TestBinarySave: async def test_binary_get_set(self, r: redis.Redis): assert await r.set(" foo bar ", "123") assert await r.get(" foo bar ") == b"123" assert await r.set(" foo\r\nbar\r\n ", "456") assert await r.get(" foo\r\nbar\r\n ") == b"456" assert await r.set(" \r\n\t\x07\x13 ", "789") assert await r.get(" \r\n\t\x07\x13 ") == b"789" assert sorted(await r.keys("*")) == [ b" \r\n\t\x07\x13 ", b" foo\r\nbar\r\n ", b" foo bar ", ] assert await r.delete(" foo bar ") assert await r.delete(" foo\r\nbar\r\n ") assert await r.delete(" \r\n\t\x07\x13 ") async def test_binary_lists(self, r: redis.Redis): mapping = { b"foo bar": [b"1", b"2", b"3"], b"foo\r\nbar\r\n": [b"4", b"5", b"6"], b"foo\tbar\x07": [b"7", b"8", b"9"], } # fill in lists for key, value in mapping.items(): await r.rpush(key, *value) # check that KEYS returns all the keys as they are assert sorted(await r.keys("*")) == sorted(mapping.keys()) # check that it is possible to get list content by key name for key, value in mapping.items(): assert await r.lrange(key, 0, -1) == value async def test_22_info(self, r: redis.Redis): """ Older Redis versions contained 'allocation_stats' in INFO that was the cause of a number of bugs when parsing. 
""" info = ( "allocation_stats:6=1,7=1,8=7141,9=180,10=92,11=116,12=5330," "13=123,14=3091,15=11048,16=225842,17=1784,18=814,19=12020," "20=2530,21=645,22=15113,23=8695,24=142860,25=318,26=3303," "27=20561,28=54042,29=37390,30=1884,31=18071,32=31367,33=160," "34=169,35=201,36=10155,37=1045,38=15078,39=22985,40=12523," "41=15588,42=265,43=1287,44=142,45=382,46=945,47=426,48=171," "49=56,50=516,51=43,52=41,53=46,54=54,55=75,56=647,57=332," "58=32,59=39,60=48,61=35,62=62,63=32,64=221,65=26,66=30," "67=36,68=41,69=44,70=26,71=144,72=169,73=24,74=37,75=25," "76=42,77=21,78=126,79=374,80=27,81=40,82=43,83=47,84=46," "85=114,86=34,87=37,88=7240,89=34,90=38,91=18,92=99,93=20," "94=18,95=17,96=15,97=22,98=18,99=69,100=17,101=22,102=15," "103=29,104=39,105=30,106=70,107=22,108=21,109=26,110=52," "111=45,112=33,113=67,114=41,115=44,116=48,117=53,118=54," "119=51,120=75,121=44,122=57,123=44,124=66,125=56,126=52," "127=81,128=108,129=70,130=50,131=51,132=53,133=45,134=62," "135=12,136=13,137=7,138=15,139=21,140=11,141=20,142=6,143=7," "144=11,145=6,146=16,147=19,148=1112,149=1,151=83,154=1," "155=1,156=1,157=1,160=1,161=1,162=2,166=1,169=1,170=1,171=2," "172=1,174=1,176=2,177=9,178=34,179=73,180=30,181=1,185=3," "187=1,188=1,189=1,192=1,196=1,198=1,200=1,201=1,204=1,205=1," "207=1,208=1,209=1,214=2,215=31,216=78,217=28,218=5,219=2," "220=1,222=1,225=1,227=1,234=1,242=1,250=1,252=1,253=1," ">=256=203" ) parsed = parse_info(info) assert "allocation_stats" in parsed assert "6" in parsed["allocation_stats"] assert ">=256" in parsed["allocation_stats"] async def test_large_responses(self, r: redis.Redis): """The PythonParser has some special cases for return values > 1MB""" # load up 5MB of data into a key data = "".join([ascii_letters] * (5000000 // len(ascii_letters))) await r.set("a", data) assert await r.get("a") == data.encode() async def test_floating_point_encoding(self, r: redis.Redis): """ High precision floating point values sent to the server should keep precision. 
""" timestamp = 1349673917.939762 await r.zadd("a", {"a1": timestamp}) assert await r.zscore("a", "a1") == timestamp
38.112349
88
0.549103
9fac0ae404f354f71ef4c0a964679fd6510e9e87
604
py
Python
tools/show-profile-results.py
aps-sids/zulip
54fd3219411c9051f5125b7ebd941e95898b8c5d
[ "Apache-2.0" ]
1
2015-09-28T09:50:11.000Z
2015-09-28T09:50:11.000Z
tools/show-profile-results.py
aps-sids/zulip
54fd3219411c9051f5125b7ebd941e95898b8c5d
[ "Apache-2.0" ]
7
2021-04-13T15:47:58.000Z
2022-02-11T03:46:47.000Z
tools/show-profile-results.py
aps-sids/zulip
54fd3219411c9051f5125b7ebd941e95898b8c5d
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python import sys import pstats ''' This is a helper script to make it easy to show profile results after using a Python decorator. It's meant to be a simple example that you can hack on, or better yet, you can find more advanced tools for showing profiler results. ''' try: fn = sys.argv[1] except: print ''' Please supply a filename. (If you use the profiled decorator, the file will have a suffix of ".profile".) ''' sys.exit(1) p = pstats.Stats(fn) p.strip_dirs().sort_stats('cumulative').print_stats(25) p.strip_dirs().sort_stats('time').print_stats(25)
24.16
66
0.705298
c0147bcfdfdd8e79d3a34ecfeb6498411be1bc46
503
py
Python
pythran/tests/cases/clip2.py
SylvainCorlay/pythran
908ec070d837baf77d828d01c3e35e2f4bfa2bfa
[ "BSD-3-Clause" ]
1
2018-03-24T00:33:03.000Z
2018-03-24T00:33:03.000Z
pythran/tests/cases/clip2.py
SylvainCorlay/pythran
908ec070d837baf77d828d01c3e35e2f4bfa2bfa
[ "BSD-3-Clause" ]
null
null
null
pythran/tests/cases/clip2.py
SylvainCorlay/pythran
908ec070d837baf77d828d01c3e35e2f4bfa2bfa
[ "BSD-3-Clause" ]
null
null
null
# pythran export clip(complex128[], float64), limit (complex128[]) # runas import numpy as np ; a = np.arange(2, dtype=complex); clip(a, .5), clip(a[2::4], .5) import numpy as np def limit (x, epsilon=1e-6): out = np.empty(shape=x.shape, dtype=x.dtype) mask1 = np.abs(x) < epsilon out[mask1] = 0 mask2 = np.logical_not(mask1) out[mask2] = x[mask2] / np.abs(x[mask2]) return out def clip (z, _max): mask = np.abs(z) > _max z[mask] = limit(z[mask]) * _max return z
26.473684
92
0.610338
86b44bbe3b709582b062b99d973c610eed27f565
5,579
py
Python
test/dlc_tests/eks/mxnet/inference/test_eks_mxnet_inference.py
Yixiao99/deep-learning-containers
01f078adf5abfb92e802b326511981bdd4a8c85c
[ "Apache-2.0" ]
1
2021-10-22T04:36:45.000Z
2021-10-22T04:36:45.000Z
test/dlc_tests/eks/mxnet/inference/test_eks_mxnet_inference.py
Yixiao99/deep-learning-containers
01f078adf5abfb92e802b326511981bdd4a8c85c
[ "Apache-2.0" ]
32
2021-06-10T21:21:29.000Z
2021-08-06T22:37:37.000Z
test/dlc_tests/eks/mxnet/inference/test_eks_mxnet_inference.py
Yixiao99/deep-learning-containers
01f078adf5abfb92e802b326511981bdd4a8c85c
[ "Apache-2.0" ]
1
2021-04-20T05:05:11.000Z
2021-04-20T05:05:11.000Z
import os import random import pytest from time import sleep from invoke import run import test.test_utils.eks as eks_utils import test.test_utils as test_utils @pytest.mark.model("resnet50") def test_eks_mxnet_neuron_inference(mxnet_inference, neuron_only): if "eia" in mxnet_inference or "neuron" not in mxnet_inference: pytest.skip("Skipping EKS Neuron Test for EIA and Non Neuron Images") num_replicas = "1" rand_int = random.randint(4001, 6000) processor = "neuron" server_cmd = "/usr/local/bin/entrypoint.sh -m mxnet-resnet50=https://aws-dlc-sample-models.s3.amazonaws.com/mxnet/Resnet50-neuron.mar -t /home/model-server/config.properties" yaml_path = os.path.join(os.sep, "tmp", f"mxnet_single_node_{processor}_inference_{rand_int}.yaml") inference_service_name = selector_name = f"resnet50-{processor}-{rand_int}" search_replace_dict = { "<NUM_REPLICAS>": num_replicas, "<SELECTOR_NAME>": selector_name, "<INFERENCE_SERVICE_NAME>": inference_service_name, "<DOCKER_IMAGE_BUILD_ID>": mxnet_inference, "<SERVER_CMD>": server_cmd } search_replace_dict["<NUM_INF1S>"] = "1" eks_utils.write_eks_yaml_file_from_template( eks_utils.get_single_node_inference_template_path("mxnet", processor), yaml_path, search_replace_dict ) try: run("kubectl apply -f {}".format(yaml_path)) port_to_forward = random.randint(49152, 65535) if eks_utils.is_service_running(selector_name): eks_utils.eks_forward_port_between_host_and_container(selector_name, port_to_forward, "8080") assert test_utils.request_mxnet_inference(port=port_to_forward, model="mxnet-resnet50") except ValueError as excp: eks_utils.LOGGER.error("Service is not running: %s", excp) finally: run("kubectl cluster-info dump") run(f"kubectl delete deployment {selector_name}") run(f"kubectl delete service {selector_name}") @pytest.mark.model("squeezenet") def test_eks_mxnet_squeezenet_inference(mxnet_inference): if "eia" in mxnet_inference or "neuron" in mxnet_inference: pytest.skip("Skipping EKS Test for EIA and neuron images") 
num_replicas = "1" rand_int = random.randint(4001, 6000) processor = "gpu" if "gpu" in mxnet_inference else "cpu" model = "squeezenet=https://s3.amazonaws.com/model-server/models/squeezenet_v1.1/squeezenet_v1.1.model" yaml_path = os.path.join(os.sep, "tmp", f"mxnet_single_node_{processor}_inference_{rand_int}.yaml") inference_service_name = selector_name = f"squeezenet-service-{rand_int}" search_replace_dict = { "<MODELS>": model, "<NUM_REPLICAS>": num_replicas, "<SELECTOR_NAME>": selector_name, "<INFERENCE_SERVICE_NAME>": inference_service_name, "<DOCKER_IMAGE_BUILD_ID>": mxnet_inference } if processor == "gpu": search_replace_dict["<NUM_GPUS>"] = "1" eks_utils.write_eks_yaml_file_from_template( eks_utils.get_single_node_inference_template_path("mxnet", processor), yaml_path, search_replace_dict ) try: run("kubectl apply -f {}".format(yaml_path)) port_to_forward = random.randint(49152, 65535) if eks_utils.is_service_running(selector_name): eks_utils.eks_forward_port_between_host_and_container(selector_name, port_to_forward, "8080") assert test_utils.request_mxnet_inference(port=port_to_forward) except ValueError as excp: eks_utils.LOGGER.error("Service is not running: %s", excp) finally: run(f"kubectl delete deployment {selector_name}") run(f"kubectl delete service {selector_name}") @pytest.mark.skip("Flaky test. Same test passes on EC2. Fails for gpu-inference for mx1.7. 
Refer: https://github.com/aws/deep-learning-containers/issues/587") @pytest.mark.integration("gluonnlp") @pytest.mark.model("bert_sst") def test_eks_mxnet_gluonnlp_inference(mxnet_inference, py3_only): if "eia" in mxnet_inference: pytest.skip("Skipping EKS Test for EIA") num_replicas = "1" rand_int = random.randint(4001, 6000) processor = "gpu" if "gpu" in mxnet_inference else "cpu" model = "https://aws-dlc-sample-models.s3.amazonaws.com/bert_sst/bert_sst.mar" yaml_path = os.path.join(os.sep, "tmp", f"mxnet_single_node_gluonnlp_{processor}_inference_{rand_int}.yaml") inference_service_name = selector_name = f"gluonnlp-service-{processor}-{rand_int}" search_replace_dict = { "<MODELS>": model, "<NUM_REPLICAS>": num_replicas, "<SELECTOR_NAME>": selector_name, "<INFERENCE_SERVICE_NAME>": inference_service_name, "<DOCKER_IMAGE_BUILD_ID>": mxnet_inference } if processor == "gpu": search_replace_dict["<NUM_GPUS>"] = "1" eks_utils.write_eks_yaml_file_from_template( eks_utils.get_single_node_inference_template_path("mxnet", processor), yaml_path, search_replace_dict ) try: run("kubectl apply -f {}".format(yaml_path)) port_to_forward = random.randint(49152, 65535) if eks_utils.is_service_running(selector_name): eks_utils.eks_forward_port_between_host_and_container(selector_name, port_to_forward, "8080") assert test_utils.request_mxnet_inference_gluonnlp(port=port_to_forward) except ValueError as excp: eks_utils.LOGGER.error("Service is not running: %s", excp) finally: run(f"kubectl delete deployment {selector_name}") run(f"kubectl delete service {selector_name}")
37.952381
178
0.714823
629e4edc04f530f036cf2fa23564ea9d65f7cf92
2,600
py
Python
dashboard/views.py
Attila-Sasvari/django_blog
7aea29932b62d0c46ba1963a3685b6320730a73e
[ "MIT", "Unlicense" ]
null
null
null
dashboard/views.py
Attila-Sasvari/django_blog
7aea29932b62d0c46ba1963a3685b6320730a73e
[ "MIT", "Unlicense" ]
null
null
null
dashboard/views.py
Attila-Sasvari/django_blog
7aea29932b62d0c46ba1963a3685b6320730a73e
[ "MIT", "Unlicense" ]
null
null
null
from django.shortcuts import get_object_or_404, render, redirect from django.http import HttpResponse, HttpResponseRedirect from django.contrib.auth.decorators import login_required from django.db.models import Avg, Max, Min, Q, Count, Sum, F from blog.models import Blog, BlogCounts from django.contrib.auth.models import User from django.db.models.functions import TruncDate from datetime import datetime from django.contrib import messages from .models import DailyStats from django.http import JsonResponse def dashboard(request): articles_count = Blog.objects.count() oldest_article = Blog.objects.aggregate(Min('created_at')) authors_count = Blog.objects.aggregate(Count('author', distinct=True)) read_number_stats = BlogCounts.objects.aggregate( Avg('read_number'), Max('read_number'), Min('read_number'), Sum('read_number')) articles_by_day = Blog.objects\ .annotate(created_day=TruncDate('created_at'))\ .values('created_day')\ .annotate(article_count=Count('created_day'))\ .order_by() highest_read = Blog.objects.order_by('-blogcounts__read_number')[:3] context = { "articles_count": articles_count, "articles_by_day": articles_by_day, "authors_count": authors_count, "oldest_article": oldest_article, "read_number_stats": read_number_stats, "highest_read": highest_read } return render(request, 'dashboard.html', context) def update_daily(request): if request.method == 'POST': articles_count = Blog.objects.count() authors_count = Blog.objects.aggregate(Count('author', distinct=True)) read_number_stats = BlogCounts.objects.aggregate( Avg('read_number'), Max('read_number'), Min('read_number'), Sum('read_number')) print(read_number_stats) """ highest_read_title = Blog.objects.order_by( '-blogcounts__read_number')[:1]["title"] """ highest_read_title = "valamilyen" daily_stats_obj = DailyStats(articles_count=articles_count, authors_count=authors_count["author__count"], highest_read_title=highest_read_title, read_number_avg=read_number_stats["read_number__avg"], 
read_number_max=read_number_stats["read_number__max"], read_number_min=read_number_stats["read_number__min"], read_number_sum=read_number_stats["read_number__sum"]) daily_stats_obj.save() return JsonResponse({'message': 'Daily Stats have been saved to database.'})
38.235294
146
0.698846
c5b9267f47a5afd909a23cddedd9ff39419b7ceb
3,655
py
Python
tests/thanks_tests.py
LeesahMasko/piwikibot
024af387ff48c21526ee206541178157d2653ddc
[ "MIT" ]
null
null
null
tests/thanks_tests.py
LeesahMasko/piwikibot
024af387ff48c21526ee206541178157d2653ddc
[ "MIT" ]
6
2021-02-27T03:35:42.000Z
2021-03-07T22:17:40.000Z
tests/thanks_tests.py
LeesahMasko/piwikibot
024af387ff48c21526ee206541178157d2653ddc
[ "MIT" ]
null
null
null
"""Tests for thanks-related code.""" # # (C) Pywikibot team, 2016-2021 # # Distributed under the terms of the MIT license. # import unittest from contextlib import suppress from pywikibot.page import Page, User from tests.aspects import TestCase NO_THANKABLE_REVS = 'There is no recent change which can be test thanked.' class TestThankRevision(TestCase): """Test thanks for revisions.""" family = 'wikipedia' code = 'test' write = True def test_thank_revision(self): """Test thanks for normal revisions. NOTE: This test relies on activity in recentchanges, and there must make edits made before reruns of this test. Please see https://phabricator.wikimedia.org/T137836. """ site = self.get_site() data = site.recentchanges(total=20) for rev in data: revid = rev['revid'] username = rev['user'] user = User(site, username) if user.is_thankable: break else: self.skipTest(NO_THANKABLE_REVS) before_time = site.getcurrenttimestamp() site.thank_revision(revid, source='pywikibot test') log_entries = site.logevents(logtype='thanks', total=5, page=user, start=before_time, reverse=True) try: next(iter(log_entries)) except StopIteration: found_log = False else: found_log = True self.assertTrue(found_log) def test_self_thank(self): """Test that thanking oneself causes an error. This test is not in TestThankRevisionErrors because it may require making a successful edit in order to test the API call thanking the user running the test. 
""" site = self.get_site() my_name = self.get_userpage().username data = site.usercontribs(user=my_name, total=1) for rev in data: revid = rev['revid'] break else: test_page = Page(site, 'Pywikibot Thanks test') test_page.text += '* ~~~~\n' test_page.save('Pywikibot Thanks test') revid = test_page.latest_revision_id self.assertAPIError('invalidrecipient', None, site.thank_revision, revid, source='pywikibot test') class TestThankRevisionErrors(TestCase): """Test errors when thanking revisions.""" family = 'wikipedia' code = 'test' write = -1 def test_bad_recipient(self): """Test that thanking a bad recipient causes an error.""" site = self.get_site() data = site.recentchanges(total=20) for rev in data: revid = rev['revid'] username = rev['user'] user = User(site, username) if not user.is_thankable: break else: self.skipTest(NO_THANKABLE_REVS) self.assertAPIError('invalidrecipient', None, site.thank_revision, revid, source='pywikibot test') def test_invalid_revision(self): """Test that passing an invalid revision ID causes an error.""" site = self.get_site() invalid_revids = (0, -1, 0.99, 'zero, minus one, and point nine nine', (0, -1, 0.99), [0, -1, 0.99]) for invalid_revid in invalid_revids: self.assertAPIError('invalidrevision', None, site.thank_revision, invalid_revid, source='pywikibot test') if __name__ == '__main__': # pragma: no cover with suppress(SystemExit): unittest.main()
31.508621
78
0.592886
12dd18931e52642d1cd15b8d2b313f2b15fc35f8
76
py
Python
python/test/lotto645.py
gangserver/py_test
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
[ "Apache-2.0" ]
null
null
null
python/test/lotto645.py
gangserver/py_test
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
[ "Apache-2.0" ]
null
null
null
python/test/lotto645.py
gangserver/py_test
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
[ "Apache-2.0" ]
null
null
null
from scipy.stats import randint rgen = randint(1, 45) print(rgen.rvs(6))
10.857143
31
0.710526
5b70f4479d64adf2ce8498dd7d210465a89e939a
12,204
py
Python
great_expectations/datasource/data_connector/configured_asset_sql_data_connector.py
Gfeuillen/great_expectations
9c1513ad8ffa92a26a0fd37bf5a4ef258f7e89a0
[ "Apache-2.0" ]
null
null
null
great_expectations/datasource/data_connector/configured_asset_sql_data_connector.py
Gfeuillen/great_expectations
9c1513ad8ffa92a26a0fd37bf5a4ef258f7e89a0
[ "Apache-2.0" ]
null
null
null
great_expectations/datasource/data_connector/configured_asset_sql_data_connector.py
Gfeuillen/great_expectations
9c1513ad8ffa92a26a0fd37bf5a4ef258f7e89a0
[ "Apache-2.0" ]
null
null
null
from copy import deepcopy from typing import Dict, List, Optional from great_expectations.core.batch import ( BatchDefinition, BatchRequest, BatchSpec, IDDict, ) from great_expectations.core.batch_spec import SqlAlchemyDatasourceBatchSpec from great_expectations.datasource.data_connector.data_connector import DataConnector from great_expectations.datasource.data_connector.util import ( batch_definition_matches_batch_request, ) from great_expectations.execution_engine import ExecutionEngine try: import sqlalchemy as sa except ImportError: sa = None class ConfiguredAssetSqlDataConnector(DataConnector): """ A DataConnector that requires explicit listing of SQL tables you want to connect to. Args: name (str): The name of this DataConnector datasource_name (str): The name of the Datasource that contains it execution_engine (ExecutionEngine): An ExecutionEngine data_assets (str): data_assets batch_spec_passthrough (dict): dictionary with keys that will be added directly to batch_spec """ def __init__( self, name: str, datasource_name: str, execution_engine: Optional[ExecutionEngine] = None, data_assets: Optional[Dict[str, dict]] = None, batch_spec_passthrough: Optional[dict] = None, ): if data_assets is None: data_assets = {} self._data_assets = data_assets super().__init__( name=name, datasource_name=datasource_name, execution_engine=execution_engine, batch_spec_passthrough=batch_spec_passthrough, ) @property def data_assets(self) -> Dict[str, dict]: return self._data_assets def add_data_asset( self, name: str, config: dict, ): """ Add data_asset to DataConnector using data_asset name as key, and data_asset configuration as value. 
""" self._data_assets[name] = config def _get_batch_identifiers_list_from_data_asset_config( self, data_asset_name, data_asset_config, ): if "table_name" in data_asset_config: table_name = data_asset_config["table_name"] else: table_name = data_asset_name if "splitter_method" in data_asset_config: splitter_fn = getattr(self, data_asset_config["splitter_method"]) split_query = splitter_fn( table_name=table_name, **data_asset_config["splitter_kwargs"] ) rows = self._execution_engine.engine.execute(split_query).fetchall() # Zip up split parameters with column names column_names = self._get_column_names_from_splitter_kwargs( data_asset_config["splitter_kwargs"] ) batch_identifiers_list = [dict(zip(column_names, row)) for row in rows] else: batch_identifiers_list = [{}] return batch_identifiers_list def _refresh_data_references_cache(self): self._data_references_cache = {} for data_asset_name in self.data_assets: data_asset = self.data_assets[data_asset_name] batch_identifiers_list = ( self._get_batch_identifiers_list_from_data_asset_config( data_asset_name, data_asset, ) ) # TODO Abe 20201029 : Apply sorters to batch_identifiers_list here # TODO Will 20201102 : add sorting code here self._data_references_cache[data_asset_name] = batch_identifiers_list def _get_column_names_from_splitter_kwargs(self, splitter_kwargs) -> List[str]: column_names: List[str] = [] if "column_names" in splitter_kwargs: column_names = splitter_kwargs["column_names"] elif "column_name" in splitter_kwargs: column_names = [splitter_kwargs["column_name"]] return column_names def get_available_data_asset_names(self) -> List[str]: """ Return the list of asset names known by this DataConnector. 
Returns: A list of available names """ return list(self.data_assets.keys()) def get_unmatched_data_references(self) -> List[str]: """ Returns the list of data_references unmatched by configuration by looping through items in _data_references_cache and returning data_reference that do not have an associated data_asset. Returns: list of data_references that are not matched by configuration. """ return [] def get_batch_definition_list_from_batch_request(self, batch_request: BatchRequest): self._validate_batch_request(batch_request=batch_request) if len(self._data_references_cache) == 0: self._refresh_data_references_cache() batch_definition_list: List[BatchDefinition] = [] try: sub_cache = self._data_references_cache[batch_request.data_asset_name] except KeyError as e: raise KeyError( f"data_asset_name {batch_request.data_asset_name} is not recognized." ) for batch_identifiers in sub_cache: batch_definition: BatchDefinition = BatchDefinition( datasource_name=self.datasource_name, data_connector_name=self.name, data_asset_name=batch_request.data_asset_name, batch_identifiers=IDDict(batch_identifiers), batch_spec_passthrough=batch_request.batch_spec_passthrough, ) if batch_definition_matches_batch_request(batch_definition, batch_request): batch_definition_list.append(batch_definition) return batch_definition_list def _get_data_reference_list_from_cache_by_data_asset_name( self, data_asset_name: str ) -> List[str]: return self._data_references_cache[data_asset_name] def _map_data_reference_to_batch_definition_list( self, data_reference, data_asset_name: Optional[str] = None #: Any, ) -> Optional[List[BatchDefinition]]: # Note: This is a bit hacky, but it works. 
In sql_data_connectors, data references *are* dictionaries, # allowing us to invoke `IDDict(data_reference)` return [ BatchDefinition( datasource_name=self.datasource_name, data_connector_name=self.name, data_asset_name=data_asset_name, batch_identifiers=IDDict(data_reference), ) ] def build_batch_spec( self, batch_definition: BatchDefinition ) -> SqlAlchemyDatasourceBatchSpec: """ Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function. Args: batch_definition (BatchDefinition): to be used to build batch_spec Returns: BatchSpec built from batch_definition """ data_asset_name: str = batch_definition.data_asset_name if ( data_asset_name in self.data_assets and self.data_assets[data_asset_name].get("batch_spec_passthrough") and isinstance( self.data_assets[data_asset_name].get("batch_spec_passthrough"), dict ) ): # batch_spec_passthrough from data_asset batch_spec_passthrough = deepcopy( self.data_assets[data_asset_name]["batch_spec_passthrough"] ) batch_definition_batch_spec_passthrough = ( deepcopy(batch_definition.batch_spec_passthrough) or {} ) # batch_spec_passthrough from Batch Definition supercedes batch_spec_passthrough from data_asset batch_spec_passthrough.update(batch_definition_batch_spec_passthrough) batch_definition.batch_spec_passthrough = batch_spec_passthrough batch_spec: BatchSpec = super().build_batch_spec( batch_definition=batch_definition ) return SqlAlchemyDatasourceBatchSpec(batch_spec) def _generate_batch_spec_parameters_from_batch_definition( self, batch_definition: BatchDefinition ) -> dict: """ Build BatchSpec parameters from batch_definition with the following components: 1. data_asset_name from batch_definition 2. batch_identifiers from batch_definition 3. 
data_asset from data_connector Args: batch_definition (BatchDefinition): to be used to build batch_spec Returns: dict built from batch_definition """ data_asset_name: str = batch_definition.data_asset_name return { "data_asset_name": data_asset_name, "table_name": data_asset_name, "batch_identifiers": batch_definition.batch_identifiers, **self.data_assets[data_asset_name], } # Splitter methods for listing partitions def _split_on_whole_table( self, table_name: str, ): """ 'Split' by returning the whole table Note: the table_name parameter is a required to keep the signature of this method consistent with other methods. """ return sa.select([sa.true()]) def _split_on_column_value( self, table_name: str, column_name: str, ): """Split using the values in the named column""" # query = f"SELECT DISTINCT(\"{self.column_name}\") FROM {self.table_name}" return sa.select([sa.func.distinct(sa.column(column_name))]).select_from( sa.text(table_name) ) def _split_on_converted_datetime( self, table_name: str, column_name: str, date_format_string: str = "%Y-%m-%d", ): """Convert the values in the named column to the given date_format, and split on that""" # query = f"SELECT DISTINCT( strftime(\"{date_format_string}\", \"{self.column_name}\")) as my_var FROM {self.table_name}" return sa.select( [ sa.func.distinct( sa.func.strftime( date_format_string, sa.column(column_name), ) ) ] ).select_from(sa.text(table_name)) def _split_on_divided_integer( self, table_name: str, column_name: str, divisor: int ): """Divide the values in the named column by `divisor`, and split on that""" # query = f"SELECT DISTINCT(\"{self.column_name}\" / {divisor}) AS my_var FROM {self.table_name}" return sa.select( [sa.func.distinct(sa.cast(sa.column(column_name) / divisor, sa.Integer))] ).select_from(sa.text(table_name)) def _split_on_mod_integer(self, table_name: str, column_name: str, mod: int): """Divide the values in the named column by `divisor`, and split on that""" # query = f"SELECT 
DISTINCT(\"{self.column_name}\" / {divisor}) AS my_var FROM {self.table_name}" return sa.select( [sa.func.distinct(sa.cast(sa.column(column_name) % mod, sa.Integer))] ).select_from(sa.text(table_name)) def _split_on_multi_column_values( self, table_name: str, column_names: List[str], ): """Split on the joint values in the named columns""" # query = f"SELECT DISTINCT(\"{self.column_name}\") FROM {self.table_name}" return ( sa.select([sa.column(column_name) for column_name in column_names]) .distinct() .select_from(sa.text(table_name)) ) def _split_on_hashed_column( self, table_name: str, column_name: str, hash_digits: int, ): """Note: this method is experimental. It does not work with all SQL dialects.""" # query = f"SELECT MD5(\"{self.column_name}\") = {matching_hash}) AS hashed_var FROM {self.table_name}" return sa.select([sa.func.md5(sa.column(column_name))]).select_from( sa.text(table_name) )
36
130
0.643559
15fda50cf1926ad81468beac9b37f60c12b3a4e9
16,982
py
Python
sparsecontainer.py
asaluja/cca-mt
c57fcdd5cff9550bcde81d861b004f254d1b22bf
[ "Apache-2.0" ]
null
null
null
sparsecontainer.py
asaluja/cca-mt
c57fcdd5cff9550bcde81d861b004f254d1b22bf
[ "Apache-2.0" ]
null
null
null
sparsecontainer.py
asaluja/cca-mt
c57fcdd5cff9550bcde81d861b004f254d1b22bf
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python -tt ''' File: sparsecontainer.py (formerly eigentypepy) Date: December 25, 2014 (major changes: January 19, 2015) Description: contains SparseContainer, SparseContext (child of SparseContainer), and ContextExtractor classes. ''' import sys, commands, string, gc import numpy as np import scipy.sparse as sp ''' base class for eigenphrase and eigencontext classes (below). Contains in-built OOV handling, with the cut-off as an argument to the constructor. Types with frequency <= cutoff will be aggregated into an <unk> token with a parameter based on the aggregated <= cutoff count types. Eigentype and its child class eigencontext are meant for feature extraction and should contain sparse token matrices (as well as the means to construct them), OOV information, and a dictionary that maps between type strings and IDs ''' class SparseContainer(object): def __init__(self, oov_cutoff, estimate_oov_param): self.type_id_map = {} #read/write DS self.id_type_map = {} #held-out eval self._rows = [] self._cols = [] self._vals = [] self._cutoff = oov_cutoff self._oov_map = {} self._counter = 0 self.token_matrix = None oov_id = 0 if estimate_oov_param else -1 #OOV assigned ID 0, or -1 if OOV not being used self.type_id_map["<unk> ||| <unk>"] = oov_id self.id_type_map[oov_id] = "<unk> ||| <unk>" self.estimate_oov = estimate_oov_param def add_token(self, token, excluded_tokens): if token in self.type_id_map: #features make it here only after they exceed the oov cutoff, so we increment as normal self._rows.append(self._counter) self._cols.append(self.type_id_map[token]) self._vals.append(1.) 
else: #either token is in oov map or this is the first time we've seen this token count, row_idxs = self._oov_map[token] if token in self._oov_map else (0, []) #pull up count and row_idxs, else assign init count += 1 #add current token row_idxs.append(self._counter) if count > self._cutoff: #if cut off exceeded, add to seen tokens token_id = -1 if token in excluded_tokens: #if phrase pair is in excluded pairs token_id = self.type_id_map["<unk> ||| <unk>"] #either OOV if estimating OOV param or -1 else: #seen enough times and not in excluded pairs token_id = len(self.type_id_map) if self.estimate_oov else len(self.type_id_map)-1 self.type_id_map[token] = token_id #and add to observed features self.id_type_map[token_id] = token if token_id > -1: #meeans we are estimating OOV param or the phrase pair is valid and we add it for row_idx in row_idxs: self._rows.append(row_idx) self._cols.append(token_id) self._vals.append(1.) self._oov_map.pop(token, None) #and remove from potential OOV features (returns None if not there) else: #otherwise, add to OOV map self._oov_map[token] = (count, row_idxs) self._counter += 1 ''' creates scipy csr matrix based on collected features, and also handles OOVs To do: should we filter zero_rows here and then return? ''' def create_sparse_matrix(self): if self.estimate_oov: if len(self._oov_map) > 0: #there is at least one feature that we have seen <= self.cut_off times print "Number of phrase pair types <= oov cut-off %d: %d"%(self._cutoff, len(self._oov_map)) oov_feat_id = self.type_id_map["<unk> ||| <unk>"] for low_freq_token in self._oov_map: #loop through and add indicator to <unk> (pos-dep if flagged) count, row_idxs = self._oov_map[low_freq_token] for row_idx in row_idxs: self._rows.append(row_idx) self._cols.append(oov_feat_id) self._vals.append(1.) 
self.token_matrix = sp.csr_matrix((self._vals, (self._rows, self._cols)), shape = (self._counter, len(self.type_id_map))) else: #don't want to include OOV entry in token matrix self.token_matrix = sp.csr_matrix((self._vals, (self._rows, self._cols)), shape = (self._counter, len(self.type_id_map)-1)) def filter_zero_rows(self): assert self.token_matrix is not None rows = [] cols = [] vals = [] counter = 0 zero_rows = [] for row_idx in xrange(self.token_matrix.shape[0]): dummy, row_cols = self.token_matrix[row_idx,:].nonzero() if len(row_cols) > 0: #not a zero row for row_col in row_cols: rows.append(counter) cols.append(row_col) vals.append(1.) counter += 1 else: zero_rows.append(row_idx) self.token_matrix = sp.csr_matrix((vals, (rows, cols)), shape = (counter, len(self.type_id_map)-1)) return zero_rows def get_type_map(self): return self.type_id_map def get_token_matrix(self, subset_idxs=None): if self.token_matrix is not None: if subset_idxs is None: return self.token_matrix else: return self.token_matrix[subset_idxs,:] else: sys.stderr.write("ERROR! Token matrix has not been estimated or set; returning None\n") return None def get_token_phrase(self, token_id): if token_id in self.id_type_map: return self.id_type_map[token_id] else: sys.stderr.write("ERROR! Provided ID not in map!\n") sys.exit() class SparseContext(SparseContainer): def __init__(self, oov_cutoff = 0): super(SparseContext, self).__init__(oov_cutoff, True) #initializing the parent parameters oov_id = self.type_id_map.pop("<unk> ||| <unk>", None) #SparseContext will handle its own OOVs (when constructing sparse matrix) assert oov_id is not None self.id_type_map.pop(oov_id, None) def add_token(self, features): for feature in features: if feature in self.type_id_map: #features make it here only after they exceed the oov cutoff, so we increment as normal self._rows.append(self._counter) self._cols.append(self.type_id_map[feature]) self._vals.append(1.) 
else: #either feature is in oov map or this is the first time we've seen this feature count, row_idxs = self._oov_map[feature] if feature in self._oov_map else (0, []) #pull up count and row_idxs, else assign init count += 1 row_idxs.append(self._counter) if count > self._cutoff: #if cut off exceeded, add to seen features feature_id = len(self.type_id_map) self.type_id_map[feature] = feature_id #and add to observed features self.id_type_map[feature_id] = feature for row_idx in row_idxs: self._rows.append(row_idx) self._cols.append(feature_id) self._vals.append(1.) self._oov_map.pop(feature, None) #and remove from potential OOV features (returns None if not there) else: #otherwise, add to OOV features self._oov_map[feature] = (count, row_idxs) self._counter += 1 ''' ignores OOVs, etc. ''' def add_token_vec(self, rep): self._vals.append(rep) def create_dense_matrix(self): stacked_features = np.vstack(self._vals) mean_center_vec = np.divide(stacked_features.sum(axis=0), stacked_features.shape[0]) self.token_matrix = stacked_features - mean_center_vec print "Created dense matrix from word vectors" del self._vals gc.collect() def create_sparse_matrix(self, pos_depend, con_length): if len(self._oov_map) > 0: #there is at least one feature that we have seen <= self.cut_off times print "Number of context types <= oov cut-off %d: %d"%(self._cutoff, len(self._oov_map)) oov_feat_id = len(self.type_id_map) #corresponds to id of new OOV feat if pos_depend: for dist in range(con_length): #add position-dependent OOVs key = "<unk>_dist%d"%(dist+1) self.type_id_map[key] = oov_feat_id oov_feat_id = len(self.type_id_map) #update oov_feat_id for next iteration - otherwise it is unchanged else: self.type_id_map["<unk>"] = oov_feat_id #unk is position-independent for low_freq_token in self._oov_map: #loop through and add indicator to <unk> (pos-dep if flagged) count, row_idxs = self._oov_map[low_freq_token] if pos_depend: #update oov_feat_id oov_key = "<unk>_dist" + 
low_freq_token.split('_dist')[1] oov_feat_id = self.type_id_map[oov_key] for row_idx in row_idxs: self._rows.append(row_idx) self._cols.append(oov_feat_id) self._vals.append(1.) self.token_matrix = sp.csr_matrix((self._vals, (self._rows, self._cols)), shape = (self._counter, len(self.type_id_map))) print "Created sparse matrix from counts" def filter_zero_rows(self, zero_rows): rows = [] cols = [] vals = [] counter = 0 zero_rows = set(zero_rows) #testing membership is O(1) in set original_size = self.token_matrix.shape[0] for row_idx in xrange(original_size): if row_idx not in zero_rows: dummy, row_cols = self.token_matrix[row_idx,:].nonzero() assert len(row_cols) > 0 for row_col in row_cols: rows.append(counter) cols.append(row_col) vals.append(1.) counter += 1 assert counter + len(zero_rows) == original_size self.token_matrix = sp.csr_matrix((vals, (rows, cols)), shape = (counter, len(self.type_id_map))) class ContextExtractor: '''constructor that takes into account context window size, position dependence, and any filtering required''' def __init__(self, con_length, pos_depend, stop_words, topN_features, vecs_filename): self.con_length = con_length self.pos_depend = pos_depend self.vec_dim = 0 self.rep_dict = None if vecs_filename != "": #reads in word vectors if defined self.rep_dict = self.read_vectors(vecs_filename) self.vec_dim = len(self.rep_dict[self.rep_dict.keys()[0]]) self.filter_stop = stop_words != "" self.stop_words = [] self.filter_features = topN_features != "" self.freq_features = [] if self.filter_stop: #read in stop words sw_fh = open(stop_words, 'rb') for line in sw_fh: self.stop_words.append(line.strip()) sw_fh.close() if self.filter_features: #read in most frequent words freq_fh = open(topN_features, 'rb') for line in freq_fh: self.freq_features.append(line.strip()) freq_fh.close() def read_vectors(self, filename): fh = open(filename, 'r') vecs = {} for line in fh: if len(line.strip().split()) > 2: word = line.strip().split()[0] rep = 
np.array([float(i) for i in line.strip().split()[1:]]) vec_len = np.linalg.norm(rep) vecs[word] = np.divide(rep, vec_len) if vec_len > 0 else np.zeros(len(rep)) fh.close() return vecs def is_repvec(self): return self.vec_dim > 0 ''' function that extracts the context of a phrase (given left and right indices of its span) from a sentence provided as a list. The function does the required checks if stop-word filtering is enabled and/or a provided list of the most frequent words is provided, in which case we restrict our feature space to those words, and any words outside the list are replaced with <OTHER>. Note that this function may return empty lists for the left/right contexts if all con_length context words are stop words. ''' def extract_context(self, sentence_items, left_idx, right_idx): left_con_idx = left_idx - self.con_length left_con_words = [] while left_con_idx < left_idx: if left_con_idx < 0: word_to_add = np.zeros((self.vec_dim,)) if self.vec_dim > 0 else "<s>" left_con_words.append(word_to_add) left_con_idx = 0 #skip to beginning of sentence else: context_word = sentence_items[left_con_idx] if not self.filter_stop or not context_word in self.stop_words: #if not checking for sw, or if checking, context is not in sw if self.filter_features and context_word not in self.freq_features: #if checking for listed featureds and context not in list context_word = "<OTHER>" #then replace with other --> assumption is filter_features and wordvecs cannot be both true if self.pos_depend and self.vec_dim == 0: #decorate with distance from word if position dependent and we're not using wordvecs context_word += "_dist%d"%(left_idx-left_con_idx) if self.vec_dim > 0: #using word vectors rep = self.rep_dict[context_word] if context_word in self.rep_dict else np.zeros((self.vec_dim,)) context_word = rep left_con_words.append(context_word) left_con_idx += 1 #skips if its a stop word right_con_idx = right_idx + 1 right_con_words = [] while right_con_idx < right_idx + 
self.con_length + 1: #right side; symmetric to left if right_con_idx >= len(sentence_items): word_to_add = self.rep_dict["</s>"] if self.vec_dim > 0 else "</s>" right_con_words.append(word_to_add) break else: context_word = sentence_items[right_con_idx] if not self.filter_stop or not context_word in self.stop_words: if self.filter_features and context_word not in self.freq_features: context_word = "<OTHER>" if self.pos_depend and self.vec_dim == 0: context_word += "_dist%d"%(right_con_idx-right_idx) if self.vec_dim > 0: rep = self.rep_dict[context_word] if context_word in self.rep_dict else np.zeros((self.vec_dim,)) context_word = rep right_con_words.append(context_word) right_con_idx += 1 if self.vec_dim > 0 and self.pos_depend: #concatenate the vectors if len(left_con_words) < self.con_length: #check if we need to zero-pad diff = self.con_length - len(left_con_words) for dummy in range(diff): left_con_words.append(np.zeros((self.vec_dim,))) concat_left = np.hstack(left_con_words) if len(right_con_words) < self.con_length: #correspondingly on right diff = self.con_length - len(right_con_words) for dummy in range(diff): right_con_words.append(np.zeros((self.vec_dim,))) concat_right = np.hstack(right_con_words) return concat_left, concat_right elif self.vec_dim > 0: sum_left = np.sum(np.vstack(left_con_words), axis=0) sum_right = np.sum(np.vstack(right_con_words), axis=0) return sum_left, sum_right else: return left_con_words, right_con_words class config: '''constructor reads in config parameters from file''' def __init__(self, filename): self.file_locs = {} fh = open(filename, 'rb') for line in fh: structure, file_loc = line.strip().split('=') self.file_locs[structure] = file_loc def get_fileloc(self, eigtype): if eigtype in self.file_locs: return self.file_locs[eigtype] else: sys.stderr.write("Eigentype '%s' not found, so cannot read/write. The field name is most likely incorrect\n") return ""
49.510204
146
0.600459
5582c36755a9c23046b9379864318d179fc41a91
411
py
Python
coinbase/vwap_calculator.py
flaviostutz/coinbase-vwap-python
87b9a8e819f320af170c7200f92757639b5fb752
[ "MIT" ]
null
null
null
coinbase/vwap_calculator.py
flaviostutz/coinbase-vwap-python
87b9a8e819f320af170c7200f92757639b5fb752
[ "MIT" ]
null
null
null
coinbase/vwap_calculator.py
flaviostutz/coinbase-vwap-python
87b9a8e819f320af170c7200f92757639b5fb752
[ "MIT" ]
null
null
null
from mathutils import weighted_avg from decimal import * weighted_avgs = {} def CalculateVWAP(matchInfo, averagerMaxSize, onVMAPInfo): pid = matchInfo['product_id'] sa = weighted_avgs.get(pid) if sa == None: sa = weighted_avg.WeightedAvg(averagerMaxSize) weighted_avgs[pid] = sa sa.add(Decimal(matchInfo['price']), Decimal(matchInfo['size'])) onVMAPInfo(sa.avg(), pid)
22.833333
67
0.690998
9e5a094b835add4112dd4cb73f1d217e90178dc0
1,014
py
Python
Course 01 - Getting Started with Python/Extra Studies/Conditionals/ex017.py
marcoshsq/python_practical_exercises
77136cd4bc0f34acde3380ffdc5af74f7a960670
[ "MIT" ]
9
2022-03-22T16:45:17.000Z
2022-03-25T20:22:35.000Z
Course 01 - Getting Started with Python/Extra Studies/Conditionals/ex017.py
marcoshsq/python_practical_exercises
77136cd4bc0f34acde3380ffdc5af74f7a960670
[ "MIT" ]
null
null
null
Course 01 - Getting Started with Python/Extra Studies/Conditionals/ex017.py
marcoshsq/python_practical_exercises
77136cd4bc0f34acde3380ffdc5af74f7a960670
[ "MIT" ]
3
2022-03-22T17:03:38.000Z
2022-03-29T17:20:55.000Z
# Exercise 017 - Payment Manager """ Develop a program that calculates the amount to be paid for a product, Considering your normal price and payment term: - cash/check: 10% discount - cash on card: 5% discount - up to 2x on the card: formal price - 3x or more on the card: 20% interest """ product_price = float(input("What is the price of the Product: ")) print( "-=" * 25, """ Choose a way to pay: [1] - Cash/check: 10% discount [2] - Cash on credit card: 5% discount [3] - 2x on the card: regular price [4] - 3x or more on the card: 20% interest """, ) payment = int(input("Choose the form of payment: ")) if payment == 1: print(f"The amount to be paid is R${product_price * 0.9}") elif payment == 2: print(f"The amount to be paid is R${product_price * 0.95}") elif payment == 3: print(f"The amount to be paid is R${product_price}") elif payment == 4: print(f"The amount to be paid is R${product_price * 1.2}") else: print("Invalid choice!")
26
74
0.641026
3d440c955c21aadef37987095957bf0281875c7b
12,894
py
Python
src/run_pt.py
ashmitbhattarai/ITEC876Project
a5a50143bc6df33202bf11cf69f8b1e8903634b3
[ "Apache-2.0" ]
null
null
null
src/run_pt.py
ashmitbhattarai/ITEC876Project
a5a50143bc6df33202bf11cf69f8b1e8903634b3
[ "Apache-2.0" ]
null
null
null
src/run_pt.py
ashmitbhattarai/ITEC876Project
a5a50143bc6df33202bf11cf69f8b1e8903634b3
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team and authors from University of Illinois at Chicago. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import logging import argparse import random import json import numpy as np import torch from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from pytorch_pretrained_bert.tokenization import BertTokenizer from pytorch_pretrained_bert.modeling import BertModel, BertPreTrainingHeads from pytorch_pretrained_bert.modeling import BertPreTrainedModel from pytorch_pretrained_bert.optimization import BertAdam import modelconfig class BertForMTPostTraining(BertPreTrainedModel): def __init__(self, config): super(BertForMTPostTraining, self).__init__(config) self.bert = BertModel(config) self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight) self.qa_outputs = torch.nn.Linear(config.hidden_size, 2) self.apply(self.init_bert_weights) def forward(self, mode, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None, start_positions=None, end_positions=None): sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) if mode=="review": prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) if masked_lm_labels is not None and next_sentence_label is 
not None: loss_fct = torch.nn.CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss return total_loss else: return prediction_scores, seq_relationship_score elif mode=="squad": logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = torch.nn.CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) qa_loss = (start_loss + end_loss) / 2 return qa_loss else: return start_logits, end_logits else: raise Exception("unknown mode.") logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) def warmup_linear(x, warmup=0.002): if x < warmup: return x/warmup return 1.0 - x def train(args): #load squad data for pre-training. args.train_batch_size=int(args.train_batch_size / args.gradient_accumulation_steps) review_train_examples=np.load(os.path.join(args.review_data_dir, "data.npz") ) squad_train_examples=np.load(os.path.join(args.squad_data_dir, "data.npz") ) num_train_steps = args.num_train_steps # load bert pre-train data. 
review_train_data = TensorDataset( torch.from_numpy(review_train_examples["input_ids"]), torch.from_numpy(review_train_examples["segment_ids"]), torch.from_numpy(review_train_examples["input_mask"]), torch.from_numpy(review_train_examples["masked_lm_ids"]), torch.from_numpy(review_train_examples["next_sentence_labels"]) ) review_train_dataloader = DataLoader(review_train_data, sampler=RandomSampler(review_train_data), batch_size=args.train_batch_size , drop_last=True) squad_train_data = TensorDataset( torch.from_numpy(squad_train_examples["input_ids"]), torch.from_numpy(squad_train_examples["segment_ids"]), torch.from_numpy(squad_train_examples["input_mask"]), torch.from_numpy(squad_train_examples["start_positions"]), torch.from_numpy(squad_train_examples["end_positions"] ) ) squad_train_dataloader = DataLoader(squad_train_data, sampler=RandomSampler(squad_train_data), batch_size=args.train_batch_size , drop_last=True) #we do not have any valiation for pretuning #model = BertForMTPostTraining.from_pretrained(modelconfig.MODEL_ARCHIVE_MAP[args.bert_model]) model = BertForMTPostTraining.from_pretrained('bert-base-uncased') if args.fp16: model.half() model.cuda() # Prepare optimizer param_optimizer = [(k, v) for k, v in model.named_parameters() if v.requires_grad==True] param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] t_total = num_train_steps if args.fp16: try: from apex.fp16_utils import FP16_Optimizer from apex.optimizers import FusedAdam except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, 
bias_correction=False, ) if args.loss_scale == 0: optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=t_total) global_step=0 step=0 batch_loss=0. model.train() model.zero_grad() training=True review_iter=iter(review_train_dataloader) squad_iter=iter(squad_train_dataloader) while training: try: batch = next(review_iter) except: review_iter=iter(review_train_dataloader) batch = next(review_iter) batch = tuple(t.cuda() for t in batch) input_ids, segment_ids, input_mask, masked_lm_ids, next_sentence_labels = batch review_loss = model("review", input_ids.long(), segment_ids.long(), input_mask.long(), masked_lm_ids.long(), next_sentence_labels.long(), None, None) try: batch = next(squad_iter) except: squad_iter=iter(squad_train_dataloader) batch = next(squad_iter) batch = tuple(t.cuda() for t in batch) input_ids, segment_ids, input_mask, start_positions, end_positions = batch squad_loss = model("squad", input_ids.long(), segment_ids.long(), input_mask.long(), None, None, start_positions.long(), end_positions.long() ) loss=review_loss + squad_loss if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps batch_loss+=loss if args.fp16: optimizer.backward(loss) else: loss.backward() if (step + 1) % args.gradient_accumulation_steps == 0: # modify learning rate with special warm up BERT uses lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion) for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step() optimizer.zero_grad() global_step += 1 if global_step % 50 ==0: logging.info("step %d batch_loss %f ", global_step, batch_loss) batch_loss=0. 
if global_step % args.save_checkpoints_steps==0: model.float() torch.save(model.state_dict(), os.path.join(args.output_dir, "pytorch_model_"+str(global_step)+".bin") ) if args.fp16: model.half() if global_step>=num_train_steps: training=False break step+=1 model.float() torch.save(model.state_dict(), os.path.join(args.output_dir, "pytorch_model.bin") ) def main(): parser = argparse.ArgumentParser() parser.add_argument("--bert_model", default="bert-base", type=str, required=True, help="pretrained weights of bert.") parser.add_argument("--review_data_dir", default=None, type=str, required=True, help="dir of review numpy file dir.") parser.add_argument("--squad_data_dir", default=None, type=str, required=True, help="dir of squad preprocessed numpy file.") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") parser.add_argument("--train_batch_size", default=16, type=int, help="training batch size for both review and squad.") parser.add_argument("--do_train", default=False, action="store_true", help="Whether to run training.") parser.add_argument("--learning_rate", default=3e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_steps", default=50000, type=int, help="Number of training steps.") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Number of warmup steps.") parser.add_argument("--save_checkpoints_steps", default=5000, type=int, help="How often to save the model checkpoint.") parser.add_argument("--seed", default=12345, type=int, help="random seed.") parser.add_argument('--gradient_accumulation_steps', type=int, default=2) parser.add_argument('--fp16', default=False, action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument('--loss_scale', type=float, default=0, help="Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") args = parser.parse_args() random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) os.makedirs(args.output_dir, exist_ok=True) if args.do_train: logger.info("***** Running training *****") logger.info(" Batch size = %d", args.train_batch_size) train(args) if __name__ == "__main__": main()
42.137255
172
0.632232
54819c9961ce8decf6cabef0b02ca60a59c491c5
3,786
py
Python
src/tests/ftest/osa/osa_offline_extend.py
myliucom/daos
74999a3307a530b307bdd62e3d4e3260648f2315
[ "BSD-2-Clause-Patent" ]
null
null
null
src/tests/ftest/osa/osa_offline_extend.py
myliucom/daos
74999a3307a530b307bdd62e3d4e3260648f2315
[ "BSD-2-Clause-Patent" ]
null
null
null
src/tests/ftest/osa/osa_offline_extend.py
myliucom/daos
74999a3307a530b307bdd62e3d4e3260648f2315
[ "BSD-2-Clause-Patent" ]
1
2021-04-13T16:04:21.000Z
2021-04-13T16:04:21.000Z
#!/usr/bin/python """ (C) Copyright 2020-2021 Intel Corporation. SPDX-License-Identifier: BSD-2-Clause-Patent """ import time from osa_utils import OSAUtils from test_utils_pool import TestPool from apricot import skipForTicket class OSAOfflineExtend(OSAUtils): # pylint: disable=too-many-ancestors """ Test Class Description: This test runs daos_server offline extend test cases. :avocado: recursive """ def setUp(self): """Set up for test case.""" super().setUp() self.dmg_command = self.get_dmg_command() # Start an additional server. self.extra_servers = self.params.get("test_servers", "/run/extra_servers/*") def run_offline_extend_test(self, num_pool, data=False): """Run the offline extend without data. Args: num_pool (int) : total pools to create for testing purposes. data (bool) : whether pool has no data or to create some data in pool. Defaults to False. """ # Create a pool pool = {} pool_uuid = [] # Extend a ranks 4 and 5 rank = [4, 5] for val in range(0, num_pool): pool[val] = TestPool(self.context, dmg_command=self.dmg_command) pool[val].get_params(self) # Split total SCM and NVME size for creating multiple pools. pool[val].scm_size.value = int(pool[val].scm_size.value / num_pool) pool[val].nvme_size.value = int(pool[val].nvme_size.value / num_pool) pool[val].create() pool_uuid.append(pool[val].uuid) self.pool = pool[val] if data: self.write_single_object() # Start the additional servers and extend the pool self.log.info("Extra Servers = %s", self.extra_servers) self.start_additional_servers(self.extra_servers) # Give sometime for the additional server to come up. 
time.sleep(5) # Extend the pool_uuid, rank and targets for val in range(0, num_pool): self.pool = pool[val] scm_size = self.pool.scm_size nvme_size = self.pool.nvme_size self.pool.display_pool_daos_space("Pool space: Beginning") pver_begin = self.get_pool_version() self.log.info("Pool Version at the beginning %s", pver_begin) output = self.dmg_command.pool_extend(self.pool.uuid, rank, scm_size, nvme_size) self.log.info(output) self.is_rebuild_done(3) self.assert_on_rebuild_failure() pver_extend = self.get_pool_version() self.log.info("Pool Version after extend %d", pver_extend) # Check pool version incremented after pool extend self.assertTrue(pver_extend > pver_begin, "Pool Version Error: After extend") for val in range(0, num_pool): display_string = "Pool{} space at the End".format(val) pool[val].display_pool_daos_space(display_string) if data: self.verify_single_object() @skipForTicket("DAOS-6644") def test_osa_offline_extend(self): """ JIRA ID: DAOS-4751 Test Description: Validate Offline Extend :avocado: tags=all,daily_regression :avocado: tags=hw,medium,ib2 :avocado: tags=osa,checksum :avocado: tags=osa_extend,offline_extend """ # Perform extend testing with 1 pool self.run_offline_extend_test(1, True)
36.057143
76
0.578183
5c7174faa06c0ea11b9dc25add1cb1b4fd065e04
10,718
py
Python
ignite/contrib/handlers/tqdm_logger.py
VinhLoiIT/ignite
3b2b9655ea9f80ce49b8a9f1c2d72f80e2a95f56
[ "BSD-3-Clause" ]
1
2020-03-04T20:07:45.000Z
2020-03-04T20:07:45.000Z
ignite/contrib/handlers/tqdm_logger.py
hefv57/ignite
a22a0f5e909ac70d2a1f76a60b6e84b2134f196c
[ "BSD-3-Clause" ]
null
null
null
ignite/contrib/handlers/tqdm_logger.py
hefv57/ignite
a22a0f5e909ac70d2a1f76a60b6e84b2134f196c
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- import warnings import torch from ignite.engine import Events, Engine from ignite.engine.events import CallableEventWithFilter from ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler class ProgressBar(BaseLogger): """ TQDM progress bar handler to log training progress and computed metrics. Args: persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``) bar_format (str, optional): Specify a custom bar string formatting. May impact performance. [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]']. Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where l_bar='{desc}: {percentage:3.0f}%|' and r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_. **tqdm_kwargs: kwargs passed to tqdm progress bar. By default, progress bar description displays "Epoch [5/10]" where 5 is the current epoch and 10 is the number of epochs. If tqdm_kwargs defines `desc`, e.g. "Predictions", than the description is "Predictions [5/10]" if number of epochs is more than one otherwise it is simply "Predictions". Examples: Simple progress bar .. code-block:: python trainer = create_supervised_trainer(model, optimizer, loss) pbar = ProgressBar() pbar.attach(trainer) # Progress bar will looks like # Epoch [2/50]: [64/128] 50%|█████ [06:17<12:34] Log output to a file instead of stderr (tqdm's default output) .. code-block:: python trainer = create_supervised_trainer(model, optimizer, loss) log_file = open("output.log", "w") pbar = ProgressBar(file=log_file) pbar.attach(trainer) Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED` (such as :class:`~ignite.metrics.RunningAverage`) .. 
code-block:: python trainer = create_supervised_trainer(model, optimizer, loss) RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss') pbar = ProgressBar() pbar.attach(trainer, ['loss']) # Progress bar will looks like # Epoch [2/50]: [64/128] 50%|█████ , loss=0.123 [06:17<12:34] Directly attach the engine's output .. code-block:: python trainer = create_supervised_trainer(model, optimizer, loss) pbar = ProgressBar() pbar.attach(trainer, output_transform=lambda x: {'loss': x}) # Progress bar will looks like # Epoch [2/50]: [64/128] 50%|█████ , loss=0.123 [06:17<12:34] Note: When adding attaching the progress bar to an engine, it is recommend that you replace every print operation in the engine's handlers triggered every iteration with ``pbar.log_message`` to guarantee the correct format of the stdout. Note: When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering, please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_. Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set to an empty string value. """ _events_order = [ Events.STARTED, Events.EPOCH_STARTED, Events.ITERATION_STARTED, Events.ITERATION_COMPLETED, Events.EPOCH_COMPLETED, Events.COMPLETED, ] def __init__( self, persist=False, bar_format="{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]", **tqdm_kwargs ): try: from tqdm.autonotebook import tqdm except ImportError: raise RuntimeError( "This contrib module requires tqdm to be installed. 
" "Please install it with command: \n pip install tqdm" ) self.pbar_cls = tqdm self.pbar = None self.persist = persist self.bar_format = bar_format self.tqdm_kwargs = tqdm_kwargs def _reset(self, pbar_total): self.pbar = self.pbar_cls( total=pbar_total, leave=self.persist, bar_format=self.bar_format, initial=1, **self.tqdm_kwargs ) def _close(self, engine): if self.pbar: self.pbar.close() self.pbar = None @staticmethod def _compare_lt(event1, event2): i1 = ProgressBar._events_order.index(event1) i2 = ProgressBar._events_order.index(event2) return i1 < i2 def log_message(self, message): """ Logs a message, preserving the progress bar correct output format. Args: message (str): string you wish to log. """ from tqdm import tqdm tqdm.write(message, file=self.tqdm_kwargs.get("file", None)) def attach( self, engine, metric_names=None, output_transform=None, event_name=Events.ITERATION_COMPLETED, closing_event_name=Events.EPOCH_COMPLETED, ): """ Attaches the progress bar to an engine object. Args: engine (Engine): engine object. metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available metrics. output_transform (callable, optional): a function to select what you want to print from the engine's output. This function may return either a dictionary with entries in the format of ``{name: value}``, or a single scalar, which will be displayed with the default name `output`. event_name: event's name on which the progress bar advances. Valid events are from :class:`~ignite.engine.Events`. closing_event_name: event's name on which the progress bar is closed. Valid events are from :class:`~ignite.engine.Events`. 
Note: accepted output value types are numbers, 0d and 1d torch tensors and strings """ desc = self.tqdm_kwargs.get("desc", "Epoch") if event_name not in engine._allowed_events: raise ValueError("Logging event {} is not in allowed events for this engine".format(event_name.name)) if isinstance(closing_event_name, CallableEventWithFilter): if closing_event_name.filter != CallableEventWithFilter.default_event_filter: raise ValueError("Closing Event should not be a filtered event") if not self._compare_lt(event_name, closing_event_name): raise ValueError( "Logging event {} should be called before closing event {}".format(event_name, closing_event_name) ) log_handler = _OutputHandler(desc, metric_names, output_transform, closing_event_name=closing_event_name) super(ProgressBar, self).attach(engine, log_handler, event_name) engine.add_event_handler(closing_event_name, self._close) class _OutputHandler(BaseOutputHandler): """Helper handler to log engine's output and/or metrics Args: description (str): progress bar description. metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available metrics. output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number. For example, `output_transform = lambda output: output` This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot with corresponding keys. closing_event_name: event's name on which the progress bar is closed. Valid events are from :class:`~ignite.engine.Events` or any `event_name` added by :meth:`~ignite.engine.Engine.register_events`. 
""" def __init__( self, description, metric_names=None, output_transform=None, closing_event_name=Events.EPOCH_COMPLETED ): if metric_names is None and output_transform is None: # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler metric_names = [] super(_OutputHandler, self).__init__( description, metric_names, output_transform, another_engine=None, global_step_transform=None ) self.closing_event_name = closing_event_name @staticmethod def get_max_number_events(event_name, engine): if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED): return engine.state.epoch_length if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED): return engine.state.max_epochs return 1 def __call__(self, engine, logger, event_name): pbar_total = self.get_max_number_events(event_name, engine) if logger.pbar is None: logger._reset(pbar_total=pbar_total) desc = self.tag max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine) if max_num_of_closing_events > 1: global_step = engine.state.get_event_attrib_value(self.closing_event_name) desc += " [{}/{}]".format(global_step, max_num_of_closing_events) logger.pbar.set_description(desc) metrics = self._setup_output_metrics(engine) rendered_metrics = {} for key, value in metrics.items(): if isinstance(value, torch.Tensor): if value.ndimension() == 0: rendered_metrics[key] = value.item() elif value.ndimension() == 1: for i, v in enumerate(value): k = "{}_{}".format(key, i) rendered_metrics[k] = v.item() else: warnings.warn("ProgressBar can not log " "tensor with {} dimensions".format(value.ndimension())) else: rendered_metrics[key] = value if rendered_metrics: logger.pbar.set_postfix(**rendered_metrics) global_step = engine.state.get_event_attrib_value(event_name) global_step = (global_step - 1) % pbar_total + 1 logger.pbar.update(global_step - logger.pbar.n)
40.293233
118
0.637152
ba687141f9a3a29e79977e248d7b2cb403d2e887
20,613
py
Python
src/backend/qai_testbed_backend/usecases/test_description.py
ads-ad-itcenter/qunomon.forked
48d532692d353fe2d3946f62b227f834f9349034
[ "Apache-2.0" ]
16
2020-11-18T05:43:55.000Z
2021-11-27T14:43:26.000Z
src/backend/qai_testbed_backend/usecases/test_description.py
aistairc/qunomon
d4e9c5cb569b16addfbe6c33c73812065065a1df
[ "Apache-2.0" ]
1
2022-03-23T07:55:54.000Z
2022-03-23T13:24:11.000Z
src/backend/qai_testbed_backend/usecases/test_description.py
ads-ad-itcenter/qunomon.forked
48d532692d353fe2d3946f62b227f834f9349034
[ "Apache-2.0" ]
3
2021-02-12T01:56:31.000Z
2022-03-23T02:45:02.000Z
# Copyright © 2019 National Institute of Advanced Industrial Science and Technology (AIST). All rights reserved. from typing import Optional from qlib.utils.logging import get_logger, log from ..controllers.dto import Result from ..controllers.dto.test_description import GetTestDescriptionsRes, GetTestDescriptionDetailRes,\ DeleteTestDescriptionRes, PutTestDescriptionRes, PutTestDescriptionsReq, AppendTestDescriptionRes,\ AppendTestDescriptionReq, GetTestDescriptionAncestorsRes from ..entities.test import TestMapper from ..entities.ml_component import MLComponentMapper from ..entities.test_description import TestDescriptionMapper from ..entities.operand import OperandMapper from ..entities.quality_measurement import QualityMeasurementMapper from ..entities.inventory_td import InventoryTDMapper from ..entities.test_runner_param import TestRunnerParamMapper from ..entities.test_runner_param_template import TestRunnerParamTemplateMapper from ..across.exception import QAIException, QAINotFoundException, QAIInternalServerException,\ QAIInvalidRequestException from ..across.utils import is_num from ..gateways.extensions import sql_db from sqlalchemy.exc import SQLAlchemyError import datetime logger = get_logger() class TestDescriptionService: def __init__(self): # TODO 要DI pass @log(logger) def get(self, organizer_id: str, ml_component_id: int, testdescription_id: int) -> GetTestDescriptionDetailRes: try: td = TestDescriptionMapper.query. 
\ filter(TestMapper.ml_component_id == ml_component_id).\ filter(MLComponentMapper.org_id == organizer_id).\ filter(TestDescriptionMapper.id == testdescription_id).first() if td is None: raise QAINotFoundException(result_code='T34000', result_msg='not found test description') # delete_flagがTrueのTDであれば、エラーを返す。 if td.delete_flag is True: raise QAINotFoundException(result_code='T34001', result_msg='test description has already been deleted') return GetTestDescriptionDetailRes(result=Result(code='T32000', message="get detail success."), test_description_detail=td.to_dto_detail()) except QAIException as e: raise e except SQLAlchemyError as e: raise QAIInvalidRequestException('T39000', 'database error: {}'.format(e)) except Exception as e: raise QAIInternalServerException(result_code='T39999', result_msg='internal server error: {}'.format(e)) @log(logger) def get_list(self, organizer_id: str, ml_component_id: int) -> GetTestDescriptionsRes: try: test = TestMapper.query.\ filter(TestMapper.ml_component_id == ml_component_id).\ filter(MLComponentMapper.org_id == organizer_id).first() if test is None: raise QAINotFoundException(result_code='T14000', result_msg='not found test descriptions') # delete_flagがTrueのTDを除外したTestDescriptionMapperを作る mapper = TestDescriptionMapper.query. \ filter(TestDescriptionMapper.test_id == test.id). \ filter(TestDescriptionMapper.delete_flag == False). \ all() return GetTestDescriptionsRes( result=Result(code='T12000', message="get list success."), test=test.to_dto(mapper) ) except QAIException as e: raise e except SQLAlchemyError as e: raise QAIInvalidRequestException('T19000', 'database error: {}'.format(e)) except Exception as e: raise QAIInternalServerException(result_code='T19999', result_msg='internal server error: {}'.format(e)) @log(logger) def delete_test_description(self, organizer_id: str, ml_component_id: int, testdescription_id: int) -> DeleteTestDescriptionRes: try: test_description = TestDescriptionMapper.query. 
\ filter(TestMapper.ml_component_id == ml_component_id). \ filter(MLComponentMapper.org_id == organizer_id). \ filter(TestDescriptionMapper.id == testdescription_id).first() if test_description is None: raise QAINotFoundException('T54000', 'not found test descriptions') if test_description.delete_flag is True: raise QAINotFoundException('T54001', 'test description has already been deleted') # 子供がいる場合、子供のdelete_flagもTrueに変更する self._delete_children_td(organizer_id, ml_component_id, test_description.id) test_description.delete_flag = True sql_db.session.commit() return DeleteTestDescriptionRes( result=Result(code='T52000', message="delete success.") ) except QAIException as e: sql_db.session.rollback() raise e except SQLAlchemyError as e: sql_db.session.rollback() raise QAIInvalidRequestException('T59000', 'database error: {}'.format(e)) except Exception as e: sql_db.session.rollback() raise QAIInternalServerException(result_code='T59999', result_msg='internal server error: {}'.format(e)) def _delete_children_td(self, organizer_id: str, ml_component_id: int, parent_td_id: Optional[int]): td_children = TestDescriptionMapper.query. \ filter(TestMapper.ml_component_id == ml_component_id). \ filter(MLComponentMapper.org_id == organizer_id). \ filter(TestDescriptionMapper.parent_id == parent_td_id).all() for td_child in td_children: self._delete_children_td(organizer_id, ml_component_id, td_child.id) td_child.delete_flag = True @log(logger) def put_test_descriptions(self, organizer_id: str, ml_component_id: int, testdescription_id: int, req: PutTestDescriptionsReq) -> PutTestDescriptionRes: try: test_description = TestDescriptionMapper.query. \ filter(TestDescriptionMapper.id == testdescription_id). \ filter(TestMapper.ml_component_id == ml_component_id). 
\ filter(MLComponentMapper.org_id == organizer_id).first() if test_description is None: raise QAINotFoundException(result_code='T44000', result_msg='not found test descriptions') # delete_flagがTrueのTDであれば、エラーを返す。 if test_description.delete_flag is True: raise QAINotFoundException(result_code='T44001', result_msg='test description has already been deleted') test_description.name = req.name test_description.quality_dimension_id = req.quality_dimension_id test_description.test_runner_id = req.test_runner.id_ test_description.update_datetime = datetime.datetime.utcnow() InventoryTDMapper().query.\ filter(InventoryTDMapper.test_description_id == test_description.id).delete() OperandMapper().query.\ filter(OperandMapper.test_description_id == test_description.id).delete() TestRunnerParamMapper().query.\ filter(TestRunnerParamMapper.test_description_id == test_description.id).delete() self._add_operands(req, test_description) self._add_inventories(req, test_description) self._add_test_runner_params(req, test_description) self._update_value_target(req, test_description) sql_db.session.commit() return PutTestDescriptionRes( result=Result(code='T42000', message="put success."), test_description=test_description ) except QAIException as e: sql_db.session.rollback() raise e except Exception as e: sql_db.session.rollback() raise QAIInternalServerException(result_code='T49999', result_msg='internal server error: {}'.format(e)) @log(logger) def append_test_description(self, organizer_id: str, ml_component_id: int, req: AppendTestDescriptionReq) -> AppendTestDescriptionRes: try: test_description = TestDescriptionMapper() test = TestMapper.query. \ filter(TestMapper.ml_component_id == ml_component_id). 
\ filter(MLComponentMapper.org_id == organizer_id).first() test_description.name = req.name test_description.opinion = '' test_description.delete_flag = False if req.parent_id is not None: parent_td = TestDescriptionMapper.query.get(req.parent_id) if parent_td is None: raise QAINotFoundException('T24000', 'not found parent test description') if parent_td.delete_flag: raise QAINotFoundException('T24000', 'parent test description has deleted') test_description.parent_id = req.parent_id test_description.test_id = test.id test_description.quality_dimension_id = req.quality_dimension_id test_description.test_runner_id = req.test_runner.id_ sql_db.session.add(test_description) sql_db.session.flush() self._add_operands(req, test_description) self._add_inventories(req, test_description) self._add_test_runner_params(req, test_description) self._update_value_target(req, test_description) sql_db.session.commit() return AppendTestDescriptionRes( result=Result(code='T22000', message="append test description success.") ) except QAIException as e: sql_db.session.rollback() raise e except SQLAlchemyError as e: sql_db.session.rollback() raise QAIInvalidRequestException('T29000', 'database error: {}'.format(e)) except Exception as e: sql_db.session.rollback() raise QAIInternalServerException(result_code='T29999', result_msg='internal server error: {}'.format(e)) def _add_test_runner_params(self, req, test_description): try: test_runner_params = [] for test_runner_param_req in req.test_runner.params: # パラメータのminとmaxを取得する param_temp = TestRunnerParamTemplateMapper.query.get(test_runner_param_req.test_runner_param_template_id) if param_temp is None: raise QAINotFoundException(result_code='T94000', result_msg='not found TestRunnerParamTemplate') # min < 入力値 < maxのチェック if param_temp.value_type == "int" or param_temp.value_type == "float": # 入力値が数値か判定 if is_num(test_runner_param_req.value): if param_temp.min_value is not None: if param_temp.min_value > float(test_runner_param_req.value): 
raise QAIInvalidRequestException('T94001', f'parameter value({test_runner_param_req.value}) < min_val({param_temp.min_value})') if param_temp.max_value is not None: if param_temp.max_value < float(test_runner_param_req.value): raise QAIInvalidRequestException('T94001', f'parameter value({test_runner_param_req.value}) > max_val({param_temp.max_value})') else: raise QAIInvalidRequestException('T94001', f'parameter value({test_runner_param_req.value}) is invalid') test_runner_param = TestRunnerParamMapper() test_runner_param.value = test_runner_param_req.value test_runner_param.test_description_id = test_description.id # TODO test_runner_param_template_idが紐づいているテストランナーのものかチェック test_runner_param.test_runner_param_template_id = test_runner_param_req.test_runner_param_template_id test_runner_params.append(test_runner_param) sql_db.session.add_all(test_runner_params) except QAIException as e: sql_db.session.rollback() raise e except SQLAlchemyError as e: sql_db.session.rollback() raise QAIInvalidRequestException('T99000', 'database error: {}'.format(e)) except Exception as e: sql_db.session.rollback() raise QAIInternalServerException(result_code='T99999', result_msg='internal server error: {}'.format(e)) def _add_inventories(self, req, test_description): inventories = [] for inventory_req in req.target_inventories: inventory = InventoryTDMapper() inventory.inventory_id = inventory_req.inventory_id inventory.test_description_id = test_description.id # TODO template_inventory_idが紐づいているテストランナーのものかチェック inventory.template_inventory_id = inventory_req.template_inventory_id inventories.append(inventory) sql_db.session.add_all(inventories) def _add_operands(self, req, test_description): try: operands = [] for measurement_req in req.quality_measurements: # クライテリアの入力があった場合 if measurement_req.enable: # クライテリアのminとmaxを取得する quality_measurement = QualityMeasurementMapper.query.get(measurement_req.id_) if quality_measurement is None: raise QAINotFoundException(result_code='TA4000', 
result_msg='not found QualityMeasurement') # min < 入力値 < maxのチェック if quality_measurement.type == "int" or quality_measurement.type == "float": # 入力値が数値か判定 if is_num(measurement_req.value): if quality_measurement.min_value is not None: if quality_measurement.min_value > float(measurement_req.value): raise QAIInvalidRequestException('TA4001', f'measurement value({measurement_req.value}) < min_val({quality_measurement.min_value})') if quality_measurement.max_value is not None: if quality_measurement.max_value < float(measurement_req.value): raise QAIInvalidRequestException('TA4001', f'measurement value({measurement_req.value}) > max_val({quality_measurement.max_value})') else: raise QAIInvalidRequestException('TA4001', f'measurement value({measurement_req.value}) is invalid') operand = OperandMapper() operand.quality_measurement_id = measurement_req.id_ operand.value = measurement_req.value operand.test_description_id = test_description.id operand.relational_operator_id = measurement_req.relational_operator_id operand.enable = measurement_req.enable operands.append(operand) sql_db.session.add_all(operands) except QAIException as e: sql_db.session.rollback() raise e except SQLAlchemyError as e: sql_db.session.rollback() raise QAIInvalidRequestException('TA9000', 'database error: {}'.format(e)) except Exception as e: sql_db.session.rollback() raise QAIInternalServerException(result_code='TA9999', result_msg='internal server error: {}'.format(e)) def _update_value_target(self, req, test_description): """quality_measurementsが存在する場合、レポートへの指標値出力フラグを有効にする""" if (req.quality_measurements is None) or (len(req.quality_measurements) == 0): test_description.value_target = False else: test_description.value_target = True @log(logger) def set_star(self, organizer_id: str, ml_component_id: int, test_description_id: int) -> Result: try: td = TestDescriptionMapper.query. 
\ filter(TestMapper.ml_component_id == ml_component_id).\ filter(MLComponentMapper.org_id == organizer_id).\ filter(TestDescriptionMapper.id == test_description_id).first() if td is None: raise QAINotFoundException(result_code='T64000', result_msg='not found test description') # delete_flagがTrueのTDであれば、エラーを返す。 if td.delete_flag is True: raise QAIInvalidRequestException(result_code='T65000', result_msg='test description has already been deleted') td.star = True sql_db.session.commit() return Result(code='T62000', message='set star success.') except QAIException as e: sql_db.session.rollback() raise e except SQLAlchemyError as e: sql_db.session.rollback() raise QAIInvalidRequestException('T69999', 'database error: {}'.format(e)) @log(logger) def set_unstar(self, organizer_id: str, ml_component_id: int, test_description_id: int) -> Result: try: td = TestDescriptionMapper.query. \ filter(TestMapper.ml_component_id == ml_component_id).\ filter(MLComponentMapper.org_id == organizer_id).\ filter(TestDescriptionMapper.id == test_description_id).first() if td is None: raise QAINotFoundException(result_code='T74000', result_msg='not found test description') # delete_flagがTrueのTDであれば、エラーを返す。 if td.delete_flag is True: raise QAIInvalidRequestException(result_code='T75000', result_msg='test description has already been deleted') td.star = False sql_db.session.commit() return Result(code='T72000', message='set star success.') except QAIException as e: sql_db.session.rollback() raise e except SQLAlchemyError as e: sql_db.session.rollback() raise QAIInvalidRequestException('T79999', 'database error: {}'.format(e)) @log(logger) def get_ancestor(self, organizer_id: str, ml_component_id: int, test_description_id: int)\ -> GetTestDescriptionAncestorsRes: try: td = TestDescriptionMapper.query. 
\ filter(TestMapper.ml_component_id == ml_component_id).\ filter(MLComponentMapper.org_id == organizer_id).\ filter(TestDescriptionMapper.id == test_description_id).first() if td is None: raise QAINotFoundException(result_code='T84000', result_msg='not found test description') # delete_flagがTrueのTDであれば、エラーを返す。 if td.delete_flag is True: raise QAIInvalidRequestException(result_code='T85000', result_msg='test description has already been deleted') test_descriptions = [] next_td = td while next_td is not None: test_descriptions.append(next_td) next_td = TestDescriptionMapper.query. \ filter(TestMapper.ml_component_id == ml_component_id). \ filter(MLComponentMapper.org_id == organizer_id). \ filter(TestDescriptionMapper.id == next_td.parent_id). \ filter(TestDescriptionMapper.delete_flag == False).first() test_descriptions.reverse() return GetTestDescriptionAncestorsRes(result=Result(code='T82000', message="get detail success."), test_descriptions=[td.to_dto() for td in test_descriptions]) except QAIException as e: raise e except SQLAlchemyError as e: raise QAIInvalidRequestException('T89000', 'database error: {}'.format(e))
50.896296
168
0.629457
043cc53837f8e626b1daf317452b578fd22c75bd
56
py
Python
quines/quine2.py
WhitmanCS370-SP2021/examples
dae884df3c6ef5e20464e09c059c197fc962d53a
[ "MIT" ]
null
null
null
quines/quine2.py
WhitmanCS370-SP2021/examples
dae884df3c6ef5e20464e09c059c197fc962d53a
[ "MIT" ]
null
null
null
quines/quine2.py
WhitmanCS370-SP2021/examples
dae884df3c6ef5e20464e09c059c197fc962d53a
[ "MIT" ]
1
2021-03-23T19:57:52.000Z
2021-03-23T19:57:52.000Z
s = r"print 's = r\"' + s + '\"' + '\nexec(s)'" exec(s)
18.666667
47
0.375
92800e47026f1375893131f294b47f9cc14cf01b
1,265
py
Python
python/servotest.py
just4chill/imageprocessing
838beecba13497e5fa91e208304a34141ab1ac2d
[ "MIT" ]
null
null
null
python/servotest.py
just4chill/imageprocessing
838beecba13497e5fa91e208304a34141ab1ac2d
[ "MIT" ]
null
null
null
python/servotest.py
just4chill/imageprocessing
838beecba13497e5fa91e208304a34141ab1ac2d
[ "MIT" ]
null
null
null
# Author: Ingmar Stapel # Usage: Servo test program # URL: http://www.raspberry-pi-car.com/top-story-en/raspberry-pi-controlling-servo-motors/7028 # Version: 0.1 beta import RPi.GPIO as GPIO import time import os GPIO.setwarnings(False) GPIO.setmode(GPIO.BCM) GPIO.setup(4, GPIO.OUT) # Now we will start with a PWM signal at 50Hz at pin 11. # 50Hz should work for many servos very will. If not you can play with # the frequency if you like. Servo = GPIO.PWM(4, 50) #Servo.start(2.5) while True: # This command sets the left position of the servo #Servo.start(2.5) # Now the program asks for the direction the servo should turn. input = raw_input("Selection: ") if(input == "r"): Servo.start(12.5) print "Right" stepslength = 12.5 / 20 for Counter in range(int(20)): Servo.ChangeDutyCycle(stepslength * (Counter + 1)) print stepslength * (Counter + 1) time.sleep(1) #Servo.stop() elif(input == "l"): Servo.start(2.5) print "Left" stepslength = 12.5 / 10 for Counter in range(int(10)): Servo.ChangeDutyCycle(stepslength * (Counter + 1)) print stepslength * (Counter + 1) time.sleep(1) Servo.stop() elif(input == "s"): print "Stop" Servo.stop() # input not valid else: print "input not valid!"
22.589286
94
0.679842
8a320adca9ec9b85fb850b5735ee6bdb2ef01122
921
py
Python
tests/loggers/test_logger.py
growlix/composer
27418a3c65dca26d90ac09c6ae67cbd5d0202ccf
[ "Apache-2.0" ]
null
null
null
tests/loggers/test_logger.py
growlix/composer
27418a3c65dca26d90ac09c6ae67cbd5d0202ccf
[ "Apache-2.0" ]
null
null
null
tests/loggers/test_logger.py
growlix/composer
27418a3c65dca26d90ac09c6ae67cbd5d0202ccf
[ "Apache-2.0" ]
null
null
null
# Copyright 2022 MosaicML Composer authors # SPDX-License-Identifier: Apache-2.0 import pathlib from composer.core.state import State from composer.loggers import Logger, LoggerDestination, LogLevel def test_logger_file_artifact(dummy_state: State): file_logged = False class DummyLoggerDestination(LoggerDestination): def log_file_artifact(self, state: State, log_level: LogLevel, artifact_name: str, file_path: pathlib.Path, *, overwrite: bool): nonlocal file_logged file_logged = True assert artifact_name == "foo" assert file_path.name == "bar" assert overwrite logger = Logger(state=dummy_state, destinations=[DummyLoggerDestination()]) logger.file_artifact( log_level="epoch", artifact_name="foo", file_path="bar", overwrite=True, ) assert file_logged
27.909091
118
0.668838
d67064dd35e2ed4a0362c3b9d3da2036d6bfa4c5
1,675
py
Python
train.py
Noizecube/flair
b163c5506ef60803fb49eab1201c41c93e745cc8
[ "MIT" ]
null
null
null
train.py
Noizecube/flair
b163c5506ef60803fb49eab1201c41c93e745cc8
[ "MIT" ]
null
null
null
train.py
Noizecube/flair
b163c5506ef60803fb49eab1201c41c93e745cc8
[ "MIT" ]
null
null
null
from typing import List import flair.datasets from flair.data import Corpus from flair.embeddings import ( TokenEmbeddings, WordEmbeddings, StackedEmbeddings, FlairEmbeddings, CharacterEmbeddings, ) from flair.training_utils import EvaluationMetric from flair.visual.training_curves import Plotter # 1. get the corpus corpus: Corpus = flair.datasets.UD_ENGLISH() print(corpus) # 2. what tag do we want to predict? tag_type = "upos" # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) print(tag_dictionary.idx2item) # initialize embeddings embedding_types: List[TokenEmbeddings] = [ WordEmbeddings("glove"), # comment in this line to use character embeddings # CharacterEmbeddings(), # comment in these lines to use contextual string embeddings # # FlairEmbeddings('news-forward'), # # FlairEmbeddings('news-backward'), ] embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embedding_types) # initialize sequence tagger from flair.models import SequenceTagger tagger: SequenceTagger = SequenceTagger( hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type, use_crf=True, ) # initialize trainer from flair.trainers import ModelTrainer trainer: ModelTrainer = ModelTrainer(tagger, corpus) trainer.train( "resources/taggers/example-ner", learning_rate=0.1, mini_batch_size=32, max_epochs=20, shuffle=False, ) plotter = Plotter() plotter.plot_training_curves("resources/taggers/example-ner/loss.tsv") plotter.plot_weights("resources/taggers/example-ner/weights.txt") nichttest
24.632353
77
0.76597
fa534a5f5dcc7393feca46b81987b2a06ea7b0e8
8,361
py
Python
src/quo/layout/margin.py
scalabli/quo
70b6d4129ee705930f1f8a792fc4c9247d973f9d
[ "MIT" ]
3
2022-03-13T13:22:35.000Z
2022-03-18T08:22:51.000Z
src/quo/layout/margin.py
scalabli/quo
70b6d4129ee705930f1f8a792fc4c9247d973f9d
[ "MIT" ]
1
2022-03-21T16:29:54.000Z
2022-03-21T16:29:54.000Z
src/quo/layout/margin.py
scalabli/quo
70b6d4129ee705930f1f8a792fc4c9247d973f9d
[ "MIT" ]
null
null
null
""" Margin implementations for a :class:`~quo.layout.containers.Window`. """ from abc import ABCMeta, abstractmethod from typing import TYPE_CHECKING, Callable, Optional from quo.filters import FilterOrBool, to_filter from quo.text.core import ( StyleAndTextTuples, to_formatted_text ) from quo.text.utils import fragment_list_to_text from quo.utils.utils import get_width as g_w from .controls import UIContent if TYPE_CHECKING: from .containers import WindowRenderInfo __all__ = [ "Margin", "NumberedMargin", "ScrollbarMargin", "ConditionalMargin", "PromptMargin", ] class Margin(metaclass=ABCMeta): """ Base interface for a margin. """ @abstractmethod def get_width(self, get_ui_content: Callable[[], UIContent]) -> int: """ Return the width that this margin is going to consume. :param get_ui_content: Callable that asks the user control to create a :class:`.UIContent` instance. This can be used for instance to obtain the number of lines. """ return 0 @abstractmethod def create_margin( self, window_render_info: "WindowRenderInfo", width: int, height: int ) -> StyleAndTextTuples: """ Creates a margin. This should return a list of (style_str, text) tuples. :param window_render_info: :class:`~quo.layout.containers.WindowRenderInfo` instance, generated after rendering and copying the visible part of the :class:`~quo.layout.controls.UIControl` into the :class:`~quo.layout.containers.Window`. :param width: The width that's available for this margin. (As reported by :meth:`.get_width`.) :param height: The height that's available for this margin. (The height of the :class:`~quo.layout.containers.Window`.) """ return [] class NumberedMargin(Margin): """ Margin that displays the line numbers. :param relative: Number relative to the cursor position. Similar to the Vi 'relativenumber' option. :param display_tildes: Display tildes after the end of the document, just like Vi does. 
""" def __init__( self, relative: FilterOrBool = False, display_tildes: FilterOrBool = False ) -> None: self.relative = to_filter(relative) self.display_tildes = to_filter(display_tildes) def get_width(self, get_ui_content: Callable[[], UIContent]) -> int: line_count = get_ui_content().line_count return max(3, len("%s" % line_count) + 1) def create_margin( self, window_render_info: "WindowRenderInfo", width: int, height: int ) -> StyleAndTextTuples: relative = self.relative() style = "class:line-number" style_current = "class:line-number.current" # Get current line number. current_lineno = window_render_info.ui_content.cursor_position.y # Construct margin. result: StyleAndTextTuples = [] last_lineno = None for y, lineno in enumerate(window_render_info.displayed_lines): # Only display line number if this line is not a continuation of the previous line. if lineno != last_lineno: if lineno is None: pass elif lineno == current_lineno: # Current line. if relative: # Left align current number in relative mode. result.append((style_current, "%i" % (lineno + 1))) else: result.append( (style_current, ("%i " % (lineno + 1)).rjust(width)) ) else: # Other lines. if relative: lineno = abs(lineno - current_lineno) - 1 result.append((style, ("%i " % (lineno + 1)).rjust(width))) last_lineno = lineno result.append(("", "\n")) # Fill with tildes. if self.display_tildes(): while y < window_render_info.window_height: result.append(("class:tilde", "~\n")) y += 1 return result class ConditionalMargin(Margin): """ Wrapper around other :class:`.Margin` classes to show/hide them. 
""" def __init__(self, margin: Margin, filter: FilterOrBool) -> None: self.margin = margin self.filter = to_filter(filter) def get_width(self, get_ui_content: Callable[[], UIContent]) -> int: if self.filter(): return self.margin.get_width(get_ui_content) else: return 0 def create_margin( self, window_render_info: "WindowRenderInfo", width: int, height: int ) -> StyleAndTextTuples: if width and self.filter(): return self.margin.create_margin(window_render_info, width, height) else: return [] class ScrollbarMargin(Margin): """ Margin displaying a scrollbar. :param display_arrows: Display scroll up/down arrows. """ def __init__( self, display_arrows: FilterOrBool = False, up_arrow_symbol: str = None, down_arrow_symbol: str = None, ) -> None: from quo.accordance import WIN if WIN: up_arrow_symbol = "↑" down_arrow_symbol = "↓" else: up_arrow_symbol = "\u27F0" down_arrow_symbol = "\u27F1" self.display_arrows = to_filter(display_arrows) self.up_arrow_symbol = up_arrow_symbol self.down_arrow_symbol = down_arrow_symbol def get_width(self, get_ui_content: Callable[[], UIContent]) -> int: return 1 def create_margin( self, window_render_info: "WindowRenderInfo", width: int, height: int ) -> StyleAndTextTuples: content_height = window_render_info.content_height window_height = window_render_info.window_height display_arrows = self.display_arrows() if display_arrows: window_height -= 2 try: fraction_visible = len(window_render_info.displayed_lines) / float( content_height ) fraction_above = window_render_info.vertical_scroll / float(content_height) scrollbar_height = int( min(window_height, max(1, window_height * fraction_visible)) ) scrollbar_top = int(window_height * fraction_above) except ZeroDivisionError: return [] else: def is_scroll_button(row: int) -> bool: "True if we should display a button on this row." return scrollbar_top <= row <= scrollbar_top + scrollbar_height # Up arrow. 
result: StyleAndTextTuples = [] if display_arrows: result.extend( [ ("class:scrollbar.arrow", self.up_arrow_symbol), ("class:scrollbar", "\n"), ] ) # Scrollbar body. scrollbar_background = "class:scrollbar.background" scrollbar_background_start = "class:scrollbar.background,scrollbar.start" scrollbar_button = "class:scrollbar.button" scrollbar_button_end = "class:scrollbar.button,scrollbar.end" for i in range(window_height): if is_scroll_button(i): if not is_scroll_button(i + 1): # Give the last cell a different style, because we # want to underline this. result.append((scrollbar_button_end, " ")) else: result.append((scrollbar_button, " ")) else: if is_scroll_button(i + 1): result.append((scrollbar_background_start, " ")) else: result.append((scrollbar_background, " ")) result.append(("", "\n")) # Down arrow if display_arrows: result.append(("class:scrollbar.arrow", self.down_arrow_symbol)) return result
32.660156
95
0.580194
e41706698550c0f29aed8b9a4865d10e682b7713
6,317
py
Python
analysis/muons.py
TheGreatCabbage/ParticlePy
676d99cfa244b0365dbfb9693acaed1265910299
[ "MIT" ]
null
null
null
analysis/muons.py
TheGreatCabbage/ParticlePy
676d99cfa244b0365dbfb9693acaed1265910299
[ "MIT" ]
null
null
null
analysis/muons.py
TheGreatCabbage/ParticlePy
676d99cfa244b0365dbfb9693acaed1265910299
[ "MIT" ]
null
null
null
import time import os import numpy as np import sys from datetime import datetime folder = "data" def parse_data(*files): """ Parses a data file, returning a dictionary containing the time in seconds since the epoch (key) and muon count (value). """ # Add the path to the filenames (they are in the 'data/' folder). files = map(lambda x: "{}/{}".format(folder, x), files) result = [] # List of disctionaries. for file in files: # All the lines from the file in one list. lines = read_lines(file) data = {} # Dictionary with time as key, muon count as value. for l in lines: # Separate any elements of text that have a space between them. line_data = l.split(" ") # Muon count if positive, unwanted data if negative. count = int(line_data[0]) - 40000 # Time in seconds since the epoch. time = int(line_data[1]) # Ensure that count is non-negative and set as value in 'data' associated with time. data[time] = count if count > 0 else 0 result.append(data) return result def read_lines(file): try: with open(file, 'r') as f: return f.readlines() except IsADirectoryError: print("Ignoring directory: {}".format(file)) except FileNotFoundError: print("File does not exist, please check path: {}".format(file)) except UnicodeDecodeError: print("Unicode error with {} - program will exit.".format(file)) sys.exit(0) return [] def average(data): result = {} # Final result. for data_dict in data: # Iterate over the data for each file. for key in data_dict: # Take key from current dict. # If that key already has been added to the final data, we don't want to do the average again. if key in result: continue # Skip to next key. # Make a list to contain the values corresponding to the counts from different dicts. values = [] for i in data: # Iterate over dicts again to check which have the current key. # If it doesn't have the key, it will give -1. This is because -1 cannot appear in valid data. value = i.get(key, -1) # Only want to average valid (positive) results. 
if value >= 0: values.append(value) # Add the average for this key to the final data. result[key] = sum(values) / len(values) return result def make_unique(data): result = {} for data_dict in data: # Iterate over the data for each file. for key in data_dict: # Take key from current dict. result[key] = data_dict[key] return result def get_data_set_2(no_print=True): return get_data(any_which_satisfy=lambda x: "Set 2" in x, conflict_strategy="overwrite", no_print=no_print) def get_data_set_3(no_print=True): return get_data(any_which_satisfy=lambda x: "Set 3" in x, conflict_strategy="overwrite", no_print=no_print) def get_data(*files, conflict_strategy="average", any_which_satisfy=lambda x: True, no_print=False): """ Parses data files, averaging any duplicate entries by default. Returns a tuple containing a list of times and a list of counts respectively. """ if len(files) == 0: show_output( "No files specified. Using all files in data directory...", not no_print) files = [i for i in os.listdir( folder) if any_which_satisfy(i) and ".data" == i[-5:]] # List of dictionaries containing data from all files. data = parse_data(*files) # Deal with duplicate data. if conflict_strategy == "average": show_output("Averaging data...", not no_print) return sorted_data_from(average(data)) elif conflict_strategy == "overwrite": show_output( "Overwriting duplicate data. This is dangerous! Take care...", not no_print) return sorted_data_from(make_unique(data)) else: print("Unknown conflict strategy: '{}'\nExiting.".format(conflict_strategy)) sys.exit(-1) def show_output(text, show): if show: print(text) def get_counts_from(data_dict): return list(data_dict.values()) def get_times_from(data_dict): return list(data_dict.keys()) def sorted_data_from(dict): tuples = [] for key in dict: tuples.append((key, dict[key])) sorted_tuples = sorted(tuples, key=lambda x: x[0]) # Sort by times. 
result = ([], []) for i in sorted_tuples: for j in range(0, 2): result[j].append(i[j]) return result def average_with_step(sorted_data, step_in_seconds): result = ([], []) start_value = sorted_data[0][0] # First time value temp = [] # List containing the counts to be averaged. # Iterate over all time values. for i in range(0, len(sorted_data[0])): current_time = sorted_data[0][i] # If we exceed the step, average what we have since last average. if current_time - start_value > step_in_seconds: # Take midpoint as time measurement. result[0].append(current_time - step_in_seconds/2) # Associate average count with the midpoint. result[1].append(sum(temp) / len(temp)) # Remove items from temp list, so we can average the next set of points. temp = [] # Set start value to this time so we know when we've moved another full step. start_value = current_time continue # Go to next iteration of the loop. # Another data point to be averaged; add to list. temp.append(sorted_data[1][i]) return result def indices_between_times(times, start, end): """ When provided with a list of times, a start time and an end time, returns a tuple containing the first index where the time is greater than 'start' and the last index where the time is less than 'end'. """ indices = [-1, -1] for i in range(0, len(times)): t = times[i] if t >= start and indices[0] is -1: indices[0] = i if t >= end and indices[1] is -1: indices[1] = i - 1 return tuple(indices)
35.290503
111
0.621339
1d4e6983973cfd57c742c5a5a3c3cb88a288a09d
405
py
Python
sdk/python/pulumi_azure_native/subscription/v20200901/_enums.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
31
2020-09-21T09:41:01.000Z
2021-02-26T13:21:59.000Z
sdk/python/pulumi_azure_native/subscription/v20200901/_enums.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
231
2020-09-21T09:38:45.000Z
2021-03-01T11:16:03.000Z
sdk/python/pulumi_azure_native/subscription/v20200901/_enums.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
4
2020-09-29T14:14:59.000Z
2021-02-10T20:38:16.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** from enum import Enum __all__ = [ 'Workload', ] class Workload(str, Enum): """ The workload type of the subscription. It can be either Production or DevTest. """ PRODUCTION = "Production" DEV_TEST = "DevTest"
22.5
82
0.659259
29342ee1661ed9abca3cabee89fbb8d4d65595d6
2,978
py
Python
python/openassetio/specifications.py
j-helman/OpenAssetIO
fd4f23d37dfc104a68f906804846d60b6d78b839
[ "Apache-2.0" ]
null
null
null
python/openassetio/specifications.py
j-helman/OpenAssetIO
fd4f23d37dfc104a68f906804846d60b6d78b839
[ "Apache-2.0" ]
1
2021-11-25T12:24:36.000Z
2021-11-25T18:01:58.000Z
python/openassetio/specifications.py
j-helman/OpenAssetIO
fd4f23d37dfc104a68f906804846d60b6d78b839
[ "Apache-2.0" ]
null
null
null
# # Copyright 2013-2021 [The Foundry Visionmongers Ltd] # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from .Specification import Specification, TypedProperty __all__ = ['EntitySpecification', 'LocaleSpecification', 'RelationshipSpecification'] class EntitySpecification(Specification): """ EntitySpecifications are used to 'type' a \ref entity. In their simplest form, the _type can be used as a simple string-matched filter. In more advanced cases, the other properties of the Specification may be useful to further refine selection. During registration, the Specification may also provide valuable information to the Manager to help it best represent the Hosts data. """ _prefix = "core.entity" nameHint = TypedProperty( str, doc="A hint as to the name of the entity, used in cases where" " this is not implicit in the reference.") referenceHint = TypedProperty( str, doc="A hint for the entity reference, useful for default" " browser path, etc... This may, or may not ultimately be" " relevant. The Asset Management system should check its" " applicability before using it, and may freely ignore it" " if it has a better idea about a suitable reference.") thumbnailPath = TypedProperty( str, initVal="", doc="If a thumbnail was requested for the registration, then" " this may be set to a path, pointing to a thumbnail. 
If," " for any reason, the thumbnail isn't available, then this" " will be an empty string.") class LocaleSpecification(Specification): """ LocaleSpecifications are used by a Host to define which part of the application is interacting with the Manager. For example, the DocumentLocale should be used when dealing with scene files, projects, etc... This information is generally useful to Managers as it allows them to better handle the resulting Entity data. """ _prefix = "core.locale" class RelationshipSpecification(Specification): """ RelationshipSpecifications are used mainly with \ref openassetio.managerAPI.ManagerInterface.ManagerInterface.getRelatedReferences "ManagerInterface.getRelatedReferences", in order to describe the kind of relation that is being requested, when a simply EntitySpecification will not suffice. """ _prefix = "core.relationship"
39.706667
85
0.709872
1f7dfb3233fd68c8aa69bb56edd0101538ab7257
1,318
py
Python
src/content.py
Hybin/cBook-Updates
97f1ad65a06de861a9da2ecc4ff0e6e890c9f0de
[ "MIT", "Unlicense" ]
null
null
null
src/content.py
Hybin/cBook-Updates
97f1ad65a06de861a9da2ecc4ff0e6e890c9f0de
[ "MIT", "Unlicense" ]
null
null
null
src/content.py
Hybin/cBook-Updates
97f1ad65a06de861a9da2ecc4ff0e6e890c9f0de
[ "MIT", "Unlicense" ]
null
null
null
# -*- coding: utf-8 -*- from urllib.request import urlopen from urllib.parse import quote from bs4 import BeautifulSoup import json import re import os import clear def getList(page): jsonOutput = {"items":[]} # open the page of introduction introPage = urlopen(quote(page, safe='/:?=')) soup = BeautifulSoup(introPage, 'html.parser') # get the content list page url = soup.find('div', class_='tablist').find_all('li')[1].find('a')['href'] contentPage = urlopen(url) bsObj = BeautifulSoup(contentPage, 'html.parser') # get the title and link def getTitle(o): title = re.sub(r'\n*', '', o.find('a').get_text()) return title def getLink(o): link = o.find('a')['href'] return link def getInfo(o): info = re.sub(r'\xa0', ' ', o.find('a')['title']) return info item = { "title": "", "subtitle": "", "arg": "", } volumes = bsObj.find_all('ul', class_='block_ul') for v in volumes: chapters = v.find_all('li') for c in chapters: item['title'] = getTitle(c) item['subtitle'] = getInfo(c) item['arg'] = getLink(c) jsonOutput['items'].insert(0, item) item = { "title": "", "subtitle": "", "arg": "", } output = json.dumps(jsonOutput, ensure_ascii=False, indent=4) # clear the cache of file icons clear.on() # output the results print(output)
21.258065
77
0.630501
0ef4c4c1f98eacf57a618c0ac1b4360b6319f1d9
3,052
py
Python
checkSpeciesOfGenes.py
jakelever/GNBR
2a457112b60f1667ca06a508a55d892185965db7
[ "MIT" ]
null
null
null
checkSpeciesOfGenes.py
jakelever/GNBR
2a457112b60f1667ca06a508a55d892185965db7
[ "MIT" ]
null
null
null
checkSpeciesOfGenes.py
jakelever/GNBR
2a457112b60f1667ca06a508a55d892185965db7
[ "MIT" ]
null
null
null
import argparse if __name__ == '__main__': parser = argparse.ArgumentParser(description='Check the species of genes from GNBR data') parser.add_argument('--geneIDs',required=True,type=str,help='All the gene IDs in GNBR') parser.add_argument('--ncbiGeneInfo',required=True,type=str,help='Uncompressed version of NCBI gene info file') parser.add_argument('--ncbiGeneHistory',required=True,type=str,help='Uncompressed version of NCBI gene history file') parser.add_argument('--taxonomyNames',required=True,type=str,help='names.dmp file from NCBI taxonomy taxdmp') parser.add_argument('--taxonomyMerged',required=True,type=str,help='merged.dmp file from NCBI taxonomy taxdmp') parser.add_argument('--outFile',required=True,type=str,help='Output file') args = parser.parse_args() print("Loading taxonomy names") species = {} with open(args.taxonomyNames) as f: for line in f: split = [ s.strip() for s in line.strip('\n').split('|') ] if split[3] == 'scientific name': tax_id = int(split[0]) name = split[1] species[tax_id] = name #if 9606 in species: # break print("Loading taxonomy merges") with open(args.taxonomyMerged) as f: for line in f: split = [ s.strip() for s in line.strip('\n').split('|') ] old_taxa_id = int(split[0]) new_taxa_id = int(split[1]) if new_taxa_id in species: species[old_taxa_id] = species[new_taxa_id] print("Loading NCBI genes") geneToSpeciesID = {} with open(args.ncbiGeneInfo) as f: headers = f.readline() for line in f: split = line.strip('\n').split('\t') tax_id = int(split[0]) gene_id = int(split[1]) geneToSpeciesID[gene_id] = tax_id #assert tax_id in species, "Couldn't find taxa_id=%d for gene_id=%d" % (tax_id,gene_id) print("Loading NCBI gene history") renaming = {} discontinued = set() with open(args.ncbiGeneHistory) as f: headers = f.readline() for line in f: split = line.strip('\n').split('\t') old_gene_id = int(split[2]) if split[1] == '-': discontinued.add(old_gene_id) else: gene_id = int(split[1]) renaming[old_gene_id] = gene_id print("Processing GNBR 
data") with open(args.geneIDs) as f, open(args.outFile,'w') as outF: for line in f: if line.strip('\n') == 'null': continue gene_ids = [ int(g.split('(')[0]) for g in line.strip('\n').split(';') ] gene_ids = [ renaming[g] if g in renaming else g for g in gene_ids ] hasInvalidIDs = any( not g in geneToSpeciesID for g in gene_ids ) if hasInvalidIDs: continue for g in gene_ids: assert g in geneToSpeciesID, "Couldn't find gene_id=%d" % g matched_species = [ geneToSpeciesID[g] for g in gene_ids ] matched_species = sorted(set(matched_species)) for s in matched_species: assert s in species, "Couldn't find taxa_id=%d" % s species_names = [ species[s] for s in matched_species ] outData = [ ",".join(map(str,gene_ids)), ",".join(map(str,matched_species)), ",".join(map(str,species_names)), len(matched_species) == 1 ] outF.write("\t".join(map(str,outData)) + "\n")
34.681818
141
0.681193
f4e3ea8dc5bbdc64f29b833e7096f050ab6bee28
3,164
py
Python
stages/l4lb/fabfile.py
midonet/senbazuru
40653aa1d92d96187baf7bd6a661090027de6c3d
[ "Apache-2.0" ]
1
2015-07-02T05:43:48.000Z
2015-07-02T05:43:48.000Z
stages/l4lb/fabfile.py
midonet/senbazuru
40653aa1d92d96187baf7bd6a661090027de6c3d
[ "Apache-2.0" ]
null
null
null
stages/l4lb/fabfile.py
midonet/senbazuru
40653aa1d92d96187baf7bd6a661090027de6c3d
[ "Apache-2.0" ]
null
null
null
# # Copyright (c) 2015 Midokura SARL, All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os from senbazuru.config import Config from senbazuru.utils import Puppet from fabric.api import env,parallel,roles,run from fabric.colors import green from fabric.utils import puts from netaddr import IPNetwork as CIDR metadata = Config(os.environ["CONFIGFILE"]) # # thanks to http://docs.midonet.org/docs/operation-guide/2015.06/content/l4lb_configuration.html for making this easy to program. 
# @parallel @roles('midonet_api') def l4lb(): puts(green("configuring l4 TCP load balancer (using cli on %s)" % env.host_string)) for midonet_agent in metadata.roles["midonet_agents"]: run(""" HOST_STRING="%s" FIRST_IP="%s" DEBUG="%s" MAXPAIRS="%s" ROUTER_ID="$(midonet-cli -e 'router list' | grep alexrouter | awk '{print $2;}')" if [[ "" == "${ROUTER_ID}" ]]; then echo "could not find alexrouter" exit 1 else echo "using router id: ${ROUTER_ID}" fi LB_ID="$(midonet-cli -e 'load-balancer list' | grep 'state up' | head -n1 | awk '{print $2;}')" if [[ "" == "${LB_ID}" ]]; then LB_ID="$(midonet-cli -e 'load-balancer create')" fi echo "using load balancer id: ${LB_ID}" midonet-cli -e "router ${ROUTER_ID} set load-balancer ${LB_ID}" POOL_ID="$(midonet-cli -e "load-balancer ${LB_ID} list pool" | head -n1 | awk '{print $2;}')" if [[ "" == "${POOL_ID}" ]]; then POOL_ID="$(midonet-cli -e "load-balancer ${LB_ID} create pool lb-method ROUND_ROBIN")" fi midonet-cli -e "load-balancer ${LB_ID} pool ${POOL_ID} show" # # add the ips of the veth pairs of the agents # ip2dec () { local a b c d ip=$@ IFS=. read -r a b c d <<< "$ip" printf '%%d' "$((a * 256 ** 3 + b * 256 ** 2 + c * 256 + d))" } dec2ip () { local ip dec=$@ for e in {3..0} do ((octet = dec / (256 ** e) )) ((dec -= octet * 256 ** e)) ip+=$delim$octet delim=. done printf '%%s' "$ip" } # start at ip .20 in the 1024 block k=20 for i in $(seq "$(( ${k} ))" "$(( ${k} + ${MAXPAIRS} ))"); do IP="$(dec2ip $(( $(ip2dec ${FIRST_IP}) + ${k} )))" midonet-cli -e "load-balancer ${LB_ID} pool ${POOL_ID} create member address ${IP} protocol-port 80" k="$(( ${k} + 1 ))" done midonet-cli -e "load-balancer ${LB_ID} pool ${POOL_ID} list vip" | grep "10.0.0.10" || \ midonet-cli -e "load-balancer ${LB_ID} pool ${POOL_ID} create vip address 10.0.0.10 persistence SOURCE_IP protocol-port 80" """ % ( env.host_string, CIDR(metadata.servers[midonet_agent]['network'])[0], metadata.config['debug'], metadata.config['maxpairs'] ))
27.513043
129
0.638748
e21af41cf05676153778e2cfa07c5138f259c22c
6,544
py
Python
adafruit_pca9685.py
sommersoft/Adafruit_CircuitPython_PCA9685
57a4500e883e56508a8e647dc2e9756f905625e1
[ "MIT" ]
1
2020-09-27T20:08:57.000Z
2020-09-27T20:08:57.000Z
adafruit_pca9685.py
sommersoft/Adafruit_CircuitPython_PCA9685
57a4500e883e56508a8e647dc2e9756f905625e1
[ "MIT" ]
null
null
null
adafruit_pca9685.py
sommersoft/Adafruit_CircuitPython_PCA9685
57a4500e883e56508a8e647dc2e9756f905625e1
[ "MIT" ]
null
null
null
# The MIT License (MIT) # # Copyright (c) 2016 Radomir Dopieralski, written for Adafruit Industries # Copyright (c) 2017 Scott Shawcroft for Adafruit Industries LLC # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ `adafruit_pca9685` ==================================================== Driver for the PCA9685 PWM control IC. Its commonly used to control servos, leds and motors. .. seealso:: The `Adafruit CircuitPython Motor library <https://github.com/adafruit/Adafruit_CircuitPython_Motor>`_ can be used to control the PWM outputs for specific uses instead of generic duty_cycle adjustments. 
* Author(s): Scott Shawcroft Implementation Notes -------------------- **Hardware:** * Adafruit `16-Channel 12-bit PWM/Servo Driver - I2C interface - PCA9685 <https://www.adafruit.com/product/815>`_ (Product ID: 815) **Software and Dependencies:** * Adafruit CircuitPython firmware for the ESP8622 and M0-based boards: https://github.com/adafruit/circuitpython/releases * Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice * Adafruit's Register library: https://github.com/adafruit/Adafruit_CircuitPython_Register """ __version__ = "0.0.0-auto.0" __repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_PCA9685.git" import time from adafruit_register.i2c_struct import UnaryStruct from adafruit_register.i2c_struct_array import StructArray from adafruit_bus_device import i2c_device class PWMChannel: """A single PCA9685 channel that matches the :py:class:`~pulseio.PWMOut` API.""" def __init__(self, pca, index): self._pca = pca self._index = index @property def frequency(self): """The overall PWM frequency in Hertz (read-only). A PWMChannel's frequency cannot be set individually. All channels share a common frequency, set by PCA9685.frequency.""" return self._pca.frequency @frequency.setter def frequency(self, _): raise NotImplementedError("frequency cannot be set on individual channels") @property def duty_cycle(self): """16 bit value that dictates how much of one cycle is high (1) versus low (0). 
0xffff will always be high, 0 will always be low and 0x7fff will be half high and then half low.""" pwm = self._pca.pwm_regs[self._index] if pwm[0] == 0x1000: return 0xFFFF return pwm[1] << 4 @duty_cycle.setter def duty_cycle(self, value): if not 0 <= value <= 0xFFFF: raise ValueError("Out of range") if value == 0xFFFF: self._pca.pwm_regs[self._index] = (0x1000, 0) else: # Shift our value by four because the PCA9685 is only 12 bits but our value is 16 value = (value + 1) >> 4 self._pca.pwm_regs[self._index] = (0, value) class PCAChannels: # pylint: disable=too-few-public-methods """Lazily creates and caches channel objects as needed. Treat it like a sequence.""" def __init__(self, pca): self._pca = pca self._channels = [None] * len(self) def __len__(self): return 16 def __getitem__(self, index): if not self._channels[index]: self._channels[index] = PWMChannel(self._pca, index) return self._channels[index] class PCA9685: """ Initialise the PCA9685 chip at ``address`` on ``i2c_bus``. The internal reference clock is 25mhz but may vary slightly with environmental conditions and manufacturing variances. Providing a more precise ``reference_clock_speed`` can improve the accuracy of the frequency and duty_cycle computations. See the ``calibration.py`` example for how to derive this value by measuring the resulting pulse widths. :param ~busio.I2C i2c_bus: The I2C bus which the PCA9685 is connected to. :param int address: The I2C address of the PCA9685. :param int reference_clock_speed: The frequency of the internal reference clock in Hertz. """ # Registers: mode1_reg = UnaryStruct(0x00, "<B") prescale_reg = UnaryStruct(0xFE, "<B") pwm_regs = StructArray(0x06, "<HH", 16) def __init__(self, i2c_bus, *, address=0x40, reference_clock_speed=25000000): self.i2c_device = i2c_device.I2CDevice(i2c_bus, address) self.channels = PCAChannels(self) """Sequence of 16 `PWMChannel` objects. 
One for each channel.""" self.reference_clock_speed = reference_clock_speed """The reference clock speed in Hz.""" self.reset() def reset(self): """Reset the chip.""" self.mode1_reg = 0x00 # Mode1 @property def frequency(self): """The overall PWM frequency in Hertz.""" return self.reference_clock_speed / 4096 / self.prescale_reg @frequency.setter def frequency(self, freq): prescale = int(self.reference_clock_speed / 4096.0 / freq + 0.5) if prescale < 3: raise ValueError("PCA9685 cannot output at the given frequency") old_mode = self.mode1_reg # Mode 1 self.mode1_reg = (old_mode & 0x7F) | 0x10 # Mode 1, sleep self.prescale_reg = prescale # Prescale self.mode1_reg = old_mode # Mode 1 time.sleep(0.005) self.mode1_reg = old_mode | 0xA1 # Mode 1, autoincrement on def __enter__(self): return self def __exit__(self, exception_type, exception_value, traceback): self.deinit() def deinit(self): """Stop using the pca9685.""" self.reset()
37.609195
99
0.688417
ba1586a0d048d798a6267feaa8af11625275b627
4,934
py
Python
faceRecognizer.py
Trance5AM/FaceRecogniton
a608c846af0bdd3cb6e478d004373c3987b06f9d
[ "MIT" ]
4
2017-02-08T22:11:22.000Z
2017-02-08T22:24:57.000Z
faceRecognizer.py
etiro/FaceRecogniton
a608c846af0bdd3cb6e478d004373c3987b06f9d
[ "MIT" ]
null
null
null
faceRecognizer.py
etiro/FaceRecogniton
a608c846af0bdd3cb6e478d004373c3987b06f9d
[ "MIT" ]
null
null
null
import cv2 import os import numpy as np from baza import Database class FaceRecognizer(object): def __init__(self, xml_path, index=0,): self.camera = cv2.VideoCapture(index) self.index = index self.haar_cascade = cv2.CascadeClassifier(xml_path) self.xml_path = xml_path self.recognizer = cv2.face.createLBPHFaceRecognizer() self.db1 = Database("Face_Recognition\\Database\\baza.db") def __del__(self): self.camera.release() print("Program finished, memory de-allocated, camera closed") def normalize(self, roi): width, height = 200, 200 gray = cv2.equalizeHist(roi) #Equalizes the histogram of a grayscale image - better contrast resized_img = cv2.resize(gray, (height, width)) return resized_img def detect_store(self, first, path): counter = 0 input("I will take 20 pictures, press ENTER when you ready") while(self.camera.isOpened() and counter < 100): ret, frame = self.camera.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) face_rects = self.haar_cascade.detectMultiScale( gray, scaleFactor = 1.3, minNeighbors=5, minSize=(30, 30) ) for (x,y,w,h) in face_rects: cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 2) gray_roi = gray[y:y+h, x:x+w] cv2.putText(frame,"Face Detected",(10,50), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255),2,cv2.LINE_AA) print(counter) if(counter > 70 and counter < 101): #to give user some time to prepare cv2.imwrite(path + "\\" + first + "." + str(counter-70) + ".jpg", self.normalize(gray_roi)) print("saving {}. 
frame".format(counter)) #cv2.imshow("Saving..", self.normalize(gray_roi)) counter += 1 cv2.imshow("ESC to close", frame) key = cv2.waitKey(1) if(key == 27): break cv2.destroyAllWindows() def get_images_and_labels(self, path): images = [] labels = [] counter = None for person in os.listdir(path): first, last = (person.split("_")) counter = self.db1.get_id(first, last)[0] for image in os.listdir(os.path.join(path, person)): #if(image.endswith(".jpg")): try: print(os.path.join(path, person, image)) img = cv2.imread(os.path.join(path, person, image), cv2.IMREAD_GRAYSCALE) np_image = np.array(img, "uint8") images.append(np_image) labels.append(counter) print(counter) cv2.imshow("training", np_image) cv2.waitKey(10) except OSError as e: print(e) cv2.destroyAllWindows() return images, np.array(labels) def train_recognizer(self): path = "Face_Recognition\\People\\" model_path = "Face_Recognition\\Recognizer\\data.xml" images, labels = self.get_images_and_labels(path) self.recognizer.train(images, labels) self.recognizer.save(model_path) return model_path def recognize(self): model_path = self.train_recognizer() self.recognizer.load(model_path) while(self.camera.isOpened()): ret, frame = self.camera.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) face_rects = self.haar_cascade.detectMultiScale( gray, scaleFactor = 1.3, minNeighbors=5, minSize=(30, 30) ) for (x,y,w,h) in face_rects: gray_roi = gray[y:y+h, x:x+w] collector = cv2.face.MinDistancePredictCollector() self.recognizer.predict(gray_roi, collector) conf = collector.getDist() pred = collector.getLabel() treshold = 70 #you need to play with it cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 2) if(conf < treshold): profile = self.db1.get_name(str(pred)) cv2.putText(frame,str(profile[0]),(x,y+h+30), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255),2,cv2.LINE_AA) cv2.putText(frame,str(profile[1]),(x,y+h+60), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255),2,cv2.LINE_AA) print("Current face XY location is {}".format(x,y)) 
else: cv2.putText(frame,"Unknown",(x,y+h+30), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255),2,cv2.LINE_AA) cv2.imshow("Face Recognizer", frame) key = cv2.waitKey(1) if(key == 27): break cv2.destroyAllWindows()
42.904348
122
0.548439
f27e1df25818af8bc472c00eea8c9ee389b0164c
859
py
Python
leetcode/771_jewels_and_stones.py
coocos/leetcode
007bbeb46fa4b32e1c92fc894edeb2100eb6ba21
[ "MIT" ]
null
null
null
leetcode/771_jewels_and_stones.py
coocos/leetcode
007bbeb46fa4b32e1c92fc894edeb2100eb6ba21
[ "MIT" ]
null
null
null
leetcode/771_jewels_and_stones.py
coocos/leetcode
007bbeb46fa4b32e1c92fc894edeb2100eb6ba21
[ "MIT" ]
null
null
null
import unittest from collections import Set class Solution: """ This solution simply creates a set with all the jewel tokens and then iterates through the stones one by one incrementing a counter if they are present in the set. Note that using sum() and a comprehension like return sum(s in jewels for s in S) would be cleaner but benchmarking it on leetcode shows that it's a slightly slower approach than using an explicit counter and a for loop. """ def numJewelsInStones(self, J: str, S: str) -> int: jewels: Set[int] = set(J) count = 0 for s in S: if s in jewels: count += 1 return count class TestSolution(unittest.TestCase): def test_first_example(self): self.assertEqual(Solution().numJewelsInStones('aA', 'aAAbbbb'), 3)
25.264706
74
0.650757
17bb5daad1e5406b9b6c563fb6e51cb767513b54
23,049
py
Python
tests/test_pycfg.py
kshithijiyer/byterun
e3dc1349ed0d5f737708f274f714ac77b6f047cc
[ "PSF-2.0" ]
13
2015-03-19T12:17:07.000Z
2021-07-24T14:22:58.000Z
tests/test_pycfg.py
kshithijiyer/byterun
e3dc1349ed0d5f737708f274f714ac77b6f047cc
[ "PSF-2.0" ]
null
null
null
tests/test_pycfg.py
kshithijiyer/byterun
e3dc1349ed0d5f737708f274f714ac77b6f047cc
[ "PSF-2.0" ]
4
2017-01-27T19:08:50.000Z
2020-10-13T16:32:23.000Z
"""Tests for pycfg. """ import dis import inspect import logging import unittest from byterun import pycfg # Disable because pylint does not like any name for the nested test_code # functions used to get the needed bytecode. # pylint: disable=invalid-name # The bytecode constants used to check against the generated code are formatted # as follows. Each line is one instruction. Blank lines separate basic blocks. # # dis.opmap["<opcode name>"], <arg low>, <arg high>, # <offset of inst>, <arg> # # The <arg> is a decoded version of the argument. This is more useful for # relative jumps. def line_number(): """Returns the line number of the call site.""" return inspect.currentframe().f_back.f_lineno class CFGTest(unittest.TestCase): def assertEndsWith(self, actual, expected): self.assertTrue(actual.endswith(expected), msg="'%s' does not end with '%s'" % (actual, expected)) # Copy this line into your test when developing it. It prints the formatted # bytecode to use as the expected. # print pycfg._bytecode_repr(test_code.func_code.co_code) def checkBlocks(self, table, expected): self.assertEqual(len(table._blocks), len(expected)) for block, (expected_begin, expected_end) in zip(table._blocks, expected): self.assertEqual(block.begin, expected_begin) self.assertEqual(block.end, expected_end) @staticmethod def codeOneBlock(): return x + 1 # pylint: disable=undefined-variable codeOneBlockBytecode = pycfg._list_to_string([ dis.opmap["LOAD_GLOBAL"], 0, 0, # 0 dis.opmap["LOAD_CONST"], 1, 0, # 3 dis.opmap["BINARY_ADD"], # 6 dis.opmap["RETURN_VALUE"], # 7 ]) def testOneBlock(self): # Check the code to make sure the test will fail if the compilation changes. self.assertEqual(self.codeOneBlock.func_code.co_code, self.codeOneBlockBytecode) cfg = pycfg.CFG() table = cfg.get_block_table(self.codeOneBlock.func_code) # Should all be one basic block. 
self.assertIs(table.get_basic_block(0), table.get_basic_block(3)) self.assertIs(table.get_basic_block(0), table.get_basic_block(6)) self.assertIs(table.get_basic_block(0), table.get_basic_block(7)) # No incoming self.assertItemsEqual(table.get_basic_block(0).incoming, []) # Outgoing is an unknown return location self.assertItemsEqual(table.get_basic_block(0).outgoing, [None]) @staticmethod def codeTriangle(y): x = y if y > 10: x -= 2 return x codeTriangleLineNumber = line_number() - 4 # codeTriangleLineNumber is used to compute the correct line numbers for code # in codeTriangle. This makes the tests less brittle if other tests in the # file are changed. However the "- 4" will need to be changed if codeTriangle # is changed or anything is inserted between the line_number() call and the # definition of codeTriangle. codeTriangleBytecode = pycfg._list_to_string([ dis.opmap["LOAD_FAST"], 0, 0, # 0, arg=0 dis.opmap["STORE_FAST"], 1, 0, # 3, arg=1 dis.opmap["LOAD_FAST"], 0, 0, # 6, arg=0 dis.opmap["LOAD_CONST"], 1, 0, # 9, arg=1 dis.opmap["COMPARE_OP"], 4, 0, # 12, arg=4 dis.opmap["POP_JUMP_IF_FALSE"], 31, 0, # 15, dest=31 dis.opmap["LOAD_FAST"], 1, 0, # 18, arg=1 dis.opmap["LOAD_CONST"], 2, 0, # 21, arg=2 dis.opmap["INPLACE_SUBTRACT"], # 24 dis.opmap["STORE_FAST"], 1, 0, # 25, arg=1 dis.opmap["JUMP_FORWARD"], 0, 0, # 28, dest=31 dis.opmap["LOAD_FAST"], 1, 0, # 31, arg=1 dis.opmap["RETURN_VALUE"], # 34 ]) def testTriangle(self): self.assertEqual(self.codeTriangle.func_code.co_code, self.codeTriangleBytecode) cfg = pycfg.CFG() table = cfg.get_block_table(self.codeTriangle.func_code) expected = [(0, 15), (18, 28), (31, 34)] self.checkBlocks(table, expected) bb = table.get_basic_block # Check the POP_JUMP_IF_FALSE conditional jump self.assertItemsEqual(bb(0).outgoing, [bb(18), bb(31)]) # Check the return self.assertItemsEqual(bb(44).outgoing, [None]) # Check the incoming of the entry block self.assertItemsEqual(bb(0).incoming, []) # Check incoming of the merge block. 
self.assertItemsEqual(bb(44).incoming, [bb(28), bb(15)]) self.assertEndsWith( bb(21).get_name(), "tests/test_pycfg.py:{0}-{0}".format(self.codeTriangleLineNumber+2)) def testTriangleDominators(self): cfg = pycfg.CFG() table = cfg.get_block_table(self.codeTriangle.func_code) bb = table.get_basic_block self.assertEqual(bb(0)._dominators, {bb(0)}) self.assertEqual(bb(18)._dominators, {bb(0), bb(18)}) self.assertEqual(bb(31)._dominators, {bb(0), bb(31)}) self.assertEqual(bb(41)._dominators, {bb(0), bb(41)}) self.assertTrue(table.dominates(0, 37)) self.assertFalse(table.dominates(24, 41)) self.assertTrue(table.dominates(21, 28)) self.assertFalse(table.dominates(28, 21)) def testTriangleOrder(self): cfg = pycfg.CFG() table = cfg.get_block_table(self.codeTriangle.func_code) bb = table.get_basic_block self.assertEqual(table.get_ancestors_first_traversal(), [bb(o) for o in [0, 18, 31]]) @staticmethod def codeDiamond(y): x = y if y > 10: x -= 2 else: x += 2 return x codeDiamondBytecode = pycfg._list_to_string([ dis.opmap["LOAD_FAST"], 0, 0, # 0, arg=0 dis.opmap["STORE_FAST"], 1, 0, # 3, arg=1 dis.opmap["LOAD_FAST"], 0, 0, # 6, arg=0 dis.opmap["LOAD_CONST"], 1, 0, # 9, arg=1 dis.opmap["COMPARE_OP"], 4, 0, # 12, arg=4 dis.opmap["POP_JUMP_IF_FALSE"], 31, 0, # 15, dest=31 dis.opmap["LOAD_FAST"], 1, 0, # 18, arg=1 dis.opmap["LOAD_CONST"], 2, 0, # 21, arg=2 dis.opmap["INPLACE_SUBTRACT"], # 24 dis.opmap["STORE_FAST"], 1, 0, # 25, arg=1 dis.opmap["JUMP_FORWARD"], 10, 0, # 28, dest=41 dis.opmap["LOAD_FAST"], 1, 0, # 31, arg=1 dis.opmap["LOAD_CONST"], 2, 0, # 34, arg=2 dis.opmap["INPLACE_ADD"], # 37 dis.opmap["STORE_FAST"], 1, 0, # 38, arg=1 dis.opmap["LOAD_FAST"], 1, 0, # 41, arg=1 dis.opmap["RETURN_VALUE"], # 44 ]) def testDiamond(self): self.assertEqual(self.codeDiamond.func_code.co_code, self.codeDiamondBytecode) cfg = pycfg.CFG() table = cfg.get_block_table(self.codeDiamond.func_code) expected = [(0, 15), (18, 28), (31, 38), (41, 44)] self.checkBlocks(table, expected) bb = 
table.get_basic_block # Check the POP_JUMP_IF_FALSE conditional jump self.assertItemsEqual(bb(0).outgoing, [bb(18), bb(31)]) # Check the jumps at the end of the 2 of branches self.assertItemsEqual(bb(18).outgoing, [bb(41)]) self.assertItemsEqual(bb(38).outgoing, [bb(41)]) # Check the return self.assertItemsEqual(bb(44).outgoing, [None]) # Check the incoming of the entry block self.assertItemsEqual(bb(0).incoming, []) # Check the incoming of the 2 if branches self.assertItemsEqual(bb(18).incoming, [bb(15)]) self.assertItemsEqual(bb(31).incoming, [bb(15)]) # Check incoming of the merge block. self.assertItemsEqual(bb(44).incoming, [bb(28), bb(38)]) def testDiamondDominators(self): cfg = pycfg.CFG() table = cfg.get_block_table(self.codeDiamond.func_code) bb = table.get_basic_block self.assertEqual(bb(0)._dominators, {bb(0)}) self.assertEqual(bb(18)._dominators, {bb(0), bb(18)}) self.assertEqual(bb(31)._dominators, {bb(0), bb(31)}) self.assertEqual(bb(41)._dominators, {bb(0), bb(41)}) self.assertTrue(table.dominates(0, 37)) self.assertFalse(table.dominates(24, 41)) self.assertTrue(table.dominates(21, 28)) self.assertFalse(table.dominates(28, 21)) def testDiamondOrder(self): cfg = pycfg.CFG() table = cfg.get_block_table(self.codeDiamond.func_code) bb = table.get_basic_block self.assertEqual(table.get_ancestors_first_traversal(), [bb(o) for o in [0, 18, 31, 41]]) @staticmethod def codeLoop(y): z = 0 for x in y: z += x return z codeLoopBytecode = pycfg._list_to_string([ dis.opmap["LOAD_CONST"], 1, 0, # 0, arg=1 dis.opmap["STORE_FAST"], 1, 0, # 3, arg=1 dis.opmap["SETUP_LOOP"], 24, 0, # 6, dest=33 dis.opmap["LOAD_FAST"], 0, 0, # 9, arg=0 dis.opmap["GET_ITER"], # 12 dis.opmap["FOR_ITER"], 16, 0, # 13, dest=32 dis.opmap["STORE_FAST"], 2, 0, # 16, arg=2 dis.opmap["LOAD_FAST"], 1, 0, # 19, arg=1 dis.opmap["LOAD_FAST"], 2, 0, # 22, arg=2 dis.opmap["INPLACE_ADD"], # 25 dis.opmap["STORE_FAST"], 1, 0, # 26, arg=1 dis.opmap["JUMP_ABSOLUTE"], 13, 0, # 29, dest=13 
dis.opmap["POP_BLOCK"], # 32 dis.opmap["LOAD_FAST"], 1, 0, # 33, arg=1 dis.opmap["RETURN_VALUE"], # 36 ]) def testLoop(self): self.assertEqual(self.codeLoop.func_code.co_code, self.codeLoopBytecode) cfg = pycfg.CFG() table = cfg.get_block_table(self.codeLoop.func_code) expected = [(0, 12), (13, 13), (16, 29), (32, 32), (33, 36)] self.checkBlocks(table, expected) bb = table.get_basic_block # Check outgoing of the loop handler instruction. self.assertItemsEqual(bb(13).outgoing, [bb(16), bb(32)]) self.assertItemsEqual(bb(0).outgoing, [bb(13)]) def testLoopDominators(self): cfg = pycfg.CFG() table = cfg.get_block_table(self.codeLoop.func_code) bb = table.get_basic_block self.assertEqual(bb(0)._dominators, {bb(0)}) self.assertEqual(bb(13)._dominators, {bb(0), bb(13)}) self.assertEqual(bb(16)._dominators, {bb(0), bb(13), bb(16)}) self.assertEqual(bb(32)._dominators, {bb(0), bb(13), bb(32)}) self.assertEqual(bb(33)._dominators, {bb(0), bb(13), bb(32), bb(33)}) def testLoopOrder(self): cfg = pycfg.CFG() table = cfg.get_block_table(self.codeLoop.func_code) bb = table.get_basic_block self.assertEqual(table.get_ancestors_first_traversal(), [bb(o) for o in [0, 13, 16, 32, 33]]) @staticmethod def codeNestedLoops(y): z = 0 for x in y: for x in y: z += x*x return z codeNestedLoopsLineNumber = line_number() - 5 # See comment on codeTriangleLineNumber above. 
codeNestedLoopsBytecode = pycfg._list_to_string([ dis.opmap["LOAD_CONST"], 1, 0, # 0, arg=1 dis.opmap["STORE_FAST"], 1, 0, # 3, arg=1 dis.opmap["SETUP_LOOP"], 45, 0, # 6, dest=54 dis.opmap["LOAD_FAST"], 0, 0, # 9, arg=0 dis.opmap["GET_ITER"], # 12 dis.opmap["FOR_ITER"], 37, 0, # 13, dest=53 dis.opmap["STORE_FAST"], 2, 0, # 16, arg=2 dis.opmap["SETUP_LOOP"], 28, 0, # 19, dest=50 dis.opmap["LOAD_FAST"], 0, 0, # 22, arg=0 dis.opmap["GET_ITER"], # 25 dis.opmap["FOR_ITER"], 20, 0, # 26, dest=49 dis.opmap["STORE_FAST"], 2, 0, # 29, arg=2 dis.opmap["LOAD_FAST"], 1, 0, # 32, arg=1 dis.opmap["LOAD_FAST"], 2, 0, # 35, arg=2 dis.opmap["LOAD_FAST"], 2, 0, # 38, arg=2 dis.opmap["BINARY_MULTIPLY"], # 41 dis.opmap["INPLACE_ADD"], # 42 dis.opmap["STORE_FAST"], 1, 0, # 43, arg=1 dis.opmap["JUMP_ABSOLUTE"], 26, 0, # 46, dest=26 dis.opmap["POP_BLOCK"], # 49 dis.opmap["JUMP_ABSOLUTE"], 13, 0, # 50, dest=13 dis.opmap["POP_BLOCK"], # 53 dis.opmap["LOAD_FAST"], 1, 0, # 54, arg=1 dis.opmap["RETURN_VALUE"], # 57 ]) def testNestedLoops(self): self.assertEqual(self.codeNestedLoops.func_code.co_code, self.codeNestedLoopsBytecode) cfg = pycfg.CFG() table = cfg.get_block_table(self.codeNestedLoops.func_code) expected = [(0, 12), (13, 13), (16, 25), (26, 26), (29, 46), (49, 49), (50, 50), (53, 53), (54, 57)] self.checkBlocks(table, expected) bb = table.get_basic_block self.assertItemsEqual(bb(13).incoming, [bb(12), bb(50)]) self.assertItemsEqual(bb(13).outgoing, [bb(16), bb(53)]) self.assertItemsEqual(bb(26).incoming, [bb(25), bb(46)]) self.assertItemsEqual(bb(26).outgoing, [bb(29), bb(49)]) self.assertEndsWith( bb(43).get_name(), "tests/test_pycfg.py:{}-{}".format(self.codeNestedLoopsLineNumber + 2, self.codeNestedLoopsLineNumber + 3)) def testNestedLoopsDominators(self): cfg = pycfg.CFG() table = cfg.get_block_table(self.codeNestedLoops.func_code) bb = table.get_basic_block self.assertEqual(bb(0)._dominators, {bb(0)}) self.assertEqual(bb(13)._dominators, {bb(0), bb(13)}) 
self.assertEqual(bb(16)._dominators, {bb(0), bb(13), bb(16)}) self.assertEqual(bb(26)._dominators, {bb(0), bb(13), bb(16), bb(26)}) self.assertEqual(bb(29)._dominators, {bb(0), bb(13), bb(16), bb(26), bb(29)}) self.assertEqual(bb(49)._dominators, {bb(0), bb(13), bb(16), bb(26), bb(49)}) self.assertEqual(bb(50)._dominators, {bb(0), bb(13), bb(16), bb(26), bb(49), bb(50)}) self.assertEqual(bb(53)._dominators, {bb(0), bb(13), bb(53)}) self.assertEqual(bb(54)._dominators, {bb(0), bb(13), bb(53), bb(54)}) def testNestedLoopsReachable(self): cfg = pycfg.CFG() table = cfg.get_block_table(self.codeNestedLoops.func_code) bb = table.get_basic_block self.assertEqual(bb(26)._reachable_from, set([bb(0), bb(13), bb(16), bb(26), bb(29), bb(49), bb(50)])) self.assertTrue(table.reachable_from(41, 50)) self.assertTrue(table.reachable_from(50, 41)) self.assertFalse(table.reachable_from(41, 53)) def testNestedLoopsOrder(self): cfg = pycfg.CFG() table = cfg.get_block_table(self.codeNestedLoops.func_code) bb = table.get_basic_block self.assertEqual(table.get_ancestors_first_traversal(), [bb(o) for o in [0, 13, 16, 26, 29, 49, 50, 53, 54]]) @staticmethod def codeContinue(y): z = 0 for x in y: if x == 1: continue z += x*x return z codeContinueBytecode = pycfg._list_to_string([ dis.opmap["LOAD_CONST"], 1, 0, # 0, arg=1 dis.opmap["STORE_FAST"], 1, 0, # 3, arg=1 dis.opmap["SETUP_LOOP"], 46, 0, # 6, dest=55 dis.opmap["LOAD_FAST"], 0, 0, # 9, arg=0 dis.opmap["GET_ITER"], # 12 dis.opmap["FOR_ITER"], 38, 0, # 13, dest=54 dis.opmap["STORE_FAST"], 2, 0, # 16, arg=2 dis.opmap["LOAD_FAST"], 2, 0, # 19, arg=2 dis.opmap["LOAD_CONST"], 2, 0, # 22, arg=2 dis.opmap["COMPARE_OP"], 2, 0, # 25, arg=2 dis.opmap["POP_JUMP_IF_FALSE"], 37, 0, # 28, dest=37 dis.opmap["JUMP_ABSOLUTE"], 13, 0, # 31, dest=13 dis.opmap["JUMP_FORWARD"], 0, 0, # 34, dest=37 dis.opmap["LOAD_FAST"], 1, 0, # 37, arg=1 dis.opmap["LOAD_FAST"], 2, 0, # 40, arg=2 dis.opmap["LOAD_FAST"], 2, 0, # 43, arg=2 dis.opmap["BINARY_MULTIPLY"], # 46 
dis.opmap["INPLACE_ADD"], # 47 dis.opmap["STORE_FAST"], 1, 0, # 48, arg=1 dis.opmap["JUMP_ABSOLUTE"], 13, 0, # 51, dest=13 dis.opmap["POP_BLOCK"], # 54 dis.opmap["LOAD_FAST"], 1, 0, # 55, arg=1 dis.opmap["RETURN_VALUE"], # 58 ]) def testContinue(self): self.assertEqual(self.codeContinue.func_code.co_code, self.codeContinueBytecode) cfg = pycfg.CFG() table = cfg.get_block_table(self.codeContinue.func_code) bb = table.get_basic_block self.assertItemsEqual(bb(31).outgoing, [bb(13)]) self.assertItemsEqual(bb(13).incoming, [bb(12), bb(51), bb(31)]) self.assertItemsEqual(bb(13).outgoing, [bb(16), bb(54)]) @staticmethod def codeBreak(y): z = 0 for x in y: if x == 1: break z += x*x return z codeBreakBytecode = pycfg._list_to_string([ dis.opmap["LOAD_CONST"], 1, 0, # 0, arg=1 dis.opmap["STORE_FAST"], 1, 0, # 3, arg=1 dis.opmap["SETUP_LOOP"], 44, 0, # 6, dest=53 dis.opmap["LOAD_FAST"], 0, 0, # 9, arg=0 dis.opmap["GET_ITER"], # 12 dis.opmap["FOR_ITER"], 36, 0, # 13, dest=52 dis.opmap["STORE_FAST"], 2, 0, # 16, arg=2 dis.opmap["LOAD_FAST"], 2, 0, # 19, arg=2 dis.opmap["LOAD_CONST"], 2, 0, # 22, arg=2 dis.opmap["COMPARE_OP"], 2, 0, # 25, arg=2 dis.opmap["POP_JUMP_IF_FALSE"], 35, 0, # 28, dest=35 dis.opmap["BREAK_LOOP"], # 31 dis.opmap["JUMP_FORWARD"], 0, 0, # 32, dest=35 dis.opmap["LOAD_FAST"], 1, 0, # 35, arg=1 dis.opmap["LOAD_FAST"], 2, 0, # 38, arg=2 dis.opmap["LOAD_FAST"], 2, 0, # 41, arg=2 dis.opmap["BINARY_MULTIPLY"], # 44 dis.opmap["INPLACE_ADD"], # 45 dis.opmap["STORE_FAST"], 1, 0, # 46, arg=1 dis.opmap["JUMP_ABSOLUTE"], 13, 0, # 49, dest=13 dis.opmap["POP_BLOCK"], # 52 dis.opmap["LOAD_FAST"], 1, 0, # 53, arg=1 dis.opmap["RETURN_VALUE"], # 56 ]) def testBreak(self): self.assertEqual(self.codeBreak.func_code.co_code, self.codeBreakBytecode) cfg = pycfg.CFG() table = cfg.get_block_table(self.codeBreak.func_code) bb = table.get_basic_block self.assertItemsEqual(bb(13).incoming, [bb(12), bb(49)]) self.assertItemsEqual(bb(31).incoming, [bb(28)]) 
self.assertItemsEqual(bb(31).outgoing, [None]) # TODO(ampere): This is correct, however more information would make the # following succeed. # self.assertItemsEqual(bb(31).incoming, [53]) @staticmethod def codeYield(): yield 1 yield 2 yield 3 codeYieldBytecode = pycfg._list_to_string([ dis.opmap["LOAD_CONST"], 1, 0, # 0, arg=1 dis.opmap["YIELD_VALUE"], # 3 dis.opmap["POP_TOP"], # 4 dis.opmap["LOAD_CONST"], 2, 0, # 5, arg=2 dis.opmap["YIELD_VALUE"], # 8 dis.opmap["POP_TOP"], # 9 dis.opmap["LOAD_CONST"], 3, 0, # 10, arg=3 dis.opmap["YIELD_VALUE"], # 13 dis.opmap["POP_TOP"], # 14 dis.opmap["LOAD_CONST"], 0, 0, # 15, arg=0 dis.opmap["RETURN_VALUE"], # 18 ]) def testYield(self): self.assertEqual(self.codeYield.func_code.co_code, self.codeYieldBytecode) cfg = pycfg.CFG() table = cfg.get_block_table(self.codeYield.func_code) expected = [(0, 3), (4, 8), (9, 13), (14, 18)] self.checkBlocks(table, expected) bb = table.get_basic_block # We both branch to unknown and to the best instruction for each yield. 
self.assertItemsEqual(bb(0).outgoing, [None, bb(4)]) self.assertItemsEqual(bb(4).outgoing, [None, bb(9)]) self.assertItemsEqual(bb(9).incoming, [bb(8)]) self.assertItemsEqual(bb(9).outgoing, [None, bb(14)]) @staticmethod def codeRaise(): raise ValueError() return 0 # pylint: disable=unreachable codeRaiseBytecode = pycfg._list_to_string([ dis.opmap["LOAD_GLOBAL"], 0, 0, # 0, arg=0 dis.opmap["CALL_FUNCTION"], 0, 0, # 3, arg=0 dis.opmap["RAISE_VARARGS"], 1, 0, # 6, arg=1 dis.opmap["LOAD_CONST"], 1, 0, # 9, arg=1 dis.opmap["RETURN_VALUE"], # 12 ]) def testRaise(self): self.assertEqual(self.codeRaise.func_code.co_code, self.codeRaiseBytecode) cfg = pycfg.CFG() table = cfg.get_block_table(self.codeRaise.func_code) expected = [(0, 3), (6, 6), (9, 12)] self.checkBlocks(table, expected) bb = table.get_basic_block # CALL_FUNCTION could either continue or raise self.assertItemsEqual(bb(0).outgoing, [bb(6), None]) # RAISE_VARARGS always raises self.assertItemsEqual(bb(6).outgoing, [None]) # This basic block is unreachable self.assertItemsEqual(bb(9).incoming, []) # We return to an unknown location self.assertItemsEqual(bb(9).outgoing, [None]) def testRaiseOrder(self): cfg = pycfg.CFG() table = cfg.get_block_table(self.codeRaise.func_code) bb = table.get_basic_block self.assertEqual(table.get_ancestors_first_traversal(), [bb(o) for o in [0, 6, 9]]) class InstructionsIndexTest(unittest.TestCase): @staticmethod def simple_function(x): x += 1 y = 4 x **= y return x + y def setUp(self): self.index = pycfg.InstructionsIndex(self.simple_function.func_code.co_code) def testNext(self): self.assertEqual(self.index.next(0), 3) self.assertEqual(self.index.next(6), 7) self.assertEqual(self.index.next(23), 26) def testPrev(self): self.assertEqual(self.index.prev(3), 0) self.assertEqual(self.index.prev(7), 6) self.assertEqual(self.index.prev(26), 23) def testRoundTrip(self): offset = 3 while offset < len(self.simple_function.func_code.co_code)-1: 
self.assertEqual(self.index.prev(self.index.next(offset)), offset) self.assertEqual(self.index.next(self.index.prev(offset)), offset) offset = self.index.next(offset) class BytecodeReprTest(unittest.TestCase): def checkRoundTrip(self, code): self.assertEqual(eval(pycfg._bytecode_repr(code)), code) def testOtherTestMethods(self): for method in CFGTest.__dict__: if hasattr(method, "func_code"): self.checkRoundTrip(method.func_code.co_code) def testThisTestMethods(self): for method in BytecodeReprTest.__dict__: if hasattr(method, "func_code"): self.checkRoundTrip(method.func_code.co_code) if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG) unittest.main()
35.189313
80
0.5838
39c970e5dae48b7b2e205e34d5587584b0857221
4,158
py
Python
mxnet_benchmarks/custom_operations/custom_operations.py
sandeep-krishnamurthy/dl-operator-benchmark
965797d2b847c840a4b8ef29c70c631f6642890a
[ "Apache-2.0" ]
6
2019-05-01T22:05:05.000Z
2020-02-13T19:07:27.000Z
mxnet_benchmarks/custom_operations/custom_operations.py
sandeep-krishnamurthy/dl-operator-benchmark
965797d2b847c840a4b8ef29c70c631f6642890a
[ "Apache-2.0" ]
2
2019-11-09T06:38:09.000Z
2019-11-09T06:41:44.000Z
mxnet_benchmarks/custom_operations/custom_operations.py
sandeep-krishnamurthy/dl-operator-benchmark
965797d2b847c840a4b8ef29c70c631f6642890a
[ "Apache-2.0" ]
null
null
null
import mxnet as mx import mxnet.ndarray as nd from mxnet_benchmarks.MXNetOperatorBenchmark import MXNetOperatorBenchmarkBase from mxnet_benchmarks.utils.ndarray_utils import get_mx_ndarray from utils.profiler_utils import timer """ MXNet's Custom Operator Benchmark Tests. It does a simple element wise addition to make sure computation is not too much and we can see custom operator logistics overhead. 1. Tests Custom v/s Imperative (Native NDArray) 2. Tests Custom v/s Symbolic (Native Symbol with Simple Bind) """ # 1. Define Custom Operator - Element wise Addition Multiplication class CustomAddOne(mx.operator.CustomOp): def forward(self, is_train, req, in_data, out_data, aux): self.assign(out_data[0], req[0], in_data[0] + 1) def backward(self, req, out_grad, in_data, out_data, in_grad, aux): self.assign(in_grad[0], req[0], out_grad[0]) @mx.operator.register("CustomAddOne") class CustomAddOneProp(mx.operator.CustomOpProp): def __init__(self): super(CustomAddOneProp, self).__init__(need_top_grad=True) def list_arguments(self): return ['in'] def list_outputs(self): return ['output'] def infer_shape(self, in_shape): # inputs, outputs, aux return [in_shape[0]], [in_shape[0]], [] def create_operator(self, ctx, shapes, dtypes): return CustomAddOne() # 2. Benchmarks class CustomOpElementwiseAdd(MXNetOperatorBenchmarkBase): """Helps to benchmark MXNet's Custom Op for Elementwise addition on a (1000, 1) tensor. Performs both forward and backward operation. This test mainly uncovers core custom op overhead in MXNet. Benchmark will be done on the following operation: native_add -> native_add -> native_add -> CUSTOM_ADD -> native_add -> native_add -> native_add By default run on 'float32' precision. 
""" def __init__(self, ctx=mx.cpu(), warmup=10, runs=50, inputs=None): # Set the default Inputs default_parameters = {"data": (1000, 1), "data_initializer": nd.normal, "run_backward": True, "dtype": "float32"} super().__init__(ctx=ctx, warmup=warmup, runs=runs, default_parameters=default_parameters, custom_parameters=inputs) self.data = get_mx_ndarray(ctx=self.ctx, in_tensor=self.inputs["data"], dtype=self.inputs["dtype"], initializer=self.inputs["data_initializer"], attach_grad=self.inputs["run_backward"]) @timer def _run_forward_backward_benchmark(self, runs, x): for _ in range(runs): with mx.autograd.record(): # Forward res1 = x + 1 res2 = res1 + 1 res3 = res2 + 1 res4 = nd.Custom(res3, name="customaddone", op_type="CustomAddOne") res5 = res4 + 1 res6 = res5 + 1 res7 = res6 + 1 # Backward res7.backward() nd.waitall() def run_benchmark(self): # Warm up, ignore execution time value _, _ = self._run_forward_backward_benchmark(runs=self.warmup, x=self.data) # Run Benchmarks exe_time, _ = self._run_forward_backward_benchmark(runs=self.runs, x=self.data) self.results["MX_CustomOp_Elementwise_Add_Forward_Backward_Time"] = exe_time / self.runs # Utilities def run_customop_operations_benchmarks(ctx, inputs): """Helper to run all MXNet custom op benchmarks. Just runs the benchmarks with default input values. This just a utility to run benchmarks with all default input values. TODO: Capture results in a clean dictionary rather than printing everything to console. """ customop_operations_results = [] benchmark_ref = CustomOpElementwiseAdd(ctx=ctx, inputs=inputs) benchmark_ref.run_benchmark() benchmark_ref.print_benchmark_results() customop_operations_results.append(benchmark_ref.get_benchmark_results()) return customop_operations_results
35.237288
104
0.657287
0979563d043ce86a4304bee2b6bf4695d42f0809
1,515
py
Python
importasol/db/gestorsol/otros.py
telenieko/importasol
92848ff369b664bee5077b821830d690c31e7830
[ "BSD-3-Clause" ]
null
null
null
importasol/db/gestorsol/otros.py
telenieko/importasol
92848ff369b664bee5077b821830d690c31e7830
[ "BSD-3-Clause" ]
null
null
null
importasol/db/gestorsol/otros.py
telenieko/importasol
92848ff369b664bee5077b821830d690c31e7830
[ "BSD-3-Clause" ]
1
2022-02-02T14:29:33.000Z
2022-02-02T14:29:33.000Z
from ..base import SOLFile from ..fields import CampoA, CampoN, CampoB, CampoND, CampoCuenta class FPA(SOLFile): """ Formas de pago/cobro. """ cA = CampoA("Codigo", size=3, truncate=False) cB = CampoA("Descripcion", size=100) cC = CampoN("Numero Vencimientos", size=1) cD = CampoB("Pagos proporcionales", default=False) cE = CampoN("Dias vto 1", size=3) cK = CampoND("Proporcion vto 1", size=4) cQ = CampoB("Efectivo") cR = CampoN("Meses o Dias", size=1) cS = CampoA("Codigo en Factura-e", size=5, truncate=False) class Meta: tabla = 'FPA' def __unicode__(self): t = u"FPA(%s: %s)" % (self.cA, self.cB) return t __str__ = __unicode__ __repr__ = __unicode__ class FAM(SOLFile): """ Familias. """ cA = CampoA("Codigo", size=3, truncate=False) cB = CampoA("Descripcion", size=50) cC = CampoCuenta("Cuenta compras", size=10) cD = CampoCuenta("Cuenta Ventas", size=10) class Meta: tabla = 'FAM' def __unicode__(self): t = u"FAM(%s: %s)" % (self.cA, self.cB) return t __str__ = __unicode__ __repr__ = __unicode__ class AGE(SOLFile): """ Agentes. """ cA = CampoA("Codigo", size=3, truncate=False) cB = CampoA("Nombre", size=100) cL = CampoND("Comision", size=5) class Meta: tabla = 'AGE' def __unicode__(self): t = u"AGE(%s: %s)" % (self.cA, self.cB) return t __str__ = __unicode__ __repr__ = __unicode__
22.61194
65
0.592079
353883559497a208043d91b98ce5dc4d54d17fe4
536
py
Python
nn/sk_perceptron.py
binzhihao/py-ai-project
64491f6e5ff2007fd7abad5ee0c6058a914d1f84
[ "MIT" ]
null
null
null
nn/sk_perceptron.py
binzhihao/py-ai-project
64491f6e5ff2007fd7abad5ee0c6058a914d1f84
[ "MIT" ]
null
null
null
nn/sk_perceptron.py
binzhihao/py-ai-project
64491f6e5ff2007fd7abad5ee0c6058a914d1f84
[ "MIT" ]
null
null
null
import pandas as pd import numpy as np import data.iris.loader as loader from sklearn.linear_model import Perceptron if __name__ == '__main__': x, y = loader.load_iris() # 探索数据 print(x.head()) print('species: ', pd.unique(y.species)) # 花瓣长度,宽度 x_train = x.loc[:, ['petal_length', 'petal_width']] # 转成0-1二分类 y_train = (y.species == 'setosa').astype(np.int) per_clf = Perceptron(random_state=1) per_clf.fit(x_train, y_train) y_predict = per_clf.predict([[3.5, 0.2]]) print(y_predict)
22.333333
55
0.651119
741dae51137b1b899b9650ba708a92a800c72f05
8,055
py
Python
lib/ansiblelint/rules/__init__.py
moreati/ansible-lint
4d8b52f42879109b02dc3211962421064c90af16
[ "MIT" ]
null
null
null
lib/ansiblelint/rules/__init__.py
moreati/ansible-lint
4d8b52f42879109b02dc3211962421064c90af16
[ "MIT" ]
28
2020-09-01T20:06:28.000Z
2022-02-13T19:01:50.000Z
lib/ansiblelint/rules/__init__.py
moreati/ansible-lint
4d8b52f42879109b02dc3211962421064c90af16
[ "MIT" ]
null
null
null
"""All internal ansible-lint rules.""" import glob import importlib.util import logging import os import re from collections import defaultdict from importlib.abc import Loader from typing import List import ansiblelint.utils from ansiblelint.errors import MatchError from ansiblelint.skip_utils import append_skipped_rules, get_rule_skips_from_line _logger = logging.getLogger(__name__) class AnsibleLintRule(object): def __repr__(self) -> str: """Return a AnsibleLintRule instance representation.""" return self.id + ": " + self.shortdesc def verbose(self) -> str: return self.id + ": " + self.shortdesc + "\n " + self.description id: str = "" tags: List[str] = [] shortdesc: str = "" description: str = "" match = None matchtask = None matchplay = None @staticmethod def unjinja(text): text = re.sub(r"{{.+?}}", "JINJA_EXPRESSION", text) text = re.sub(r"{%.+?%}", "JINJA_STATEMENT", text) text = re.sub(r"{#.+?#}", "JINJA_COMMENT", text) return text def matchlines(self, file, text) -> List[MatchError]: matches: List[MatchError] = [] if not self.match: return matches # arrays are 0-based, line numbers are 1-based # so use prev_line_no as the counter for (prev_line_no, line) in enumerate(text.split("\n")): if line.lstrip().startswith('#'): continue rule_id_list = get_rule_skips_from_line(line) if self.id in rule_id_list: continue result = self.match(file, line) if not result: continue message = None if isinstance(result, str): message = result m = MatchError( message=message, linenumber=prev_line_no + 1, details=line, filename=file['path'], rule=self) matches.append(m) return matches # TODO(ssbarnea): Reduce mccabe complexity # https://github.com/ansible/ansible-lint/issues/744 def matchtasks(self, file: str, text: str) -> List[MatchError]: # noqa: C901 matches: List[MatchError] = [] if not self.matchtask: return matches if file['type'] == 'meta': return matches yaml = ansiblelint.utils.parse_yaml_linenumbers(text, file['path']) if not yaml: return matches yaml = 
append_skipped_rules(yaml, text, file['type']) try: tasks = ansiblelint.utils.get_normalized_tasks(yaml, file) except MatchError as e: return [e] for task in tasks: if self.id in task.get('skipped_rules', ()): continue if 'action' not in task: continue result = self.matchtask(file, task) if not result: continue message = None if isinstance(result, str): message = result task_msg = "Task/Handler: " + ansiblelint.utils.task_to_str(task) m = MatchError( message=message, linenumber=task[ansiblelint.utils.LINE_NUMBER_KEY], details=task_msg, filename=file['path'], rule=self) matches.append(m) return matches @staticmethod def _matchplay_linenumber(play, optional_linenumber): try: linenumber, = optional_linenumber except ValueError: linenumber = play[ansiblelint.utils.LINE_NUMBER_KEY] return linenumber def matchyaml(self, file: str, text: str) -> List[MatchError]: matches: List[MatchError] = [] if not self.matchplay: return matches yaml = ansiblelint.utils.parse_yaml_linenumbers(text, file['path']) if not yaml: return matches if isinstance(yaml, dict): yaml = [yaml] yaml = ansiblelint.skip_utils.append_skipped_rules(yaml, text, file['type']) for play in yaml: if self.id in play.get('skipped_rules', ()): continue result = self.matchplay(file, play) if not result: continue if isinstance(result, tuple): result = [result] if not isinstance(result, list): raise TypeError("{} is not a list".format(result)) for section, message, *optional_linenumber in result: linenumber = self._matchplay_linenumber(play, optional_linenumber) m = MatchError( message=message, linenumber=linenumber, details=str(section), filename=file['path'], rule=self) matches.append(m) return matches def load_plugins(directory: str) -> List[AnsibleLintRule]: """Return a list of rule classes.""" result = [] for pluginfile in glob.glob(os.path.join(directory, '[A-Za-z]*.py')): pluginname = os.path.basename(pluginfile.replace('.py', '')) spec = importlib.util.spec_from_file_location(pluginname, pluginfile) # 
https://github.com/python/typeshed/issues/2793 if spec and isinstance(spec.loader, Loader): module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) obj = getattr(module, pluginname)() result.append(obj) return result class RulesCollection(object): def __init__(self, rulesdirs=None) -> None: """Initialize a RulesCollection instance.""" if rulesdirs is None: rulesdirs = [] self.rulesdirs = ansiblelint.utils.expand_paths_vars(rulesdirs) self.rules: List[AnsibleLintRule] = [] for rulesdir in self.rulesdirs: _logger.debug("Loading rules from %s", rulesdir) self.extend(load_plugins(rulesdir)) self.rules = sorted(self.rules, key=lambda r: r.id) def register(self, obj: AnsibleLintRule): self.rules.append(obj) def __iter__(self): """Return the iterator over the rules in the RulesCollection.""" return iter(self.rules) def __len__(self): """Return the length of the RulesCollection data.""" return len(self.rules) def extend(self, more: List[AnsibleLintRule]) -> None: self.rules.extend(more) def run(self, playbookfile, tags=set(), skip_list=frozenset()) -> List: text = "" matches: List = list() try: with open(playbookfile['path'], mode='r', encoding='utf-8') as f: text = f.read() except IOError as e: _logger.warning( "Couldn't open %s - %s", playbookfile['path'], e.strerror) return matches for rule in self.rules: if not tags or not set(rule.tags).union([rule.id]).isdisjoint(tags): rule_definition = set(rule.tags) rule_definition.add(rule.id) if set(rule_definition).isdisjoint(skip_list): matches.extend(rule.matchlines(playbookfile, text)) matches.extend(rule.matchtasks(playbookfile, text)) matches.extend(rule.matchyaml(playbookfile, text)) return matches def __repr__(self) -> str: """Return a RulesCollection instance representation.""" return "\n".join([rule.verbose() for rule in sorted(self.rules, key=lambda x: x.id)]) def listtags(self) -> str: tags = defaultdict(list) for rule in self.rules: for tag in rule.tags: 
tags[tag].append("[{0}]".format(rule.id)) results = [] for tag in sorted(tags): results.append("{0} {1}".format(tag, tags[tag])) return "\n".join(results)
32.743902
84
0.569957
6ffb73d726e8be8ded0452418cbe95d98da901da
966
py
Python
leetcode/p59.py
mythnc/lab
9f69482a063e3cfce2ce8832c2ef1425658c31b9
[ "MIT" ]
null
null
null
leetcode/p59.py
mythnc/lab
9f69482a063e3cfce2ce8832c2ef1425658c31b9
[ "MIT" ]
null
null
null
leetcode/p59.py
mythnc/lab
9f69482a063e3cfce2ce8832c2ef1425658c31b9
[ "MIT" ]
null
null
null
from typing import List class Solution: def generateMatrix(self, n: int) -> List[List[int]]: r = 0 c = 0 next_direction = ((0, 1), (1, 0), (0, -1), (-1, 0)) next_direction_index = 0 result = [[0] * n for _ in range(n)] for ele in range(1, n**2+1): result[r][c] = ele next_r = r + next_direction[next_direction_index][0] next_c = c + next_direction[next_direction_index][1] if not (0 <= next_r < n and 0 <= next_c < n and result[next_r][next_c] == 0): next_direction_index = (next_direction_index + 1) % 4 next_r = r + next_direction[next_direction_index][0] next_c = c + next_direction[next_direction_index][1] r, c = next_r, next_c return result print(Solution().generateMatrix(1)) print(Solution().generateMatrix(2)) print(Solution().generateMatrix(3)) print(Solution().generateMatrix(10))
33.310345
89
0.578675
45e18b454e9a7174b84b0232e97ba7a7249c7d8d
718
py
Python
Python Script Tools/11.0 Game Guess The Number.py
juan1305/0.11-incremento_descremento
954ddb32180c3197e5b01cf95d20f5325ada8a29
[ "MIT" ]
1
2020-04-13T00:16:16.000Z
2020-04-13T00:16:16.000Z
Python Script Tools/11.0 Game Guess The Number.py
juan1305/0.11-incremento_descremento
954ddb32180c3197e5b01cf95d20f5325ada8a29
[ "MIT" ]
null
null
null
Python Script Tools/11.0 Game Guess The Number.py
juan1305/0.11-incremento_descremento
954ddb32180c3197e5b01cf95d20f5325ada8a29
[ "MIT" ]
null
null
null
#Este es un juego de adivinar el numero import random intentos = 0 print('Hola! Cual es tu nombre?') nombre = input() numero = random.randint(1, 20) print('Bueno, '+nombre+', piensa un numero entre 1 y 20.') while intentos < 6: print('Adivinalo! Tienes 6 intentos') adivina = input() adivina = int(adivina) intentos = intentos+1 if adivina < numero: print("Demasiado pequeño!") if adivina > numero: print("Demasiado grande!") if adivina == numero: break if adivina == numero: intentos = str(intentos) print('Fabuloso, '+nombre+', acercaste el numero en ' + intentos+' intentos.') if adivina != numero: numero = str(numero) print('Que fatalidad ' +nombre+' Yo estaba pensando en el numero ' + numero)
28.72
79
0.696379
afdce9dbd92f297e9e76e8a182c450e4ff9cbffd
2,273
py
Python
Linked_list/linked_list_v1.py
AjayJohnAlex/Data-Structure
32ec3cb592b89a8828346d7e0209f1702bcd51b3
[ "MIT" ]
null
null
null
Linked_list/linked_list_v1.py
AjayJohnAlex/Data-Structure
32ec3cb592b89a8828346d7e0209f1702bcd51b3
[ "MIT" ]
null
null
null
Linked_list/linked_list_v1.py
AjayJohnAlex/Data-Structure
32ec3cb592b89a8828346d7e0209f1702bcd51b3
[ "MIT" ]
null
null
null
class Node:
    """A single element of a singly linked list."""

    def __init__(self, data=None):
        '''Creates a node in the linked list
        Takes a data and next_node(pointer to next node )'''
        self.data = data
        self.next_node = None

    def __repr__(self):
        return f"<NODE data is: {self.data} "


class LinkedList:
    """Singly linked list with O(1) insertion at the head."""

    def __init__(self):
        self.head = None

    def isEmpty(self):
        """Return True when the list holds no nodes."""
        return self.head is None

    def size(self):
        '''Return the no of nodes in a linked list
        and takes O(n) time'''
        current_node = self.head
        count = 0
        while current_node:
            # BUG FIX: previously read self.next_node (which does not exist on
            # LinkedList), raising AttributeError on any non-empty list instead
            # of advancing through the node chain.
            current_node = current_node.next_node
            count += 1
        return count

    def add(self, data):
        '''Adding a new node/item to the linked list:
        Create a node -> assign new_node.next_node as current head ->
        assign head to new node
        Takes O(1) time'''
        new_node = Node(data)
        new_node.next_node = self.head
        self.head = new_node

    def search(self, key):
        '''Search for a key in the linked list.
        If not found in the list then return None else return data found
        takes O(n) time'''
        current_node = self.head
        while current_node:
            if key == current_node.data:
                # Returns the node repr with a closing ">" appended.
                return f"{current_node}>"
            else:
                current_node = current_node.next_node
        return "Item not in Linked List"

    def __repr__(self):
        '''Return string rep of list
        takes O(n) time'''
        nodes = []
        current_node = self.head
        while current_node:
            if current_node == self.head:
                nodes.append(f"[START NODE: {current_node.data}]")
            elif current_node.next_node == None:
                nodes.append(f"[END NODE: {current_node.data}]")
            else:
                nodes.append(f"[ {current_node.data} ]")
            current_node = current_node.next_node
        return '->'.join(nodes)


ll = LinkedList()
ll.add(10)
print(ll.head)
ll.add(20)
print(ll.head)
ll.add(40)
print(ll.head)
print(ll)
print(ll.search(10))
print(ll.search(220))
25.539326
93
0.531016
c286a1d3a371c19195c4525508bc6f1c1f9dfd94
1,666
py
Python
apps/examples/display.py
leolani/pepper
2592b867bdb44dd23a0fa58f9f96b9f6083d6804
[ "MIT" ]
3
2020-11-18T10:29:58.000Z
2021-06-03T18:57:43.000Z
apps/examples/display.py
cltl-leolani/pepper
2592b867bdb44dd23a0fa58f9f96b9f6083d6804
[ "MIT" ]
14
2020-09-08T12:41:37.000Z
2020-10-19T08:42:47.000Z
apps/examples/display.py
cltl-leolani/pepper
2592b867bdb44dd23a0fa58f9f96b9f6083d6804
[ "MIT" ]
3
2020-11-27T12:49:08.000Z
2022-02-16T09:49:14.000Z
"""Example Application that displays what it sees in the browser"""

from pepper.app_container import ApplicationContainer, Application
from pepper.framework.application.context import ContextComponent
from pepper.framework.application.face_detection import FaceRecognitionComponent
from pepper.framework.application.intention import AbstractIntention
from pepper.framework.application.monitoring import MonitoringComponent
from pepper.framework.application.object_detection import ObjectDetectionComponent
from pepper.framework.application.speech_recognition import SpeechRecognitionComponent
from pepper.framework.application.statistics import StatisticsComponent
from pepper.framework.application.text_to_speech import TextToSpeechComponent


# Mixin-style application: all behaviour comes from the inherited component
# classes, so the class body itself has nothing to implement.
class DisplayIntention(ApplicationContainer, AbstractIntention,
                       StatisticsComponent,         # Show Performance Statistics in Terminal
                       MonitoringComponent,         # Display what Robot (or Computer) sees in browser
                       ContextComponent,            # Context (dependency of MonitoringComponent)
                       ObjectDetectionComponent,    # Object Detection (dependency of MonitoringComponent)
                       FaceRecognitionComponent,    # Face Recognition (dependency of MonitoringComponent)
                       SpeechRecognitionComponent,  # Speech Recognition Component (dependency)
                       TextToSpeechComponent):      # Text to Speech (dependency)
    pass  # Application does not need to react to events :)


if __name__ == '__main__':
    # Wrap the intention in an Application and start its event loop.
    Application(DisplayIntention()).run()
57.448276
108
0.733493
feeae46bb070918f2ca6f4c098e3593ec787dd00
1,406
py
Python
heudiconv/info.py
AKSoo/heudiconv
dcc590d0be8c514211c8a7323467acce2b338e9c
[ "Apache-2.0" ]
null
null
null
heudiconv/info.py
AKSoo/heudiconv
dcc590d0be8c514211c8a7323467acce2b338e9c
[ "Apache-2.0" ]
null
null
null
heudiconv/info.py
AKSoo/heudiconv
dcc590d0be8c514211c8a7323467acce2b338e9c
[ "Apache-2.0" ]
null
null
null
# Package metadata for heudiconv, consumed by setup.py.
__version__ = '0.6.0.dev1'
__author__ = 'HeuDiConv team and contributors'
__url__ = 'https://github.com/nipy/heudiconv'
__packagename__ = 'heudiconv'
__description__ = 'Heuristic DICOM Converter'
__license__ = 'Apache 2.0'
__longdesc__ = """Convert DICOM dirs based on heuristic info - HeuDiConv
uses the dcmstack package and dcm2niix tool to convert DICOM directories or
tarballs into collections of NIfTI files following pre-defined heuristic(s)."""

# Trove classifiers advertised on PyPI.
CLASSIFIERS = [
    'Environment :: Console',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: Apache Software License',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Topic :: Scientific/Engineering',
]

PYTHON_REQUIRES = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*"

# Runtime dependencies (nipype pin differs between py2 and py3).
REQUIRES = [
    'nibabel',
    'pydicom',
    'nipype >=1.0.0; python_version > "3.0"',
    'nipype >=1.0.0,!=1.2.1,!=1.2.2; python_version == "2.7"',
    'pathlib',
    'dcmstack>=0.7',
    'etelemetry',
]

TESTS_REQUIRES = [
    'six',
    'pytest',
    'mock',
    'tinydb',
    'inotify',
]

EXTRA_REQUIRES = {
    'tests': TESTS_REQUIRES,
    'extras': [],  # Requires patched version ATM ['dcmstack']
    'datalad': ['datalad'],
}

# Flatten every extras list into one combined 'all' target.
EXTRA_REQUIRES['all'] = [dep for deps in EXTRA_REQUIRES.values() for dep in deps]
28.12
79
0.641536
0900039bd8e1156c1e7759636c823caaec70bd0c
346
py
Python
src/test.py
imoted/pcl_tutorials_ros
3cbcf2c0d14aacbe9f176f370a95de61808db7c3
[ "MIT" ]
null
null
null
src/test.py
imoted/pcl_tutorials_ros
3cbcf2c0d14aacbe9f176f370a95de61808db7c3
[ "MIT" ]
null
null
null
src/test.py
imoted/pcl_tutorials_ros
3cbcf2c0d14aacbe9f176f370a95de61808db7c3
[ "MIT" ]
null
null
null
#!/usr/bin/python
"""Minimal rospy test node: publishes ten incrementing values on /test_topic."""
import rospy
# BUG FIX: String was referenced below without being imported, so creating the
# Publisher raised NameError as soon as the node started.
from std_msgs.msg import String

if __name__ == '__main__':
    rospy.init_node('test_node_python')
    rate = rospy.Rate(0.5)  # 0.5 Hz -> one message every 2 seconds
    test_pub = rospy.Publisher('/test_topic', String, queue_size=10)
    val = 0
    for i in range(10):
        test_pub.publish(str(val))
        rospy.loginfo('node is running')
        rate.sleep()
        val += 1
23.066667
68
0.612717
665540c7d335dcd7715ba58249901b3cac09b153
89,806
py
Python
salt/modules/postgres.py
lvg01/salt
9f42882ed7cc340d6dce6692a1ec1e2fe385d1bd
[ "Apache-2.0" ]
1
2016-05-20T09:15:57.000Z
2016-05-20T09:15:57.000Z
salt/modules/postgres.py
lvg01/salt
9f42882ed7cc340d6dce6692a1ec1e2fe385d1bd
[ "Apache-2.0" ]
null
null
null
salt/modules/postgres.py
lvg01/salt
9f42882ed7cc340d6dce6692a1ec1e2fe385d1bd
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- ''' Module to provide Postgres compatibility to salt. :configuration: In order to connect to Postgres, certain configuration is required in /etc/salt/minion on the relevant minions. Some sample configs might look like:: postgres.host: 'localhost' postgres.port: '5432' postgres.user: 'postgres' -> db user postgres.pass: '' postgres.maintenance_db: 'postgres' The default for the maintenance_db is 'postgres' and in most cases it can be left at the default setting. This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar :note: This module uses MD5 hashing which may not be compliant with certain security audits. ''' # This pylint error is popping up where there are no colons? # pylint: disable=E8203 # Import python libs from __future__ import absolute_import import datetime import distutils.version # pylint: disable=import-error,no-name-in-module import logging import hashlib import os import re import pipes import tempfile try: import csv HAS_CSV = True except ImportError: HAS_CSV = False # Import salt libs import salt.utils import salt.utils.itertools from salt.exceptions import SaltInvocationError # Import 3rd-party libs import salt.ext.six as six from salt.ext.six.moves import zip # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves import StringIO log = logging.getLogger(__name__) _DEFAULT_PASSWORDS_ENCRYPTION = True _EXTENSION_NOT_INSTALLED = 'EXTENSION NOT INSTALLED' _EXTENSION_INSTALLED = 'EXTENSION INSTALLED' _EXTENSION_TO_UPGRADE = 'EXTENSION TO UPGRADE' _EXTENSION_TO_MOVE = 'EXTENSION TO MOVE' _EXTENSION_FLAGS = ( _EXTENSION_NOT_INSTALLED, _EXTENSION_INSTALLED, _EXTENSION_TO_UPGRADE, _EXTENSION_TO_MOVE, ) _PRIVILEGES_MAP = { 'a': 'INSERT', 'C': 'CREATE', 'D': 'TRUNCATE', 'c': 'CONNECT', 't': 'TRIGGER', 'r': 'SELECT', 'U': 'USAGE', 'T': 'TEMPORARY', 'w': 'UPDATE', 'X': 'EXECUTE', 'x': 'REFERENCES', 'd': 'DELETE', '*': 'GRANT', } _PRIVILEGES_OBJECTS = frozenset( ( 
'schema', 'tablespace', 'language', 'sequence', 'table', 'group', 'database', ) ) _PRIVILEGE_TYPE_MAP = { 'table': 'arwdDxt', 'tablespace': 'C', 'language': 'U', 'sequence': 'rwU', 'schema': 'UC', 'database': 'CTc', } def __virtual__(): ''' Only load this module if the psql bin exists ''' if all((salt.utils.which('psql'), HAS_CSV)): return True return (False, 'The postgres execution module failed to load: ' 'either the psql or initdb binary are not in the path or ' 'the csv library is not available') def _run_psql(cmd, runas=None, password=None, host=None, port=None, user=None): ''' Helper function to call psql, because the password requirement makes this too much code to be repeated in each function below ''' kwargs = { 'reset_system_locale': False, 'clean_env': True, } if runas is None: if not host: host = __salt__['config.option']('postgres.host') if not host or host.startswith('/'): if 'FreeBSD' in __grains__['os_family']: runas = 'pgsql' if 'OpenBSD' in __grains__['os_family']: runas = '_postgresql' else: runas = 'postgres' if user is None: user = runas if runas: kwargs['runas'] = runas if password is None: password = __salt__['config.option']('postgres.pass') if password is not None: pgpassfile = salt.utils.mkstemp(text=True) with salt.utils.fopen(pgpassfile, 'w') as fp_: fp_.write('{0}:{1}:*:{2}:{3}'.format( 'localhost' if not host or host.startswith('/') else host, port if port else '*', user if user else '*', password, )) __salt__['file.chown'](pgpassfile, runas, '') kwargs['env'] = {'PGPASSFILE': pgpassfile} ret = __salt__['cmd.run_all'](cmd, python_shell=False, **kwargs) if ret.get('retcode', 0) != 0: log.error('Error connecting to Postgresql server') if password is not None and not __salt__['file.remove'](pgpassfile): log.warning('Remove PGPASSFILE failed') return ret def _run_initdb(name, auth='password', user=None, password=None, encoding='UTF8', locale=None, runas=None): ''' Helper function to call initdb ''' if runas is None: if 'FreeBSD' in 
__grains__['os_family']: runas = 'pgsql' if 'OpenBSD' in __grains__['os_family']: runas = '_postgresql' else: runas = 'postgres' if user is None: user = runas cmd = [ salt.utils.which('initdb'), '--pgdata={0}'.format(name), '--username={0}'.format(user), '--auth={0}'.format(auth), '--encoding={0}'.format(encoding), ] if locale is not None: cmd.append('--locale={0}'.format(locale)) if password is not None: pgpassfile = salt.utils.mkstemp(text=True) with salt.utils.fopen(pgpassfile, 'w') as fp_: fp_.write('{0}'.format(password)) __salt__['file.chown'](pgpassfile, runas, '') cmd.extend([ '--pwfile={0}'.format(pgpassfile), ]) kwargs = dict(runas=runas, clean_env=True) cmdstr = ' '.join([pipes.quote(c) for c in cmd]) ret = __salt__['cmd.run_all'](cmdstr, python_shell=False, **kwargs) if ret.get('retcode', 0) != 0: log.error('Error initilizing the postgres data directory') if password is not None and not __salt__['file.remove'](pgpassfile): log.warning('Removal of PGPASSFILE failed') return ret def version(user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Return the version of a Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.version ''' query = 'SELECT setting FROM pg_catalog.pg_settings ' \ 'WHERE name = \'server_version\'' cmd = _psql_cmd('-c', query, '-t', host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) ret = _run_psql( cmd, runas=runas, password=password, host=host, port=port, user=user) for line in salt.utils.itertools.split(ret['stdout'], '\n'): # Just return the first line return line def _parsed_version(user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Returns the server version properly parsed and int casted for internal use. If the Postgres server does not respond, None will be returned. 
''' psql_version = version( user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas, ) if psql_version: return distutils.version.LooseVersion(psql_version) else: log.warning('Attempt to parse version of Postgres server failed. ' 'Is the server responding?') return None def _connection_defaults(user=None, host=None, port=None, maintenance_db=None, password=None): ''' Returns a tuple of (user, host, port, db) with config, pillar, or default values assigned to missing values. ''' if not user: user = __salt__['config.option']('postgres.user') if not host: host = __salt__['config.option']('postgres.host') if not port: port = __salt__['config.option']('postgres.port') if not maintenance_db: maintenance_db = __salt__['config.option']('postgres.maintenance_db') if password is None: password = __salt__['config.option']('postgres.pass') return (user, host, port, maintenance_db, password) def _psql_cmd(*args, **kwargs): ''' Return string with fully composed psql command. Accept optional keyword arguments: user, host and port as well as any number or positional arguments to be added to the end of command. ''' (user, host, port, maintenance_db, password) = _connection_defaults( kwargs.get('user'), kwargs.get('host'), kwargs.get('port'), kwargs.get('maintenance_db'), kwargs.get('password')) cmd = [salt.utils.which('psql'), '--no-align', '--no-readline', '--no-password'] # It is never acceptable to issue a password prompt. 
if user: cmd += ['--username', user] if host: cmd += ['--host', host] if port: cmd += ['--port', str(port)] if not maintenance_db: maintenance_db = 'postgres' cmd.extend(['--dbname', maintenance_db]) cmd.extend(args) return cmd def _psql_prepare_and_run(cmd, host=None, port=None, maintenance_db=None, password=None, runas=None, user=None): rcmd = _psql_cmd( host=host, user=user, port=port, maintenance_db=maintenance_db, password=password, *cmd) cmdret = _run_psql( rcmd, runas=runas, password=password, host=host, port=port, user=user) return cmdret def psql_query(query, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Run an SQL-Query and return the results as a list. This command only supports SELECT statements. This limitation can be worked around with a query like this: WITH updated AS (UPDATE pg_authid SET rolconnlimit = 2000 WHERE rolname = 'rolename' RETURNING rolconnlimit) SELECT * FROM updated; CLI Example: .. code-block:: bash salt '*' postgres.psql_query 'select * from pg_stat_activity' ''' ret = [] csv_query = 'COPY ({0}) TO STDOUT WITH CSV HEADER'.format( query.strip().rstrip(';')) # always use the same datestyle settings to allow parsing dates # regardless what server settings are configured cmdret = _psql_prepare_and_run(['-v', 'datestyle=ISO,MDY', '-c', csv_query], runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) if cmdret['retcode'] > 0: return ret csv_file = StringIO(cmdret['stdout']) header = {} for row in csv.reader(csv_file, delimiter=',', quotechar='"'): if not row: continue if not header: header = row continue ret.append(dict(zip(header, row))) return ret # Database related actions def db_list(user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Return dictionary with information about databases of a Postgres server. CLI Example: .. 
code-block:: bash salt '*' postgres.db_list ''' ret = {} query = ( 'SELECT datname as "Name", pga.rolname as "Owner", ' 'pg_encoding_to_char(encoding) as "Encoding", ' 'datcollate as "Collate", datctype as "Ctype", ' 'datacl as "Access privileges", spcname as "Tablespace" ' 'FROM pg_database pgd, pg_roles pga, pg_tablespace pgts ' 'WHERE pga.oid = pgd.datdba AND pgts.oid = pgd.dattablespace' ) rows = psql_query(query, runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) for row in rows: ret[row['Name']] = row ret[row['Name']].pop('Name') return ret def db_exists(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Checks if a database exists on the Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.db_exists 'dbname' ''' databases = db_list(user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return name in databases # TODO properly implemented escaping def _quote_ddl_value(value, quote="'"): if value is None: return None if quote in value: # detect trivial sqli raise SaltInvocationError( 'Unsupported character {0} in value: {1}'.format(quote, value)) return "{quote}{value}{quote}".format(quote=quote, value=value) def db_create(name, user=None, host=None, port=None, maintenance_db=None, password=None, tablespace=None, encoding=None, lc_collate=None, lc_ctype=None, owner=None, template=None, runas=None): ''' Adds a databases to the Postgres server. CLI Example: .. 
code-block:: bash salt '*' postgres.db_create 'dbname' salt '*' postgres.db_create 'dbname' template=template_postgis ''' # Base query to create a database query = 'CREATE DATABASE "{0}"'.format(name) # "With"-options to create a database with_args = salt.utils.odict.OrderedDict([ ('TABLESPACE', _quote_ddl_value(tablespace, '"')), # owner needs to be enclosed in double quotes so postgres # doesn't get thrown by dashes in the name ('OWNER', _quote_ddl_value(owner, '"')), ('TEMPLATE', template), ('ENCODING', _quote_ddl_value(encoding)), ('LC_COLLATE', _quote_ddl_value(lc_collate)), ('LC_CTYPE', _quote_ddl_value(lc_ctype)), ]) with_chunks = [] for key, value in with_args.items(): if value is not None: with_chunks += [key, '=', value] # Build a final query if with_chunks: with_chunks.insert(0, ' WITH') query += ' '.join(with_chunks) # Execute the command ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return ret['retcode'] == 0 def db_alter(name, user=None, host=None, port=None, maintenance_db=None, password=None, tablespace=None, owner=None, owner_recurse=False, runas=None): ''' Change tablespace or/and owner of database. CLI Example: .. code-block:: bash salt '*' postgres.db_alter dbname owner=otheruser ''' if not any((tablespace, owner)): return True # Nothing todo? 
if owner and owner_recurse: ret = owner_to(name, owner, user=user, host=host, port=port, password=password, runas=runas) else: queries = [] if owner: queries.append('ALTER DATABASE "{0}" OWNER TO "{1}"'.format( name, owner )) if tablespace: queries.append('ALTER DATABASE "{0}" SET TABLESPACE "{1}"'.format( name, tablespace )) for query in queries: ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) if ret['retcode'] != 0: return False return True def db_remove(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Removes a databases from the Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.db_remove 'dbname' ''' # db doesn't exist, proceed query = 'DROP DATABASE "{0}"'.format(name) ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, runas=runas, maintenance_db=maintenance_db, password=password) return ret['retcode'] == 0 # Tablespace related actions def tablespace_list(user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Return dictionary with information about tablespaces of a Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.tablespace_list .. versionadded:: 2015.8.0 ''' ret = {} query = ( 'SELECT spcname as "Name", pga.rolname as "Owner", spcacl as "ACL", ' 'spcoptions as "Opts", pg_tablespace_location(pgts.oid) as "Location" ' 'FROM pg_tablespace pgts, pg_roles pga WHERE pga.oid = pgts.spcowner' ) rows = __salt__['postgres.psql_query'](query, runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) for row in rows: ret[row['Name']] = row ret[row['Name']].pop('Name') return ret def tablespace_exists(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Checks if a tablespace exists on the Postgres server. CLI Example: .. 
code-block:: bash salt '*' postgres.tablespace_exists 'dbname' .. versionadded:: 2015.8.0 ''' tablespaces = tablespace_list(user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return name in tablespaces def tablespace_create(name, location, options=None, owner=None, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Adds a tablespace to the Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.tablespace_create tablespacename '/path/datadir' .. versionadded:: 2015.8.0 ''' owner_query = '' options_query = '' if owner: owner_query = 'OWNER "{0}"'.format(owner) # should come out looking like: 'OWNER postgres' if options: optionstext = ['{0} = {1}'.format(k, v) for k, v in options.items()] options_query = 'WITH ( {0} )'.format(', '.join(optionstext)) # should come out looking like: 'WITH ( opt1 = 1.0, opt2 = 4.0 )' query = 'CREATE TABLESPACE "{0}" {1} LOCATION \'{2}\' {3}'.format(name, owner_query, location, options_query) # Execute the command ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return ret['retcode'] == 0 def tablespace_alter(name, user=None, host=None, port=None, maintenance_db=None, password=None, new_name=None, new_owner=None, set_option=None, reset_option=None, runas=None): ''' Change tablespace name, owner, or options. CLI Example: .. code-block:: bash salt '*' postgres.tablespace_alter tsname new_owner=otheruser salt '*' postgres.tablespace_alter index_space new_name=fast_raid salt '*' postgres.tablespace_alter test set_option="{'seq_page_cost': '1.1'}" salt '*' postgres.tablespace_alter tsname reset_option=seq_page_cost .. versionadded:: 2015.8.0 ''' if not any([new_name, new_owner, set_option, reset_option]): return True # Nothing todo? 
queries = [] if new_name: queries.append('ALTER TABLESPACE "{0}" RENAME TO "{1}"'.format( name, new_name)) if new_owner: queries.append('ALTER TABLESPACE "{0}" OWNER TO "{1}"'.format( name, new_owner)) if set_option: queries.append('ALTER TABLESPACE "{0}" SET ({1} = {2})'.format( name, set_option.keys()[0], set_option.values()[0])) if reset_option: queries.append('ALTER TABLESPACE "{0}" RESET ({1})'.format( name, reset_option)) for query in queries: ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) if ret['retcode'] != 0: return False return True def tablespace_remove(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Removes a tablespace from the Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.tablespace_remove tsname .. versionadded:: 2015.8.0 ''' query = 'DROP TABLESPACE "{0}"'.format(name) ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, runas=runas, maintenance_db=maintenance_db, password=password) return ret['retcode'] == 0 # User related actions def user_list(user=None, host=None, port=None, maintenance_db=None, password=None, runas=None, return_password=False): ''' Return a dict with information about users of a Postgres server. Set return_password to True to get password hash in the result. CLI Example: .. code-block:: bash salt '*' postgres.user_list ''' ret = {} ver = _parsed_version(user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) if ver: if ver >= distutils.version.LooseVersion('9.1'): replication_column = 'pg_roles.rolreplication' else: replication_column = 'NULL' if ver >= distutils.version.LooseVersion('9.5'): rolcatupdate_column = 'NULL' else: rolcatupdate_column = 'pg_roles.rolcatupdate' else: log.error('Could not retrieve Postgres version. 
Is Postgresql server running?') return False # will return empty string if return_password = False _x = lambda s: s if return_password else '' query = (''.join([ 'SELECT ' 'pg_roles.rolname as "name",' 'pg_roles.rolsuper as "superuser", ' 'pg_roles.rolinherit as "inherits privileges", ' 'pg_roles.rolcreaterole as "can create roles", ' 'pg_roles.rolcreatedb as "can create databases", ' '{0} as "can update system catalogs", ' 'pg_roles.rolcanlogin as "can login", ' '{1} as "replication", ' 'pg_roles.rolconnlimit as "connections", ' 'pg_roles.rolvaliduntil::timestamp(0) as "expiry time", ' 'pg_roles.rolconfig as "defaults variables" ' , _x(', COALESCE(pg_shadow.passwd, pg_authid.rolpassword) as "password" '), 'FROM pg_roles ' , _x('LEFT JOIN pg_authid ON pg_roles.oid = pg_authid.oid ') , _x('LEFT JOIN pg_shadow ON pg_roles.oid = pg_shadow.usesysid') ]).format(rolcatupdate_column, replication_column)) rows = psql_query(query, runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) def get_bool(rowdict, key): ''' Returns the boolean value of the key, instead of 't' and 'f' strings. 
''' if rowdict[key] == 't': return True elif rowdict[key] == 'f': return False else: return None for row in rows: retrow = {} for key in ('superuser', 'inherits privileges', 'can create roles', 'can create databases', 'can update system catalogs', 'can login', 'replication', 'connections'): retrow[key] = get_bool(row, key) for date_key in ('expiry time',): try: retrow[date_key] = datetime.datetime.strptime( row['date_key'], '%Y-%m-%d %H:%M:%S') except (ValueError, KeyError): retrow[date_key] = None retrow['defaults variables'] = row['defaults variables'] if return_password: retrow['password'] = row['password'] ret[row['name']] = retrow # for each role, determine the inherited roles for role in six.iterkeys(ret): rdata = ret[role] groups = rdata.setdefault('groups', []) query = ( 'select rolname' ' from pg_user' ' join pg_auth_members' ' on (pg_user.usesysid=pg_auth_members.member)' ' join pg_roles ' ' on (pg_roles.oid=pg_auth_members.roleid)' ' where pg_user.usename=\'{0}\'' ).format(role) try: rows = psql_query(query, runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) for row in rows: if row['rolname'] not in groups: groups.append(row['rolname']) except Exception: # do not fail here, it is just a bonus # to try to determine groups, but the query # is not portable amongst all pg versions continue return ret def role_get(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None, return_password=False): ''' Return a dict with information about users of a Postgres server. Set return_password to True to get password hash in the result. CLI Example: .. code-block:: bash salt '*' postgres.role_get postgres ''' all_users = user_list(user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas, return_password=return_password) try: return all_users.get(name, None) except AttributeError: log.error('Could not retrieve Postgres role. 
Is Postgres running?') return None def user_exists(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Checks if a user exists on the Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.user_exists 'username' ''' return bool( role_get(name, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas, return_password=False)) def _add_role_flag(string, test, flag, cond=None, prefix='NO', addtxt='', skip=False): if not skip: if cond is None: cond = test if test is not None: if cond: string = '{0} {1}'.format(string, flag) else: string = '{0} {2}{1}'.format(string, flag, prefix) if addtxt: string = '{0} {1}'.format(string, addtxt) return string def _maybe_encrypt_password(role, password, encrypted=_DEFAULT_PASSWORDS_ENCRYPTION): ''' pgsql passwords are md5 hashes of the string: 'md5{password}{rolename}' ''' if password is not None: password = str(password) if encrypted and password and not password.startswith('md5'): password = "md5{0}".format( hashlib.md5(salt.utils.to_bytes('{0}{1}'.format(password, role))).hexdigest()) return password def _role_cmd_args(name, sub_cmd='', typ_='role', encrypted=None, login=None, connlimit=None, inherit=None, createdb=None, createuser=None, createroles=None, superuser=None, groups=None, replication=None, rolepassword=None, db_role=None): if createuser is not None and superuser is None: superuser = createuser if inherit is None: if typ_ in ['user', 'group']: inherit = True if login is None: if typ_ == 'user': login = True if typ_ == 'group': login = False # defaults to encrypted passwords (md5{password}{rolename}) if encrypted is None: encrypted = _DEFAULT_PASSWORDS_ENCRYPTION skip_passwd = False escaped_password = '' if not ( rolepassword is not None # first is passwd set # second is for handling NOPASSWD and ( isinstance(rolepassword, six.string_types) and bool(rolepassword) ) or ( isinstance(rolepassword, bool) ) ): skip_passwd = True if 
isinstance(rolepassword, six.string_types) and bool(rolepassword): escaped_password = '\'{0}\''.format( _maybe_encrypt_password(name, rolepassword.replace('\'', '\'\''), encrypted=encrypted)) skip_superuser = False if bool(db_role) and bool(superuser) == bool(db_role['superuser']): skip_superuser = True flags = ( {'flag': 'INHERIT', 'test': inherit}, {'flag': 'CREATEDB', 'test': createdb}, {'flag': 'CREATEROLE', 'test': createroles}, {'flag': 'SUPERUSER', 'test': superuser, 'skip': skip_superuser}, {'flag': 'REPLICATION', 'test': replication}, {'flag': 'LOGIN', 'test': login}, {'flag': 'CONNECTION LIMIT', 'test': bool(connlimit), 'addtxt': str(connlimit), 'skip': connlimit is None}, {'flag': 'ENCRYPTED', 'test': (encrypted is not None and bool(rolepassword)), 'skip': skip_passwd or isinstance(rolepassword, bool), 'cond': encrypted, 'prefix': 'UN'}, {'flag': 'PASSWORD', 'test': bool(rolepassword), 'skip': skip_passwd, 'addtxt': escaped_password}, ) for data in flags: sub_cmd = _add_role_flag(sub_cmd, **data) if sub_cmd.endswith('WITH'): sub_cmd = sub_cmd.replace(' WITH', '') if groups: if isinstance(groups, list): groups = ','.join(groups) for group in groups.split(','): sub_cmd = '{0}; GRANT "{1}" TO "{2}"'.format(sub_cmd, group, name) return sub_cmd def _role_create(name, user=None, host=None, port=None, maintenance_db=None, password=None, createdb=None, createroles=None, createuser=None, encrypted=None, superuser=None, login=None, connlimit=None, inherit=None, replication=None, rolepassword=None, typ_='role', groups=None, runas=None): ''' Creates a Postgres role. Users and Groups are both roles in postgres. However, users can login, groups cannot. 
''' # check if role exists if user_exists(name, user, host, port, maintenance_db, password=password, runas=runas): log.info('{0} \'{1}\' already exists'.format(typ_.capitalize(), name)) return False sub_cmd = 'CREATE ROLE "{0}" WITH'.format(name) sub_cmd = '{0} {1}'.format(sub_cmd, _role_cmd_args( name, typ_=typ_, encrypted=encrypted, login=login, connlimit=connlimit, inherit=inherit, createdb=createdb, createroles=createroles, createuser=createuser, superuser=superuser, groups=groups, replication=replication, rolepassword=rolepassword )) ret = _psql_prepare_and_run(['-c', sub_cmd], runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) return ret['retcode'] == 0 def user_create(username, user=None, host=None, port=None, maintenance_db=None, password=None, createdb=None, createuser=None, createroles=None, inherit=None, login=None, connlimit=None, encrypted=None, superuser=None, replication=None, rolepassword=None, groups=None, runas=None): ''' Creates a Postgres user. CLI Examples: .. code-block:: bash salt '*' postgres.user_create 'username' user='user' \\ host='hostname' port='port' password='password' \\ rolepassword='rolepassword' ''' return _role_create(username, typ_='user', user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, createdb=createdb, createuser=createuser, createroles=createroles, inherit=inherit, login=login, connlimit=connlimit, encrypted=encrypted, superuser=superuser, replication=replication, rolepassword=rolepassword, groups=groups, runas=runas) def _role_update(name, user=None, host=None, port=None, maintenance_db=None, password=None, createdb=None, createuser=None, typ_='role', createroles=None, inherit=None, login=None, connlimit=None, encrypted=None, superuser=None, replication=None, rolepassword=None, groups=None, runas=None): ''' Updates a postgres role. 
def _role_update(name,
                 user=None,
                 host=None,
                 port=None,
                 maintenance_db=None,
                 password=None,
                 createdb=None,
                 createuser=None,
                 typ_='role',
                 createroles=None,
                 inherit=None,
                 login=None,
                 connlimit=None,
                 encrypted=None,
                 superuser=None,
                 replication=None,
                 rolepassword=None,
                 groups=None,
                 runas=None):
    '''
    Updates a postgres role.
    '''
    # Fetch the current role definition; _role_cmd_args uses it to decide
    # whether flags such as SUPERUSER actually need to change.
    role = role_get(name,
                    user=user,
                    host=host,
                    port=port,
                    maintenance_db=maintenance_db,
                    password=password,
                    runas=runas,
                    return_password=False)

    # Guard clause: ALTER ROLE on a missing role would just error out.
    if not role:
        log.info(
            '{0} \'{1}\' could not be found'.format(typ_.capitalize(), name)
        )
        return False

    alter_stmt = 'ALTER ROLE "{0}" WITH'.format(name)
    alter_stmt = '{0} {1}'.format(alter_stmt, _role_cmd_args(
        name,
        encrypted=encrypted,
        login=login,
        connlimit=connlimit,
        inherit=inherit,
        createdb=createdb,
        createuser=createuser,
        createroles=createroles,
        superuser=superuser,
        groups=groups,
        replication=replication,
        rolepassword=rolepassword,
        db_role=role
    ))

    cmd_ret = _psql_prepare_and_run(['-c', alter_stmt],
                                    runas=runas,
                                    host=host,
                                    user=user,
                                    port=port,
                                    maintenance_db=maintenance_db,
                                    password=password)

    return cmd_ret['retcode'] == 0


def user_update(username,
                user=None,
                host=None,
                port=None,
                maintenance_db=None,
                password=None,
                createdb=None,
                createuser=None,
                createroles=None,
                encrypted=None,
                superuser=None,
                inherit=None,
                login=None,
                connlimit=None,
                replication=None,
                rolepassword=None,
                groups=None,
                runas=None):
    '''
    Updates a Postgres user.

    CLI Examples:

    .. code-block:: bash

        salt '*' postgres.user_update 'username' user='user' \\
                host='hostname' port='port' password='password' \\
                rolepassword='rolepassword'
    '''
    # Thin wrapper: a "user" is simply a role with typ_='user'.
    return _role_update(username,
                        user=user,
                        host=host,
                        port=port,
                        maintenance_db=maintenance_db,
                        password=password,
                        typ_='user',
                        inherit=inherit,
                        login=login,
                        connlimit=connlimit,
                        createdb=createdb,
                        createuser=createuser,
                        createroles=createroles,
                        encrypted=encrypted,
                        superuser=superuser,
                        replication=replication,
                        rolepassword=rolepassword,
                        groups=groups,
                        runas=runas)
code-block:: bash salt '*' postgres.user_update 'username' user='user' \\ host='hostname' port='port' password='password' \\ rolepassword='rolepassword' ''' return _role_update(username, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, typ_='user', inherit=inherit, login=login, connlimit=connlimit, createdb=createdb, createuser=createuser, createroles=createroles, encrypted=encrypted, superuser=superuser, replication=replication, rolepassword=rolepassword, groups=groups, runas=runas) def _role_remove(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Removes a role from the Postgres Server ''' # check if user exists if not user_exists(name, user, host, port, maintenance_db, password=password, runas=runas): log.info('User \'{0}\' does not exist'.format(name)) return False # user exists, proceed sub_cmd = 'DROP ROLE "{0}"'.format(name) _psql_prepare_and_run( ['-c', sub_cmd], runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) if not user_exists(name, user, host, port, maintenance_db, password=password, runas=runas): return True else: log.info('Failed to delete user \'{0}\'.'.format(name)) return False def available_extensions(user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' List available postgresql extensions CLI Example: .. code-block:: bash salt '*' postgres.available_extensions ''' exts = [] query = ( 'select * ' 'from pg_available_extensions();' ) ret = psql_query(query, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) exts = {} for row in ret: if 'default_version' in row and 'name' in row: exts[row['name']] = row return exts def installed_extensions(user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' List installed postgresql extensions CLI Example: .. 
code-block:: bash salt '*' postgres.installed_extensions ''' exts = [] query = ( 'select a.*, b.nspname as schema_name ' 'from pg_extension a, pg_namespace b where a.extnamespace = b.oid;' ) ret = psql_query(query, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) exts = {} for row in ret: if 'extversion' in row and 'extname' in row: exts[row['extname']] = row return exts def get_available_extension(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Get info about an available postgresql extension CLI Example: .. code-block:: bash salt '*' postgres.get_available_extension plpgsql ''' return available_extensions(user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas).get(name, None) def get_installed_extension(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Get info about an installed postgresql extension CLI Example: .. code-block:: bash salt '*' postgres.get_installed_extension plpgsql ''' return installed_extensions(user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas).get(name, None) def is_available_extension(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Test if a specific extension is available CLI Example: .. code-block:: bash salt '*' postgres.is_available_extension ''' exts = available_extensions(user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) if name.lower() in [ a.lower() for a in exts ]: return True return False def _pg_is_older_ext_ver(a, b): '''Return true if version a is lesser than b TODO: be more intelligent to test versions ''' return a < b def is_installed_extension(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Test if a specific extension is installed CLI Example: .. 
code-block:: bash salt '*' postgres.is_installed_extension ''' installed_ext = get_installed_extension( name, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return bool(installed_ext) def create_metadata(name, ext_version=None, schema=None, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Get lifecycle information about an extension CLI Example: .. code-block:: bash salt '*' postgres.create_metadata adminpack ''' installed_ext = get_installed_extension( name, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) ret = [_EXTENSION_NOT_INSTALLED] if installed_ext: ret = [_EXTENSION_INSTALLED] if ( ext_version is not None and _pg_is_older_ext_ver( installed_ext.get('extversion', ext_version), ext_version ) ): ret.append(_EXTENSION_TO_UPGRADE) if ( schema is not None and installed_ext.get('extrelocatable', 'f') == 't' and installed_ext.get('schema_name', schema) != schema ): ret.append(_EXTENSION_TO_MOVE) return ret def drop_extension(name, if_exists=None, restrict=None, cascade=None, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Drop an installed postgresql extension CLI Example: .. 
code-block:: bash salt '*' postgres.drop_extension 'adminpack' ''' if cascade is None: cascade = True if if_exists is None: if_exists = False if restrict is None: restrict = False args = ['DROP EXTENSION'] if if_exists: args.append('IF EXISTS') args.append(name) if cascade: args.append('CASCADE') if restrict: args.append('RESTRICT') args.append(';') cmd = ' '.join(args) if is_installed_extension(name, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas): _psql_prepare_and_run( ['-c', cmd], runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) ret = not is_installed_extension(name, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) if not ret: log.info('Failed to drop ext: {0}'.format(name)) return ret def create_extension(name, if_not_exists=None, schema=None, ext_version=None, from_version=None, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Install a postgresql extension CLI Example: .. 
code-block:: bash salt '*' postgres.create_extension 'adminpack' ''' if if_not_exists is None: if_not_exists = True mtdata = create_metadata(name, ext_version=ext_version, schema=schema, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) installed = _EXTENSION_NOT_INSTALLED not in mtdata installable = is_available_extension(name, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) if installable: if not installed: args = ['CREATE EXTENSION'] if if_not_exists: args.append('IF NOT EXISTS') args.append('"{0}"'.format(name)) sargs = [] if schema: sargs.append('SCHEMA "{0}"'.format(schema)) if ext_version: sargs.append('VERSION {0}'.format(ext_version)) if from_version: sargs.append('FROM {0}'.format(from_version)) if sargs: args.append('WITH') args.extend(sargs) args.append(';') cmd = ' '.join(args).strip() else: args = [] if schema and _EXTENSION_TO_MOVE in mtdata: args.append('ALTER EXTENSION "{0}" SET SCHEMA "{1}";'.format( name, schema)) if ext_version and _EXTENSION_TO_UPGRADE in mtdata: args.append('ALTER EXTENSION "{0}" UPDATE TO {1};'.format( name, ext_version)) cmd = ' '.join(args).strip() if cmd: _psql_prepare_and_run( ['-c', cmd], runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) mtdata = create_metadata(name, ext_version=ext_version, schema=schema, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) ret = True for i in _EXTENSION_FLAGS: if (i in mtdata) and (i != _EXTENSION_INSTALLED): ret = False if not ret: log.info('Failed to create ext: {0}'.format(name)) return ret def user_remove(username, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Removes a user from the Postgres server. CLI Example: .. 
code-block:: bash salt '*' postgres.user_remove 'username' ''' return _role_remove(username, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) # Group related actions def group_create(groupname, user=None, host=None, port=None, maintenance_db=None, password=None, createdb=None, createuser=None, createroles=None, encrypted=None, login=None, inherit=None, superuser=None, replication=None, rolepassword=None, groups=None, runas=None): ''' Creates a Postgres group. A group is postgres is similar to a user, but cannot login. CLI Example: .. code-block:: bash salt '*' postgres.group_create 'groupname' user='user' \\ host='hostname' port='port' password='password' \\ rolepassword='rolepassword' ''' return _role_create(groupname, user=user, typ_='group', host=host, port=port, maintenance_db=maintenance_db, password=password, createdb=createdb, createroles=createroles, createuser=createuser, encrypted=encrypted, login=login, inherit=inherit, superuser=superuser, replication=replication, rolepassword=rolepassword, groups=groups, runas=runas) def group_update(groupname, user=None, host=None, port=None, maintenance_db=None, password=None, createdb=None, createroles=None, createuser=None, encrypted=None, inherit=None, login=None, superuser=None, replication=None, rolepassword=None, groups=None, runas=None): ''' Updates a postgres group CLI Examples: .. 
code-block:: bash salt '*' postgres.group_update 'username' user='user' \\ host='hostname' port='port' password='password' \\ rolepassword='rolepassword' ''' return _role_update(groupname, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, createdb=createdb, typ_='group', createroles=createroles, createuser=createuser, encrypted=encrypted, login=login, inherit=inherit, superuser=superuser, replication=replication, rolepassword=rolepassword, groups=groups, runas=runas) def group_remove(groupname, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Removes a group from the Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.group_remove 'groupname' ''' return _role_remove(groupname, user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) def owner_to(dbname, ownername, user=None, host=None, port=None, password=None, runas=None): ''' Set the owner of all schemas, functions, tables, views and sequences to the given username. CLI Example: .. 
code-block:: bash salt '*' postgres.owner_to 'dbname' 'username' ''' sqlfile = tempfile.NamedTemporaryFile() sqlfile.write('begin;\n') sqlfile.write( 'alter database "{0}" owner to "{1}";\n'.format( dbname, ownername ) ) queries = ( # schemas ('alter schema {n} owner to {owner};', 'select quote_ident(schema_name) as n from ' 'information_schema.schemata;'), # tables and views ('alter table {n} owner to {owner};', 'select quote_ident(table_schema)||\'.\'||quote_ident(table_name) as ' 'n from information_schema.tables where table_schema not in ' '(\'pg_catalog\', \'information_schema\');'), # functions ('alter function {n} owner to {owner};', 'select p.oid::regprocedure::text as n from pg_catalog.pg_proc p ' 'join pg_catalog.pg_namespace ns on p.pronamespace=ns.oid where ' 'ns.nspname not in (\'pg_catalog\', \'information_schema\') ' ' and not p.proisagg;'), # aggregate functions ('alter aggregate {n} owner to {owner};', 'select p.oid::regprocedure::text as n from pg_catalog.pg_proc p ' 'join pg_catalog.pg_namespace ns on p.pronamespace=ns.oid where ' 'ns.nspname not in (\'pg_catalog\', \'information_schema\') ' 'and p.proisagg;'), # sequences ('alter sequence {n} owner to {owner};', 'select quote_ident(sequence_schema)||\'.\'||' 'quote_ident(sequence_name) as n from information_schema.sequences;') ) for fmt, query in queries: ret = psql_query(query, user=user, host=host, port=port, maintenance_db=dbname, password=password, runas=runas) for row in ret: sqlfile.write(fmt.format(owner=ownername, n=row['n']) + '\n') sqlfile.write('commit;\n') sqlfile.flush() os.chmod(sqlfile.name, 0o644) # ensure psql can read the file # run the generated sqlfile in the db cmdret = _psql_prepare_and_run(['-f', sqlfile.name], user=user, runas=runas, host=host, port=port, password=password, maintenance_db=dbname) return cmdret # Schema related actions def schema_create(dbname, name, owner=None, user=None, db_user=None, db_password=None, db_host=None, db_port=None): ''' Creates a Postgres 
schema. CLI Example: .. code-block:: bash salt '*' postgres.schema_create dbname name owner='owner' \\ user='user' \\ db_user='user' db_password='password' db_host='hostname' db_port='port' ''' # check if schema exists if schema_exists(dbname, name, db_user=db_user, db_password=db_password, db_host=db_host, db_port=db_port): log.info('\'{0}\' already exists in \'{1}\''.format(name, dbname)) return False sub_cmd = 'CREATE SCHEMA "{0}"'.format(name) if owner is not None: sub_cmd = '{0} AUTHORIZATION "{1}"'.format(sub_cmd, owner) ret = _psql_prepare_and_run(['-c', sub_cmd], user=db_user, password=db_password, port=db_port, host=db_host, maintenance_db=dbname, runas=user) return ret['retcode'] == 0 def schema_remove(dbname, name, user=None, db_user=None, db_password=None, db_host=None, db_port=None): ''' Removes a schema from the Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.schema_remove dbname schemaname dbname Database name we work on schemaname The schema's name we'll remove user System user all operations should be performed on behalf of db_user database username if different from config or default db_password user password if any password for a specified user db_host Database host if different from config or default db_port Database port if different from config or default ''' # check if schema exists if not schema_exists(dbname, name, db_user=db_user, db_password=db_password, db_host=db_host, db_port=db_port): log.info('Schema \'{0}\' does not exist in \'{1}\''.format(name, dbname)) return False # schema exists, proceed sub_cmd = 'DROP SCHEMA "{0}"'.format(name) _psql_prepare_and_run( ['-c', sub_cmd], runas=user, maintenance_db=dbname, host=db_host, user=db_user, port=db_port, password=db_password) if not schema_exists(dbname, name, db_user=db_user, db_password=db_password, db_host=db_host, db_port=db_port): return True else: log.info('Failed to delete schema \'{0}\'.'.format(name)) return False def schema_exists(dbname, name, 
db_user=None, db_password=None, db_host=None, db_port=None): ''' Checks if a schema exists on the Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.schema_exists dbname schemaname dbname Database name we query on name Schema name we look for db_user database username if different from config or default db_password user password if any password for a specified user db_host Database host if different from config or default db_port Database port if different from config or default ''' return bool( schema_get(dbname, name, db_user=db_user, db_host=db_host, db_port=db_port, db_password=db_password)) def schema_get(dbname, name, db_user=None, db_password=None, db_host=None, db_port=None): ''' Return a dict with information about schemas in a database. CLI Example: .. code-block:: bash salt '*' postgres.schema_get dbname name dbname Database name we query on name Schema name we look for db_user database username if different from config or default db_password user password if any password for a specified user db_host Database host if different from config or default db_port Database port if different from config or default ''' all_schemas = schema_list(dbname, db_user=db_user, db_host=db_host, db_port=db_port, db_password=db_password) try: return all_schemas.get(name, None) except AttributeError: log.error('Could not retrieve Postgres schema. Is Postgres running?') return False def schema_list(dbname, db_user=None, db_password=None, db_host=None, db_port=None): ''' Return a dict with information about schemas in a Postgres database. CLI Example: .. 
code-block:: bash salt '*' postgres.schema_list dbname dbname Database name we query on db_user database username if different from config or default db_password user password if any password for a specified user db_host Database host if different from config or default db_port Database port if different from config or default ''' ret = {} query = (''.join([ 'SELECT ' 'pg_namespace.nspname as "name",' 'pg_namespace.nspacl as "acl", ' 'pg_roles.rolname as "owner" ' 'FROM pg_namespace ' 'LEFT JOIN pg_roles ON pg_roles.oid = pg_namespace.nspowner ' ])) rows = psql_query(query, host=db_host, user=db_user, port=db_port, maintenance_db=dbname, password=db_password) for row in rows: retrow = {} for key in ('owner', 'acl'): retrow[key] = row[key] ret[row['name']] = retrow return ret def language_list( maintenance_db, user=None, host=None, port=None, password=None, runas=None): ''' .. versionadded:: 2016.3.0 Return a list of languages in a database. CLI Example: .. code-block:: bash salt '*' postgres.language_list dbname maintenance_db The database to check user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of ''' ret = {} query = 'SELECT lanname AS "Name" FROM pg_language' rows = psql_query( query, runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) for row in rows: ret[row['Name']] = row['Name'] return ret def language_exists( name, maintenance_db, user=None, host=None, port=None, password=None, runas=None): ''' .. versionadded:: 2016.3.0 Checks if language exists in a database. CLI Example: .. 
code-block:: bash salt '*' postgres.language_exists plpgsql dbname name Language to check for maintenance_db The database to check in user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of ''' languages = language_list( maintenance_db, user=user, host=host, port=port, password=password, runas=runas) return name in languages def language_create(name, maintenance_db, user=None, host=None, port=None, password=None, runas=None): ''' .. versionadded:: 2016.3.0 Installs a language into a database CLI Example: .. code-block:: bash salt '*' postgres.language_create plpgsql dbname name Language to install maintenance_db The database to install the language in user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of ''' if language_exists(name, maintenance_db): log.info('Language %s already exists in %s', name, maintenance_db) return False query = 'CREATE LANGUAGE {0}'.format(name) ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return ret['retcode'] == 0 def language_remove(name, maintenance_db, user=None, host=None, port=None, password=None, runas=None): ''' .. versionadded:: 2016.3.0 Removes a language from a database CLI Example: .. 
code-block:: bash salt '*' postgres.language_remove plpgsql dbname name Language to remove maintenance_db The database to install the language in user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of ''' if not language_exists(name, maintenance_db): log.info('Language %s does not exist in %s', name, maintenance_db) return False query = 'DROP LANGUAGE {0}'.format(name) ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, runas=runas, maintenance_db=maintenance_db, password=password) return ret['retcode'] == 0 def _make_privileges_list_query(name, object_type, prepend): ''' Generate the SQL required for specific object type ''' if object_type == 'table': query = (' '.join([ 'SELECT relacl AS name', 'FROM pg_catalog.pg_class c', 'JOIN pg_catalog.pg_namespace n', 'ON n.oid = c.relnamespace', "WHERE nspname = '{0}'", "AND relname = '{1}'", "AND relkind = 'r'", 'ORDER BY relname', ])).format(prepend, name) elif object_type == 'sequence': query = (' '.join([ 'SELECT relacl AS name', 'FROM pg_catalog.pg_class c', 'JOIN pg_catalog.pg_namespace n', 'ON n.oid = c.relnamespace', "WHERE nspname = '{0}'", "AND relname = '{1}'", "AND relkind = 'S'", 'ORDER BY relname', ])).format(prepend, name) elif object_type == 'schema': query = (' '.join([ 'SELECT nspacl AS name', 'FROM pg_catalog.pg_namespace', "WHERE nspname = '{0}'", 'ORDER BY nspname', ])).format(name) # elif object_type == 'function': # query = (' '.join([ # 'SELECT proacl AS name', # 'FROM pg_catalog.pg_proc p', # 'JOIN pg_catalog.pg_namespace n', # 'ON n.oid = p.pronamespace', # "WHERE nspname = '{0}'", # "AND proname = '{1}'", # 'ORDER BY proname, proargtypes', # ])).format(prepend, name) elif object_type == 'tablespace': query = (' '.join([ 'SELECT spcacl AS 
name', 'FROM pg_catalog.pg_tablespace', "WHERE spcname = '{0}'", 'ORDER BY spcname', ])).format(name) elif object_type == 'language': query = (' '.join([ 'SELECT lanacl AS name', 'FROM pg_catalog.pg_language', "WHERE lanname = '{0}'", 'ORDER BY lanname', ])).format(name) elif object_type == 'database': query = (' '.join([ 'SELECT datacl AS name', 'FROM pg_catalog.pg_database', "WHERE datname = '{0}'", 'ORDER BY datname', ])).format(name) elif object_type == 'group': query = (' '.join([ 'SELECT rolname, admin_option', 'FROM pg_catalog.pg_auth_members m', 'JOIN pg_catalog.pg_roles r', 'ON m.member=r.oid', 'WHERE m.roleid IN', '(SELECT oid', 'FROM pg_catalog.pg_roles', "WHERE rolname='{0}')", 'ORDER BY rolname', ])).format(name) return query def _get_object_owner(name, object_type, prepend='public', maintenance_db=None, user=None, host=None, port=None, password=None, runas=None): ''' Return the owner of a postgres object ''' if object_type == 'table': query = (' '.join([ 'SELECT tableowner AS name', 'FROM pg_tables', "WHERE schemaname = '{0}'", "AND tablename = '{1}'" ])).format(prepend, name) elif object_type == 'sequence': query = (' '.join([ 'SELECT rolname AS name', 'FROM pg_catalog.pg_class c', 'JOIN pg_roles r', 'ON c.relowner = r.oid', 'JOIN pg_catalog.pg_namespace n', 'ON n.oid = c.relnamespace', "WHERE relkind='S'", "AND nspname='{0}'", "AND relname = '{1}'", ])).format(prepend, name) elif object_type == 'schema': query = (' '.join([ 'SELECT rolname AS name', 'FROM pg_namespace n', 'JOIN pg_roles r', 'ON n.nspowner = r.oid', "WHERE nspname = '{0}'", ])).format(name) elif object_type == 'tablespace': query = (' '.join([ 'SELECT rolname AS name', 'FROM pg_tablespace t', 'JOIN pg_roles r', 'ON t.spcowner = r.oid', "WHERE spcname = '{0}'", ])).format(name) elif object_type == 'language': query = (' '.join([ 'SELECT rolname AS name', 'FROM pg_language l', 'JOIN pg_roles r', 'ON l.lanowner = r.oid', "WHERE lanname = '{0}'", ])).format(name) elif object_type == 
'database': query = (' '.join([ 'SELECT rolname AS name', 'FROM pg_database d', 'JOIN pg_roles r', 'ON d.datdba = r.oid', "WHERE datname = '{0}'", ])).format(name) rows = psql_query( query, runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) try: ret = rows[0]['name'] except IndexError: ret = None return ret def _validate_privileges(object_type, privs, privileges): ''' Validate the supplied privileges ''' if object_type != 'group': _perms = [_PRIVILEGES_MAP[perm] for perm in _PRIVILEGE_TYPE_MAP[object_type]] _perms.append('ALL') if object_type not in _PRIVILEGES_OBJECTS: raise SaltInvocationError( 'Invalid object_type: {0} provided'.format(object_type)) if not set(privs).issubset(set(_perms)): raise SaltInvocationError( 'Invalid privilege(s): {0} provided for object {1}'.format( privileges, object_type)) else: if privileges: raise SaltInvocationError( 'The privileges option should not ' 'be set for object_type group') def _mod_priv_opts(object_type, privileges): ''' Format options ''' object_type = object_type.lower() privileges = '' if privileges is None else privileges _privs = re.split(r'\s?,\s?', privileges.upper()) return object_type, privileges, _privs def _process_priv_part(perms): ''' Process part ''' _tmp = {} previous = None for perm in perms: if previous is None: _tmp[_PRIVILEGES_MAP[perm]] = False previous = _PRIVILEGES_MAP[perm] else: if perm == '*': _tmp[previous] = True else: _tmp[_PRIVILEGES_MAP[perm]] = False previous = _PRIVILEGES_MAP[perm] return _tmp def privileges_list( name, object_type, prepend='public', maintenance_db=None, user=None, host=None, port=None, password=None, runas=None): ''' .. versionadded:: 2016.3.0 Return a list of privileges for the specified object. CLI Example: .. 
code-block:: bash salt '*' postgres.privileges_list table_name table maintenance_db=db_name name Name of the object for which the permissions should be returned object_type The object type, which can be one of the following: - table - sequence - schema - tablespace - language - database - group prepend Table and Sequence object types live under a schema so this should be provided if the object is not under the default `public` schema maintenance_db The database to connect to user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of ''' object_type = object_type.lower() query = _make_privileges_list_query(name, object_type, prepend) if object_type not in _PRIVILEGES_OBJECTS: raise SaltInvocationError( 'Invalid object_type: {0} provided'.format(object_type)) rows = psql_query( query, runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) ret = {} for row in rows: if object_type != 'group': result = row['name'] result = result.strip('{}') parts = result.split(',') for part in parts: perms_part, _ = part.split('/') rolename, perms = perms_part.split('=') if rolename == '': rolename = 'public' _tmp = _process_priv_part(perms) ret[rolename] = _tmp else: if row['admin_option'] == 't': admin_option = True else: admin_option = False ret[row['rolname']] = admin_option return ret def has_privileges(name, object_name, object_type, privileges=None, grant_option=None, prepend='public', maintenance_db=None, user=None, host=None, port=None, password=None, runas=None): ''' .. versionadded:: 2016.3.0 Check if a role has the specified privileges on an object CLI Example: .. 
def has_privileges(name,
                   object_name,
                   object_type,
                   privileges=None,
                   grant_option=None,
                   prepend='public',
                   maintenance_db=None,
                   user=None,
                   host=None,
                   port=None,
                   password=None,
                   runas=None):
    '''
    .. versionadded:: 2016.3.0

    Check if a role has the specified privileges on an object

    CLI Example:

    .. code-block:: bash

        salt '*' postgres.has_privileges user_name table_name table \\
        SELECT,INSERT maintenance_db=db_name

    name
        Name of the role whose privileges should be checked on object_type

    object_name
        Name of the object on which the check is to be performed

    object_type
        The object type, which can be one of the following: table, sequence,
        schema, tablespace, language, database, group

    privileges
        Comma separated list of privileges to check, from the list below:
        INSERT, CREATE, TRUNCATE, CONNECT, TRIGGER, SELECT, USAGE, TEMPORARY,
        UPDATE, EXECUTE, REFERENCES, DELETE, ALL

    grant_option
        If grant_option is set to True, the grant option check is performed

    prepend
        Table and Sequence object types live under a schema so this should be
        provided if the object is not under the default `public` schema

    maintenance_db
        The database to connect to

    user
        database username if different from config or default

    password
        user password if any password for a specified user

    host
        Database host if different from config or default

    port
        Database port if different from config or default

    runas
        System user all operations should be performed on behalf of
    '''
    object_type, privileges, _privs = _mod_priv_opts(object_type, privileges)

    _validate_privileges(object_type, _privs, privileges)

    if object_type != 'group':
        owner = _get_object_owner(object_name, object_type, prepend=prepend,
                                  maintenance_db=maintenance_db, user=user,
                                  host=host, port=port, password=password,
                                  runas=runas)
        # The owner implicitly holds every privilege on its own object.
        if owner is not None and name == owner:
            return True

    _privileges = privileges_list(object_name, object_type, prepend=prepend,
                                  maintenance_db=maintenance_db, user=user,
                                  host=host, port=port, password=password,
                                  runas=runas)

    if name in _privileges:
        if object_type == 'group':
            if grant_option:
                retval = _privileges[name]
            else:
                retval = True
            return retval
        else:
            _perms = _PRIVILEGE_TYPE_MAP[object_type]
            if grant_option:
                perms = dict((_PRIVILEGES_MAP[perm], True) for perm in _perms)
                retval = perms == _privileges[name]
            else:
                perms = [_PRIVILEGES_MAP[perm] for perm in _perms]
                if 'ALL' in _privs:
                    # BUG FIX: the original compared
                    # ``perms.sort() == _privileges[name].keys().sort()``;
                    # list.sort() returns None so that was always True, and
                    # dict.keys() has no .sort() on Python 3. Compare the
                    # sorted contents instead.
                    retval = sorted(perms) == sorted(_privileges[name].keys())
                else:
                    retval = set(_privs).issubset(
                        set(_privileges[name].keys()))
            return retval

    return False


def privileges_grant(name,
                     object_name,
                     object_type,
                     privileges=None,
                     grant_option=None,
                     prepend='public',
                     maintenance_db=None,
                     user=None,
                     host=None,
                     port=None,
                     password=None,
                     runas=None):
    '''
    .. versionadded:: 2016.3.0

    Grant privileges on a postgres object

    CLI Example:

    .. code-block:: bash

        salt '*' postgres.privileges_grant user_name table_name table \\
        SELECT,UPDATE maintenance_db=db_name

    name
        Name of the role to which privileges should be granted

    object_name
        Name of the object on which the grant is to be performed

    object_type
        The object type, which can be one of the following: table, sequence,
        schema, tablespace, language, database, group

    privileges
        Comma separated list of privileges to grant, from the list below:
        INSERT, CREATE, TRUNCATE, CONNECT, TRIGGER, SELECT, USAGE, TEMPORARY,
        UPDATE, EXECUTE, REFERENCES, DELETE, ALL

    grant_option
        If grant_option is set to True, the recipient of the privilege can
        in turn grant it to others

    prepend
        Table and Sequence object types live under a schema so this should be
        provided if the object is not under the default `public` schema

    maintenance_db
        The database to connect to

    user
        database username if different from config or default

    password
        user password if any password for a specified user

    host
        Database host if different from config or default

    port
        Database port if different from config or default

    runas
        System user all operations should be performed on behalf of
    '''
    object_type, privileges, _privs = _mod_priv_opts(object_type, privileges)

    _validate_privileges(object_type, _privs, privileges)

    # Idempotence: skip the GRANT when the privileges are already in place.
    if has_privileges(name, object_name, object_type, privileges,
                      prepend=prepend, maintenance_db=maintenance_db,
                      user=user, host=host, port=port, password=password,
                      runas=runas):
        log.info('The object: %s of type: %s already has privileges: %s set',
                 object_name, object_type, privileges)
        return False

    _grants = ','.join(_privs)

    # Tables and sequences must be schema-qualified in the GRANT statement.
    if object_type in ['table', 'sequence']:
        on_part = '{0}.{1}'.format(prepend, object_name)
    else:
        on_part = object_name

    if grant_option:
        if object_type == 'group':
            query = 'GRANT {0} TO {1} WITH ADMIN OPTION'.format(
                object_name, name)
        else:
            query = 'GRANT {0} ON {1} {2} TO {3} WITH GRANT OPTION'.format(
                _grants, object_type.upper(), on_part, name)
    else:
        if object_type == 'group':
            query = 'GRANT {0} TO {1}'.format(object_name, name)
        else:
            query = 'GRANT {0} ON {1} {2} TO {3}'.format(
                _grants, object_type.upper(), on_part, name)

    ret = _psql_prepare_and_run(['-c', query],
                                user=user,
                                host=host,
                                port=port,
                                maintenance_db=maintenance_db,
                                password=password,
                                runas=runas)

    return ret['retcode'] == 0
%s of type: %s already has privileges: %s set', object_name, object_type, privileges) return False _grants = ','.join(_privs) if object_type in ['table', 'sequence']: on_part = '{0}.{1}'.format(prepend, object_name) else: on_part = object_name if grant_option: if object_type == 'group': query = 'GRANT {0} TO {1} WITH ADMIN OPTION'.format( object_name, name) else: query = 'GRANT {0} ON {1} {2} TO {3} WITH GRANT OPTION'.format( _grants, object_type.upper(), on_part, name) else: if object_type == 'group': query = 'GRANT {0} TO {1}'.format(object_name, name) else: query = 'GRANT {0} ON {1} {2} TO {3}'.format( _grants, object_type.upper(), on_part, name) ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return ret['retcode'] == 0 def privileges_revoke(name, object_name, object_type, privileges=None, prepend='public', maintenance_db=None, user=None, host=None, port=None, password=None, runas=None): ''' .. versionadded:: 2016.3.0 Revoke privileges on a postgres object CLI Example: .. 
code-block:: bash salt '*' postgres.privileges_revoke user_name table_name table \\ SELECT,UPDATE maintenance_db=db_name name Name of the role whose privilages should be revoked object_name Name of the object on which the revoke is to be performed object_type The object type, which can be one of the following: - table - sequence - schema - tablespace - language - database - group privileges Comma separated list of privilages to revoke, from the list below: - INSERT - CREATE - TRUNCATE - CONNECT - TRIGGER - SELECT - USAGE - TEMPORARY - UPDATE - EXECUTE - REFERENCES - DELETE - ALL maintenance_db The database to connect to user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of ''' object_type, privileges, _privs = _mod_priv_opts(object_type, privileges) _validate_privileges(object_type, _privs, privileges) if not has_privileges(name, object_name, object_type, privileges, prepend=prepend, maintenance_db=maintenance_db, user=user, host=host, port=port, password=password, runas=runas): log.info('The object: %s of type: %s does not' ' have privileges: %s set', object_name, object_type, privileges) return False _grants = ','.join(_privs) if object_type in ['table', 'sequence']: on_part = '{0}.{1}'.format(prepend, object_name) else: on_part = object_name if object_type == 'group': query = 'REVOKE {0} FROM {1}'.format(object_name, name) else: query = 'REVOKE {0} ON {1} {2} FROM {3}'.format( _grants, object_type.upper(), on_part, name) ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return ret['retcode'] == 0 def datadir_init(name, auth='password', user=None, password=None, encoding='UTF8', locale=None, runas=None): ''' .. 
versionadded:: 2016.3.0 Initializes a postgres data directory CLI Example: .. code-block:: bash salt '*' postgres.datadir_init '/var/lib/pgsql/data' name The name of the directory to initialize auth The default authentication method for local connections password The password to set for the postgres user user The database superuser name encoding The default encoding for new databases locale The default locale for new databases runas The system user the operation should be performed on behalf of ''' if salt.utils.which('initdb') is None: log.error('initdb not found in path') return False if datadir_exists(name): log.info('%s already exists', name) return False ret = _run_initdb( name, auth=auth, user=user, password=password, encoding=encoding, locale=locale, runas=runas) return ret['retcode'] == 0 def datadir_exists(name): ''' .. versionadded:: 2016.3.0 Checks if postgres data directory has been initialized CLI Example: .. code-block:: bash salt '*' postgres.datadir_exists '/var/lib/pgsql/data' name Name of the directory to check ''' _version_file = os.path.join(name, 'PG_VERSION') _config_file = os.path.join(name, 'postgresql.conf') return os.path.isfile(_version_file) and os.path.isfile(_config_file)
29.224211
90
0.529742
05cd0f75b7988074b375e302a4990178337e98b1
510
py
Python
meiduo_mall/meiduo_mall/apps/meiduo_admin/serializers/permission.py
yeluoguigen/meiduo_admin1
2f2a890ea44a4ba201bed0dc3193aef4fc705d10
[ "MIT" ]
null
null
null
meiduo_mall/meiduo_mall/apps/meiduo_admin/serializers/permission.py
yeluoguigen/meiduo_admin1
2f2a890ea44a4ba201bed0dc3193aef4fc705d10
[ "MIT" ]
null
null
null
meiduo_mall/meiduo_mall/apps/meiduo_admin/serializers/permission.py
yeluoguigen/meiduo_admin1
2f2a890ea44a4ba201bed0dc3193aef4fc705d10
[ "MIT" ]
null
null
null
from django.contrib.auth.models import Permission from django.contrib.contenttypes.models import ContentType from rest_framework import serializers class PermissionSerializer(serializers.ModelSerializer): ''' 权限表序列化器 ''' class Meta: model = Permission fields = '__all__' class ContentTypeSerializer(serializers.ModelSerializer): ''' 权限表的序列化 ''' name = serializers.CharField(read_only=True) class Meta: model = ContentType fields = '__all__'
26.842105
58
0.703922
f3e87fb5ccaf4455569a3dfa7c4041c649df4810
688
py
Python
pava/implementation/natives/sun/awt/image/codec/JPEGImageEncoderImpl.py
laffra/pava
54d10cf7f8def2f96e254c0356623d08f221536f
[ "MIT" ]
4
2017-03-30T16:51:16.000Z
2020-10-05T12:25:47.000Z
pava/implementation/natives/sun/awt/image/codec/JPEGImageEncoderImpl.py
laffra/pava
54d10cf7f8def2f96e254c0356623d08f221536f
[ "MIT" ]
null
null
null
pava/implementation/natives/sun/awt/image/codec/JPEGImageEncoderImpl.py
laffra/pava
54d10cf7f8def2f96e254c0356623d08f221536f
[ "MIT" ]
null
null
null
def add_native_methods(clazz): def initEncoder__java_lang_Class__(a0, a1): raise NotImplementedError() def writeJPEGStream__com_sun_image_codec_jpeg_JPEGEncodeParam__java_awt_image_ColorModel__java_io_OutputStream__java_lang_Object__int__int__(a0, a1, a2, a3, a4, a5, a6): raise NotImplementedError() clazz.initEncoder__java_lang_Class__ = initEncoder__java_lang_Class__ clazz.writeJPEGStream__com_sun_image_codec_jpeg_JPEGEncodeParam__java_awt_image_ColorModel__java_io_OutputStream__java_lang_Object__int__int__ = writeJPEGStream__com_sun_image_codec_jpeg_JPEGEncodeParam__java_awt_image_ColorModel__java_io_OutputStream__java_lang_Object__int__int__
62.545455
285
0.880814
725c043298261f335fee7b49092b3d95bf6b2b29
5,516
py
Python
research/inception/inception/slim/inception_test.py
SimiaCryptus/models
c652a23a650070b71e286f1ded93726670161940
[ "Apache-2.0" ]
null
null
null
research/inception/inception/slim/inception_test.py
SimiaCryptus/models
c652a23a650070b71e286f1ded93726670161940
[ "Apache-2.0" ]
null
null
null
research/inception/inception/slim/inception_test.py
SimiaCryptus/models
c652a23a650070b71e286f1ded93726670161940
[ "Apache-2.0" ]
null
null
null
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for slim.inception.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from inception.slim import inception_model as inception class InceptionTest(tf.test.TestCase): def testBuildLogits(self): batch_size = 5 height, width = 299, 299 num_classes = 1000 with self.test_session(): inputs = tf.random_uniform((batch_size, height, width, 3)) logits, _ = inception.inception_v3(inputs, num_classes) self.assertTrue(logits.op.name.startswith('logits')) self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) def testBuildEndPoints(self): batch_size = 5 height, width = 299, 299 num_classes = 1000 with self.test_session(): inputs = tf.random_uniform((batch_size, height, width, 3)) _, end_points = inception.inception_v3(inputs, num_classes) self.assertTrue('logits' in end_points) logits = end_points['logits'] self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) self.assertTrue('aux_logits' in end_points) aux_logits = end_points['aux_logits'] self.assertListEqual(aux_logits.get_shape().as_list(), [batch_size, num_classes]) pre_pool = end_points['mixed_8x8x2048b'] self.assertListEqual(pre_pool.get_shape().as_list(), [batch_size, 8, 8, 2048]) def testVariablesSetDevice(self): 
batch_size = 5 height, width = 299, 299 num_classes = 1000 with self.test_session(): inputs = tf.random_uniform((batch_size, height, width, 3)) # Force all Variables to reside on the device. with tf.variable_scope('on_cpu'), tf.device('/cpu:0'): inception.inception_v3(inputs, num_classes) with tf.variable_scope('on_gpu'), tf.device('/gpu:0'): inception.inception_v3(inputs, num_classes) for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'): self.assertDeviceEqual(v.device, '/cpu:0') for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'): self.assertDeviceEqual(v.device, '/gpu:0') def testHalfSizeImages(self): batch_size = 5 height, width = 150, 150 num_classes = 1000 with self.test_session(): inputs = tf.random_uniform((batch_size, height, width, 3)) logits, end_points = inception.inception_v3(inputs, num_classes) self.assertTrue(logits.op.name.startswith('logits')) self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) pre_pool = end_points['mixed_8x8x2048b'] self.assertListEqual(pre_pool.get_shape().as_list(), [batch_size, 3, 3, 2048]) def testUnknowBatchSize(self): batch_size = 1 height, width = 299, 299 num_classes = 1000 with self.test_session() as sess: inputs = tf.placeholder(tf.float32, (None, height, width, 3)) logits, _ = inception.inception_v3(inputs, num_classes) self.assertTrue(logits.op.name.startswith('logits')) self.assertListEqual(logits.get_shape().as_list(), [None, num_classes]) images = tf.random_uniform((batch_size, height, width, 3)) sess.run(tf.global_variables_initializer()) output = sess.run(logits, {inputs: images.eval()}) self.assertEquals(output.shape, (batch_size, num_classes)) def testEvaluation(self): batch_size = 2 height, width = 299, 299 num_classes = 1000 with self.test_session() as sess: eval_inputs = tf.random_uniform((batch_size, height, width, 3)) logits, _ = inception.inception_v3(eval_inputs, num_classes, is_training=False) predictions = tf.argmax(logits, 1) 
sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (batch_size,)) def testTrainEvalWithReuse(self): train_batch_size = 5 eval_batch_size = 2 height, width = 150, 150 num_classes = 1000 with self.test_session() as sess: train_inputs = tf.random_uniform((train_batch_size, height, width, 3)) inception.inception_v3(train_inputs, num_classes) tf.get_variable_scope().reuse_variables() eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3)) logits, _ = inception.inception_v3(eval_inputs, num_classes, is_training=False) predictions = tf.argmax(logits, 1) sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (eval_batch_size,)) if __name__ == '__main__': tf.test.main()
41.164179
80
0.665337
956cecbd4fa6ec86c691551c92ffbcf97a722935
507
py
Python
apps/commons/templatetags/sku_get.py
makethedayunique/pandama-online-store
38c02809a89087f5a6c83fd6ee2c39dab8d66f6c
[ "MIT" ]
null
null
null
apps/commons/templatetags/sku_get.py
makethedayunique/pandama-online-store
38c02809a89087f5a6c83fd6ee2c39dab8d66f6c
[ "MIT" ]
null
null
null
apps/commons/templatetags/sku_get.py
makethedayunique/pandama-online-store
38c02809a89087f5a6c83fd6ee2c39dab8d66f6c
[ "MIT" ]
null
null
null
from django import template from django.template.defaultfilters import stringfilter import json register = template.Library() @register.filter @stringfilter def sku_get(sku_array, sku_number): """Return the string split by sep. Example usage: {{ sku_array|sku_get:sku_number }} """ res = [] for sku in json.loads(sku_array.replace("'", '"')): if sku['sku_number'] == sku_number: for attr in sku['attr_array']: res.append(attr['value']) return res
26.684211
55
0.66075
e0a29cb34a6ff8cb684bd4a399ca26d8f5cc3fd7
301
py
Python
Python/1072.py
Marcelalopes/Questoes-URI
e13894c1bcbcb252ed814d5b5e930d05c7a8494f
[ "MIT" ]
null
null
null
Python/1072.py
Marcelalopes/Questoes-URI
e13894c1bcbcb252ed814d5b5e930d05c7a8494f
[ "MIT" ]
null
null
null
Python/1072.py
Marcelalopes/Questoes-URI
e13894c1bcbcb252ed814d5b5e930d05c7a8494f
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue Nov 12 16:43:41 2019 @author: marcela """ n = int(input()) a = 0 b = 0 while n: x = int(input()) if (x>= 10 and x <= 20): a +=1 else: b += 1 n-=1 print("{} in".format(a)) print("{} out".format(b))
14.333333
35
0.478405
9949104398f15ca78196200f1bf775c1d004ea44
4,806
py
Python
treewidth/unoptimized_heuristic.py
TeamNotJava/networkx-related
a045d8a0f66fa365775e4d77ae85e5a1ccb271eb
[ "BSD-3-Clause" ]
2
2018-08-02T15:11:20.000Z
2018-09-18T13:20:36.000Z
treewidth/unoptimized_heuristic.py
TeamNotJava/networkx-related
a045d8a0f66fa365775e4d77ae85e5a1ccb271eb
[ "BSD-3-Clause" ]
3
2018-06-06T15:39:33.000Z
2018-06-06T15:40:11.000Z
treewidth/unoptimized_heuristic.py
TeamNotJava/networkx-related
a045d8a0f66fa365775e4d77ae85e5a1ccb271eb
[ "BSD-3-Clause" ]
1
2018-09-18T13:31:01.000Z
2018-09-18T13:31:01.000Z
import networkx as nx import sys # Returns the node with minimum degree or None (if the abort condition is met) def min_degree_heuristic(G): min_degree = sys.maxsize min_node = None for (node, degree) in G.degree: if degree < min_degree: if degree <= 1: # Return early return node min_node = node min_degree = degree if min_degree == G.number_of_nodes() - 1: # Fully connected: Abort condition return None else: return min_node # Returns the node that needs to be removed next or None (if the abort condition is met) def minimum_fill_in_heuristic(G): candidate_node = None # Still keep track of min_degree to abort earlier min_degree = sys.maxsize min_fill_in = sys.maxsize for (node, degree) in G.degree: min_degree = min(min_degree, degree) num_fill_in = 0 # Convert to list in order to access by index neighbors = list(G.neighbors(node)) for i in range(len(neighbors) - 1): for j in range(i + 1, len(neighbors)): if not G.has_edge(neighbors[i], neighbors[j]): num_fill_in += 1 if num_fill_in < min_fill_in: if num_fill_in == 0: return node min_fill_in = num_fill_in candidate_node = node if min_degree == G.number_of_nodes() - 1: # Fully connected: Abort condition return None else: return candidate_node # Calculates the tree-width from a tree decomposition def tree_width_from_decomp(G): return max(len(bag) for bag in G.nodes) - 1 # Calculates tree width decomposition def tree_decomp(G, heuristic): elim_node = heuristic(G) if elim_node is None: # The abort condition is met. Put all nodes into one bag. 
decomp = nx.Graph() decomp.add_node(frozenset(G.nodes)) return decomp # Create copy of graph and remove elim_node gp = G.copy() gp.remove_node(elim_node) # Connect all neighbours with each other neighbors = set(G.neighbors(elim_node)) for n in neighbors: for m in neighbors: if (n != m): gp.add_edge(n, m) # Recursively compute tree width decomposition decomp = tree_decomp(gp, heuristic) # Search old bag in decomposition old_bag = None for n in decomp.nodes: if neighbors <= n: old_bag = n break # Create new node for decomposition neighbors.add(elim_node) neighbors = frozenset(neighbors) # Add edge to decomposition (implicitly also adds the new node) decomp.add_edge(old_bag, neighbors) return decomp # version without recursion and copying, rest is still the same as in tree_decomp def tree_decomp_opt(G, heuristic): # stack where nodes and their neighbors are pushed in the order they are selected by the heuristic node_stack = [] elim_node = heuristic(G) while elim_node is not None: # Connect all neighbours with each other neighbors = set(G.neighbors(elim_node)) for n in neighbors: for m in neighbors: if (n != m): G.add_edge(n, m) # remove node from graph and push on stack (including its neighbors) G.remove_node(elim_node) node_stack.append((elim_node, neighbors)); # get next node to be removed according to heuristic elim_node = heuristic(G) # The abort condition is met. Put all nodes into one bag. decomp = nx.Graph() decomp.add_node(frozenset(G.nodes)) while node_stack: # get node and its neighbors from the stack (curr_node, neighbors) = node_stack.pop() # find a bag the neighbors are in old_bag = None for bag in decomp.nodes: if neighbors <= bag: old_bag = bag break # Create new node for decomposition neighbors.add(curr_node) neighbors = frozenset(neighbors) # Add edge to decomposition (implicitly also adds the new node) decomp.add_edge(old_bag, neighbors) return decomp if __name__ == '__main__': # Test on graph from page 2 of "Discovering Treewidth" (Hans L. 
Bodlaender) G = nx.Graph() G.add_edges_from([('a', 'b'), ('b', 'c'), ('b', 'd'), ('c', 'e'), ('c', 'f'), ('d', 'f'), ('d', 'g'), ('e', 'f'), ('f', 'g')]) decomp = tree_decomp(G, min_degree_heuristic) for (left, right) in decomp.edges: print("".join(left), "".join(right)) print("Treewidth: ", tree_width_from_decomp(decomp)) """ Output: fgd fcd fcd fce fcd bcd bcd ab Treewidth: 2 """
29.127273
102
0.604869
876f3a56616b641f2be36228f14ee4ed68eb70e3
7,000
gyp
Python
third_party/webrtc/src/chromium/src/third_party/class-dump/class-dump.gyp
bopopescu/webrtc-streaming-node
727a441204344ff596401b0253caac372b714d91
[ "MIT" ]
8
2016-02-08T11:59:31.000Z
2020-05-31T15:19:54.000Z
third_party/webrtc/src/chromium/src/third_party/class-dump/class-dump.gyp
bopopescu/webrtc-streaming-node
727a441204344ff596401b0253caac372b714d91
[ "MIT" ]
1
2021-05-05T11:11:31.000Z
2021-05-05T11:11:31.000Z
third_party/webrtc/src/chromium/src/third_party/class-dump/class-dump.gyp
bopopescu/webrtc-streaming-node
727a441204344ff596401b0253caac372b714d91
[ "MIT" ]
7
2016-02-09T09:28:14.000Z
2020-07-25T19:03:36.000Z
# Copyright (c) 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'variables': { 'mac_deployment_target': '10.9', 'mac_sdk_min': '10.9', }, 'targets': [ { 'target_name': 'class-dump', 'toolsets': ['host'], 'type': 'executable', 'sources': [ 'src/class-dump.m', 'src/Source/CDBalanceFormatter.h', 'src/Source/CDBalanceFormatter.m', 'src/Source/CDClassDump.h', 'src/Source/CDClassDump.m', 'src/Source/CDClassDumpVisitor.h', 'src/Source/CDClassDumpVisitor.m', 'src/Source/CDClassFrameworkVisitor.h', 'src/Source/CDClassFrameworkVisitor.m', 'src/Source/CDDataCursor.h', 'src/Source/CDDataCursor.m', 'src/Source/CDExtensions.h', 'src/Source/CDFatArch.h', 'src/Source/CDFatArch.m', 'src/Source/CDFatFile.h', 'src/Source/CDFatFile.m', 'src/Source/CDFile.h', 'src/Source/CDFile.m', 'src/Source/CDFindMethodVisitor.h', 'src/Source/CDFindMethodVisitor.m', 'src/Source/CDLCDataInCode.h', 'src/Source/CDLCDataInCode.m', 'src/Source/CDLCDyldInfo.h', 'src/Source/CDLCDyldInfo.m', 'src/Source/CDLCDylib.h', 'src/Source/CDLCDylib.m', 'src/Source/CDLCDylinker.h', 'src/Source/CDLCDylinker.m', 'src/Source/CDLCDynamicSymbolTable.h', 'src/Source/CDLCDynamicSymbolTable.m', 'src/Source/CDLCEncryptionInfo.h', 'src/Source/CDLCEncryptionInfo.m', 'src/Source/CDLCFunctionStarts.h', 'src/Source/CDLCFunctionStarts.m', 'src/Source/CDLCLinkeditData.h', 'src/Source/CDLCLinkeditData.m', 'src/Source/CDLCMain.h', 'src/Source/CDLCMain.m', 'src/Source/CDLCPrebindChecksum.h', 'src/Source/CDLCPrebindChecksum.m', 'src/Source/CDLCPreboundDylib.h', 'src/Source/CDLCPreboundDylib.m', 'src/Source/CDLCRoutines32.h', 'src/Source/CDLCRoutines32.m', 'src/Source/CDLCRoutines64.h', 'src/Source/CDLCRoutines64.m', 'src/Source/CDLCRunPath.h', 'src/Source/CDLCRunPath.m', 'src/Source/CDLCSegment.h', 'src/Source/CDLCSegment.m', 'src/Source/CDLCSourceVersion.h', 'src/Source/CDLCSourceVersion.m', 'src/Source/CDLCSubClient.h', 
'src/Source/CDLCSubClient.m', 'src/Source/CDLCSubFramework.h', 'src/Source/CDLCSubFramework.m', 'src/Source/CDLCSubLibrary.h', 'src/Source/CDLCSubLibrary.m', 'src/Source/CDLCSubUmbrella.h', 'src/Source/CDLCSubUmbrella.m', 'src/Source/CDLCSymbolTable.h', 'src/Source/CDLCSymbolTable.m', 'src/Source/CDLCTwoLevelHints.h', 'src/Source/CDLCTwoLevelHints.m', 'src/Source/CDLCUnixThread.h', 'src/Source/CDLCUnixThread.m', 'src/Source/CDLCUnknown.h', 'src/Source/CDLCUnknown.m', 'src/Source/CDLCUUID.h', 'src/Source/CDLCUUID.m', 'src/Source/CDLCVersionMinimum.h', 'src/Source/CDLCVersionMinimum.m', 'src/Source/CDLoadCommand.h', 'src/Source/CDLoadCommand.m', 'src/Source/CDMachOFile.h', 'src/Source/CDMachOFile.m', 'src/Source/CDMachOFileDataCursor.h', 'src/Source/CDMachOFileDataCursor.m', 'src/Source/CDMethodType.h', 'src/Source/CDMethodType.m', 'src/Source/CDMultiFileVisitor.h', 'src/Source/CDMultiFileVisitor.m', 'src/Source/CDObjectiveC1Processor.h', 'src/Source/CDObjectiveC1Processor.m', 'src/Source/CDObjectiveC2Processor.h', 'src/Source/CDObjectiveC2Processor.m', 'src/Source/CDObjectiveCProcessor.h', 'src/Source/CDObjectiveCProcessor.m', 'src/Source/CDOCCategory.h', 'src/Source/CDOCCategory.m', 'src/Source/CDOCClass.h', 'src/Source/CDOCClass.m', 'src/Source/CDOCClassReference.h', 'src/Source/CDOCClassReference.m', 'src/Source/CDOCInstanceVariable.h', 'src/Source/CDOCInstanceVariable.m', 'src/Source/CDOCMethod.h', 'src/Source/CDOCMethod.m', 'src/Source/CDOCModule.h', 'src/Source/CDOCModule.m', 'src/Source/CDOCProperty.h', 'src/Source/CDOCProperty.m', 'src/Source/CDOCProtocol.h', 'src/Source/CDOCProtocol.m', 'src/Source/CDOCSymtab.h', 'src/Source/CDOCSymtab.m', 'src/Source/CDProtocolUniquer.h', 'src/Source/CDProtocolUniquer.m', 'src/Source/CDRelocationInfo.h', 'src/Source/CDRelocationInfo.m', 'src/Source/CDSearchPathState.h', 'src/Source/CDSearchPathState.m', 'src/Source/CDSection.h', 'src/Source/CDSection.m', 'src/Source/CDStructureInfo.h', 'src/Source/CDStructureInfo.m', 
'src/Source/CDStructureTable.h', 'src/Source/CDStructureTable.m', 'src/Source/CDSymbol.h', 'src/Source/CDSymbol.m', 'src/Source/CDTextClassDumpVisitor.h', 'src/Source/CDTextClassDumpVisitor.m', 'src/Source/CDTopologicalSortProtocol.h', 'src/Source/CDTopoSortNode.h', 'src/Source/CDTopoSortNode.m', 'src/Source/CDType.h', 'src/Source/CDType.m', 'src/Source/CDTypeController.h', 'src/Source/CDTypeController.m', 'src/Source/CDTypeFormatter.h', 'src/Source/CDTypeFormatter.m', 'src/Source/CDTypeLexer.h', 'src/Source/CDTypeLexer.m', 'src/Source/CDTypeName.h', 'src/Source/CDTypeName.m', 'src/Source/CDTypeParser.h', 'src/Source/CDTypeParser.m', 'src/Source/CDVisitor.h', 'src/Source/CDVisitor.m', 'src/Source/CDVisitorPropertyState.h', 'src/Source/CDVisitorPropertyState.m', 'src/Source/NSArray-CDExtensions.h', 'src/Source/NSArray-CDExtensions.m', 'src/Source/NSData-CDExtensions.h', 'src/Source/NSData-CDExtensions.m', 'src/Source/NSError-CDExtensions.h', 'src/Source/NSError-CDExtensions.m', 'src/Source/NSScanner-CDExtensions.h', 'src/Source/NSScanner-CDExtensions.m', 'src/Source/NSString-CDExtensions.h', 'src/Source/NSString-CDExtensions.m', 'src/Source/ULEB128.h', 'src/Source/ULEB128.m', 'src/Source/cd_objc2.h', ], 'link_settings': { 'libraries': [ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework', ], 'xcode_settings': { 'OTHER_LDFLAGS': [ '-lcrypto', ], }, }, 'include_dirs': [ 'src/Source', ], 'xcode_settings': { 'GCC_PREFIX_HEADER': 'src/class-dump-Prefix.pch', 'CLANG_ENABLE_OBJC_ARC': 'YES', 'CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS': 'NO', }, }, ], # targets }
36.458333
72
0.607714
17bc9fb6706efb9d8acbddd05e149901d0c164c7
2,217
py
Python
lineuzinho.py
PedroRossiSR/lineuzinho
78a538a450683b0f21cf12511d00ce968d477e1f
[ "MIT" ]
null
null
null
lineuzinho.py
PedroRossiSR/lineuzinho
78a538a450683b0f21cf12511d00ce968d477e1f
[ "MIT" ]
null
null
null
lineuzinho.py
PedroRossiSR/lineuzinho
78a538a450683b0f21cf12511d00ce968d477e1f
[ "MIT" ]
null
null
null
import os from greeter import Greeter from forwarder import Forwarder from beaner import Beaner class Lineuzinho: def __init__(self): self.API_TOKEN = "1700885261:AAETCokNpqNDk44x3d5XASfnQfzxiNOKWfI" ## os.environ["SECRET"] self.contatinhosSheet = "http://bit.ly/contatosbcc021" self.githubRepo = "https://github.com/lineuzinho-icmc/lineuzinho" self.usefulLinks = "Estamos adicionando todo mundo aos poucos. Se puder ajudar a achar o pessoal, passa o link do grupo na descrição!\n\nInscrição na semana de recepção: calouros.icmc.usp.br/\n\nGuia do Bixo: https://bit.ly/3c9mcUG\n\nContatinho de geral: {0}\n\n".format(self.contatinhosSheet) self.docsChannel = "https://t.me/docs21" self.greeter = Greeter() self.forwarder = Forwarder() self.beaner = Beaner() def start(self, update, context): update.message.reply_text("pó fala meu rei") def greet(self, update, context): newMembers = update.message.new_chat_members greetings = self.greeter.generateNewMembersGreetings(newMembers) context.bot.send_message(chat_id=update.effective_chat.id, text=greetings) def links(self, update, context): context.bot.send_message(chat_id=update.effective_chat.id, text=self.usefulLinks, disable_web_page_preview=True) def getRepo(self, update, context): context.bot.send_message(chat_id=update.effective_chat.id, text=self.githubRepo) def getContatinhos(self, update, context): context.bot.send_message(chat_id=update.effective_chat.id, text="CHAMA NOS CONTATINHO\n{0}".format(self.contatinhosSheet), disable_web_page_preview=True) def getDocsChannel(self, update, context): context.bot.send_message(chat_id=update.effective_chat.id, text=self.docsChannel) def save(self, update, context): self.forwarder.saveToDocs(update, context) def getBeanFlavor(self, update, context): beanFlavor = self.beaner.getFlavor() context.bot.send_message(chat_id=update.effective_chat.id, text=beanFlavor) def help(self, update, context): update.message.reply_text("digita \"/\" no teclado pra dar uma olhada nos comandos 
disponíveis :V")
47.170213
302
0.727109
f47ac23fb53f60cf986ed68abddbfb4c801bb2ed
508
py
Python
005/per-user-todo/main/todo/views/delete.py
akiracadet/django-projects-v1
35ab3b88ed9bb9fab7e3d32a8fb838fff6571937
[ "MIT" ]
null
null
null
005/per-user-todo/main/todo/views/delete.py
akiracadet/django-projects-v1
35ab3b88ed9bb9fab7e3d32a8fb838fff6571937
[ "MIT" ]
5
2021-03-30T14:07:08.000Z
2021-09-22T19:28:43.000Z
006/ui-design/main/todo/views/delete.py
akiracadet/django-projects-v1
35ab3b88ed9bb9fab7e3d32a8fb838fff6571937
[ "MIT" ]
null
null
null
from django.shortcuts import redirect, render from django.views import View from todo.models import Todo class TodoDeleteView(View): template_name = 'todo/delete.html' def get(self, request, *args, **kwargs): return render(request, self.template_name, context=None) def post(self, request, *args, **kwargs): if 'yes_delete_todo' in request.POST: if 'pk' in kwargs: Todo.objects.get(pk=kwargs['pk']).delete() return redirect('todo-index')
26.736842
64
0.659449
e79ca5a16d4a123d820d0b9984e70342d3438f27
1,907
py
Python
DQMOffline/CalibTracker/test/template_DQMProfileConverter_cfg.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
852
2015-01-11T21:03:51.000Z
2022-03-25T21:14:00.000Z
DQMOffline/CalibTracker/test/template_DQMProfileConverter_cfg.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
30,371
2015-01-02T00:14:40.000Z
2022-03-31T23:26:05.000Z
DQMOffline/CalibTracker/test/template_DQMProfileConverter_cfg.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
3,240
2015-01-02T05:53:18.000Z
2022-03-31T17:24:21.000Z
import FWCore.ParameterSet.Config as cms process = cms.Process("ICALIB") process.load("DQMServices.Core.DQM_cfg") process.MessageLogger = cms.Service("MessageLogger", debugModules = cms.untracked.vstring('*'), cout = cms.untracked.PSet( threshold = cms.untracked.string('INFO') ), destinations = cms.untracked.vstring('cout') ) process.source = cms.Source("EmptyIOVSource", firstValue = cms.uint64(insertRun), lastValue = cms.uint64(insertRun), timetype = cms.string('runnumber'), interval = cms.uint64(1) ) process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) ) process.b = cms.ESSource("PoolDBESSource", BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'), DBParameters = cms.PSet( authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb') ), timetype = cms.string('runnumber'), toGet = cms.VPSet(cms.PSet( record = cms.string('SiStripFedCablingRcd'), tag = cms.string('SiStripFedCabling_GR_21X_v2_hlt') )), connect = cms.string('oracle://cms_orcoff_prod/CMS_COND_21X_STRIP') ) process.load("CalibTracker.SiStripCommon.TkDetMapESProducer_cfi") # load TrackerTopology (needed for TkDetMap and TkHistoMap) process.load("Configuration.Geometry.GeometryExtended2017_cff") process.load("Geometry.TrackerGeometryBuilder.trackerParameters_cfi") process.trackerTopology = cms.ESProducer("TrackerTopologyEP") process.sistripconn = cms.ESProducer("SiStripConnectivity") process.prod = cms.EDAnalyzer("SiStripDQMProfileToTkMapConverter", TkMapFileName = cms.untracked.string('CabTkMaptest_insertRun.png'), verbosity = cms.untracked.uint32(0), rootFilename = cms.untracked.string('insertFile'), rootDirPath = cms.untracked.string('Run insertRun/SiStrip'), TkMapDrawOption = cms.untracked.string('Zcol') ) process.p = cms.Path(process.prod)
34.053571
79
0.738332
7879c4cf76e431ce232354188e4a85f284dfe7f1
2,318
py
Python
app/core/tests/test_models.py
EyesOnly1987/external_feed_api
2dc71052074a890ebcedde5140a6940a30044773
[ "MIT" ]
null
null
null
app/core/tests/test_models.py
EyesOnly1987/external_feed_api
2dc71052074a890ebcedde5140a6940a30044773
[ "MIT" ]
null
null
null
app/core/tests/test_models.py
EyesOnly1987/external_feed_api
2dc71052074a890ebcedde5140a6940a30044773
[ "MIT" ]
null
null
null
from django.test import TestCase from django.contrib.auth import get_user_model from core import models def sample_user(email='test@londonappdev.com', password='testpass'): """Create a sample user""" return get_user_model().objects.create_user(email, password) class ModelTests(TestCase): def test_create_user_with_email_successful(self): """Test creating a new user with an email is successful""" email = 'test@londonappdev.com' password = 'Password123' user = get_user_model().objects.create_user( email=email, password=password ) self.assertEqual(user.email, email) self.assertTrue(user.check_password(password)) def test_new_user_email_normalized(self): """Test the email for a new user is normalized""" email = 'test@LONDONAPPDEV.com' user = get_user_model().objects.create_user(email, 'test123') self.assertEqual(user.email, email.lower()) def test_new_user_invalid_email(self): """Test creating user with no email raises error""" with self.assertRaises(ValueError): get_user_model().objects.create_user(None, 'test123') def test_new_superuser(self): """Test creating a new superuser""" user = get_user_model().objects.create_superuser( 'test@londonappdev.com', 'test123' ) self.assertTrue(user.is_superuser) self.assertTrue(user.is_staff) def test_tag_str(self): """Test the tag string representation""" tag = models.Tag.objects.create( user=sample_user(), name='Vegan' ) self.assertEqual(str(tag), tag.name) def test_ingredient_str(self): """Test the ingredient string representation""" ingredient = models.Ingredient.objects.create( user=sample_user(), name='Cucumber' ) self.assertEqual(str(ingredient), ingredient.name) def test_recipe_str(self): """Test the recipe string representation""" recipe = models.Recipe.objects.create( user=sample_user(), title='Steak and mushroom sauce', time_minutes=5, price=5.00 ) self.assertEqual(str(recipe), recipe.title)
30.5
69
0.63503
fadfadff872f3105c5ef74f53ec439764450a12c
5,007
py
Python
sdk/python/pulumi_azure_native/recoveryservices/get_protection_policy.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/recoveryservices/get_protection_policy.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/recoveryservices/get_protection_policy.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables from . import outputs __all__ = [ 'GetProtectionPolicyResult', 'AwaitableGetProtectionPolicyResult', 'get_protection_policy', ] @pulumi.output_type class GetProtectionPolicyResult: """ Base class for backup policy. Workload-specific backup policies are derived from this class. """ def __init__(__self__, e_tag=None, id=None, location=None, name=None, properties=None, tags=None, type=None): if e_tag and not isinstance(e_tag, str): raise TypeError("Expected argument 'e_tag' to be a str") pulumi.set(__self__, "e_tag", e_tag) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if location and not isinstance(location, str): raise TypeError("Expected argument 'location' to be a str") pulumi.set(__self__, "location", location) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if properties and not isinstance(properties, dict): raise TypeError("Expected argument 'properties' to be a dict") pulumi.set(__self__, "properties", properties) if tags and not isinstance(tags, dict): raise TypeError("Expected argument 'tags' to be a dict") pulumi.set(__self__, "tags", tags) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter(name="eTag") def e_tag(self) -> Optional[str]: """ Optional ETag. """ return pulumi.get(self, "e_tag") @property @pulumi.getter def id(self) -> str: """ Resource Id represents the complete path to the resource. 
""" return pulumi.get(self, "id") @property @pulumi.getter def location(self) -> Optional[str]: """ Resource location. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> str: """ Resource name associated with the resource. """ return pulumi.get(self, "name") @property @pulumi.getter def properties(self) -> Any: """ ProtectionPolicyResource properties """ return pulumi.get(self, "properties") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: """ Resource tags. """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> str: """ Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/... """ return pulumi.get(self, "type") class AwaitableGetProtectionPolicyResult(GetProtectionPolicyResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetProtectionPolicyResult( e_tag=self.e_tag, id=self.id, location=self.location, name=self.name, properties=self.properties, tags=self.tags, type=self.type) def get_protection_policy(policy_name: Optional[str] = None, resource_group_name: Optional[str] = None, vault_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProtectionPolicyResult: """ Base class for backup policy. Workload-specific backup policies are derived from this class. API Version: 2021-01-01. :param str policy_name: Backup policy information to be fetched. :param str resource_group_name: The name of the resource group where the recovery services vault is present. :param str vault_name: The name of the recovery services vault. 
""" __args__ = dict() __args__['policyName'] = policy_name __args__['resourceGroupName'] = resource_group_name __args__['vaultName'] = vault_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:recoveryservices:getProtectionPolicy', __args__, opts=opts, typ=GetProtectionPolicyResult).value return AwaitableGetProtectionPolicyResult( e_tag=__ret__.e_tag, id=__ret__.id, location=__ret__.location, name=__ret__.name, properties=__ret__.properties, tags=__ret__.tags, type=__ret__.type)
33.604027
146
0.633513
92f74f1664a78d1952f7483b42a4a2f77a8d9922
1,522
py
Python
app/exo_currency/utils/fixer_currency_exchanger.py
jcazallasc/exo-investing
c32900dd2fd426a15f3b02389d75b51427e5df66
[ "MIT" ]
null
null
null
app/exo_currency/utils/fixer_currency_exchanger.py
jcazallasc/exo-investing
c32900dd2fd426a15f3b02389d75b51427e5df66
[ "MIT" ]
null
null
null
app/exo_currency/utils/fixer_currency_exchanger.py
jcazallasc/exo-investing
c32900dd2fd426a15f3b02389d75b51427e5df66
[ "MIT" ]
null
null
null
import json import os import requests from rest_framework import status from exo_currency.utils.base_currency_exchanger import BaseCurrencyExchanger class FixerCurrencyExchanger(BaseCurrencyExchanger): API_URL = os.getenv('FIXER_API_URL') API_KEY = os.getenv('FIXER_API_KEY') def get_exchange_rate_data( self, source_currency: str, exchanged_currency: str, valuation_date: str, ) -> float: """Get the rate_value from origin_currency to target_currency in the valuation_date""" _has_already_rate_value = self._has_already_rate_value( source_currency, exchanged_currency, valuation_date, ) if _has_already_rate_value: return _has_already_rate_value.rate_value url = '{}/{}?access_key={}&base={}&symbols={}'.format( self.API_URL, valuation_date, self.API_KEY, source_currency, exchanged_currency, ) response = requests.get(url) if response.status_code != status.HTTP_200_OK: raise Exception json_data = json.loads(response.text) if not json_data['success']: raise Exception(json_data['error']['type']) rate_value = json_data['rates'][exchanged_currency] self._save_exchange_rate_data( source_currency, exchanged_currency, valuation_date, rate_value, ) return rate_value
25.79661
94
0.629435
75679259c5ed73b4a70a0282130eba5b799efe03
965
py
Python
aoc/day10/__init__.py
scorphus/advent-of-code-2020
12270ccc86475a18e587007da0fbfc6c9ef3a6a8
[ "BSD-3-Clause" ]
9
2020-12-04T17:40:49.000Z
2022-01-08T03:14:21.000Z
aoc/day10/__init__.py
scorphus/advent-of-code-2020
12270ccc86475a18e587007da0fbfc6c9ef3a6a8
[ "BSD-3-Clause" ]
1
2021-02-12T20:49:33.000Z
2021-02-12T20:49:33.000Z
aoc/day10/__init__.py
scorphus/advent-of-code-2020
12270ccc86475a18e587007da0fbfc6c9ef3a6a8
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # This file is part of Advent of Code 2020 # https://github.com/scorphus/advent-of-code-2020 # Licensed under the BSD-3-Clause license: # https://opensource.org/licenses/BSD-3-Clause # Copyright (c) 2020, Pablo S. Blum de Aguiar <scorphus@gmail.com> from aoc import integers_list import collections import math def part1(lines): adaps = integers_list(lines) adaps = sorted([0, *adaps, max(adaps) + 3]) counter = collections.Counter(b - a for a, b in zip(adaps[:-1], adaps[1:])) return counter[1] * counter[3] def part2(lines): adaps = integers_list(lines) adaps = sorted([0, *adaps, max(adaps) + 3]) last_diff = 0 ones = comb = 1 for diff in (b - a for a, b in zip(adaps[:-1], adaps[1:])): if diff == last_diff == 1: ones += 1 elif ones > 1: comb *= math.comb(ones, 2) + 1 ones = 1 last_diff = diff return comb
26.081081
79
0.609326
a066130b402494d5493cd54bff4b791715bc426e
1,810
py
Python
contrib/runners/announcement_runner/setup.py
saucetray/st2
8f507d6c8d9483c8371e386fe2b7998596856fd7
[ "Apache-2.0" ]
2
2021-08-04T01:04:06.000Z
2021-08-04T01:04:08.000Z
contrib/runners/announcement_runner/setup.py
saucetray/st2
8f507d6c8d9483c8371e386fe2b7998596856fd7
[ "Apache-2.0" ]
1
2022-03-31T03:53:22.000Z
2022-03-31T03:53:22.000Z
contrib/runners/announcement_runner/setup.py
saucetray/st2
8f507d6c8d9483c8371e386fe2b7998596856fd7
[ "Apache-2.0" ]
1
2019-10-11T14:42:28.000Z
2019-10-11T14:42:28.000Z
# -*- coding: utf-8 -*- # Copyright 2019 Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import os.path from setuptools import setup from setuptools import find_packages from dist_utils import fetch_requirements from dist_utils import apply_vagrant_workaround from announcement_runner import __version__ BASE_DIR = os.path.dirname(os.path.abspath(__file__)) REQUIREMENTS_FILE = os.path.join(BASE_DIR, 'requirements.txt') install_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE) apply_vagrant_workaround() setup( name='stackstorm-runner-announcement', version=__version__, description=('Announcement action runner for StackStorm event-driven automation platform'), author='StackStorm', author_email='info@stackstorm.com', license='Apache License (2.0)', url='https://stackstorm.com/', install_requires=install_reqs, dependency_links=dep_links, test_suite='tests', zip_safe=False, include_package_data=True, packages=find_packages(exclude=['setuptools', 'tests']), package_data={'announcement_runner': ['runner.yaml']}, scripts=[], entry_points={ 'st2common.runners.runner': [ 'announcement = announcement_runner.announcement_runner', ], } )
32.909091
95
0.749171
3d35477f9210e8a14b78209d2c470acec21ae950
6,104
py
Python
main.py
ramondfdez/kalman-track-TFG
f185ccf37dfa5fe27720a06583fcd6b7c64a7f03
[ "MIT" ]
null
null
null
main.py
ramondfdez/kalman-track-TFG
f185ccf37dfa5fe27720a06583fcd6b7c64a7f03
[ "MIT" ]
null
null
null
main.py
ramondfdez/kalman-track-TFG
f185ccf37dfa5fe27720a06583fcd6b7c64a7f03
[ "MIT" ]
null
null
null
import cv2 import numpy as np import os import math import sys import json import time import argparse import tensorflow as tf from mrcnn import utils from mrcnn import visualize import mrcnn.model as modellib from mrcnn.model import log from sort import Sort sys.path.append(os.path.join("coco/")) # Path dataset coco (Common Objects in Context) import coco parser = argparse.ArgumentParser() parser.add_argument("-i", "--input", required=True, help="path to input image") args = vars(parser.parse_args()) # Directorio de logs MODEL_DIR = os.path.join("logs/") # Ruta del archivo de pesos de la red rcnn COCO_MODEL_PATH = os.path.join("coco/mask_rcnn_coco.h5") # Si no existe la ruta descargar los pesos a partir de la Release de Coco if not os.path.exists(COCO_MODEL_PATH): utils.download_trained_weights(COCO_MODEL_PATH) # Configuramos numero de GPUs e imagenes por GPU class InferenceConfig(coco.CocoConfig): # Set batch size to 1 since we'll be running inference on # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU GPU_COUNT = 1 IMAGES_PER_GPU = 1 # Skip detections with < 95% confidence DETECTION_MIN_CONFIDENCE = 0.9 config = InferenceConfig() #Cargamos modelo RCNN (constructor) model = modellib.MaskRCNN( mode="inference", model_dir=MODEL_DIR, config=config ) #Volcamos los pesos en el modelo RCNN model.load_weights(COCO_MODEL_PATH, by_name=True) # Clases contempladas en la dataset de coco class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 
'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'] def asig_color(): #Asigna un color a cada clase colors = [tuple(255 * np.random.rand(3)) for _ in range(512)] return colors colors= asig_color() # Lista de Colores def detecta_personas(boxes, masks, ids, scores, class_detected): personas = [] masks_personas = [] scores_personas = [] n_instances = len(boxes) if not n_instances: print('NO INSTANCES TO DISPLAY') else: for i in range(n_instances): if not np.any(boxes[i]): continue label = class_names[ids[i]] score = scores[i] if (label in class_detected): # Solo personas personas.append(boxes[i]) masks_personas.append(masks[i]) scores_personas.append(scores[i]) return personas, masks_personas, scores_personas def centros(boxes): centers = [] n_dim = len(boxes) for i in range(n_dim): (y1, x1, y2, x2) = boxes[i] x = int(round((x1 + x2)/2.0)) y = int(round((y1 + y2)/2.0)) centro = np.array([x,y]) centers.append(centro) return np.array(centers) # Variables previas writer = None min_distance = 50 tracker = Sort() input = str(args["input"]) video_in = "videos/" + input video_out = "output/" + input.replace(".mp4", "_out.avi") class_detected = 'person' # Detectaremos solo personas #Leemos video capture = cv2.VideoCapture(video_in) # Leemos video #Ajustamos la resolución de los frames a 720x480 capture.set(cv2.CAP_PROP_FRAME_WIDTH, 720) capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480) fps = capture.get(cv2.CAP_PROP_FPS) # Obtenermos fps print(fps) try: prop = cv2.CAP_PROP_FRAME_COUNT total = int(capture.get(prop)) print("[INFO] {} Frames totales: ".format(total)) # Exception por si no se pueden recuperar los frames except: print("[INFO] No se detectó el número de Frames") total = -1 ''' Bucle de imágenes ''' while True: ret, frame = 
capture.read() # Capturamos frame if ret == True: # En caso de capturarlo correctamente start = time.time() # Ponemos en marcha timer results = model.detect([frame], verbose=0) # Detectamos objetos r = results[0] # Solo una imagen personas, people_masks, people_scores = detecta_personas( r['rois'], r['masks'], r['class_ids'], r['scores'], class_detected) if (len(centers) > 0): trackers = tracker.update(personas,frame) for j in range(len(trackers)): y1, x1, y2, x2 = personas[j] d1 = abs(y1-c2) d2 = abs(x1-c1) #cv2.circle(frame,(c1,c2), 6, (0,0,0),-1) except: pass if (len(tracker.tracks[j].trace) > 1): x = int(tracker.tracks[j].trace[-1][0,0]) y = int(tracker.tracks[j].trace[-1][0,1]) tl = (x-10,y-10) br = (x+10,y+10) cv2.rectangle(frame,tl,br,colors[j],1) cv2.putText(frame,"Persona: " + str(tracker.tracks[j].trackId+1), (x-10,y-10),0, 1, colors[j],2) cv2.circle(frame,(x,y), 6, colors[j],-1) end = time.time() # Ponemos en marcha timer if writer is None: # Escribimos en el video de salida fourcc = cv2.VideoWriter_fourcc(*"MJPG") writer = cv2.VideoWriter(video_out, fourcc, fps, (frame.shape[1], frame.shape[0]), True) if total > 0: elap = (end - start) print("[INFO] Tiempo que ha tardado el frame: {:.4f} s".format(elap)) print("[INFO] Tiempo estimado: {:.4f}".format(elap * total)) writer.write(frame) # Escribimos en el disco else: break writer.release() capture.release()
30.52
106
0.629915
1d20073c036d909c85054129637ad7db68a00b4b
155
py
Python
src/sentry/mediators/external_requests/__init__.py
pierredup/sentry
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
[ "BSD-3-Clause" ]
2
2019-03-04T12:45:54.000Z
2019-03-04T12:45:55.000Z
src/sentry/mediators/external_requests/__init__.py
pierredup/sentry
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
[ "BSD-3-Clause" ]
196
2019-06-10T08:34:10.000Z
2022-02-22T01:26:13.000Z
src/sentry/mediators/external_requests/__init__.py
pierredup/sentry
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
[ "BSD-3-Clause" ]
1
2017-02-09T06:36:57.000Z
2017-02-09T06:36:57.000Z
from __future__ import absolute_import from .select_requester import SelectRequester # NOQA from .issue_link_requester import IssueLinkRequester # NOQA
31
60
0.851613
50d4d86f469f4638f6bd31275c242deef74bbd42
454
py
Python
minos/migrations/0010_auto_20160713_2221.py
TomWerner/HawkeyeChallenge
69652978b017ebd443c807b8d0091ded2598c86c
[ "MIT" ]
1
2016-07-27T19:10:20.000Z
2016-07-27T19:10:20.000Z
minos/migrations/0010_auto_20160713_2221.py
TomWerner/HawkeyeChallenge
69652978b017ebd443c807b8d0091ded2598c86c
[ "MIT" ]
null
null
null
minos/migrations/0010_auto_20160713_2221.py
TomWerner/HawkeyeChallenge
69652978b017ebd443c807b8d0091ded2598c86c
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Generated by Django 1.9.2 on 2016-07-14 02:21 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('minos', '0009_auto_20160713_2123'), ] operations = [ migrations.AlterField( model_name='submission', name='language', field=models.CharField(max_length=20), ), ]
21.619048
50
0.618943
44bc96a290fb5e90f008bb903afda8d6a33eb4a6
1,024
py
Python
jirafs/commands/subtask.py
coddingtonbear/jirafs
2eeeeee2188f7945560008e76dfe2a15c4a45da3
[ "MIT" ]
119
2015-01-06T08:05:38.000Z
2022-03-22T21:19:50.000Z
jirafs/commands/subtask.py
coddingtonbear/jirafs
2eeeeee2188f7945560008e76dfe2a15c4a45da3
[ "MIT" ]
54
2015-03-18T20:37:48.000Z
2022-03-23T14:26:26.000Z
jirafs/commands/subtask.py
coddingtonbear/jirafs
2eeeeee2188f7945560008e76dfe2a15c4a45da3
[ "MIT" ]
16
2015-04-29T18:39:31.000Z
2020-09-23T12:30:49.000Z
from jirafs import utils from jirafs.plugin import CommandPlugin class Command(CommandPlugin): """Create a subtask of a given issue.""" MIN_VERSION = "2.0.0" MAX_VERSION = "3.0.0" def main(self, folder, args, **kwargs): summary = " ".join(args.summary) issue_data = { "project": {"key": folder.issue.fields.project.key}, "summary": summary, "issuetype": {"name": "Sub-task"}, "parent": {"id": folder.issue.key}, } if args.issuetype is not None: issue_data["issuetype"]["name"] = args.issuetype folder.jira.create_issue(fields=issue_data) commands = utils.get_installed_commands() jira = utils.lazy_get_jira() commands["fetch"].execute_command( [], jira=jira, path=folder.path, command_name="fetch", ) def add_arguments(self, parser): parser.add_argument("summary", nargs="+") parser.add_argument("--issuetype", default=None, type=str)
29.257143
66
0.59668
8598303acfafbc81aaf7689fea27cd9d5c38460f
32
py
Python
Python/cubesat2017/soft/embedded/cpu/boot.py
Misha91908/Portfolio
c10b06462ec45f039778c77aa6c84e871cac34f6
[ "MIT" ]
3
2017-09-03T17:17:44.000Z
2017-12-10T12:26:46.000Z
Python/cubesat2017/soft/embedded/cpu/boot.py
Misha91908/Portfolio
c10b06462ec45f039778c77aa6c84e871cac34f6
[ "MIT" ]
null
null
null
Python/cubesat2017/soft/embedded/cpu/boot.py
Misha91908/Portfolio
c10b06462ec45f039778c77aa6c84e871cac34f6
[ "MIT" ]
2
2017-10-01T01:10:55.000Z
2018-07-15T19:49:29.000Z
import pyb pyb.main('main.py')
8
19
0.6875
c2e5d37212e62ac6be89f006783fa635129f1882
2,836
py
Python
exec_helpers/_helpers.py
python-useful-helpers/exec-helpers
3e0adfa7dded72ac1c9c93bd88db070f4c9050b6
[ "Apache-2.0" ]
12
2018-03-23T23:37:40.000Z
2021-07-16T16:07:28.000Z
exec_helpers/_helpers.py
python-useful-helpers/exec-helpers
3e0adfa7dded72ac1c9c93bd88db070f4c9050b6
[ "Apache-2.0" ]
111
2018-03-26T14:10:52.000Z
2021-07-12T07:12:45.000Z
exec_helpers/_helpers.py
python-useful-helpers/exec-helpers
3e0adfa7dded72ac1c9c93bd88db070f4c9050b6
[ "Apache-2.0" ]
6
2018-03-26T13:37:21.000Z
2018-09-07T03:35:09.000Z
"""Shared helpers.""" from __future__ import annotations # Standard Library import functools import re import shlex import typing def string_bytes_bytearray_as_bytes(src: typing.Union[str, bytes, bytearray]) -> bytes: """Get bytes string from string/bytes/bytearray union. :param src: source string or bytes-like object :return: Byte string :rtype: bytes :raises TypeError: unexpected source type. """ if isinstance(src, bytes): return src if isinstance(src, bytearray): return bytes(src) if isinstance(src, str): return src.encode("utf-8") raise TypeError(f"{src!r} has unexpected type: not conform to Union[str, bytes, bytearray]") # pragma: no cover def _mask_command(text: str, rules: str) -> str: """Mask part of text using rules. :param text: source text :type text: str :param rules: regex rules to mask. :type rules: str :return: source with all MATCHED groups replaced by '<*masked*>' :rtype: str """ masked: typing.List[str] = [] # places to exclude prev = 0 for match in re.finditer(rules, text): for idx, _ in enumerate(match.groups(), start=1): start, end = match.span(idx) masked.append(text[prev:start]) masked.append("<*masked*>") prev = end masked.append(text[prev:]) return "".join(masked) def mask_command(text: str, *rules: typing.Optional[str]) -> str: """Apply all rules to command. :param text: source text :type text: str :param rules: regex rules to mask. :type rules: typing.Optional[str] :return: source with all MATCHED groups replaced by '<*masked*>' :rtype: str """ return functools.reduce(_mask_command, (rule for rule in rules if rule is not None), text) def cmd_to_string(command: typing.Union[str, typing.Iterable[str]]) -> str: """Convert command to string for usage with shell. :param command: original command. 
:type command: typing.Union[str, typing.Iterable[str]] :return: command as single string :rtype: str """ if isinstance(command, str): return command return " ".join(shlex.quote(elem) for elem in command) def chroot_command(command: str, chroot_path: typing.Optional[str] = None) -> str: """Prepare command for chroot execution. :param command: original command. :type command: str :param chroot_path: chroot path :type chroot_path: typing.Optional[str] :return: command to be executed with chroot rules if applicable :rtype: str """ if chroot_path and chroot_path != "/": chroot_dst: str = shlex.quote(chroot_path.strip()) quoted_command = shlex.quote(command) return f'chroot {chroot_dst} sh -c {shlex.quote(f"eval {quoted_command}")}' return command
29.852632
116
0.657616