blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 246
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cf3e00e18c62ceff4ee2e392754a7ae771574aa9 | 5b0a71b1b9ff85ea107406f804936035af2352a3 | /question_ask/question_temp.py | bd7a0b8148e41585de7496596c82fa2e41183678 | [] | no_license | FantasticCode2019/kg--demo-Computer-network-course | 58d7c8763d3dc530dc0d833dad734db78b6ffc9b | e8024153c4473e67cd95695bf55071faa1e6d221 | refs/heads/master | 2023-04-12T05:09:59.828476 | 2021-05-10T01:19:46 | 2021-05-10T01:19:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,339 | py | # encoding=utf-8
"""
@author: xuzelin
@file: question_temp.py
@time: 2020/12/20
@desc:
设置问题模板,为每个模板设置对应的SPARQL语句。demo提供如下模板:
1. 某实体的兄弟关系有哪些
2. 某阶段之后是哪个阶段
3. 某实体包含了哪些实体
4. 与某实体内涵相同的是
5. 与某实体内涵相反的是
6. 某实体继承自哪个实体
7. 某实体参考自哪里/那本教程
8. 与某实体可以相互变换的实体有哪些
9. 与某实体有因果的实体有哪些?
10.某实体的某属性是什么
11.某实体是正确的吗?
"""
from refo import finditer, Predicate, Star, Any, Disjunction
import re
# SPARQL prefix declarations shared by every generated query.
SPARQL_PREXIX = u"""
PREFIX : <http://www.semanticweb.org/yan/ontologies/2020/9/untitled-ontology-6#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
"""

# Query skeletons; {{ / }} render as literal braces after .format().
SPARQL_SELECT_TEM = (
    u"{prefix}\n"
    u"SELECT DISTINCT {select} WHERE {{\n"
    u"{expression}\n"
    u"}}\n"
)
SPARQL_COUNT_TEM = (
    u"{prefix}\n"
    u"SELECT COUNT({select}) WHERE {{\n"
    u"{expression}\n"
    u"}}\n"
)
SPARQL_ASK_TEM = (
    u"{prefix}\n"
    u"ASK {{\n"
    u"{expression}\n"
    u"}}\n"
)
class W(Predicate):
    """Single-word predicate for refo patterns.

    Matches a tagged word when both its text and its POS tag match the
    given regexes; each regex is anchored at the end by appending '$'
    (re.match already anchors at the start).
    """

    def __init__(self, token=".*", pos=".*"):
        self.token = re.compile(token + "$")
        self.pos = re.compile(pos + "$")
        super(W, self).__init__(self.match)

    def match(self, word):
        """True when both the text and the POS tag of *word* match."""
        token_ok = self.token.match(word.token)
        pos_ok = self.pos.match(word.pos)
        return token_ok and pos_ok
class Rule(object):
    """A question template: a refo pattern plus the SPARQL builder to run
    when the pattern matches a tagged sentence."""

    def __init__(self, condition_num, condition=None, action=None):
        assert condition and action
        self.condition = condition          # refo pattern over tagged words
        self.action = action                # callable(word_list) -> sparql or None
        self.condition_num = condition_num  # template weight/priority

    def apply(self, sentence):
        """Collect every word covered by any match of the pattern and feed
        the list to the action.

        Returns a (sparql_or_None, condition_num) pair.
        """
        matched_words = []
        for match in finditer(self.condition, sentence):
            start, end = match.span()
            matched_words.extend(sentence[start:end])
        return self.action(matched_words), self.condition_num
class KeywordRule(object):
    """Pattern-to-property resolver: when the pattern matches a sentence,
    the zero-argument action supplies the ontology property name."""

    def __init__(self, condition=None, action=None):
        assert condition and action
        self.condition = condition
        self.action = action

    def apply(self, sentence):
        """Return action() when the pattern covers at least one word of
        *sentence*, otherwise None."""
        covered = []
        for match in finditer(self.condition, sentence):
            start, end = match.span()
            covered.extend(sentence[start:end])
        return self.action() if covered else None
class QuestionSet:
    """One SPARQL builder per supported question template.

    Every builder scans *word_objects* (words tagged by the segmenter) for
    the first domain entity (POS tag ``pos_disanzhang``) and renders a
    SPARQL query about it; ``None`` is returned when the question contains
    no entity.

    The nine relation questions previously repeated the same body verbatim;
    they now share :meth:`_relation_query`.  The original's
    ``token.encode('utf-8').decode('utf-8')`` round-trip was a no-op on
    Python 3 ``str`` and has been dropped.
    """

    def __init__(self):
        pass

    @staticmethod
    def _relation_query(word_objects, relation):
        """Shared SELECT builder.

        Finds the first entity in *word_objects* and selects the names of
        all entities linked to it through the object property *relation*
        (e.g. ``兄弟关系``); the leading ':' is added here.
        """
        for w in word_objects:
            if w.pos == pos_disanzhang:
                e = (u"?y :名称 '{entity}'."
                     u"?y :{relation} ?z."
                     u"?z :名称 ?x.").format(entity=w.token, relation=relation)
                return SPARQL_SELECT_TEM.format(prefix=SPARQL_PREXIX,
                                                select=u"?x",
                                                expression=e)
        return None

    @staticmethod
    def has_brother_question(word_objects):
        """Which entities are siblings (兄弟关系) of the entity?"""
        return QuestionSet._relation_query(word_objects, u"兄弟关系")

    @staticmethod
    def has_Successive_question(word_objects):
        """Which stage comes after this stage (前后继关系)?"""
        return QuestionSet._relation_query(word_objects, u"前后继关系")

    @staticmethod
    def has_contain_question(word_objects):
        """Which entities does the entity contain (包含关系)?"""
        return QuestionSet._relation_query(word_objects, u"包含关系")

    @staticmethod
    def has_same_question(word_objects):
        """Which entities mean the same as the entity (同一关系)?"""
        return QuestionSet._relation_query(word_objects, u"同一关系")

    @staticmethod
    def has_opposition_question(word_objects):
        """Which entities mean the opposite of the entity (对立关系)?"""
        return QuestionSet._relation_query(word_objects, u"对立关系")

    @staticmethod
    def has_inherit_question(word_objects):
        """Which entity does the entity inherit from?

        NOTE(review): maps to 前后继关系, the same property used by
        has_Successive_question — kept from the original code; confirm the
        ontology really has no dedicated inheritance property.
        """
        return QuestionSet._relation_query(word_objects, u"前后继关系")

    @staticmethod
    def has_reference_question(word_objects):
        """Where / which textbook is the entity referenced from (参考关系)?"""
        return QuestionSet._relation_query(word_objects, u"参考关系")

    @staticmethod
    def has_vary_question(word_objects):
        """Which entities can be mutually transformed with the entity (变换关系)?"""
        return QuestionSet._relation_query(word_objects, u"变换关系")

    @staticmethod
    def has_karma_question(word_objects):
        """Which entities have a cause/effect link with the entity (因果关系)?"""
        return QuestionSet._relation_query(word_objects, u"因果关系")

    @staticmethod
    def has_basic_disanzhang_info_question(word_objects):
        """What is <attribute> of the entity?

        The attribute keyword in the question is resolved to a data
        property through ``disanzhang_basic_keyword_rules``.
        """
        keyword = None
        for r in disanzhang_basic_keyword_rules:
            keyword = r.apply(word_objects)
            if keyword is not None:
                break
        for w in word_objects:
            if w.pos == pos_disanzhang:
                # `keyword` already carries its leading ':' (e.g. ':定义').
                e = (u"?s :名称 '{entity}'."
                     u"?s {keyword} ?x.").format(entity=w.token, keyword=keyword)
                return SPARQL_SELECT_TEM.format(prefix=SPARQL_PREXIX,
                                                select=u"?x",
                                                expression=e)
        return None

    @staticmethod
    def is_ASKattribute_question(word_objects):
        """Is the entity correct (正确)?  Rendered as a boolean ASK query."""
        for w in word_objects:
            if w.pos == pos_disanzhang:
                e = (u"?s :名称 '{entity}'."
                     u"?s rdf:type :正确.").format(entity=w.token)
                return SPARQL_ASK_TEM.format(prefix=SPARQL_PREXIX, expression=e)
        return None
class PropertyValueSet:
    """Attribute-keyword handlers.

    Each static method returns the ontology data-property name (including
    its leading ':') for one attribute keyword; they are wired to keyword
    patterns in ``disanzhang_basic_keyword_rules``.
    """

    def __init__(self):
        pass

    @staticmethod
    def return_dingyi_value():
        return ':定义'

    @staticmethod
    def return_jieshao_value():
        return ':介绍'

    @staticmethod
    def return_youdian_value():
        return ':优点'

    @staticmethod
    def return_quedian_value():
        return ':缺点'

    @staticmethod
    def return_zuoyong_value():
        return ':作用'

    @staticmethod
    def return_juyou_value():
        return ':具有'

    @staticmethod
    def return_neirong_value():
        return ':内容'

    @staticmethod
    def return_biecheng_value():
        return ':别称'

    @staticmethod
    def return_gongneng_value():
        return ':功能'

    @staticmethod
    def return_baokuo_value():
        return ':包括'

    @staticmethod
    def return_hanyi_value():
        return ':含义'

    @staticmethod
    def return_shuyu_value():
        return ':属于'

    @staticmethod
    def return_shuxing_value():
        return ':属性'

    @staticmethod
    def return_xingzhi_value():
        return ':性质'

    @staticmethod
    def return_yiyi_value():
        return ':意义'

    @staticmethod
    def return_shijian_value():
        return ':时间'

    @staticmethod
    def return_tezheng_value():
        return ':特征'

    @staticmethod
    def return_tedian_value():
        return ':特点'

    @staticmethod
    def return_zhuangtai_value():
        return ':状态'

    @staticmethod
    def return_jiancheng_value():
        return ':简称'

    @staticmethod
    def return_leixing_value():
        return ':类型'

    @staticmethod
    def return_jibie_value():
        return ':级别'

    @staticmethod
    def return_zucheng_value():
        return ':组成'

    @staticmethod
    def return_jiegou_value():
        return ':结构'

    @staticmethod
    def return_zhize_value():
        return ':职责'

    @staticmethod
    def return_yingwen_value():
        return ':英文'

    @staticmethod
    def return_biaodashi_value():
        return ':表达式'

    @staticmethod
    def return_yaosu_value():
        return ':要素'

    @staticmethod
    def return_guize_value():
        return ':规则'

    @staticmethod
    def return_xiangjie_value():
        return ':详解'

    @staticmethod
    def return_shiyi_value():
        return ':释义'

    @staticmethod
    def return_lingyu_value():
        return ':领域'

    @staticmethod
    def return_gainian_value():
        return ':概念'
# --- Keywords ---------------------------------------------------------------
# POS tag that marks recognised domain entities in the tagged question.
pos_disanzhang = "nz"
disanzhang_entity = W(pos=pos_disanzhang)

# Attribute keywords; each one maps to an ontology data property via
# PropertyValueSet (see disanzhang_basic_keyword_rules below).
dingyi = W("定义")
jieshao = W("介绍")
youdian = W("优点")
quedian = W("缺点")
zuoyong = W("作用")
juyou = W("具有")
neirong = W("内容")
biecheng = W("别称")
gongneng = W("功能")
baokuo = W("包括")
hanyi = W("含义")
shuyu = W("属于")
shuxing = W("属性")
xingzhi = W("性质")
yiyi = W("意义")
shijian = W("时间")
tezheng = W("特征")
tedian = W("特点")
zhuangtai = W("状态")
jiancheng = W("简称")
leixing = W("类型")
jibie = W("级别")
zucheng = W("组成")
jiegou = W("结构")
zhize = W("职责")
yingwen = W("英文")
biaodashi = W("表达式")
yaosu = W("要素")
guize = W("规则")
xiangjie = W("详解")
shiyi = W("释义")
lingyu = W("领域")
gainian = W("概念")

# Any single attribute keyword.
attribute = (dingyi | jieshao | youdian | quedian | zuoyong | juyou
             | neirong | biecheng | gongneng | baokuo | hanyi |
             shuyu | shuxing | xingzhi | yiyi | shijian | tezheng |
             tedian | zhuangtai | jiancheng | leixing | jibie |
             zucheng | jiegou | zhize | yingwen | biaodashi |
             yaosu | guize | xiangjie | shiyi | lingyu | gainian)

# Relation keywords.
brother = W("兄弟")
Successive = W("阶段")
contain = W("包含")
connotation = W("内涵") | W("意思")
same = (W("相同") | W("一致") | W("一样"))
opposition = (W("相反") | W("对立"))
inherit = W("继承")
reference = W("参考")
vary = W("变换")
karma = W("因果")
zhengque = W("正确")


def _question_pattern(*keywords):
    """Build the common question shape used by every template:

        <entity> ...filler... kw1 ...filler... kw2 ...filler...

    i.e. the entity followed by each keyword in order, with non-greedy
    arbitrary words allowed between and after them.
    """
    pattern = disanzhang_entity
    for keyword in keywords:
        pattern = pattern + Star(Any(), greedy=False) + keyword
    return pattern + Star(Any(), greedy=False)


# Question templates:
#  1. siblings of an entity              2. stage following a stage
#  3. entities contained by an entity    4. entities with the same meaning
#  5. entities with the opposite meaning 6. entity inherited from
#  7. where the entity is referenced     8. mutually transformable entities
#  9. cause/effect-related entities     10. value of an attribute
# 11. is the entity correct (ASK)
rules = [
    Rule(condition_num=2, condition=_question_pattern(brother), action=QuestionSet.has_brother_question),
    Rule(condition_num=2, condition=_question_pattern(Successive), action=QuestionSet.has_Successive_question),
    Rule(condition_num=2, condition=_question_pattern(contain), action=QuestionSet.has_contain_question),
    Rule(condition_num=2, condition=_question_pattern(connotation, same), action=QuestionSet.has_same_question),
    Rule(condition_num=2, condition=_question_pattern(connotation, opposition), action=QuestionSet.has_opposition_question),
    Rule(condition_num=2, condition=_question_pattern(inherit), action=QuestionSet.has_inherit_question),
    Rule(condition_num=2, condition=_question_pattern(reference), action=QuestionSet.has_reference_question),
    Rule(condition_num=2, condition=_question_pattern(vary), action=QuestionSet.has_vary_question),
    Rule(condition_num=2, condition=_question_pattern(karma), action=QuestionSet.has_karma_question),
    Rule(condition_num=2, condition=_question_pattern(attribute), action=QuestionSet.has_basic_disanzhang_info_question),
    Rule(condition_num=3, condition=_question_pattern(zhengque), action=QuestionSet.is_ASKattribute_question),
]

# Attribute-keyword rules: the first one that matches decides which data
# property has_basic_disanzhang_info_question queries.
disanzhang_basic_keyword_rules = [
    KeywordRule(condition=_question_pattern(keyword), action=action)
    for keyword, action in [
        (dingyi, PropertyValueSet.return_dingyi_value),
        (jieshao, PropertyValueSet.return_jieshao_value),
        (youdian, PropertyValueSet.return_youdian_value),
        (quedian, PropertyValueSet.return_quedian_value),
        (zuoyong, PropertyValueSet.return_zuoyong_value),
        (juyou, PropertyValueSet.return_juyou_value),
        (neirong, PropertyValueSet.return_neirong_value),
        (biecheng, PropertyValueSet.return_biecheng_value),
        (gongneng, PropertyValueSet.return_gongneng_value),
        (baokuo, PropertyValueSet.return_baokuo_value),
        (hanyi, PropertyValueSet.return_hanyi_value),
        (shuyu, PropertyValueSet.return_shuyu_value),
        (shuxing, PropertyValueSet.return_shuxing_value),
        (xingzhi, PropertyValueSet.return_xingzhi_value),
        (yiyi, PropertyValueSet.return_yiyi_value),
        (shijian, PropertyValueSet.return_shijian_value),
        (tezheng, PropertyValueSet.return_tezheng_value),
        (tedian, PropertyValueSet.return_tedian_value),
        (zhuangtai, PropertyValueSet.return_zhuangtai_value),
        (jiancheng, PropertyValueSet.return_jiancheng_value),
        (leixing, PropertyValueSet.return_leixing_value),
        (jibie, PropertyValueSet.return_jibie_value),
        (zucheng, PropertyValueSet.return_zucheng_value),
        (jiegou, PropertyValueSet.return_jiegou_value),
        (zhize, PropertyValueSet.return_zhize_value),
        (yingwen, PropertyValueSet.return_yingwen_value),
        (biaodashi, PropertyValueSet.return_biaodashi_value),
        (yaosu, PropertyValueSet.return_yaosu_value),
        (guize, PropertyValueSet.return_guize_value),
        (xiangjie, PropertyValueSet.return_xiangjie_value),
        (shiyi, PropertyValueSet.return_shiyi_value),
        (lingyu, PropertyValueSet.return_lingyu_value),
        (gainian, PropertyValueSet.return_gainian_value),
    ]
]
| [
"1127485560@qq.com"
] | 1127485560@qq.com |
06e27836ec9ad1be7688bc3689db92957c6d74ec | 7f2aae79774d166173b2e003bf63c88369159d6f | /game_types/Board.py | fc76f18aafb07cf33d67913ed478c918b6a7d68d | [] | no_license | enisnazif/battleships | 6605df2c8581491b302daed26f0e697fcfb47cab | d99ecb60defb69996ac32cb3fe9661e263aec56a | refs/heads/master | 2021-06-11T13:59:25.088685 | 2019-05-19T10:53:12 | 2019-05-19T10:53:12 | 180,877,603 | 0 | 0 | null | 2021-05-06T19:33:56 | 2019-04-11T21:03:35 | Python | UTF-8 | Python | false | false | 5,196 | py | from typing import Tuple, List
from config import BOARD_SIZE
from exceptions import (
InvalidShotException,
InvalidShipPlacementException,
)
from game_types.Orientation import Orientation
from game_types.Point import Point
from game_types.Ship import Ship
from game_types.ShipType import ShipType
class Board:
    """State of one player's battleships board.

    Tracks every point occupied by a ship, every point that has been shot,
    and — per ship — the points that have not been hit yet.
    """

    def __init__(self, board_size=BOARD_SIZE):
        assert board_size > 0
        self.board_size = board_size
        self._shot_locations = set()
        self._all_ship_locations = set()
        # ShipType -> set of that ship's points which have NOT been hit yet.
        self._individual_ship_locations = dict()

    @property
    def board(self):
        """Every point of the board, as an immutable set."""
        return frozenset(
            Point(x, y)
            for x in range(self.board_size)
            for y in range(self.board_size)
        )

    @property
    def all_ship_locations(self):
        return self._all_ship_locations

    @property
    def individual_ship_locations(self):
        return self._individual_ship_locations

    @property
    def shot_locations(self):
        return self._shot_locations

    @all_ship_locations.setter
    def all_ship_locations(self, value):
        self._all_ship_locations = value

    @individual_ship_locations.setter
    def individual_ship_locations(self, value):
        self._individual_ship_locations = value

    @shot_locations.setter
    def shot_locations(self, value):
        self._shot_locations = value

    def point_is_shot(self, point: Point):
        """Return True if 'point' on the board has already been shot."""
        return point in self.shot_locations

    def is_board_lost(self):
        """True when the board is in a losing state for the owning player,
        i.e. at least one ship was placed and every ship point was shot."""
        return bool(self.all_ship_locations) and \
            self.all_ship_locations.issubset(self.shot_locations)

    def place_ship(self, ship: Ship, location: Point, orientation: Orientation) -> None:
        """Place 'ship' at 'location' with the given orientation.

        Raises InvalidShipPlacementException when the ship would leave the
        board or overlap a previously placed ship.
        """
        ship_point_set = ship.get_points(location, orientation)
        if self.board.issuperset(ship_point_set) and \
                ship_point_set.isdisjoint(self.all_ship_locations):
            self.all_ship_locations.update(ship_point_set)
            # Copy: shoot() removes points from this set as hits land.
            self.individual_ship_locations[ship.ship_type] = set(ship_point_set)
        else:
            raise InvalidShipPlacementException(f'Placement of {ship} at {location} in orientation {orientation.value} is invalid')

    def shoot(self, point: Point) -> Tuple[bool, bool, ShipType]:
        """Shoot the board location given by 'point'.

        Returns (is_hit, is_sunk, ship_sunk); ship_sunk is None unless this
        shot sank a ship.  Raises InvalidShotException when 'point' is off
        the board or was already shot.
        """
        if not self.point_in_board(point):
            raise InvalidShotException(f'{point} is not on the board')
        if self.point_is_shot(point):
            raise InvalidShotException(f'{point} has already been shot')

        self.shot_locations.add(point)
        is_hit = point in self.all_ship_locations
        is_sunk = False
        ship_sunk = None
        if is_hit:
            # Find which ship was hit; ships never overlap, so at most one
            # set contains the point.
            for ship_type, remaining_points in self.individual_ship_locations.items():
                if point in remaining_points:
                    remaining_points.remove(point)
                    if not remaining_points:
                        is_sunk = True
                        ship_sunk = ship_type
        return is_hit, is_sunk, ship_sunk

    @staticmethod
    def is_valid_ship_placement(placements: List[Tuple[Ship, Point, Orientation]]) -> bool:
        """Static helper: True when the given placements neither overlap
        each other nor leave the (BOARD_SIZE-sized) board."""
        all_points = set()
        for ship, point, orientation in placements:
            all_points.update(ship.get_points(point, orientation))
        # Overlap check: a collision would shrink the union below the sum
        # of the individual ship sizes.
        if len(all_points) != sum(len(ship) for ship, _, _ in placements):
            return False
        # Bounds check.  NOTE(review): uses the global BOARD_SIZE rather
        # than an instance's board_size, as in the original.
        return all_points.issubset(
            {Point(x, y) for x in range(BOARD_SIZE) for y in range(BOARD_SIZE)}
        )

    @staticmethod
    def point_in_board(point: Point):
        """True if 'point' lies within the (BOARD_SIZE-sized) board."""
        return point in {
            Point(x, y)
            for x in range(BOARD_SIZE)
            for y in range(BOARD_SIZE)
        }
| [
"e.nazif@warwick.ac.uk"
] | e.nazif@warwick.ac.uk |
34b242e509f466f320688594ae1b456377f19fc0 | 3cd5de408139f0be09bf58ba406043819aca1d2c | /program1.py | 8500ead54844d796387e618284300bc569f77521 | [] | no_license | nadgabriel/first_repo | 79417e8ae9c73a00a7cd4ef0da836c109149761b | 500ea88fbd6da9c7cb60e363a9e733b9d5a6cb35 | refs/heads/master | 2023-04-01T14:08:43.941135 | 2020-12-23T22:30:25 | 2020-12-23T22:30:25 | 271,045,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | # Do premennej mozme ulozit hocijaky datovy typ
# A variable can hold any data type.
pocet_jablk=2
pocet_hrusiek=3.4
number_of_windows=10
print("pocet_jabl = ", pocet_jablk)
# Variable names are case sensitive, so these two are different variables.
cars=1
Cars=2
# A variable may be assigned more than once.
jablka=4
jablka=3
# Variable names may only consist of upper/lowercase English letters and the underscore _.
toto_je_premenna = 4
# Always use names that describe the variable's contents.
pocet_byvalych_frajeriek = 12
# Any amount of whitespace may surround the equals sign; none is recommended.
pocet_zubov=32
pocet_zubov = 32
pocet_zubov = 32
print("pocet_zubov= ", pocet_zubov)
number_of_limbs=2 | [
"nadgabriell@gmail.com"
] | nadgabriell@gmail.com |
992131d644d1a04bd07ae41cf367774e1e0adef0 | fb7cb229a8f68f9ba3cc23ce51238008841516e8 | /Sensorslab2/Task2/first_pkg/first_pkg/publish_gps_messge.py | b99bf288a6eb3c6f23f35d8631db0fca8795ff89 | [] | no_license | RozanMagdy/ITI-Labs | 24852442c8cae3f9d0fe44e55e5995853f18a9b5 | 3e3a4b85a415492c6eb539c79be128504fefaf96 | refs/heads/master | 2023-06-04T18:07:58.256689 | 2021-06-17T11:43:30 | 2021-06-17T11:43:30 | 359,421,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,978 | py | #!/usr/bin/env python3
import rclpy
import csv
from rclpy.node import Node
from sensor_msgs.msg import NavSatFix
#TODO Import needed messages
class my_node (Node):
    """ROS 2 node that replays GGA/GST rows from a CSV file as
    sensor_msgs/NavSatFix messages on the 'fix' topic, one row every 5 s."""

    def __init__(self):
        super().__init__("Node_name")
        self.csv_file_path = "GGA_GST.csv"
        self.lines = []
        with open(self.csv_file_path, newline='\n') as csvfile:
            self.readCSV = csv.reader(csvfile, delimiter=',')
            for row in self.readCSV:
                self.lines.append(row)
        self.count = 1  # start at 1 to skip the CSV header row
        self.create_timer(5, self.timer_call)
        self.obj_pub = self.create_publisher(NavSatFix, "fix", 10)

    def timer_call(self):
        """Publish the next CSV row as a NavSatFix, looping forever."""
        row = self.lines[self.count]
        self.count += 1
        if self.count >= len(self.lines):
            # Repeat the CSV file continuously.  BUG FIX: the original reset
            # to 0, which replays the header row and makes float() raise.
            self.count = 1
        # GGA fields (merged GGA+GST csv — presumably columns 21-23 are the
        # GST lat/lon/alt standard deviations; confirm against the file).
        latitude_value = row[2]
        latitude_direction = row[3]
        longitude_value = row[4]
        longitude_direction = row[5]
        altitude_value = row[9]
        # Convert degrees/minutes strings into decimal degrees as the ROS
        # message requires.
        latitude = self.convert_latitude(latitude_value, latitude_direction)
        longitude = self.convert_longitude(longitude_value, longitude_direction)
        altitude = self.safe_float(altitude_value)
        hdop = float(row[8])
        lat_std_dev = float(row[21])
        lon_std_dev = float(row[22])
        alt_std_dev = float(row[23])
        current_fix = NavSatFix()
        current_fix.latitude = latitude
        current_fix.longitude = longitude
        current_fix.altitude = altitude
        # Diagonal covariance from HDOP scaled by the reported std devs.
        current_fix.position_covariance[0] = (hdop * lon_std_dev) ** 2
        current_fix.position_covariance[4] = (hdop * lat_std_dev) ** 2
        current_fix.position_covariance[8] = (2 * hdop * alt_std_dev) ** 2
        self._logger.info(str(current_fix))
        self.obj_pub.publish(current_fix)

    def convert_latitude(self, field_lat, lat_direction):
        """NMEA latitude 'ddmm.mmmm' + 'N'/'S' -> signed decimal degrees."""
        latitude = self.safe_float(field_lat[0:2]) + self.safe_float(field_lat[2:]) / 60.0
        if lat_direction == 'S':
            latitude = -latitude
        return latitude

    def convert_longitude(self, field_long, long_direction):
        """NMEA longitude 'dddmm.mmmm' + 'E'/'W' -> signed decimal degrees.

        BUG FIX: NMEA encodes longitude degrees with THREE digits; the
        original sliced only two ([0:2]), corrupting every longitude.
        """
        longitude = self.safe_float(field_long[0:3]) + self.safe_float(field_long[3:]) / 60.0
        if long_direction == 'W':
            longitude = -longitude
        return longitude

    def safe_float(self, field):
        """float(field), or NaN when the field is empty/unparseable."""
        try:
            return float(field)
        except ValueError:
            return float('NaN')
def main (args=None):
    """Initialise rclpy, spin the publisher node until shutdown."""
    rclpy.init(args=args)
    node=my_node()
    rclpy.spin(node)
    rclpy.shutdown()
if __name__=="__main__":
    main()
| [
"rozanabdelmawla@gmail.com"
] | rozanabdelmawla@gmail.com |
224ec89ac931e042e1944042b2a3f733ec7c52c3 | 5894844ffd7edfe04047a1310535679904f23b31 | /app/api/serializers/lesson.py | c2e9d0608a6e2745d2a10a7d73fac58208370d86 | [] | no_license | kevbrygil/dacodes-API | 778108017584be01f88a7b74f4a5fbdd258ca886 | ec51c4e8103236e9302db419ab1bce1b14526ac1 | refs/heads/master | 2022-07-25T16:30:11.831860 | 2019-10-23T14:38:33 | 2019-10-23T14:38:33 | 216,649,135 | 0 | 0 | null | 2022-07-05T21:47:09 | 2019-10-21T19:29:20 | Python | UTF-8 | Python | false | false | 507 | py | from marshmallow import Schema, fields
from .validations.validators import validate_mandatory_courses, validate_mandatory_courses_code
class LessonSchema(Schema):
    """Marshmallow schema for serializing/validating Lesson payloads."""

    id = fields.Str()
    name = fields.Str(required=True)
    course_id = fields.Str()  # presumably the parent course's id — confirm against usage
    description = fields.Str()
    question_details = fields.Str()
    code = fields.Str(required=True)
    order = fields.Integer()
    hours = fields.Integer()
    score = fields.Integer(required=True)
    # NOTE(review): "aproval" looks like a typo for "approval", but renaming
    # would change the serialized field name — left as-is.
    aproval_score = fields.Integer(required=True)
| [
"tamayo_144@hotmail.com"
] | tamayo_144@hotmail.com |
ad3ee97e6a9f4657ce8b2dda99cafee6dda6323b | 2f7f6dda2a7fc1c949450e03cbd567ac7f9a46d9 | /tools/batch/submit-job.py | 9bbbc6fe81ee4b2045ef15802d86cbf469148ea1 | [
"Apache-2.0"
] | permissive | cuulee/gluon-nlp | fe31952925a483b14df4a9faaf1e5259805f9856 | 46c9d014ac0a022b208ce335cb6ee8a54771bae4 | refs/heads/master | 2022-12-19T16:00:32.269318 | 2020-10-22T03:33:47 | 2020-10-22T03:33:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,565 | py | import argparse
import random
import re
import sys
import time
from datetime import datetime
import boto3
from botocore.compat import total_seconds
instance_type_info = {
'g4dn.4x': {
'job_definition': 'gluon-nlp-g4dn_4xlarge:5',
'job_queue': 'g4dn'
},
'g4dn.8x': {
'job_definition': 'gluon-nlp-g4dn_8xlarge:5',
'job_queue': 'g4dn'
},
'g4dn.12x': {
'job_definition': 'gluon-nlp-g4dn_12xlarge:5',
'job_queue': 'g4dn-multi-gpu'
},
'p3.2x': {
'job_definition': 'gluon-nlp-p3_2xlarge:5',
'job_queue': 'p3'
},
'p3.8x': {
'job_definition': 'gluon-nlp-p3_8xlarge:5',
'job_queue': 'p3-4gpu'
},
'p3.16x': {
'job_definition': 'gluon-nlp-p3_16xlarge:5',
'job_queue': 'p3-8gpu'
},
'p3dn.24x': {
'job_definition': 'gluon-nlp-p3_24xlarge:5',
'job_queue': 'p3dn-8gpu'
},
'c5n.4x': {
'job_definition': 'gluon-nlp-c5_4xlarge:3',
'job_queue': 'c5n'
},
'c5n.18x': {
'job_definition': 'gluon-nlp-c5_18xlarge:3',
'job_queue': 'c5n'
}
}
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--profile', help='profile name of aws account.', type=str,
default=None)
parser.add_argument('--region', help='Default region when creating new connections', type=str,
default=None)
parser.add_argument('--name', help='name of the job', type=str, default='dummy')
parser.add_argument('--job-type', help='type of job to submit.', type=str,
choices=instance_type_info.keys(), default='g4dn.4x')
parser.add_argument('--source-ref',
help='ref in GluonNLP main github. e.g. master, refs/pull/500/head',
type=str, default='master')
parser.add_argument('--work-dir',
help='working directory inside the repo. e.g. scripts/preprocess',
type=str, default='scripts/preprocess')
parser.add_argument('--saved-output',
help='output to be saved, relative to working directory. '
'it can be either a single file or a directory',
type=str, default='.')
parser.add_argument('--save-path',
help='s3 path where files are saved.',
type=str, default='batch/temp/{}'.format(datetime.now().isoformat()))
parser.add_argument('--command', help='command to run', type=str,
default='git rev-parse HEAD | tee stdout.log')
parser.add_argument('--remote',
help='git repo address. https://github.com/dmlc/gluon-nlp',
type=str, default="https://github.com/dmlc/gluon-nlp")
parser.add_argument('--wait', help='block wait until the job completes. '
'Non-zero exit code if job fails.', action='store_true')
parser.add_argument('--timeout', help='job timeout in seconds', default=None, type=int)
args = parser.parse_args()
session = boto3.Session(profile_name=args.profile, region_name=args.region)
batch, cloudwatch = [session.client(service_name=sn) for sn in ['batch', 'logs']]
def printLogs(logGroupName, logStreamName, startTime):
    """Print all CloudWatch log events of a stream from `startTime` onward.

    Pages through results via nextForwardToken and stops when CloudWatch
    returns the same token again (end of stream).  Returns the timestamp
    (ms) of the last event printed, or 0 if there were none.  Relies on the
    module-level `cloudwatch` client.
    """
    kwargs = {'logGroupName': logGroupName,
              'logStreamName': logStreamName,
              'startTime': startTime,
              'startFromHead': True}

    lastTimestamp = 0
    while True:
        logEvents = cloudwatch.get_log_events(**kwargs)

        for event in logEvents['events']:
            lastTimestamp = event['timestamp']
            timestamp = datetime.utcfromtimestamp(lastTimestamp / 1000.0).isoformat()
            # Pad/truncate to millisecond precision and append 'Z' (UTC).
            print('[{}] {}'.format((timestamp + '.000')[:23] + 'Z', event['message']))

        nextToken = logEvents['nextForwardToken']
        if nextToken and kwargs.get('nextToken') != nextToken:
            kwargs['nextToken'] = nextToken
        else:
            break
    return lastTimestamp
def nowInMillis():
    """Return the current UTC time as milliseconds since the Unix epoch.

    Fixed: the original used the Python 2 ``long`` builtin and a free
    ``total_seconds`` helper function, both of which raise NameError on
    Python 3.  Whole-second truncation before the *1000 scaling is kept.
    """
    endTime = int((datetime.utcnow() - datetime(1970, 1, 1)).total_seconds()) * 1000
    return endTime
def main():
    """Submit one AWS Batch job and (optionally) poll it to completion.

    Reads all settings from the module-level ``args``; exits the process
    with a non-zero code when --wait is given and the job FAILED.
    """
    spin = ['-', '/', '|', '\\', '-', '/', '|', '\\']
    logGroupName = '/aws/batch/job'
    jobName = re.sub('[^A-Za-z0-9_\-]', '', args.name)[:128] # Enforce AWS Batch jobName rules
    jobType = args.job_type
    jobQueue = instance_type_info[jobType]['job_queue']
    jobDefinition = instance_type_info[jobType]['job_definition']
    # NOTE(review): 'command' is never used below; the raw string is passed
    # through parameters['COMMAND'] instead.
    command = args.command.split()
    wait = args.wait
    # Substitution parameters consumed by the registered job definition.
    parameters = {
        'SOURCE_REF': args.source_ref,
        'WORK_DIR': args.work_dir,
        'SAVED_OUTPUT': args.saved_output,
        'SAVE_PATH': args.save_path,
        'COMMAND': args.command,
        'REMOTE': args.remote
    }
    kwargs = dict(
        jobName=jobName,
        jobQueue=jobQueue,
        jobDefinition=jobDefinition,
        parameters=parameters,
    )
    if args.timeout is not None:
        kwargs['timeout'] = {'attemptDurationSeconds': args.timeout}
    submitJobResponse = batch.submit_job(**kwargs)
    jobId = submitJobResponse['jobId']
    print('Submitted job [{} - {}] to the job queue [{}]'.format(jobName, jobId, jobQueue))
    spinner = 0
    running = False
    status_set = set()
    startTime = 0
    logStreamName = None
    # Poll loop (only entered with --wait): tail logs until terminal status.
    while wait:
        # Random sleep spreads DescribeJobs calls to avoid API throttling.
        time.sleep(random.randint(5, 10))
        describeJobsResponse = batch.describe_jobs(jobs=[jobId])
        status = describeJobsResponse['jobs'][0]['status']
        if status == 'SUCCEEDED' or status == 'FAILED':
            print('=' * 80)
            print('Job [{} - {}] {}'.format(jobName, jobId, status))
            if logStreamName:
                # +1 so the next read would start after the last printed event.
                startTime = printLogs(logGroupName, logStreamName, startTime) + 1
            # Exit code 1 on failure (True == 1), 0 on success.
            sys.exit(status == 'FAILED')
        elif status == 'RUNNING':
            logStreamName = describeJobsResponse['jobs'][0]['container']['logStreamName']
            if not running:
                running = True
                print('\rJob [{}, {}] is RUNNING.'.format(jobName, jobId))
                if logStreamName:
                    print('Output [{}]:\n {}'.format(logStreamName, '=' * 80))
            if logStreamName:
                startTime = printLogs(logGroupName, logStreamName, startTime) + 1
        elif status not in status_set:
            # Print each pre-running status (SUBMITTED/PENDING/...) once.
            status_set.add(status)
            print('\rJob [%s - %s] is %-9s... %s' % (jobName, jobId, status, spin[spinner % len(spin)]),)
            sys.stdout.flush()
            spinner += 1
# Script entry point.
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | cuulee.noreply@github.com |
2018328ca867ccc87ff04f9b378c17cac512df31 | 747f759311d404af31c0f80029e88098193f6269 | /addons/sale_payment/sale_payment.py | 807c9c55bec7e7ec51aae95ef58e50ca79e28d7c | [] | no_license | sgeerish/sirr_production | 9b0d0f7804a928c0c582ddb4ccb7fcc084469a18 | 1081f3a5ff8864a31b2dcd89406fac076a908e78 | refs/heads/master | 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | /home/openerp/production/extra-addons/sale_payment/sale_payment.py | [
"geerish@omerp.net"
] | geerish@omerp.net |
98d57a8478c682b8842dbf0da010110c3fcd6a8c | 90e10e5fe73272557b01bc600d3b0f1121b0ce0e | /worldbuildr/schema.py | 6a6350b487e09c3ac9177321e61102c1d591cc2a | [] | no_license | nanderv/worldbuildr | e18809599bfdc573e398cb35b65caca528f4a552 | 3c69ef51408350c9b215996d16ac4f63837785c2 | refs/heads/master | 2020-03-30T02:01:44.498920 | 2018-09-29T10:13:01 | 2018-09-29T10:13:01 | 150,608,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | import when.schema
import graphene
from graphene_django.debug import DjangoDebug
import who.schema
class Query(when.schema.Query, who.schema.Query, graphene.ObjectType):
    """Root GraphQL query type merging the `when` and `who` app queries."""
    pass
# The single schema object served by the project.
schema = graphene.Schema(query=Query)
| [
"nander@nander.net"
] | nander@nander.net |
a7f52a070ab9786932134e6185e25c4294abacda | bfc25f1ad7bfe061b57cfab82aba9d0af1453491 | /data/external/repositories_2to3/113677/KaggleBillionWordImputation-master/scripts/test_to_train.py | d6a8b8242d2e01d61592d440427057247ee7db57 | [
"MIT"
] | permissive | Keesiu/meta-kaggle | 77d134620ebce530d183467202cf45639d9c6ff2 | 87de739aba2399fd31072ee81b391f9b7a63f540 | refs/heads/master | 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | #!/usr/bin/env python
'''Convert test file format to train file format'''
import sys
if __name__ == '__main__':
header = sys.stdin.readline()
for line in sys.stdin:
i, sentence = line.rstrip().split(',', 1)
print(sentence[1:-1].replace('""', '"')) | [
"keesiu.wong@gmail.com"
] | keesiu.wong@gmail.com |
996df91cc5baeba8d648a84074940c67fb599b8c | 9a811b17415c7a19731c42cffd18177f15adf56e | /list_all_pixels.py | efb3ef048aebe8777b914caa5513c8d0e8e93616 | [] | no_license | sanjitk7/ImageSpacialIntensityTransformations | 2e8fcd16e896374ecf05688e425b772b6d14bffc | 337c3661fdb631afe096b697c5ae064f49db8d46 | refs/heads/master | 2022-12-24T19:52:03.391437 | 2020-09-29T11:56:14 | 2020-09-29T11:56:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | # Listing All The pixel Values of the Image
def list_all_pixels(im):
    """Print every pixel of *im* as ``f(x,y) = v``.

    *im* is a PIL-style image exposing ``size`` and ``load()``.  For colour
    images *v* is the first channel; for single-band images it is the raw
    pixel value.

    Fixed: the original read an undefined global ``pixelMap`` (NameError);
    the pixel-access object is now obtained from the image via ``im.load()``.
    """
    pixels = im.load()
    for i in range(im.size[0]):          # columns (x)
        for j in range(im.size[1]):      # rows (y)
            value = pixels[i, j]
            # Colour modes yield a tuple per pixel; mode 'L' yields an int.
            first = value[0] if isinstance(value, tuple) else value
            print("f(" + str(i) + "," + str(j) + ") = " + str(first))
"sanjitk2018@gmail.com"
] | sanjitk2018@gmail.com |
e9da2dde9e797e1e181779543be225efdc001fb3 | 0fe003ace0c47ff027de3c5497ef9bd04925939a | /day23.py | 0d5fffbeb3b042325ecfb43ede3588fa80f58148 | [] | no_license | russford/advent2016 | 132dde45adadf2c78e1ab9544af66a72541700ac | 1e92cf36a5870e05297ddba3d5d254213e4f0aaf | refs/heads/master | 2020-06-15T14:49:43.566431 | 2017-01-02T12:26:27 | 2017-01-02T12:26:27 | 75,284,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,291 | py | test_code = """cpy 2 a
tgl a
tgl a
tgl a
cpy 1 a
dec a
dec a"""
# cpy x y copies x (either an integer or the value of a register) into register y.
# inc x increases the value of register x by one.
# dec x decreases the value of register x by one.
# jnz x y jumps to an instruction y away (positive means forward; negative means backward), but only if x is not zero.
# For one-argument instructions, inc becomes dec, and all other one-argument instructions become inc.
# For two-argument instructions, jnz becomes cpy, and all other two-instructions become jnz.
# The arguments of a toggled instruction are not affected.
# If an attempt is made to toggle an instruction outside the program, nothing happens.
# If toggling produces an invalid instruction (like cpy 1 2) and an attempt is later made to execute that instruction, skip it instead.
# If tgl toggles itself (for example, if a is 0, tgl a would target itself and become inc a), the resulting instruction is not executed until the next time it is reached.
def toggle(instr):
    """Apply the 'tgl' transformation to one assembunny instruction.

    One-argument ops: 'inc' becomes 'dec', everything else becomes 'inc'.
    Two-argument ops: 'jnz' becomes 'cpy', everything else becomes 'jnz'.
    Arguments are kept untouched; other arities return None.
    """
    parts = instr.split()
    op, operands = parts[0], parts[1:]
    if len(parts) == 2:
        new_op = "dec" if op == "inc" else "inc"
        return new_op + " " + operands[0]
    if len(parts) == 3:
        new_op = "cpy" if op == "jnz" else "jnz"
        return "{} {} {}".format(new_op, operands[0], operands[1])
def val(v, registers):
    """Resolve an assembunny operand.

    A register name ('a'..'d') reads the register; anything else is parsed
    as an integer literal.
    """
    is_register = "a" <= v[0] <= "d"
    return registers[v] if is_register else int(v)
# NOTE(review): this function shadows the builtin `exec`; renaming would
# break existing callers, so it is only documented here.
def exec (code, code_ptr, registers):
    """Execute the instruction at *code_ptr*, mutating *registers* (and,
    for 'tgl', possibly *code*) in place.

    Returns the relative jump to apply next; 0 means "advance by one"
    (run_code translates 0 to 1).
    """
    # Hand-tuned shortcut replacing the instruction block starting at 2:
    # collapses what is presumably a multiply/decrement loop for this
    # specific input — TODO confirm against day23.txt.
    if code_ptr == 2:
        registers["a"] = registers["a"] * registers["b"]
        registers["b"] -= 1
        registers["c"] = 2 * registers["b"]
        registers["d"] = 0
        return 14
    # Second hand-tuned shortcut: adds the constant 95*96 to 'a' and skips
    # ahead 6 instructions (input-specific — TODO confirm).
    if code_ptr == 20:
        registers["a"] += 95*96
        return 6
    instr = code[code_ptr].split()
    if instr[0] == "cpy":
        # Only copy into a valid register (toggling can create 'cpy 1 2',
        # which must be skipped).
        if instr[2] in registers:
            registers[instr[2]] = val(instr[1], registers)
    if instr[0] == "inc":
        if instr[1] in registers:
            registers[instr[1]] += 1
    if instr[0] == "dec":
        if instr[1] in registers:
            registers[instr[1]] -= 1
    if instr[0] == "jnz":
        cmp = val(instr[1], registers)
        if cmp != 0:
            return val(instr[2], registers)
    if instr[0] == "tgl":
        jmp = val(instr[1], registers)
        # Toggling outside the program is a no-op by the puzzle rules.
        if code_ptr+jmp < len(code):
            new_ins = toggle(code[code_ptr+jmp])
            print ("toggled {}:{} to {}".format(code_ptr+jmp, code[code_ptr+jmp], new_ins))
            code[code_ptr+jmp] = new_ins
            # Debug dump of the program tail when instruction 18 flips.
            if code_ptr+jmp == 18:
                print ('\n'.join(code[16:]))
    return 0
def run_code(code):
    """Run an assembunny program until the instruction pointer leaves it.

    Starts with a=12 (presumably the part-two puzzle input — confirm) and
    prints register traces for selected instruction ranges plus the final
    register contents.
    """
    registers = {"a": 12, "b": 0, "c": 0, "d": 0}
    code_ptr = 0
    print(code)
    i=0
    while code_ptr < len(code):
        jmp = exec(code, code_ptr, registers)
        # Trace only the "interesting" instruction ranges to limit output.
        if code_ptr < 2 or 21 > code_ptr > 15 or code_ptr == 10 or code_ptr > 23:
            print("{:>3}: {:<8} | {}".format(code_ptr, code[code_ptr], ' '.join(["{}:{:>5}".format(k,v) for k,v in sorted(registers.items())])))
        # exec() returns 0 for "no jump": fall through to the next instruction.
        if jmp == 0: jmp = 1
        code_ptr += jmp
        i += 1
    print (sorted(registers.items()))
# Load the puzzle input (one instruction per line) and run it.
with open("day23.txt", "r") as f:
    file_code = [l.strip('\n') for l in f.readlines()]
run_code(file_code)
| [
"russford@gmail.com"
] | russford@gmail.com |
34c4d58dbc00a029cccf06bca3604352c7a3dc0b | 833e9e3b34b271aa2522471bd0b281b892adff78 | /backend/forms.py | 9f1014a729fa0d32ce2cc205096f506180fa41c4 | [] | no_license | emilte/case | b3fcd869468e093ec754980824c6b155f283caa7 | 35eadb05bdd224f845353a952c9aa18b03d95591 | refs/heads/master | 2021-06-27T13:19:32.550253 | 2019-11-24T23:21:36 | 2019-11-24T23:21:36 | 223,599,299 | 0 | 0 | null | 2021-03-19T08:42:52 | 2019-11-23T14:10:19 | JavaScript | UTF-8 | Python | false | false | 2,377 | py | from django import forms
from urllib import request
from captcha.fields import ReCaptchaField
from django.conf import settings
def between(x, a, b):
    """Return True if *x* lies in the inclusive range [a, b]."""
    # Chained comparison: idiomatic and evaluates x only once.
    return a <= x <= b
class Info(forms.Form):
    """Application form with Norwegian phone and postal-code validation.

    Widgets get Bootstrap's ``form-control`` class and placeholders at
    construction time; ``clean_phone``/``clean_areacode`` enforce Norwegian
    number formats.
    """

    applicant = forms.CharField(initial="emil", required=True, widget=forms.HiddenInput)
    name = forms.CharField(initial="Emil Telstad", required=True, min_length=2)
    email = forms.EmailField(initial="emil.telstad@gmail.com", required=True)
    phone = forms.IntegerField(initial="41325358", required=True)
    areacode = forms.CharField(initial="7051", required=False, min_length=4, max_length=4)
    comment = forms.CharField(required=False, widget=forms.Textarea)
    captcha = ReCaptchaField(
        public_key=settings.RECAPTCHA_PUBLIC_KEY,
        private_key=settings.RECAPTCHA_PRIVATE_KEY,
    )
    required_css_class = 'required'

    def __init__(self, *args, **kwargs):
        # Fixed: super(type(self), self) recurses infinitely if this class
        # is ever subclassed; the zero-argument form resolves the MRO safely.
        super().__init__(*args, **kwargs)
        for field in self.fields.values():
            field.widget.attrs.update({'class': 'form-control'})
        self.fields['name'].widget.attrs.update({'placeholder': 'Ola Nordmann'})
        self.fields['email'].widget.attrs.update({'placeholder': 'navn@domene.no'})
        self.fields['phone'].widget.attrs.update({'placeholder': '12345678'})
        self.fields['areacode'].widget.attrs.update({'placeholder': '1234'})

    def clean_phone(self):
        """Accept only 8-digit Norwegian mobile numbers (4xxxxxxx / 9xxxxxxx)."""
        data = self.cleaned_data['phone']
        if between(data, 40000000, 49999999) or between(data, 90000000, 99999999):
            return data
        raise forms.ValidationError("Invalid Norwegian phone number")

    def clean_areacode(self):
        """Validate an optional 4-digit postal code against Bring's registry.

        NOTE(review): downloads the full registry over HTTP on every
        validation call; consider caching the code set.
        """
        data = self.cleaned_data['areacode']
        if not data:  # Areacode is not required
            return data
        try:
            int(data)
        except ValueError:
            # Fixed: was a bare `except:` that also swallowed unrelated errors.
            raise forms.ValidationError("Areacodes contain only digits (0-9)")
        if len(data) != 4:
            raise forms.ValidationError("Norwegian areacodes contain exactly 4 digits")
        resource = request.urlopen("https://www.bring.no/postnummerregister-ansi.txt")
        encode = resource.headers.get_content_charset()
        for line in resource:
            line = line.decode(encode)
            n = line.split('\t')[0]
            if int(n) == int(data):
                return data
        raise forms.ValidationError("Areacode does not exist")
| [
"emil.telstad@gmail.com"
] | emil.telstad@gmail.com |
b17cc3de2699bc9c2e6d1909d80eeecd1bc07a76 | 62e27febb6c9891d02c792620ca2a8b3e4e8fd09 | /ai.py | e2ef16676cd84e6ca95beaf9071498d8365f2f5c | [] | no_license | ffidni/tictactoe | cc08cb736cc6eb277bee6c211c09aed0a7c6e006 | 125e9497ffa38abd72b9488b66945c5797c7790c | refs/heads/master | 2023-06-10T14:23:08.861220 | 2021-07-06T04:08:09 | 2021-07-06T04:08:09 | 375,467,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,477 | py | from random import choice
#Credit to divyesh072019 for this code
def isMovesLeft(board) :
for i in range(3) :
for j in range(3) :
if (not board[i][j]) :
return True
return False
def evaluate(b, bot, opponent) :
# Checking for Rows for X or O victory.
for row in range(3) :
if (b[row][0] == b[row][1] and b[row][1] == b[row][2]) :
if (b[row][0] == bot) :
return 10
elif (b[row][0] == opponent) :
return -10
# Checking for Columns for X or O victory.
for col in range(3) :
if (b[0][col] == b[1][col] and b[1][col] == b[2][col]) :
if (b[0][col] == bot) :
return 10
elif (b[0][col] == opponent) :
return -10
# Checking for Diagonals for X or O victory.
if (b[0][0] == b[1][1] and b[1][1] == b[2][2]) :
if (b[0][0] == bot) :
return 10
elif (b[0][0] == opponent) :
return -10
if (b[0][2] == b[1][1] and b[1][1] == b[2][0]) :
if (b[0][2] == bot) :
return 10
elif (b[0][2] == opponent) :
return -10
# Else if none of them have won then return 0
return 0
def minimax(board, depth, is_max, bot, opponent) :
score = evaluate(board, bot, opponent)
# If Maximizer has won the game return his/her
# evaluated score
if (score == 10) :
return score
# If Minimizer has won the game return his/her
# evaluated score
if (score == -10) :
return score
# If there are no more moves and no winner then
# it is a tie
if (isMovesLeft(board) == False) :
return 0
# If this maximizer's move
if (is_max) :
best = -1000
# Traverse all cells
for i in range(3) :
for j in range(3) :
# Check if cell is empty
if (not board[i][j]) :
# Make the move
board[i][j] = bot
# Call minimax recursively and choose
# the maximum value
best = max( best, minimax(board,
depth + 1,
not is_max, bot, opponent) )
# Undo the move
board[i][j] = ""
return best
# If this minimizer's move
else :
best = 1000
# Traverse all cells
for i in range(3) :
for j in range(3) :
# Check if cell is empty
if (not board[i][j]) :
# Make the move
board[i][j] = opponent
# Call minimax recursively and choose
# the minimum value
best = min(best, minimax(board, depth + 1, not is_max, bot, opponent))
# Undo the move
board[i][j] = ""
return best
# This will return the best possible move for the player
def find_best_move(board, bot, opponent, mark_count) :
best_val = -1000
best_move = (-1, -1)
board = [[box.text() for box in row] for row in board.values()]
if mark_count < 9:
if mark_count == 0:
i = choice([0, 2])
if i == 1:
j = choice([0, 1, 2])
else:
j = choice([0, 2])
return (i, j)
else:
for i in range(3) :
for j in range(3):
# Check if cell is empty
if not board[i][j]:
# Make the move
board[i][j] = bot
# compute evaluation function for this
# move.
move_val = minimax(board, 0, False, bot, opponent)
# Undo the move
board[i][j] = ""
# If the value of the current move is
# more than the best value, then update
# best/
if move_val > best_val:
best_move = (i, j)
best_val = move_val
return best_move
| [
"realityinaship@gmail.com"
] | realityinaship@gmail.com |
950ec69546022992c15201e1068609a18c452f15 | 835566e5f9ab26bf459c47678728719775d96c33 | /venv/lib/python2.7/codecs.py | a708b1569ffa72266e1aebcd14af601581eff916 | [] | no_license | BrandonWalk/bacon | a0e15898bcd8ba0ab2871a7021a0c0e92e58cf98 | 22e674da1bc94e7f608675a022b51c99e47e58e2 | refs/heads/master | 2020-04-18T20:20:59.896319 | 2019-01-26T20:32:33 | 2019-01-26T20:32:33 | 167,735,645 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | /Users/brandonwalker/anaconda2/lib/python2.7/codecs.py | [
"branwalker19@gmail.com"
] | branwalker19@gmail.com |
af1029b1cb2a722afefefe70be3427a8519bf118 | 10f3e91eb6b7d155f0421f836a2fd9696bd4f184 | /getWeather.py | 466b96ed80f369d7e097565df13eafab22a31e8e | [] | no_license | Claire0223/Python_demo | b61fdcd9448bc7c57a4f13241f48a732406f4ec3 | 9c266503defeca8fae717797482303e126c66ebf | refs/heads/master | 2022-11-15T04:06:19.964521 | 2020-07-10T06:09:58 | 2020-07-10T06:09:58 | 276,874,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,807 | py | #coding=utf-8
# 微信聊天+天气预报机器人
# 和风天气https://dev.heweather.com/docs/legacy/api/s6
#
import requests
import json
def weather_forecast():
    """Prompt for a city name and fetch its forecast from the HeWeather S6 API.

    Returns the decoded JSON response as a dict.
    """
    city=input('请输入想要查询的城市名称,如‘江门’:')
    api='https://free-api.heweather.com/s6/weather/'
    weather_type='forecast'
    value={
        'location':city,
        'key':'63d7ffe16c3743e1af28b8ad4423e5af'  # NOTE(review): hard-coded API key
    }
    url=api+weather_type
    weather_dict=requests.get(url,params=value).json()
    return weather_dict
def get_data():
    """Fetch the forecast and print one summary line per forecast day.

    Returns True after printing.
    """
    weather_dict = weather_forecast()
    he_weather = weather_dict['HeWeather6']
    cityname = he_weather[0]['basic']['location']
    # Fixed: the original re-read the 'basic' dict here, so the integer
    # indexing below raised KeyError on the first iteration; the per-day
    # forecast list actually lives under 'daily_forecast' (as the original
    # inline comment hinted).
    daily_forecast = he_weather[0]['daily_forecast']
    for i in range(len(daily_forecast)):
        date = daily_forecast[i]['date']
        cond_txt_d = daily_forecast[i]['cond_txt_d']
        cond_txt_n = daily_forecast[i]['cond_txt_n']
        tmp_max = daily_forecast[i]['tmp_max']
        tmp_min = daily_forecast[i]['tmp_min']
        wind_dir = daily_forecast[i]['wind_dir']
        weather_data = cityname+' '+date+' 白天天气:'+cond_txt_d+' 晚上天气:'+cond_txt_n+'\n最高温:'+ tmp_max +' 最低温:'+tmp_min+' 风向:'+wind_dir
        print(weather_data)
    return True
if __name__=="__main__":
    # Dead code below: an earlier "daily sentence" feature, kept commented out.
    # date=time.strftime('%Y-%m-%d',time.localtime())
    # jinshanApi='http://open.iciba.com/dsapi?date='+date
    # # print(jinshanApi)
    # sentence=get_sentence(jinshanApi)
    # sentenceDict=json.loads(sentence)
    # content=sentenceDict['content']
    # note=sentenceDict['note']
    # NOTE(review): this rebinds the name `weather_forecast` (the function
    # above) to get_data()'s return value (True).
    weather_forecast=get_data()
    # print(type(weather_forecast))
    print(weather_forecast)
| [
"1327686271@qq.com"
] | 1327686271@qq.com |
0bb8dade96e13574b8b889e8b669455156c204b5 | e3d756f8723dc133fc8ed8339ade09ed3fde4bfe | /src/0_CD/src/interpolate.py | 2e85244f686e5dbaed290eef016eaee049f4ab78 | [] | no_license | chomamat/fit-bp | 6efb89711a4070d6d6b0cbf0f9d4560af2b64a69 | ba8261ba07cd5c36683a30526126ccc0808caf03 | refs/heads/master | 2020-04-24T02:53:48.026649 | 2019-05-19T19:44:49 | 2019-05-19T19:44:49 | 171,653,155 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,926 | py | import getopt
import cv2 as cv
import numpy as np
import sys
import torch
import torch.nn as nn
from models.interpolation import Model
# Device for running computations (GPU if available, else CPU).
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Not computing gradients for better computational performance (inference only).
torch.set_grad_enabled(False)
# Parse script arguments; defaults point at the bundled example data.
arg_weights = "data/interpolation85.pth"
arg_frame1 = "examples/interpolation/03_1.png"
arg_frame2 = "examples/interpolation/03_3.png"
arg_out = "examples/interpolation/out.png"
# The long-option spec is rebuilt from argv itself (every other token).
for opt, arg in getopt.getopt(sys.argv[1:], '', [ param[2:] + '=' for param in sys.argv[1::2] ])[0]:
    if opt == '--model' and arg != '': arg_weights = arg
    if opt == '--first' and arg != '': arg_frame1 = arg
    if opt == '--second' and arg != '': arg_frame2 = arg
    if opt == '--out' and arg != '': arg_out = arg
#######################################
def interpolate(arg_frame1, arg_frame2, arg_out):
    """Predict the frame between two grayscale frames and write it to disk.

    Loads both inputs, stacks them as one 2-channel tensor, runs the
    module-level `model`, and saves the result to *arg_out*.  Both input
    images must have identical dimensions.
    """
    # Read input images as grayscale floats scaled to [0, 1].
    img1 = cv.imread(arg_frame1, cv.IMREAD_GRAYSCALE).astype('float32') / 255.
    img2 = cv.imread(arg_frame2, cv.IMREAD_GRAYSCALE).astype('float32') / 255.
    assert img1.shape == img2.shape
    shape = img1.shape
    # Reshape to NCHW with batch=1; the two frames become the channel axis.
    img1 = img1.reshape((1,1,shape[0],shape[1]))
    img2 = img2.reshape((1,1,shape[0],shape[1]))
    # Create input tensor and compute output tensor on the chosen device.
    tensor_in = torch.tensor( np.concatenate((img1,img2),axis=1) ).to(device)
    tensor_out = model(tensor_in)
    # Save the predicted frame, rescaled back to 0-255.
    img_out = (tensor_out[0,0].cpu().detach().numpy() * 255).astype('int')
    cv.imwrite(arg_out, img_out)
#######################################
# Create the interpolation model and load the pretrained weights.
model = Model().to(device)
model.load_state_dict(torch.load(arg_weights, map_location=device))
model.eval()  # inference mode: freezes dropout / batch-norm behaviour
#######################################
if __name__ == '__main__':
    interpolate(arg_frame1, arg_frame2, arg_out)
"chomamat@fit.cvut.cz"
] | chomamat@fit.cvut.cz |
7914eab270311d6a94213bb0d0fa5edfa4c36fb0 | 863d32f9adc6890600a7a114574be66e80dc4ec7 | /models/seg_model.py | 0e3d6fddf9a0d4b5e475694ffe2eb863038fda1d | [] | no_license | dsl2009/dsl_instance | 9e60dc36a3106a9500a9486208533c2eb23578ae | ca299c16feaf58eadfd21f282bf681194b6c118f | refs/heads/master | 2020-04-24T15:18:08.246023 | 2019-07-26T08:38:19 | 2019-07-26T08:38:19 | 172,060,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | from models import resnet
import torch
from torch import nn
from torch.nn import functional as F
from layer import renet
class SegModel(nn.Module):
    """Segmentation network: ResNet-50 backbone with top-down feature fusion.

    forward() returns two single-channel maps — a segmentation map and an
    edge map — produced from progressively upsampled and concatenated
    backbone features.
    """
    def __init__(self):
        super(SegModel, self).__init__()
        # Backbone returning three feature maps (shallow x1 -> deep x3).
        self.cnn = resnet.resnet50(pretrained=False)
        # 1x1 bottleneck reducing the deepest map from 2048 to 512 channels.
        self.cov1 = nn.Sequential(
            nn.Conv2d(2048, 512, kernel_size=1, stride=1,bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(),
        )
        # Fuses upsampled deep features with the mid-level skip (512+256=768 in).
        self.cov2 = nn.Sequential(
            nn.Conv2d(768, 256, kernel_size=3,padding=1, stride=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU()
        )
        # Fuses with the shallow skip (256+64=320 in — implies x1 has 64
        # channels in the project's resnet; confirm against models/resnet.py).
        self.cov3 = nn.Sequential(
            nn.Conv2d(320, 64, kernel_size=3,padding=1, stride=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU()
        )
        # Two parallel single-channel prediction heads.
        self.seg = nn.Conv2d(64, 1, kernel_size=3,padding=1, stride=1, bias=False)
        self.edge = nn.Conv2d(64, 1, kernel_size=3, padding=1, stride=1, bias=False)
    def forward(self, img):
        x1, x2, x3 = self.cnn(img)
        x3 = self.cov1(x3)
        # Top-down pathway: upsample, concat with the skip, fuse.
        x3_up = F.interpolate(x3,scale_factor=2, mode='bilinear')
        x2 = torch.cat([x3_up, x2],dim =1)
        x2 = self.cov2(x2)
        x2_up = F.interpolate(x2,scale_factor=2, mode='bilinear')
        x1 = torch.cat([x2_up, x1],dim =1)
        x1 = self.cov3(x1)
        x0 = F.interpolate(x1,scale_factor=2, mode='bilinear')
        seg = self.seg(x0)
        edge = self.edge(x0)
        return seg,edge
return seg,edge
if __name__ == '__main__':
    # Smoke test: push one random batch through the network on the GPU.
    x = torch.randn(2,3,256,256).cuda()
    md = SegModel().cuda()
    md(x)
| [
"dsl"
] | dsl |
44f3dee156facd70866135a80c736611e2656831 | ce88c0222e5c770ecfc4e05bf61c55371e8d9a92 | /termext/abs_kw_pair.py | fdbe8a9e205b722f7db84f8d23657b3267f917af | [] | no_license | melsk125/ner | 31683f83fc6343a49421ae3879f5aae80c601267 | 77d9ccad029f1d5d9c916f5d3d73a7132a6e411a | refs/heads/master | 2021-01-10T21:59:30.940959 | 2012-04-01T08:44:33 | 2012-04-01T08:44:33 | 2,889,299 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,862 | py | import lib
import sys
import re
from optparse import OptionParser
from nltk import word_tokenize
optionParser = OptionParser()
options, args = optionParser.parse_args()
if len(args) == 0:
raw = sys.stdin.read()
else:
f = open(args[0])
raw = f.read()
lines = lib.get_dat_sgml(raw)
"""
Assume the input is in the format
<Abstract text> <Count of keyword> <Keyword 1> ... <Keyword n>
Output
<Token> <Tag (BIO)> (If Tag==B <Abstract number> <Keyword number>)
"""
sys.stderr.write(str(len(lines)) + " entries\n")
for i in range(len(lines)):
if i % 100 == 0:
sys.stderr.write(str(i) + "/" + str(len(lines)) + "\n")
line = dict(lines[i])
if 'EKYWD' in line and 'EABST' in line:
abstract = line['EABST']
keywords = re.split('\t', line['EKYWD'])
abstract = word_tokenize(abstract)
output = []
keywords = [word_tokenize(keyword) for keyword in keywords]
j = 0
while j < len(abstract):
found = False
for k in range(len(keywords)):
keyword = keywords[k]
keyword_len = len(keyword)
if keyword_len > 0 and keyword == abstract[j:j+keyword_len]:
output.append((keyword[0], "B", k+1))
print keyword[0] + "\tB\t" + str(i+1) + "\t" + str(k+1)
for l in keyword[1:]:
output.append((l, "I", k+1))
print l + "\tI\t" + str(i+1) + "\t" + str(k+1)
found = True
j += keyword_len
if found:
break
if j >= len(abstract):
break
output.append((abstract[j], "O", 0))
print abstract[j] + "\tO\t" + str(i+1) + "\t0"
j += 1
sys.stderr.write("Finished\n")
| [
"mel.sk125@gmail.com"
] | mel.sk125@gmail.com |
aa7749e5a5c46e9b294ba65e63edbafd2bdc540c | e63e8963f36689e525876dd877017352e96df12d | /DFCM_Electricity.py | 00f385ba96cff565eea8439932912a7aba1a0fba | [] | no_license | FieldDoctor/DFCM | b966d01b945e8ba0d233cc353d0c9815c743f5e2 | 7da7eecbdfd2c3b34c92ef79029de47364f2eded | refs/heads/master | 2023-03-28T23:39:19.861009 | 2021-03-26T10:17:39 | 2021-03-26T10:17:39 | 321,042,829 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,511 | py | configure = {}
configure['SourceFilePath'] = './1-Electricity/1-temp.csv'
configure['InputFilePath'] = './1-Electricity/2plus-supervisedDataSet_zscore.csv'
configure['OutputFilePath'] = './1-Electricity/6-DFCM.csv'
configure['PltFilePath'] = './1-Electricity/6-DFCM/'
configure['AllAttributes'] = 8
configure['TargetAttributes'] = 3
configure['InputAttributes'] = [1,2,3,4,5,6,7]
configure['OutputAttributes'] = [13,14,15]
configure['TimeAttributes'] = [0]
configure['Length'] = 21899
configure['global_epochs'] = 400
configure['f_batch_size'] = 25000
configure['f_epochs'] = 15
configure['hidden_layer'] = 10
configure['n_batch_size'] = 25000
configure['n_epochs'] = 15
configure['LSTM_hiddenDim'] = 15
import os
import time
from pandas import DataFrame
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.metrics import mean_squared_error
from keras.layers import Dense, LSTM, Dropout
from keras.models import Sequential
from keras import optimizers
#optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
# Unpack hyper-parameters from the configuration for shorter references below.
global_epochs = configure['global_epochs']
f_batch_size = configure['f_batch_size']
f_epochs = configure['f_epochs']
hidden_layer = configure['hidden_layer']
n_batch_size = configure['n_batch_size']
n_epochs = configure['n_epochs']
LSTM_hiddenDim = configure['LSTM_hiddenDim']
def sigmoid(x):
    """Elementwise logistic function 1 / (1 + e^(-x)); accepts scalars or arrays."""
    exp_neg_x = np.exp(-x)
    return 1 / (1 + exp_neg_x)
def mkdir(path):
    """Create directory *path* (including parents) if it does not exist.

    Returns True when the directory was created, False when something
    already exists at that path.  Prints a status message either way
    (messages kept from the original).
    """
    path = path.strip()
    path = path.rstrip("\\")
    # Create-and-catch instead of exists()-then-create: the original pair
    # of calls was racy if the directory appeared in between.
    try:
        os.makedirs(path)
    except FileExistsError:
        print(path + ' 目录已存在')
        return False
    print(path + ' 创建成功')
    return True
mkdir(configure['PltFilePath'])
# Load the supervised data set.
dataset = pd.read_csv(configure['InputFilePath'])
# Build the training (70%) and test (30%) splits (chronological, no shuffle).
values = dataset.values
n_train = int(0.7 * configure['Length'])
train = values[:n_train, :]
test = values[n_train:, :]
# Inputs are squashed through a sigmoid; targets are used as-is.
train_X, train_Y = sigmoid( train[:, configure['InputAttributes']] ) , train[:,configure['OutputAttributes']]
test_X, test_Y = sigmoid( test[:, configure['InputAttributes']] ), test[:,configure['OutputAttributes']]
# Time attribute reshaped to (samples, 1, features) for the LSTM input.
train_U = train[:,configure['TimeAttributes']]
train_U = train_U.reshape((train_U.shape[0], 1, train_U.shape[1]))
test_U = test[:,configure['TimeAttributes']]
test_U = test_U.reshape((test_U.shape[0], 1, test_U.shape[1]))
print('Train dataset length : ' + str(len(train)) + '.')
print('Test dataset length : ' + str(len(test)) + '.')
print('------')
print('X dim : ' + str(train_X.shape[1]) + '.')
print('Y dim : ' + str(train_Y.shape[1]) + '.')
print('------')
print('train_X shape : ' + str(train_X.shape))
print('train_Y shape : ' + str(train_Y.shape))
print('train_U shape : ' + str(train_U.shape))
print('------')
print('test_X shape : ' + str(test_X.shape))
print('test_Y shape : ' + str(test_Y.shape))
print('test_U shape : ' + str(test_U.shape))
# Build the DFCM networks: one Dense f-model and one LSTM u-model per target.
model_f = [0 for i in range(len(configure['OutputAttributes']))]
for i in range(len(configure['OutputAttributes'])):
    model_f[i] = Sequential()
    model_f[i].add(Dense(hidden_layer, input_dim=train_X.shape[1], activation='relu', use_bias=False))
    #model_f[i].add(Dense(hidden_layer, input_dim=hidden_layer, activation='relu', use_bias=False))
    #model_f[i].add(Dense(hidden_layer, input_dim=hidden_layer, activation='relu', use_bias=False))
    model_f[i].add(Dense(1, input_dim=hidden_layer, use_bias=False))
    model_f[i].compile(loss='mean_squared_error', optimizer='adam')
model_u = [0 for i in range(len(configure['OutputAttributes']))]
for i in range(len(configure['OutputAttributes'])):
    model_u[i] = Sequential()
    model_u[i].add(LSTM(LSTM_hiddenDim, input_shape=(train_U.shape[1], train_U.shape[2])))
    model_u[i].add(Dense(1, input_dim=LSTM_hiddenDim, use_bias=True))
    model_u[i].compile(loss='mean_squared_error', optimizer='adam')
# Alternating training: each global epoch fits f on (Y - u's prediction),
# then fits u on the residual (Y - f's prediction), residual-boosting style.
for i in range(global_epochs):
    start = time.time()
    if i == 0:
        y_f = train_Y
    else:
        y_f = train_Y - y_u_predict
    for j in range(len(configure['OutputAttributes'])):
        model_f[j].fit(train_X, y_f[:, j], f_batch_size, f_epochs, verbose=0, shuffle=False)
    y_f_predict = DataFrame()
    for j in range(len(configure['OutputAttributes'])):
        y_f_predict[str(j)] = model_f[j].predict(train_X).reshape(-1)
    y_f_predict = y_f_predict.values
    # u-models learn whatever f could not explain.
    y_u = train_Y - y_f_predict
    # for j in range(len(configure['OutputAttributes'])):
    #     print('f' + str(j + 1) + ' : ' + str(model_f[j].evaluate(train_X,y_f[:,j],verbose=2)))
    # print('The ' + str(i + 1) + ' times f() training finished. loss:' + str( pow(abs(y_u), 2).mean().mean() ))
    for j in range(len(configure['OutputAttributes'])):
        model_u[j].fit(train_U, y_u[:, j], n_batch_size, n_epochs, verbose=0)
    y_u_predict = DataFrame()
    for j in range(len(configure['OutputAttributes'])):
        y_u_predict[str(j)] = model_u[j].predict(train_U).reshape(-1)
    y_u_predict = y_u_predict.values
    # for j in range(len(configure['OutputAttributes'])):
    #     print('u' + str(j + 1) + ' : ' + str(model_u[j].evaluate(train_U, y_u[:,j],verbose=2)))
    # print('The ' + str(i + 1) + ' times u() training finished. loss:' + str( pow(abs(train_Y - y_u_predict), 2).mean().mean() ))
    # evaluate: combined prediction is the sum of both sub-models' outputs.
    yhat_f_predict = DataFrame()
    for j in range(len(configure['OutputAttributes'])):
        yhat_f_predict[str(j)] = model_f[j].predict(test_X).reshape(-1)
    yhat_f_predict = yhat_f_predict.values
    yhat_u_predict = DataFrame()
    for j in range(len(configure['OutputAttributes'])):
        yhat_u_predict[str(j)] = model_u[j].predict(test_U).reshape(-1)
    yhat_u_predict = yhat_u_predict.values
    predict_train = y_u_predict + y_f_predict
    predict_test = yhat_u_predict + yhat_f_predict
    real_train = train_Y
    real_test = test_Y
    # Squared-error matrices; mean().mean() below reduces them to scalars.
    error_train = pow(abs(real_train - predict_train), 2)
    error_test = pow(abs(real_test - predict_test), 2)
    # print('The ' + str(i + 1) + ' times train error: ' + str(error_train.mean().mean()))
    # print('The ' + str(i + 1) + ' times test error: ' + str(error_test.mean().mean()))
    print(i + 1, error_train.mean().mean(), error_test.mean().mean())
    # Early stop once the test MSE drops below the target threshold.
    if (error_test.mean().mean() < 0.125):
        break
    # print('This epoch TimeCost:' + str(time.time()-start) + 's.')
# Predict on the test split and persist results + trained models.
yhat_f_predict = DataFrame()
for j in range(len(configure['OutputAttributes'])):
    yhat_f_predict[str(j)] = model_f[j].predict(test_X).reshape(-1)
yhat_f_predict = yhat_f_predict.values
yhat_u_predict = DataFrame()
for j in range(len(configure['OutputAttributes'])):
    yhat_u_predict[str(j)] = model_u[j].predict(test_U).reshape(-1)
yhat_u_predict = yhat_u_predict.values
yhat = yhat_u_predict + yhat_f_predict
DataFrame(yhat).to_csv(configure['OutputFilePath'],index=False)
for j in range(len(configure['OutputAttributes'])):
    model_f[j].save(configure['PltFilePath'] + 'model_f_' + str(j+1) + '.h5')
    model_u[j].save(configure['PltFilePath'] + 'model_u_' + str(j+1) + '.h5')
# Data overview: plot predictions against ground truth.
values = yhat
original = test_Y
# Columns to plot (one subplot per target attribute).
groups = list(range(configure['TargetAttributes']))
i = 1
# Plot each column: ground truth first, prediction second.
plt.figure(figsize=(15,15))
for group in groups:
    plt.subplot(len(groups), 1, i)
    plt.plot(original[:, group])
    plt.plot(values[:, group])
    i += 1
plt.savefig(configure['PltFilePath'] + 'performance.png')
plt.show()
"963138743@qq.com"
] | 963138743@qq.com |
d49733bfac92a4f491e624790358f0aa6cb9d05f | a65cdc270f7c900c8f0dce75c88f4eb23bfcd856 | /tryzero.py | 77cf02d8b20fd5a1942b87b1b5e7e16c09235699 | [] | no_license | noufila/python-programs | a31ff0916d987f8307f809c12c44d11989245a0a | 8ddfeeb0aae757bdf4e269cb28b55271f3888726 | refs/heads/master | 2020-03-28T01:25:00.730879 | 2018-09-11T10:37:19 | 2018-09-11T10:37:19 | 147,503,389 | 0 | 0 | null | 2018-09-11T10:37:20 | 2018-09-05T10:51:57 | Python | UTF-8 | Python | false | false | 187 | py | try:
n=int(input("enter a number"))
n1=int(input("enter a number"))
print(n/n1)
except ZeroDivisionError as err:
print("second number cannot be zero")
print(err) | [
"noreply@github.com"
] | noufila.noreply@github.com |
485d3cc56b43af702b13d75f3c85981c119aa6fc | f8908de51fdee29875c7720efb3ef1584328086b | /tools/RemywikiSonglistScraper.py | 491726b3a629c4ad4878e48d975681e731ccc1ae | [
"MIT"
] | permissive | cyberkitsune/DDRGenie | 634e2e24323022181ed39a541d6594db958bcb16 | 6d2a78c84e33049c1541d761744da0868f23e0bb | refs/heads/master | 2022-08-07T09:52:17.850326 | 2022-07-25T04:16:21 | 2022-07-25T04:16:21 | 241,182,285 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | import sys, requests
import wikitextparser as wtp
# Base URL of the wiki and the MediaWiki API query template.
# %s in `query` is filled with the (underscore-escaped) page title; the
# request asks for the latest revision's raw wikitext as JSON.
base_uri = 'https://remywiki.com'
query = '/api.php?action=query&prop=revisions&titles=%s&formatversion=2&redirects=1&rvprop=content&rvslots=*&format=json'
if __name__ == "__main__":
    # Expect exactly one argument: the wiki page name to scrape.
    if len(sys.argv) < 2:
        print("Usage: RemywikiSonglistScraper.py [Page_Name]")
        exit(1)
    page = sys.argv[1]
    # MediaWiki titles use underscores instead of spaces.
    page = page.replace(' ','_')
    print(page)
    final_uri = "%s%s" % (base_uri, query % page)
    r = requests.get(final_uri)
    if r.status_code != 200:
        print("Failure getting URI...")
        exit(1)
    j = r.json()
    # Drill into the API response: first page -> latest revision -> main
    # slot -> raw wikitext.  NOTE(review): assumes formatversion=2 layout
    # and at least one revision; a missing page would raise KeyError here.
    content = j['query']['pages'][0]['revisions'][0]['slots']['main']['content']
    songs = []
    parsed = wtp.parse(content)
    lists = parsed.get_lists()
    # NOTE(review): the loop variable `list` shadows the builtin; renaming
    # would be a code change, so it is only flagged here.
    for list in lists:
        for item in list.items:
            # Weird hack to make sure we're the only newline in town
            songs.append("%s\n" % wtp.remove_markup(item).strip('\n').lstrip())
    # One song title per line, written next to the script.
    with open("%s.txt" % page, 'w', encoding='utf-8') as f:
        f.writelines(songs)
    print("Output: ", "%s.txt" % page)
"cyberkitsune09@gmail.com"
] | cyberkitsune09@gmail.com |
87cb6e36d3ce8f25552e58055a81a96c81d016d0 | 9994911f0ff388c92c21ca8178eec2d3af57082d | /teamup/cli.py | 8379e8bc873e2b905aca6bd2f170758de61ca15c | [
"MIT"
] | permissive | BruceEckel/TeamUp | 2809b36b8946b51bf96fcc113ef24ef02508f3c9 | 23e29301b462c329ad17253b4d4fb7f56fb7881b | refs/heads/master | 2023-01-05T19:06:21.010258 | 2022-12-26T23:30:44 | 2022-12-26T23:30:44 | 127,565,232 | 7 | 1 | MIT | 2022-12-26T23:30:45 | 2018-03-31T19:42:07 | Python | UTF-8 | Python | false | false | 1,527 | py | # -*- coding: utf-8 -*-
"""
Combine people for group activities
"""
from pathlib import Path
import os, sys
import click
import webbrowser
from teamup.pairings import Pairings
from teamup.PersistentLoopCounter import PersistentLoopCounter
# Input roster (one name per line) and the directory that caches the
# generated pairing pages.
attendees = Path("Attendees.txt")
html = Path() / "html"
# Click command-group entry point.  NOTE: the docstring doubles as the
# user-facing help text for `teamup --help`, so it is left untouched.
@click.group()
@click.version_option()
def main():
    """
    Generates and displays all combinations of 2-person teams using a
    round-robin algorithm. Requires an Attendees.txt file containing
    one name per line. Remove the 'html' directory to restart.
    """
def display(index):
    """Open the pre-rendered pairing page number *index* in a browser tab.

    The page must already exist in the module-level ``html`` directory;
    the assert guards against showing a stale/missing page.
    """
    page = html / "pairing{}.html".format(index)
    assert page.exists()
    webbrowser.open_new_tab(page)
# `teamup current`: show the current pairing, generating the html cache
# on first run.  Docstring is the CLI help text; left as-is.
@main.command()
def current():
    """
    Show current teams
    """
    # Roster file is mandatory; bail out with a non-zero exit otherwise.
    if not attendees.exists():
        print("Attendees.txt not found")
        sys.exit(1)
    pairings = Pairings.from_file(Path("Attendees.txt"))
    # First run: render one html page per pairing and initialise the
    # persistent round-robin position counter inside the html directory.
    if not html.exists():
        pairings.create_html_files()
        PersistentLoopCounter.create(html, pairings.bound)
    # Show the page for the counter's current position.
    display(PersistentLoopCounter.get(html).index())
# `teamup next`: advance the persistent counter and show the new pairing.
# NOTE(review): the function name shadows the builtin `next`, but click
# derives the subcommand name from it, so renaming would change the CLI.
@main.command()
def next():
    """
    Moves to next team grouping and shows
    """
    # The html cache (and its counter) must exist; `current` creates it.
    if not html.exists():
        print("No 'html' directory, first run 'teamup current'")
        sys.exit(1)
    display(PersistentLoopCounter.get(html).next())
# @main.command()
# def clean():
# """
# Erases the 'html' directory
# """
# if html.exists():
# html.unlink()
if __name__ == "__main__":
main()
| [
"mindviewinc@gmail.com"
] | mindviewinc@gmail.com |
051eb317acccff8a7d27506a3e72e3c1e18d19f3 | ebce276eb1e7391fd33ce3b6488846c9907b889e | /mymodule_demo.py | 77859133138abb9e4d3670598557130e5212f278 | [] | no_license | junlongsun/PythonDemo | 9630eec7ff3de5ee92ae2d2f00906a9155e7c4bb | 086d72ae3228756fd3155ba1a3f1128be534c317 | refs/heads/master | 2016-08-06T06:07:46.951234 | 2015-08-29T19:02:52 | 2015-08-29T19:02:52 | 41,603,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | #!/usr/bin/python
# Filename: mymodule_demo.py
# Demo of importing and using a local module.  NOTE(review): this is
# Python 2 code (print statement on the last line).
import mymodule
# dir() returns the module's attribute list; the result is discarded
# here, so this line has no visible effect.
dir(mymodule)
mymodule.sayhi()
print 'Version', mymodule.version
| [
"junlong.sun@colorado.edu"
] | junlong.sun@colorado.edu |
2665b0d21ad75e4516c94f4328876d29cfbd5752 | 5c52589d28b48539eacf034bb3eaf2ab7efbed58 | /venv/Scripts/pip-script.py | eef5a04da23847758712b0c627c4d6c93ac05638 | [] | no_license | ShaeLin983/pythonTestProject | 9a96844d69b23af6779c88afdac5273e8ca83f36 | 788de2be7696028552dd9316d74de2ab77363d53 | refs/heads/master | 2020-06-09T17:03:33.331876 | 2019-07-01T13:10:32 | 2019-07-01T13:10:32 | 193,473,556 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | #!f:\PycharmProjects\pythonTestProject\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"linx0220@163.com"
] | linx0220@163.com |
6e99c540a9920a214daca8c0db37c178bf0617b8 | 6a2a0ba0b3bb304fe8c844474f874619dd5b7df4 | /week1/ex8.py | 247891629a29d1b0bf731d0d4d7d07e8d5f97038 | [] | no_license | pbenipal61/iot-data-analysis | 583e1491c0819fd10c7e07b31d93c5ef11f37d57 | 7e2f3ed4b85f83cf6bc9e3ccf1d8b250ead0e995 | refs/heads/master | 2022-12-15T07:00:25.305896 | 2020-09-17T21:18:04 | 2020-09-17T21:18:04 | 291,765,540 | 0 | 0 | null | 2020-09-17T21:18:05 | 2020-08-31T16:20:14 | Python | UTF-8 | Python | false | false | 88 | py | import numpy as np
matrix = np.reshape(np.arange(100, 200, 10), (5, 2) )
print(matrix) | [
"t8sipr00@students.oamk.fi"
] | t8sipr00@students.oamk.fi |
15974039e082f50a6ca79584bc79968741955199 | dbdc26d866057457f2e511bd881148faf2996643 | /old/refers/_search_word_old.py | e17cdb015856eb428308920e267259d06a14fd47 | [] | no_license | yzyDavid/furigana | 2dc3376e8779ea3cfed57b6fdb4f6d31ffe68df4 | cc72db866d539687532808d69d6be5ac1a95443e | refs/heads/master | 2021-01-10T00:58:37.260389 | 2018-04-04T06:16:03 | 2018-04-04T06:16:03 | 51,136,928 | 0 | 1 | null | 2018-04-04T06:16:04 | 2016-02-05T09:14:27 | Python | UTF-8 | Python | false | false | 1,856 | py | # -*- coding:utf-8 -*-
# import urllib.request as ur
# import codecs
import requests
import re
DEBUG = False
BASIC_URL = r'http://dict.hjenglish.com/jp/jc/'
# def search_word(word):
# basic_url = r'http://dict.hjenglish.com/jp/jc/'
# search_url = basic_url + word
# #search_url = search_url.encode('ascii')
# fp = ur.urlopen(search_url)
# html_str = fp.read().decode('utf-8')
# print(html_str)
def search_word(word):
    """Fetch the hjenglish JP->CN dictionary page for *word* and probe it
    with several regexes, printing the match results.

    Exploratory/debug code: it prints intermediate objects rather than
    returning a value, and several locals (kana, m2) are assigned but
    never used.
    """
    search_url = BASIC_URL + word
    r = requests.get(search_url)
    content_str = r.content.decode('utf-8')
    # Flatten the page: drop newlines, then remove ALL whitespace so the
    # regexes below can match across original line breaks.
    content_str = re.sub('\n', '', content_str)
    content_str = ''.join(content_str.split())
    if DEBUG:
        '''
        print(search_url)
        print(r.url)
        print(content_str)
        print(r.encoding)
        '''
        # Dump the flattened page for offline inspection.
        with open('out.txt', 'w', encoding='utf-8') as fp:
            fp.write(content_str)
    if DEBUG:
        # Replace the live page with a saved fixture (single line).
        with open('../../res/html_part.txt', encoding='utf-8') as fpsaved:
            content_str = fpsaved.readline()
    kana = ''
    # re1_str = r'([/u2E80-/u9FFF]+)'
    # NOTE(review): "[/u2E80-/u9FFF]" uses "/u" instead of "\u", so it is
    # a literal character class, not a CJK unicode range.  The assignment
    # below is immediately overwritten anyway, so only the last re1_str
    # value (the non-greedy 【...】 capture) is actually used.
    re1_str = '假名">【([/u2E80-/u9FFF]+)】<'
    re1_str = 'title="假名">【(.*?)】<'
    # re1_str = r'<span id="kana_1" class="trs_jp bold" title="假名">【(\w+)】</span>'
    re2_str = '<span id="kana_1" class="trs_jp bold" title="假名"><font color="red">【(\S+)】</font></span>'
    # Compare module-level search, compiled search, and findall results.
    m1 = re.search(re1_str, content_str)
    c1 = re.compile(re1_str, re.MULTILINE)
    res1 = c1.search(content_str)
    m2 = re.search(re2_str, content_str)
    # NOTE(review): if the page has no match, res1/m1 are None and the
    # .groups()/.group() calls below raise AttributeError.
    print(type(m1))
    print(type(res1))
    print(c1.flags)
    print(re.findall(re1_str, content_str))
    print(res1.groups())
    print(m1.group(0))
    print(m1.start(1))
    print(m1.groups())
    # print(m2.group(1))
    '''
    [/u2E80-/u9FFF]+
    '''
| [
"yzyDavid@qq.com"
] | yzyDavid@qq.com |
1c5daec5e4fda16f1120b32e7f9d688b02254b60 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/IB-DHCPONE-MIB.py | aea222e97e72ae77fa4c45e1500e93446cf69240 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 11,349 | py | #
# PySNMP MIB module IB-DHCPONE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/IB-DHCPONE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:50:35 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint")
IbString, IbIpAddr, ibDHCPOne = mibBuilder.importSymbols("IB-SMI-MIB", "IbString", "IbIpAddr", "ibDHCPOne")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, enterprises, Gauge32, ModuleIdentity, IpAddress, Integer32, Counter32, ObjectIdentity, TimeTicks, MibIdentifier, Unsigned32, iso, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "enterprises", "Gauge32", "ModuleIdentity", "IpAddress", "Integer32", "Counter32", "ObjectIdentity", "TimeTicks", "MibIdentifier", "Unsigned32", "iso", "Counter64")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ibDhcpModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1))
ibDhcpModule.setRevisions(('2010-03-23 00:00', '2008-02-14 00:00', '2005-01-10 00:00', '2004-05-21 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ibDhcpModule.setRevisionsDescriptions(('Fixed smilint errors', 'change ibDHCPSubnetPercentUsed syntax', 'Added copyright', 'Creation of the MIB file',))
if mibBuilder.loadTexts: ibDhcpModule.setLastUpdated('201003230000Z')
if mibBuilder.loadTexts: ibDhcpModule.setOrganization('Infoblox')
if mibBuilder.loadTexts: ibDhcpModule.setContactInfo('See IB-SMI-MIB for information.')
if mibBuilder.loadTexts: ibDhcpModule.setDescription('This file defines the Infoblox DHCP One MIB.')
ibDHCPSubnetTable = MibTable((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 1), )
if mibBuilder.loadTexts: ibDHCPSubnetTable.setStatus('current')
if mibBuilder.loadTexts: ibDHCPSubnetTable.setDescription('A table of DHCP Subnet statistics.')
ibDHCPSubnetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 1, 1), ).setIndexNames((0, "IB-DHCPONE-MIB", "ibDHCPSubnetNetworkAddress"))
if mibBuilder.loadTexts: ibDHCPSubnetEntry.setStatus('current')
if mibBuilder.loadTexts: ibDHCPSubnetEntry.setDescription('A conceptual row of the ibDHCPSubnetEntry containing info about a particular network using DHCP.')
ibDHCPSubnetNetworkAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 1, 1, 1), IbIpAddr()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPSubnetNetworkAddress.setStatus('current')
if mibBuilder.loadTexts: ibDHCPSubnetNetworkAddress.setDescription('DHCP Subnet in IpAddress format. A subnetwork may have many ranges for lease.')
ibDHCPSubnetNetworkMask = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 1, 1, 2), IbIpAddr()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPSubnetNetworkMask.setStatus('current')
if mibBuilder.loadTexts: ibDHCPSubnetNetworkMask.setDescription('DHCP Subnet mask in IpAddress format.')
ibDHCPSubnetPercentUsed = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPSubnetPercentUsed.setStatus('current')
if mibBuilder.loadTexts: ibDHCPSubnetPercentUsed.setDescription('Percentage of dynamic DHCP address for subnet leased out at this time. Fixed addresses are always counted as leased for this calcuation if the fixed addresses are within ranges of leases.')
ibDHCPStatistics = MibIdentifier((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3))
ibDhcpTotalNoOfDiscovers = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfDiscovers.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfDiscovers.setDescription('This variable indicates the number of discovery messages received')
ibDhcpTotalNoOfRequests = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfRequests.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfRequests.setDescription('This variable indicates the number of requests received')
ibDhcpTotalNoOfReleases = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfReleases.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfReleases.setDescription('This variable indicates the number of releases received')
ibDhcpTotalNoOfOffers = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfOffers.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfOffers.setDescription('This variable indicates the number of offers sent')
ibDhcpTotalNoOfAcks = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfAcks.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfAcks.setDescription('This variable indicates the number of acks sent')
ibDhcpTotalNoOfNacks = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfNacks.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfNacks.setDescription('This variable indicates the number of nacks sent')
ibDhcpTotalNoOfDeclines = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfDeclines.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfDeclines.setDescription('This variable indicates the number of declines received')
ibDhcpTotalNoOfInforms = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfInforms.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfInforms.setDescription('This variable indicates the number of informs received')
ibDhcpTotalNoOfOthers = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfOthers.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfOthers.setDescription('This variable indicates the number of other messages received')
ibDhcpDeferredQueueSize = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpDeferredQueueSize.setStatus('current')
if mibBuilder.loadTexts: ibDhcpDeferredQueueSize.setDescription('The size of deferred dynamic DNS update queue')
ibDHCPDDNSStats = MibIdentifier((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5))
ibDHCPDDNSAvgLatency5 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPDDNSAvgLatency5.setStatus('current')
if mibBuilder.loadTexts: ibDHCPDDNSAvgLatency5.setDescription('Average Latencies (in microseconds) for DHCPD dynamic DNS updates during the last 5 minutes')
ibDHCPDDNSAvgLatency15 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPDDNSAvgLatency15.setStatus('current')
if mibBuilder.loadTexts: ibDHCPDDNSAvgLatency15.setDescription('Average Latencies (in microseconds) for DHCPD dynamic DNS updates during the last 15 minutes')
ibDHCPDDNSAvgLatency60 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPDDNSAvgLatency60.setStatus('current')
if mibBuilder.loadTexts: ibDHCPDDNSAvgLatency60.setDescription('Average Latencies (in microseconds) for DHCPD dynamic DNS updates during the last 60 minutes')
ibDHCPDDNSAvgLatency1440 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPDDNSAvgLatency1440.setStatus('current')
if mibBuilder.loadTexts: ibDHCPDDNSAvgLatency1440.setDescription('Average Latencies (in microseconds) for DHCPD dynamic DNS updates during the last 1 day')
ibDHCPDDNSTimeoutCount5 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPDDNSTimeoutCount5.setStatus('current')
if mibBuilder.loadTexts: ibDHCPDDNSTimeoutCount5.setDescription('The number of timeout DHCPD dynamic DDNS updates during the last 5 minutes')
ibDHCPDDNSTimeoutCount15 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPDDNSTimeoutCount15.setStatus('current')
if mibBuilder.loadTexts: ibDHCPDDNSTimeoutCount15.setDescription('The number of timeout DHCPD dynamic DDNS updates during the last 15 minutes')
ibDHCPDDNSTimeoutCount60 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPDDNSTimeoutCount60.setStatus('current')
if mibBuilder.loadTexts: ibDHCPDDNSTimeoutCount60.setDescription('The number of timeout DHCPD dynamic DDNS updates during the last 60 minutes')
ibDHCPDDNSTimeoutCount1440 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5, 8), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPDDNSTimeoutCount1440.setStatus('current')
if mibBuilder.loadTexts: ibDHCPDDNSTimeoutCount1440.setDescription('The number of timeout DHCPD dynamic DDNS updates during the last 1 day')
mibBuilder.exportSymbols("IB-DHCPONE-MIB", ibDhcpTotalNoOfAcks=ibDhcpTotalNoOfAcks, ibDhcpTotalNoOfOthers=ibDhcpTotalNoOfOthers, ibDHCPSubnetNetworkAddress=ibDHCPSubnetNetworkAddress, ibDHCPDDNSAvgLatency5=ibDHCPDDNSAvgLatency5, ibDhcpTotalNoOfReleases=ibDhcpTotalNoOfReleases, ibDhcpTotalNoOfInforms=ibDhcpTotalNoOfInforms, ibDHCPDDNSTimeoutCount5=ibDHCPDDNSTimeoutCount5, ibDhcpTotalNoOfOffers=ibDhcpTotalNoOfOffers, ibDhcpTotalNoOfRequests=ibDhcpTotalNoOfRequests, ibDHCPSubnetTable=ibDHCPSubnetTable, ibDHCPStatistics=ibDHCPStatistics, ibDHCPDDNSAvgLatency60=ibDHCPDDNSAvgLatency60, ibDhcpModule=ibDhcpModule, ibDhcpTotalNoOfDiscovers=ibDhcpTotalNoOfDiscovers, ibDHCPDDNSTimeoutCount60=ibDHCPDDNSTimeoutCount60, ibDHCPDDNSAvgLatency15=ibDHCPDDNSAvgLatency15, ibDHCPDDNSTimeoutCount15=ibDHCPDDNSTimeoutCount15, ibDHCPDDNSStats=ibDHCPDDNSStats, ibDhcpTotalNoOfDeclines=ibDhcpTotalNoOfDeclines, ibDHCPSubnetNetworkMask=ibDHCPSubnetNetworkMask, ibDhcpTotalNoOfNacks=ibDhcpTotalNoOfNacks, ibDHCPSubnetEntry=ibDHCPSubnetEntry, ibDHCPSubnetPercentUsed=ibDHCPSubnetPercentUsed, ibDhcpDeferredQueueSize=ibDhcpDeferredQueueSize, PYSNMP_MODULE_ID=ibDhcpModule, ibDHCPDDNSTimeoutCount1440=ibDHCPDDNSTimeoutCount1440, ibDHCPDDNSAvgLatency1440=ibDHCPDDNSAvgLatency1440)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
6bb0bb620a727e137539fa2541dcdc9c70c36bb4 | c9bc95759aef6c068a9fbb170c40c255b2f4e451 | /plugin/CutsceneSkipper.py | c85430b6ee289a25d0abf2093ca399f891c71bd7 | [] | no_license | lumptyd/FFxivPythonTriggerPlus | 52847420d866414024330bf8ff074fea65f6b239 | d7d783f7f4412f4c2fa965d12f74585010d12e09 | refs/heads/master | 2023-06-06T00:55:59.194873 | 2021-06-23T18:46:20 | 2021-06-23T18:46:20 | 379,401,467 | 0 | 0 | null | 2021-06-22T21:27:03 | 2021-06-22T21:10:01 | null | UTF-8 | Python | false | false | 2,865 | py | from FFxivPythonTrigger import PluginBase
import logging
"""
patch code to skip cutscene in some zone
command: @cutscene
format: /e @cutscene [p(patch)/d(dispatch)]
"""
# Two x86 NOP opcodes used to overwrite the cutscene checks in place.
nop = b"\x90\x90"
# Byte signature scanned for in the game's main module to locate the
# code site to patch.
pattern = b"\x8B\xD7\x48\x8B\x08\x4C\x8B\x01"
# Chat command that toggles the patch (/e @cutscene p|d).
command="@cutscene"
class CutsceneSkipper(PluginBase):
    """FFxivPythonTrigger plugin that NOP-patches two 2-byte code sites
    (at pattern + 0x11 and + 0x2c) to skip cutscenes, and can restore the
    original bytes on demand or on unload."""
    name = "Cutscene Skipper"
    def plugin_onload(self):
        # Saved original bytes; None means "not currently patched".
        self.original_0 = None
        self.original_1 = None
        # Locate the patch site by byte signature in the main module.
        self.scanAddress = self.FPT.api.MemoryHandler.pattern_scan_main_module(pattern)
        self.FPT.log("found scan address at %s"%hex(self.scanAddress),logging.DEBUG)
        self.FPT.api.command.register(command, self.process_command)
        # self.FPT.register_event("log_event", self.process_command)
    def process_command(self, args):
        # Echo the human-readable result of the command back to chat.
        self.FPT.api.Magic.echo_msg(self._process_command(args))
    def _process_command(self, arg):
        # Dispatch "p[atch]" / "d[ispatch]"; any exception (including the
        # state-validation errors raised below) is returned as its message.
        try:
            if arg[0] == "patch" or arg[0] == "p":
                return "patch success" if self.patch() else "invalid patch"
            elif arg[0] == "dispatch" or arg[0] == "d":
                return "dispatch success" if self.dispatch() else "invalid dispatch"
            else:
                return "unknown arguments {}".format(arg[0])
        except Exception as e:
            return str(e)
    def patch(self):
        """Save the two original byte pairs and overwrite them with NOPs.

        Raises if the scan failed or the site is already NOP-patched.
        """
        if self.scanAddress is None:
            raise Exception("address scan not found")
        original_0 = self.FPT.api.MemoryHandler.read_bytes(self.scanAddress + 0x11, 2)
        original_1 = self.FPT.api.MemoryHandler.read_bytes(self.scanAddress + 0x2c, 2)
        if original_0 == nop and original_1 == nop:
            raise Exception("already patched")
        self.original_0 = original_0
        self.original_1 = original_1
        self.FPT.api.MemoryHandler.write_bytes(self.scanAddress + 0x11, nop, len(nop))
        self.FPT.api.MemoryHandler.write_bytes(self.scanAddress + 0x2c, nop, len(nop))
        return True
    def dispatch(self):
        """Restore the saved original bytes (undo patch()).

        Raises if the scan failed, the site is not NOP-patched, or no
        original bytes were saved.
        """
        if self.scanAddress is None:
            raise Exception("address scan not found")
        original_0 = self.FPT.api.MemoryHandler.read_bytes(self.scanAddress + 0x11, 2)
        original_1 = self.FPT.api.MemoryHandler.read_bytes(self.scanAddress + 0x2c, 2)
        if original_0 != nop or original_1 != nop:
            raise Exception("not patched")
        if self.original_0 is None:
            raise Exception("original data not found")
        self.FPT.api.MemoryHandler.write_bytes(self.scanAddress + 0x11, self.original_0, len(nop))
        self.FPT.api.MemoryHandler.write_bytes(self.scanAddress + 0x2c, self.original_1, len(nop))
        self.original_0 = None
        self.original_1 = None
        return True
    def plugin_onunload(self):
        self.FPT.api.command.unregister(command)
        # Best-effort restore on unload; NOTE(review): the bare `except:`
        # also swallows KeyboardInterrupt/SystemExit — `except Exception:`
        # would be safer, left unchanged here.
        try:
            self.dispatch()
        except:
            pass
| [
"hhh"
] | hhh |
482dcaf3edc7af83efcf3fa4d13ebaf71dfa5d7b | e9e6ce520a2abae5e13363b47fbd6e9ebfc6c73f | /descriptions/three_pi_description_copy/scripts/move.py | ab89d306bb17e85d91a935f83254f8d8ee76dcd8 | [] | no_license | Lizzylizard/ReinforcementLearningByElisabeth | 755b5fff13f06f3f452e12a7eb6b48722e3bf3c2 | 10d612d454864028462e85d5349d4440833a3797 | refs/heads/main | 2022-12-29T07:52:47.828097 | 2020-10-16T13:04:07 | 2020-10-16T13:04:07 | 304,057,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,818 | py | #!/usr/bin/env python
import rospy
#from /home/elisabeth/catkin_ws/src/ROS_Packages/my_msgs.msg import VelJoint
from my_msgs.msg import VelJoint
def move():
    """Drive the robot straight for a user-specified distance, then stop.

    Reads speed, distance and direction from stdin, publishes VelJoint
    wheel velocities on /cmd_vel until the travelled distance (estimated
    open-loop as speed * elapsed time) reaches the target, then publishes
    a zero-velocity message and returns.
    """
    # Starts a new node
    rospy.init_node('move_three_pi', anonymous=True)
    velocity_publisher = rospy.Publisher('/cmd_vel', VelJoint, queue_size=10)
    vel_msg = VelJoint()
    #Receiveing the user's input
    print("Let's move your robot")
    speed = float(input("Input your speed:"))
    distance = float(input("Type your distance:"))
    # BUGFIX: the original used bool(input(...)), which is True for ANY
    # non-empty answer — including "False" and "no".  Parse an explicit
    # yes/true style answer instead (empty input still means backwards,
    # matching the old bool("") == False behaviour).
    isForward = input("Foward?: ").strip().lower() in ("y", "yes", "true", "1")
    # Same magnitude on both wheels; the sign encodes the direction.
    wheel_vel = abs(speed) if isForward else -abs(speed)
    vel_msg.left_vel = wheel_vel
    vel_msg.right_vel = wheel_vel
    # BUGFIX: the original wrapped everything below in
    # `while not rospy.is_shutdown():`, which reset t0/current_distance
    # and re-drove the full distance forever.  Perform the move once;
    # the shutdown check now lives in the driving loop itself.
    t0 = rospy.Time.now().to_sec()
    current_distance = 0
    #Loop to move the robot for the specified distance
    while current_distance < distance and not rospy.is_shutdown():
        #Publish the velocity
        velocity_publisher.publish(vel_msg)
        #Takes actual time to velocity calculus
        t1 = rospy.Time.now().to_sec()
        #Open-loop distance estimate: speed * elapsed time
        current_distance = speed * (t1 - t0)
    #After the loop, stops the robot
    vel_msg.left_vel = float(0)
    vel_msg.right_vel = float(0)
    #Force the robot to stop
    velocity_publisher.publish(vel_msg)
if __name__ == '__main__':
    try:
        #Testing our function
        move()
    # Standard rospy pattern: Ctrl-C / node shutdown raises
    # ROSInterruptException, which is expected and deliberately ignored.
    except rospy.ROSInterruptException: pass
"elisabeth.milde@informatik.hs-fulda.de"
] | elisabeth.milde@informatik.hs-fulda.de |
29b762ec133fdade4911774d3363521b2f7f9ba4 | 3a5c7d812fac93b4a2543a36a07149f864d4ed0b | /Hmm/hmm_new/2H.py | 59a37551d7613858f64e6cebb22695e8f0e26a66 | [
"MIT"
] | permissive | MrAlexLemon/GeneratorOfPoems | d54a17476cd3cc581f0bcc03865d5adb7e1063df | 467f798153fed967c9c5994c9a796b90d565f597 | refs/heads/master | 2020-04-23T08:36:43.411172 | 2019-02-16T19:30:45 | 2019-02-16T19:30:45 | 171,042,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,861 | py |
from HMM import unsupervised_HMM
from Utility import Utility
import re
import copy
def unsupervised_learning(n_states, n_iters):
    '''
    Trains an HMM with unsupervised learning on the stripped Shakespeare
    poem data and prints the learned matrices.

    Arguments:
        n_states: Number of hidden states that the HMM should have.
        n_iters:  Number of EM training iterations.

    Returns:
        (HMM, genre_map, rhyming): the trained model, the word->index
        mapping, and the rhyming groups loaded from the data.
    '''
    genres, genre_map, rhyming = Utility.load_shakespeare_hidden_stripped_poems()
    # Train the HMM.
    HMM = unsupervised_HMM(genres, n_states, n_iters)
    # Print the transition matrix.
    print("Transition Matrix:")
    print('#' * 70)
    for i in range(len(HMM.A)):
        print(''.join("{:<12.3e}".format(HMM.A[i][j]) for j in range(len(HMM.A[i]))))
    print('')
    print('')
    # Print the observation matrix.
    print("Observation Matrix: ")
    print('#' * 70)
    for i in range(len(HMM.O)):
        print(''.join("{:<12.3e}".format(HMM.O[i][j]) for j in range(len(HMM.O[i]))))
    print('')
    print('')
    return HMM, genre_map, rhyming
def syllables(word):
    """Estimate the syllable count of *word* with a simple heuristic.

    Counts vowel groups (each non-vowel -> vowel transition starts one),
    subtracts a trailing silent 'e', re-adds one for '-le' endings, and
    returns at least 1.  'y' is treated as a vowel.
    """
    vowels = set('aeiouy')
    cleaned = word.lower().strip(".:;?!")
    # The first character starts a vowel group if it is a vowel.
    total = 1 if cleaned[0] in vowels else 0
    # Every consonant->vowel transition starts a new vowel group.
    for prev, cur in zip(cleaned, cleaned[1:]):
        if cur in vowels and prev not in vowels:
            total += 1
    if cleaned.endswith('e'):
        total -= 1
    if cleaned.endswith('le'):
        total += 1
    if total == 0:
        total = 1
    return total
def sylco(word) :
    """Rule-based English syllable counter.

    Counts vowels, collapses consecutive-vowel groups, then applies a
    list of English-specific add/drop rules (silent 'e', '-le', 'mc-',
    consonant-surrounded 'y', 'tri-'/'bi-', '-ian', 'co-'/'pre-'
    prefixes, "-n't", plus explicit exception words).  Words of three or
    fewer letters always count as one syllable.
    """
    word = word.lower()
    # exception_add are words that need extra syllables
    # exception_del are words that need fewer syllables
    exception_add = ['serious','crucial']
    exception_del = ['fortunately','unfortunately']
    co_one = ['cool','coach','coat','coal','count','coin','coarse','coup','coif','cook','coign','coiffe','coof','court']
    co_two = ['coapt','coed','coinci']
    pre_one = ['preach']
    syls = 0 #added syllable number
    disc = 0 #discarded syllable number
    #1) if letters < 3 : return 1
    if len(word) <= 3 :
        syls = 1
        return syls
    #2) if doesn't end with "ted" or "tes" or "ses" or "ied" or "ies", discard "es" and "ed" at the end.
    # if it has only 1 vowel or 1 set of consecutive vowels, discard. (like "speed", "fled" etc.)
    if word[-2:] == "es" or word[-2:] == "ed" :
        doubleAndtripple_1 = len(re.findall(r'[eaoui][eaoui]',word))
        if doubleAndtripple_1 > 1 or len(re.findall(r'[eaoui][^eaoui]',word)) > 1 :
            if word[-3:] == "ted" or word[-3:] == "tes" or word[-3:] == "ses" or word[-3:] == "ied" or word[-3:] == "ies" :
                pass
            else :
                disc+=1
    #3) discard trailing "e", except where ending is "le"
    le_except = ['whole','mobile','pole','male','female','hale','pale','tale','sale','aisle','whale','while']
    if word[-1:] == "e" :
        if word[-2:] == "le" and word not in le_except :
            pass
        else :
            disc+=1
    #4) check if consecutive vowels exists, triplets or pairs, count them as one.
    doubleAndtripple = len(re.findall(r'[eaoui][eaoui]',word))
    tripple = len(re.findall(r'[eaoui][eaoui][eaoui]',word))
    disc+=doubleAndtripple + tripple
    #5) count remaining vowels in word.
    numVowels = len(re.findall(r'[eaoui]',word))
    #6) add one if starts with "mc"
    if word[:2] == "mc" :
        syls+=1
    #7) add one if ends with "y" but is not surrounded by a vowel
    if word[-1:] == "y" and word[-2] not in "aeoui" :
        syls +=1
    #8) add one if "y" is surrounded by non-vowels and is not the last letter.
    for i,j in enumerate(word) :
        if j == "y" :
            if (i != 0) and (i != len(word)-1) :
                if word[i-1] not in "aeoui" and word[i+1] not in "aeoui" :
                    syls+=1
    #9) if starts with "tri-" or "bi-" and is followed by a vowel, add one.
    if word[:3] == "tri" and word[3] in "aeoui" :
        syls+=1
    if word[:2] == "bi" and word[2] in "aeoui" :
        syls+=1
    #10) if ends with "-ian", should be counted as two syllables, except for "-tian" and "-cian"
    if word[-3:] == "ian" :
        #and (word[-4:] != "cian" or word[-4:] != "tian") :
        if word[-4:] == "cian" or word[-4:] == "tian" :
            pass
        else :
            syls+=1
    #11) if starts with "co-" and is followed by a vowel, check if exists in the double syllable dictionary, if not, check if in single dictionary and act accordingly.
    if word[:2] == "co" and word[2] in 'eaoui' :
        if word[:4] in co_two or word[:5] in co_two or word[:6] in co_two :
            syls+=1
        elif word[:4] in co_one or word[:5] in co_one or word[:6] in co_one :
            pass
        else :
            syls+=1
    #12) if starts with "pre-" and is followed by a vowel, check if exists in the double syllable dictionary, if not, check if in single dictionary and act accordingly.
    if word[:3] == "pre" and word[3] in 'eaoui' :
        if word[:6] in pre_one :
            pass
        else :
            syls+=1
    #13) check for "-n't" and cross match with dictionary to add syllable.
    negative = ["doesn't", "isn't", "shouldn't", "couldn't","wouldn't"]
    if word[-3:] == "n't" :
        if word in negative :
            syls+=1
        else :
            pass
    #14) Handling the exceptional words.
    if word in exception_del :
        disc+=1
    if word in exception_add :
        syls+=1
    # calculate the output
    return numVowels - disc + syls
if __name__ == '__main__':
    print('')
    print('')
    print('#' * 70)
    print("{:^70}".format("Running Code For Question 2H"))
    print('#' * 70)
    print('')
    print('')
    # Train an 8-state HMM for 100 EM iterations on the Shakespeare data.
    HMM, mapping, rhyming = unsupervised_learning(8,100)
    # Invert word->index into index->word for decoding emissions.
    inv_map = {v: k for k, v in mapping.items()}
    numLines = 0
    count = 0
    topN = 15
    # Find the top `topN` words associated with each state
    toPrint = [0. for i in range(topN)]
    for i, row in enumerate(HMM.O):
        # Need to map probability to word, not just index to word, because of sorting
        # NOTE(review): equal probabilities collide as dict keys, and the
        # inner `i` shadows the state index from the outer loop.
        d = {row[i]: inv_map[i] for i in range(len(row))}
        probs = sorted(row)
        for j, p in enumerate(probs[-topN:]):
            toPrint[j] = d[p]
        print(i, toPrint)
    # Generate 14 unrhymed lines; candidate lines are rejected (and
    # re-sampled) unless both syllable counters agree and sum to 10.
    while numLines != 14:
        numSyllables = 0
        currentLine = (HMM.generate_emission(8))
        currentNumberLine = copy.deepcopy(currentLine)
        # Decode emission indices back into words.
        for i in range(len(currentLine)):
            currentLine[i] = inv_map[int(currentLine[i])]
        # Capitalise the first word of the line.
        currentLine[0] = currentLine[0][0].upper() + currentLine[0][1:]
        # Only count a word's syllables when both heuristics agree.
        for i in currentLine:
            if syllables(i) == sylco(i):
                numSyllables += syllables(i)
        if numSyllables == 10:
            print (" ". join(currentLine))
            print()
            numLines += 1
    for i in range(1):
        print()
    # Generate 7 rhyming line pairs: a pair is kept only when the two
    # ending words fall in the same rhyming group and both lines are
    # (agreed) 10 syllables long.
    lst =[]
    lst2 = []
    count = 0
    while (count < 7):
        flag = 0
        numSyllables = 0
        numSyllables2 = 0
        currentLine = (HMM.generate_emission(8))
        currentLine2 = (HMM.generate_emission(8))
        lastNum1 = currentLine[-1]
        lastNum2 = currentLine2[-1]
        # Identical ending words do not count as a rhyme.
        if (lastNum1 == lastNum2):
            continue
        for i in rhyming:
            if lastNum1 in i and lastNum2 in i:
                flag = 1
                break
        if flag == 0:
            continue
        for i in range(len(currentLine)):
            currentLine[i] = inv_map[int(currentLine[i])]
            currentLine2[i] = inv_map[int(currentLine2[i])]
        currentLine[0] = currentLine[0][0].upper() + currentLine[0][1:]
        currentLine2[0] = currentLine2[0][0].upper() + currentLine2[0][1:]
        for i in range(len(currentLine)):
            if syllables(currentLine[i]) == sylco(currentLine[i]) and syllables(currentLine2[i]) == sylco(currentLine2[i]):
                numSyllables += syllables(currentLine[i])
                numSyllables2 += syllables(currentLine2[i])
        if numSyllables == 10 and numSyllables2 == 10:
            lst.append(" ". join(currentLine))
            lst2.append(" ". join(currentLine2))
            count += 1
    assert(len(lst) == 7)
    # Interleave the pairs into an ABAB-rhymed sonnet: three quatrains
    # followed by a closing couplet.
    print(lst[0])
    print(lst[1])
    print(lst2[0])
    print(lst2[1])
    print(lst[2])
    print(lst[3])
    print(lst2[2])
    print(lst2[3])
    print(lst[4])
    print(lst[5])
    print(lst2[4])
    print(lst2[5])
    print(lst[6])
    print(lst2[6])
| [
"lileynuk@gmail.com"
] | lileynuk@gmail.com |
4f6302572228b7efcecdd72f4d4e5d237a66da92 | a8577e7ad1652458de236b85636069a1ca0c9c96 | /oscn/parse/docket_report.py | 88017f3e596727cda11a3d9842e190147e4cbcc4 | [
"MIT"
] | permissive | codefortulsa/oscn | 596678649b9e5e0db58ad6ad7313cfcd90b907b0 | 012f721127849ff24f3f8b3c17c640c388e82591 | refs/heads/main | 2023-02-11T15:11:59.651899 | 2023-01-26T23:16:05 | 2023-01-26T23:16:05 | 140,227,962 | 11 | 10 | MIT | 2022-12-26T21:31:55 | 2018-07-09T03:42:54 | Python | UTF-8 | Python | false | false | 890 | py | import urllib.parse as urlparse
from bs4 import BeautifulSoup
def cases(oscn_html):
    """Extract case identifiers from an OSCN docket-report HTML page.

    Each identifier has the form "<db>-<case number>", where ``db`` is
    taken from the query string of the case link and the case number is
    the link text.
    """
    soup = BeautifulSoup(oscn_html, "html.parser")
    identifiers = []
    for table in soup.findAll("table", "clspg"):
        link = table.find("a")
        query = urlparse.parse_qs(urlparse.urlparse(link["href"]).query)
        identifiers.append(f"{query['db'][0]}-{link.text}")
    return identifiers


setattr(cases, "target", ["Docket"])
setattr(cases, "_default_value", [])
def tables(oscn_html):
    """Collect the docket-result tables of an OSCN page.

    NOTE(review): this appends the *bound* ``get_text`` method of each
    table, not its return value -- that looks unintended (a missing call),
    but it is preserved here; confirm against callers before changing.
    """
    soup = BeautifulSoup(oscn_html, "html.parser")
    return [table.get_text for table in soup.findAll("table", "clspg")]


setattr(tables, "target", ["Docket"])
setattr(tables, "_default_value", [])
"johnadungan@gmail.com"
] | johnadungan@gmail.com |
4271d8331175e5148e8949a67e93f8ab2c93e395 | 8a3cc7cee5da2cfc69270feb502e71a52ebe7684 | /MinMax & Alphabeta/game_agent.py | 3d65c7087bdcaaab87a1284fcdf6485d3a1c0e29 | [] | no_license | LearnedVector/AI-Foundation | 1ff6287cee2c92e8f9ead03b106431307ea64c07 | f191feb10ca47d5281c8002ee990228723ab850f | refs/heads/master | 2021-07-13T19:43:25.282277 | 2017-10-16T01:33:55 | 2017-10-16T01:33:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,501 | py | import random
class SearchTimeout(Exception):
    """Subclass base exception for code clarity. """
    # Raised inside the search helpers when self.time_left() drops below
    # TIMER_THRESHOLD; caught in get_move() so the agent can return the
    # best move found so far instead of forfeiting on timeout.
    pass
def center_score(game, player):
    """Squared Euclidean distance of `player` from the board center.

    Terminal states short-circuit: a lost position scores -inf and a won
    position +inf, matching the other heuristics in this module.
    """
    if game.is_loser(player):
        return float("-inf")
    if game.is_winner(player):
        return float("inf")
    center_col = game.width / 2.
    center_row = game.height / 2.
    row, col = game.get_player_location(player)
    return float((center_row - row) ** 2 + (center_col - col) ** 2)
def improved_score(game, player):
    """Mobility difference: own legal moves minus the opponent's.

    Terminal states map to -inf (loss) / +inf (win).
    """
    if game.is_loser(player):
        return float("-inf")
    if game.is_winner(player):
        return float("inf")
    own_mobility = len(game.get_legal_moves(player))
    opp_mobility = len(game.get_legal_moves(game.get_opponent(player)))
    return float(own_mobility - opp_mobility)
def open_move(game, player):
    # Number of legal moves available to `player`.
    #
    # NOTE(review): a *lost* terminal state returns +inf here, unlike
    # center_score/improved_score which return -inf on a loss.  In
    # isolation this looks like a typo, but it is load-bearing: this
    # function is only used as a factor in the product heuristics
    # custom_score (center_score * open_move) and custom_score_3
    # (open_move * improved_score), where the -inf factor from the other
    # term times this +inf yields the intended -inf on a loss.  "Fixing"
    # the sign here would flip those products to +inf for lost states.
    if game.is_loser(player):
        return float("inf")
    if game.is_winner(player):
        return float("inf")
    return float(len(game.get_legal_moves(player)))
def weighted_improved_score(game, player):
    """Mobility difference with own mobility weighted by the move count,
    so the agent values its own freedom more as the game progresses.

    Terminal states map to -inf (loss) / +inf (win).
    """
    if game.is_loser(player):
        return float("-inf")
    if game.is_winner(player):
        return float("inf")
    weight = game.move_count
    own_mobility = len(game.get_legal_moves(player))
    opp_mobility = len(game.get_legal_moves(game.get_opponent(player)))
    return float(weight * own_mobility - opp_mobility)
def custom_score(game, player):
    """Calculate the heuristic value of a game state from the point of view
    of the given player: the product of the squared-distance-from-center
    score and the player's open-move count.

    This should be the best heuristic function for your project submission.

    Note: this function should be called from within a Player instance as
    `self.score()` -- you should not need to call this function directly.

    Parameters
    ----------
    game : `isolation.Board`
        An instance of `isolation.Board` encoding the current state of the
        game (e.g., player locations and blocked cells).

    player : object
        A player instance in the current game (i.e., an object corresponding to
        one of the player objects `game.__player_1__` or `game.__player_2__`.)

    Returns
    -------
    float
        The heuristic value of the current game state to the specified player.
    """
    centrality = center_score(game, player)
    mobility = open_move(game, player)
    return centrality * mobility
def custom_score_2(game, player):
    """Calculate the heuristic value of a game state from the point of view
    of the given player.

    Note: this function should be called from within a Player instance as
    `self.score()` -- you should not need to call this function directly.

    Parameters
    ----------
    game : `isolation.Board`
        An instance of `isolation.Board` encoding the current state of the
        game (e.g., player locations and blocked cells).

    player : object
        A player instance in the current game (i.e., an object corresponding to
        one of the player objects `game.__player_1__` or `game.__player_2__`.)

    Returns
    -------
    float
        The heuristic value of the current game state to the specified player.
    """
    # Delegates to the move-count-weighted mobility-difference heuristic.
    return weighted_improved_score(game, player)
def custom_score_3(game, player):
    """Calculate the heuristic value of a game state from the point of view
    of the given player: the product of the player's open-move count and
    the mobility-difference (improved) score.

    Note: this function should be called from within a Player instance as
    `self.score()` -- you should not need to call this function directly.

    Parameters
    ----------
    game : `isolation.Board`
        An instance of `isolation.Board` encoding the current state of the
        game (e.g., player locations and blocked cells).

    player : object
        A player instance in the current game (i.e., an object corresponding to
        one of the player objects `game.__player_1__` or `game.__player_2__`.)

    Returns
    -------
    float
        The heuristic value of the current game state to the specified player.
    """
    mobility = open_move(game, player)
    relative_mobility = improved_score(game, player)
    return mobility * relative_mobility
class IsolationPlayer:
    """Base class for minimax and alphabeta agents -- this class is never
    constructed or tested directly.

    ******************** DO NOT MODIFY THIS CLASS ********************

    Parameters
    ----------
    search_depth : int (optional)
        A strictly positive integer (i.e., 1, 2, 3,...) for the number of
        layers in the game tree to explore for fixed-depth search. (i.e., a
        depth of one (1) would only explore the immediate sucessors of the
        current state.)

    score_fn : callable (optional)
        A function to use for heuristic evaluation of game states.

    timeout : float (optional)
        Time remaining (in milliseconds) when search is aborted. Should be a
        positive value large enough to allow the function to return before the
        timer expires.
    """
    def __init__(self, search_depth=3, score_fn=custom_score, timeout=10.):
        self.search_depth = search_depth  # plies to explore in fixed-depth search
        self.score = score_fn             # heuristic evaluation function
        self.time_left = None             # set per-turn by get_move()
        self.TIMER_THRESHOLD = timeout    # ms margin at which search aborts
class MinimaxPlayer(IsolationPlayer):
    """Game-playing agent that chooses a move using depth-limited minimax
    search. You must finish and test this player to make sure it properly uses
    minimax to return a good move before the search time limit expires.
    """

    def get_move(self, game, time_left):
        """Search for the best move from the available legal moves and return a
        result before the time limit expires.

        Wraps the fixed-depth call to minimax() so all Isolation agents share
        a common interface.

        Parameters
        ----------
        game : `isolation.Board`
            An instance of `isolation.Board` encoding the current state of the
            game (e.g., player locations and blocked cells).

        time_left : callable
            A function that returns the number of milliseconds left in the
            current turn. Returning with any less than 0 ms remaining forfeits
            the game.

        Returns
        -------
        (int, int)
            Board coordinates corresponding to a legal move; may return
            (-1, -1) if there are no available legal moves.
        """
        self.time_left = time_left

        # Initialize the best move so that this function returns something
        # in case the search fails due to timeout
        best_move = (-1, -1)

        try:
            # The try/except block will automatically catch the exception
            # raised when the timer is about to expire.
            return self.minimax(game, self.search_depth)

        except SearchTimeout:
            pass  # Handle any actions required after timeout as needed

        # Return the best move from the last completed search iteration
        return best_move

    def minimax(self, game, depth):
        """Depth-limited minimax search (MINIMAX-DECISION from the AIMA text).

        Parameters
        ----------
        game : isolation.Board
            An instance of the Isolation game `Board` class representing the
            current game state

        depth : int
            Depth is an integer representing the maximum number of plies to
            search in the game tree before aborting

        Returns
        -------
        (int, int)
            The board coordinates of the best move found in the current search;
            (-1, -1) if there are no legal moves

        Raises
        ------
        SearchTimeout
            When self.time_left() drops below self.TIMER_THRESHOLD; the timer
            is checked at the top of every helper so the agent aborts promptly.
        """
        def terminal_state(legal_moves, depth):
            # Leaf test: no moves remain, or the depth budget is exhausted.
            if not legal_moves or depth <= 0:
                return True
            return False

        def min_value(game, depth):
            if self.time_left() < self.TIMER_THRESHOLD:
                raise SearchTimeout()
            legal_moves = game.get_legal_moves()
            if terminal_state(legal_moves, depth):
                # Min nodes are the opponent's turn: evaluate from the
                # perspective of the player who just moved (now inactive).
                return self.score(game, game._inactive_player)
            min_val = float("inf")
            for coordinates in legal_moves:
                min_val = min(min_val,
                    max_value(game.forecast_move(coordinates), depth - 1))
            return min_val

        def max_value(game, depth):
            if self.time_left() < self.TIMER_THRESHOLD:
                raise SearchTimeout()
            legal_moves = game.get_legal_moves()
            if terminal_state(legal_moves, depth):
                return self.score(game, game._active_player)
            max_val = float("-inf")
            for coordinates in legal_moves:
                max_val = max(max_val,
                    min_value(game.forecast_move(coordinates), depth - 1))
            return max_val

        if self.time_left() < self.TIMER_THRESHOLD:
            raise SearchTimeout()

        legal_moves = game.get_legal_moves()
        if terminal_state(legal_moves, depth):
            # Nothing to search: return the sentinel "no move".
            return (-1, -1)
        # The root is a max node: choose the move whose min-value reply
        # is the largest.
        return max(legal_moves,
                   key=lambda m: min_value(game.forecast_move(m), depth - 1))
class AlphaBetaPlayer(IsolationPlayer):
    """Game-playing agent that chooses a move using iterative deepening minimax
    search with alpha-beta pruning. You must finish and test this player to
    make sure it returns a good move before the search time limit expires.
    """

    def get_move(self, game, time_left):
        """Search for the best move from the available legal moves and return
        a result before the time limit expires.

        Runs iterative deepening: alphabeta() is called with an increasing
        depth limit until the timer expires, and the best move from the
        deepest *completed* search is returned.

        Parameters
        ----------
        game : `isolation.Board`
            An instance of `isolation.Board` encoding the current state of the
            game (e.g., player locations and blocked cells).

        time_left : callable
            A function that returns the number of milliseconds left in the
            current turn. Returning with any less than 0 ms remaining forfeits
            the game.

        Returns
        -------
        (int, int)
            Board coordinates corresponding to a legal move; may return
            (-1, -1) if there are no available legal moves.
        """
        self.time_left = time_left

        # Initialize the best move so that this function returns something
        # in case the search fails due to timeout
        best_move = (-1, -1)

        try:
            # The try/except block will automatically catch the exception
            # raised when the timer is about to expire.
            depth = 0
            while True:
                best_move = self.alphabeta(game, depth)
                # Record the deepest completed depth (useful for inspection).
                self.search_depth = depth
                depth += 1
        except SearchTimeout:
            pass  # Handle any actions required after timeout as needed

        # Return the best move from the last completed search iteration
        return best_move

    def alphabeta(self, game, depth, alpha=float("-inf"), beta=float("inf")):
        """Depth-limited minimax search with alpha-beta pruning
        (ALPHA-BETA-SEARCH from the AIMA text).

        Parameters
        ----------
        game : isolation.Board
            An instance of the Isolation game `Board` class representing the
            current game state

        depth : int
            Depth is an integer representing the maximum number of plies to
            search in the game tree before aborting

        alpha : float
            Alpha limits the lower bound of search on minimizing layers

        beta : float
            Beta limits the upper bound of search on maximizing layers

        Returns
        -------
        (int, int)
            The board coordinates of the best move found in the current search;
            (-1, -1) if there are no legal moves

        Raises
        ------
        SearchTimeout
            When self.time_left() drops below self.TIMER_THRESHOLD; the timer
            is checked at the top of every helper so the agent aborts promptly.
        """
        def terminal_state(legal_moves, depth):
            # Leaf test: no moves remain, or the depth budget is exhausted.
            if not legal_moves or depth <= 0:
                return True
            return False

        def min_value(game, depth, alpha, beta):
            if self.time_left() < self.TIMER_THRESHOLD:
                raise SearchTimeout()
            legal_moves = game.get_legal_moves()
            if terminal_state(legal_moves, depth):
                # Min nodes are the opponent's turn: evaluate from the
                # perspective of the player who just moved (now inactive).
                return self.score(game, game._inactive_player)
            min_val = float("inf")
            for coordinates in legal_moves:
                min_val = min(min_val,
                              max_value(
                                  game.forecast_move(coordinates),
                                  depth - 1,
                                  alpha,
                                  beta))
                if min_val <= alpha:
                    # Prune: the maximizer already has a better alternative.
                    return min_val
                beta = min(beta, min_val)
            return min_val

        def max_value(game, depth, alpha, beta):
            if self.time_left() < self.TIMER_THRESHOLD:
                raise SearchTimeout()
            legal_moves = game.get_legal_moves()
            if terminal_state(legal_moves, depth):
                return self.score(game, game._active_player)
            max_val = float("-inf")
            for coordinates in legal_moves:
                max_val = max(max_val,
                              min_value(
                                  game.forecast_move(coordinates),
                                  depth - 1,
                                  alpha,
                                  beta))
                if max_val >= beta:
                    # Prune: the minimizer already has a better alternative.
                    return max_val
                alpha = max(alpha, max_val)
            return max_val

        if self.time_left() < self.TIMER_THRESHOLD:
            raise SearchTimeout()

        legal_moves = game.get_legal_moves()
        if len(legal_moves) == 0:
            # Bug fix: this previously read ``return (-1. -1)``, which is the
            # arithmetic expression (-1.0) - 1 == -2.0 -- a float, not the
            # sentinel *tuple* callers expect.
            return (-1, -1)

        move = (-1, -1)
        for coordinates in legal_moves:
            val = min_value(game.forecast_move(coordinates),
                            depth - 1,
                            alpha,
                            beta)
            if val > alpha:
                alpha = val
                move = coordinates
        return move
| [
"ppnguyen91@gmail.com"
] | ppnguyen91@gmail.com |
70dea9681850a7d8176cc8fc66d927d5f1513732 | d9b48edd175aadbacf47c94872217b652e3a0add | /cvxpy/reductions/cone2cone/approximations.py | d4d8bff936acd93d4cb10a9c3aa978eb58ae6b55 | [
"Apache-2.0"
] | permissive | Fage2016/cvxpy | 106149f5f9a1b9bb957f41ecdca72194c4d065c3 | 2a23c109e49577d8da4b97bbf6c866400da105c4 | refs/heads/master | 2023-08-03T21:28:48.143370 | 2023-03-08T20:53:07 | 2023-03-08T20:53:07 | 86,796,794 | 0 | 0 | Apache-2.0 | 2023-03-18T04:56:08 | 2017-03-31T08:31:20 | C++ | UTF-8 | Python | false | false | 7,058 | py | """
Copyright 2022 the CVXPY developers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List, Tuple
import numpy as np
import cvxpy as cp
from cvxpy.atoms.affine.upper_tri import upper_tri
from cvxpy.constraints.constraint import Constraint
from cvxpy.constraints.exponential import (ExpCone, OpRelEntrConeQuad,
RelEntrConeQuad,)
from cvxpy.constraints.zero import Zero
from cvxpy.expressions.variable import Variable
from cvxpy.reductions.canonicalization import Canonicalization
from cvxpy.reductions.dcp2cone.canonicalizers.von_neumann_entr_canon import (
von_neumann_entr_canon,)
# Maps each approximable cone-constraint class to the set of cone types its
# canonicalization produces: the scalar relative-entropy cone is approximated
# with SOC constraints, the operator relative-entropy cone with PSD ones.
APPROX_CONES = {
    RelEntrConeQuad: {cp.SOC},
    OpRelEntrConeQuad: {cp.PSD}
}
def gauss_legendre(n):
    """Return the weights and nodes of the n-point Gauss-Legendre quadrature
    rule on the interval [0, 1].

    Uses the Golub-Welsch algorithm: the nodes are the eigenvalues of the
    symmetric tridiagonal Jacobi matrix of the Legendre recurrence, and each
    weight is proportional to the squared first component of the
    corresponding eigenvector.

    Parameters
    ----------
    n : int
        Number of quadrature points (n >= 1).

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        ``(w, x)`` with weights ``w`` (summing to 1) and nodes ``x`` in
        ascending order, both of length ``n``.
    """
    # Off-diagonal entries of the Jacobi matrix for Legendre polynomials.
    beta = 0.5 / np.sqrt(1.0 - (2.0 * np.arange(1, n, dtype=float)) ** (-2))
    jacobi = np.diag(beta, 1) + np.diag(beta, -1)
    eigvals, eigvecs = np.linalg.eigh(jacobi)
    # eigh returns eigenvalues in ascending order already; sorting is kept
    # as a cheap safeguard and to pair weights with their nodes explicitly.
    order = np.argsort(eigvals)
    x = np.sort(eigvals)
    # Weights on [-1, 1]: 2 * (first eigenvector component)^2, taken with
    # vectorized fancy indexing instead of a Python-level loop.
    w = 2.0 * eigvecs[0, order] ** 2
    # Affine map of nodes and weights from [-1, 1] onto [0, 1].
    x = (x + 1.0) / 2.0
    w = w / 2.0
    return w, x
def rotated_quad_cone(X: cp.Expression, y: cp.Expression, z: cp.Expression):
    """For each row i, constrain (X[i, :], y[i], z[i]) to the rotated
    quadratic cone

        { (x, y, z) : || x ||^2 <= y z, 0 <= (y, z) }.

    Nonnegativity of (y, z) is NOT imposed here; the caller must add it.
    """
    num_rows = y.size
    assert z.size == num_rows
    assert X.shape[0] == num_rows
    if len(X.shape) < 2:
        X = cp.reshape(X, (num_rows, 1))
    # Given y, z >= 0:  ||x||^2 <= y*z  <=>  ||(y - z, 2x)||_2 <= y + z,
    # which is the same reformulation used by quad_over_lin_canon.
    first_col = cp.reshape(y - z, (num_rows, 1))
    soc_matrix = cp.hstack((first_col, 2 * X))
    return cp.SOC(t=y + z, X=soc_matrix, axis=1)
def RelEntrConeQuad_canon(con: RelEntrConeQuad, args) -> Tuple[Constraint, List[Constraint]]:
    """
    Use linear and SOC constraints to approximately enforce
        con.x * log(con.x / con.y) <= con.z.

    We rely on an SOC characterization of 2-by-2 PSD matrices.
    Namely, a matrix
        [ a, b ]
        [ b, c ]
    is PSD if and only if (a, c) >= 0 and a*c >= b**2.
    That system of constraints can be expressed as
        a >= quad_over_lin(b, c).

    Note: constraint canonicalization in CVXPY uses a return format
    (lead_con, con_list) where lead_con is a Constraint that might be
    used in dual variable recovery and con_list consists of extra
    Constraint objects as needed.
    """
    # k: number of successive-squaring steps; m: quadrature points.
    k, m = con.k, con.m
    x, y = con.x, con.y
    n = x.size
    # Z has been declared as so to allow for proper vectorization
    Z = Variable(shape=(k+1, n))
    w, t = gauss_legendre(m)
    T = Variable(shape=(m, n))
    # Quadrature combination of the T rows approximates the scaled log term.
    lead_con = Zero(w @ T + con.z/2**k)
    constrs = [Zero(Z[0] - y)]
    for i in range(k):
        # The following matrix needs to be PSD.
        # [Z[i]  , Z[i+1]]
        # [Z[i+1], x     ]
        # The below recipe for imposing a 2x2 matrix as PSD follows from Pg-35, Ex 2.6
        # of Boyd's convex optimization. Where the constraint simply becomes a
        # rotated quadratic cone, see `dcp2cone/quad_over_lin_canon.py` for the very similar
        # scalar case
        epi = Z[i, :]
        stackedZ = Z[i+1, :]
        cons = rotated_quad_cone(stackedZ, epi, x)
        constrs.append(cons)
        # rotated_quad_cone does not impose nonnegativity, so add it here.
        constrs.extend([epi >= 0, x >= 0])
    for i in range(m):
        off_diag = -(t[i]**0.5) * T[i, :]
        # The following matrix needs to be PSD.
        # [ Z[k] - x - T[i] , off_diag      ]
        # [ off_diag        , x - t[i]*T[i] ]
        epi = (Z[k, :] - x - T[i, :])
        cons = rotated_quad_cone(off_diag, epi, x-t[i]*T[i, :])
        constrs.append(cons)
        constrs.extend([epi >= 0, x-t[i]*T[i, :] >= 0])
    return lead_con, constrs
def OpRelEntrConeQuad_canon(con: OpRelEntrConeQuad, args) -> Tuple[Constraint, List[Constraint]]:
    """Approximate the operator relative entropy cone constraint with PSD
    constraints: the matrix analogue of RelEntrConeQuad_canon, using the
    same (m-point quadrature, k squaring steps) scheme with 2x2 block
    matrices in place of scalar rotated-cone constraints.

    Returns (lead_con, constrs) in CVXPY's canonicalization format.
    """
    k, m = con.k, con.m
    X, Y = con.X, con.Y
    assert X.is_real()
    assert Y.is_real()
    assert con.Z.is_real()
    # One symmetric matrix variable per squaring step (Zs) and per
    # quadrature node (Ts).
    Zs = {i: Variable(shape=X.shape, symmetric=True) for i in range(k+1)}
    Ts = {i: Variable(shape=X.shape, symmetric=True) for i in range(m+1)}
    constrs = [Zero(Zs[0] - Y)]
    # The scheme requires symmetric arguments; enforce symmetry on any
    # argument that is not structurally symmetric already.
    if not X.is_symmetric():
        ut = upper_tri(X)
        lt = upper_tri(X.T)
        constrs.append(ut == lt)
    if not Y.is_symmetric():
        ut = upper_tri(Y)
        lt = upper_tri(Y.T)
        constrs.append(ut == lt)
    if not con.Z.is_symmetric():
        ut = upper_tri(con.Z)
        lt = upper_tri(con.Z.T)
        constrs.append(ut == lt)
    w, t = gauss_legendre(m)
    # Quadrature combination of the Ts approximates the scaled matrix log.
    lead_con = Zero(cp.sum([w[i] * Ts[i] for i in range(m)]) + con.Z/2**k)
    for i in range(k):
        # The following block matrix needs to be PSD.
        # [Z[i]  , Z[i+1]]
        # [Z[i+1], X     ]
        constrs.append(cp.bmat([[Zs[i], Zs[i+1]], [Zs[i+1].T, X]]) >> 0)
    for i in range(m):
        off_diag = -(t[i]**0.5) * Ts[i]
        # The following block matrix needs to be PSD.
        # [ Z[k] - X - T[i] , off_diag      ]
        # [ off_diag        , X - t[i]*T[i] ]
        constrs.append(cp.bmat([[Zs[k] - X - Ts[i], off_diag], [off_diag.T, X-t[i]*Ts[i]]]) >> 0)
    return lead_con, constrs
def von_neumann_entr_QuadApprox(expr, args):
    """Canonicalize von_neumann_entr, swapping its ExpCone constraint for
    the (m, k) quadrature-based relative-entropy approximation."""
    m, k = expr.quad_approx[0], expr.quad_approx[1]
    epi, initial_cons = von_neumann_entr_canon(expr, args)
    out = []
    for constraint in initial_cons:
        if not isinstance(constraint, ExpCone):
            out.append(constraint)
            continue
        # Expected to occur exactly once among the canonical constraints.
        lead, extra = RelEntrConeQuad_canon(constraint.as_quad_approx(m, k), None)
        out.append(lead)
        out.extend(extra)
    return epi, out
def von_neumann_entr_canon_dispatch(expr, args):
    """Route to the quadrature approximation when requested on the atom,
    otherwise use the exact canonicalization."""
    canon = von_neumann_entr_QuadApprox if expr.quad_approx else von_neumann_entr_canon
    return canon(expr, args)
class QuadApprox(Canonicalization):
    """Canonicalization pass replacing (Op)RelEntrConeQuad constraints with
    their SOC/PSD quadrature approximations."""

    # Dispatch table from constraint class to its canonicalizer.
    CANON_METHODS = {
        RelEntrConeQuad: RelEntrConeQuad_canon,
        OpRelEntrConeQuad: OpRelEntrConeQuad_canon,
    }

    def __init__(self, problem=None) -> None:
        super().__init__(problem=problem, canon_methods=QuadApprox.CANON_METHODS)
| [
"noreply@github.com"
] | Fage2016.noreply@github.com |
6180214e717d6e06e85875f87e397b17f6826576 | d61711ceb3c505067956b37be08cf9edab2ee4d4 | /I0320014_Soal2_Tugas4.py | 7916dee501d70ba0813698ae9479a45c38c7d618 | [] | no_license | audreyalexandra/Audrey-Alexandra_I0320014_Wildan_Tugas4 | de8b4debb0a66be097360bf530c0fc4e41c13b82 | 46fc9966a42c4e0305c86e79bdd37843e5b7912f | refs/heads/main | 2023-03-15T05:02:07.011153 | 2021-03-27T01:34:01 | 2021-03-27T01:34:01 | 351,607,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | bil1 = int(input("Masukan Bilangan bulat pertama : "))
bil2 = int(input("Masukan Bilangan bulat kedua : "))
print("Hasil %d // %d = %d" % (bil1, bil2, bil1//bil2)) | [
"audreyalexandra18@gmail.com"
] | audreyalexandra18@gmail.com |
9770331cc4ed8b9caba652786a87ec8aced75466 | e94c7bd97d8b8b3b2945d357521bd346e66d5d75 | /test/lmp/script/gen_txt/test_signature.py | 1a75301a671acbdfbd9ac9ea870cb204b57d9bc1 | [
"Beerware"
] | permissive | ProFatXuanAll/language-model-playground | 4d34eacdc9536c57746d6325d71ebad0d329080e | ec4442a0cee988a4412fb90b757c87749b70282b | refs/heads/main | 2023-02-19T16:21:06.926421 | 2022-09-25T13:35:01 | 2022-09-25T13:35:01 | 202,471,099 | 11 | 26 | NOASSERTION | 2023-02-16T06:39:40 | 2019-08-15T03:57:23 | Python | UTF-8 | Python | false | false | 1,040 | py | """Test :py:mod:`lmp.script.gen_txt` signatures."""
import argparse
import inspect
from inspect import Parameter, Signature
from typing import List
import lmp.script.gen_txt
def test_module_method() -> None:
    """Ensure module methods' signatures."""
    # Both entry points take a single positional-or-keyword `argv` argument.
    argv_param = Parameter(
        annotation=List[str],
        default=Parameter.empty,
        kind=Parameter.POSITIONAL_OR_KEYWORD,
        name='argv',
    )

    assert hasattr(lmp.script.gen_txt, 'parse_args')
    assert inspect.isfunction(lmp.script.gen_txt.parse_args)
    assert inspect.signature(lmp.script.gen_txt.parse_args) == Signature(
        parameters=[argv_param],
        return_annotation=argparse.Namespace,
    )

    assert hasattr(lmp.script.gen_txt, 'main')
    assert inspect.isfunction(lmp.script.gen_txt.main)
    assert inspect.signature(lmp.script.gen_txt.main) == Signature(
        parameters=[argv_param],
        return_annotation=None,
    )
"ProFatXuanAll@gmail.com"
] | ProFatXuanAll@gmail.com |
da6a4ecd79cdde4a64fed17365c2700d3e0e3243 | b801f7f8258660ab5e186aa64108f9a1e481c785 | /eithne.py | ac1437d6922c8f5dfbb3580b43d10ed6519d4137 | [] | no_license | aureoares/eithne | 8ab9c094c6bf49861e86fda9f6a23b2a4e5bf844 | 4d8020753a272d283b7e14c6ae9b5129853dd17f | refs/heads/master | 2021-01-22T06:58:46.730744 | 2010-04-06T18:07:29 | 2010-04-06T18:07:29 | 37,327,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,772 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import ConfigParser
#import optparse
import pickle
#import BeautifulSoup
import MySQLdb
import SocketServer
import BaseHTTPServer
import SimpleHTTPServer
import ansley
def loadConf(conf_file):
"""Carga la configuración de un fichero y la guarda en un diccionario."""
configuration = {} # Diccionario que contendrá la configuración.
conf = ConfigParser.ConfigParser()
try:
conf.readfp(file(conf_file))
except:
print "Eithne: no se pudo leer el fichero de configuración '%s' ." % conf_file
return
configuration['server_addr'] = conf.get('SERVER', 'Address'.lower())
try:
configuration['server_port'] = conf.getint('SERVER','Port'.lower())
except:
print "Catherine: valor incorrecto para el puerto del servidor, se usará el 8000."
configuration['server_port'] = 8000
# Configuración para la conexión con la base de datos.
configuration['dbserver'] = conf.get('DATABASE', 'Server'.lower())
configuration['db'] = conf.get('DATABASE', 'Database'.lower())
configuration['dbuser'] = conf.get('DATABASE', 'User')
configuration['dbpasswd'] = conf.get('DATABASE', 'Passwd')
return configuration
class MiManejador(BaseHTTPServer.BaseHTTPRequestHandler):
    """Handler for the HTTP server.
    Implements PUT and GET methods adapted to the application."""

    def do_PUT(self):
        """PUT: receive a pickled string, recover the object with the
        computer's information, and store it in the database."""
        print 'Conectado PUT '+str(self.client_address)
        self.send_response(200, 'OK')
        self.end_headers()
        self.request.close()
        # NOTE(review): `database` is assigned but never used below.
        database = str(self.client_address[0])
        print 'Recogiendo datos...'
        computer_pickled = str(self.rfile.read())
        # SECURITY: pickle.loads on data read from the network is unsafe --
        # a malicious client can execute arbitrary code. Consider a safe
        # serialization format (e.g. JSON) instead.
        computer_object = pickle.loads(computer_pickled)
        traductor = ansley.Ansley(computer_object)
        print 'Introduciendo datos en la Base de Datos...'
        traductor.ListToDb(configuration['dbuser'], configuration['dbpasswd'], configuration['db'], configuration['dbserver'], configuration['network_id'])
        #traductor.printNodes()
        #traductor.printNodeProperties(1)
        print 'Petición finalizada.'

    def do_GET(self):
        """GET: receive a path of the form /network/computer and return the
        corresponding XML report."""
        print 'Conectado GET '+str(self.client_address)
        self.send_response(200, 'OK')
        self.end_headers()
        try:
            network_id = self.path.split('/')[1]
            computer_id = self.path.split('/')[2]
        except:
            # Bare except kept: any malformed path falls through here.
            self.wfile.write('Ruta incorrecta.')
            self.request.close()
            return
        # Connect to the database.
        try:
            connection = MySQLdb.connect(user=configuration['dbuser'], passwd=configuration['dbpasswd'], db=configuration['db'], host=configuration['dbserver'])
        except:
            # NOTE(review): `self.database` is never defined on this class;
            # this error path would itself raise AttributeError -- confirm.
            print "Eithne: No se pudo conectar con la base de datos: %s." % self.database
            return
        cursor = connection.cursor()
        # Parameterized query: computer must belong to the requested network.
        cursor.execute('''select IDMem from MEMBERS where Computer=%s and Network=%s''', (computer_id, network_id))
        if(cursor.rowcount == 0):
            self.wfile.write('El equipo no existe o no pertenece a la red.')
            self.request.close()
            return
        computer = []
        traductor = ansley.Ansley(computer)
        traductor.DbToList(configuration['dbuser'], configuration['dbpasswd'], configuration['db'], configuration['dbserver'], computer_id)
        document = traductor.ListToXml()
        pretty_document = document.prettify()
        pretty_document = '<?xml version="1.0" standalone="yes" ?>'+pretty_document
        self.wfile.write(pretty_document)
        self.request.close()
class ThreadingHTTPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer, BaseHTTPServer.HTTPServer):
    # Thread-per-request HTTP server: ThreadingMixIn dispatches each request
    # on its own thread, so a slow PUT/GET does not block other clients.
    pass
if __name__ == "__main__":
    # NOTE(review): indentation reconstructed from context -- confirm nesting.
    config_file='/etc/eithne/eithne.conf'
    configuration = loadConf(config_file)
    # Connect to the database.
    try:
        connection = MySQLdb.connect(user=configuration['dbuser'], passwd=configuration['dbpasswd'], db=configuration['db'], host=configuration['dbserver'])
    except:
        print "Eithne: No se pudo conectar con la base de datos: %s." % configuration['db']
        exit()
    cursor = connection.cursor()
    cursor.execute('''set character_set_client = utf8''')
    cursor.execute('''set character_set_results = utf8''')
    # Ask for the network's data.
    network_name = raw_input("Introduzca un nombre para identificar la red: ")
    # Check whether the network already exists in the database.
    # If it does, ask whether to replace it, add to it, or pick another name.
    net_ok = 'n'
    while(net_ok != 'y'):
        cursor.execute('''select IDNet from NETWORKS where Name like %s''', (network_name,))
        if(cursor.rowcount > 0):
            row = cursor.fetchone()
            net_id = row[0]
            print "La red %s ya existe en la base de datos." % network_name
            net_ok = raw_input("¿Sustituir? (y/n/a): ") # yes / no / add
            if(net_ok == 'y'):
                print "Eliminando la red anterior..."
                # Find the computers on the network.
                cursor.execute('''select Computer from MEMBERS where Network=%s''', (net_id,))
                computers = cursor.fetchall()
                # For each computer, find its devices.
                for computer in computers:
                    cursor.execute('''select IDDev from DEVICES where Computer=%s''', (computer[0],))
                    devices = cursor.fetchall()
                    # Delete each device's properties.
                    for device in devices:
                        cursor.execute('''delete from PROPERTIES where Device=%s''', (device[0],))
                    # Delete the devices.
                    cursor.execute('''delete from DEVICES where Computer=%s''', (computer[0],))
                    # Remove the computer/network membership.
                    cursor.execute('''delete from MEMBERS where Computer=%s and Network=%s''', (computer[0],net_id))
                    # Delete the computer.
                    cursor.execute('''delete from COMPUTERS where IDCom=%s''', (computer[0],))
                # Delete the network itself.
                cursor.execute('''delete from NETWORKS where IDNet=%s''', (net_id,))
                connection.commit()
            else:
                if(net_ok == 'a'):
                    # 'a' (add): reuse the existing network as-is.
                    net_ok = 'y'
                else:
                    network_name = raw_input("Introduzca un nombre para identificar la red: ")
        else:
            net_ok = 'y'
    network_desc = raw_input("Descripción de la red: ")
    network_addr = raw_input("Dirección IP de la red: ")
    network_mask = raw_input("Máscara de red: ")
    print "Creando la nueva red..."
    cursor.execute('''insert into NETWORKS (Name, Description, IP, Netmask, Parent) values (%s,%s,%s,%s,NULL)''', (network_name, network_desc, network_addr, network_mask))
    # network_id is read later by MiManejador.do_PUT via the module-level
    # `configuration` dict.
    configuration['network_id'] = cursor.lastrowid
    connection.commit()
    Clase_Servidor = ThreadingHTTPServer
    Clase_Manejador = MiManejador
    Dir_Servidor = (configuration['server_addr'], configuration['server_port'])
    httpd = Clase_Servidor(Dir_Servidor, Clase_Manejador)
    print "Iniciando servidor HTTP (%s:%s) ID: %s." % (configuration['server_addr'], configuration['server_port'], configuration['network_id'])
    httpd.serve_forever()
| [
"?ureo Ares@localhost"
] | ?ureo Ares@localhost |
26d03fcefa5d70539bb6d822b5978722de681a0c | a9e578a66a4706dedf83838ec3288adb893e57fd | /src/impute.py | e82f2af49047a8a2aa131493f61070eb732b20a6 | [] | no_license | jgondin/predict-water-pump-failure | 4e741c9385717c9f802e7ccc5fbc1c5032b81129 | 73d7664fb6b0ab605b6b3d6605ede256a56024fa | refs/heads/master | 2020-12-25T17:16:36.944533 | 2016-07-09T22:53:14 | 2016-07-09T22:53:14 | 62,754,230 | 1 | 0 | null | 2016-07-06T21:22:52 | 2016-07-06T21:22:52 | null | UTF-8 | Python | false | false | 13,041 | py | import pandas as pd
#import matplotlib.pyplot as plt
#import statsmodels.api as sm
#import seaborn as sbrn
import numpy as np
#import re
#import trainetime
import pickle
#from collections import OrderedDict
#import sklearn
def imputeTrain(trn):
    """
    Impute missing numeric values in the "water pump failure" training set.

    Returns a tuple of two items:
      1. A copy of the training frame with imputed columns:
         gps_height, population, latitude, longitude, construction_year.
         Zeroes (and the erroneous (0, -2e-08) lat/long pair) are treated as
         missing, then filled with a hierarchical geographically-nearest-neighbor
         mean: subvillage > ward > lga > region_code.
      2. A nested dict ``imputeMap[column][geography][location] -> mean`` of the
         trained fill values, intended to inform unseen test observations
         (see ``fillTest``).

    Raises:
        Exception: if any required numeric or geography column is missing.
    """
    train = trn.copy()
    # Required-columns check (order matches the historical error message).
    required = ['gps_height', 'population', 'latitude', 'longitude',
                'construction_year', 'subvillage', 'ward', 'lga', 'region_code']
    if not set(required) <= set(train.columns):
        # NOTE: was `<` (strict subset), which wrongly rejected a frame with
        # exactly the required columns and nothing extra.
        raise Exception('Missing Columns! Please make sure all of the following '
                        'columns are in your training frame: \n' + str(required))

    # Fill order matters: each column's later geography means are computed on
    # data already partially filled by its earlier (nearer) geographies.
    numeric_cols = ['population', 'gps_height', 'construction_year',
                    'latitude', 'longitude']
    geo_hierarchy = ['subvillage', 'ward', 'lga', 'region_code']
    imputeMap = {col: {geog: {} for geog in geo_hierarchy} for col in numeric_cols}

    # Treat sentinel zeroes (and near-zero populations) as missing.
    train['population'] = train['population'].replace({0: np.nan, 1: np.nan, 2: np.nan})
    train['gps_height'] = train['gps_height'].replace({0: np.nan})
    train['construction_year'] = train['construction_year'].astype('int64')
    train.loc[train.construction_year == 0, ['construction_year']] = np.nan
    # The (longitude=0, latitude=-2e-08) pair is a known bogus GPS reading.
    bad_gps = (train.longitude == 0) & (train.latitude == -2.000000e-08)
    train.loc[bad_gps, ['latitude', 'longitude']] = np.nan

    def _record_geog_means(col, geog):
        """Store the current per-`geog` group means of `col` (NaN groups skipped)."""
        means = train.groupby(train[geog])[col].mean().dropna()
        imputeMap[col][geog].update(means.to_dict())

    for col in numeric_cols:
        for geog in geo_hierarchy:
            # Record means *before* filling at this level, then fill.
            _record_geog_means(col, geog)
            train[col] = train[col].fillna(
                train.groupby([geog])[col].transform('mean'))
        if col == 'construction_year':
            # Back to int so we do not explode the number of distinct years.
            train['construction_year'] = train['construction_year'].astype('int64')

    return train, imputeMap
def generateMap(geog, col, train, imputeMap):
    """Record the per-`geog` group means of `col` from `train` into
    ``imputeMap[col][geog]`` (groups whose mean is NaN are skipped) and
    return the updated map."""
    group_means = train.groupby(train[geog])[col].mean().dropna()
    # Series index holds the location labels, values hold the means.
    imputeMap[col][geog].update(group_means.to_dict())
    return imputeMap
def fillTest(tst, imputeMap):
    """
    Fill missing numeric values in a test frame using a trained reference map.

    Inputs:
        tst: test DataFrame containing the same numeric and geography columns
             as the training frame.
        imputeMap: nested dict ``[column][geography][location] -> mean``
             produced by ``imputeTrain``.

    Returns a copy of the test frame where zeroes (and the bogus (0, -2e-08)
    lat/long pair) are treated as missing and filled from the reference map,
    trying geographies nearest-first: subvillage > ward > lga > region_code.
    Row and column order are preserved (the old implementation shuffled rows
    and required an ``id`` column to restore them; neither is needed now).

    Raises:
        Exception: if any required column is missing.
    """
    test = tst.copy()
    required = ['gps_height', 'population', 'latitude', 'longitude',
                'construction_year', 'subvillage', 'ward', 'lga', 'region_code']
    if not set(required) <= set(test.columns):
        # NOTE: was `<` (strict subset) — see imputeTrain for the same fix.
        raise Exception('Missing Columns! Please make sure all of the following '
                        'columns are in your test frame: \n' + str(required))

    numeric_cols = ['gps_height', 'population', 'latitude', 'longitude',
                    'construction_year']
    geo_hierarchy = ['subvillage', 'ward', 'lga', 'region_code']

    # Treat sentinel zeroes (and near-zero populations) as missing.
    test['population'] = test['population'].replace({0: np.nan, 1: np.nan, 2: np.nan})
    test['gps_height'] = test['gps_height'].replace({0: np.nan})
    test['construction_year'] = test['construction_year'].astype('int64')
    test.loc[test.construction_year == 0, ['construction_year']] = np.nan
    bad_gps = (test.longitude == 0) & (test.latitude == -2.000000e-08)
    test.loc[bad_gps, ['latitude', 'longitude']] = np.nan

    for col in numeric_cols:
        for geog in geo_hierarchy:
            missing = test[col].isnull()
            if not missing.any():
                break  # nothing left to fill for this column
            # Map each still-missing row's location label to its trained mean;
            # labels absent from the map stay NaN and fall through to the
            # next (coarser) geography.
            test.loc[missing, col] = test.loc[missing, geog].map(imputeMap[col][geog])

    # Back to int; like the original, this raises if any year is still NaN.
    test['construction_year'] = test['construction_year'].astype('int64')
    return test
def extractMap(imap, col, geog):
    """
    Extract the trained reference values for one impute column and one
    geography level from the nested reference map.

    Returns a DataFrame with columns ``[col, geog]``, indexed by the
    geography labels.
    """
    ref = pd.DataFrame.from_dict(imap[col][geog], orient='index')
    ref[geog] = ref.index          # carry the labels as a joinable column
    ref.columns = [col, geog]
    return ref
| [
"ashirwad08@yahoo.com"
] | ashirwad08@yahoo.com |
36e937ed9a02e89828503fe4075624dadcde6ed4 | bf3bc3abdb7b2660c02bc1375ba146461b188364 | /modules/loto/loto.py | 7328f3cabee6872a53226de8c3ab6bb8d672bf29 | [] | no_license | JeremyMet/matrix_bot | 39a3d942ad091f49445b5e5bcd8600175c919b8f | be76fb8276d031dc796ce3c329c871ec8854c30b | refs/heads/master | 2023-05-13T00:36:39.384341 | 2021-05-01T19:44:03 | 2021-05-01T19:44:03 | 143,750,633 | 0 | 0 | null | 2023-05-01T22:16:13 | 2018-08-06T15:49:40 | Python | UTF-8 | Python | false | false | 7,150 | py | import json;
import random;
import datetime;
from collections import namedtuple;
import os.path
import pickle
Draw_Time = namedtuple("Draw_Time", "hour minute");
class loto(object):
    """Daily lottery game for a chat bot: players submit number combinations
    ("bets") before a scheduled daily draw; matches are scored via ``pt_table``
    and scores/bets/draw-log are persisted to disk with pickle."""

    # Points awarded per count of matched numbers (0..6 correct).
    pt_table = {} ;
    pt_table[0] = 0 ;
    pt_table[1] = 1 ;
    pt_table[2] = 5 ;
    pt_table[3] = 75 ;
    pt_table[4] = 3400 ;
    pt_table[5] = 800000 ;
    pt_table[6] = 10000000 ;

    def __init__(self, hour=0, minute=0, scoreboard_file="./modules/loto/scoreboard_file.dic", \
                 dailybet_file="./modules/loto/dailybet_file.dic", log_file = "./modules/loto/log.dic", nb_numbers=49, combination_length=6):
        """Set up persistence paths, draw parameters and load any saved state.

        hour/minute: scheduled daily draw time.
        nb_numbers: numbers are drawn from 1..nb_numbers.
        combination_length: how many numbers per draw/bet.
        """
        self.scoreboard_file = scoreboard_file;
        self.dailybet_file = dailybet_file;
        self.log_file = log_file;
        self.nb_numbers = nb_numbers;
        self.combination_length = combination_length;
        self.scoreboard = {} ;
        self.dailybet = {} ;
        # Initialise "last draw" to the previous day. E.g. if the script starts
        # on June 1st with a draw scheduled at 22:00, the (fictitious) previous
        # draw is set to May 31st at 22:00. The wrapper module loto_bot.py
        # checks every second whether 24h have elapsed since the last draw, so
        # even if the script starts at 21:45 on June 1st, the draw still
        # happens at 22:00 that same day.
        self.draw_time = Draw_Time(hour, minute);
        if os.path.isfile(self.log_file):
            with open(self.log_file, "rb") as pickle_file:
                self.log = pickle.load(pickle_file);
        else:
            tmp_datetime = datetime.datetime.now();
            self.log = {} ;
            self.log["last_draw"] = datetime.datetime(year=tmp_datetime.year, month = tmp_datetime.month, \
                day = tmp_datetime.day, hour = self.draw_time.hour, minute = self.draw_time.minute)-datetime.timedelta(days=1) ;
        self.load_previous_state();
        random.seed(datetime.datetime.now()); # Seed initialisation

    def set_scoreboard_file(self, scoreboard_file):
        """Change the scoreboard persistence path."""
        self.scoreboard_file = scoreboard_file;
    def set_log_file(self, log_file):
        """Change the draw-log persistence path."""
        self.log_file = log_file;
    def set_dailybet_file(self, dailybet_file):
        """Change the daily-bets persistence path."""
        self.dailybet_file = dailybet_file;
    # def set_draw_time(self, hour, minute):
    #	self.draw_time = Draw_Time(hour, minute);
    def get_draw_time(self):
        """Return the scheduled draw time as a Draw_Time namedtuple."""
        return self.draw_time;
    def get_log(self):
        """Return the draw log dict (holds "last_draw" datetime)."""
        return self.log;

    def load_previous_state(self):
        """Reload scoreboard, daily bets and log from disk, if present."""
        if os.path.isfile(self.scoreboard_file):
            with open(self.scoreboard_file, "rb") as pickle_file:
                self.scoreboard = pickle.load(pickle_file);
        if os.path.isfile(self.dailybet_file):
            with open(self.dailybet_file, "rb") as pickle_file:
                self.dailybet = pickle.load(pickle_file);
        if os.path.isfile(self.log_file):
            with open(self.log_file, "rb") as pickle_file:
                self.log = pickle.load(pickle_file);

    def save_current_state(self):
        """Persist scoreboard, daily bets and log to disk."""
        with open(self.scoreboard_file, "wb") as pickle_file:
            pickle.dump(self.scoreboard, pickle_file);
        with open(self.dailybet_file, "wb") as pickle_file:
            pickle.dump(self.dailybet, pickle_file);
        with open(self.log_file, "wb") as pickle_file:
            pickle.dump(self.log, pickle_file);

    def draw(self):
        """Draw ``combination_length`` distinct numbers in 1..nb_numbers and
        stamp today's scheduled draw time into the log."""
        self.current_result = set();
        while(len(self.current_result) < self.combination_length):
            rd = random.randint(1, self.nb_numbers);
            self.current_result.add(rd);
        tmp_datetime = datetime.datetime.now();
        self.log["last_draw"] = datetime.datetime(year=tmp_datetime.year, month = tmp_datetime.month, day = tmp_datetime.day, hour = self.draw_time.hour, minute = self.draw_time.minute) ;
        #self.current_result = {1,2,3,8,33,2}; # todo remove! (debug fixture)

    def check_result(self):
        """Run the draw, score every daily bet, update the scoreboard and
        return a French announcement string. Resets the daily bets."""
        self.draw(); # perform the draw
        ret = "\U0001F3B2 Le tirage du {} est {}. \nBravo à".format(datetime.datetime.today().strftime('%d-%m-%Y'), self.current_result);
        is_there_a_winner = False;
        for key, value in self.dailybet.items():
            tmp_nb_pt = len(self.current_result & value);
            nb_pt = loto.pt_table[tmp_nb_pt];
            if nb_pt > 0:
                is_there_a_winner = True;
                ret += "\n\t- {} avec {} point(s) ({} nombre(s) correct(s))".format(key.capitalize(), nb_pt, tmp_nb_pt)
            if key in self.scoreboard.keys(): # participants with zero points are still added to the scoreboard.
                self.scoreboard[key] += nb_pt;
            else:
                self.scoreboard[key] = nb_pt;
        self.dailybet = {} ; # reset the bets ;)
        if is_there_a_winner:
            return ret;
        else:
            return "\U0001F3B2 Pas de vainqueurs aujourd'hui ({}) !\nLe tirage était le suivant : {}.".format(datetime.datetime.today().strftime('%d-%m-%Y'), self.current_result);

    def bet(self, sender, proposition):
        """Register ``sender``'s combination, e.g. "(1,2,3,4,5,6)".

        Returns a French confirmation/error message, or "" for input that is
        not even shaped like a combination (silently ignored by the caller).
        """
        # check if proposition is well-formed
        proposition = proposition.replace(" ", "");
        # NOTE(review): an empty proposition raises IndexError on
        # proposition[0] — confirm callers never pass "" or add a guard.
        if (proposition[0] != "(" or proposition[-1] != ")"):
            return "";
        proposition = proposition[1:-1];
        proposition_array = proposition.split(",");
        for i in proposition_array:
            if not(i.isnumeric()):
                return "" # non-numeric entries are not handled
        if (len(proposition_array) != self.combination_length):
            return "\U0001F3B2 La combinaison doit être de longueur {}.".format(self.combination_length);
        # Out-of-range values are mapped to 0 so a single check catches them.
        proposition_array = [(int(i) if (int(i) <= self.nb_numbers) else 0) for i in proposition_array];
        if (0 in proposition_array):
            return "\U0001F3B2 Les valeurs doivent être comprises entre 1 et {}.".format(self.nb_numbers);
        proposition_set = set(proposition_array);
        if (len(proposition_set) != self.combination_length):
            return "\U0001F3B2 Les propositions ne doivent pas contenir deux fois le même nombre."
        # proposition is well-formed,
        self.dailybet[sender] = proposition_set;
        return "\U0001F3B2 La proposition {} de {} a bien été prise en compte.".format(self.dailybet[sender], sender.capitalize());

    def get_dailybet(self):
        """Return a French listing of today's players and their grids."""
        ret = "\U0001F3B2 Joueurs Participants - Grille";
        for key, value in self.dailybet.items():
            ret = "{}\n\t- {}: {} ".format(ret, key.capitalize(), value);
        return ret;

    #todo sort in ascending order
    def get_scoreboard(self):
        """Return the scoreboard sorted by points (descending), with medal
        emoji for the top three."""
        medals_array = ["\U0001F947", "\U0001f948", "\U0001f949"] ;
        ret = "\U0001F3B2 Tableau des Scores :";
        cpt = 0 ;
        for key_value in sorted(self.scoreboard.items(), key=lambda x: x[1], reverse=True):
            ret = "{}\n\t- {}: {}".format(ret, key_value[0].capitalize(), key_value[1]);
            if cpt < 3:
                ret+= (" ({})".format(medals_array[cpt]));
            cpt+=1;
        return ret;
| [
"metairie.jeremy@gmail.com"
] | metairie.jeremy@gmail.com |
e578ca55d3417235a395630d3703b4a15be034e3 | 0d16ed8dffd7b951abd66bf895c254749ed8195a | /Lab4_Data_Structure__And_Iternation/List_questions/Three_largest_Four_smallest.py | d88b064b48df1c3232635c99c3140107e8c2ceb9 | [] | no_license | sanjiv576/LabExercises | 1cfea292e5b94537722b2caca42f350ab8fc7ab8 | ef5adb4fbcff28162fe9e3e80782172b93127b33 | refs/heads/master | 2023-07-22T08:13:04.771784 | 2021-08-17T12:23:21 | 2021-08-17T12:23:21 | 372,397,796 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | # Write a Python program to get the largest number from a list.
# Write a Python program to get the smallest number from a list.
# Report the extreme values of the list: largest first, then smallest.
ThreeFour = [23, 11, 5, 12, 4]
largest, smallest = max(ThreeFour), min(ThreeFour)
print(largest)
print(smallest)
"83968516+sanjiv576@users.noreply.github.com"
] | 83968516+sanjiv576@users.noreply.github.com |
fb2ac13cd6ae9f98927d9133160b287216fab5b2 | 2d7c21a793c8080a090ce8c9f05df38f6477c7c7 | /creator/referral_tokens/apps.py | 1e50dca4c827c703ff396ac486c4cdb4d6185f08 | [
"Apache-2.0"
] | permissive | kids-first/kf-api-study-creator | c40e0a8a514fd52a857e9a588635ef76d16d5bc7 | ba62b369e6464259ea92dbb9ba49876513f37fba | refs/heads/master | 2023-08-17T01:09:38.789364 | 2023-08-15T14:06:29 | 2023-08-15T14:06:29 | 149,347,812 | 3 | 0 | Apache-2.0 | 2023-09-08T15:33:40 | 2018-09-18T20:25:38 | Python | UTF-8 | Python | false | false | 104 | py | from django.apps import AppConfig
class ReferralTokensConfig(AppConfig):
    """Django app configuration for the referral_tokens application."""
    name = "referral_tokens"
| [
"xzhu.fg@gmail.com"
] | xzhu.fg@gmail.com |
667fbc214431aa1477babf4a3224675a2d8da21f | 63e3d4bfd14b1bc7eb0fdb735312dfa23210051e | /credentials.py | aeba98bbb4d23c799c98f05cc2981ff77b91febc | [] | no_license | Arcadonauts/BES-2018 | 1ec7331c4882603acdafa261a5c92de8d768fa42 | c73dc15e7178ea565cde24702ac69611e31324cc | refs/heads/master | 2023-02-21T03:48:34.418590 | 2022-01-08T05:15:42 | 2022-01-08T05:15:42 | 128,519,979 | 0 | 0 | null | 2023-02-02T03:47:29 | 2018-04-07T10:59:09 | JavaScript | UTF-8 | Python | false | false | 1,127 | py | from twitter import OAuth
from oauth2client.file import Storage
yieldcurve = OAuth(
token = "952297957788971008-kecr8AFjWcsTPMoXfsmmbnp7gbldcX2",
token_secret = "GMEDO1ZJEbPuI2LWSd1b7Kk95NymnreYQ6xrR7KdT7yXB",
#owner = "DailyYieldCurve",
#owner_id = "952297957788971008",
consumer_key = "DOWlrAasc9EE55AdBu273lqOu",
consumer_secret = "vw7LdB54TghtBLHNjS7E7GEUz7I05zhIQonOvnpSocMvmRKvtY"
)
tweemail = OAuth(
token = "959943269743554560-Yfvjnh9VyExbApCajMBfA2YMBADPb1h",
token_secret = "CyQRXxj4NWoJQawxKceUYmK3bXsvA9wGMYF55R7WS4tEU",
#owner = "DailyYieldCurve",
#owner_id = "952297957788971008",
consumer_key = "Zv1xXykj2ERarXluzWBQxhnWc",
consumer_secret = "RyRjeq4gXc0Ab3Ir6t03korCv6CPUw1TfF8n7qxcbV67ZjGjDI"
)
x = 'C:\\Users\\Nick\\Documents\\GitHub\\BES-2018\\credentials.py'
if __file__ == x or __file__ == (x+'c'):
home = 'C:/Users/Nick/Documents/GitHub/BES-2018/'
else:
home = '/home/NickFegley/mysite/'
json = home + 'tweetmail.json'
fegleyapi = Storage(json).get()
if not fegleyapi: # If at first you don't succeed...
fegleyapi = Storage(json).get() | [
"fegleynick@gmail.com"
] | fegleynick@gmail.com |
d614a2dc512cfe4f235594be6aaf24e0db8ac8fd | bc8b9ca228fb90ce3e0aefd53b135cdd68329caa | /telethon/events/chataction.py | 2927c8d0f0b65e052e3609fbb2fae46a45097883 | [
"MIT"
] | permissive | huangdehui2013/Telethon | 1147ce9acba4db087efa39514a7cab6276becb92 | dd954b8fbd1957844c8e241183764c3ced7698a9 | refs/heads/master | 2020-03-16T18:49:25.989083 | 2018-05-10T07:44:25 | 2018-05-10T07:44:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,657 | py | from .common import EventBuilder, EventCommon, name_inner_event
from .. import utils
from ..tl import types, functions
@name_inner_event
class ChatAction(EventBuilder):
    """
    Represents an action in a chat (such as user joined, left, or new pin).
    """
    def build(self, update):
        """Translate a raw Telegram update into a ChatAction.Event, or return
        ``None`` (implicitly) for updates this builder does not handle."""
        if isinstance(update, types.UpdateChannelPinnedMessage) and update.id == 0:
            # Telegram does not always send
            # UpdateChannelPinnedMessage for new pins
            # but always for unpin, with update.id = 0
            event = ChatAction.Event(types.PeerChannel(update.channel_id),
                                     unpin=True)
        elif isinstance(update, types.UpdateChatParticipantAdd):
            event = ChatAction.Event(types.PeerChat(update.chat_id),
                                     added_by=update.inviter_id or True,
                                     users=update.user_id)
        elif isinstance(update, types.UpdateChatParticipantDelete):
            event = ChatAction.Event(types.PeerChat(update.chat_id),
                                     kicked_by=True,
                                     users=update.user_id)
        elif (isinstance(update, (
                types.UpdateNewMessage, types.UpdateNewChannelMessage))
                and isinstance(update.message, types.MessageService)):
            # Service messages carry the chat action as ``message.action``;
            # dispatch on its concrete type.
            msg = update.message
            action = update.message.action
            if isinstance(action, types.MessageActionChatJoinedByLink):
                event = ChatAction.Event(msg,
                                         added_by=True,
                                         users=msg.from_id)
            elif isinstance(action, types.MessageActionChatAddUser):
                event = ChatAction.Event(msg,
                                         added_by=msg.from_id or True,
                                         users=action.users)
            elif isinstance(action, types.MessageActionChatDeleteUser):
                event = ChatAction.Event(msg,
                                         kicked_by=msg.from_id or True,
                                         users=action.user_id)
            elif isinstance(action, types.MessageActionChatCreate):
                event = ChatAction.Event(msg,
                                         users=action.users,
                                         created=True,
                                         new_title=action.title)
            elif isinstance(action, types.MessageActionChannelCreate):
                event = ChatAction.Event(msg,
                                         created=True,
                                         users=msg.from_id,
                                         new_title=action.title)
            elif isinstance(action, types.MessageActionChatEditTitle):
                event = ChatAction.Event(msg,
                                         users=msg.from_id,
                                         new_title=action.title)
            elif isinstance(action, types.MessageActionChatEditPhoto):
                event = ChatAction.Event(msg,
                                         users=msg.from_id,
                                         new_photo=action.photo)
            elif isinstance(action, types.MessageActionChatDeletePhoto):
                event = ChatAction.Event(msg,
                                         users=msg.from_id,
                                         new_photo=True)
            elif isinstance(action, types.MessageActionPinMessage):
                # Telegram always sends this service message for new pins
                event = ChatAction.Event(msg,
                                         users=msg.from_id,
                                         new_pin=msg.reply_to_msg_id)
            else:
                return
        else:
            return

        # Forward the pre-fetched entity cache so properties below can
        # resolve users without extra requests where possible.
        event._entities = update._entities
        return self._filter_event(event)

    class Event(EventCommon):
        """
        Represents the event of a new chat action.

        Members:
            action_message (`MessageAction <https://lonamiwebs.github.io/Telethon/types/message_action.html>`_):
                The message invoked by this Chat Action.

            new_pin (`bool`):
                ``True`` if there is a new pin.

            new_photo (`bool`):
                ``True`` if there's a new chat photo (or it was removed).

            photo (:tl:`Photo`, optional):
                The new photo (or ``None`` if it was removed).


            user_added (`bool`):
                ``True`` if the user was added by some other.

            user_joined (`bool`):
                ``True`` if the user joined on their own.

            user_left (`bool`):
                ``True`` if the user left on their own.

            user_kicked (`bool`):
                ``True`` if the user was kicked by some other.

            created (`bool`, optional):
                ``True`` if this chat was just created.

            new_title (`str`, optional):
                The new title string for the chat, if applicable.

            unpin (`bool`):
                ``True`` if the existing pin gets unpinned.
        """
        def __init__(self, where, new_pin=None, new_photo=None,
                     added_by=None, kicked_by=None, created=None,
                     users=None, new_title=None, unpin=None):
            # ``where`` may be the service message itself or a bare peer.
            if isinstance(where, types.MessageService):
                self.action_message = where
                where = where.to_id
            else:
                self.action_message = None

            super().__init__(chat_peer=where, msg_id=new_pin)

            self.new_pin = isinstance(new_pin, int)
            self._pinned_message = new_pin

            self.new_photo = new_photo is not None
            self.photo = \
                new_photo if isinstance(new_photo, types.Photo) else None

            self._added_by = None
            self._kicked_by = None
            self.user_added, self.user_joined, self.user_left,\
                self.user_kicked, self.unpin = (False, False, False, False, False)

            # ``added_by is True`` means "joined on their own" (no inviter).
            if added_by is True:
                self.user_joined = True
            elif added_by:
                self.user_added = True
                self._added_by = added_by

            # Same sentinel convention for leaving vs. being kicked.
            if kicked_by is True:
                self.user_left = True
            elif kicked_by:
                self.user_kicked = True
                self._kicked_by = kicked_by

            self.created = bool(created)
            self._user_peers = users if isinstance(users, list) else [users]
            self._users = None
            self._input_users = None
            self.new_title = new_title
            self.unpin = unpin

        def respond(self, *args, **kwargs):
            """
            Responds to the chat action message (not as a reply).
            Shorthand for ``client.send_message(event.chat, ...)``.
            """
            return self._client.send_message(self.input_chat, *args, **kwargs)

        def reply(self, *args, **kwargs):
            """
            Replies to the chat action message (as a reply). Shorthand for
            ``client.send_message(event.chat, ..., reply_to=event.message.id)``.

            Has the same effect as ``.respond()`` if there is no message.
            """
            if not self.action_message:
                return self.respond(*args, **kwargs)

            kwargs['reply_to'] = self.action_message.id
            return self._client.send_message(self.input_chat, *args, **kwargs)

        def delete(self, *args, **kwargs):
            """
            Deletes the chat action message. You're responsible for checking
            whether you have the permission to do so, or to except the error
            otherwise. This is a shorthand for
            ``client.delete_messages(event.chat, event.message, ...)``.

            Does nothing if no message action triggered this event.
            """
            if self.action_message:
                return self._client.delete_messages(self.input_chat,
                                                    [self.action_message],
                                                    *args, **kwargs)

        @property
        def pinned_message(self):
            """
            If ``new_pin`` is ``True``, this returns the (:tl:`Message`)
            object that was pinned.
            """
            if self._pinned_message == 0:
                return None

            # Lazily fetch the pinned message the first time it is asked for.
            if isinstance(self._pinned_message, int) and self.input_chat:
                r = self._client(functions.channels.GetMessagesRequest(
                    self._input_chat, [self._pinned_message]
                ))
                try:
                    self._pinned_message = next(
                        x for x in r.messages
                        if isinstance(x, types.Message)
                        and x.id == self._pinned_message
                    )
                except StopIteration:
                    pass

            if isinstance(self._pinned_message, types.Message):
                return self._pinned_message

        @property
        def added_by(self):
            """
            The user who added ``users``, if applicable (``None`` otherwise).
            """
            if self._added_by and not isinstance(self._added_by, types.User):
                # Try the update's entity cache first, then the network.
                self._added_by =\
                    self._entities.get(utils.get_peer_id(self._added_by))

                if not self._added_by:
                    self._added_by = self._client.get_entity(self._added_by)

            return self._added_by

        @property
        def kicked_by(self):
            """
            The user who kicked ``users``, if applicable (``None`` otherwise).
            """
            if self._kicked_by and not isinstance(self._kicked_by, types.User):
                # Try the update's entity cache first, then the network.
                self._kicked_by =\
                    self._entities.get(utils.get_peer_id(self._kicked_by))

                if not self._kicked_by:
                    self._kicked_by = self._client.get_entity(self._kicked_by)

            return self._kicked_by

        @property
        def user(self):
            """
            The first user that takes part in this action (e.g. joined).

            Might be ``None`` if the information can't be retrieved or
            there is no user taking part.
            """
            if self.users:
                return self._users[0]

        @property
        def input_user(self):
            """
            Input version of the ``self.user`` property.
            """
            if self.input_users:
                return self._input_users[0]

        @property
        def user_id(self):
            """
            Returns the marked signed ID of the first user, if any.
            """
            if self._user_peers:
                return utils.get_peer_id(self._user_peers[0])

        @property
        def users(self):
            """
            A list of users that take part in this action (e.g. joined).

            Might be empty if the information can't be retrieved or there
            are no users taking part.
            """
            if not self._user_peers:
                return []

            if self._users is None:
                # Resolve what we can from the cached entities and fetch
                # the rest in a single get_entity call.
                have, missing = [], []
                for peer in self._user_peers:
                    user = self._entities.get(utils.get_peer_id(peer))
                    if user:
                        have.append(user)
                    else:
                        missing.append(peer)

                try:
                    missing = self._client.get_entity(missing)
                except (TypeError, ValueError):
                    missing = []

                self._users = have + missing

            return self._users

        @property
        def input_users(self):
            """
            Input version of the ``self.users`` property.
            """
            if self._input_users is None and self._user_peers:
                self._input_users = []
                for peer in self._user_peers:
                    try:
                        self._input_users.append(self._client.get_input_entity(
                            peer
                        ))
                    except (TypeError, ValueError):
                        # Unresolvable peers are silently skipped.
                        pass
            return self._input_users

        @property
        def user_ids(self):
            """
            Returns the marked signed ID of the users, if any.
            """
            if self._user_peers:
                return [utils.get_peer_id(u) for u in self._user_peers]
| [
"totufals@hotmail.com"
] | totufals@hotmail.com |
16c9a46a2c9d733a15e6609b93e0c2dda7c13452 | 12a6a68913fa5772973af9b2b2c4c90bc0656a57 | /assignment03/mdp-simulator/ai982-mdp/utilities.py | 5ec736a82ac011e00107bc86d94110542ff535c6 | [] | no_license | homasms/AI_projects | 42b944166617f76a106edcafe7d940dd240be0b1 | c66c2e6c7e1965e6579b9ed849b7d6415c6e5e9a | refs/heads/master | 2023-06-01T05:03:27.160197 | 2021-06-10T14:28:36 | 2021-06-10T14:28:36 | 276,040,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | import enum
class Actions(enum.Enum):
    """Grid-world actions: the four compass moves plus EXIT for terminal states."""
    N = 1
    W = 2
    S = 3
    E = 4
    EXIT = 5
def initialize_world_parameters(world_type):
    """Return ``(grid_size, exit_rewards)`` for a named world.

    Args:
        world_type: either 'smallWorld' or 'largeWorld'.

    Returns:
        A tuple of the (rows, cols) grid size and a dict mapping exit
        cell coordinates to their reward.

    Raises:
        ValueError: for an unknown world name (was a bare ``Exception``;
        ValueError is more precise and still caught by ``except Exception``).
    """
    worlds = {
        'smallWorld': ((3, 3), {(0, 2): 1, (1, 2): -1}),
        'largeWorld': ((10, 10), {(0, 9): 1, (1, 9): -1}),
    }
    try:
        return worlds[world_type]
    except KeyError:
        raise ValueError("Wrong Entry.")
def initialize_mdp_parameters(width, height, exit_locations):
    """Build the initial value-iteration grids.

    Returns the current V*(s) grid, the previous-step V*(s) grid, and the
    policy grid (all ``height`` rows by ``width`` columns), with every exit
    cell pre-seeded with its reward and the EXIT action.
    """
    v_states = [[0] * width for _ in range(height)]      # current step's V*(s)
    pre_v_states = [[0] * width for _ in range(height)]  # last step's V*(s)
    policy = [[Actions.N] * width for _ in range(height)]

    for (row, col), reward in exit_locations.items():
        v_states[row][col] = reward
        pre_v_states[row][col] = reward
        policy[row][col] = Actions.EXIT

    return v_states, pre_v_states, policy
| [
"homasemsarha@yahoo.com"
] | homasemsarha@yahoo.com |
dc2c1061b024a7d9902210c6ee216bf9908e7be1 | 1511bc3e1dac288855e0757f199e5f505afb8e6c | /Senior(2018-2019)/Python/Chapter 4/P4.3-B.py | 42a71b3597b3d64dcf32077b45bf2392ed09c958 | [] | no_license | jakelorah/highschoolprojects | a629fa36cc662c908371ad800b53bbdb2cc8390f | dfe8eec9894338933ce4eb46f180d7eeadd7e4d3 | refs/heads/main | 2023-02-26T03:01:55.570156 | 2021-01-31T19:05:41 | 2021-01-31T19:05:41 | 308,147,867 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | #Name: Jake Lorah
#Date: 10/18/2018
#Program Number: P4.3-B
#Program Description: This program prints every second letter of the string.
#B:
string = input("Please enter a string: ")
# Print every second character (the odd indices), one per line.
for ch in string[1::2]:
    print(ch)
| [
"jlorah@highpoint.edu"
] | jlorah@highpoint.edu |
68b259649181c54eea9faebc337711ab016af534 | 5c4289608693609de3d755674cba53b77cbc4c69 | /Python_Study/2课堂练习/Python基础班/06_名片管理系统/cards_main.py | 32a8e9caa251e2f2c3000e3de1f3a1e6e5ad5bcf | [
"Apache-2.0"
] | permissive | vipliujunjie/HouseCore | 95892e632f840f22715d08467d6610195d562261 | e9fa5ebc048cbede7823ac59a011a554bddf8674 | refs/heads/master | 2021-02-05T13:09:43.962224 | 2020-02-28T14:46:26 | 2020-02-28T14:46:26 | 243,783,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | #! /Library/Frameworks/Python.framework/Versions/3.7/bin/python3
import cards_tools
# 无限循环 由用户决定什么时候退出循环
# Main menu loop: keep prompting until the user chooses "0" to quit.
while True:
    # Show the feature menu before every prompt.
    cards_tools.show_menu()
    choice = input("请输入希望执行的操作:")
    print("您选择的操作是【%s】" % choice)
    if choice == "1":
        # Create a new card.
        cards_tools.new_card()
    elif choice == "2":
        # Show every card.
        cards_tools.show_all()
    elif choice == "3":
        # Search for a card.
        cards_tools.search_card()
    elif choice == "0":
        # Exit the system.
        print("\n欢迎再次使用【名片管理系统】")
        break
    else:
        # Anything else is invalid input; prompt again.
        print("您输入的不正确,请从新选择")
| [
"1520997065@qq.com"
] | 1520997065@qq.com |
5accaca4d79de6e89aeddb67971165259bec460b | 7a866c210bba93fa33e02305e221338541d6ec9b | /Direction JOL/Timed JOL/Output/Merged/EX2_conf_plots.py | 912af24af1f000dedf77b86d611584c4bba457a1 | [] | no_license | npm27/Spring-2019-Projects | afbb6d3816e097b58f7d5032bc8d7563536a232a | 52e0c1c4dc3de2e0399e5391dd2c8aff56754c1c | refs/heads/master | 2021-12-20T01:01:11.218779 | 2021-12-08T14:49:18 | 2021-12-08T14:49:18 | 168,214,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,075 | py | ##set up
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
## Load the binned JOL/recall data and derive error-bar half-widths.
dat = pd.read_csv("Delayed conf.csv")  # JUST NEED TO ADD DATA
dat['diff'] = dat['Upper'].sub(dat['Lower'])  # full CI width
dat['diff2'] = dat['diff'].div(2)             # half-width, used as yerr

## Set up a 2x2 figure with shared axis labels.
fig = plt.figure()
fig.set_size_inches(11, 8)
axes = [fig.add_subplot(2, 2, i) for i in range(1, 5)]
dot_line = np.arange(100)            # reference diagonal (perfect calibration)
major_ticks = np.arange(0, 101, 20)
fig.text(0.5, 0.04, 'JOL Rating', ha='center', fontsize=18)
fig.text(0.04, 0.5, '% Correct Recall', va='center', rotation='vertical', fontsize=18)

## One calibration panel per pair direction.
## (Was four copy-pasted blocks; a single loop keeps them in sync.)
panels = [('F', 'Forward'), ('B', 'Backward'),
          ('S', 'Symmetrical'), ('U', 'Unrelated')]
for ax, (direction, title) in zip(axes, panels):
    subset = dat[dat['Direction'] == direction]
    x = subset.JOL_Bin.values
    y = subset.Average.values
    ax.plot(dot_line, 'k--')
    ax.plot(x, y, marker='.', color='k')
    ax.set_xticks(major_ticks)
    ax.set_yticks(major_ticks)
    ax.set_title(title, fontsize=16)
    ax.errorbar(x, y, yerr=subset['diff2'], fmt='none', c='k', capsize=5)

## save figure
# fig.savefig('Plot2_smoothed.png')
| [
"35810320+npm27@users.noreply.github.com"
] | 35810320+npm27@users.noreply.github.com |
8dcb22ca5cfbfea727ccfbf086dcd7217f807a28 | e5487abf1270a8b14f003c444b199483c6d825d2 | /Code/lambda/skill_env/bin/rst2s5.py | 28d30c28e7838663dfad221a9f679ae1e398e04c | [] | no_license | tmoessing/Coin-Collector | 8828c789da2fa7a46fbfc741487d1a6dc533c7c8 | c5e9dccee6ed393c81db7350bdab89111033ac33 | refs/heads/master | 2020-11-26T01:23:03.131189 | 2019-12-18T21:02:07 | 2019-12-18T21:02:07 | 223,520,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | #!c:\users\tmoes\appdata\local\programs\python\python36\python.exe
# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Chris Liechti <cliechti@gmx.net>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""
# Best-effort: switch to the user's default locale so Docutils picks up
# localized settings; skip silently if no usable locale is configured.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except Exception:
    # Was a bare `except:`; Exception keeps the best-effort behaviour while
    # letting SystemExit/KeyboardInterrupt propagate.
    pass
from docutils.core import publish_cmdline, default_description
description = ('Generates S5 (X)HTML slideshow documents from standalone '
               'reStructuredText sources. ' + default_description)
# Run the Docutils command-line publisher with the S5 slideshow writer.
publish_cmdline(writer_name='s5', description=description)
| [
"tmoessing@gmail.com"
] | tmoessing@gmail.com |
202bff332bc7441a918f1a1ed187ec533a4b32cf | 04bae28a2eefc1db77097a94af558d3df6a1e713 | /20191009/Deblur_GAN/deblurgan/losses.py | c5c6dfc84d0af0c2ed18a1f853911c052d194acb | [] | no_license | Armida220/DIPHomeWork | 8fc56df852fdedf26ef852ca2cc62f7be4067005 | 1753c1ad9783aba8a5402c98421d1126d5081aaf | refs/heads/master | 2020-11-26T06:10:24.752413 | 2019-10-31T08:11:08 | 2019-10-31T08:11:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | import keras.backend as K
from keras.applications.vgg16 import VGG16
from keras.models import Model
import numpy as np
# Note the image_shape must be multiple of patch_shape
# image_shape = (256, 256, 3)
# Input resolution (H, W, C) fed to the VGG16 network in perceptual_loss.
image_shape = (1024, 1024, 3)
def l1_loss(y_true, y_pred):
    """Mean absolute error between the prediction and the target tensor."""
    absolute_error = K.abs(y_pred - y_true)
    return K.mean(absolute_error)
def perceptual_loss_100(y_true, y_pred):
    """Perceptual loss scaled up by a factor of 100."""
    return perceptual_loss(y_true, y_pred) * 100
def perceptual_loss(y_true, y_pred):
    """MSE between VGG16 'block3_conv3' features of target and prediction.

    Note: the (frozen, ImageNet-pretrained) feature extractor is rebuilt on
    every call, same as the original implementation.
    """
    backbone = VGG16(include_top=False, weights='imagenet', input_shape=image_shape)
    feature_extractor = Model(inputs=backbone.input,
                              outputs=backbone.get_layer('block3_conv3').output)
    feature_extractor.trainable = False
    feature_gap = feature_extractor(y_true) - feature_extractor(y_pred)
    return K.mean(K.square(feature_gap))
def wasserstein_loss(y_true, y_pred):
    """Wasserstein (earth-mover) critic loss: mean of label * critic score."""
    product = y_true * y_pred
    return K.mean(product)
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
    """WGAN-GP gradient penalty: pushes the critic's gradient L2 norm toward 1.

    NOTE(review): the leading ``self`` parameter suggests this was copied out
    of a class; as a module-level function every caller must supply a value
    for ``self`` positionally -- confirm the intended signature.
    """
    # Gradient of the critic output w.r.t. the interpolated samples.
    gradients = K.gradients(y_pred, averaged_samples)[0]
    gradients_sqr = K.square(gradients)
    # Sum over every axis except the batch axis (axis 0).
    gradients_sqr_sum = K.sum(gradients_sqr,
                              axis=np.arange(1, len(gradients_sqr.shape)))
    gradient_l2_norm = K.sqrt(gradients_sqr_sum)
    # Two-sided penalty: squared deviation of the norm from 1.
    gradient_penalty = K.square(1 - gradient_l2_norm)
    return K.mean(gradient_penalty)
| [
"754037927@qq.com"
] | 754037927@qq.com |
6557c40afa926f83bc0721834c9ee2d158e8fd46 | 0d0988d2afeba6539ab3802f0cac9a25ff862076 | /metodosConjuntosSTR.py | f42cea1cbd2300014f65169bbb193579f1ddd1e4 | [] | no_license | alexisflores99/Repo-for-Python | a5e967c17c0938dc5a8e91db2a6ea7302e2cd109 | d4339edcee3d2b1f57129df8059eb32c41ce4864 | refs/heads/master | 2023-04-18T18:43:06.864806 | 2021-05-02T04:05:41 | 2021-05-02T04:05:41 | 363,562,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | # set_conjunto1 = set({1,2,3,4})
# set_conjunto1.add(5)
# print(set_conjunto1)
#************************************
# set_conjunto = set({1.0, "Auto", True})
# otro_conjunto = set_conjunto.copy()
# set_conjunto == otro_conjunto
# print(otro_conjunto)
#************************************
# paquete = set({"Hola",2 ,3 ,4 })
# paquete.discard("Hola")
# print(paquete)
#************************************
# Build a set, drop one element with remove() (raises KeyError if the
# element is absent, unlike discard()), then show the result.
paquete = {"Hola", 2, 3, 4}  # set literal; set({...}) was redundant
paquete.remove("Hola")
print(paquete)
| [
"hector.flores6@unmsm.edu.pe"
] | hector.flores6@unmsm.edu.pe |
9e6666d6b99be4eaa286ee65de5946bc52dde225 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/sql/azure-mgmt-sql/azure/mgmt/sql/aio/operations/_managed_instance_vulnerability_assessments_operations.py | b81f57346b44a5fb1b5e3a63d654f6f168e9144d | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 21,802 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._managed_instance_vulnerability_assessments_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_by_instance_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
# Signature of the optional ``cls`` callback accepted by every operation:
# it receives the pipeline response, the deserialized result, and a dict of
# response headers, and its return value replaces the deserialized result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagedInstanceVulnerabilityAssessmentsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.sql.aio.SqlManagementClient`'s
        :attr:`managed_instance_vulnerability_assessments` attribute.
    """
    # AutoRest-generated operations group (see the file header); hand edits
    # will be lost the next time the SDK is regenerated.
    models = _models
    def __init__(self, *args, **kwargs) -> None:
        # The client wires in its pipeline plumbing either positionally or by
        # keyword; both call styles are supported here.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        managed_instance_name: str,
        vulnerability_assessment_name: Union[str, _models.VulnerabilityAssessmentName],
        **kwargs: Any
    ) -> _models.ManagedInstanceVulnerabilityAssessment:
        """Gets the managed instance's vulnerability assessment.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param managed_instance_name: The name of the managed instance for which the vulnerability
         assessment is defined. Required.
        :type managed_instance_name: str
        :param vulnerability_assessment_name: The name of the vulnerability assessment. "default"
         Required.
        :type vulnerability_assessment_name: str or ~azure.mgmt.sql.models.VulnerabilityAssessmentName
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedInstanceVulnerabilityAssessment or the result of cls(response)
        :rtype: ~azure.mgmt.sql.models.ManagedInstanceVulnerabilityAssessment
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP failures onto azure-core exception types;
        # callers may extend/override the mapping via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2020-11-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", "2020-11-01-preview")
        )
        cls: ClsType[_models.ManagedInstanceVulnerabilityAssessment] = kwargs.pop("cls", None)
        request = build_get_request(
            resource_group_name=resource_group_name,
            managed_instance_name=managed_instance_name,
            vulnerability_assessment_name=vulnerability_assessment_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("ManagedInstanceVulnerabilityAssessment", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}"
    }
    @overload
    async def create_or_update(
        self,
        resource_group_name: str,
        managed_instance_name: str,
        vulnerability_assessment_name: Union[str, _models.VulnerabilityAssessmentName],
        parameters: _models.ManagedInstanceVulnerabilityAssessment,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.ManagedInstanceVulnerabilityAssessment:
        """Creates or updates the managed instance's vulnerability assessment. Learn more about setting
        SQL vulnerability assessment with managed identity -
        https://docs.microsoft.com/azure/azure-sql/database/sql-database-vulnerability-assessment-storage.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param managed_instance_name: The name of the managed instance for which the vulnerability
         assessment is defined. Required.
        :type managed_instance_name: str
        :param vulnerability_assessment_name: The name of the vulnerability assessment. "default"
         Required.
        :type vulnerability_assessment_name: str or ~azure.mgmt.sql.models.VulnerabilityAssessmentName
        :param parameters: The requested resource. Required.
        :type parameters: ~azure.mgmt.sql.models.ManagedInstanceVulnerabilityAssessment
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedInstanceVulnerabilityAssessment or the result of cls(response)
        :rtype: ~azure.mgmt.sql.models.ManagedInstanceVulnerabilityAssessment
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @overload
    async def create_or_update(
        self,
        resource_group_name: str,
        managed_instance_name: str,
        vulnerability_assessment_name: Union[str, _models.VulnerabilityAssessmentName],
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.ManagedInstanceVulnerabilityAssessment:
        """Creates or updates the managed instance's vulnerability assessment. Learn more about setting
        SQL vulnerability assessment with managed identity -
        https://docs.microsoft.com/azure/azure-sql/database/sql-database-vulnerability-assessment-storage.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param managed_instance_name: The name of the managed instance for which the vulnerability
         assessment is defined. Required.
        :type managed_instance_name: str
        :param vulnerability_assessment_name: The name of the vulnerability assessment. "default"
         Required.
        :type vulnerability_assessment_name: str or ~azure.mgmt.sql.models.VulnerabilityAssessmentName
        :param parameters: The requested resource. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedInstanceVulnerabilityAssessment or the result of cls(response)
        :rtype: ~azure.mgmt.sql.models.ManagedInstanceVulnerabilityAssessment
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace_async
    async def create_or_update(
        self,
        resource_group_name: str,
        managed_instance_name: str,
        vulnerability_assessment_name: Union[str, _models.VulnerabilityAssessmentName],
        parameters: Union[_models.ManagedInstanceVulnerabilityAssessment, IO],
        **kwargs: Any
    ) -> _models.ManagedInstanceVulnerabilityAssessment:
        """Creates or updates the managed instance's vulnerability assessment. Learn more about setting
        SQL vulnerability assessment with managed identity -
        https://docs.microsoft.com/azure/azure-sql/database/sql-database-vulnerability-assessment-storage.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param managed_instance_name: The name of the managed instance for which the vulnerability
         assessment is defined. Required.
        :type managed_instance_name: str
        :param vulnerability_assessment_name: The name of the vulnerability assessment. "default"
         Required.
        :type vulnerability_assessment_name: str or ~azure.mgmt.sql.models.VulnerabilityAssessmentName
        :param parameters: The requested resource. Is either a model type or a IO type. Required.
        :type parameters: ~azure.mgmt.sql.models.ManagedInstanceVulnerabilityAssessment or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedInstanceVulnerabilityAssessment or the result of cls(response)
        :rtype: ~azure.mgmt.sql.models.ManagedInstanceVulnerabilityAssessment
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2020-11-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", "2020-11-01-preview")
        )
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.ManagedInstanceVulnerabilityAssessment] = kwargs.pop("cls", None)
        content_type = content_type or "application/json"
        # Raw streams/bytes are sent as-is; model objects are serialized to JSON.
        _json = None
        _content = None
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "ManagedInstanceVulnerabilityAssessment")
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            managed_instance_name=managed_instance_name,
            vulnerability_assessment_name=vulnerability_assessment_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.create_or_update.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize("ManagedInstanceVulnerabilityAssessment", pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize("ManagedInstanceVulnerabilityAssessment", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore
        return deserialized  # type: ignore
    create_or_update.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}"
    }
    @distributed_trace_async
    async def delete(  # pylint: disable=inconsistent-return-statements
        self,
        resource_group_name: str,
        managed_instance_name: str,
        vulnerability_assessment_name: Union[str, _models.VulnerabilityAssessmentName],
        **kwargs: Any
    ) -> None:
        """Removes the managed instance's vulnerability assessment.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param managed_instance_name: The name of the managed instance for which the vulnerability
         assessment is defined. Required.
        :type managed_instance_name: str
        :param vulnerability_assessment_name: The name of the vulnerability assessment. "default"
         Required.
        :type vulnerability_assessment_name: str or ~azure.mgmt.sql.models.VulnerabilityAssessmentName
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2020-11-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", "2020-11-01-preview")
        )
        cls: ClsType[None] = kwargs.pop("cls", None)
        request = build_delete_request(
            resource_group_name=resource_group_name,
            managed_instance_name=managed_instance_name,
            vulnerability_assessment_name=vulnerability_assessment_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.delete.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}"
    }
    @distributed_trace
    def list_by_instance(
        self, resource_group_name: str, managed_instance_name: str, **kwargs: Any
    ) -> AsyncIterable["_models.ManagedInstanceVulnerabilityAssessment"]:
        """Gets the managed instance's vulnerability assessment policies.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param managed_instance_name: The name of the managed instance for which the vulnerability
         assessments is defined. Required.
        :type managed_instance_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ManagedInstanceVulnerabilityAssessment or the
         result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.ManagedInstanceVulnerabilityAssessment]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2020-11-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", "2020-11-01-preview")
        )
        cls: ClsType[_models.ManagedInstanceVulnerabilityAssessmentListResult] = kwargs.pop("cls", None)
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        # Paging callbacks handed to AsyncItemPaged below: build the next HTTP
        # request (first page vs. follow-up next_link), pull items out of one
        # page, and fetch/validate one page.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_by_instance_request(
                    resource_group_name=resource_group_name,
                    managed_instance_name=managed_instance_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_instance.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = HttpRequest("GET", next_link)
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ManagedInstanceVulnerabilityAssessmentListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    list_by_instance.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/vulnerabilityAssessments"
    }
| [
"noreply@github.com"
] | kurtzeborn.noreply@github.com |
6affbb678c0509cdb3dd5674c2edb67dd98dcb75 | e53d920751b86ae09a16913c7531a806c334265f | /mmdet3d/ops/furthest_point_sample/points_sampler.py | 9a3bd2ae42d625cc19355492246d4967a8e96b92 | [
"Apache-2.0"
] | permissive | destinyls/mmdetection3d | e959e4a1a9316b385cec73a654bc8a016cb43a02 | f2247c2471dbb429512353b3f0802f839655dd16 | refs/heads/master | 2023-03-20T21:12:32.105859 | 2023-01-24T01:45:12 | 2023-01-24T01:45:12 | 279,744,471 | 1 | 1 | Apache-2.0 | 2020-07-15T02:42:54 | 2020-07-15T02:42:53 | null | UTF-8 | Python | false | false | 5,373 | py | import torch
from typing import List, Optional

from mmcv.runner import force_fp32
from torch import nn as nn

from .furthest_point_sample import (furthest_point_sample,
                                    furthest_point_sample_with_dist)
from .utils import calc_square_dist
def get_sampler_type(sampler_type):
    """Get the type and mode of points sampler.

    Args:
        sampler_type (str): The type of points sampler.
            The valid value are "D-FPS", "F-FPS", or "FS".

    Returns:
        class: Points sampler type.
    """
    # Early-return dispatch on the sampler mode string.
    if sampler_type == 'D-FPS':
        return DFPS_Sampler
    if sampler_type == 'F-FPS':
        return FFPS_Sampler
    if sampler_type == 'FS':
        return FS_Sampler
    raise ValueError('Only "sampler_type" of "D-FPS", "F-FPS", or "FS"'
                     f' are supported, got {sampler_type}')
class Points_Sampler(nn.Module):
    """Points sampling.

    Args:
        num_point (list[int]): Number of sample points.
        fps_mod_list (list[str]): Type of FPS method, valid mod
            ['F-FPS', 'D-FPS', 'FS'], Default: ['D-FPS'].
            F-FPS: using feature distances for FPS.
            D-FPS: using Euclidean distances of points for FPS.
            FS: using F-FPS and D-FPS simultaneously.
        fps_sample_range_list (list[int]): Range of points to apply FPS.
            Default: [-1].
    """

    def __init__(self,
                 num_point: List[int],
                 fps_mod_list: Optional[List[str]] = None,
                 fps_sample_range_list: Optional[List[int]] = None):
        super(Points_Sampler, self).__init__()
        # Fix: use None sentinels instead of mutable list defaults, which
        # would be shared across every call site.
        if fps_mod_list is None:
            fps_mod_list = ['D-FPS']
        if fps_sample_range_list is None:
            fps_sample_range_list = [-1]
        # FPS would be applied to different fps_mod in the list,
        # so the length of the num_point should be equal to
        # fps_mod_list and fps_sample_range_list.
        assert len(num_point) == len(fps_mod_list) == len(
            fps_sample_range_list)
        self.num_point = num_point
        self.fps_sample_range_list = fps_sample_range_list
        self.samplers = nn.ModuleList()
        for fps_mod in fps_mod_list:
            self.samplers.append(get_sampler_type(fps_mod)())
        self.fp16_enabled = False

    @force_fp32()
    def forward(self, points_xyz, features):
        """Sample point indices, one FPS variant per configured range.

        Args:
            points_xyz (Tensor): (B, N, 3) xyz coordinates of the features.
            features (Tensor): (B, C, N) Descriptors of the features.

        Return:
            Tensor: (B, npoint, sample_num) Indices of sampled points.
        """
        indices = []
        last_fps_end_index = 0
        for fps_sample_range, sampler, npoint in zip(
                self.fps_sample_range_list, self.samplers, self.num_point):
            assert fps_sample_range < points_xyz.shape[1]
            if fps_sample_range == -1:
                # -1 means "sample from all remaining points".
                sample_points_xyz = points_xyz[:, last_fps_end_index:]
                sample_features = features[:, :, last_fps_end_index:] if \
                    features is not None else None
            else:
                sample_points_xyz = \
                    points_xyz[:, last_fps_end_index:fps_sample_range]
                sample_features = \
                    features[:, :, last_fps_end_index:fps_sample_range] if \
                    features is not None else None
            fps_idx = sampler(sample_points_xyz.contiguous(), sample_features,
                              npoint)
            # Shift local indices back into the full point cloud's index space.
            indices.append(fps_idx + last_fps_end_index)
            # NOTE(review): when fps_sample_range is -1 this *decrements* the
            # running offset; harmless for a single range entry, but suspect
            # if a -1 entry precedes further ranges -- confirm upstream.
            last_fps_end_index += fps_sample_range
        indices = torch.cat(indices, dim=1)
        return indices
class DFPS_Sampler(nn.Module):
    """DFPS_Sampling.

    Using Euclidean distances of points for FPS.
    """

    def __init__(self):
        super(DFPS_Sampler, self).__init__()

    def forward(self, points, features, npoint):
        """Sampling points with D-FPS."""
        # Distance-based FPS only needs the xyz coordinates.
        return furthest_point_sample(points.contiguous(), npoint)
class FFPS_Sampler(nn.Module):
    """FFPS_Sampler.

    Using feature distances for FPS.
    """

    def __init__(self):
        super(FFPS_Sampler, self).__init__()

    def forward(self, points, features, npoint):
        """Sampling points with F-FPS."""
        assert features is not None, \
            'feature input to FFPS_Sampler should not be None'
        # Concatenate coordinates with (transposed) descriptors, then run FPS
        # on pairwise squared distances in that combined space.
        combined = torch.cat([points, features.transpose(1, 2)], dim=2)
        pairwise_dist = calc_square_dist(combined, combined, norm=False)
        return furthest_point_sample_with_dist(pairwise_dist, npoint)
class FS_Sampler(nn.Module):
    """FS_Sampling.

    Using F-FPS and D-FPS simultaneously.
    """

    def __init__(self):
        super(FS_Sampler, self).__init__()

    def forward(self, points, features, npoint):
        """Sampling points with FS_Sampling."""
        assert features is not None, \
            'feature input to FS_Sampler should not be None'
        combined = torch.cat([points, features.transpose(1, 2)], dim=2)
        pairwise_dist = calc_square_dist(combined, combined, norm=False)
        # Draw npoint indices with each strategy and stack them side by side.
        idx_ffps = furthest_point_sample_with_dist(pairwise_dist, npoint)
        idx_dfps = furthest_point_sample(points, npoint)
        return torch.cat([idx_ffps, idx_dfps], dim=1)
| [
"noreply@github.com"
] | destinyls.noreply@github.com |
6388a2abfd90b416b71fb102e7f2bdc93ad8f6ca | 23da742e7b7fd998f499abda3d26d4a8689f681f | /split_list.py | 3b3f52e747e063226bbd99bd7574ca6f7c7cdadf | [
"Apache-2.0"
def split_list(ls, num):
    """Split ``ls`` into consecutive chunks.

    Every chunk has length ``num`` except possibly the last, which holds the
    remainder, e.g. ``split_list([0, 1, 2, 3, 4, 5, 6], 3)`` ->
    ``[[0, 1, 2], [3, 4, 5], [6]]``.

    :param ls: list to split.
    :param num: chunk length; must be a positive integer.
    :return: list of chunks. When ``len(ls) <= num`` the result is ``[ls]``
        (containing the original list object itself, not a copy).
    """
    if len(ls) <= num:
        # Preserve historical behaviour: short input returns the list itself.
        return [ls]
    # General case: one slice per chunk (the idiom the original sketched
    # as "method 2" in a comment).
    return [ls[i:i + num] for i in range(0, len(ls), num)]
| [
"noreply@github.com"
] | JustDoItGit.noreply@github.com |
78f30f4848d93d075ba4bebcbc215dde6d0351fd | f777be3ca7cfe3c4ac24a1f962ea4c4d4e0aada3 | /testing.py | b672b936e7b0c0fe9bbfe3fdaf65b82198dabdb3 | [] | no_license | rogelio08/text_based_story | ab7419865b34c6c1b47f87fc768a02eddcc5c2ea | e7c646335ead1cf552e382dff3025157b4540ecf | refs/heads/master | 2023-07-02T03:49:46.080875 | 2021-08-08T05:07:09 | 2021-08-08T05:07:09 | 393,861,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,411 | py | import sys
import time
# Pacing constants shared by every scene function below.
a = 2  # pause (seconds) between narrative paragraphs
b = 0.2 # slower time between characters getting spaced out
c = 0.08 # quicker time between characters getting printed
def intro():
    """Opening scene: the voice speaks, then the player picks an item.

    Dispatches to path1() (key) or path2() (hammer) based on user
    input; any other answer prints an error.  Uses the module-level
    pacing constants ``a`` (paragraph pause) and ``b`` (slow
    per-character delay).
    """
    def typewrite(text, delay):
        # Print *text* one character at a time for a typewriter effect.
        for character in text:
            sys.stdout.write(character)
            sys.stdout.flush()
            time.sleep(delay)

    print("\n")
    time.sleep(a)
    string_1 = '"Very well, remember to answer truthfully... Or don\'t, either way you\'ll provide valuable data"\n'
    typewrite(string_1, b)
    time.sleep(1)
    print("With that the voice leaves you and the lights turn off for a moment\nWhen they turn on again you find a table has appeared")
    time.sleep(a)
    print("On the table you see a key on end of the table and a hammer on the other\nThe voice chimes in again")
    s = '"Please choose the item that has most value to you..."\n'
    typewrite(s, b)
    time.sleep(1)
    print()
    first_path = input("which item do you pick? (key = 1 / hammer = 2): ")
    if first_path == '1':
        print()
        path1()
    elif first_path == '2':
        print()
        path2()
    else:
        print("Unknown path detected")
def path1():
    """Key branch: the player chose the key and must now pick a door.

    Dispatches to path1_1() (worn door) or path1_2() (familiar door);
    an unrecognized answer now reports an error instead of silently
    ending the scene.
    """
    def typewrite(text, delay):
        # Print *text* one character at a time for a typewriter effect.
        for character in text:
            sys.stdout.write(character)
            sys.stdout.flush()
            time.sleep(delay)

    time.sleep(a)
    print("\nDespite how strange and unnerving the situation is, you decided that perhaps the key might be important for something down the line.")
    time.sleep(a)
    print("You rationalize that there has to be some kind of logic as to why you're here and what's the meaning behind all this.")
    time.sleep(a)
    print("After picking up the key the lights turn off and the voice speaks up.")
    s = '"Interesting choice, you have no idea where you are or if that key even fits anywhere yet you still chose it?"'
    typewrite(s, c)
    time.sleep(1)
    print()
    s2 = '"Let us find out whether your choice will be the key to freedom or the key to your death."'
    typewrite(s2, b)
    time.sleep(1)
    print()
    print("When the lights turn on again the table is gone, but now the room includes two doors.\n")
    time.sleep(a)
    # typo fixed: "mots" -> "most"
    print("The first door appears to have seen better days, most of its color has faded while several cracks could be seen in the wood.")
    time.sleep(a)
    print("The second door leaves you stunned, you recognize the door as the same one on the front of your home!")
    door_choice = input("\nWhich door do you use the key on? (1/2): ")
    if door_choice == "1":
        print()
        path1_1()
    elif door_choice == '2':
        print()
        path1_2()
    else:
        # Previously an unrecognized answer ended the scene silently;
        # report it the same way intro() does.
        print("Unknown path detected")
def path1_1():
    """Worn-door ending: the player escapes through a cave into a forest."""
    scenes = (
        "\nWhile the familiar door is calling out to you, you realize that such an obvious choice must be a trap. So going against all your instincts for survival you hesitantely unlock the worn down door and head inside.",
        "After exiting the concrete prison you find yourself somewhere damp, dark, and cold. Using your hands to feel around you deduce that you must be in some sort of cave.",
        "Realizing that the door is no longer behind you and left with no other options you decide to feel your way to what is hopefully an exit.",
        "After wandering around in the dark you notice small beams of light that eventually lead to an opening in the cave, and before you know you're outside in a forest.",
        "Out in the distance you notice smoke from a what could be a campfire but at the same time you have no idea if you've actually escaped or not.",
        "Armed with the determination to survive, you venture towards the smoke.",
    )
    # Same cadence as before: a pause *between* paragraphs, no trailing
    # pause after the final one.
    print(scenes[0])
    for scene in scenes[1:]:
        time.sleep(a)
        print(scene)
def path1_2():
    """Familiar-door ending: home is reached, but the world is empty."""
    scenes = (
        "\nNot wanting to spend another moment in the room you rush over to the familiar door and check to see if they key works.",
        "By some miracle the key fits and you're able to open the door\nRushing through the door you find yourself in your own living room, and breathing a sigh of relief.",
        "Things however, are not as they seem. You begin to notice that your home is eerily quiet, with no traces of your family anywhere.",
        "As you search through your home your fears and only confirmed, none of your family members are anywhere!\nDesperate for answers you go back through the front door but are shocked by the result.",
        "Instead of making it back to the isolated room your find yourself in your neighborhood, only there's no neighbors in sight. Moreover the normally busy interstate freeway you live next to is unusually quiet.",
        "While trying to process what's happening you realize that if the door was in fact the one to your home how did they key you picked up unlock it if you've never seen a key like it?",
        "Trying to remain optimistic, you figure there has to be someone around. And so you you go off in search of survivors that don't exist, forever wandering the hollow shell of the world you once knew.",
    )
    # Pause between paragraphs only, exactly as the original did.
    print(scenes[0])
    for scene in scenes[1:]:
        time.sleep(a)
        print(scene)
def path2():
    """Hammer branch: the player chose the hammer and must face the bear.

    Dispatches to path2_1() (fight) on answer '1'; any other answer
    goes to path2_2() (hide), matching the original behaviour.
    """
    def typewrite(text, delay):
        # Print *text* one character at a time for a typewriter effect.
        for character in text:
            sys.stdout.write(character)
            sys.stdout.flush()
            time.sleep(delay)

    time.sleep(a)
    print("\nGiven the situation you're in, you can't rule out the possibility that this is all some kind of twisted game. Thus you reason that it's in your best interest to have some kind of weapon.")
    time.sleep(a)
    print("Besides, who knows if the key is meant to throw you off from choosing a multi-purpose tool? Not to mention you could theoretically open any lock using the hammer if you're smart about it.")
    time.sleep(a)
    print("Feeling satisfied you pick up the hammer, soon after the lights turn off and the voice could be heard again.\n")
    s = '"What an interesting choice, while it\'s clever to be cautious in your position choosing what could be considered a weapon does seem rather barbaric. Though that\'s nothing new to humans."'
    typewrite(s, c)
    time.sleep(1)
    print()
    s2 = '"You made a bold choice, let\'s find out whether you have the dexterity to justify such an option."'
    typewrite(s2, c)
    time.sleep(1)
    print()
    # typo fixed: "preadator" -> "predator"
    print("Soon the lights turn on and you notice the table and key is gone but you're not interested in that. What has your attention now is the 500 pound apex predator that occupies the room with.")
    time.sleep(a)
    # typos fixed: "sizng" -> "sizing", "breif" -> "brief"
    print("With a low growl, the spontaneous bear is sizing you up. It's at this moment when your adrenaline kicks in and you're given a few brief seconds to form a plan of attack.")
    time.sleep(a)
    print("You narrow down to your options to two choices: 1) Use your adrenaline to take on the bear in a battle to the death or 2) Throw the hammer towards the one lightbulb in the room and use the darkness to hide and wait it out.")
    bear_choice = input("\nHow do you go about dealing with the bear? (1/2): ")
    if bear_choice == '1':
        print()
        path2_1()
    else:
        print()
        path2_2()
def path2_1():
    """Fight ending: the player battles the bear and loses."""
    scenes = (
        "\nDespite feeling panicked and afraid for your life you decide to muster up all the courage you have and challenge the bear for the right to live.",
        "With a war cry you rush the bear ad the bear responds with a roar of its own and stands on two feet in order to strike you down.",
        "Seeing this you fling yourself to the right in order to dodge the potentially fatal blow and as the bear crashes its paws down and turns to face you, you get in a lucky swing and manage to strike the bear near it's eye.",
        "With a roar of pain the bear backs off. You can't believe it, you just might be able to pull this off! Is what you were thinking before you realized that you didn't completely dodge the first attack.",
        "Looking down you realize you see an unsightly slash on the left side of your abdomen and while attempting to stop the bleeding the last of your adrenaline fades as the bear recovers.",
        "Your last thoughts as you see the bear closing in for the finishing move were about how people who don't consider bears as apex predators have never fought one.",
    )
    # Pause between paragraphs only, matching the original cadence.
    print(scenes[0])
    for scene in scenes[1:]:
        time.sleep(a)
        print(scene)
def path2_2():
    """Hide ending: the player douses the light but cannot escape the bear."""
    scenes = (
        "\nUnderstanding the fact that under the laws of nature no human could ever beat a grown bear with just a hammer in an enclosed space you decide to use your higher level intelligence to your advantage.",
        "As the bear prepares to attack your quickly throw your hammer at the dim lightbulb hanging from the ceiling, shattering it and engulfing the room in darkness.",
        "At first your gamble seems to pay off as the bear's roars turn from aggressive to confused at the lack of vision.\nAs you hide in the corner of the now dark room a terrifying thought hits you.",
        "Not only are you in a small room but bears don't exactly have to rely on sight alone. Sure enough, the bear begins to compose itself and soon begins sniffing the air.",
        "You could only cower in horror and wait for your inevitable death as you curse your own lack of foresight",
    )
    # Pause between paragraphs only, matching the original cadence.
    print(scenes[0])
    for scene in scenes[1:]:
        time.sleep(a)
        print(scene)
# --- Script entry: draw the title card, set the scene, prompt to play ---
print()
print()
print(" #######################")
print(" # #")
print(" # Title Card #")
print(" # #")
print(" #######################")
print()
print()
time.sleep(a)
print("You find yourself in a dim, concrete room with only a single lightbulb hanging from the ceiling")
time.sleep(a)
print("Before you are able to asses your surroundings a monotone voice could be heard")
time.sleep(a)
print()
# Accepts upper- or lower-case y/n; any other answer falls through to the
# message below (note: despite the wording, there is no retry loop).
start_game = input("Would you like to start the game? (Y/N): ")
if start_game == 'n' or start_game == 'N':
    print("Understood, subject #[REDACTED] does not wish to participate in the experiment. Bringing in the next subject...")
elif start_game == 'y' or start_game == 'Y':
    intro()
else:
    print("Answer does not compute, try again")
"noreply@github.com"
] | rogelio08.noreply@github.com |
ad42586e96c02a379336285a2bc1b60cb0230dec | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/containerinstance/v20180401/container_group.py | 393f32a489204ca350e64cfea46921dc0a2db827 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,452 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ContainerGroupArgs', 'ContainerGroup']
@pulumi.input_type
class ContainerGroupArgs:
def __init__(__self__, *,
containers: pulumi.Input[Sequence[pulumi.Input['ContainerArgs']]],
os_type: pulumi.Input[Union[str, 'OperatingSystemTypes']],
resource_group_name: pulumi.Input[str],
container_group_name: Optional[pulumi.Input[str]] = None,
image_registry_credentials: Optional[pulumi.Input[Sequence[pulumi.Input['ImageRegistryCredentialArgs']]]] = None,
ip_address: Optional[pulumi.Input['IpAddressArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
restart_policy: Optional[pulumi.Input[Union[str, 'ContainerGroupRestartPolicy']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
volumes: Optional[pulumi.Input[Sequence[pulumi.Input['VolumeArgs']]]] = None):
"""
The set of arguments for constructing a ContainerGroup resource.
:param pulumi.Input[Sequence[pulumi.Input['ContainerArgs']]] containers: The containers within the container group.
:param pulumi.Input[Union[str, 'OperatingSystemTypes']] os_type: The operating system type required by the containers in the container group.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] container_group_name: The name of the container group.
:param pulumi.Input[Sequence[pulumi.Input['ImageRegistryCredentialArgs']]] image_registry_credentials: The image registry credentials by which the container group is created from.
:param pulumi.Input['IpAddressArgs'] ip_address: The IP address type of the container group.
:param pulumi.Input[str] location: The resource location.
:param pulumi.Input[Union[str, 'ContainerGroupRestartPolicy']] restart_policy: Restart policy for all containers within the container group.
- `Always` Always restart
- `OnFailure` Restart on failure
- `Never` Never restart
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The resource tags.
:param pulumi.Input[Sequence[pulumi.Input['VolumeArgs']]] volumes: The list of volumes that can be mounted by containers in this container group.
"""
pulumi.set(__self__, "containers", containers)
pulumi.set(__self__, "os_type", os_type)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if container_group_name is not None:
pulumi.set(__self__, "container_group_name", container_group_name)
if image_registry_credentials is not None:
pulumi.set(__self__, "image_registry_credentials", image_registry_credentials)
if ip_address is not None:
pulumi.set(__self__, "ip_address", ip_address)
if location is not None:
pulumi.set(__self__, "location", location)
if restart_policy is not None:
pulumi.set(__self__, "restart_policy", restart_policy)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if volumes is not None:
pulumi.set(__self__, "volumes", volumes)
@property
@pulumi.getter
def containers(self) -> pulumi.Input[Sequence[pulumi.Input['ContainerArgs']]]:
"""
The containers within the container group.
"""
return pulumi.get(self, "containers")
@containers.setter
def containers(self, value: pulumi.Input[Sequence[pulumi.Input['ContainerArgs']]]):
pulumi.set(self, "containers", value)
@property
@pulumi.getter(name="osType")
def os_type(self) -> pulumi.Input[Union[str, 'OperatingSystemTypes']]:
"""
The operating system type required by the containers in the container group.
"""
return pulumi.get(self, "os_type")
@os_type.setter
def os_type(self, value: pulumi.Input[Union[str, 'OperatingSystemTypes']]):
pulumi.set(self, "os_type", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="containerGroupName")
def container_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the container group.
"""
return pulumi.get(self, "container_group_name")
@container_group_name.setter
def container_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_group_name", value)
@property
@pulumi.getter(name="imageRegistryCredentials")
def image_registry_credentials(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ImageRegistryCredentialArgs']]]]:
"""
The image registry credentials by which the container group is created from.
"""
return pulumi.get(self, "image_registry_credentials")
@image_registry_credentials.setter
def image_registry_credentials(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ImageRegistryCredentialArgs']]]]):
pulumi.set(self, "image_registry_credentials", value)
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[pulumi.Input['IpAddressArgs']]:
"""
The IP address type of the container group.
"""
return pulumi.get(self, "ip_address")
@ip_address.setter
def ip_address(self, value: Optional[pulumi.Input['IpAddressArgs']]):
pulumi.set(self, "ip_address", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="restartPolicy")
def restart_policy(self) -> Optional[pulumi.Input[Union[str, 'ContainerGroupRestartPolicy']]]:
"""
Restart policy for all containers within the container group.
- `Always` Always restart
- `OnFailure` Restart on failure
- `Never` Never restart
"""
return pulumi.get(self, "restart_policy")
@restart_policy.setter
def restart_policy(self, value: Optional[pulumi.Input[Union[str, 'ContainerGroupRestartPolicy']]]):
pulumi.set(self, "restart_policy", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VolumeArgs']]]]:
"""
The list of volumes that can be mounted by containers in this container group.
"""
return pulumi.get(self, "volumes")
@volumes.setter
def volumes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VolumeArgs']]]]):
pulumi.set(self, "volumes", value)
class ContainerGroup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
container_group_name: Optional[pulumi.Input[str]] = None,
containers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerArgs']]]]] = None,
image_registry_credentials: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ImageRegistryCredentialArgs']]]]] = None,
ip_address: Optional[pulumi.Input[pulumi.InputType['IpAddressArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
os_type: Optional[pulumi.Input[Union[str, 'OperatingSystemTypes']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
restart_policy: Optional[pulumi.Input[Union[str, 'ContainerGroupRestartPolicy']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
volumes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VolumeArgs']]]]] = None,
__props__=None):
"""
A container group.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] container_group_name: The name of the container group.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerArgs']]]] containers: The containers within the container group.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ImageRegistryCredentialArgs']]]] image_registry_credentials: The image registry credentials by which the container group is created from.
:param pulumi.Input[pulumi.InputType['IpAddressArgs']] ip_address: The IP address type of the container group.
:param pulumi.Input[str] location: The resource location.
:param pulumi.Input[Union[str, 'OperatingSystemTypes']] os_type: The operating system type required by the containers in the container group.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Union[str, 'ContainerGroupRestartPolicy']] restart_policy: Restart policy for all containers within the container group.
- `Always` Always restart
- `OnFailure` Restart on failure
- `Never` Never restart
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The resource tags.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VolumeArgs']]]] volumes: The list of volumes that can be mounted by containers in this container group.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ContainerGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A container group.
:param str resource_name: The name of the resource.
:param ContainerGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ContainerGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
container_group_name: Optional[pulumi.Input[str]] = None,
containers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerArgs']]]]] = None,
image_registry_credentials: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ImageRegistryCredentialArgs']]]]] = None,
ip_address: Optional[pulumi.Input[pulumi.InputType['IpAddressArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
os_type: Optional[pulumi.Input[Union[str, 'OperatingSystemTypes']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
restart_policy: Optional[pulumi.Input[Union[str, 'ContainerGroupRestartPolicy']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
volumes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VolumeArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ContainerGroupArgs.__new__(ContainerGroupArgs)
__props__.__dict__["container_group_name"] = container_group_name
if containers is None and not opts.urn:
raise TypeError("Missing required property 'containers'")
__props__.__dict__["containers"] = containers
__props__.__dict__["image_registry_credentials"] = image_registry_credentials
__props__.__dict__["ip_address"] = ip_address
__props__.__dict__["location"] = location
if os_type is None and not opts.urn:
raise TypeError("Missing required property 'os_type'")
__props__.__dict__["os_type"] = os_type
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["restart_policy"] = restart_policy
__props__.__dict__["tags"] = tags
__props__.__dict__["volumes"] = volumes
__props__.__dict__["instance_view"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:containerinstance/v20180401:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20170801preview:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20170801preview:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20171001preview:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20171001preview:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20171201preview:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20171201preview:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20180201preview:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20180201preview:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20180601:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20180601:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20180901:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20180901:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20181001:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20181001:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20191201:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20191201:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20201101:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20201101:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20210301:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20210301:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20210701:ContainerGroup"), 
pulumi.Alias(type_="azure-nextgen:containerinstance/v20210701:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20210901:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20210901:ContainerGroup")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ContainerGroup, __self__).__init__(
'azure-native:containerinstance/v20180401:ContainerGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ContainerGroup':
"""
Get an existing ContainerGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ContainerGroupArgs.__new__(ContainerGroupArgs)
__props__.__dict__["containers"] = None
__props__.__dict__["image_registry_credentials"] = None
__props__.__dict__["instance_view"] = None
__props__.__dict__["ip_address"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["os_type"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["restart_policy"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["volumes"] = None
return ContainerGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def containers(self) -> pulumi.Output[Sequence['outputs.ContainerResponse']]:
"""
The containers within the container group.
"""
return pulumi.get(self, "containers")
@property
@pulumi.getter(name="imageRegistryCredentials")
def image_registry_credentials(self) -> pulumi.Output[Optional[Sequence['outputs.ImageRegistryCredentialResponse']]]:
"""
The image registry credentials by which the container group is created from.
"""
return pulumi.get(self, "image_registry_credentials")
@property
@pulumi.getter(name="instanceView")
def instance_view(self) -> pulumi.Output['outputs.ContainerGroupResponseInstanceView']:
"""
The instance view of the container group. Only valid in response.
"""
return pulumi.get(self, "instance_view")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> pulumi.Output[Optional['outputs.IpAddressResponse']]:
"""
The IP address type of the container group.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="osType")
def os_type(self) -> pulumi.Output[str]:
"""
The operating system type required by the containers in the container group.
"""
return pulumi.get(self, "os_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the container group. This only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="restartPolicy")
def restart_policy(self) -> pulumi.Output[Optional[str]]:
"""
Restart policy for all containers within the container group.
- `Always` Always restart
- `OnFailure` Restart on failure
- `Never` Never restart
"""
return pulumi.get(self, "restart_policy")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def volumes(self) -> pulumi.Output[Optional[Sequence['outputs.VolumeResponse']]]:
"""
The list of volumes that can be mounted by containers in this container group.
"""
return pulumi.get(self, "volumes")
| [
"noreply@github.com"
] | vivimouret29.noreply@github.com |
468ec6b362681d9a3018b5f0182ef31622ef30b1 | 1b0a729f6e20c542a6370785a49c181c0675e334 | /main.py | 35fb3f77ad0ea393411e9e0c57d85315d85bd310 | [] | no_license | fans656/mint-dev | 68125c4b41ab64b20d54a2b19e8bf0179dc4636b | 408f6f055670b15a3f3ee9c9ec086b1090cce372 | refs/heads/master | 2021-05-04T11:43:44.740116 | 2016-09-07T13:43:44 | 2016-09-07T13:43:44 | 45,515,119 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from mint import *
from mint.protocols.test import Retransmit
# Build a small topology: three hosts around one switch.  Host ``c`` is
# created but currently unlinked (its link line is commented out below).
a, b, c = Host(), Host(), Host()
s = Switch()
link(a, s.tips[0], 1)
link(b, s.tips[1], 2)
#link(c, s.tips[2], 3)
# Stack a retransmission protocol onto host ``a`` and queue one payload.
a += Retransmit()
a.send('hi')
#b.send('me').at(5)
# presumably starts the simulation loop -- see mint.start (TODO confirm)
start()
| [
"fans656@yahoo.com"
] | fans656@yahoo.com |
47b2fcaa1e74c97b42be077420a4335f38b24f8d | a7ff1ba9437204454c6b8639e99b007393c64118 | /synapse/tools/aha/enroll.py | a643a485268842bbc531afab92dd9b5e8bf84112 | [
"Apache-2.0"
] | permissive | vishalbelsare/synapse | 67013933db31ac71a4074b08a46b129774f63e47 | a418b1354b2f94e32644ede612c271a6c362ccae | refs/heads/master | 2023-09-01T10:45:34.439767 | 2022-05-13T21:07:20 | 2022-05-13T21:07:20 | 164,022,574 | 0 | 0 | Apache-2.0 | 2022-05-15T07:45:07 | 2019-01-03T21:01:32 | Python | UTF-8 | Python | false | false | 2,609 | py | import os
import sys
import asyncio
import argparse
import synapse.common as s_common
import synapse.telepath as s_telepath
import synapse.lib.output as s_output
import synapse.lib.certdir as s_certdir
# CLI help text shown by argparse.  Fixed the "enrivonment" typo and the
# example, which invoked ``aha.register`` although this module is ``enroll``.
descr = '''
Use a one-time use key to initialize your AHA user environment.
Examples:
    python -m synapse.tools.aha.enroll tcp://aha.loop.vertex.link:27272/b751e6c3e6fc2dad7a28d67e315e1874
'''
async def main(argv, outp=s_output.stdout):
    '''
    Enroll this environment as an AHA user.

    Saves the network CA certificate, generates and signs a user
    certificate via the one-time provisioning service, and records the
    AHA server URL in the user's telepath.yaml.

    Args:
        argv: command line arguments (expects the one-time enrollment URL).
        outp: output printer used for status messages.
    '''
    # prog fixed: this tool is ``enroll`` (was a copy-paste of 'provision').
    pars = argparse.ArgumentParser(prog='enroll', description=descr)
    pars.add_argument('onceurl', help='The one-time use AHA user enrollment URL.')
    opts = pars.parse_args(argv)
    async with s_telepath.withTeleEnv():
        certpath = s_common.getSynDir('certs')
        yamlpath = s_common.getSynPath('telepath.yaml')
        teleyaml = s_common.yamlload(yamlpath)
        if teleyaml is None:
            teleyaml = {}
        teleyaml.setdefault('version', 1)
        teleyaml.setdefault('aha:servers', ())
        s_common.gendir(certpath)
        certdir = s_certdir.CertDir(path=certpath)
        async with await s_telepath.openurl(opts.onceurl) as prov:
            userinfo = await prov.getUserInfo()
            ahaurls = userinfo.get('aha:urls')
            ahauser = userinfo.get('aha:user')
            ahanetw = userinfo.get('aha:network')
            username = f'{ahauser}@{ahanetw}'
            # Remove any stale CA certificate before saving the fresh one.
            # (fixed: os.path has no unlink(); must be os.unlink)
            capath = certdir.getCaCertPath(ahanetw)
            if capath is not None:
                os.unlink(capath)
            byts = await prov.getCaCert()
            capath = certdir.saveCaCertByts(byts)
            outp.printf(f'Saved CA certificate: {capath}')
            # Remove any stale user key and certificate before re-enrolling.
            keypath = certdir.getUserKeyPath(username)
            if keypath is not None:
                os.unlink(keypath)
            crtpath = certdir.getUserCertPath(username)
            if crtpath is not None:
                # fixed: previously unlinked keypath again instead of crtpath
                os.unlink(crtpath)
            xcsr = certdir.genUserCsr(username)
            byts = await prov.signUserCsr(xcsr)
            crtpath = certdir.saveUserCertByts(byts)
            outp.printf(f'Saved user certificate: {crtpath}')
            # Record the AHA server URL (with our user embedded) if unknown.
            ahaurls = s_telepath.modurl(ahaurls, user=ahauser)
            if ahaurls not in teleyaml.get('aha:servers'):
                outp.printf('Updating known AHA servers')
                servers = list(teleyaml.get('aha:servers'))
                servers.append(ahaurls)
                teleyaml['aha:servers'] = servers
                s_common.yamlsave(teleyaml, yamlpath)
# Script entry point: run the enroller and exit with its return code.
if __name__ == '__main__': # pragma: no cover
    sys.exit(asyncio.run(main(sys.argv[1:])))
| [
"noreply@github.com"
] | vishalbelsare.noreply@github.com |
40d836471602038f8e490438807b48014491d9e2 | df97d5b25d40b54e0714ed9c0a6dd7a579011e2e | /mikadocms/flikr_grabber.py | 966050a532ec3be0269d2f1bc60375d21d2ae39b | [] | no_license | mikadosoftware/mikadoCMS | 90ac1910b06f32bc3e808d1df656ba38a30e781c | 7bb1ca4f66b74d4529a601540e1bf469f44d3b01 | refs/heads/master | 2021-01-17T00:20:34.489198 | 2018-06-13T15:27:53 | 2018-06-13T15:27:53 | 8,103,422 | 0 | 0 | null | 2013-05-03T23:07:59 | 2013-02-08T23:27:27 | JavaScript | UTF-8 | Python | false | false | 2,740 | py | #!/usr/bin/env python
#! -*- coding: utf-8 -*-
### Copyright Paul Brian 2013
# This program is licensed, without under the terms of the
# GNU General Public License version 2 (or later). Please see
# LICENSE.txt for details
###
"""
:author: paul@mikadosoftware.com <Paul Brian>
Flikr.com provides a useful outlet for using photographs on
a website with minimal cost, and importantly, fuss.
1. visit http://www.flickr.com/search/advanced/
Search for a photo (by tag / text) but click "creative commons"
and "commercial" use.
2. Find the right photo URL
3. run ``python flickr_grabber.py <URL>``
4. I will grab the page and make a best guess as to the original photo
URL
5.
"""
import requests
from bs4 import BeautifulSoup
import sys
from bookmaker import lib
import conf
from optparse import OptionParser
import logging
import webbrowser
import urllib
import os
class myError(Exception):
    """Module-level error type for the flikr grabber (currently unused)."""
#########
PHOTO_STORE = "./photos"
testurl = "http://www.flickr.com/photos/comedynose/4230176889/"
def extract_photo_url(url):
    """Fetch the flickr page at *url* and guess the original photo URL.

    Returns a ``(candidate, report)`` pair where ``candidate`` is the
    ``og:image`` meta tag (the usual best guess) and ``report`` is a
    human-readable summary listing every <img> tag found on the page.
    """
    response = requests.get(url)
    soup = BeautifulSoup(response.text)
    # The Open Graph image meta tag is usually the full-size photo.
    candidate = soup.find(property='og:image')
    template = """
    From page %s
    We have likely candidate of
    %s
    or these:
    """
    report = template % (url, str(candidate))
    # Append every <img> tag so the user can pick manually if the guess is off.
    report += "".join(str(tag) for tag in soup.find_all("img"))
    return (candidate, report)
def get_photo(url):
    """Download the photo at *url* into the local ``PHOTO_STORE`` directory,
    keeping the URL's basename as the file name."""
    destination = os.path.join(PHOTO_STORE, os.path.basename(url))
    urllib.urlretrieve(url, destination)
#########
def parse_args(argv=None):
    """Parse the command line options for the flikr grabber.

    :param argv: optional list of argument strings; ``None`` (the default)
        parses ``sys.argv[1:]`` exactly as before, so existing
        ``parse_args()`` callers are unaffected.
    :returns: the ``(options, args)`` pair produced by ``OptionParser``.
    """
    parser = OptionParser()
    parser.add_option("--config", dest="confpath",
                      help="path to ini file")
    parser.add_option("--flikrpage", dest="flikrpage",
                      help="url to embedded photo")
    # Fixed help text: was "url to stadnalone photo (mutually xlusive with
    # glikrpage" -- typos and a missing closing parenthesis.
    parser.add_option("--flikrphoto", dest="flikrphoto",
                      help="url to standalone photo (mutually exclusive with --flikrpage)")
    (options, args) = parser.parse_args(argv)
    return (options, args)
def main(opts, args):
"""
"""
if opts.confpath:
confd = conf.get_config(opts.confpath)
lgr.debug(pprint.pformat(confd))
else:
confd = {}
if opts.flikrpage:
likelicandidate, resultstr = extract_photo_url(opts.flikrpage)
print likelicandidate
print resultstr
if opts.flikrphoto:
get_photo(opts.flikrphoto)
if __name__ == '__main__':
    # Script entry point: verbose logging, parse the CLI, run the dispatcher.
    logging.basicConfig(level=logging.DEBUG)
    opts, args = parse_args()
    try:
        main(opts, args)
    except Exception, e:
        # NOTE(review): ``raise e`` in Python 2 re-raises with a fresh
        # traceback; a bare ``raise`` would preserve the original one.
        print "We can trap a lot up here"
        raise e
| [
"paul@mikadosoftware.com"
] | paul@mikadosoftware.com |
a006f031a6bef10a643b1366ee30edb96ede4562 | 7e40fdb15a67e3b53162bbcd2b1f091805837d9f | /article/migrations/0006_auto__add_newslettermain.py | ee4e4c7d24923010ed32341a3a741fa9e7bb03f5 | [] | no_license | brentcappello/newsdub | 79a5eecd92dcaf44aa07314eedbc7d5183683689 | cdfc6619cc8b89bc224100e913cb85378d0d8cea | refs/heads/master | 2016-09-01T20:53:07.784968 | 2012-11-15T02:53:41 | 2012-11-15T02:53:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,992 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: adds the 'NewsletterMain' model and the M2M
    # join table linking Newsletter to NewsletterMain.  The ``models`` dict
    # below is frozen ORM state emitted by South's ``schemamigration`` --
    # do not edit it by hand; regenerate it instead.

    def forwards(self, orm):
        # Adding model 'NewsletterMain'
        db.create_table('article_newslettermain', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50)),
            ('description', self.gf('django.db.models.fields.TextField')()),
            ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('status', self.gf('django.db.models.fields.IntegerField')(default=2)),
        ))
        db.send_create_signal('article', ['NewsletterMain'])

        # Adding M2M table for field newsletters_main on 'Newsletter'
        db.create_table('article_newsletter_newsletters_main', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('newsletter', models.ForeignKey(orm['article.newsletter'], null=False)),
            ('newslettermain', models.ForeignKey(orm['article.newslettermain'], null=False))
        ))
        db.create_unique('article_newsletter_newsletters_main', ['newsletter_id', 'newslettermain_id'])

    def backwards(self, orm):
        # Exact inverse of forwards(): drop the model table first, then the
        # M2M join table.
        # Deleting model 'NewsletterMain'
        db.delete_table('article_newslettermain')

        # Removing M2M table for field newsletters_main on 'Newsletter'
        db.delete_table('article_newsletter_newsletters_main')

    # Frozen ORM snapshot used by South to build the ``orm`` object passed
    # to forwards()/backwards().  Generated code -- kept verbatim.
    models = {
        'article.newsletter': {
            'Meta': {'ordering': "('-publish',)", 'object_name': 'Newsletter'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'newsletters_main': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['article.NewsletterMain']", 'symmetrical': 'False'}),
            'publish': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'article.newslettermain': {
            'Meta': {'object_name': 'NewsletterMain'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'article.post': {
            'Meta': {'ordering': "('-publish',)", 'object_name': 'Post'},
            'allow_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'added_posts'", 'to': "orm['auth.User']"}),
            'body': ('django.db.models.fields.TextField', [], {}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'newsletters': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['article.Newsletter']", 'symmetrical': 'False'}),
            'publish': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'tease': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
        }
    }

    complete_apps = ['article']
"brent@gmail"
] | brent@gmail |
07ed4b9273137675fff9b21384eac1a28eb95b43 | 137524b533472fd4b2752078e0a6d7f4c0fcf2d7 | /tasksLab1/task2/TaskC.py | fcb64df58dd9a0784cd9d4db227026f85e35aa2e | [] | no_license | blazejmichal/inteligencja-obliczeniowa | 8666869c227006fdae5dc1ab3a1b549c1db91548 | 4ffef53cddd82711d559eafd5c9d47e09c0e048d | refs/heads/master | 2023-02-17T05:42:43.522395 | 2021-01-17T16:09:34 | 2021-01-17T16:09:34 | 319,463,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | import matplotlib.pyplot as plt
import csv
class Task2c:
    """Plot the population of a Polish city over time from a CSV file."""

    def __init__(self):
        pass

    @staticmethod
    def execute(csv_path='miasta.csv', city='Gdansk'):
        """Read year/population columns from *csv_path* and show a line plot.

        Both parameters are new but default to the previously hard-coded
        values, so existing ``Task2c.execute()`` callers are unaffected.

        :param csv_path: CSV file with a 'Rok' (year) column and one column
            per city.
        :param city: name of the city column to plot.
        """
        years = []
        population = []
        with open(csv_path) as csvfile:
            reader = csv.DictReader(csvfile)
            # Each row maps a column header to its value for that year.
            for row in reader:
                years.append(row['Rok'])
                population.append(row[city])
        plt.plot(years, population, 'r', label='Krzywa wykresu')
        plt.xlabel('Lata')
        plt.ylabel('Liczba ludnosci [w tys.]')
        # Same title string as before for the default city.
        plt.title('Ludnosc w miastach Polski (%s)' % city)
        plt.legend()
        plt.show()
| [
"Blazej@DESKTOP-P8SE49K"
] | Blazej@DESKTOP-P8SE49K |
1ad67f537ad7c367ceee20cd2b88f9124ff2566a | 6971681df75216216f4b0a196b49077361ed6829 | /src/olympia/migrations/335-perms-locales.py | 6e210baccd4f36e7d0b22701cea1f573819fb9dc | [
"CC-BY-3.0",
"CC-BY-NC-4.0",
"CC-BY-4.0",
"CC-BY-ND-3.0",
"CC-BY-NC-ND-3.0",
"CC-BY-SA-3.0",
"CC-BY-NC-3.0",
"CC-BY-NC-ND-4.0",
"CC-BY-NC-SA-3.0"
] | permissive | piyushmittal25/addons-server | fb6eafc2c1239608c435e3afc7a6bd3db3e38e77 | 1527d1542f0e025940b7b370bf98350869737e2f | refs/heads/master | 2020-03-18T21:44:06.420678 | 2018-05-29T11:08:31 | 2018-05-29T11:08:31 | 130,405,465 | 0 | 0 | BSD-3-Clause | 2018-04-23T10:56:40 | 2018-04-20T19:29:28 | Python | UTF-8 | Python | false | false | 1,140 | py | from django.conf import settings
from access.models import Group, GroupUser
LANGS = sorted(list(
set(settings.AMO_LANGUAGES + settings.HIDDEN_LANGUAGES) -
set(['en-US'])))
def run():
    # One-shot data migration: create the new localizer permission groups
    # (stable pks starting at 50006) and move the members of the legacy
    # groups of the same name (pk < 50000) into them.
    Group.objects.create(pk=50006, name='Senior Localizers',
                         rules='Locales:Edit')

    for idx, locale in enumerate(LANGS):
        # Predictable pk per locale, in LANGS' sorted order.
        pk = 50007 + idx
        name = '%s Localizers' % locale
        rules = 'Locale.%s:Edit,L10nTools:View' % locale
        group = Group.objects.create(pk=pk, name=name, rules=rules)
        print 'New group created: (%d) %s' % (pk, name)

        try:
            old_group = Group.objects.get(pk__lt=50000, name=name)
        except Group.DoesNotExist:
            # Not every locale had a legacy group; that is fine.
            print 'Old group not found: %s' % name
            continue

        # Rename old groups so they are distinguishable from the new ones.
        old_group.update(name=old_group.name + ' (OLD)')

        # Migrate users to the new group (memberships are copied, the old
        # group keeps its members but is renamed above).
        cnt = 0
        for user in old_group.users.all():
            cnt += 1
            GroupUser.objects.create(group=group, user=user)
        print 'Migrated %d users to new group (%s)' % (cnt, name)
| [
"chudson@mozilla.com"
] | chudson@mozilla.com |
230c05c7d30324adcb69a3442767523215dea7ec | a56252fda5c9e42eff04792c6e16e413ad51ba1a | /resources/usr/local/lib/python2.7/dist-packages/sklearn/metrics/cluster/supervised.py | 31d1a45b74047c04f16b5e95a5fec55fca7b256f | [
"Apache-2.0"
] | permissive | edawson/parliament2 | 4231e692565dbecf99d09148e75c00750e6797c4 | 2632aa3484ef64c9539c4885026b705b737f6d1e | refs/heads/master | 2021-06-21T23:13:29.482239 | 2020-12-07T21:10:08 | 2020-12-07T21:10:08 | 150,246,745 | 0 | 0 | Apache-2.0 | 2019-09-11T03:22:55 | 2018-09-25T10:21:03 | Python | UTF-8 | Python | false | false | 26,696 | py | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from ...utils.fixes import unique
from .expected_mutual_info_fast import expected_mutual_information
def comb2(n):
    """Exact "n choose 2".

    The exact integer variant of ``comb`` is faster for k == 2, so this
    module uses it everywhere instead of the float approximation.
    """
    return comb(n, 2, exact=True)
def check_clusterings(labels_true, labels_pred):
    """Check that the two clusterings matching 1D integer arrays"""
    labels_true = np.asarray(labels_true)
    labels_pred = np.asarray(labels_pred)

    # Both label arrays must be one-dimensional ...
    for name, labels in (("labels_true", labels_true),
                         ("labels_pred", labels_pred)):
        if labels.ndim != 1:
            raise ValueError(
                "%s must be 1D: shape is %r" % (name, labels.shape))
    # ... and of identical length.
    if labels_true.shape != labels_pred.shape:
        raise ValueError(
            "labels_true and labels_pred must have same size, got %d and %d"
            % (labels_true.shape[0], labels_pred.shape[0]))
    return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
    """Build a contingency matrix describing the relationship between labels.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate

    eps: None or float
        If a float, that value is added to all values in the contingency
        matrix. This helps to stop NaN propagation.
        If ``None``, nothing is adjusted.

    Returns
    -------
    contingency: array, shape=[n_classes_true, n_classes_pred]
        Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
        true class :math:`i` and in predicted class :math:`j`. If
        ``eps is None``, the dtype of this array will be integer. If ``eps`` is
        given, the dtype will be float.
    """
    classes, class_idx = unique(labels_true, return_inverse=True)
    clusters, cluster_idx = unique(labels_pred, return_inverse=True)
    n_classes = classes.shape[0]
    n_clusters = clusters.shape[0]
    # Using coo_matrix to accelerate simple histogram calculation,
    # i.e. bins are consecutive integers
    # Currently, coo_matrix is faster than histogram2d for simple cases
    # Fix: use the builtin ``int`` instead of the ``np.int`` alias, which was
    # removed in NumPy 1.24 (it was always just the builtin int anyway).
    contingency = coo_matrix((np.ones(class_idx.shape[0]),
                              (class_idx, cluster_idx)),
                             shape=(n_classes, n_clusters),
                             dtype=int).toarray()
    if eps is not None:
        # don't use += as contingency is integer
        contingency = contingency + eps
    return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
    """Rand index adjusted for chance

    The Rand Index computes a similarity measure between two clusterings
    by considering all pairs of samples and counting pairs that are
    assigned in the same or different clusters in the predicted and
    true clusterings.

    The raw RI score is then "adjusted for chance" into the ARI score
    using the following scheme::

        ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)

    The adjusted Rand index is thus ensured to have a value close to
    0.0 for random labeling independently of the number of clusters and
    samples and exactly 1.0 when the clusterings are identical (up to
    a permutation).

    ARI is a symmetric measure::

        adjusted_rand_score(a, b) == adjusted_rand_score(b, a)

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate

    Returns
    -------
    ari: float
       Similarity score between -1.0 and 1.0. Random labelings have an ARI
       close to 0.0. 1.0 stands for perfect match.

    Examples
    --------

    Perfectly maching labelings have a score of 1 even

      >>> from sklearn.metrics.cluster import adjusted_rand_score
      >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Labelings that assign all classes members to the same clusters
    are complete be not always pure, hence penalized::

      >>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1])  # doctest: +ELLIPSIS
      0.57...

    ARI is symmetric, so labelings that have pure clusters with members
    coming from the same classes but unnecessary splits are penalized::

      >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2])  # doctest: +ELLIPSIS
      0.57...

    If classes members are completely split across different clusters, the
    assignment is totally incomplete, hence the ARI is very low::

      >>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
      0.0

    References
    ----------

    .. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
      Journal of Classification 1985`
      http://www.springerlink.com/content/x64124718341j1j0/

    .. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index

    See also
    --------
    adjusted_mutual_info_score: Adjusted Mutual Information
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Special limit cases: no clustering since the data is not split;
    # or trivial clustering where each document is assigned a unique cluster.
    # These are perfect matches hence return 1.0.
    if n_classes == n_clusters and n_classes in (0, 1, len(labels_true)):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred)

    # Pair counts within true classes, within predicted clusters, and
    # within individual contingency cells.
    sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
    sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
    sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())

    # Expected index under random labelings, and the maximum index.
    expected_index = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
    max_index = (sum_comb_c + sum_comb_k) / 2.
    return (sum_comb - expected_index) / (max_index - expected_index)
def homogeneity_completeness_v_measure(labels_true, labels_pred):
    """Compute the homogeneity and completeness and V-Measure scores at once

    Those metrics are based on normalized conditional entropy measures of
    the clustering labeling to evaluate given the knowledge of a Ground
    Truth class labels of the same samples.

    A clustering result satisfies homogeneity if all of its clusters
    contain only data points which are members of a single class.

    A clustering result satisfies completeness if all the data points
    that are members of a given class are elements of the same cluster.

    Both scores have positive values between 0.0 and 1.0, larger values
    being desirable.

    Those 3 metrics are independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score values in any way.

    V-Measure is furthermore symmetric: swapping ``labels_true`` and
    ``label_pred`` will give the same score. This does not hold for
    homogeneity and completeness.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    homogeneity: float
       score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling

    completeness: float
       score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    v_measure: float
        harmonic mean of the first two

    See also
    --------
    homogeneity_score
    completeness_score
    v_measure_score
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)

    # An empty clustering is a perfect match by convention.
    if len(labels_true) == 0:
        return 1.0, 1.0, 1.0

    h_true = entropy(labels_true)
    h_pred = entropy(labels_pred)
    mi = mutual_info_score(labels_true, labels_pred)

    # A zero-entropy labeling is trivially homogeneous / complete.
    homogeneity = mi / h_true if h_true else 1.0
    completeness = mi / h_pred if h_pred else 1.0

    # The V-measure is the harmonic mean of homogeneity and completeness.
    denom = homogeneity + completeness
    if denom == 0.0:
        v_measure_score = 0.0
    else:
        v_measure_score = 2.0 * homogeneity * completeness / denom

    return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
    """Homogeneity metric of a cluster labeling given a ground truth

    A clustering result satisfies homogeneity if all of its clusters
    contain only data points which are members of a single class.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is not symmetric: switching ``label_true`` with ``label_pred``
    will return the :func:`completeness_score` which will be different in
    general.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    homogeneity: float
       score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling

    References
    ----------

    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://acl.ldc.upenn.edu/D/D07/D07-1043.pdf>`_

    See also
    --------
    completeness_score
    v_measure_score

    Examples
    --------

    Perfect labelings are homogeneous::

      >>> from sklearn.metrics.cluster import homogeneity_score
      >>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Non-perfect labelings that further split classes into more clusters can be
    perfectly homogeneous::

      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
      ... # doctest: +ELLIPSIS
      1.0...
      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
      ... # doctest: +ELLIPSIS
      1.0...

    Clusters that include samples from different classes do not make for an
    homogeneous labeling::

      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
      ... # doctest: +ELLIPSIS
      0.0...
      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
      ... # doctest: +ELLIPSIS
      0.0...
    """
    # First element of the (homogeneity, completeness, v_measure) triple.
    homogeneity, _, _ = homogeneity_completeness_v_measure(labels_true,
                                                           labels_pred)
    return homogeneity
def completeness_score(labels_true, labels_pred):
    """Completeness metric of a cluster labeling given a ground truth

    A clustering result satisfies completeness if all the data points
    that are members of a given class are elements of the same cluster.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is not symmetric: switching ``label_true`` with ``label_pred``
    will return the :func:`homogeneity_score` which will be different in
    general.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    completeness: float
       score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    References
    ----------

    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://acl.ldc.upenn.edu/D/D07/D07-1043.pdf>`_

    See also
    --------
    homogeneity_score
    v_measure_score

    Examples
    --------

    Perfect labelings are complete::

      >>> from sklearn.metrics.cluster import completeness_score
      >>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Non-perfect labelings that assign all classes members to the same clusters
    are still complete::

      >>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
      1.0
      >>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
      1.0

    If classes members are split across different clusters, the
    assignment cannot be complete::

      >>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
      0.0
      >>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
      0.0
    """
    # Second element of the (homogeneity, completeness, v_measure) triple.
    _, completeness, _ = homogeneity_completeness_v_measure(labels_true,
                                                            labels_pred)
    return completeness
def v_measure_score(labels_true, labels_pred):
    """V-measure cluster labeling given a ground truth.

    This score is identical to :func:`normalized_mutual_info_score`.

    The V-measure is the harmonic mean between homogeneity and completeness::

        v = 2 * (homogeneity * completeness) / (homogeneity + completeness)

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignments strategies
    on the same dataset when the real ground truth is not known.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    v_measure: float
       score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    References
    ----------

    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://acl.ldc.upenn.edu/D/D07/D07-1043.pdf>`_

    See also
    --------
    homogeneity_score
    completeness_score

    Examples
    --------

    Perfect labelings are both homogeneous and complete, hence have score 1.0::

      >>> from sklearn.metrics.cluster import v_measure_score
      >>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Labelings that assign all classes members to the same clusters
    are complete be not homogeneous, hence penalized::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
      ... # doctest: +ELLIPSIS
      0.8...
      >>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
      ... # doctest: +ELLIPSIS
      0.66...

    Labelings that have pure clusters with members coming from the same
    classes are homogeneous but un-necessary splits harms completeness
    and thus penalize V-measure as well::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
      ... # doctest: +ELLIPSIS
      0.8...
      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
      ... # doctest: +ELLIPSIS
      0.66...

    If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the V-Measure is null::

      >>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
      ... # doctest: +ELLIPSIS
      0.0...

    Clusters that include samples from totally different classes totally
    destroy the homogeneity of the labeling, hence::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
      ... # doctest: +ELLIPSIS
      0.0...
    """
    # Third element of the (homogeneity, completeness, v_measure) triple.
    _, _, v_measure = homogeneity_completeness_v_measure(labels_true,
                                                         labels_pred)
    return v_measure
def mutual_info_score(labels_true, labels_pred, contingency=None):
    """Mutual Information between two clusterings

    The Mutual Information is a measure of the similarity between two labels of
    the same data. Where :math:`P(i)` is the probability of a random sample
    occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
    random sample occurring in cluster :math:`V_j`, the Mutual Information
    between clusterings :math:`U` and :math:`V` is given as:

    .. math::

        MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}

    This is equal to the Kullback-Leibler divergence of the joint distribution
    with the product distribution of the marginals.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignments strategies
    on the same dataset when the real ground truth is not known.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    contingency: None or array, shape = [n_classes_true, n_classes_pred]
        A contingency matrix given by the :func:`contingency_matrix` function.
        If value is ``None``, it will be computed, otherwise the given value is
        used, with ``labels_true`` and ``labels_pred`` ignored.

    Returns
    -------
    mi: float
       Mutual information, a non-negative value

    See also
    --------
    adjusted_mutual_info_score: Adjusted against chance Mutual Information
    normalized_mutual_info_score: Normalized Mutual Information
    """
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred)
    contingency = np.array(contingency, dtype='float')
    total = np.sum(contingency)
    row_sums = np.sum(contingency, axis=1)
    col_sums = np.sum(contingency, axis=0)
    outer = np.outer(row_sums, col_sums)

    # Only the non-empty contingency cells contribute to the sum.
    nonzero = contingency != 0.0
    counts = contingency[nonzero]
    log_counts = np.log(counts)
    probs = counts / total
    # log(a / b) is computed as log(a) - log(b) to limit precision loss.
    log_outer = (-np.log(outer[nonzero])
                 + log(row_sums.sum()) + log(col_sums.sum()))
    mi = probs * (log_counts - log(total)) + probs * log_outer
    return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
    """Adjusted Mutual Information between two clusterings

    Adjusted Mutual Information (AMI) is an adjustment of the Mutual
    Information (MI) score to account for chance. It accounts for the fact that
    the MI is generally higher for two clusterings with a larger number of
    clusters, regardless of whether there is actually more information shared.
    For two clusterings :math:`U` and :math:`V`, the AMI is given as::

        AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignments strategies
    on the same dataset when the real ground truth is not known.

    Be mindful that this function is an order of magnitude slower than other
    metrics, such as the Adjusted Rand Index.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    Returns
    -------
    ami: float
       score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    mutual_information_score: Mutual Information (not adjusted for chance)

    Examples
    --------

    Perfect labelings are both homogeneous and complete, hence have
    score 1.0::

      >>> from sklearn.metrics.cluster import adjusted_mutual_info_score
      >>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    If classes members are completely split across different clusters,
    the assignment is totally in-complete, hence the AMI is null::

      >>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
      0.0

    References
    ----------
    .. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
       Clusterings Comparison: Variants, Properties, Normalization and
       Correction for Chance, JMLR
       <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_

    .. [2] `Wikipedia entry for the Adjusted Mutual Information
       <http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Special limit case: neither labeling splits the data at all -- a
    # perfect match by convention, hence return 1.0.
    if n_classes == n_clusters and n_classes in (0, 1):
        return 1.0

    contingency = np.array(contingency_matrix(labels_true, labels_pred),
                           dtype='float')
    # Observed mutual information of the two clusterings ...
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    # ... its expected value under random labelings ...
    emi = expected_mutual_information(contingency, n_samples)
    # ... and the label entropies used for normalization.
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    return (mi - emi) / (max(h_true, h_pred) - emi)
def normalized_mutual_info_score(labels_true, labels_pred):
    """Normalized Mutual Information (NMI) between two clusterings.

    NMI rescales the Mutual Information into the range 0 (no mutual
    information) to 1 (perfect correlation) by dividing it by
    ``sqrt(H(labels_true) * H(labels_pred))``.

    The score is independent of the absolute label values and symmetric
    in its two arguments, but it is NOT adjusted for chance;
    ``adjusted_mutual_info_score`` may be preferred for that reason.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    Returns
    -------
    nmi: float
        score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    Examples
    --------
    >>> from sklearn.metrics.cluster import normalized_mutual_info_score
    >>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
    1.0
    >>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
    0.0
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]
    # Degenerate case: neither labeling splits the data (both sides have a
    # single class, or both are empty) -- treated as a perfect match.
    if n_classes == n_clusters and n_classes in (0, 1):
        return 1.0
    contingency = np.array(contingency_matrix(labels_true, labels_pred),
                           dtype='float')
    mi = mutual_info_score(labels_true, labels_pred,
                           contingency=contingency)
    # Normalize by the geometric mean of the two entropies; the 1e-10 floor
    # guards against division by zero for constant labelings.
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    return mi / max(np.sqrt(h_true * h_pred), 1e-10)
def entropy(labels):
    """Calculate the entropy H = -sum(p * log(p)) of a labeling.

    Parameters
    ----------
    labels : array-like
        Flat sequence of cluster labels.

    Returns
    -------
    float
        Entropy in nats; 1.0 by convention for an empty labeling.
    """
    if len(labels) == 0:
        return 1.0
    label_idx = np.unique(labels, return_inverse=True)[1]
    # np.float (a deprecated alias removed in NumPy 1.24) replaced by the
    # explicit np.float64; bare `unique`/`log` replaced by the np.* forms so
    # the function no longer depends on extra top-of-file name imports.
    pi = np.bincount(label_idx).astype(np.float64)
    pi = pi[pi > 0]
    pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) for
    # possible loss of precision
    return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum)))
| [
"szarate@dnanexus.com"
] | szarate@dnanexus.com |
a66207933164a09184cfbafd8103be05a5840217 | 204833b06d6b62a66cf60c966835d0876f84432e | /Constants.py | ee09c1a8cfc4c6f068b2057c868b723727983bbc | [] | no_license | dariodematties/Dirichlet | d03a9067bcedd882f9e3421dc5a35c592da0c360 | 7a69ea351e64110b699290268379b6ef2fc86e4b | refs/heads/master | 2018-11-10T11:02:49.439077 | 2018-08-21T17:37:26 | 2018-08-21T17:37:26 | 109,689,246 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | ENABLE_RANDOM_BEHAVIOUR = True;
| [
"dariodematties@yahoo.com.ar"
] | dariodematties@yahoo.com.ar |
17edec3a0cbd5397bc360dc2289f7aa23fef2f2b | 02122ec38633c178ced34d8a027addc919b4c200 | /Nutrients/api/urls.py | 757826e0b86fe90b0ab82e9e332d35f5dd0ee419 | [] | no_license | SIBU99/serverCVKM | 07907b3c416892bcc432b9317506927112750a93 | 8182f2274216016a15a2a98ea5a31d7e05222ed5 | refs/heads/master | 2023-01-12T10:19:54.966211 | 2020-11-10T08:33:41 | 2020-11-10T08:33:41 | 311,407,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | from django.urls import path
from .views import NutrientExamination
# URL routes for the Nutrients API app: requests to "nutrient-examination/"
# are dispatched to the NutrientExamination class-based view.
urlpatterns = [
    path("nutrient-examination/", NutrientExamination.as_view(), name="nutrient-examination"),
]
| [
"kumarmishra678@gmail.com"
] | kumarmishra678@gmail.com |
2f42da8393cd536ef56b1a0bef15efe947177b66 | d83118503614bb83ad8edb72dda7f449a1226f8b | /src/dprj/platinumegg/app/cabaret/views/mgr/model_edit/trade_shop.py | d402834b28b5ad1f8056bc5d4ec9eec808d29ae6 | [] | no_license | hitandaway100/caba | 686fe4390e182e158cd9714c90024a082deb8c69 | 492bf477ac00c380f2b2758c86b46aa7e58bbad9 | refs/heads/master | 2021-08-23T05:59:28.910129 | 2017-12-03T19:03:15 | 2017-12-03T19:03:15 | 112,512,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,156 | py | # -*- coding: utf-8 -*-
from platinumegg.app.cabaret.views.mgr.model_edit import AdminModelEditHandler,\
AppModelForm, ModelEditValidError, AppModelChoiceField
from defines import Defines
from platinumegg.app.cabaret.util.api import BackendApi
from platinumegg.app.cabaret.models.TradeShop import TradeShopMaster, TradeShopItemMaster
from platinumegg.app.cabaret.models.Schedule import ScheduleMaster
class Handler(AdminModelEditHandler):
    """Admin edit handler for trade-shop master data (TradeShopMaster).

    Validates the referenced schedule and the referenced trade-shop item
    master ids before any insert/update is written.
    """
    class Form(AppModelForm):
        class Meta:
            model = TradeShopMaster
            exclude = (
                Defines.MASTER_EDITTIME_COLUMN,
            )
        schedule = AppModelChoiceField(ScheduleMaster, required=False, label=u'期間')
    def setting_property(self):
        self.MODEL_LABEL = u'トレードショップ'
    def valid_insert(self, master):
        self.__valid_master(master)
    def valid_update(self, master):
        self.__valid_master(master)
    def __valid_master(self, master):
        # Shared validation path for both insert and update.
        model_mgr = self.getModelMgr()
        self.__check_schedule(model_mgr, master)
        self.__check_trade_shop_item_master_ids(model_mgr, master)
        model_mgr.write_all()
    def __check_schedule(self, model_mgr, master):
        # The referenced schedule id must exist in ScheduleMaster.
        model = model_mgr.get_model(ScheduleMaster, master.schedule)
        if model is None:
            raise ModelEditValidError(u'スケジュールに、存在しないIDが指定されています.id=%d' % master.id)
    def __check_trade_shop_item_master_ids(self, model_mgr, master):
        # Renamed from the original misspelling "...masetr_ids" (private
        # name-mangled method; the only caller is __valid_master above).
        # Every referenced id must exist in TradeShopItemMaster.
        if not isinstance(master.trade_shop_item_master_ids, list):
            raise ModelEditValidError(u'trade_shop_item_master_idsのJsonが壊れています.id=%d' % master.id)
        for trade_shop_item_master_id in master.trade_shop_item_master_ids:
            model = model_mgr.get_model(TradeShopItemMaster, trade_shop_item_master_id)
            if model is None:
                raise ModelEditValidError(u'trade_shop_item_master_idsで指定されているidがTradeShopItemMasterに存在しません.id=%d' % master.id)
def main(request):
return Handler.run(request) | [
"shangye@mail.com"
] | shangye@mail.com |
b27a50e038b03e30c82265c12688de6cc9a21df9 | 0ac34d1fad3ed7e18b3803a25878a8e3d74a259e | /messages_app/forms.py | 39b210591b39588f92dd76cf69d3813ca820b149 | [] | no_license | predictnonprofit/PredictME-WebApplication | b20a35a3ca9fcd0f8349cca83a75576afe96841c | 557864cf9b98188478b9661cba23477d3e16ff85 | refs/heads/main | 2023-08-12T12:01:53.865143 | 2021-10-06T18:40:01 | 2021-10-06T18:40:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | # -*- coding: utf-8 -*-#
from django.forms import ModelForm
from .models import MemberMessages
class MemberMessagesForm(ModelForm):
    """ModelForm for creating/editing a MemberMessages record.

    Exposes the sender, subject (plus a free-text "other" subject),
    attachment, message body and reply fields.
    """
    class Meta:
        model = MemberMessages
        fields = ('sender', 'subject', "other_subject", "attachment", 'message', "reply")
| [
"ibm_luq95@yahoo.com"
] | ibm_luq95@yahoo.com |
0829499a37fc13ac636386433fe887068436789a | b8ab0e1ac2634741a05e5fef583585b597a6cdcf | /wsltools/utils/faker/providers/date_time/fil_PH/__init__.py | 42a736439193745ecd672678cc198a9d48ef49e4 | [
"MIT"
] | permissive | Symbo1/wsltools | be99716eac93bfc270a5ef0e47769290827fc0c4 | 0b6e536fc85c707a1c81f0296c4e91ca835396a1 | refs/heads/master | 2022-11-06T16:07:50.645753 | 2020-06-30T13:08:00 | 2020-06-30T13:08:00 | 256,140,035 | 425 | 34 | MIT | 2020-04-16T14:10:45 | 2020-04-16T07:22:21 | Python | UTF-8 | Python | false | false | 829 | py | from .. import Provider as DateTimeProvider
class Provider(DateTimeProvider):
    """Date/time provider for the Filipino (fil_PH) locale.

    Overrides the day-of-week and month names with Filipino spellings,
    keyed by the numeric strings the base provider produces.
    """
    DAY_NAMES = {
        '0': 'Linggo',
        '1': 'Lunes',
        '2': 'Martes',
        '3': 'Miyerkules',
        '4': 'Huwebes',
        '5': 'Biyernes',
        '6': 'Sabado',
    }
    MONTH_NAMES = {
        '01': 'Enero',
        '02': 'Pebrero',
        '03': 'Marso',
        '04': 'Abril',
        '05': 'Mayo',
        '06': 'Hunyo',
        '07': 'Hulyo',
        '08': 'Agosto',
        '09': 'Setyembre',
        '10': 'Oktubre',
        '11': 'Nobyembre',
        '12': 'Disyembre',
    }
    def day_of_week(self):
        """Return the Filipino name for the weekday digit from ``self.date('%w')``."""
        return self.DAY_NAMES[self.date('%w')]
    def month_name(self):
        """Return the Filipino name for the month number from ``self.month()``."""
        return self.MONTH_NAMES[self.month()]
| [
"tr3jer@gmail.com"
] | tr3jer@gmail.com |
42e1c516f36f4fbc2863cfbb85713138553946f4 | e62ade72c9808b806a523a73908fa1032b10f9fc | /AlgorithmPrograms/InsertionSort.py | 42bdf35e24078997b64c895332640f2b507087c1 | [] | no_license | manjumugali/Python_Programs | 40b0b77586cc20d1f77b6035cdc67f62b5e9955e | 06934cb8037594dd4269f8c2fee3d301c27f624f | refs/heads/master | 2020-04-17T06:54:55.571579 | 2019-02-13T04:37:25 | 2019-02-13T04:37:25 | 166,344,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | """
******************************************************************************
* Purpose: Reads in strings from standard input and prints them in sorted order.Uses insertion sort.
*
* @author: Manjunath Mugali
* @version: 3.7
* @since: 16-01-2019
*
******************************************************************************
"""
import re
from Utility import UtilityTest
c1 = UtilityTest.TestFunctional()
class InsertionSort:
    # NOTE(review): this "class" body executes at import time -- it reads
    # stdin and prints immediately. It is a script wrapped in a class
    # statement, not a reusable type.
    try:
        print("Enter The String")
        str1 = input() # read the input sentence from stdin
        onlystr = re.sub('[^A-Za-z]+', ' ', str1) # replace every non-letter run with a space
        word = onlystr.split() # split the cleaned sentence into words on whitespace
        print("Before Sorting:")
        print(word)
        print("After Sorting:")
        sort = c1.insertionSort(word) # insertion-sort the word list via the Utility helper
        print(sort)
    except ValueError:
print("...........oops Something Went Wrong.........") | [
"manjumugali111@gmail.com"
] | manjumugali111@gmail.com |
ace388a41b74682d643ef7c6c7176d8cf1f6b831 | 3a5d8cdc7ac14c389fd9426f3f39c3b1dc906dda | /nautobot/extras/tests/test_jobs.py | e04668889b1dffc9a3853d2e190027a5f793514f | [
"Apache-2.0"
] | permissive | nammie-punshine/nautobot | f3cdb9d269c37a74706c105d237b883650f10465 | d6227b211ad89f25233a8791937cd75092421c8a | refs/heads/main | 2023-03-08T10:51:29.437859 | 2021-02-24T20:44:32 | 2021-02-24T20:44:32 | 342,080,836 | 0 | 0 | Apache-2.0 | 2021-02-25T01:01:36 | 2021-02-25T01:01:36 | null | UTF-8 | Python | false | false | 1,970 | py | import os
import uuid
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from nautobot.extras.choices import JobResultStatusChoices
from nautobot.extras.jobs import get_job, run_job
from nautobot.extras.models import JobResult
from nautobot.utilities.testing import TestCase
class JobTest(TestCase):
    """
    Test basic jobs to ensure importing works.
    """
    def _run_dummy_job(self, module, name):
        """Run the dummy job ``local/<module>/<name>`` without commit and return its JobResult.

        Shared setup extracted from the two test methods, which previously
        duplicated it line for line.
        """
        with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
            job_class = get_job(f"local/{module}/{name}")
            job_content_type = ContentType.objects.get(app_label="extras", model="job")
            job_result = JobResult.objects.create(
                name=job_class.class_path,
                obj_type=job_content_type,
                user=None,
                job_id=uuid.uuid4(),
            )
            run_job(data={}, request=None, commit=False, job_result=job_result)
            return job_result
    def test_job_pass(self):
        """
        Job test with pass result.
        """
        job_result = self._run_dummy_job("test_pass", "TestPass")
        self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)
    def test_job_fail(self):
        """
        Job test with fail result.
        """
        job_result = self._run_dummy_job("test_fail", "TestFail")
        self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)
| [
"lampwins@gmail.com"
] | lampwins@gmail.com |
9e0d2453761f2903b984c6806664e6a9cfb0d256 | acac3cf012920dc027ee4343a2e27f02338b342f | /pattern_matcher/dto/project_dto.py | 0f5eca12d949e8f11e254de62cc59310aa4eb2a3 | [] | no_license | HYUNMIN-KIM/flask_start | ff60592d27cdc510402b6b18f7c8642db929de44 | 8897e00dd29e5f7b3db5d1cec6d597a8edb2980e | refs/heads/master | 2023-01-19T01:32:27.202743 | 2020-11-18T03:52:17 | 2020-11-18T03:52:17 | 291,651,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | """
Project 전체에 대한 DTO
사이즈가 매우 큼
지식관리및학습서버와 대화작업서버간의 데이터 교환을 위해 사용됨
"""
from pattern_matcher.dto import triggering_pattern_dto
class ProjectDTO:
    """DTO holding an entire project (can be very large).

    Used to exchange data between the knowledge-management/learning server
    and the dialog-processing server.
    """
    def __init__(self):
        # Bug fix: the constructor was misspelled `__int__`, so it never ran.
        # NOTE(review): `triggering_pattern_dto` is imported at module level
        # and called here exactly as in the original -- confirm the intended
        # factory/constructor name.
        self._triggering_pattern_dto_list = triggering_pattern_dto()
    # getter
    @property
    def triggering_pattern_dto_list(self):
        # Bug fix: the original property returned itself (and the setter
        # assigned to itself), causing infinite recursion on every access.
        # A private backing attribute breaks the cycle.
        return self._triggering_pattern_dto_list
    @triggering_pattern_dto_list.setter
    def triggering_pattern_dto_list(self, triggering_pattern_dto_list):
        self._triggering_pattern_dto_list = triggering_pattern_dto_list
| [
"hogay88@wisenut.co.kr"
] | hogay88@wisenut.co.kr |
a56825bd2f75c83393aad08f9a63136c9a6cd561 | 393f30495e9cecebd6f8950d51b10c0817ed7d28 | /venv/task2_10.py | ad6a65bf495eaffc67787979cd14e929bce47380 | [] | no_license | Skornel/NNGASU_Domrachev_Python | 8f741d99a9b689e4c09a739ff42b0648da0cf24c | 9a996925ca6729178b7a439025508aad72d633ae | refs/heads/master | 2020-12-19T15:36:24.546269 | 2020-04-13T15:48:24 | 2020-04-13T15:48:24 | 235,776,259 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | s=[]
# Read a 4x4 matrix of values from stdin, one element at a time
# (prompts are in Russian; each entry is kept as the raw input string).
for i in range(4):
    b=[]
    print("Введите данные ",i+1," списка")
    for row in range(4):
        print("Вводи ",row+1," элемент ",i+1," списка ")
        b.append(input())
    s.append(b)
print(s)
# Scan the matrix for the numeric maximum and minimum; entries stay
# strings, so every comparison converts with int() on the fly.
maximum=0
minimum=1000
for i in range(len(s)):
    for j in range(len(s[i])):
        if int(s[i][j])>int(maximum):
            maximum=s[i][j]
        if int(s[i][j])<int(minimum):
            minimum=s[i][j]
print("Максимальное число:", maximum, "Минимальное: ", minimum, " Разность: ",int(maximum)-int(minimum)) | [
"Suslova2907@gmail.com"
] | Suslova2907@gmail.com |
40830d2e202a0447d24f36b58b901c90eba955bd | d1e3399db6973d639082bd24865bc0df538c0d8d | /ricommender_backend/settings.py | a51b80864b9dc2f358ec683a497db12f165cc5bd | [
"MIT"
] | permissive | reeechart/ricommender | a0c505f8eab6b7c381a41b919d3f5c3da02f61a2 | c5cdf1cb9db27b9fc4a2553aee2b705b9ad0b95a | refs/heads/master | 2020-04-22T12:13:35.198868 | 2019-05-12T16:42:25 | 2019-05-12T16:42:25 | 170,365,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,359 | py | """
Django settings for ricommender_backend project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# Bug fix: environment variables are strings, so the original
# `os.environ.get('DEBUG', False)` left DEBUG truthy for ANY non-empty
# value -- including the string 'False'. Parse the common truthy
# spellings into a real bool instead.
DEBUG = os.environ.get('DEBUG', 'False').lower() in ('true', '1', 'yes')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'ricommender_backend.authentication',
    'ricommender_backend.musicstreamer',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ricommender_backend.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'ricommender_backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Uses the djongo engine; the database name is taken from the
# DATABASE_NAME environment variable.
DATABASES = {
    'default': {
        'ENGINE': 'djongo',
        'NAME': os.environ.get('DATABASE_NAME'),
    },
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Django REST Framework
# Paginate API list endpoints, 20 items per page.
REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 20,
}
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"ferdinandusrichard@yahoo.co.id"
] | ferdinandusrichard@yahoo.co.id |
6316d5bbf61b883c8a1230ade79e25d1b8b68ce4 | 8b4521c046779bee7f0499d73e183851f198af14 | /server.py | b5d0f09fbb907e26b62a438d288a204612cc22b5 | [] | no_license | sugrospi/RPSLS | 80fa53a88f1531af03809716c44f10a937a125e4 | 1c50c9b3019dcc8f244f6ae2d1cba87d409bbd56 | refs/heads/master | 2023-07-24T15:21:35.423381 | 2021-09-07T15:13:47 | 2021-09-07T15:13:47 | 403,999,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,475 | py | import socket
from _thread import *
import pickle
from game import Game
# Server endpoint configuration. "IP_ADDRESS" is a placeholder that must be
# replaced with the host's real address before running.
server = "IP_ADDRESS"
port = 5555
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    s.bind((server, port))
except socket.error as e:
    # Bug fix: the original evaluated str(e) and discarded the result;
    # actually report the bind failure.
    print(str(e))
s.listen(2)
print("Waiting for a connection, Server Started")
# Shared server state: `games` maps game id -> Game instance,
# `idCount` counts connected clients (two clients per game).
connected = set()
games = {}
idCount = 0
def threaded_client(conn, p, gameId):
    """Per-client loop: receive commands from one player's socket and reply
    with the pickled shared Game state.

    Protocol: "get" returns the current state, "reset" resets the turn
    flags, any other payload is forwarded to Game.play as this player's
    move. The loop ends when the peer disconnects or the game is gone.
    """
    global idCount
    conn.send(str.encode(str(p)))
    reply = ""
    while True:
        try:
            data = conn.recv(4096).decode()
            if gameId in games:
                game = games[gameId]
                if not data:
                    break
                else:
                    if data == "reset":
                        game.resetWent()
                    elif data != "get":
                        game.play(p, data)
                    conn.sendall(pickle.dumps(game))
            else:
                break
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; any socket or game error just ends
            # this client's session, as before.
            break
    print("Lost connection")
    try:
        del games[gameId]
        print("Closing Game", gameId)
    except KeyError:
        # The other player's thread may already have removed the game.
        pass
    idCount -= 1
    conn.close()
# Main accept loop: pair every two incoming connections into one game and
# hand each connection to its own thread.
while True:
    conn, addr = s.accept()
    print("Connected to:", addr)
    idCount += 1
    p = 0
    gameId = (idCount - 1)//2
    if idCount % 2 == 1:
        # Odd connection: first player -- create a fresh game and wait.
        games[gameId] = Game(gameId)
        print("Creating a new game...")
    else:
        # Even connection: second player -- mark the game ready to start.
        games[gameId].ready = True
        p = 1
    start_new_thread(threaded_client, (conn, p, gameId))
| [
"s.shaggypi@gmail.com"
] | s.shaggypi@gmail.com |
b3cffcaaac0bef8d65f8fdbae1aa31e4b48f15ed | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/FiltersAndTransformers/Scripts/JoinIfSingleElementOnly/JoinIfSingleElementOnly.py | c91e49454d83bdef53b8f6eeabbd9dcc16b073fc | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 466 | py | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def return_first_element_if_single(value):
    """Unwrap single-element lists: return ``value[0]`` for a one-item
    list, otherwise return ``value`` unchanged."""
    if isinstance(value, list) and len(value) == 1:
        return value[0]
    return value
def main():  # pragma: no cover
    """Entry point: read the 'value' script argument, unwrap it when it is
    a single-element list, and return the result to the platform."""
    unwrapped = return_first_element_if_single(demisto.args()["value"])
    demisto.results(unwrapped)
# Demisto entry-point convention: the executing module name differs across
# runtimes ('__builtin__' on Python 2, 'builtins' on Python 3).
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| [
"noreply@github.com"
] | demisto.noreply@github.com |
9265eec49a0e583e02e5cb8418f517c93695b206 | be25988b4b92e16144315b3f3a45bb31c9036d87 | /FinalProject.py | a4cc0cff617ba3e9e95bdafd188cff1566c19015 | [] | no_license | cormag128/Python_Checkers | c658cd3ce9bbc03e770df6faed5ec1acb83326e1 | 4068b25a54d195a223c7bf73dfc4d4e18ac6c1cb | refs/heads/master | 2021-01-19T21:23:50.391239 | 2017-05-03T00:58:52 | 2017-05-03T00:58:52 | 88,652,979 | 1 | 0 | null | 2017-04-30T19:49:40 | 2017-04-18T17:38:32 | Python | UTF-8 | Python | false | false | 6,292 | py | # Final Project: Checkers AI
# Written by Thomas Walters and Trevor Jenkins
# The purpose of this project is to demonstrate a complex state-based
# program using heuristic programming to create a Checkers AI capable of
# beating a human in checkershttps://askubuntu.com/questions/827005/how-to-install-eric-6-on-ubuntu-16-04https://askubuntu.com/questions/827005/how-to-install-eric-6-on-ubuntu-16-04https://askubuntu.com/questions/827005/how-to-install-eric-6-on-ubuntu-16-04.
# Import random module for use later in program.
import random
# Class to output different errors that could be encountered during game.
class Errors:
    """Namespace of user-facing error messages for illegal checkers moves."""
    NotValid = "The space entered is not a valid move."
    ShortMove = "Move must start at current position and finish at another square."
    WrongPiece = "Player must move their own piece."
    OccupiedSpace = "Player must move to an empty space."
    MoveTooLong = "Player must move exactly two spaces."
    BackwardMove = "Only king can move backward."
    # NOTE(review): the implicit string concatenation below drops the space
    # before "if" ("...jumpsif they are possible") -- confirm and fix.
    MustJump = ("Player must jump opponent in this move, and must do multiple jumps"
                "if they are possible.")
    KingPiece = "Move terminates immediately if piece enters king's row."
    JumpMove = "If a move starts with a jump, only jumps can be performed."
    InvalidCapture = "Player can only capture opponent's pieces."
    InvalidMove = "Please move to an adjacent empty space, or jump the opponent."
# Class to populate and print board.
class Board():
    """Checkers board model (Python 2): builds the starting layout, prints
    the grid, and applies one-square diagonal moves with error messages.

    After setup(), `board` is an 8x8 grid where "//" marks an empty square
    and a piece is a two-character id such as "Wa"/"Bc" (colour + letter).
    """
    board = [" " * 8 for i in range(8)]
    error = Errors
    def __init__(self, width, height):
        self.width = width
        self.height = height
    def __repr__(self):
        # NOTE(review): __repr__ should return a string; this prints the raw
        # list and implicitly returns None.
        print(self.board)
    #function to place pieces on the board, stri is the name of the pieces
    # ("W" fills the bottom three rows; anything else fills the top three).
    def placepieces(self, stri):
        #if we want to place white pieces but on bottom 3 rows, use letters array for distinguishing
        #checkers pieces
        wnum = 0;
        bnum = 0;
        letters = ['a','b','c','d','e','f','g','h','i','j','k','l','m']
        if stri == "W":
            i = self.height - 3
            j = 0
            while i < self.height:
                j = 0
                while j < self.width:
                    # Row/column parity places pieces on alternating squares
                    # (checkerboard pattern).
                    if i % 2 == 0:
                        if j % 2 == 1:
                            # NOTE(review): both branches of this wnum < 10
                            # test are identical -- the check does nothing.
                            if wnum < 10:
                                self.board[i][j] = "W%s" % letters[wnum]
                                wnum += 1
                            else:
                                self.board[i][j] = "W%s" % letters[wnum]
                                wnum += 1
                        else:
                            pass
                    else:
                        if j % 2 == 1:
                            pass
                        else:
                            if wnum < 10:
                                self.board[i][j] = "W%s" % letters[wnum]
                                wnum += 1
                            else:
                                self.board[i][j] = "W%s" % letters[wnum]
                                wnum += 1
                    j += 1
                i += 1
        #else we want the black pieces, but on top 3 rows
        else:
            i = 0
            j = 0
            while i < 3:
                j = 0
                while j < self.width:
                    if i % 2 == 0:
                        if j % 2 == 1:
                            if bnum < 10:
                                self.board[i][j] = "B%s" % letters[bnum]
                                bnum += 1
                            else:
                                self.board[i][j] = "B%s" % letters[bnum]
                                bnum += 1
                        else:
                            pass
                    else:
                        if j % 2 == 1:
                            pass
                        else:
                            if bnum < 10:
                                self.board[i][j] = "B%s" % letters[bnum]
                                bnum += 1
                            else:
                                self.board[i][j] = "B%s" % letters[bnum]
                                bnum += 1
                    j += 1
                i += 1
    def setup(self):
        """Reset the grid to the standard starting position."""
        #slashes used as a placeholder for empty spaces
        self.board = [["//" for m in range(8)] for k in range(8)]
        # place white team checkers
        self.placepieces("W")
        #place black team checkers
        self.placepieces("B")
    #print the board itself out, also prints out piece names etc.
    def printboard(self):
        i = 0
        while i < self.height:
            j = 0
            print "---------------------------------------"
            while j < self.width:
                print "|%s|" % (self.board[i][j]),
                j += 1
            print ""
            i += 1
        print "---------------------------------------"
    # `str` is a piece id (e.g. "Wa"); `move` is a numpad-style direction:
    # 9 = up-right, 7 = up-left, 3 = down-right, 1 = down-left.
    # NOTE(review): the parameter shadows the builtin `str`, and the body
    # reprints via the module-level `board` global instead of `self`.
    def move(self,str,move):
        #find the location of the checker we are looking for, could be a function
        #that returns to a checkers class with wval, hval, and str for variables?
        i = 0
        j = 0
        wval = 0
        hval = 0
        while i < self.height:
            j = 0;
            while j < self.width:
                if self.board[i][j] == str:
                    hval = i
                    wval = j
                j += 1
            i += 1
        #white movement could be split into functions still needs checking for edges
        # needs to handle kings/queens, and no jump handling, jump function could
        # be made and replace the occupied space errors where a jump is possible
        if(str.startswith("W")):
            #moving up and to the right
            if move == 9:
                if self.board[hval - 1][wval + 1] == "//":
                    self.board[hval - 1][wval + 1] = self.board[hval][wval]
                    self.board[hval][wval] = "//"
                    board.printboard()
                #error handling
                else:
                    print ("%s") % (self.error.OccupiedSpace)
            # moving up and to the left
            elif move == 7:
                if self.board[hval - 1][wval - 1] == "//":
                    self.board[hval - 1][wval - 1] = self.board[hval][wval]
                    self.board[hval][wval] = "//"
                    board.printboard()
                # error handling
                else:
                    print ("%s") % (self.error.OccupiedSpace)
            # error handling for other moves
            elif move == 1:
                print ("%s") % (self.error.BackwardMove)
            elif move == 3:
                print ("%s") % (self.error.BackwardMove)
            else:
                print ("%s") % (self.error.InvalidMove)
        #black movement could be split into functions, still needs checking for edges
        # needs to handle kings/queens, and no jump handling, jump function could
        # be made and replace the occupied space errors where a jump is possible
        elif (str.startswith("B")):
            # moving down and to the left
            if move == 1:
                if self.board[hval + 1][wval - 1] == "//":
                    self.board[hval + 1][wval - 1] = self.board[hval][wval]
                    self.board[hval][wval] = "//"
                    board.printboard()
                #error handling
                else:
                    print ("%s") % (self.error.OccupiedSpace)
            # moving down and to the right
            elif move == 3:
                if self.board[hval + 1][wval + 1] == "//":
                    self.board[hval + 1][wval + 1] = self.board[hval][wval]
                    self.board[hval][wval] = "//"
                    board.printboard()
                # error handling
                else:
                    print ("%s") % (self.error.OccupiedSpace)
            # error handling
            elif move == 7:
                print ("%s") % (self.error.BackwardMove)
            elif move == 9:
                print ("%s") % (self.error.BackwardMove)
            else:
                print ("%s") % (self.error.InvalidMove)
# Start of the main script: build an 8x8 board, place the checkers for
# both sides, and print the starting position.
board = Board(8, 8)
board.setup()
board.printboard()
| [
"noreply@github.com"
] | cormag128.noreply@github.com |
2e2f74124954a3985bfb08d9d40e0bc56bc5fff2 | 6e373b40393fb56be4437c37b9bfd218841333a8 | /Level_6/Lecture_9/enroll/forms.py | a24e95e08208751aa12e95e489b7e6bdfa3638eb | [] | no_license | mahto4you/Django-Framework | 6e56ac21fc76b6d0352f004a5969f9d4331defe4 | ee38453d9eceea93e2c5f3cb6895eb0dce24dc2b | refs/heads/master | 2023-01-22T01:39:21.734613 | 2020-12-04T03:01:17 | 2020-12-04T03:01:17 | 318,383,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | from django.contrib.auth.models import User
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
class SignUpForm(UserCreationForm):
    """Registration form: the standard UserCreationForm plus an explicit
    password-confirmation field and basic profile fields."""
    password2 = forms.CharField(label='Confirm Password (again)', widget=forms.PasswordInput)
    class Meta:
        model = User
        fields = ['username', 'first_name', 'last_name', 'email']
        labels ={'email':'Email'}
class EditUserProfileForm(UserChangeForm):
    """Profile-edit form; the inherited password field is removed by
    setting it to None."""
    password = None
    class Meta:
        model = User
        fields = ['username', 'first_name', 'last_name', 'email', 'date_joined', 'last_login', 'is_active']
labels = {'email':'Email'} | [
"mahto4you@gmail.com"
] | mahto4you@gmail.com |
6249e0ffb60185954c5323d646f6ee5e4b97a4cc | 2be8a9f06d4003d12c0a727fb83d284c31a53050 | /HoudiniHotBox17.0/lib/PastFbx.py | a984bb3fb35778efa1d77ea747bb869b4f43016f | [] | no_license | LiuLiangFx/SmileHotBOX | 7551d9578b2defe612950cb8e3bffdb85024cede | 8bd8eac69b3c2a9824b9aa4488ca77789bea8d85 | refs/heads/master | 2021-01-01T10:22:26.959731 | 2020-02-09T03:16:32 | 2020-02-09T03:16:32 | 239,236,801 | 0 | 0 | null | 2020-02-09T02:47:18 | 2020-02-09T02:47:18 | null | UTF-8 | Python | false | false | 3,133 | py | import hou
class PastFbx:
    """Houdini helper (Python 2): rebuilds an object_merge/attribcreate/
    merge node network from the node paths listed in 'list.txt', where each
    line's fields are separated by '++'.
    """
    def __init__(self):
        pass
    def checkNode(self,node, name,temp1 =0):
        """Return the sibling of `node` named `name`, or 0 if none exists."""
        for childrenNode in node.parent().children():
            if childrenNode.name() == name:
                temp1 =childrenNode
        return temp1
    def checkInput(self,qian,hou1,temp=0):
        """Return `hou1` if `qian` feeds one of its inputs, else 0."""
        # NOTE(review): `temp` is reset to 0 on every non-matching input, so
        # effectively only the LAST input of hou1 decides the result --
        # confirm whether that is intended.
        if hou1.inputs() ==():
            pass
        else:
            for node in hou1.inputs():
                if node == qian:
                    temp =hou1
                else:
                    temp =0
        return temp
    def creatNode(self,node,temp ):
        """Chain `node` through merge nodes named by the entries of `temp`,
        reusing an existing merge when one with that name is found."""
        for mergeName in temp:
            serachNode = self.checkNode(node, mergeName)
            if serachNode :
                houNode = self.checkInput(node, serachNode )
                if houNode ==0:
                    serachNode.setInput(100,node)
                    node = serachNode
                else:
                    node = houNode
            else:
                merge = node.createOutputNode("merge",mergeName)
                node = merge
    def run(self):
        """Read list.txt and create one object_merge + attribute chain per
        line, printing progress as it goes."""
        plane = hou.ui.paneTabOfType(hou.paneTabType.NetworkEditor)
        pos = plane.selectPosition()
        pos1 = pos
        node = plane.currentNode()
        fl1=open('list.txt', 'r')
        a= len( fl1.readlines())
        check = 0
        fl1.close()
        for index in range(a):
            pos[0] +=1
            try:
                null = node.createNode("object_merge")
            except:
                # Fall back to creating the node in the parent network when
                # creation fails on the current node.
                b = node.parent()
                null =b.createNode("object_merge")
            null.setPosition(pos)
            # Re-open the file each iteration and take the line for this
            # index (trailing newline stripped); fields split on '++'.
            fl1=open('list.txt', 'r')
            path = fl1.readlines()[index][0:-1]
            allPath= path.split("++")
            null.parm("objpath1").set(allPath[0])
            null.parm("xformtype").set("local")
            attNode = null.createOutputNode("attribcreate")
            attNode.parm("name1").set("shop_materialpath")
            attNode.parm("type1").set("index")
            attNode.parm("string1").set("/shop/"+ allPath[-1])
            attNode.parm("class1").set("primitive")
            catchNode = attNode.createOutputNode("catche_tool_1.0.1")
            catchNode.bypass(1)
            currentNode =catchNode
            self.creatNode(currentNode,allPath[1:-1] )
            comping =int((index*1.0/(a-1))*100 )
            fl1.close()
            print "CreatNode for " + null.name()+","+" Comping: " + str(comping)+"%"
        print "\nCopy node success!!!!"
| [
"change52092@yahoo.com"
] | change52092@yahoo.com |
2ffa30e97c07a6799d516290ff2c899a4253a39e | eff8f2e795566c7aad10e9e3e7b0ffcafcc19145 | /Set_difference().py | 727190aca895eb0eb82fe4954ad9530732eca9d6 | [] | no_license | Siddu02june/HackerRank-Sets | b27aa2f3679a6f3324d4daa3c5dffe28f08ebed7 | a5a3b56d1bfbdd8b58b2df246e397000deaddbf6 | refs/heads/main | 2023-05-31T08:10:31.756639 | 2021-06-14T09:24:10 | 2021-06-14T09:24:10 | 376,763,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | #Set_difference()
# Read a count E and E values, then a count F and F values, and print how
# many values appear in the first list but not in the second.
E = int(input())
English = list(input().split()[:E])
F = int(input())
French = list(input().split()[:F])
# set difference keeps only the elements unique to the first set
print(len(set(English)-set(French)))
'''
Input (stdin)
9
1 2 3 4 5 6 7 8 9
9
10 1 2 3 11 21 55 6 8
Your Output (stdout)
4
Expected Output
4
'''
| [
"noreply@github.com"
] | Siddu02june.noreply@github.com |
b1cbde3a104f2e0b81d5ab63350c31dd2307980a | 0a4432a13600e025937af3b2554ae048321a50bb | /sphinx/conf.py | e717ad87e5ed58f5acf5bbcd3588642b18e4451a | [
"Apache-2.0"
] | permissive | PourroyJean/ProgrammingNote | 343c6a7fa3324b85cb7ed88e1bb794599d645c80 | 33fc7e64b3e5f44d1acde266df280f944d56674b | refs/heads/master | 2023-01-08T17:52:39.833582 | 2020-06-09T08:09:08 | 2020-06-09T08:09:08 | 87,910,140 | 2 | 0 | Apache-2.0 | 2022-12-27T15:01:06 | 2017-04-11T08:35:30 | Jupyter Notebook | UTF-8 | Python | false | false | 4,880 | py | # -*- coding: utf-8 -*-
#
# Notes Jean documentation build configuration file, created by
# sphinx-quickstart on Fri May 12 14:54:18 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinxjp.themes.revealjs']
# NOTE(review): html_theme is set to 'revealjs' here but reassigned to
# 'alabaster' further down this file; the later assignment wins, so the
# reveal.js theme is never actually applied.
html_theme = 'revealjs'
html_use_index = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ['.txt', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Notes Jean'
copyright = u'2017, Jean Pourroy'
author = u'Jean Pourroy'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1'
# The full version, including alpha/beta/rc tags.
release = u'1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'fr'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'NotesJeandoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'NotesJean.tex', u'Notes Jean Documentation',
u'Jean Pourroy', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'notesjean', u'Notes Jean Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'NotesJean', u'Notes Jean Documentation',
author, 'NotesJean', 'One line description of project.',
'Miscellaneous'),
]
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
| [
"jean@Nano-ubuntu-VM.ielbyy3bjwuuredtcfjnooi3gd.ax.internal.cloudapp.net"
] | jean@Nano-ubuntu-VM.ielbyy3bjwuuredtcfjnooi3gd.ax.internal.cloudapp.net |
bb69649a492b5bb2e5ee249630dca2d8b04e8c78 | 8f1996c1b5a0211474c7fa287be7dc20a517f5f0 | /batch/batch/cloud/driver.py | 96349e4c4d578c9209d5ffabef4590256096a62d | [
"MIT"
] | permissive | johnc1231/hail | 9568d6effe05e68dcc7bf398cb32df11bec061be | 3dcaa0e31c297e8452ebfcbeda5db859cd3f6dc7 | refs/heads/main | 2022-04-27T10:51:09.554544 | 2022-02-08T20:05:49 | 2022-02-08T20:05:49 | 78,463,138 | 0 | 0 | MIT | 2022-03-01T15:55:25 | 2017-01-09T19:52:45 | Python | UTF-8 | Python | false | false | 936 | py | from hailtop import aiotools
from gear import Database
from gear.cloud_config import get_global_config
from ..inst_coll_config import InstanceCollectionConfigs
from ..driver.driver import CloudDriver
from .azure.driver.driver import AzureDriver
from .gcp.driver.driver import GCPDriver
async def get_cloud_driver(
app,
db: Database,
machine_name_prefix: str,
namespace: str,
inst_coll_configs: InstanceCollectionConfigs,
credentials_file: str,
task_manager: aiotools.BackgroundTaskManager,
) -> CloudDriver:
cloud = get_global_config()['cloud']
if cloud == 'azure':
return await AzureDriver.create(
app, db, machine_name_prefix, namespace, inst_coll_configs, credentials_file, task_manager
)
assert cloud == 'gcp', cloud
return await GCPDriver.create(
app, db, machine_name_prefix, namespace, inst_coll_configs, credentials_file, task_manager
)
| [
"noreply@github.com"
] | johnc1231.noreply@github.com |
c9982973398a7bada2df68e8686fc6deee8ab7a5 | 3251eb404404da4f2cd49d7a77baf67c928453a2 | /src/membership/migrations/0003_coordinator_coordinator_image.py | 469ac79a801237f480cb16c68c332d19318926e5 | [
"MIT"
] | permissive | gatortechuf/gatortechuf.com | 6575abd26ce1382cfcb6255a28e1d202910c86ba | 8d0ad5f0772a42113c41bf454e96c2fa2c22d1f3 | refs/heads/master | 2020-05-22T06:49:09.580887 | 2018-02-03T17:42:02 | 2018-02-03T17:42:02 | 61,415,657 | 2 | 0 | MIT | 2018-02-03T17:42:03 | 2016-06-18T03:47:11 | Python | UTF-8 | Python | false | false | 489 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-28 18:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('membership', '0002_leader_leader_image'),
]
operations = [
migrations.AddField(
model_name='coordinator',
name='coordinator_image',
field=models.ImageField(blank=True, upload_to='membership'),
),
]
| [
"ryandsheppard95@gmail.com"
] | ryandsheppard95@gmail.com |
5dfb79becde51feb01c67400ff548446d6963775 | 0cb38adedbe3a5192076de420e1aa0fd10ae3311 | /return_merchandise_authorizations/admin.py | 213dea63a59221b56ba699e6a457f59ff5076d67 | [] | no_license | fogcitymarathoner/rma | 73ada816b98f068b6c00b2e1fcf39461259453fa | 133d6026f99820d0702f0578b8a3b4574671f888 | refs/heads/master | 2021-01-11T00:32:47.797673 | 2016-10-10T18:34:54 | 2016-10-10T18:35:11 | 70,516,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | from django.contrib import admin
from return_merchandise_authorizations.models import Rma
from return_merchandise_authorizations.models import Item
from return_merchandise_authorizations.models import RmaAttachment
class ItemInline(admin.TabularInline):
model = Item
class AttachInline(admin.TabularInline):
model = RmaAttachment
class RmaAdmin(admin.ModelAdmin):
list_display = ('date', 'customer', 'case_number', 'reference_number', 'address')
search_fields = ('case_number', 'reference_number', 'address', 'issue')
inlines = [
ItemInline,
AttachInline
]
#
admin.site.register(Rma, RmaAdmin)
class ItemAdmin(admin.ModelAdmin):
list_display = ('note', 'quantity')
#
admin.site.register(Item, ItemAdmin) | [
"marc@fogtest.com"
] | marc@fogtest.com |
7796231c8f937912e9ccd9dd1399da035526bee6 | 55c0254b9889235844ca2fcfa5b80e6aedeb4841 | /Book_app/wsgi.py | ea116599419347d50d5b310f5c940541109e1334 | [] | no_license | AKSHAY-KR99/book_project | a75761a40c544fe4ad38ebcdd01b9d524e5f8ea8 | 019b316ec97395ac080be86333d7902b7c590271 | refs/heads/master | 2023-05-30T05:09:12.888518 | 2021-06-15T11:03:47 | 2021-06-15T11:03:47 | 377,130,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for Book_app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Book_app.settings')
application = get_wsgi_application()
| [
"ashayakr4@gmail.com"
] | ashayakr4@gmail.com |
ce174f06a5f69d56fb0614a1766754e87ea39d0d | 0a983eebf91ad9a4342c8be92e1223b5d6ac28e1 | /setup.py | 13acd1717a2d8232655e0886d603d21d4dc7db71 | [
"MIT"
] | permissive | davidkwast/db_tools | 7bb1c6eeb01be81863e33edc38c60df8b31785b9 | 6881a3ce1d5bfb8634e01fc797d5e1dec6cd4891 | refs/heads/master | 2020-03-26T06:03:43.874788 | 2018-09-04T23:16:18 | 2018-09-04T23:16:18 | 144,587,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | from setuptools import setup
setup(name='db_tools',
version='0.0',
description='Python database tools',
url='https://github.com/davidkwast/db_tools',
author='David Kwast',
author_email='david@kwast.me',
license='MIT',
packages=['db_tools'],
zip_safe=False)
| [
"david@kwast.me"
] | david@kwast.me |
d86790151df1e4863c98a6064062d24f7876ecb4 | 192cc298bc78889873fc932041c543bdc7b54bbb | /Cashier program.py | 05d4371d74c7fd8262e8a02db5d9de3ddfc59112 | [] | no_license | winter4w/Cashier-Program | 01af06ccbd9d06fa509861e9a57094ad8ed100d6 | 8126ae412d7de21e95a415b5242508cb6d9df126 | refs/heads/master | 2020-09-22T16:41:06.845507 | 2019-12-02T06:59:42 | 2019-12-02T06:59:42 | 225,275,559 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,397 | py |
import os
import time
import sys
import math
class Cashier():
def getDollars(self, a):
dol = int(math.floor(a))
return dol
def getQuarters(self, a):
qua = int(math.floor(a / .25))
return qua
def getDimes(self, a):
dim = int(math.floor(a / .10))
return dim
def getNickels(self, a):
nic = int(math.floor(a / .05))
return nic
def getPennies(self, a):
pen = int(a / .01 +.1)
return pen
def newChange(self, a, coin_value , numberofcoins):
return a - coin_value * numberofcoins
myChange = Cashier()
while True:
print("")
print("Enter the amount due in dollars and cents: ")
amountDue = float(raw_input("$"))
print("")
amountReceived = float(raw_input("Enter the amount received: $"))
print("")
change = amountReceived - amountDue
if amountDue > amountReceived:
print("The customer has payed less than the cost")
else:
dolSolve = myChange.getDollars(change)
change = myChange.newChange(change, 1, dolSolve)
quaSolve = myChange.getQuarters(change)
change = myChange.newChange(change, .25, quaSolve)
dimSolve = myChange.getDimes(change)
change = myChange.newChange(change, .10, dimSolve)
nicSolve = myChange.getNickels(change)
change = myChange.newChange(change, .05, nicSolve)
penSolve = myChange.getPennies(change)
print("Give the customer")
print(str(dolSolve) + " Dollars")
print(str(quaSolve) + " Quarters")
print(str(dimSolve) + " Dimes")
print(str(nicSolve) + " Nickels")
print(str(penSolve) + " Pennies")
print("")
choiceQuit = raw_input ("If you will like to quit this program type 'quit' otherwise press enter:")
os.system('cls')
if choiceQuit == "quit":
break
else:
True
os.system('cls')
print("The Program is now closeing!")
print ("5")
time.sleep(1)
os.system('cls')
print("The Program is now closeing!")
print ("4")
time.sleep(1)
os.system('cls')
print("The Program is now closeing!")
print ("3")
time.sleep(1)
os.system('cls')
print("The Program is now closeing!")
print ("2")
time.sleep(1)
os.system('cls')
print("The Program is now closeing!")
print ("1")
sys.exit()
| [
"winter4w@users.noreply.github.com"
] | winter4w@users.noreply.github.com |
abb8c70131d77c3c5abbae2840f52ba202b21851 | fb56624f35821c0714b516c30831953da8f8d131 | /run_fm_exp/scripts/select_params_ps.py | 6067d585576b36567a88aa013e419cab74d70423 | [] | no_license | jyhsia5174/pos-bias-exp-code | b27e31f6604420afae4aa4f2c9e6161ae7705bc4 | 913a00e6707482fd2122ec2c957e0dc8ebc3e7cc | refs/heads/master | 2022-12-26T20:09:50.893942 | 2020-10-05T07:23:04 | 2020-10-05T07:23:04 | 222,909,534 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | import os, sys
root = sys.argv[1]
flag = 1 if sys.argv[2] == 'auc' else 0
log_paths = [os.path.join(root, f) for f in os.listdir(root) if f.endswith('log')]
records = {}
for lp in log_paths:
records[lp] = [0., 1000., 0.] # iter, min_logloss, max_auc
with open(lp) as f:
for i, line in enumerate(f):
if i < 2:
continue
line = line.strip().split(' ')
line = [s for s in line if s != '']
iter_num = float(line[0])
logloss = float(line[-2])
auc = float(line[-1])
if flag:
if auc > records[lp][-1]:
records[lp][0] = iter_num
records[lp][1] = logloss
records[lp][2] = auc
else:
if logloss < records[lp][1]:
records[lp][0] = iter_num
records[lp][1] = logloss
records[lp][2] = auc
if flag:
params = sorted(records.items(), key=lambda x: x[-1][-1], reverse=flag)[0]
else:
params = sorted(records.items(), key=lambda x: x[-1][-2], reverse=flag)[0]
print(params[0].split('/')[-1].split('.')[0], params[0].split('/')[-1].split('.')[2], int(params[1][0]), params[1][1], params[1][2],)
| [
"d08944012@ntu.edu.tw"
] | d08944012@ntu.edu.tw |
659788c034719b344f80307e0abf95f56aae99d2 | d16f2636a1157fde2eda16064b89dc6299d6c1fa | /main.py | 67691a8900c0347a4823718220af8a4e5fbfb262 | [] | no_license | razer89/Calculator | a762dd200074c7bd143fe087bf3752b92777f5c1 | cd2477c641f9f1ae08f72aea5f93bc5854dc240b | refs/heads/main | 2023-07-04T04:09:32.166568 | 2021-08-10T13:36:09 | 2021-08-10T13:36:09 | 382,019,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | num1 = int(input("Enter num1: "))
num2 = int(input("Enter num2: "))
action = str(input("Choose action: Add(a), Sub(s), Mult(m) Div(d) ->"))
print("The result is ",end="")
if action == "a":
print(num1+num2)
elif action == "s":
print(num1-num2)
elif action == "m":
print(num1*num2)
else:
print(num1/num2) | [
"49878506+razer89@users.noreply.github.com"
] | 49878506+razer89@users.noreply.github.com |
3a3bf2a75f8238a4f8a98e775a43ea60086f6668 | 87521e0ce35095d06f8cd2e0890f8b73f9ec0511 | /training_window.py | 3a083317b9c5a9109bcbb974ec32216694347011 | [] | no_license | chamara96/voice-command-rnn | 20fa6446e44a72c78113528b598756b545c1529d | e6847af88e09e01ddf06f1d6cdd1b0835d30ba4f | refs/heads/main | 2023-01-02T12:12:28.542385 | 2020-11-01T06:52:28 | 2020-11-01T06:52:28 | 308,967,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,464 | py | import sys
from PIL import ImageTk, Image
import time
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
from tkinter import messagebox
import neural_network
import dataset_handling
def update_thread():
# global is_stop
time.sleep(5)
is_train_end = 0
while not is_train_end:
is_train_end = dataset_handling.end_train
# text = ""
w.Label_log.delete("1.0", tk.END)
if is_stop == 1:
break
curr_epoch, total_epoches = neural_network.check_curr_epoch()
w.TProgressbar1['value'] = int((curr_epoch) * 100 / total_epoches)
filename = "checkpoints/log.txt"
try:
with open(filename) as f:
text = f.read()
except IOError:
text = ""
w.Label_log.insert("1.0", text)
try:
img = Image.open("checkpoints/fig.jpg")
except IOError:
img = Image.open("classes/wait.png")
basewidth = 550
wpercent = (basewidth / float(img.size[0]))
hsize = int((float(img.size[1]) * float(wpercent)))
img = img.resize((basewidth, hsize), Image.ANTIALIAS)
img = ImageTk.PhotoImage(img)
w.Label_plot['image'] = img
w.Label_plot.image = img
sys.stdout.flush()
print("Updated")
time.sleep(2)
messagebox.showinfo("Training", "Done..!")
def init(top, gui, *args, **kwargs):
global w, top_level, root
w = gui
top_level = top
root = top
def btn_stop():
destroy_window()
sys.stdout.flush()
is_stop = 0
def btn_update_view():
curr_epoch, total_epoches = neural_network.check_curr_epoch()
w.TProgressbar1['value'] = int(curr_epoch * 100 / total_epoches)
filename = "checkpoints/log.txt"
try:
with open(filename) as f:
text = f.read()
except IOError:
text = ""
w.Label_log.insert("1.0", text)
try:
img = Image.open("checkpoints/fig.jpg")
except IOError:
img = Image.open("classes/wait.png")
basewidth = 550
wpercent = (basewidth / float(img.size[0]))
hsize = int((float(img.size[1]) * float(wpercent)))
img = img.resize((basewidth, hsize), Image.ANTIALIAS)
img = ImageTk.PhotoImage(img)
w.Label_plot['image'] = img
w.Label_plot.image = img
sys.stdout.flush()
print("Updated")
# time.sleep(2)
def destroy_window():
global is_stop
is_stop=1
print("QQWWEERR")
# Function which closes the window.
global top_level
top_level.destroy()
top_level = None
sys.exit()
def vp_start_gui():
'''Starting point when module is the main routine.'''
global val, w, root
root = tk.Tk()
top = Toplevel1 (root)
init(root, top)
root.mainloop()
w = None
def create_Toplevel1(rt, *args, **kwargs):
'''Starting point when module is imported by another module.
Correct form of call: 'create_Toplevel1(root, *args, **kwargs)' .'''
global w, w_win, root
#rt = root
root = rt
w = tk.Toplevel (root)
top = Toplevel1 (w)
init(w, top, *args, **kwargs)
return (w, top)
def destroy_Toplevel1():
global w
w.destroy()
w = None
class Toplevel1:
def __init__(self, top=None):
'''This class configures and populates the toplevel window.
top is the toplevel containing window.'''
_bgcolor = '#d9d9d9' # X11 color: 'gray85'
_fgcolor = '#000000' # X11 color: 'black'
_compcolor = '#d9d9d9' # X11 color: 'gray85'
_ana1color = '#d9d9d9' # X11 color: 'gray85'
_ana2color = '#ececec' # Closest X11 color: 'gray92'
self.style = ttk.Style()
if sys.platform == "win32":
self.style.theme_use('winnative')
self.style.configure('.',background=_bgcolor)
self.style.configure('.',foreground=_fgcolor)
self.style.map('.',background=
[('selected', _compcolor), ('active',_ana2color)])
top.geometry("1245x656+220+79")
top.minsize(120, 1)
top.maxsize(2650, 1005)
top.resizable(0, 0)
top.title("Training Model")
top.configure(background="#d9d9d9")
self.Labelframe1 = tk.LabelFrame(top)
self.Labelframe1.place(x=20, y=40, height=600, width=600)
self.Labelframe1.configure(relief='groove')
self.Labelframe1.configure(foreground="black")
self.Labelframe1.configure(text='''Log''')
self.Labelframe1.configure(background="#d9d9d9")
self.Label_log=tk.Text(self.Labelframe1)
# self.Label_log = tk.Label(self.Labelframe1)
self.Label_log.place(x=20, y=30, height=551, width=564
, bordermode='ignore')
# self.Label_log.configure(anchor='nw')
self.Label_log.configure(background="#d9d9d9")
# self.Label_log.configure(disabledforeground="#a3a3a3")
self.Label_log.configure(foreground="#000000")
# self.Label_log.configure(text='''Label''')
self.Labelframe2 = tk.LabelFrame(top)
self.Labelframe2.place(x=630, y=40, height=600, width=600)
self.Labelframe2.configure(relief='groove')
self.Labelframe2.configure(foreground="black")
self.Labelframe2.configure(text='''Training Curves''')
self.Labelframe2.configure(background="#d9d9d9")
self.Label_plot = tk.Label(self.Labelframe2)
self.Label_plot.place(x=20, y=30, height=551, width=554
, bordermode='ignore')
self.Label_plot.configure(anchor='nw')
self.Label_plot.configure(background="#d9d9d9")
self.Label_plot.configure(disabledforeground="#a3a3a3")
self.Label_plot.configure(foreground="#000000")
self.Label_plot.configure(text='''Label''')
self.Button1 = tk.Button(top)
self.Button1.place(x=1100, y=10, height=34, width=127)
self.Button1.configure(activebackground="#ececec")
self.Button1.configure(activeforeground="#000000")
self.Button1.configure(background="#d9d9d9")
self.Button1.configure(command=btn_stop)
self.Button1.configure(disabledforeground="#a3a3a3")
self.Button1.configure(foreground="#000000")
self.Button1.configure(highlightbackground="#d9d9d9")
self.Button1.configure(highlightcolor="black")
self.Button1.configure(pady="0")
self.Button1.configure(text='''Stop''')
self.TProgressbar1 = ttk.Progressbar(top)
self.TProgressbar1.place(x=20, y=10, width=600, height=22)
self.TProgressbar1.configure(length="600")
self.TProgressbar1.configure(value="10")
# self.Button2 = tk.Button(top)
# self.Button2.place(x=980, y=10, height=34, width=117)
# self.Button2.configure(activebackground="#ececec")
# self.Button2.configure(activeforeground="#000000")
# self.Button2.configure(background="#d9d9d9")
# self.Button2.configure(command=btn_update_view)
# self.Button2.configure(disabledforeground="#a3a3a3")
# self.Button2.configure(foreground="#000000")
# self.Button2.configure(highlightbackground="#d9d9d9")
# self.Button2.configure(highlightcolor="black")
# self.Button2.configure(pady="0")
# self.Button2.configure(text='''Update View''')
if __name__ == '__main__':
vp_start_gui()
| [
"cmb.info96@gmail.com"
] | cmb.info96@gmail.com |
9ffedfdbb5aa841be3b526cd48ec2b1a4d37799e | 459e0f34dfbc818763edf153152711a11c2efbe3 | /pythonscript/billing.py | 65cc9ddd17973536698c22abf0b14a204bd7a018 | [] | no_license | tariqcoupa/experiments | 15523c7f60edcb3078169fb9f407915f859ef91d | 3323add34d66ebc76d91124c7358abd639d9317a | refs/heads/master | 2021-04-05T23:52:21.718538 | 2018-03-03T12:31:49 | 2018-03-03T12:31:49 | 124,418,067 | 0 | 0 | null | 2018-03-08T16:26:12 | 2018-03-08T16:26:12 | null | UTF-8 | Python | false | false | 566 | py | #!/usr/bin/python
import SoftLayer
import json
import sys
client = SoftLayer.Client(username='prod.tariq', api_key='53c53cba25872849417fcc1794f9acdeb91c6680f597ddf76488aa4e4d999e51')
object_mask = "mask[id]"
object_mask2 = """mask[hostname,billingItem.nextInvoiceTotalRecurringAmount]"""
user_info = client['Account'].getHardware(mask=object_mask)
mgr = SoftLayer.HardwareManager(client)
for json_dict in user_info:
for key,value in json_dict.iteritems():
hardware_info = mgr.get_hardware(hardware_id=value,mask=object_mask2)
print hardware_info
| [
"tarsidd@gmail.com"
] | tarsidd@gmail.com |
d7e5e857f01d9f595c4e22550aeb3ed978f814ef | f7378f4038882c3de627a7d1262790f649f5e89b | /dataset.py | 77564e166a38e75ee487cdf75078cb3d77632132 | [] | no_license | edui/imogiz-mobileunet | 176301a5238b0ab354b2fcf0a666c2820cbc165d | 49757428b9fc320211b417450f2e883d9d444225 | refs/heads/main | 2023-08-10T18:32:55.037061 | 2021-09-27T22:39:39 | 2021-09-27T22:39:39 | 408,748,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,710 | py | import random
import re
from glob import glob
import cv2
import numpy as np
import pandas as pd
from PIL import Image
import torch
from torch.utils.data import Dataset
import torchvision
from config import IMG_DIR
def _mask_to_img(mask_file):
img_file = re.sub('^{}/masks'.format(IMG_DIR),
'{}/images'.format(IMG_DIR), mask_file)
img_file = re.sub('\.ppm$', '.jpg', img_file)
return img_file
def _img_to_mask(img_file):
mask_file = re.sub('^{}/images'.format(IMG_DIR),
'{}/masks'.format(IMG_DIR), img_file)
# mask_file = re.sub('\.jpg$', '.ppm', mask_file)
return mask_file
def get_img_files_eval():
mask_files = sorted(glob('{}/masks/*.jpg'.format(IMG_DIR)))
return np.array([_mask_to_img(f) for f in mask_files])
def get_img_files():
mask_files = sorted(glob('{}/masks/*.jpg'.format(IMG_DIR)))
# mask_files = mask_files[:10000]
sorted_mask_files = []
# Sorting out
for msk in mask_files:
# Sort out black masks
msk_img = cv2.imread(msk)
if len(np.where(msk_img == 1)[0]) == 0:
continue
# Sort out night images
img_path = re.sub('^{}/masks'.format(IMG_DIR),
'{}/images'.format(IMG_DIR), msk)
img = cv2.imread(img_path)
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
higher_img = gray_image[0:120, :]
if np.average(higher_img) > 100:
# Day image, so append
sorted_mask_files.append(msk)
# return np.array([_mask_to_img(f) for f in mask_files])
return np.array([_mask_to_img(f) for f in sorted_mask_files])
class MaskDataset(Dataset):
def __init__(self, img_files, transform, mask_transform=None, mask_axis=0):
self.img_files = img_files
self.mask_files = [_img_to_mask(f) for f in img_files]
self.transform = transform
if mask_transform is None:
self.mask_transform = transform
else:
self.mask_transform = mask_transform
self.mask_axis = mask_axis
def __getitem__(self, idx):
img = cv2.imread(self.img_files[idx])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
mask = cv2.imread(self.mask_files[idx])
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)
mask = mask[:, :, self.mask_axis]
seed = random.randint(0, 2 ** 32)
# Apply transform to img
random.seed(seed)
img = Image.fromarray(img)
img = self.transform(img)
# Apply same transform to mask
random.seed(seed)
mask = Image.fromarray(mask)
mask = self.mask_transform(mask)
return img, mask
def __len__(self):
return len(self.img_files)
class MogizDataset(Dataset):
def __init__(self, ds_dir, ds_name, transform, mask_transform=None, mask_axis=0):
self.df = pd.read_csv(ds_dir + ds_name, header=None)
self.ds_dir = ds_dir
self.transform = transform
if mask_transform is None:
self.mask_transform = transform
else:
self.mask_transform = mask_transform
self.mask_axis = mask_axis
def __getitem__(self, idx):
image_name = self.df.iloc[idx, 0]
mask_name = self.df.iloc[idx, 1]
joint_name = self.df.iloc[idx, 2]
height = torch.from_numpy(
np.array([self.df.iloc[idx, 3]/100])).type(torch.FloatTensor)
weight = torch.from_numpy(
np.array([self.df.iloc[idx, 4]/100])).type(torch.FloatTensor)
img = cv2.imread(self.ds_dir + image_name)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
mask = cv2.imread(self.ds_dir + mask_name)
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)
mask = mask[:, :, self.mask_axis]
# For Heatmaps
#joint = np.load(self.ds_dir + joint_name).astype('int64')
#joint = torch.from_numpy(joint)
joint = height # not used
seed = random.randint(0, 2 ** 32)
# Apply transform to img
random.seed(seed)
img = Image.fromarray(img)
img = self.transform(img)
# Apply same transform to mask
random.seed(seed)
mask = Image.fromarray(mask)
mask = self.mask_transform(mask)
# return img, mask, height
return {'i': img, 'l': mask, 'j': joint, 'h': height, 'w': weight}
def __len__(self):
return len(self.df)
if __name__ == '__main__':
pass
#
# mask = cv2.imread('{}/masks/Aaron_Peirsol_0001.ppm'.format(IMG_DIR))
# mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)
# mask = mask[:, :, 0]
# print(mask.shape)
# plt.imshow(mask)
# plt.show()
| [
"edui.bin@gmail.com"
] | edui.bin@gmail.com |
50b929b62405be6ed8aacd6a49a420bd9ba63219 | 23ac56d6e024a69ae9f6f9e471ddefd71c9f0243 | /reverse_list.py | 3ce059eb10cf84065d68099596ca3be2bda56c8f | [] | no_license | erenat77/data_structure_in_Python | c70538f2c510b5525b230f84f7b455a0524d7313 | 216b173ab27cbbd3440c783efbd671be47645457 | refs/heads/master | 2020-08-11T10:16:48.352675 | 2019-11-05T01:03:07 | 2019-11-05T01:03:07 | 214,548,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | l = [1,2,5,4,8,9,87,9,9,6,4,5]
# recursive solution
def rev(l):
if len(l)<=1 : return l
else: return [l[-1]] + rev(l[:-1])
rev(l)
#easy solution
print(l[::-1])
| [
"noreply@github.com"
] | erenat77.noreply@github.com |
7e41be08a3a77a30cf7becf9259474bda1cdf940 | 6bde544edbda4291b8fd10533e3ec0cca4855a1f | /problem_2.py | ac9fd9e6bef2503460e546c4ca2608d9b641bf76 | [] | no_license | ekdeguzm/project_euler_problem_2 | 5d2ba3806a1679e188eee293ada334e53f5175bc | 6ba89ca7b181236d4c916bd31aeedbb2ceb8665a | refs/heads/main | 2023-08-24T23:50:26.581516 | 2021-09-29T06:57:09 | 2021-09-29T06:57:09 | 411,562,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | # Probem 2 of Project Euler
# Python 3.9.5
# Even Fibonacci numbers
# Create Fibonacci list and even Fibonacci list
fib_list = []
even_fib_list = []
# Create Fibonacci sequence
def fibonacci(n):
a, b = 0, 1
for x in range(1, n):
a, b = b, a + b
return b
for i in range(1, 34):
fib_list.append(fibonacci(i))
# Print Fibonacci seq no more than 4,000,000
print(fib_list)
# Get the even values from fib_list and add it it into the even list
for value in fib_list:
if value % 2 == 0:
even_fib_list.append(value)
else:
None
print("Updated list", even_fib_list)
# Add values from even_fib_list together
print(sum(even_fib_list))
| [
"noreply@github.com"
] | ekdeguzm.noreply@github.com |
7e6dccde1c6ea2ba3cbd360b3009d30db942726a | b1e7481f8b5bf40c2547c95b1863e25b11b8ef78 | /Kai/python/modules/JetMETLogic.py | 9fdf533765af3ae52ed238853b1aaaeac74dfcea | [
"Apache-2.0"
] | permissive | NJManganelli/FourTopNAOD | 3df39fd62c0546cdbb1886b23e35ebdc1d3598ad | c86181ae02b1933be59d563c94e76d39b83e0c52 | refs/heads/master | 2022-12-22T22:33:58.697162 | 2022-12-17T01:19:36 | 2022-12-17T01:19:36 | 143,607,743 | 1 | 1 | Apache-2.0 | 2022-06-04T23:11:42 | 2018-08-05T11:40:42 | Python | UTF-8 | Python | false | false | 48,234 | py | from __future__ import division, print_function
import ROOT
import math
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection, Object
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module
from PhysicsTools.NanoAODTools.postprocessing.tools import * #DeltaR, match collection methods
from FourTopNAOD.Kai.tools.toolbox import *
class JetMETLogic(Module):
def __init__(self, passLevel, era="2017", subera=None, isData=True, weightMagnitude=1, fillHists=False, btagging=['DeepJet', 'M'], MET=[45, 50], HT=[450,500], ZWidth=15, jetPtVar = "pt_nom", jetMVar = "mass_nom", verbose=False, probEvt=None, mode="Flag", debug=False):
# genEquivalentLuminosity=1, genXS=1, genNEvents=1, genSumWeights=1, era="2017", btagging=['DeepCSV','M'], lepPt=25, GenTop_LepSelection=None):
""" Jet, MET, HT logic that performs lepton cleaning and jet selection. Optionally can do b-tagging, but mode without this requirement can be enabled/disabled
passLevel is the level at which the module should trigger "True" to pass the event along to further modules. Available: 'all', 'baseline', 'selection'
Era is a string with the year of data taking or corresponding MC sample ("2017", "2018")
Subera is a string with the subera of data-taking, only for use in combination with isData=True and TriggerChannel ("B", "E", etc.)
isData is a boolean for when it's a data sample, as these are handled differently (trigger exclusivity and tier selection) from Monte Carlo.
TriggerChannel is a string with the trigger channel ("ElMu" for e-mu channel/dataset, regardless of which is higher pT, "El" for single-electron channel/dataset).
fillHists is a boolean for filling histograms.
Regarding data, internally there are 'tiers' associated with the trigger tuples. For MC, if the event fires any trigger from any tier, it should be accepted.
For data, given that events can be duplicated across data streams ('SingleMuon' and 'MuonEG'), triggers are divided into tiers.
The goal is to only select a data event from the highest available tier of triggers that it fires, and veto that event in appropriate
data streams when it corresponds to a lower trigger selection.
For example, let an event fire both a single muon trigger (tier 3) and a mu-mu trigger (tier 1), but not an e-mu trigger (tier 0). In the double muon dataset,
the event is selected because it fired the tier 1 trigger in the list (and not the tier 0 triggers). In the single muon dataset, the event is veto'd,
because it fired the tier 1 trigger as well as the tier 3. A different event that only fired the tier 3 trigger is appropriately picked up on the single muon
dataset, and while it may exist in the double muon dataset, it will only be becasue of a trigger that we have not checked for, and so we must not have picked it up
in that dataset"""
self.passLevel = passLevel
self.writeHistFile=True
self.fillHists = fillHists
if self.fillHists and not self.writeHistFile:
self.writeHistFile=True
self.verbose=verbose
self.probEvt = probEvt
self.isData = isData
self.weightMagnitude = weightMagnitude
self.btagging = btagging
self.era = era
if probEvt:
#self.probEvt = probEvt
print("Skipping events until event #{0:d} is found".format(probEvt))
self.verbose = True
#Bits for status flag checking
self.flagbits = {'isPrompt':0b000000000000001,
'isDecayedLeptonHadron':0b000000000000010,
'isTauDecayProduct':0b000000000000100,
'isPromptTauDecaypprProduct':0b000000000001000,
'isDirectTauDecayProduct':0b000000000010000,
'isDirectPromptTauDecayProduct':0b000000000100000,
'isDirectHadronDecayProduct':0b000000001000000,
'isHardProcess':0b000000010000000,
'fromHardProcess':0b000000100000000,
'isHardProcessTauDecayProduct':0b000001000000000,
'isDirectHardProcessTauDecayProduct':0b000010000000000,
'fromHardProcessBeforeFSR':0b000100000000000,
'isFirstCopy':0b001000000000000,
'isLastCopy':0b010000000000000,
'isLastCopyBeforeFSR':0b100000000000000
}
#Bits for Event Selection Variables
self.passbits = {'PV_minNDoF': 0b00000000000000000001,
'PV_maxAbsZ': 0b00000000000000000010,
'PV_maxRho': 0b00000000000000000100,
'MET_globalSuperTightHalo2016Filter': 0b00000000000000001000,
'MET_goodVertices': 0b00000000000000010000,
'MET_HBHENoiseFilter': 0b00000000000000100000,
'MET_HBHENoiseIsoFilter': 0b00000000000001000000,
'MET_EcalDeadCellTriggerPrimitiveFilter':0b00000000000010000000,
'MET_BadPFMuonFilter': 0b00000000000100000000,
'MET_ecalBadCalibFilterV2': 0b00000000001000000000,
'MET_pt': 0b00000000010000000000,
'unused1': 0b00000000100000000000,
'Lepton_ZWindow': 0b00000001000000000000,
'Jet_nJet25': 0b00000010000000000000,
'Jet_nJet20': 0b00000100000000000000,
'HT': 0b00001000000000000000,
'Jet_nBJet_2DCSV': 0b00010000000000000000,
'Jet_nBJet_2DJet': 0b00100000000000000000,
'unused2': 0b01000000000000000000,
'unused3': 0b10000000000000000000,
}
#bits for Object Selection Variables - Jets
self.jetbits = {'lepClean': 0b000000001,
'maxEta': 0b000000010,
'jetID': 0b000000100,
'pt25': 0b000001000,
'pt20': 0b000010000,
'unused': 0b000100000,
'DCSV': 0b001000000,
'DJET': 0b010000000,
'BTag_WP': 0b100000000
}
# Thresholds for Event and Jet levels
self.jet_threshold_bits = {}
self.jet_threshold_bits['baseline'] = self.jetbits['lepClean'] + self.jetbits['maxEta'] + self.jetbits['jetID'] + \
self.jetbits['pt20']
print("Baseline bits are {0:09b}".format(self.jet_threshold_bits['baseline']))
self.jet_threshold_bits['selection'] = self.jetbits['lepClean'] + self.jetbits['maxEta'] + self.jetbits['jetID'] + \
self.jetbits['pt20']
print("Selection bits are {0:09b}".format(self.jet_threshold_bits['selection']))
self.evt_threshold_bits = {}
# self.evt_threshold_bits['baseline'] = 0b00001100011111111111
# self.evt_threshold_bits['selection'] = 0b00001100011111111111
self.evt_threshold_bits['baseline'] = self.passbits['PV_minNDoF'] + self.passbits['PV_maxAbsZ'] +\
self.passbits['PV_maxRho'] + self.passbits['MET_globalSuperTightHalo2016Filter'] +\
self.passbits['MET_goodVertices'] + self.passbits['MET_HBHENoiseFilter'] + \
self.passbits['MET_HBHENoiseIsoFilter'] + \
self.passbits['MET_EcalDeadCellTriggerPrimitiveFilter'] + \
self.passbits['MET_BadPFMuonFilter'] + self.passbits['MET_ecalBadCalibFilterV2'] + \
self.passbits['MET_pt'] + self.passbits['Jet_nJet20'] + self.passbits['HT']
self.evt_threshold_bits['selection'] = self.passbits['PV_minNDoF'] + self.passbits['PV_maxAbsZ'] +\
self.passbits['PV_maxRho'] + self.passbits['MET_globalSuperTightHalo2016Filter'] +\
self.passbits['MET_goodVertices'] + self.passbits['MET_HBHENoiseFilter'] + \
self.passbits['MET_HBHENoiseIsoFilter'] + \
self.passbits['MET_EcalDeadCellTriggerPrimitiveFilter'] + \
self.passbits['MET_BadPFMuonFilter'] + self.passbits['MET_ecalBadCalibFilterV2'] + \
self.passbits['MET_pt'] + self.passbits['Jet_nJet20'] + self.passbits['HT']
#flags for MET filters
self.FlagsDict = {"2016" : { "isData" : ["globalSuperTightHalo2016Filter"],
"Common" : ["goodVertices",
"HBHENoiseFilter",
"HBHENoiseIsoFilter",
"EcalDeadCellTriggerPrimitiveFilter",
"BadPFMuonFilter"
],
"NotRecommended" : ["BadChargedCandidateFilter",
"eeBadScFilter"
]
},
"2017" : { "isData" : ["globalSuperTightHalo2016Filter"],
"Common" : ["goodVertices",
"HBHENoiseFilter",
"HBHENoiseIsoFilter",
"EcalDeadCellTriggerPrimitiveFilter",
"BadPFMuonFilter",
"ecalBadCalibFilterV2"
],
"NotRecommended" : ["BadChargedCandidateFilter",
"eeBadScFilter"
]
},
"2018" : { "isData" : ["globalSuperTightHalo2016Filter"],
"Common" : ["goodVertices",
"HBHENoiseFilter",
"HBHENoiseIsoFilter",
"EcalDeadCellTriggerPrimitiveFilter",
"BadPFMuonFilter",
"ecalBadCalibFilterV2"
],
"NotRecommended" : ["BadChargedCandidateFilter",
"eeBadScFilter"
]
}
}
self.Flags = self.FlagsDict[era]
#Btagging dictionary
#FIXMEFIXMEFIXME
self.bTagWorkingPointDict = {
'2016':{
'DeepCSV':{
'L': 0.2217,
'M': 0.6321,
'T': 0.8953,
'Var': 'btagDeepB'
},
'DeepJet':{
'L': 0.0614,
'M': 0.3093,
'T': 0.7221,
'Var': 'btagDeepFlavB'
}
},
'2017':{
'CSVv2':{
'L': 0.5803,
'M': 0.8838,
'T': 0.9693,
'Var': 'btagCSVV2'
},
'DeepCSV':{
'L': 0.1522,
'M': 0.4941,
'T': 0.8001,
'Var': 'btagDeepB'
},
'DeepJet':{
'L': 0.0521,
'M': 0.3033,
'T': 0.7489,
'Var': 'btagDeepFlavB'
}
},
'2018':{
'DeepCSV':{
'L': 0.1241,
'M': 0.4184,
'T': 0.7527,
'Var': 'btagDeepB'
},
'DeepJet':{
'L': 0.0494,
'M': 0.2770,
'T': 0.7264,
'Var': 'btagDeepFlavB'
}
}
}
        #2016 selection required !isFake(), nDegreesOfFreedom > 4 (strictly), |z| < 24 (in cm? fractions of a centimeter?), and rho = sqrt(PV.x**2 + PV.y**2) < 2
#Cuts are to use strictly less than and greater than, i.e. PV.ndof > minNDoF, not >=
self.PVCutDict = {
'2016':{
'minNDoF': 4,
'maxAbsZ': 24.0,
'maxRho': 2
},
'2017':{
'minNDoF': 4,
'maxAbsZ': 24.0,
'maxRho': 2
},
'2018':{
'minNDoF': 4,
'maxAbsZ': 24.0,
'maxRho': 2
}
}
self.PVCut = self.PVCutDict[era]
#Weight variations
if self.isData:
self.weightList = ["NONE"]
else:
# self.weightList = ["NONE", "EWo", "EWS", "PUo", "EP"]
self.weightList = ["NOM"]
#NOM will be XS weight * PU weight * L1Prefiring weight? No Lepton weights, yet
#BTagging method, algorithm name, and chosen selection working point
self.BTName = btagging[0]
self.BTMeth = self.bTagWorkingPointDict[era][btagging[0]]
self.BTWP = self.bTagWorkingPointDict[era][btagging[0]][btagging[1]]
self.BTAlg = self.bTagWorkingPointDict[era][btagging[0]]["Var"]
self.MET = MET
self.HT = HT
self.ZWidth = ZWidth
# self.invertZWindow = invertZWindow
# self.invertZWindowEarlyReturn = invertZWindowEarlyReturn
self.jetPtVar = jetPtVar
self.jetMVar = jetMVar
self.mode = mode
self.debug = debug
if self.verbose:
print("BTMeth " + str(self.BTMeth))
print("BTWP " + str(self.BTWP))
print("BTAlg " + str(self.BTAlg))
print("Minimum lepton Pt: " + str(self.lepPt))
print("Minimum MET[Baseline, Selection]: " + str(self.MET))
print("Minimum HT[Baseline, Selection]: " + str(self.HT))
print("Z Window Width for veto bit: " + str(self.ZWidth))
# print("Inverted Z window: " + str(self.invertZWindow))
# print("Inverted Z window early return: " + str(self.invertZWindowEarlyReturn))
#event counters
self.counter = 0
self.BitsBins = 20
self.BitsMin = 0
self.BitsMax = 20
def beginJob(self, histFile=None,histDirName=None):
if self.fillHists == False and self.writehistFile == False:
Module.beginJob(self, None, None)
else:
if histFile == None or histDirName == None:
raise RuntimeError("fillHists set to True, but no histFile or histDirName specified")
###Inherited from Module
prevdir = ROOT.gDirectory
self.histFile = histFile
self.histFile.cd()
self.dir = self.histFile.mkdir( histDirName + "_JetMETLogic")
prevdir.cd()
self.objs = []
# self.JetMETLogic_Freq = {}
# self.JetMETLogic_Correl = {}
self.JetMETLogic_FailBits = {}
self.JetMETLogic_FailFirst = {}
for lvl in ["baseline", "selection"]:
# self.JetMETLogic_Freq[lvl] = ROOT.TH1D("JetMETLogic_Freq_{}".format(lvl),
# "HLT Paths Fired and Vetoed at {} level (weightMagnitude={}); Type; Events".format(lvl, self.weightMagnitude),
# 1, 0, 0)
# self.JetMETLogic_Correl[lvl] = ROOT.TH2D("JetMETLogic_Correl_{}".format(lvl),
# "Fired HLT Path Correlations at {} level (weightMagnitude={}); Path; Path ".format(lvl, self.weightMagnitude),
# self.PathsBins, self.PathsMin, self.PathsMax, self.PathsBins, self.PathsMin, self.PathsMax)
self.JetMETLogic_FailBits[lvl] = ROOT.TH1D("JetMETLogic_FailBits_{}".format(lvl),
"Failed JetMETLogic selection (any bits) at {} level (weightMagnitude={}); Path; Least significant bit power".format(lvl, self.weightMagnitude),
self.BitsBins, self.BitsMin, self.BitsMax)
self.JetMETLogic_FailFirst[lvl] = ROOT.TH1D("JetMETLogic_FailFirst_{}".format(lvl),
"Failed JetMETLogic selection (power of least significant bit) at {} level (weightMagnitude={}); Path; Least significant bit power".format(lvl, self.weightMagnitude),
self.BitsBins, self.BitsMin, self.BitsMax)
for lvl in ["baseline", "selection"]:
# self.addObject(self.JetMETLogic_Freq[lvl])
# self.addObject(self.JetMETLogic_Correl[lvl])
self.addObject(self.JetMETLogic_FailBits[lvl])
self.addObject(self.JetMETLogic_FailFirst[lvl])
# #Initialize labels to keep consistent across all files (only for labeled histograms, since introduction of 'extra' events in the histo counters (despite 0 weight)
# for lvl in ["baseline", "selection"]:
# for bitPos in xrange(self.BitsMin, self.BitsMax):
# # self.JetMETLogic_Correl[lvl].Fill(trig.trigger + " (T{})".format(trig.tier), trig.trigger + " (T{})".format(trig.tier), 0.0)
# # self.JetMETLogic_FailBits[lvl].Fill(bitPos+1, 0, 0.0)
# # self.JetMETLogic_FailFirst[lvl].Fill(bitPos+1, 0, 0.0)
# # for cat in ["Vetoed", "Fired", "Neither"]:
# # self.JetMETLogic_Freq[lvl].Fill(cat, 0.0)
# def endJob(self):
# if hasattr(self, 'objs') and self.objs != None:
# prevdir = ROOT.gDirectory
# self.dir.cd()
# for obj in self.objs:
# obj.Write()
# prevdir.cd()
# if hasattr(self, 'histFile') and self.histFile != None:
# self.histFile.Close()
    def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
        """Cache the input branch list, declare the output branches (mode "Flag"),
        and bind the per-event weight function for this file.

        The weight function becomes dataWeightFunc for data, backupWeightFunc for
        MC missing a genWeight branch, and genWeightFunc otherwise.
        """
        # NOTE(review): membership test of a format string in a ROOT TObjArray of
        # TBranch objects — presumably relies on a PyROOT pythonization; confirm
        # this actually matches by branch name rather than always being False.
        self.branchList = inputTree.GetListOfBranches()
        if "Jet_{0:s}".format(self.jetPtVar) not in self.branchList:
            print("Warning: expected branch Jet_{0:s} to be present, but it is not. If not added in a module preceding this one, there will be a crash.".format(self.jetPtVar))
        if "Jet_{0:s}".format(self.jetMVar) not in self.branchList:
            print("Warning: expected branch Jet_{0:s} to be present, but it is not. If not added in a module preceding this one, there will be a crash.".format(self.jetMVar))
        self.out = wrappedOutputTree
        # (branch name, ROOT type code, title, counter branch name or None)
        self.varTuple = [('Jet_OSV_baseline', 'i', 'Passes JetMETLeptonLogic at baseline level', 'nJet'),
                         ('Jet_OSV_selection', 'i', 'Passes JetMETLogic at selection level', 'nJet'),
                         ('ESV_JetMETLogic_baseline', 'i', 'Passes JetMETLogic at event level baseline,'\
                          ' bits correspond to levels of baseline in JetMETLogic', None),
                         ('ESV_JetMETLogic_nJet_baseline', 'i', 'Number of jets passing baseline requirements', None),
                         ('ESV_JetMETLogic_HT_baseline', 'D', 'Scalar sum of selected jets\' Pt', None),
                         ('ESV_JetMETLogic_H_baseline', 'D', 'Scalar sum of selected jets\' P', None),
                         ('ESV_JetMETLogic_HT2M_baseline', 'D', 'Scalar sum of selected jets\' Pt except 2 highest b-tagged if they are medium or tight', None),
                         ('ESV_JetMETLogic_H2M_baseline', 'D', 'Scalar sum of selected jets\' P except 2 highest b-tagged if they are medium or tight', None),
                         ('ESV_JetMETLogic_HTb_baseline', 'D', 'Scalar sum of Pt for medium and tight b-tagged jets', None),
                         ('ESV_JetMETLogic_HTH_baseline', 'D', 'Hadronic centrality, HT/H', None),
                         ('ESV_JetMETLogic_HTRat_baseline', 'D', 'Ratio of Pt for two highest b-tagged jets to HT', None),
                         ('ESV_JetMETLogic_dRbb_baseline', 'D', 'DeltaR between the two highest b-tagged jets', None),
                         ('ESV_JetMETLogic_DiLepMass_baseline', 'D', 'Invariant mass of same-flavour leptons (0 default)', None),
                         ('ESV_JetMETLogic_selection', 'i', 'Passes JetMETLogic at event level selection,'\
                          ' bits correspond to levels of selection in JetMETLogic', None),
                         ('ESV_JetMETLogic_nJet_selection', 'i', 'Number of jets passing selection requirements', None),
                         ('ESV_JetMETLogic_HT_selection', 'D', 'Scalar sum of selected jets\' Pt', None),
                         ('ESV_JetMETLogic_H_selection', 'D', 'Scalar sum of selected jets\' P', None),
                         ('ESV_JetMETLogic_HT2M_selection', 'D', 'Scalar sum of selected jets\' Pt except 2 highest b-tagged if they are medium or tight', None),
                         ('ESV_JetMETLogic_H2M_selection', 'D', 'Scalar sum of selected jets\' P except 2 highest b-tagged if they are medium or tight', None),
                         ('ESV_JetMETLogic_HTb_selection', 'D', 'Scalar sum of Pt for medium and tight b-tagged jets', None),
                         ('ESV_JetMETLogic_HTH_selection', 'D', 'Hadronic centrality, HT/H', None),
                         ('ESV_JetMETLogic_HTRat_selection', 'D', 'Ratio of Pt for two highest b-tagged jets to HT', None),
                         ('ESV_JetMETLogic_dRbb_selection', 'D', 'DeltaR between the two highest b-tagged jets', None),
                         ('ESV_JetMETLogic_DiLepMass_selection', 'D', 'Invariant mass of same-flavour leptons (0 default)', None),
                         ]
        # Kept for reference only; these branches are no longer declared or filled.
        self.deprecated = [('ESV_JetMETLogic_nJet', 'I', 'Number of jets passing selection requirements', None),
                           ('ESV_JetMETLogic_nJetBTL', 'I', 'Number of jets passing selection requirements and loose b-tagged', None),
                           ('ESV_JetMETLogic_nJetBTM', 'I', 'Number of jets passing selection requirements and medium b-tagged', None),
                           ('ESV_JetMETLogic_nJetBTT', 'I', 'Number of jets passing selection requirements and tight b-tagged', None),
                          ]
        if self.mode == "Flag":
            if not self.out:
                raise RuntimeError("No Output file selected, cannot flag events for JetMETLogic module")
            else:
                for name, valType, valTitle, lVar in self.varTuple:
                    self.out.branch("{}".format(name), valType, lenVar=lVar, title=valTitle)
        # NOTE(review): analyze() branches on mode "PassFail", not "Pass"/"Fail";
        # confirm which mode strings are actually intended to be supported.
        elif self.mode == "Pass" or self.mode == "Fail" or self.mode == "Plot":
            pass
        if self.isData:
            self.XSweight = self.dataWeightFunc
        elif "genWeight" not in self.branchList:
            self.XSweight = self.backupWeightFunc
            print("Warning in TriggerAndLeptonLogic: expected branch genWeight to be present, but it is not."\
                  "The weight magnitude indicated will be used, but the sign of the genWeight must be assumed positive!")
        else:
            self.XSweight = self.genWeightFunc
    def analyze(self, event): #called by the eventloop per-event
        """process event, return True (go to next module) or False (fail, go to next event)

        Builds per-jet ("Jet_OSV_*") and per-event ("ESV_JetMETLogic_*") selection
        bit masks for the "baseline" and "selection" levels (bit meanings defined by
        self.jetbits and self.passbits in __init__), computes jet kinematic sums
        (HT, H, HT2M, H2M, HTb, HTRat, HTH, dRbb) and the dilepton invariant mass,
        optionally fills the failure-diagnostic histograms, then either writes the
        branches (mode "Flag"), keeps/drops the event (mode "PassFail"), or passes
        everything through (mode "Plot").
        """
        #Increment counter and skip events past the maxEventsToProcess, if larger than -1
        self.counter +=1
        # if -1 < self.maxEventsToProcess < self.counter:
        #     return False
        # if self.probEvt:
        #     if event.event != self.probEvt:
        #         return False
        ###############################################
        ### Collections and Objects and isData check###
        ###############################################
        #Bits for passing different cuts in the event, make final decision at the end, the loop is going to be slow anyway, thanks to PostProcessor
        # ESV_* accumulate the pass bits from self.passbits for each level.
        ESV_baseline = 0
        ESV_selection = 0
        PV = Object(event, "PV")
        otherPV = Collection(event, "OtherPV")
        SV = Collection(event, "SV")
        electrons = Collection(event, "Electron")
        muons = Collection(event, "Muon")
        taus = Collection(event, "Tau")
        jets = Collection(event, "Jet")
        # fatjets = Collection(event, "FatJet")
        # subjets = Collection(event, "SubJet")
        weight = self.XSweight(event) # * PU weight, L1Prefiring weight, etc.
        if not self.isData:
            generator = Object(event, "Generator")
            btagweight = Object(event, "btagWeight") #contains .CSVV2 and .DeepCSVB float weights
        if self.era == "2017":
            # 2017 uses the EE-noise-fixed MET recipe.
            met = Object(event, "METFixEE2017")
        else:
            met = Object(event, "MET")
        HLT = Object(event, "HLT")
        Filters = Object(event, "Flag")
        #Set up dictionary for all the weights to be used.
        # theWeight = {}
        #Begin weight calculations. Some won't work properly with cutflow, so they'll be running weights
        # ["NONE", "EWo", "EWS", "PUo", "EP"]
        # NOTE(review): btagSFs is filled nowhere — the jet loop below is a stub.
        btagSFs = {}
        for jet in jets:
            pass
        # for WLweight in self.weightList:
        #     if WLweight == "NONE":
        #         theWeight[WLweight] = 1
        #     elif WLweight == "EWo":
        #         theWeight[WLweight] = math.copysign(self.evtWeightBase, generator.weight)
        #     elif WLweight == "EWS":
        #         theWeight[WLweight] = math.copysign(self.evtWeightAlt, generator.weight)
        #     elif WLweight == "GWo":
        #         theWeight[weight] = generator.weight
        #     elif weight == "PUo":
        #         theWeight[weight] = event.puWeight #puWeightUp, puWeightDown
        #     elif weight == "EP":
        #         theWeight[weight] = math.copysign(self.evtWeightBase, generator.weight)*event.puWeight
        #     else:
        #         theWeight[weight] = -1
        #     self.cutflow[weight].Fill("> preselection", theWeight[weight])
        ######################
        ### Primary Vertex ###
        ######################
        #Require ndof > minNDoF, |z| < maxAbsZ, and rho < maxRho
        # if PV.ndof <= self.PVCut['minNDoF'] or abs(PV.z) >= self.VPCut['maxAbsZ'] or math.sqrt(PV.x**2 + PV.y**2) >= self.PVCut['maxRho']:
        #     return False
        # PV cuts are identical for both levels, so both masks get the same bits.
        if PV.ndof > self.PVCut['minNDoF']:
            ESV_baseline += self.passbits['PV_minNDoF']
            ESV_selection += self.passbits['PV_minNDoF']
        if abs(PV.z) < self.PVCut['maxAbsZ']:
            ESV_baseline += self.passbits['PV_maxAbsZ']
            ESV_selection += self.passbits['PV_maxAbsZ']
        if math.sqrt(PV.x**2 + PV.y**2) < self.PVCut['maxRho']:
            ESV_baseline += self.passbits['PV_maxRho']
            ESV_selection += self.passbits['PV_maxRho']
        ###########
        ### MET ###
        ###########
        #Check additional flag(s) solely for Data
        if self.isData:
            passFilters = getattr(Filters, self.Flags["isData"][0])
            if passFilters:
                ESV_baseline += self.passbits['MET_globalSuperTightHalo2016Filter']
                ESV_selection += self.passbits['MET_globalSuperTightHalo2016Filter']
        else:
            #Default to true for MC
            ESV_baseline += self.passbits['MET_globalSuperTightHalo2016Filter']
            ESV_selection += self.passbits['MET_globalSuperTightHalo2016Filter']
        #Ensure MC and Data pass all recommended filters for 2017 and 2018
        # Each common filter name maps onto its 'MET_<flag>' pass bit.
        for fi, flag in enumerate(self.Flags["Common"]):
            passFilters = getattr(Filters, flag)
            if passFilters:
                ESV_baseline += self.passbits['MET_{}'.format(flag)]
                ESV_selection += self.passbits['MET_{}'.format(flag)]
        if met.pt >= self.MET[0]: #baseline level
            ESV_baseline += self.passbits['MET_pt']
        if met.pt >= self.MET[1]: #selection level
            ESV_selection += self.passbits['MET_pt']
        # for weight in self.weightList:
        #     self.cutflow[weight].Fill("> MET > {0:d}".format(self.MET), theWeight[weight])
        if not self.isData:
            pass
            # gens = Collection(event, "GenPart")
            # genjets = Collection(event, "GenJet")
            # genfatjets = Collection(event, "GenJetAK8")
            # gensubjets = Collection(event, "SubGenJetAK8")
            # genmet = Object(event, "GenMET")
            #These two are grabbed earlier
            # generator = Object(event, "Generator") #stored earlier for weights access
            # btagweight = Object(event, "btagWeight") #contains .CSVV2 and .DeepCSVB float weights
            #This doesn't exist yet
            # LHEReweightingWeight = Collection(event, "LHEReweightingWeight")
            #These might fail because some of the samples lack weights... axe them for now, check later when actually needed.
            # LHE = Object(event, "LHE")
            # PSWeights = Collection(event, "PSWeight")
            # LHEWeight = getattr(event, "LHEWeight_originalXWGTUP")
            # LHEScaleWeight = Collection(event, "LHEScaleWeight")
            # LHEPdfWeight = Collection(event, "LHEPdfWeight")
        #BIG Weights lesson learned: you cannot use Collection, and possibly, you cannot even assign the variable and iterate through it using indices or
        #pythonic methods. Thus, to ge the 3rd LHEScaleWeight, should use 3rdLHEScaleWeight = getattr(event, "LHEScaleWeight")[2] instead, indexing after acquis.
        # Collect (collection index, lepton) pairs whose per-object selection bits
        # (OSV_*) are set — presumably written by the upstream TriggerAndLeptonLogic
        # module referenced below; TODO confirm.
        muon_baseline = []
        muon_selection = []
        for idx, muon in enumerate(muons):
            if muon.OSV_baseline > 0:
                muon_baseline.append((idx, muon))
            if muon.OSV_selection > 0:
                muon_selection.append((idx, muon))
        electron_baseline = []
        electron_selection = []
        for idx, electron in enumerate(electrons):
            if electron.OSV_baseline > 0:
                electron_baseline.append((idx, electron))
            if electron.OSV_selection > 0:
                electron_selection.append((idx, electron))
        leptons_baseline = electron_baseline + muon_baseline
        leptons_selection = electron_selection + muon_selection
        # Debug-only sanity checks: more than two selected leptons, or a same-sign
        # pair, indicate an upstream selection problem.
        if self.debug:
            if self.passLevel == 'baseline':
                if len(leptons_baseline) > 2:
                    print("Mayday!")
                if leptons_baseline[0][1].charge * leptons_baseline[1][1].charge > 0:
                    print("Charging up!")
            if self.passLevel == 'selection':
                if len(leptons_selection) > 2:
                    print("Mayday!")
                if leptons_selection[0][1].charge * leptons_selection[1][1].charge > 0:
                    print("Charging up!")
        #passbit if outside the Z window in same-flavor event or all in different-flavor event
        if (len(electron_baseline) > 1 or len(muon_baseline) > 1):
            DiLepMass_baseline = (leptons_baseline[0][1].p4() + leptons_baseline[1][1].p4()).M()
            if abs( DiLepMass_baseline - 91.0) > self.ZWidth:
                ESV_baseline += self.passbits['Lepton_ZWindow']
        else: #opposite-flavor
            ESV_baseline += self.passbits['Lepton_ZWindow']
            DiLepMass_baseline = -1
        #Should see no difference in invariant mass except when a collection drops below length 1, given the TriggerAndLeptonLogic Module in LeptonLogic.py
        if (len(electron_selection) > 1 or len(muon_selection) > 1):
            DiLepMass_selection = (leptons_selection[0][1].p4() + leptons_selection[1][1].p4()).M()
            if abs( DiLepMass_selection - 91.0) > self.ZWidth:
                ESV_selection += self.passbits['Lepton_ZWindow']
        else: #opposite-flavor
            ESV_selection += self.passbits['Lepton_ZWindow']
            DiLepMass_selection = -1
        ############
        ### Jets ###
        ###########
        # Jet indices matched to a selected lepton are excluded via the lepClean bit.
        jetsToClean_selection = set([lep[1].jetIdx for lep in leptons_selection])
        selJets_selection = []
        selBTsortedJets_selection = []
        jetbits_selection = [0]*len(jets)
        jetsToClean_baseline = set([lep[1].jetIdx for lep in leptons_baseline])
        selJets_baseline = []
        selBTsortedJets_baseline = []
        jetbits_baseline = [0]*len(jets)
        # NOTE(review): selJets_bugged is assigned but never used in this method.
        selJets_bugged = []
        for idx, jet in enumerate(jets):
            if idx not in jetsToClean_baseline:
                jetbits_baseline[idx] += self.jetbits['lepClean']
            if abs(jet.eta) < 2.5:
                jetbits_baseline[idx] += self.jetbits['maxEta']
            if jet.jetId >= 2:
                jetbits_baseline[idx] += self.jetbits['jetID']
            if getattr(jet, self.jetPtVar) > 25:
                jetbits_baseline[idx] += self.jetbits['pt25']
            if getattr(jet, self.jetPtVar) > 20:
                jetbits_baseline[idx] += self.jetbits['pt20']
            # Baseline uses the Loose ('L') working points for both taggers.
            if getattr(jet, self.bTagWorkingPointDict[self.era]['DeepCSV']['Var']) > self.bTagWorkingPointDict[self.era]['DeepCSV']['L']:
                jetbits_baseline[idx] += self.jetbits['DCSV']
            if getattr(jet, self.bTagWorkingPointDict[self.era]['DeepJet']['Var']) > self.bTagWorkingPointDict[self.era]['DeepJet']['L']:
                jetbits_baseline[idx] += self.jetbits['DJET']
            if getattr(jet, self.BTAlg) > self.BTWP:
                jetbits_baseline[idx] += self.jetbits['BTag_WP']
            # Jet is selected when all threshold bits (lepClean+maxEta+jetID+pt20) are set.
            if (jetbits_baseline[idx] & self.jet_threshold_bits['baseline']) >= self.jet_threshold_bits['baseline']:
                selJets_baseline.append((idx, jet))
                selBTsortedJets_baseline.append((idx, jet))
            # #BTagging input disabled without highest bit! Use DeepJet Loose...
            # if jetbits_baseline[idx] >= 0b010010111:
            if idx not in jetsToClean_selection:
                jetbits_selection[idx] += self.jetbits['lepClean']
            if abs(jet.eta) < 2.5:
                jetbits_selection[idx] += self.jetbits['maxEta']
            if jet.jetId >= 2: #dropped to 2==Tight due to bug in 4==TightLepVeto ID regarding muon energy fractions
                jetbits_selection[idx] += self.jetbits['jetID']
            if getattr(jet, self.jetPtVar) > 25:
                jetbits_selection[idx] += self.jetbits['pt25']
            if getattr(jet, self.jetPtVar) > 20:
                jetbits_selection[idx] += self.jetbits['pt20']
            # Selection level uses the Medium ('M') working points instead.
            if getattr(jet, self.bTagWorkingPointDict[self.era]['DeepCSV']['Var']) > self.bTagWorkingPointDict[self.era]['DeepCSV']['M']:
                jetbits_selection[idx] += self.jetbits['DCSV']
            if getattr(jet, self.bTagWorkingPointDict[self.era]['DeepJet']['Var']) > self.bTagWorkingPointDict[self.era]['DeepJet']['M']:
                jetbits_selection[idx] += self.jetbits['DJET']
            if getattr(jet, self.BTAlg) > self.BTWP:
                jetbits_selection[idx] += self.jetbits['BTag_WP']
            if (jetbits_selection[idx] & self.jet_threshold_bits['selection']) >= self.jet_threshold_bits['selection']:
                selJets_selection.append((idx, jet))
                selBTsortedJets_selection.append((idx, jet))
        nJets_baseline = len(selJets_baseline)
        nJets_selection = len(selJets_selection)
        #BTagging algo used for sorting, still
        selBTsortedJets_baseline.sort(key=lambda j : getattr(j[1], self.BTAlg), reverse=True)
        selBTsortedJets_selection.sort(key=lambda j : getattr(j[1], self.BTAlg), reverse=True)
        #B-tagged jets
        # selBTLooseJets = [jetTup for jetTup in selBTsortedJets if getattr(jetTup[1], self.BTAlg) > self.BTMeth['L']]
        # selBTMediumJets = [jetTup for jetTup in selBTLooseJets if getattr(jetTup[1], self.BTAlg) > self.BTMeth['M']]
        # selBTTightJets = [jetTup for jetTup in selBTMediumJets if getattr(jetTup[1], self.BTAlg) > self.BTMeth['T']]
        # selBTJets = [jetTup for jetTup in selBTsortedJets if getattr(jetTup[1], self.BTAlg) > self.BTWP]
        # nJets = len(selJets)
        # nBTLoose = len(selBTLooseJets)
        # nBTMedium = len(selBTMediumJets)
        # nBTTight = len(selBTTightJets)
        # nBTSelected = len(selBTJets)
        # NOTE: these are filtered bit lists (over ALL jets, not only selected ones);
        # only their lengths are used below.
        nJets25_baseline = [bits for bits in jetbits_baseline if (bits & self.jetbits['pt25'] > 0)]
        nBJetsDeepCSV_baseline = [bits for bits in jetbits_baseline if (bits & self.jetbits['DCSV'] > 0)]
        nBJetsDeepJet_baseline = [bits for bits in jetbits_baseline if (bits & self.jetbits['DJET'] > 0)]
        #Just 3 jets in baseline
        if nJets_baseline > 2:
            ESV_baseline += self.passbits['Jet_nJet20']
        if len(nJets25_baseline) > 2:
            ESV_baseline += self.passbits['Jet_nJet25']
        #Require 2 loose tagged jets
        if len(nBJetsDeepCSV_baseline) > 1:
            ESV_baseline += self.passbits['Jet_nBJet_2DCSV']
        if len(nBJetsDeepJet_baseline) > 1:
            ESV_baseline += self.passbits['Jet_nBJet_2DJet']
        nJets25_selection = [bits for bits in jetbits_selection if (bits & self.jetbits['pt25'] > 0)]
        nBJetsDeepCSV_selection = [bits for bits in jetbits_selection if (bits & self.jetbits['DCSV'] > 0)]
        nBJetsDeepJet_selection = [bits for bits in jetbits_selection if (bits & self.jetbits['DJET'] > 0)]
        #4 jets in selection
        if nJets_selection > 3:
            ESV_selection += self.passbits['Jet_nJet20']
        if len(nJets25_selection) > 3:
            ESV_selection += self.passbits['Jet_nJet25']
        #Require 2 medium tagged jets
        if len(nBJetsDeepCSV_selection) > 1:
            ESV_selection += self.passbits['Jet_nBJet_2DCSV']
        if len(nBJetsDeepJet_selection) > 1:
            ESV_selection += self.passbits['Jet_nBJet_2DJet']
        #HT and other calculations
        HT_baseline = 0
        H_baseline = 0
        HT2M_baseline = 0
        H2M_baseline = 0
        HTb_baseline = 0
        HTH_baseline = 0
        HTRat_baseline = 0
        dRbb_baseline = -1
        for j, jet in selBTsortedJets_baseline:
            HT_baseline += getattr(jet, self.jetPtVar)
            jetP4_baseline = ROOT.TLorentzVector()
            jetP4_baseline.SetPtEtaPhiM(getattr(jet, self.jetPtVar),
                                        getattr(jet, "eta"),
                                        getattr(jet, "phi"),
                                        getattr(jet, self.jetMVar)
                                       )
            H_baseline += jetP4_baseline.P()
            #Only use deepjet
            # NOTE(review): here j is the jet's index in the ORIGINAL Jet collection,
            # not its rank in the b-tag-sorted list; `j > 1` therefore excludes
            # collection indices 0 and 1 rather than the two highest-b-tag jets,
            # despite the HT2M branch description. Confirm which is intended.
            if j > 1 and len(nBJetsDeepJet_baseline) > 1:
                HT2M_baseline += getattr(jet, self.jetPtVar)
                H2M_baseline += jetP4_baseline.P()
            if jetbits_baseline[j] & self.jetbits['DJET']:
                HTb_baseline += getattr(jet, self.jetPtVar)
        if HT_baseline >= self.HT[0]:
            ESV_baseline += self.passbits['HT']
        if len(selBTsortedJets_baseline) > 3: #redundant, but only so long as 4 jet cut is in place
            jet1_baseline = selBTsortedJets_baseline[0][1]
            jet2_baseline = selBTsortedJets_baseline[1][1]
            dRbb_baseline = deltaR(jet1_baseline, jet2_baseline)
            HTRat_baseline = (jet1_baseline.pt + jet2_baseline.pt)/HT_baseline
            HTH_baseline = HT_baseline/H_baseline
        else:
            # Sentinel values when fewer than 4 selected jets.
            dRbb_baseline = -1
            HTRat_baseline = -0.1
            HTH_baseline = -0.1
        #HT and other calculations
        HT_selection = 0
        H_selection = 0
        HT2M_selection = 0
        H2M_selection = 0
        HTb_selection = 0
        HTH_selection = 0
        HTRat_selection = 0
        dRbb_selection = -1
        for j, jet in selBTsortedJets_selection:
            HT_selection += getattr(jet, self.jetPtVar)
            jetP4_selection = ROOT.TLorentzVector()
            jetP4_selection.SetPtEtaPhiM(getattr(jet, self.jetPtVar),
                                         getattr(jet, "eta"),
                                         getattr(jet, "phi"),
                                         getattr(jet, self.jetMVar)
                                        )
            H_selection += jetP4_selection.P()
            #Only use deepjet
            # NOTE(review): same original-collection-index caveat as the baseline loop.
            if j > 1 and len(nBJetsDeepJet_selection) > 1:
                HT2M_selection += getattr(jet, self.jetPtVar)
                H2M_selection += jetP4_selection.P()
            if jetbits_selection[j] & self.jetbits['DJET']:
                HTb_selection += getattr(jet, self.jetPtVar)
        if HT_selection >= self.HT[1]:
            ESV_selection += self.passbits['HT']
        if len(selBTsortedJets_selection) > 3: #redundant, but only so long as 4 jet cut is in place
            jet1_selection = selBTsortedJets_selection[0][1]
            jet2_selection = selBTsortedJets_selection[1][1]
            dRbb_selection = deltaR(jet1_selection, jet2_selection)
            HTRat_selection = (jet1_selection.pt + jet2_selection.pt)/HT_selection
            HTH_selection = HT_selection/H_selection
        else:
            dRbb_selection = -1
            HTRat_selection = -0.1
            HTH_selection = -0.1
        ####################################
        ### Variables for branch filling ###
        ####################################
        branchVals = {}
        branchVals['Jet_OSV_baseline'] = jetbits_baseline
        branchVals['Jet_OSV_selection'] = jetbits_selection
        branchVals['ESV_JetMETLogic_baseline'] = ESV_baseline #Do a bit comparison at the end?
        branchVals['ESV_JetMETLogic_selection'] = ESV_selection #do bit comparison at the end, but maybe still keep bits around...
        branchVals['ESV_JetMETLogic_nJet_baseline'] = nJets_baseline
        branchVals['ESV_JetMETLogic_nJet_selection'] = nJets_selection
        # branchVals['ESV_JetMETLogic_nJetBTL'] = nBTLoose
        # branchVals['ESV_JetMETLogic_nJetBTM'] = nBTMedium
        # branchVals['ESV_JetMETLogic_nJetBTT'] = nBTTight
        branchVals['ESV_JetMETLogic_HT_baseline'] = HT_baseline
        branchVals['ESV_JetMETLogic_H_baseline'] = H_baseline
        branchVals['ESV_JetMETLogic_HT2M_baseline'] = HT2M_baseline
        branchVals['ESV_JetMETLogic_H2M_baseline'] = H2M_baseline
        branchVals['ESV_JetMETLogic_HTb_baseline'] = HTb_baseline
        branchVals['ESV_JetMETLogic_HTH_baseline'] = HTH_baseline
        branchVals['ESV_JetMETLogic_HTRat_baseline'] = HTRat_baseline
        branchVals['ESV_JetMETLogic_dRbb_baseline'] = dRbb_baseline
        branchVals['ESV_JetMETLogic_DiLepMass_baseline'] = DiLepMass_baseline
        branchVals['ESV_JetMETLogic_HT_selection'] = HT_selection
        branchVals['ESV_JetMETLogic_H_selection'] = H_selection
        branchVals['ESV_JetMETLogic_HT2M_selection'] = HT2M_selection
        branchVals['ESV_JetMETLogic_H2M_selection'] = H2M_selection
        branchVals['ESV_JetMETLogic_HTb_selection'] = HTb_selection
        branchVals['ESV_JetMETLogic_HTH_selection'] = HTH_selection
        branchVals['ESV_JetMETLogic_HTRat_selection'] = HTRat_selection
        branchVals['ESV_JetMETLogic_dRbb_selection'] = dRbb_selection
        branchVals['ESV_JetMETLogic_DiLepMass_selection'] = DiLepMass_selection
        ####################################
        ### Event pass values calculated ###
        ####################################
        # An event passes a level when every bit of that level's threshold mask is set.
        passVals = {}
        passVals['ESV_JetMETLogic_pass_all'] = True
        passVals['ESV_JetMETLogic_pass_baseline'] = ( (branchVals['ESV_JetMETLogic_baseline'] & self.evt_threshold_bits['baseline']) >= self.evt_threshold_bits['baseline'])
        passVals['ESV_JetMETLogic_pass_selection'] = ( (branchVals['ESV_JetMETLogic_selection'] & self.evt_threshold_bits['selection']) >= self.evt_threshold_bits['selection'])
        #######################
        ### Fill histograms ###
        #######################
        if self.fillHists:
            for lvl in ["baseline", "selection"]:
                if passVals['ESV_JetMETLogic_pass_{}'.format(lvl)]:
                    pass
                else:
                    # self.addObject(self.JetMETLogic_Freq[lvl])
                    # self.addObject(self.JetMETLogic_Correl[lvl])
                    foundFirstFail = False
                    for bitPos, bitVal in enumerate(self.passbits.values()):
                        if (bitVal & self.evt_threshold_bits[lvl] == 0) or (bitVal & branchVals['ESV_JetMETLogic_{}'.format(lvl)] > 0):
                            #First skip values that aren't set in the evt_threshold, we can't fail on them, then additionally skip values that are passed in regard to those thresholds, using the comparison with bits in ESV_JetMETLogic_{lvl}
                            continue
                        #This is triggered when we have a bit that is in the threshold and was not met by the event, so it's a failure
                        self.JetMETLogic_FailBits[lvl].Fill(bitPos+1, weight)
                        if not foundFirstFail: self.JetMETLogic_FailFirst[lvl].Fill(bitPos+1, weight)
                        #And if we made it to this point, we skip filling any further bits in the second histo by flipping the flag below
                        foundFirstFail = True
        ##########################
        ### Write out branches ###
        ##########################
        if self.out and self.mode == "Flag":
            for name, valType, valTitle, lVar in self.varTuple:
                self.out.fillBranch(name, branchVals[name])
            return True
        # NOTE(review): beginFile's mode check accepts "Pass"/"Fail", but this branch
        # tests "PassFail" — confirm the intended mode strings are consistent.
        elif self.mode == "PassFail":
            if passVals['ESV_JetMETLogic_pass_{}'.format(self.passLevel)]:
                return True
            else:
                return False
        elif self.mode == "Plot":
            #Do something?
            #Do pass through if plotting, make no assumptions about what should be done with the event
            return True
        else:
            raise NotImplementedError("No method in place for JetMETLogic module in mode '{0}'".format(self.mode))
def genWeightFunc(self, event):
#Default value is currently useless, since the tree reader array tool raises an exception anyway
return math.copysign(self.weightMagnitude, getattr(event, "genWeight", 1))
    def backupWeightFunc(self, event):
        """Fallback weight: ignore the event and return the (unsigned) magnitude."""
        return self.weightMagnitude
    def dataWeightFunc(self, event):
        """Weight function for real data: every event counts exactly once."""
        return 1
| [
"nmang001@ucr.edu"
] | nmang001@ucr.edu |
8cfa0564a630a016ac91663a5dbcade279afd639 | 144b54b91cbd541421c12df1074920c1bd635780 | /utils.py | 71aca180b475fef8fb48cacb903d2616b5893e9b | [
"MIT"
] | permissive | jajcayn/re_hippocampal_model | 777956b93476051202e10c908f419c69e9349c0e | 5dc984cec0591d27ed6dedf8e8e2ddd8e07b20c7 | refs/heads/main | 2023-04-19T05:29:14.064024 | 2021-04-21T09:37:45 | 2021-04-21T09:37:45 | 353,683,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,481 | py | """
Helper functions
"""
import logging
from functools import partial
from multiprocessing import Pool, cpu_count
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from tqdm import tqdm
def run_in_parallel(
    partial_function,
    iterable,
    workers=cpu_count(),
    length=None,
    assert_ordered=False,
):
    """
    Wrapper for running functions in parallel with tqdm bar.
    :param partial_function: partial function to be evaluated
    :type partial_function: :class:`_functools.partial`
    :param iterable: iterable comprised of arguments to be fed to partial
        function
    :type iterable: iterable
    :param workers: number of workers to be used
    :type workers: int
    :param length: Length of the iterable / generator.
    :type length: int|None
    :param assert_ordered: whether to assert order of results same as the
        iterable (imap vs imap_unordered)
    :type assert_ordered: bool
    :return: list of values returned by partial function
    :rtype: list
    """
    total = length
    if total is None:
        try:
            total = len(iterable)
        except (TypeError, AttributeError):
            pass
    # wrap method in order to get original exception from a worker process
    partial_function = partial(_worker_fn, fn=partial_function)
    pool = Pool(workers)
    try:
        imap_func = pool.imap if assert_ordered else pool.imap_unordered
        results = []
        for result in tqdm(imap_func(partial_function, iterable), total=total):
            results.append(result)
        pool.close()
    except BaseException:
        # Fix: previously the pool leaked if iteration raised (a worker
        # exception, KeyboardInterrupt, ...); tear the workers down instead.
        pool.terminate()
        raise
    finally:
        pool.join()
    return results
def _worker_fn(item, fn):
    """
    Evaluate ``fn`` on ``item``, logging any exception before re-raising.

    This wrapper exists so that a worker process reports the original
    exception with a correct stacktrace instead of a bare pickling error.
    :param item: item from iterable
    :param fn: partial function to be evaluated
    :type fn: :class:`_functools.partial`
    """
    try:
        result = fn(item)
    except Exception as exc:
        logging.exception(exc)
        raise
    return result
class AnchoredHScaleBar(matplotlib.offsetbox.AnchoredOffsetbox):
    """
    Creates horizontal scale bar in the matplotlib figures.
    Taken from https://stackoverflow.com/a/43343934.
    """
    def __init__(
        self,
        size=1,
        extent=0.03,
        label="",
        loc=2,
        ax=None,
        pad=0.6,
        borderpad=0.5,
        ppad=0,
        sep=4,
        txtsize=16,
        prop=None,
        frameon=False,
        linekw=None,
        **kwargs
    ):
        # Fix: the original used a mutable dict default (linekw={}), which is
        # shared across calls; the None-sentinel idiom is backward compatible.
        if linekw is None:
            linekw = {}
        if not ax:
            ax = plt.gca()
        trans = ax.get_xaxis_transform()
        # Horizontal line of data-length `size` with end ticks of height `extent`.
        size_bar = matplotlib.offsetbox.AuxTransformBox(trans)
        line = Line2D([0, size], [0, 0], **linekw)
        vline1 = Line2D([0, 0], [-extent / 2.0, extent / 2.0], **linekw)
        vline2 = Line2D([size, size], [-extent / 2.0, extent / 2.0], **linekw)
        size_bar.add_artist(line)
        size_bar.add_artist(vline1)
        size_bar.add_artist(vline2)
        # NOTE(review): TextArea's `minimumdescent` was deprecated/removed in
        # newer matplotlib releases — confirm against the pinned version.
        txt = matplotlib.offsetbox.TextArea(
            label, minimumdescent=False, textprops={"size": txtsize}
        )
        self.vpac = matplotlib.offsetbox.VPacker(
            children=[size_bar, txt], align="center", pad=ppad, sep=sep
        )
        matplotlib.offsetbox.AnchoredOffsetbox.__init__(
            self,
            loc,
            pad=pad,
            borderpad=borderpad,
            child=self.vpac,
            prop=prop,
            frameon=frameon,
            **kwargs
        )
| [
"nikola.jajcay@gmail.com"
] | nikola.jajcay@gmail.com |
d2d8d1a76517bf0cbfed79f32e7b7f96acb604a2 | a7a8e79ba13962d792d9aa1ee758f095083044b9 | /gened.py | 6eedddca46433cccfb8e35171c9c33fc5f66edd9 | [] | no_license | vannjo02/Gen_eds | 96744091a8f08504fe68739ecee5e8b7b1b17b4a | 5fda16f2531e38358fdfadadf890037c4ad56fe5 | refs/heads/master | 2021-01-20T09:55:02.222524 | 2017-05-04T19:15:36 | 2017-05-04T19:15:36 | 90,300,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,804 | py | from flask import Flask, render_template, request
import psycopg2
from flask_bootstrap import Bootstrap
import os
app = Flask(__name__)
Bootstrap(app)
# Single module-level connection shared by every request handler.
# NOTE(review): one global psycopg2 connection is not safe under concurrent
# requests — confirm the deployment serves requests one at a time.
conn = psycopg2.connect(os.environ['DATABASE_URL'])
print('READY')
@app.route('/')
def index():
    """Landing page: every gen-ed requirement name, plus the five courses
    that satisfy the most requirements."""
    requirement_names = [
        "Human Expression—Primary Texts", "Intercultural", "Historical",
        "Natural World—Nonlab", "Religion", "Human Expression", "Skills",
        "Human Behavior", "Human Behavior—Social Science Methods",
        "Quantitative", "Natural World—Lab", "Biblical Studies", "Wellness",
    ]
    cursor = conn.cursor()
    cursor.execute("select number, title, count(requirement.description) as count from course join course_requirement on (course.id = course_requirement.course) join requirement on (requirement.id = course_requirement.requirement) group by number, title order by count desc limit 5")
    top_courses = cursor.fetchall()
    print(top_courses)
    return render_template('index.html', reqs=requirement_names, res=top_courses)
@app.route('/requirement/')
def requirement():
    """List the courses that satisfy every requirement the user checked."""
    chosen = tuple(request.args.getlist('option'))
    cursor = conn.cursor()
    # HAVING count >= len(chosen) keeps only courses matching ALL selections.
    cursor.execute("select number, title from course join course_requirement on (course.id = course_requirement.course) join requirement on (requirement.id = course_requirement.requirement) where requirement.description in %s group by number, title having count(requirement.description) >= %s", (chosen, len(chosen)))
    matching = cursor.fetchall()
    print(matching)
    return render_template('requirement.html', courses=matching, reqs=chosen)
@app.route('/course/<crs>')
def course(crs):
    """Detail page for one course: its requirements, title and description."""
    cursor = conn.cursor()
    cursor.execute("select requirement.description from course join course_requirement on (course.id = course_requirement.course) join requirement on (requirement.id = course_requirement.requirement) where course.number = %s", (crs,))
    requirements = cursor.fetchall()
    print(requirements)
    cursor.execute("select title, course.description from course where course.number = %s", (crs,))
    # NOTE(review): [0] raises IndexError for an unknown course number —
    # confirm links are only ever built from known numbers.
    course_info = cursor.fetchall()[0]
    print(course_info)
    return render_template('course.html', course=requirements, info=course_info, crs=crs)
@app.route('/search/')
def search():
    """Title-search courses by name and list each hit's requirements."""
    query = tuple(request.args.getlist('input'))[0].title()
    like_pattern = "%" + query + "%"
    cursor = conn.cursor()
    cursor.execute("select number, title from course where course.title like %s", (like_pattern,))
    # Renamed from the original's reuse of `search`, which shadowed this view.
    matches = cursor.fetchall()
    print("Results", matches)
    fulfills = []
    for hit in matches:
        cursor.execute("select requirement.description from course join course_requirement on (course.id = course_requirement.course) join requirement on (requirement.id = course_requirement.requirement) where course.number = %s", (hit[0],))
        fulfills.append([row[0] for row in cursor.fetchall()])
    print("Reqs list", fulfills)
    return render_template('search.html', search=matches, lst=fulfills, query=query)
if __name__ == '__main__':
    # Fix: debug='True' passed a string where a boolean is expected; it only
    # "worked" because any non-empty string is truthy. Use the real boolean.
    app.run(debug=True)
| [
"vannjo02@luther.edu"
] | vannjo02@luther.edu |
835a35a0816d80e070b145914b614c4079752764 | 680d9e12f9916f68f84921e1b0328786454f2d50 | /cmd_line_sample.py | 3485a5f3ce5e52b1f0b411f971f00a44f69189b3 | [] | no_license | vkarpov15/hydra-injector-py | 24c791fd1bc3c90681b48f7515fc0e359fd14a94 | 3948b5056c4cf563db77c7172c6660daa7c62b19 | refs/heads/master | 2020-12-24T13:52:41.541934 | 2012-11-09T01:39:32 | 2012-11-09T01:39:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,353 | py | #
# cmd_line_sample.py
#
# Created on: November 3, 2012
# Author: Valeri Karpov
#
# An example usage of CommandLineInjector - a very general method for stripping
# padding from a sample file. While this is a somewhat trivial example, it
# highlights some of the more useful features of this library - managing object
# ("square") life cycle, wiring two methods / "circles" together, constructing
# squares from command line params, and a minimum of non-reusable boilerplate
#
from CommandLineInjector import *
import inspect
class FileReader:
    """Reads lines from an input file; ``infile`` is wired by the injector
    (see the ``inject`` attribute) from the ``-f`` command-line flag."""
    inject = ["infile"]
    def __init__(self, infile):
        self.filename = infile
        # Fix: Python-2-only `print infile` statement -> print() call,
        # identical output and valid under both Python 2 and 3.
        print(infile)
    def initialize(self):
        # Opened lazily so the object can exist before the file is read.
        self.f = open(self.filename, "r")
    def close(self):
        self.f.close()
    def getLines(self):
        return self.f.readlines()
class FileWriter:
    """Writes newline-terminated lines to an output file; ``outfile`` is
    wired by the injector from the same-named command-line parameter."""
    inject = ["outfile"]
    def __init__(self, outfile):
        self.filename = outfile
        # Fix: Python-2-only `print outfile` statement -> print() call,
        # identical output and valid under both Python 2 and 3.
        print(outfile)
    def initialize(self):
        self.f = open(self.filename, "w")
    def close(self):
        self.f.close()
    def writeLine(self, line):
        self.f.write("%s\n" % line)
def removePaddingFromFile(reader):
    """Return every line of *reader* with leading/trailing whitespace removed."""
    stripped = []
    for raw_line in reader.getLines():
        stripped.append(raw_line.strip())
    return stripped
def writeUnpaddedFile(writer, lines="method:removePaddingFromFile"):
    """Write each entry of *lines* through *writer*.

    The magic default string tells the injector to wire *lines* to the
    result of removePaddingFromFile.
    """
    for entry in lines:
        writer.writeLine(entry)
#### This is boilerplate
#### Sample run: python cmd_line_sample.py writeUnpaddedFile --f="../test" --outfile=../test2
class MyRunner:
    """Adapter the injector uses to invoke and introspect registered methods."""
    def run(self, method, params):
        # NOTE(review): eval() of a caller-supplied name is acceptable in this
        # demo, but must never be exposed to untrusted input.
        return eval(method)(**params)
    def getSpecs(self, method):
        # NOTE: inspect.getargspec was removed in Python 3.11
        # (inspect.getfullargspec is the modern replacement).
        return inspect.getargspec(eval(method))
# Binding magic. Roughly translated:
# 1) Whenever a method or class asks for something called "reader", it means
# a FileReader where the constructor parameter "infile" is taken from
# command line parameter -f
# 2) Similar to above, "writer" is a FileWriter where all of its constructor
# parameters are taken from command line parameter with same name
# 3/4) Add the methods removePaddingFromFile and writeUnpaddedFile as callable
# methods from command line
# 5) Run using command line arguments using the runner from this scope
# Wire the reader/writer classes and both methods, then dispatch immediately
# from the command line using MyRunner (explained step by step above).
CommandLineInjector().addClass("reader", FileReader, { "infile" : "f" }).addClass("writer", FileWriter).addMethod("removePaddingFromFile").addMethod("writeUnpaddedFile").run(MyRunner())
| [
"valkar207@gmail.com"
] | valkar207@gmail.com |
d9c4b8a7de6dbd3755b12d629a970ee4b0778798 | 49197a748adea1618a2cece7a1ae057006da090c | /jgodwin/micro/micro.py | f1132e04bc3d6e7e46ac6817121583f752dcc324 | [] | no_license | psava/cwp12 | 0bbb1f213c66737509280fc4b0ac5c53b52d017a | 3f47c1bf358caa5ebe608ab88fc12b85fd489220 | refs/heads/master | 2021-01-10T21:24:57.572992 | 2012-10-10T15:52:18 | 2012-10-10T15:52:18 | 2,213,082 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 17,263 | py | from rsf.cluster import *
import random,fdmod
def getpar():
    """Build and return the shared parameter dictionary for the experiment.

    Collects model/image dimensions, wavelet settings, modeling-code flags
    and Thomsen parameters, then lets fdmod fill in derived plotting keys.
    """
    par = {
        ###############################
        # Model/Image dimensions
        ###############################
        #'nx':501, 'ox':0, 'dx':0.002, 'lx':'x', 'ux':'km',
        #'ny':151, 'oy':0, 'dy':0.002, 'ly':'y', 'uy':'km',
        #'nz':351, 'oz':0, 'dz':0.002, 'lz':'z', 'uz':'km',
        'nx':251, 'ox':0, 'dx':0.005, 'lx':'x', 'ux':'km',
        'ny':75, 'oy':0, 'dy':0.005, 'ly':'y', 'uy':'km',
        'nz':176, 'oz':0, 'dz':0.005, 'lz':'z', 'uz':'km',
        ###############################
        # Wavelet parameters
        ###############################
        'nt':4001, 'ot':0, 'dt':0.001, 'lt':'t', 'ut':'s',
        'frq':45, # Peak frequency for Ricker wavelet
        'kt':100, # Wavelet start position (wavelets are delayed for zero-phase)
        ###############################
        # Modeling code parameters
        ###############################
        'cfl': True,
        'dabc':True, # Use absorbing boundary condition?
        'nb':80, # How many cells for absorbing boundary?
        'abcone':True, # Use additional ramp condition for boundaries? (Use default)
        'dsou':False, # Use displacement source (acoustic-only)
        'expl':False, # Use exploding reflector (acoustic-only)
        'free':False, # Use free surface (generate multiples)
        'jdata':1, # Interval between time-iterations before saving data at recv
        'snap':True, # Save wavefield snapshots?
        'verb':True, # Verbose output?
        'jsnap':1, # Interval between time-iterations before saving wfld snapshot
        'debug':False, # Debugging output (elastic-only)?
        'nbell':5, # Size of interpolation for injection
        'ssou':False, # Use stress-source (elastic-only)
        'ompchunk':1, # OpenMP chunk size (use default)
        'ompnth':2, # Number of OpenMP threads to use (4 works best)
        ###############################
        # Thomsen parameters for models
        ###############################
        'vp':1.5,
        'ro':2.0,
        ###############################
        # Miscellaneous parameters
        ###############################
        'height':10,
        'nht': 80,
        'nhx': 40,
        'nhz': 40,
        }
    # Let fdmod derive plotting-related defaults from the dictionary above.
    fdmod.param(par)
    par['nframe']=5
    par['iframe']=4
    # ------------------------------------------------------------
    # End user parameters -- NO EDITS BELOW
    # ------------------------------------------------------------
    par['kz']=2./3.*par['nz']
    return par
def windowreceivers(rr,groups,keys,par):
    """Window the receiver geometry into per-group arrays, plot each group,
    and concatenate them all into *rr*."""
    for group_name, group_pars in groups.items():
        # Subsample this group's receivers from its source geometry file.
        Flow('rr-' + group_name, group_pars['group'],
             'window n2=%d f2=%d j2=%d squeeze=n'
             % (group_pars['nr'], group_pars['or'], group_pars['dr']))
        Plot('rr-' + group_name,
             fdmod.rrplot('plotcol=%d plotfat=10' % group_pars['color'], par))
    group_files = ['rr-' + key for key in keys]
    Flow(rr, group_files, 'cat axis=2 ${SOURCES[1:%d]}' % len(groups))
    Plot(rr, group_files, 'Overlay')
def triangulate(image,tcube,noisy,clean,groups,keys,hypocenters,subgroups,snapshots,par):
    """Back-project per-group (and per-subgroup) data, multiply/semblance the
    resulting wavefields into time cubes, and stack them into source images.

    ``ii`` tracks the running receiver offset of each group inside the full
    data array; ``snapshots`` is (first, count, stride) for frame plotting.
    """
    ii = 0
    Fork(nodes=1,time=3,ipn=1)
    # --- per-group: window the data, back-project, and probe hypocenters ---
    for group in keys:
        gpars = groups[group]
        nwin = gpars['nr']
        Flow('da-'+group,noisy,
             '''
             window n1=%d f1=%d squeeze=n |
             ''' % (nwin,ii) +
             '''put o1=%(oz)g d1=%(dz)g ''' % par)
        Result('da-'+group+'_',clean,
             '''
             window n1=%d f1=%d squeeze=n |
             ''' % (nwin,ii) +
             '''
             put o1=0 d1=1 | transp |
             wiggle poly=y pclip=99 title="" labelsz=6 labelfat=3 titlesz=12 titlefat=3
             label2="\F2 trace\F3 " label1="\F2 time\F3"
             ''' )
        Result('da-'+group,
             '''put o1=0 d1=1 | transp |
             wiggle poly=n pclip=100 title="" transp=%(transp)d labelsz=6 labelfat=3 titlesz=12 titlefat=3
             yreverse=%(yreverse)d %(custom)s
             label2="\F2 trace\F3 " label1="\F2 time\F3"
             ''' % gpars)
        backproject('da-'+group,'rr-'+group,'vp-2d','ro-2d','_wa-'+group,par)
        Flow('wa-%s'%group,'_wa-%s'%group,
             '''
             transp plane=23 | transp plane=12
             ''' % par)
        Result('wa-%s' % group,'_wa-%s' % group,
             'window f3=%d n3=%d j3=%d | ' % (snapshots[0],snapshots[1],snapshots[2]) +
             fdmod.cgrey('pclip=100',par))
        for i in range(snapshots[0],snapshots[0]+snapshots[1]*snapshots[2],snapshots[2]):
            Plot('wa-%s-%d' % (group,i),'_wa-%s' % group,'window n3=1 f3=%d | ' % (i) + fdmod.cgrey('pclip=100',par))
            Result('wa-%s-%d' % (group,i),['wa-%s-%d' % (group,i),'rr-2d'],'Overlay')
        # --- split the group into receiver subgroups and back-project each ---
        subgroupwflds = []
        for sub in subgroups:
            for j in range(0,nwin,sub):
                if sub + j <= nwin:
                    Flow('da-%s-%d-%d' % (group,sub,j),'da-%s' % group,
                         '''
                         window n1=%d f1=%d squeeze=n
                         ''' % (sub,j))
                    Flow('rr-%s-%d-%d' % (group,sub,j),'rr-%s' % group,
                         '''
                         window n2=%d f2=%d squeeze=n
                         ''' % (sub,j))
                else:
                    Flow('da-%s-%d-%d' % (group,sub,j),'da-%s' % group,
                         '''
                         window f1=%d squeeze=n
                         ''' % (j))
                    Flow('rr-%s-%d-%d' % (group,sub,j),'rr-%s' % group,
                         '''
                         window f2=%d squeeze=n
                         ''' % (j))
                backproject('da-%s-%d-%d' % (group,sub,j),
                            'rr-%s-%d-%d' % (group,sub,j),
                            'vp-2d','ro-2d','_wa-%s-%d-%d' % (group,sub,j),par)
                # Go from z-x-t to t-z-x
                Flow('wa-%s-%d-%d'% (group,sub,j),'_wa-%s-%d-%d'%(group,sub,j),
                     '''
                     transp plane=23 | transp plane=12
                     ''' % par)
                Result('wa-%s-%d-%d' % (group,sub,j),'_wa-%s-%d-%d' % (group,sub,j),
                       'window f3=%d n3=%d j3=%d | ' % (snapshots[0],snapshots[1],snapshots[2]) +
                       fdmod.cgrey('pclip=100',par))
                subgroupwflds.append('_wa-%s-%d-%d' % (group,sub,j))
        # --- extract the wavefield time series at each candidate hypocenter ---
        j = 0
        for hypocenter in hypocenters:
            xi = hypocenter[0]
            zi = hypocenter[1]
            ti = hypocenter[2]
            Flow('hypo-%d-%s' % (j,group), '_wa-%s' % group,
                 '''
                 window min1=%(oz)f min2=%(ox)f n1=%(nz)d n2=%(nx)d |
                 ''' % par +
                 '''
                 window n1=1 n2=1 f1=%d f2=%d
                 ''' % (zi,xi))
            for subgroupwfld in subgroupwflds:
                subgrouphypo = subgroupwfld.replace('_wa','hypo-%d' % j)
                Flow(subgrouphypo,subgroupwfld,
                     '''
                     window n1=1 n2=1 f1=%d f2=%d
                     ''' % (zi,xi))
            j+= 1
        ii += nwin
    Iterate()
    Join()
    # --- gather per-hypocenter traces across groups and subgroups ---
    for jhypo in range(len(hypocenters)):
        Flow('hypo-%d' % jhypo, ['hypo-%d-%s' % (jhypo,group) for group in keys],
             '''
             cat axis=2 ${SOURCES[1:%d]}
             ''' % len(keys))
        Result('hypo-%d' % jhypo, 'grey pclip=95')
        #Save('hypo-%d' % jhypo)
        for sub in subgroups:
            subwflds = []
            for group in keys:
                for j in range(0,groups[group]['nr'],sub):
                    subwflds.append('hypo-%d-%s-%d-%d' % (jhypo,group,sub,j))
            Flow('hypo-%d-%d' % (jhypo,sub), subwflds,
                 '''
                 cat axis=2 ${SOURCES[1:%d]}
                 ''' % len(subwflds))
            Result('hypo-%d-%d' % (jhypo,sub),'grey pclip=95')
            #Save('hypo-%d-%d' % (jhypo,sub))
    # --- per-subgroup time cubes: semblance and product of all wavefields ---
    for sub in subgroups:
        subwflds = ['wa-%s-%d-%d'% (group,sub,j) for group in keys for j in range(0,groups[group]['nr'],sub) ]
        Flow(tcube+'-sem-%d' % sub,subwflds,
             '''
             semblance m=10 ${SOURCES[1:%d]} |
             transp plane=12 | transp plane=23
             ''' % len(subwflds))
        Flow(tcube+'-%d' % sub,subwflds,
             '''
             add mode=p ${SOURCES[1:%d]} |
             transp plane=12 | transp plane=23
             ''' % len(subwflds))
        Result(tcube+'-sem-%d'% sub,
               'window f3=%d n3=%d j3=%d | ' %
               (snapshots[0],snapshots[1],snapshots[2]) +
               fdmod.cgrey('pclip=99.9 gainpanel=a',par))
        Result(tcube+'-%d' % sub,
               'window f3=%d n3=%d j3=%d | ' % (snapshots[0],snapshots[1],snapshots[2]) +
               fdmod.cgrey('pclip=99.9 gainpanel=a',par))
        Flow(image+'-%d' % sub,tcube+'-%d' % sub,'stack axis=3')
        #Flow(image+'-sem-%d' % sub,tcube+'-sem-%d' % sub,'thr thr=0.4 mode="hard" | stack axis=3')
        Flow(image+'-sem-%d' % sub,tcube+'-sem-%d' % sub,'stack axis=3')
        Plot(image+'-sem-box-%d' % sub,image+'-sem-%d' % sub,
             fdmod.cgrey('pclip=100 min2=0.4 max2=0.9 min1=0.2 max1=0.4',par))
        Plot(image+'-box-%d' % sub,image+'-%d' % sub,
             fdmod.cgrey('pclip=99.98 min2=0.4 max2=0.9 min1=0.2 max1=0.4',par))
        Plot(image+'-%d' % sub,fdmod.cgrey('pclip=99.98',par))
        Result(image+'-%d' % sub,[image+'-%d' % sub,'ss-2d','box'],'Overlay')
        Result('image-box-%d' % sub,[image+'-box'+'-%d' % sub,'ss-2d-box'],'Overlay')
        Result('image-sem-box-%d' % sub,[image+'-sem-box-%d' % sub,'ss-2d-box'],'Overlay')
    # --- full-group time cubes and the final stacked images ---
    Flow(tcube+'-sem',['wa-%s' % group for group in keys],
         '''
         semblance m=10 ${SOURCES[1:%d]} |
         transp plane=12 | transp plane=23
         ''' % len(keys))
    Flow(tcube,['wa-%s'%group for group in keys],
         '''
         add mode=p ${SOURCES[1:%d]} |
         transp plane=12 | transp plane=23
         ''' % len(keys))
    Result(tcube,
           'window f3=%d n3=%d j3=%d | ' %
           (snapshots[0],snapshots[1],snapshots[2]) +
           fdmod.cgrey('pclip=100 gainpanel=a',par))
    Result(tcube+'-sem',
           'window f3=%d n3=%d j3=%d | ' %
           (snapshots[0],snapshots[1],snapshots[2]) +
           fdmod.cgrey('pclip=100 gainpanel=a',par))
    for i in range(snapshots[0],snapshots[0]+snapshots[1]*snapshots[2],snapshots[2]):
        Plot(tcube+'-%d' % i,tcube,'window n3=%d f3=%d | ' % (1,i) + fdmod.cgrey('pclip=99.9 gainpanel=a',par))
        Result(tcube+'-%d' % i , [tcube+'-%d' % i,'rr-2d'],'Overlay')
        Plot(tcube+'-sem-%d' % i,tcube+'-sem','window n3=%d f3=%d | ' % (1,i) + fdmod.cgrey('pclip=99.9 gainpanel=a',par))
        Result(tcube+'-sem-%d' % i , [tcube+'-sem-%d' % i,'rr-2d'],'Overlay')
    Flow(image,tcube,'stack axis=3')
    #Flow(image+'-sem',tcube+'-sem','thr thr=0.4 mode="hard" | stack axis=3')
    Flow(image+'-sem',tcube+'-sem','stack axis=3')
    Plot(image+'-box',image,fdmod.cgrey('pclip=99.98 min2=0.4 max2=0.9 min1=0.2 max1=0.4',par))
    Plot(image+'-sem-box',image+'-sem',fdmod.cgrey('pclip=99.98 min2=0.4 max2=0.9 min1=0.2 max1=0.4',par))
    Plot(image,fdmod.cgrey('pclip=99.98',par))
    Plot(image+'-sem',fdmod.cgrey('pclip=100',par))
    Result(image,[image,'ss-2d','box'],'Overlay')
    Result(image+'-sem',[image+'-sem','ss-2d','box'],'Overlay')
    Result('image-box',[image+'-box','ss-2d-box'],'Overlay')
    Result('image-sem-box',[image+'-sem-box','ss-2d-box'],'Overlay')
# ------------------------------------------------------------
# Setup functions for calling FD operators
# ------------------------------------------------------------
# These operations are usually hidden, but having them here is more
# transparent. All possible options are specified by the user.
def backproject(data,receivers,velocity,density,wavefieldname,par):
    """Time-reverse the recorded data and re-inject it at the receivers,
    saving the resulting wavefield under *wavefieldname*."""
    reversed_data = data + '-reversed'
    Flow(reversed_data, data, 'sfreverse which=2 opt=i')
    # Receivers act as both sources and receivers; the output data is unused.
    awefd(data + '-junk', wavefieldname, reversed_data,
          velocity, density,
          receivers, receivers, par)
def awefd(odat,owfl,idat,velo,dens,sou,rec,par):
    """Run the acoustic wave-equation modeling code.

    Declares a Flow producing the recorded data *odat* and the wavefield
    snapshots *owfl* from the wavelet *idat*, velocity *velo*, density
    *dens*, source geometry *sou* and receiver geometry *rec*.
    """
    # call the acoustic wave equation code
    # see sfawe for a more detailed description of options
    Flow([odat,owfl],[idat,velo,dens,sou,rec],
        '''
        awe
        ompchunk=%(ompchunk)d ompnth=%(ompnth)d
        snap=%(snap)d jsnap=%(jsnap)d
        dabc=%(dabc)d nb=%(nb)d
        dsou=%(dsou)d free=%(free)d
        expl=%(expl)d jdata=%(jdata)d
        cfl=%(cfl)d
        fmax=%(frq)f
        verb=%(verb)d
        vel=${SOURCES[1]}
        den=${SOURCES[2]}
        sou=${SOURCES[3]}
        rec=${SOURCES[4]}
        wfl=${TARGETS[1]}
        nqz=%(nz)d
        nqx=%(nx)d
        dqz=%(dz)f
        dqx=%(dx)f
        oqz=%(oz)f
        oqx=%(ox)f
        ''' % par)
# ------------------------------------------------------------
def wavelet(waveletname,frequency,kt,par):
    """Build a Ricker wavelet with the given peak frequency, delayed by
    *kt* samples (zero-phase), normalized and thresholded."""
    # Work on a copy so the caller's parameter dictionary is untouched.
    wav_par = dict(par, kt=kt, frequency=frequency)
    Flow(waveletname,None,
        '''
        spike nsp=1 mag=1 n1=%(nt)d d1=%(dt)g o1=%(ot)g k1=%(kt)d |
        pad end1=%(nt)d |
        ricker1 frequency=%(frequency)g |
        window n1=%(nt)d |
        scale axis=123 |
        put label1=t | thr thr=0.001
        ''' % wav_par)
# ------------------------------------------------------------
def makemicroseisms(ns,wav,sou,par):
    """Place *ns* random microseismic point sources with delayed Ricker
    wavelets, and return their (x-index, z-index, time-index) locations.

    Two deliberate "bad" locations are appended at the end (no microseism
    actually occurs there).
    """
    sources = []
    wavelets = []
    r = random.Random()
    # fixed seed -> reproducible source geometry across SCons runs
    r.seed(1234)
    locations = []
    for i in range(ns):
        tag = '-%03d' % i
        xi = r.randrange(100,150)
        zi = r.randrange(50,60)
        # Fix: floor division keeps the bounds integers under Python 3
        # (identical values under Python 2, where / already floors ints).
        ti = r.randrange(par['nt']//4,3*par['nt']//4)
        # Fix: Python-2-only print statement -> print() call (same output).
        print('Microseism %d %d %d %d' % (i,xi,zi,ti))
        locations.append((xi,zi,ti))
        xsou = par['ox']+par['dx']*xi
        zsou = par['oz']+par['dz']*zi
        fdmod.point(sou+tag,xsou,zsou,par)
        wavelet(wav+tag,par['frq'],ti,par)
        sources.append(sou+tag)
        wavelets.append(wav+tag)
    Flow(wav+'_',wavelets,'cat axis=2 ${SOURCES[1:%d]}' % ns)
    Flow(sou,sources,'cat axis=2 ${SOURCES[1:%d]}' % ns)
    Plot('ss-2d',fdmod.ssplot('symbol=+ symbolsz=7 plotfat=5',par))
    Plot('ss-2d-box','ss-2d',
         fdmod.ssplot('min1=0.4 max1=0.9 min2=0.2 max2=0.4 plotfat=5 symbol=+ symbolsz=9',par))
    # NOTE(review): 'wava' reads the literal target 'wav_', which assumes the
    # wav argument is always "wav" — confirm with the callers.
    Flow( 'wava','wav_','add scale=10000000 | transp')
    Result('wava','transp |' + fdmod.waveplot('',par))
    # These are bad locations, no microseisms here.
    locations.append((50,25,100))
    locations.append((75,80,100))
    return locations
# ------------------------------------------------------------
def model(rr,par):
    """Build the 2D earth model: a v(z) layered velocity, constant density,
    auxiliary fault/constant masks, a zoom box, and the model plots."""
    # Blank grid used as the template for all other model-building Flows.
    Flow('zero-2d',None,
        '''
        spike nsp=1 mag=0.0
        n1=%(nz)d o1=%(oz)g d1=%(dz)g
        n2=%(nx)d o2=%(ox)g d2=%(dx)g |
        put label1=%(lz)s label2=%(lx)s unit1=%(uz)s unit2=%(ux)s
        ''' % par)
    # Layered v(z): five constant-velocity layers added on top of vp.
    Flow('vz-2d','zero-2d',
        '''
        spike nsp=5
        nsp=5 k1=10,40,70,100,130 l1=39,69,99,129,%(nz)d mag=0.2,0.4,0.6,0.8,1.0
        n1=%(nz)d o1=%(oz)g d1=%(dz)g
        n2=%(nx)d o2=%(ox)g d2=%(dx)g |
        put label1=%(lz)s label2=%(lx)s unit1=%(uz)s unit2=%(ux)s |
        add add=%(vp)f
        ''' % par)
    # Dipping fault mask (built but not folded into vp-2d here).
    Flow('fault-2d','zero-2d',
        '''
        spike nsp=1 k1=40 mag=1.0 l1=%(nz)d k2=60 l2=%(nx)d p2=1
        n1=%(nz)d o1=%(oz)g d1=%(dz)g
        n2=%(nx)d o2=%(ox)g d2=%(dx)g |
        put label1=%(lz)s label2=%(lx)s unit1=%(uz)s unit2=%(ux)s
        ''' % par)
    Flow('const-2d','zero-2d',
        '''
        spike nsp=1 mag=1.0 k1=40 l1=%(nz)d k2=1 l2=59
        n1=%(nz)d o1=%(oz)g d1=%(dz)g
        n2=%(nx)d o2=%(ox)g d2=%(dx)g |
        put label1=%(lz)s label2=%(lx)s unit1=%(uz)s unit2=%(ux)s
        ''' % par)
    Flow('vp-2d','vz-2d','window')
    # Constant density everywhere.
    Flow('ro-2d','zero-2d','math output="%(ro)g"' %par)
    fdmod.makebox('box',0.2,0.4,0.4,0.9,par)
    Plot('box',fdmod.bbplot('',par))
    Plot('vp-2d',fdmod.cgrey('allpos=y pclip=100 bias=1.5 ',par))
    Plot('ro-2d',fdmod.cgrey('bias=2. allpos=y',par))
    Result('vp-2d','vp-2d ss-2d rr-2d box','Overlay')
    Result('ro-2d','ro-2d ss-2d','Overlay')
def synthesize(data,rr,snapshots,par):
    """Model the microseismic data at receivers *rr* and plot selected
    wavefield snapshot frames (snapshots = (first, count, stride))."""
    # 2D acoustic modeling
    awefd(data,'wa-2d','wava','vp-2d','ro-2d','ss-2d',rr,par)
    Result(data,'transp |' + fdmod.dgrey('',par))
    for i in range(snapshots[0],snapshots[0]+snapshots[1]*snapshots[2],snapshots[2]):
        Plot('wa-2d-%d' % i,'wa-2d','window n3=%d f3=%d | ' % (1,i) + fdmod.cgrey('pclip=99.9 gainpanel=a',par))
        Result('wa-2d-%d' %i , ['wa-2d-%d' % i,rr],'Overlay')
def addnoise(noisy,data,scale,snapshots,par):
    """Add scaled band-limited (20-50 Hz) random noise to *data*, producing
    *noisy*, then back-project and plot the noisy wavefield snapshots."""
    # seed=123 makes the noise realization reproducible between runs.
    Flow(noisy,data, 'math output="0" | noise seed=123 | transp | bandpass flo=20 fhi=50 | transp | add scale=%f | add mode=a ${SOURCES[0]} | add scale=1e6' % scale)
    Result(noisy,'transp | grey pclip=99.9')
    backproject(noisy,'rr-2d','vp-2d','ro-2d','wa-%s'% noisy,par)
    Result('wa-%s' % noisy,
        'window f3=%d n3=%d j3=%d | ' % (snapshots[0],snapshots[1],snapshots[2]) +
        fdmod.cgrey('pclip=100',par))
| [
"jgodwin@mines.edu"
] | jgodwin@mines.edu |
0f6b34fbcc11d1d36e1186122b4196348d01de41 | 15d3a10db27128c06f84c30fa8d64b2e1c629fd9 | /express/express/api_exception.py | 50d8121033b83ac36e6070744f39d492bda13465 | [] | no_license | yiyuhao/exp | 7cba6650e3113ba05698f90a7baf75b680dd6435 | 866a90b2e6f0d113559b0674f514cdd56020f7d6 | refs/heads/master | 2020-03-19T20:20:04.799355 | 2018-07-15T14:55:24 | 2018-07-15T14:55:24 | 136,897,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | # -*- coding: utf-8 -*
from rest_framework.views import exception_handler
def custom_exception_handler(exc, context):
# Call REST framework's default exception handler first,
# to get the standard error response.
response = exception_handler(exc, context)
# Now add the HTTP status code to the response.
if response is not None:
response.data['status_code'] = response.status_code
return response | [
"yiyuhao@mixadx.com"
] | yiyuhao@mixadx.com |
a3b8ebb9edc3184f04b98b58d25d2ad29b4d644c | 3b21c2a5422dc2b900f65894849e7e2e765fc7cc | /CameraField.py | 8e4d25c66f759a3b7ebfd2a2dfdccca52657da95 | [] | no_license | mrbhjv/dft_python | 2c519dcdb5100511376c35db63c0248628fb9b3e | 480fffd81374f37f6a62c362fb551b2021772429 | refs/heads/master | 2020-04-24T09:04:28.412581 | 2011-07-21T12:23:47 | 2011-07-21T12:23:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | import naoqi
from naoqi import ALProxy
import numpy
import DynamicField
import math_tools
class NaoCameraField(DynamicField.DynamicField):
    """Dynamic field fed by the Nao robot's bottom camera (HSV frames)."""
    def __init__(self):
        "Constructor"
        DynamicField.DynamicField.__init__(self, dimension_bounds = [[40],[30],[15]])
        self._vision_proxy = ALProxy("ALVideoDevice", "nao.ini.rub.de", 9559)
        self._gvm_name = "nao vision"
        # subscribe() returns the actual module name to use for later calls
        self._gvm_name = self._vision_proxy.subscribe(self._gvm_name, 0, 12, 30)
        # switch off auto white balance
        self._vision_proxy.setParam(12, 0)
        # select the bottom camera
        self._vision_proxy.setParam(18, 1)
        self._name = "nao_camera_field"
    def __del__(self):
        # NOTE(review): relying on __del__ for unsubscribe is fragile (it may
        # never run); an explicit shutdown method would be safer.
        self._gvm_name = self._vision_proxy.unsubscribe(self._gvm_name)
    def _step_computation(self):
        """Convert the current camera frame into field activation.

        Hue selects the bin along the third (color) dimension; saturation,
        rescaled to [-5, 5], becomes the activation in that bin. Every other
        bin and the spatial border are clamped to -5.
        """
        naoimage = self._vision_proxy.getImageRemote(self._gvm_name)
        # Fix: numpy.frombuffer replaces the deprecated numpy.fromstring.
        hsv_image = numpy.frombuffer(naoimage[6], dtype=numpy.uint8)
        hue = hsv_image[::3].reshape(120,160)
        saturation = hsv_image[1::3].reshape(120,160)
        hue = numpy.rot90(hue, 3)
        saturation = numpy.rot90(saturation, 3)
        sizes = self.get_input_dimension_sizes()
        max_activation_level = 5.0
        hue = math_tools.linear_interpolation_2d_custom(hue, [sizes[0], sizes[1]])
        saturation = math_tools.linear_interpolation_2d_custom(saturation, [sizes[0], sizes[1]])
        # Fix: numpy.int was removed in NumPy 1.24; plain int is the alias it stood for.
        hue = numpy.round(hue * ((sizes[2] - 1)/255.)).astype(int)
        saturation = saturation * (2 * max_activation_level / 255.) - max_activation_level
        # Vectorized replacement of the original per-pixel double loop:
        # reset every bin, then scatter saturation into the hue-selected bins.
        self._activation.fill(-max_activation_level)
        rows = numpy.arange(sizes[0])[:, numpy.newaxis]
        cols = numpy.arange(sizes[1])
        self._activation[rows, cols, hue] = saturation
        # Clamp the spatial border of the field.
        self._activation[0,:,:] = -max_activation_level
        self._activation[sizes[0]-1,:,:] = -max_activation_level
        self._activation[:,0,:] = -max_activation_level
        self._activation[:,sizes[1]-1,:] = -max_activation_level
        self._output_buffer = self.compute_thresholded_activation(self._activation)
class GaussCameraField(DynamicField.DynamicField):
    """Static stand-in camera field: a single 3D Gaussian blob of activation."""
    def __init__(self):
        "Constructor"
        DynamicField.DynamicField.__init__(self, dimension_bounds = [[40],[30],[15]])
        # Gaussian of amplitude 9 and sigma 2 centered at (10, 20, 0).
        self._activation += math_tools.gauss_3d([40,30,15], 9.0, [2.0,2.0,2.0], [10,20,0])
        self._output_buffer = self.compute_thresholded_activation(self._activation)
    def _step_computation(self):
        # The activation is fixed at construction time; nothing to update.
        pass
class DummyCameraField(DynamicField.DynamicField):
    """Camera field replayed from a saved snapshot file instead of a live camera."""
    def __init__(self):
        "Constructor"
        DynamicField.DynamicField.__init__(self, dimension_bounds = [[40],[30],[15]])
        # Fix: use a context manager so the snapshot file is closed even if
        # parsing fails (the original leaked the handle on error).
        # NOTE(review): the relative path assumes a specific working directory.
        with open("snapshots/camera_field.txt", 'r') as camera_field_file:
            activation = numpy.fromfile(camera_field_file, sep=', ')
        # Snapshot was saved at full resolution; downsample to this field's size.
        activation = activation.reshape(160,120,50)
        self._activation = math_tools.linear_interpolation_nd(activation, [40, 30, 15])
        self._output_buffer = self.compute_thresholded_activation(self._activation)
    def _step_computation(self):
        # Static replay field: nothing changes between steps.
        pass
| [
"mathis.richter@ini.rub.de"
] | mathis.richter@ini.rub.de |
65e87e100e5ca37ed1bf10f7336709b79e1b9140 | b558b4348ff88bb670bf1a318d3c22d48ebf5627 | /src/manage.py | 7325504ca062609d3ef47b1c19939d4ae8762ec9 | [] | no_license | rnjane/Flight-Booking-API | 540fe63d47a6ac622633b8560603ce34600857e2 | 05f974c1f6b3dafe18b7cdc417f11f314271625b | refs/heads/develop | 2022-12-10T13:21:57.567096 | 2019-02-04T11:06:11 | 2019-02-04T11:06:11 | 162,910,819 | 0 | 0 | null | 2022-12-08T01:34:12 | 2018-12-23T17:26:58 | Python | UTF-8 | Python | false | false | 546 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Standard Django entry point: point Django at this project's settings
    # module before anything imports them.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bookingproject.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint; `from exc` keeps the original cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
"robert.njane@andela.com"
] | robert.njane@andela.com |
9f9308863eec758d41777158b11d291e1b437a83 | 9b63ade6dd9c166b2e9dc363de94d6a02149bc69 | /app/core/migrations/0001_initial.py | dd8e72540712189dd4156237b574b3e1e3799370 | [
"MIT"
] | permissive | nafeesahyounis/recipe-app-api | 25244f7a12772a0281d99c03a0c6cf9b434ba849 | ff9f42de65d9b0185d9ab9fb565e4a881831cb15 | refs/heads/main | 2023-03-07T13:00:00.005410 | 2021-02-24T10:02:04 | 2021-02-24T10:02:04 | 338,332,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | # Generated by Django 3.1.6 on 2021-02-23 11:23
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| [
"nafeesah.youniss@gmail.com"
] | nafeesah.youniss@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.