Dataset schema (one row per source file; ⌀ marks a nullable field):

| column | type | observed range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 996k |
| ext | string | 8 classes |
| lang | string | 1 class (Python) |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable (⌀) |
| content | string | length 4 to 996k |
| avg_line_length | float64 | 1.33 to 58.2k |
| max_line_length | int64 | 2 to 323k |
| alphanum_fraction | float64 | 0 to 0.97 |
| content_no_comment | string | length 0 to 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
hexsha: 790446d7cdc8deb70d6166749028b7a8600b03dd | size: 1,957 | ext: py | lang: Python
path: sntools/formats/warren2020.py | repo: arfon/sntools @ ad783dbb81e50ca786dfcc22a7359e74ecd58010 | licenses: ["BSD-3-Clause"]
stars: ⌀ | issues: ⌀ | forks: ⌀

```python
"""Parse Warren2020 fluxes.
Fluxes from https://zenodo.org/record/3952926 (DOI:10.5281/zenodo.3952926)
See https://arxiv.org/abs/1902.01340 and https://arxiv.org/abs/1912.03328
for description of the models.
"""
import h5py
from sntools.formats import gamma, get_starttime, get_endtime
flux = {}
def parse_input(input, inflv, starttime, endtime):
"""Read simulations data from input file.
Arguments:
input -- prefix of file containing neutrino fluxes
inflv -- neutrino flavor to consider
starttime -- start time set by user via command line option (or None)
endtime -- end time set by user via command line option (or None)
"""
f = h5py.File(input, 'r')
for (t, r) in f['sim_data']['shock_radius']:
if r > 1:
tbounce = t * 1000 # convert to ms
break
starttime = get_starttime(starttime, 1000 * f['sim_data']['shock_radius'][0][0] - tbounce)
endtime = get_endtime(endtime, 1000 * f['sim_data']['shock_radius'][-1][0] - tbounce)
# Save flux data to dictionary to look up in nu_emission() below
global flux
flux = {}
path = {'e': 'nue_data', 'eb': 'nuae_data', 'x': 'nux_data', 'xb': 'nux_data'}[inflv]
for i, (t, lum) in enumerate(f[path]['lum']):
t = 1000 * t - tbounce # convert to time post-bounce in ms
if (t < starttime - 30) or (t > endtime + 30):
# Ignore data outside of the requested time span.
continue
lum *= 1e51 * 624.151 # convert from 10^51 erg/s to MeV/ms
mean_e = f[path]['avg_energy'][i][1]
mean_e_sq = f[path]['rms_energy'][i][1]**2
flux[t] = (mean_e, mean_e_sq, lum)
f.close()
return (starttime, endtime, sorted(flux.keys()))
def prepare_evt_gen(binned_t):
global flux
gamma.flux = flux
gamma.prepare_evt_gen(binned_t)
flux = gamma.flux
def nu_emission(eNu, time):
gamma.flux = flux
return gamma.nu_emission(eNu, time)
```

avg_line_length: 30.578125 | max_line_length: 94 | alphanum_fraction: 0.633112
content_no_comment: the content above with comments and docstrings stripped | is_comment_constant_removed: true | is_sharp_comment_removed: true
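The module above is used in three steps: parse_input reads the HDF5 file and clamps the usable time span, prepare_evt_gen pre-processes the binned times, and nu_emission evaluates the emission spectrum by delegating to the gamma helper. A minimal driver sketch, assuming sntools is installed and a Warren2020 file named `warren2020.h5` (hypothetical filename) sits in the working directory:

```python
# Hypothetical driver for the warren2020 parser above; the file name and
# flavor choice are assumptions, not part of the original module.
from sntools.formats import warren2020

# Parse electron-neutrino fluxes over the full simulated time span.
start, end, times = warren2020.parse_input("warren2020.h5", "e", None, None)
print("usable time span: %.1f ms to %.1f ms (%d flux samples)" % (start, end, len(times)))

# Pre-process flux data for the requested times, then evaluate the
# emission spectrum at 10 MeV for the first available time bin.
warren2020.prepare_evt_gen(times)
print(warren2020.nu_emission(10.0, times[0]))
```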
hexsha: 790447008db9f910586240130365e9560a196a87 | size: 1,114 | ext: py | lang: Python
path: code/python/scripts/bible2books.py | repo: morethanbooks/XML-TEI-Bible @ eb42b0ff37ad0049e84f01eb55ec786c8b4a54ea | licenses: ["CC-BY-4.0"]
stars: 18 (2016-10-05T15:38:49.000Z to 2021-11-09T08:43:16.000Z) | issues: ⌀ | forks: 2 (2020-12-22T10:27:29.000Z to 2021-04-16T12:00:42.000Z)

```python
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 2 14:33:27 2018
@author: jose
"""
import pandas as pd
import re
import os
import glob
metadata = pd.read_csv("/home/jose/Dropbox/biblia/tb/documentation/libros.csv", sep="\t")
for doc in glob.glob("/home/jose/Dropbox/biblia/datos/origen/rv95.txt"):
#print("aquí va doc!!!: ",doc)
input_name = os.path.splitext(os.path.split(doc)[1])[0]
#print(input_name)
    with open(doc, "r", errors="replace", encoding="utf-8") as fin:
        biblia = fin.read()
        for index, row in metadata.iterrows():
            print(row[["id","codebook"]])
            if row["id"] < 66:
                book = re.findall(r"(\n\|" + str(row["id"]) + r"\|.*?)\n\|" + str(int(row["id"]) + 1) + r"\|", biblia, flags=re.DOTALL)[0]
            else:
                book = re.findall(r"(\n\|" + str(row["id"]) + r"\|.*?)\Z", biblia, flags=re.DOTALL | re.MULTILINE)[0]
            #print(book[0:100])
            with open("/home/jose/Dropbox/biblia/datos/origen/" + row["codebook"] + ".txt", "w", encoding="utf-8") as fout:
                fout.write(book)
```

avg_line_length: 34.8125 | max_line_length: 126 | alphanum_fraction: 0.552065
content_no_comment: the content above with comments and docstrings stripped | is_comment_constant_removed: true | is_sharp_comment_removed: true
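The splitting relies on each book beginning with a `\n|<id>|` marker in the source text. A self-contained check of that regex on an invented two-book sample:

```python
import re

# Invented sample in the |id| format the script expects.
biblia = "\n|1|In the beginning...\ntext of book one\n|2|Second book text\nmore text"

book_id = 1
# Same pattern as the script: capture from this book's marker up to the
# marker of the next book (re.DOTALL lets .*? span newlines).
book = re.findall(r"(\n\|" + str(book_id) + r"\|.*?)\n\|" + str(book_id + 1) + r"\|",
                  biblia, flags=re.DOTALL)[0]
print(repr(book))  # -> '\n|1|In the beginning...\ntext of book one'
```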
hexsha: 790447429455cd3062a5a8b99b87ade7744c0f00 | size: 2,025 | ext: py | lang: Python
path: tests/additionals/test_functions.py | repo: ikamensh/python_dwd @ bed0aa6dee4f23bc13cca3a6c400d9c4f0f2c32b | licenses: ["MIT"]
stars: ⌀ | issues: ⌀ | forks: ⌀

```python
from python_dwd.additionals.functions import check_parameters, retrieve_time_resolution_from_filename,\
retrieve_parameter_from_filename, retrieve_period_type_from_filename, determine_parameters
from python_dwd.enumerations.period_type_enumeration import PeriodType
from python_dwd.enumerations.time_resolution_enumeration import TimeResolution
from python_dwd.enumerations.parameter_enumeration import Parameter
def test_check_parameters():
assert check_parameters(Parameter.PRECIPITATION, TimeResolution.MINUTE_10, PeriodType.HISTORICAL)
def test_retrieve_time_resolution_from_filename():
assert retrieve_time_resolution_from_filename('10minutenwerte_2019.csv') == TimeResolution.MINUTE_10
assert retrieve_time_resolution_from_filename('1minutenwerte_2019.csv') == TimeResolution.MINUTE_1
assert retrieve_time_resolution_from_filename('tageswerte__2019.csv') == TimeResolution.DAILY
    assert retrieve_time_resolution_from_filename('tageswerte2019.csv') is None
def test_retrieve_parameter_from_filename():
assert retrieve_parameter_from_filename('bidb_!!_st_.xml', TimeResolution.HOURLY) == Parameter.SOLAR
assert retrieve_parameter_from_filename('10000_historical_nieder_.txt', TimeResolution.MINUTE_1) \
== Parameter.PRECIPITATION
assert retrieve_parameter_from_filename('klima_climate_kl_.csv', TimeResolution.DAILY) == Parameter.CLIMATE_SUMMARY
assert retrieve_parameter_from_filename('klima_climate_kl_.csv', TimeResolution.MINUTE_1) is None
def test_retrieve_period_type_from_filename():
assert retrieve_period_type_from_filename('_hist.xml') == PeriodType.HISTORICAL
assert retrieve_period_type_from_filename('no_period_type') is None
def test_determine_parameters():
assert determine_parameters('10minutenwerte_hist_nieder_') == (Parameter.PRECIPITATION,
TimeResolution.MINUTE_10,
PeriodType.HISTORICAL)
```

avg_line_length: 53.289474 | max_line_length: 119 | alphanum_fraction: 0.788148
content_no_comment: the content above with comments and docstrings stripped | is_comment_constant_removed: true | is_sharp_comment_removed: true
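The assertions above repeat one call-and-compare pattern per filename. The same cases in pytest's table-driven style, shown as an alternative sketch (the repository's tests are not written this way):

```python
import pytest
from python_dwd.additionals.functions import retrieve_time_resolution_from_filename
from python_dwd.enumerations.time_resolution_enumeration import TimeResolution

@pytest.mark.parametrize("filename, expected", [
    ("10minutenwerte_2019.csv", TimeResolution.MINUTE_10),
    ("1minutenwerte_2019.csv", TimeResolution.MINUTE_1),
    ("tageswerte__2019.csv", TimeResolution.DAILY),
    ("tageswerte2019.csv", None),  # unknown pattern resolves to None
])
def test_retrieve_time_resolution_from_filename(filename, expected):
    assert retrieve_time_resolution_from_filename(filename) == expected
```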
hexsha: 7904477c1c6c63f98767e457f313574adecf776e | size: 8,549 | ext: py | lang: Python
path: v2.5.7/toontown/toon/DistributedNPCSpecialQuestGiverAI.py | repo: TTOFFLINE-LEAK/ttoffline @ bb0e91704a755d34983e94288d50288e46b68380 | licenses: ["MIT"]
stars: 4 (2019-07-01T15:46:43.000Z to 2021-07-23T16:26:48.000Z) | issues: 1 (2019-06-29T03:40:05.000Z to 2021-06-13T01:15:16.000Z) | forks: 4 (2019-07-28T21:18:46.000Z to 2021-02-25T06:37:25.000Z)

```python
from direct.task.Task import Task
from panda3d.core import *
# ClockDelta and NPCToons are referenced below; import them explicitly
# rather than relying on the star imports to provide them.
from direct.distributed import ClockDelta
from DistributedNPCToonBaseAI import *
from toontown.quest import Quests
from toontown.toon import NPCToons
class DistributedNPCSpecialQuestGiverAI(DistributedNPCToonBaseAI):
def __init__(self, air, npcId, questCallback=None, hq=0):
DistributedNPCToonBaseAI.__init__(self, air, npcId, questCallback)
self.hq = hq
self.tutorial = 0
self.pendingAvId = None
return
def getTutorial(self):
return self.tutorial
def setTutorial(self, val):
self.tutorial = val
def getHq(self):
return self.hq
def avatarEnter(self):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('avatar enter ' + str(avId))
self.air.questManager.requestInteract(avId, self)
DistributedNPCToonBaseAI.avatarEnter(self)
def chooseQuest(self, questId, quest=None):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('chooseQuest: avatar %s choseQuest %s' % (avId, questId))
if not self.pendingAvId:
self.notify.warning('chooseQuest: not expecting an answer from any avatar: %s' % avId)
return
if self.pendingAvId != avId:
self.notify.warning('chooseQuest: not expecting an answer from this avatar: %s' % avId)
return
if questId == 0:
self.pendingAvId = None
self.pendingQuests = None
self.air.questManager.avatarCancelled(avId)
self.cancelChoseQuest(avId)
return
for quest in self.pendingQuests:
if questId == quest[0]:
self.pendingAvId = None
self.pendingQuests = None
self.air.questManager.avatarChoseQuest(avId, self, *quest)
return
self.notify.warning('chooseQuest: avatar: %s chose a quest not offered: %s' % (avId, questId))
self.pendingAvId = None
self.pendingQuests = None
return
def chooseTrack(self, trackId):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('chooseTrack: avatar %s choseTrack %s' % (avId, trackId))
if not self.pendingAvId:
self.notify.warning('chooseTrack: not expecting an answer from any avatar: %s' % avId)
return
if self.pendingAvId != avId:
self.notify.warning('chooseTrack: not expecting an answer from this avatar: %s' % avId)
return
if trackId == -1:
self.pendingAvId = None
self.pendingTracks = None
self.pendingTrackQuest = None
self.air.questManager.avatarCancelled(avId)
self.cancelChoseTrack(avId)
return
for track in self.pendingTracks:
if trackId == track:
self.air.questManager.avatarChoseTrack(avId, self, self.pendingTrackQuest, trackId)
self.pendingAvId = None
self.pendingTracks = None
self.pendingTrackQuest = None
return
self.notify.warning('chooseTrack: avatar: %s chose a track not offered: %s' % (avId, trackId))
self.pendingAvId = None
self.pendingTracks = None
self.pendingTrackQuest = None
return
def sendTimeoutMovie(self, task):
self.pendingAvId = None
self.pendingQuests = None
self.pendingTracks = None
self.pendingTrackQuest = None
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TIMEOUT,
self.npcId,
self.busy, [],
ClockDelta.globalClockDelta.getRealNetworkTime()])
self.sendClearMovie(None)
self.busy = 0
return Task.done
def sendClearMovie(self, task):
self.pendingAvId = None
self.pendingQuests = None
self.pendingTracks = None
self.pendingTrackQuest = None
self.busy = 0
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_CLEAR,
self.npcId,
0, [],
ClockDelta.globalClockDelta.getRealNetworkTime()])
return Task.done
def rejectAvatar(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_REJECT,
self.npcId,
avId, [],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(5.5, self.sendClearMovie, self.uniqueName('clearMovie'))
def rejectAvatarTierNotDone(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TIER_NOT_DONE,
self.npcId,
avId, [],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(5.5, self.sendClearMovie, self.uniqueName('clearMovie'))
def completeQuest(self, avId, questId, rewardId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_COMPLETE,
self.npcId,
avId,
[
questId, rewardId, 0],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def incompleteQuest(self, avId, questId, completeStatus, toNpcId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_INCOMPLETE,
self.npcId,
avId,
[
questId, completeStatus, toNpcId],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def assignQuest(self, avId, questId, rewardId, toNpcId):
self.busy = avId
if self.questCallback:
self.questCallback()
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_ASSIGN,
self.npcId,
avId,
[
questId, rewardId, toNpcId],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def presentQuestChoice(self, avId, quests):
self.busy = avId
self.pendingAvId = avId
self.pendingQuests = quests
flatQuests = []
for quest in quests:
flatQuests.extend(quest)
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_QUEST_CHOICE,
self.npcId,
avId,
flatQuests,
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def presentTrackChoice(self, avId, questId, tracks):
self.busy = avId
self.pendingAvId = avId
self.pendingTracks = tracks
self.pendingTrackQuest = questId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TRACK_CHOICE,
self.npcId,
avId,
tracks,
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def cancelChoseQuest(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_QUEST_CHOICE_CANCEL,
self.npcId,
avId, [],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def cancelChoseTrack(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TRACK_CHOICE_CANCEL,
self.npcId,
avId, [],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def setMovieDone(self):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('setMovieDone busy: %s avId: %s' % (self.busy, avId))
if self.busy == avId:
taskMgr.remove(self.uniqueName('clearMovie'))
self.sendClearMovie(None)
else:
if self.busy:
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCToonAI.setMovieDone busy with %s' % self.busy)
self.notify.warning('somebody called setMovieDone that I was not busy with! avId: %s' % avId)
return
```

avg_line_length: 38.336323 | max_line_length: 123 | alphanum_fraction: 0.628729
content_no_comment: the content above with comments and docstrings stripped | is_comment_constant_removed: true | is_sharp_comment_removed: true
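Every choice handler above follows the same guard pattern: record which avatar an answer is expected from in pendingAvId, reject answers from anyone else, and clear the pending state once the answer is consumed. A stripped-down illustration of that bookkeeping without the Panda3D networking (class and method names here are invented):

```python
# Invented stand-in for the pendingAvId/pendingQuests bookkeeping above;
# not part of the game code.
class PendingChoice:
    def __init__(self):
        self.pending_av_id = None
        self.pending_quests = None

    def offer(self, av_id, quests):
        self.pending_av_id = av_id
        self.pending_quests = quests

    def answer(self, av_id, quest_id):
        if self.pending_av_id != av_id:
            # Mirrors the "not expecting an answer from this avatar" warning.
            return "rejected: not expecting an answer from %s" % av_id
        for quest in self.pending_quests:
            if quest_id == quest[0]:
                self.pending_av_id = None
                self.pending_quests = None
                return "accepted: avatar %s chose quest %s" % (av_id, quest_id)
        # Mirrors the "chose a quest not offered" warning.
        self.pending_av_id = None
        self.pending_quests = None
        return "rejected: quest %s was not offered" % quest_id


choice = PendingChoice()
choice.offer(101, [(5001, 'reward'), (5002, 'reward')])
print(choice.answer(202, 5001))  # rejected: wrong avatar, state unchanged
print(choice.answer(101, 5001))  # accepted, pending state cleared
```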
hexsha: 790448220efac63bfb62d538d5c99a91eed05636 | size: 1,996 | ext: py | lang: Python
path: src/main/python/post_setup.py | repo: Shafaq-Siddiqi/systemml @ eca11c6fe9cff88df2e1960caf1b0cff9bf2b2b6 | licenses: ["Apache-2.0"]
stars: 1 (2020-12-19T23:01:46.000Z) | issues: 7 (2020-12-19T21:59:55.000Z to 2022-02-09T22:36:24.000Z) | forks: 1 (2021-02-24T22:50:06.000Z)

```python
#!/usr/bin/env python3
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
from __future__ import print_function
import os
import sys
import platform
try:
exec(open('systemds/project_info.py').read())
except IOError:
print("Could not read project_info.py.", file=sys.stderr)
sys.exit()
ARTIFACT_NAME = __project_artifact_id__
ARTIFACT_VERSION = __project_version__
ARTIFACT_VERSION_SHORT = ARTIFACT_VERSION.split("-")[0]
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))
src_path_prefix = os.path.join(root_dir, 'src', 'main', 'python', 'dist', ARTIFACT_NAME + '-' + ARTIFACT_VERSION_SHORT)
src_path = src_path_prefix + '.zip' if platform.system() == "Windows" and os.path.exists(
src_path_prefix + '.zip') else src_path_prefix + '.tar.gz'
os.rename(
src_path,
os.path.join(root_dir, 'target', ARTIFACT_NAME + '-' + ARTIFACT_VERSION + '-python.tar.gz'))
wheel_name = '-'.join([ARTIFACT_NAME, ARTIFACT_VERSION_SHORT, 'py3', 'none', 'any.whl'])
wheel = os.path.join(root_dir, 'src', 'main', 'python', 'dist', wheel_name)
os.rename(wheel, os.path.join(root_dir, 'target', wheel_name))
```

avg_line_length: 42.468085 | max_line_length: 119 | alphanum_fraction: 0.695892
content_no_comment: the content above with comments and docstrings stripped | is_comment_constant_removed: true | is_sharp_comment_removed: true
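The renaming logic is easiest to follow with concrete values. A dry-run sketch that only computes the source and target paths, using assumed artifact values in place of those read from systemds/project_info.py (no files are moved):

```python
import os

# Assumed values for illustration; the real ones come from project_info.py.
ARTIFACT_NAME = "systemds"
ARTIFACT_VERSION = "2.0.0-SNAPSHOT"
ARTIFACT_VERSION_SHORT = ARTIFACT_VERSION.split("-")[0]  # -> "2.0.0"

root_dir = "/path/to/repo"  # placeholder for the computed repository root
src = os.path.join(root_dir, "src", "main", "python", "dist",
                   ARTIFACT_NAME + "-" + ARTIFACT_VERSION_SHORT + ".tar.gz")
dst = os.path.join(root_dir, "target",
                   ARTIFACT_NAME + "-" + ARTIFACT_VERSION + "-python.tar.gz")
print(src)  # .../src/main/python/dist/systemds-2.0.0.tar.gz
print(dst)  # .../target/systemds-2.0.0-SNAPSHOT-python.tar.gz
```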
hexsha: 7904487cd040a8ed617d1df88414dfe80ed31c8c | size: 1,353 | ext: py | lang: Python
path: examples/accelerometer.py | repo: somacoder/pymetawear @ 3cac6415e610e5ae7d2dac3ffbe6136a65f566f1 | licenses: ["MIT"]
stars: ⌀ | issues: ⌀ | forks: 1 (2021-01-08T03:45:35.000Z)

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`accelerometer`
====================
Updated by lkasso <hello@mbientlab.com>
Created by hbldh <henrik.blidh@nedomkull.com>
Created on 2016-04-10
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import time
from pymetawear.discover import select_device
from pymetawear.client import MetaWearClient
address = select_device()
c = MetaWearClient(str(address), 'pygatt', debug=True)
print("New client created: {0}".format(c))
def acc_callback(data):
"""Handle a (epoch, (x,y,z)) accelerometer tuple."""
print("Epoch time: [{0}] - X: {1}, Y: {2}, Z: {3}".format(data[0], *data[1]))
print("Get possible accelerometer settings...")
settings = c.accelerometer.get_possible_settings()
print(settings)
time.sleep(1.0)
print("Write accelerometer settings...")
c.accelerometer.set_settings(data_rate=3.125, data_range=4.0)
time.sleep(1.0)
print("Check accelerometer settings...")
settings = c.accelerometer.get_current_settings()
print(settings)
print("Subscribing to accelerometer signal notifications...")
c.accelerometer.high_frequency_stream = False
c.accelerometer.notifications(acc_callback)
time.sleep(10.0)
print("Unsubscribe to notification...")
c.accelerometer.notifications(None)
time.sleep(5.0)
c.disconnect()
```

avg_line_length: 22.932203 | max_line_length: 81 | alphanum_fraction: 0.739098
content_no_comment: the content above with comments and docstrings stripped | is_comment_constant_removed: true | is_sharp_comment_removed: true
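acc_callback receives `(epoch, (x, y, z))` tuples, which is why `data[0]` and `*data[1]` unpack cleanly into the format string. A board-free check with a fabricated sample:

```python
# Fabricated sample in the (epoch, (x, y, z)) shape the callback documents.
data = (1617184800000, (0.012, -0.981, 0.143))

print("Epoch time: [{0}] - X: {1}, Y: {2}, Z: {3}".format(data[0], *data[1]))
# -> Epoch time: [1617184800000] - X: 0.012, Y: -0.981, Z: 0.143
```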
hexsha: 790448cb187a7ee7056d0fc9952d881e2341275c | size: 13,032 | ext: py | lang: Python
path: generated/ansible-collection/subscriptionssubscriptionfactory.py | repo: audevbot/autorest.devops.debug @ a507fb6e2dd7826212537f27d583f203aac1c28f | licenses: ["MIT"]
stars: ⌀ | issues: ⌀ | forks: ⌀

```python
#!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: subscriptionssubscriptionfactory
version_added: '2.9'
short_description: Manage Azure SubscriptionFactory instance.
description:
- 'Create, update and delete instance of Azure SubscriptionFactory.'
options:
enrollment_account_name:
description:
- >-
The name of the enrollment account to which the subscription will be
billed.
required: true
type: str
name:
description:
- The display name of the subscription.
type: str
owners:
description:
- >-
The list of principals that should be granted Owner access on the
subscription. Principals should be of type User, Service Principal or
Security Group.
type: list
suboptions:
object_id:
description:
- Object id of the Principal
required: true
type: str
offer_type:
description:
- >-
The offer type of the subscription. For example, MS-AZR-0017P
(EnterpriseAgreement) and MS-AZR-0148P (EnterpriseAgreement devTest) are
available. Only valid when creating a subscription in a enrollment
account scope.
type: str
additional_parameters:
description:
- >-
Additional, untyped parameters to support custom subscription creation
scenarios.
type: >-
unknown[DictionaryType
{"$id":"45","$type":"DictionaryType","valueType":{"$id":"46","$type":"PrimaryType","knownPrimaryType":"object","name":{"$id":"47","fixed":false,"raw":"Object"},"deprecated":false},"supportsAdditionalProperties":false,"name":{"$id":"48","fixed":false},"deprecated":false}]
subscription_link:
description:
- >-
The link to the new subscription. Use this link to check the status of
subscription creation operation.
type: str
state:
description:
- Assert the state of the SubscriptionFactory.
- >-
Use C(present) to create or update an SubscriptionFactory and C(absent)
to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: createSubscription
azure.rm.subscriptionssubscriptionfactory:
enrollment_account_name: myEnrollmentAccount
body:
offerType: MS-AZR-0017P
displayName: Test Ea Azure Sub
owners:
- objectId: 973034ff-acb7-409c-b731-e789672c7b31
- objectId: 67439a9e-8519-4016-a630-f5f805eba567
additionalParameters:
customData:
key1: value1
key2: true
'''
RETURN = '''
subscription_link:
description:
- >-
The link to the new subscription. Use this link to check the status of
subscription creation operation.
returned: always
type: str
sample: null
'''
import time
import json
import re
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# this is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMSubscriptionFactory(AzureRMModuleBaseExt):
def __init__(self):
self.module_arg_spec = dict(
enrollment_account_name=dict(
type='str',
updatable=False,
disposition='enrollmentAccountName',
                required=True
),
name=dict(
type='str',
updatable=False,
disposition='/'
),
owners=dict(
type='list',
disposition='/',
options=dict(
object_id=dict(
type='str',
disposition='objectId',
                        required=True
)
)
),
offer_type=dict(
type='str',
updatable=False,
disposition='/',
choices=['MS-AZR-0017P',
'MS-AZR-0148P']
),
additional_parameters=dict(
type='unknown[DictionaryType {"$id":"45","$type":"DictionaryType","valueType":{"$id":"46","$type":"PrimaryType","knownPrimaryType":"object","name":{"$id":"47","fixed":false,"raw":"Object"},"deprecated":false},"supportsAdditionalProperties":false,"name":{"$id":"48","fixed":false},"deprecated":false}]',
updatable=False,
disposition='/'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.enrollment_account_name = None
self.subscription_link = None
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.url = None
self.status_code = [200, 201, 202]
self.to_do = Actions.NoAction
self.body = {}
self.query_parameters = {}
self.query_parameters['api-version'] = '2018-03-01-preview'
self.header_parameters = {}
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
super(AzureRMSubscriptionFactory, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.body[key] = kwargs[key]
self.inflate_parameters(self.module_arg_spec, self.body, 0)
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
base_url=self._cloud_environment.endpoints.resource_manager)
self.url = ('/providers' +
'/Microsoft.Billing' +
'/enrollmentAccounts' +
'/{{ enrollment_account_name }}' +
'/providers' +
'/Microsoft.Subscription' +
'/createSubscription')
        # Substitute the enrollment account name into the URL template.
        self.url = self.url.replace('{{ enrollment_account_name }}', self.enrollment_account_name)
old_response = self.get_resource()
if not old_response:
self.log("SubscriptionFactory instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log('SubscriptionFactory instance already exists')
if self.state == 'absent':
self.to_do = Actions.Delete
else:
modifiers = {}
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
self.results['modifiers'] = modifiers
self.results['compare'] = []
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
if not self.default_compare(modifiers, self.body, old_response, '', self.results):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log('Need to Create / Update the SubscriptionFactory instance')
if self.check_mode:
self.results['changed'] = True
return self.results
response = self.create_update_resource()
# if not old_response:
self.results['changed'] = True
# else:
# self.results['changed'] = old_response.__ne__(response)
self.log('Creation / Update done')
elif self.to_do == Actions.Delete:
self.log('SubscriptionFactory instance deleted')
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_resource()
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
# for some time after deletion -- this should be really fixed in Azure
while self.get_resource():
time.sleep(20)
else:
self.log('SubscriptionFactory instance unchanged')
self.results['changed'] = False
response = old_response
if response:
self.results["subscription_link"] = response["subscription_link"]
return self.results
def create_update_resource(self):
# self.log('Creating / Updating the SubscriptionFactory instance {0}'.format(self.))
try:
if self.to_do == Actions.Create:
response = self.mgmt_client.query(self.url,
'PUT',
self.query_parameters,
self.header_parameters,
self.body,
self.status_code,
600,
30)
else:
response = self.mgmt_client.query(self.url,
'PUT',
self.query_parameters,
self.header_parameters,
self.body,
self.status_code,
600,
30)
except CloudError as exc:
self.log('Error attempting to create the SubscriptionFactory instance.')
self.fail('Error creating the SubscriptionFactory instance: {0}'.format(str(exc)))
try:
response = json.loads(response.text)
except Exception:
response = {'text': response.text}
pass
return response
def delete_resource(self):
# self.log('Deleting the SubscriptionFactory instance {0}'.format(self.))
try:
response = self.mgmt_client.query(self.url,
'DELETE',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
except CloudError as e:
self.log('Error attempting to delete the SubscriptionFactory instance.')
self.fail('Error deleting the SubscriptionFactory instance: {0}'.format(str(e)))
return True
def get_resource(self):
# self.log('Checking if the SubscriptionFactory instance {0} is present'.format(self.))
found = False
try:
response = self.mgmt_client.query(self.url,
'GET',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
found = True
self.log("Response : {0}".format(response))
# self.log("SubscriptionFactory instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the SubscriptionFactory instance.')
if found is True:
return response
return False
def main():
AzureRMSubscriptionFactory()
if __name__ == '__main__':
main()
```

avg_line_length: 36.099723 | max_line_length: 319 | alphanum_fraction: 0.518339
content_no_comment: the content above with comments and docstrings stripped | is_comment_constant_removed: true | is_sharp_comment_removed: true
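exec_module builds its REST target by concatenating path segments and substituting the `{{ enrollment_account_name }}` placeholder. The same construction in isolation, reusing the sample account name from the EXAMPLES block:

```python
# Stand-alone reproduction of the URL templating in exec_module above.
enrollment_account_name = "myEnrollmentAccount"  # sample value from EXAMPLES

url = ('/providers'
       '/Microsoft.Billing'
       '/enrollmentAccounts'
       '/{{ enrollment_account_name }}'
       '/providers'
       '/Microsoft.Subscription'
       '/createSubscription')
url = url.replace('{{ enrollment_account_name }}', enrollment_account_name)
print(url)
# -> /providers/Microsoft.Billing/enrollmentAccounts/myEnrollmentAccount
#    /providers/Microsoft.Subscription/createSubscription
```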
hexsha: 79044a1643797f0de28142d2867bee9257fac389 | size: 2,735 | ext: py | lang: Python
path: game/rendering.py | repo: HexDecimal/7drl-2022 @ 755949875cc11e288908eccaee102c7ca0e43777 | licenses: ["CC0-1.0"]
stars: ⌀ | issues: ⌀ | forks: ⌀

```python
from __future__ import annotations
import numpy as np
import tcod
import g
import game.constants
import game.engine
import game.game_map
import game.render_functions
from game.tiles import tile_graphics
def render_map(console: tcod.Console, gamemap: game.game_map.GameMap) -> None:
# The default graphics are of tiles that are visible.
light = tile_graphics[gamemap.tiles]
light[gamemap.fire > 0] = (ord("^"), (255, 255, 255), (0xCC, 0x22, 0))
# Apply effects to create a darkened map of tile graphics.
dark = gamemap.memory.copy()
dark["fg"] //= 2
dark["bg"] //= 8
visible = gamemap.visible
if g.fullbright:
visible = np.ones_like(visible)
for entity in sorted(gamemap.entities, key=lambda x: x.render_order.value):
if not visible[entity.x, entity.y]:
continue # Skip entities that are not in the FOV.
light[entity.x, entity.y]["ch"] = ord(entity.char)
light[entity.x, entity.y]["fg"] = entity.color
console.rgb[0 : gamemap.width, 0 : gamemap.height] = np.select(
condlist=[visible, gamemap.explored],
choicelist=[light, dark],
default=dark,
)
for entity in sorted(gamemap.entities, key=lambda x: x.render_order.value):
if not visible[entity.x, entity.y]:
continue # Skip entities that are not in the FOV.
console.print(entity.x, entity.y, entity.char, fg=entity.color)
visible.choose((gamemap.memory, light), out=gamemap.memory)
def render_ui(console: tcod.Console, engine: game.engine.Engine) -> None:
UI_WIDTH = game.constants.ui_width
UI_LEFT = console.width - UI_WIDTH
LOG_HEIGHT = console.height - 8
engine.message_log.render(
console=console, x=UI_LEFT, y=console.height - LOG_HEIGHT, width=UI_WIDTH, height=LOG_HEIGHT
)
console.draw_rect(UI_LEFT, 0, UI_WIDTH, 2, 0x20, (0xFF, 0xFF, 0xFF), (0, 0, 0))
game.render_functions.render_bar(
console=console,
x=UI_LEFT,
y=0,
current_value=engine.player.fighter.hp,
maximum_value=engine.player.fighter.max_hp,
total_width=UI_WIDTH,
)
game.render_functions.render_names_at_mouse_location(console=console, x=UI_LEFT, y=1, engine=engine)
if g.mouse_pos:
console.rgb[g.mouse_pos]["fg"] = (0, 0, 0)
console.rgb[g.mouse_pos]["bg"] = (255, 255, 255)
if g.fullbright or engine.game_map.visible[g.mouse_pos]:
console.print(
UI_LEFT,
2,
f"Fire={engine.game_map.fire[g.mouse_pos]}, Heat={engine.game_map.heat[g.mouse_pos]}, "
f"Smoke={engine.game_map.smoke[g.mouse_pos]},\nFuel={engine.game_map.fuel[g.mouse_pos]}",
)
```

avg_line_length: 34.620253 | max_line_length: 105 | alphanum_fraction: 0.652285
content_no_comment: the content above with comments and docstrings stripped | is_comment_constant_removed: true | is_sharp_comment_removed: true
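render_map composites the console in one vectorized step: np.select takes the lit graphic where a tile is visible, the darkened memory graphic where it is merely explored, and the dark default elsewhere. A toy one-dimensional demonstration of that selection rule with made-up values:

```python
import numpy as np

# Three tiles: visible, explored-but-not-visible, and unexplored.
visible = np.array([True, False, False])
explored = np.array([True, True, False])
light = np.array([10, 20, 30])  # stand-ins for the lit tile graphics
dark = np.array([1, 2, 3])      # stand-ins for the darkened memory graphics

out = np.select(condlist=[visible, explored], choicelist=[light, dark], default=dark)
print(out)  # -> [10  2  3]
```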
hexsha: 79044aafe60974ffe22e1146ae93d53991ba1bf9 | size: 23,871 | ext: py | lang: Python
path: src/nfc/llcp/llc.py | repo: javgh/bitpay-brick @ 688cb3403111494bba5f453ea681515e03bf43b4 | licenses: ["MIT"]
stars: 15 (2016-11-20T15:38:49.000Z to 2021-08-23T02:59:49.000Z) | issues: ⌀ | forks: 3 (2016-11-21T11:57:13.000Z to 2019-03-24T21:12:41.000Z)

```python
# -*- coding: latin-1 -*-
# -----------------------------------------------------------------------------
# Copyright 2009-2011 Stephen Tiedemann <stephen.tiedemann@googlemail.com>
#
# Licensed under the EUPL, Version 1.1 or - as soon they
# will be approved by the European Commission - subsequent
# versions of the EUPL (the "Licence");
# You may not use this work except in compliance with the
# Licence.
# You may obtain a copy of the Licence at:
#
# http://www.osor.eu/eupl
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the Licence is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied.
# See the Licence for the specific language governing
# permissions and limitations under the Licence.
# -----------------------------------------------------------------------------
import logging
log = logging.getLogger(__name__)
import time
from types import *
import threading
import collections
import random
import nfc.clf
import nfc.dep
# local imports
from tco import *
from pdu import *
from err import *
from opt import *
RAW_ACCESS_POINT, LOGICAL_DATA_LINK, DATA_LINK_CONNECTION = range(3)
wks_map = {
"urn:nfc:sn:sdp" : 1,
"urn:nfc:sn:ip" : 2,
"urn:nfc:sn:obex": 3,
"urn:nfc:sn:snep": 4}
class ServiceAccessPoint(object):
def __init__(self, addr, llc):
self.llc = llc
self.addr = addr
self.sock_list = collections.deque()
self.send_list = collections.deque()
def __str__(self):
return "SAP {0:>2}".format(self.addr)
@property
def mode(self):
with self.llc.lock:
try:
if isinstance(self.sock_list[0], RawAccessPoint):
return RAW_ACCESS_POINT
if isinstance(self.sock_list[0], LogicalDataLink):
return LOGICAL_DATA_LINK
if isinstance(self.sock_list[0], DataLinkConnection):
return DATA_LINK_CONNECTION
except IndexError: return 0
def insert_socket(self, socket):
with self.llc.lock:
try: insertable = type(socket) == type(self.sock_list[0])
except IndexError: insertable = True
if insertable:
socket.bind(self.addr)
self.sock_list.appendleft(socket)
else: log.error("can't insert socket of different type")
return insertable
def remove_socket(self, socket):
assert socket.addr == self.addr
socket.close()
with self.llc.lock:
try: self.sock_list.remove(socket)
except ValueError: pass
if len(self.sock_list) == 0:
# completely remove this sap
self.llc.sap[self.addr] = None
def send(self, pdu):
self.send_list.append(pdu)
def shutdown(self):
while True:
try: socket = self.sock_list.pop()
except IndexError: return
log.debug("shutdown socket %s" % str(socket))
socket.bind(None); socket.close()
#
# enqueue() and dequeue() are called from llc run thread
#
def enqueue(self, pdu):
with self.llc.lock:
if isinstance(pdu, Connect):
for socket in self.sock_list:
if socket.state.LISTEN:
socket.enqueue(pdu)
return
else:
for socket in self.sock_list:
if pdu.ssap == socket.peer or socket.peer is None:
socket.enqueue(pdu)
return
if pdu.type in connection_mode_pdu_types:
self.send(DisconnectedMode(pdu.ssap, pdu.dsap, reason=1))
def dequeue(self, max_size):
with self.llc.lock:
for socket in self.sock_list:
#print "dequeue from", socket
pdu = socket.dequeue(max_size)
if pdu: return pdu
else:
try: return self.send_list.popleft()
except IndexError: pass
def sendack(self, max_size):
with self.llc.lock:
for socket in self.sock_list:
pdu = socket.sendack(max_size)
if pdu: return pdu
class ServiceDiscovery(object):
def __init__(self, llc):
self.llc = llc
self.snl = dict()
self.tids = range(256)
self.resp = threading.Condition(self.llc.lock)
self.sent = dict()
self.sdreq = collections.deque()
self.sdres = collections.deque()
self.dmpdu = collections.deque()
def __str__(self):
return "SAP 1"
@property
def mode(self):
return LOGICAL_DATA_LINK
def resolve(self, name):
with self.resp:
if self.snl is None: return None
log.debug("resolve service name '{0}'".format(name))
try: return self.snl[name]
except KeyError: pass
tid = random.choice(self.tids)
self.tids.remove(tid)
self.sdreq.append((tid, name))
while not self.snl is None and not name in self.snl:
self.resp.wait()
return None if self.snl is None else self.snl[name]
#
# enqueue() and dequeue() are called from llc run thread
#
def enqueue(self, pdu):
with self.llc.lock:
if isinstance(pdu, ServiceNameLookup) and not self.snl is None:
for tid, sap in pdu.sdres:
try: name = self.sent[tid]
except KeyError: pass
else:
log.debug("resolved '{0}' to remote addr {1}"
.format(name, sap))
self.snl[name] = sap
self.tids.append(tid)
self.resp.notify_all()
for tid, name in pdu.sdreq:
try: sap = self.llc.snl[name]
except KeyError: sap = 0
self.sdres.append((tid, sap))
def dequeue(self, max_size):
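        # Pack pending SDRES and SDREQ entries into a single service name
        # lookup PDU of at most max_size octets; requests that do not fit
        # are rotated to the back of the queue and retried on a later call.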
if max_size < 2:
return None
with self.llc.lock:
if len(self.sdres) > 0 or len(self.sdreq) > 0:
pdu = ServiceNameLookup(dsap=1, ssap=1)
max_size -= len(pdu)
while max_size > 0:
try: pdu.sdres.append(self.sdres.popleft())
except IndexError: break
for i in range(len(self.sdreq)):
tid, name = self.sdreq[0]
if 1 + len(name) > max_size:
self.sdreq.rotate(-1)
else:
pdu.sdreq.append(self.sdreq.popleft())
self.sent[tid] = name
return pdu
if len(self.dmpdu) > 0 and max_size >= 2:
return self.dmpdu.popleft()
def shutdown(self):
with self.llc.lock:
self.snl = None
self.resp.notify_all()
class LogicalLinkController(object):
def __init__(self, recv_miu=248, send_lto=500, send_agf=True,
symm_log=True):
self.lock = threading.RLock()
self.cfg = dict()
self.cfg['recv-miu'] = recv_miu
self.cfg['send-lto'] = send_lto
self.cfg['send-agf'] = send_agf
self.cfg['symm-log'] = symm_log
self.snl = dict({"urn:nfc:sn:sdp" : 1})
self.sap = 64 * [None]
self.sap[0] = ServiceAccessPoint(0, self)
self.sap[1] = ServiceDiscovery(self)
def __str__(self):
local = "Local(MIU={miu}, LTO={lto}ms)".format(
miu=self.cfg.get('recv-miu'), lto=self.cfg.get('send-lto'))
remote = "Remote(MIU={miu}, LTO={lto}ms)".format(
miu=self.cfg.get('send-miu'), lto=self.cfg.get('recv-lto'))
return "LLC: {local} {remote}".format(local=local, remote=remote)
def activate(self, mac):
assert type(mac) in (nfc.dep.Initiator, nfc.dep.Target)
self.mac = None
miu = self.cfg['recv-miu']
lto = self.cfg['send-lto']
wks = 1+sum(sorted([1<<sap for sap in self.snl.values() if sap < 15]))
pax = ParameterExchange(version=(1,1), miu=miu, lto=lto, wks=wks)
if type(mac) == nfc.dep.Initiator:
gb = mac.activate(gbi='Ffm'+pax.to_string()[2:])
self.run = self.run_as_initiator
role = "Initiator"
if type(mac) == nfc.dep.Target:
gb = mac.activate(gbt='Ffm'+pax.to_string()[2:], wt=9)
self.run = self.run_as_target
role = "Target"
if gb is not None and gb.startswith('Ffm') and len(gb) >= 6:
info = ["LLCP Link established as NFC-DEP {0}".format(role)]
info.append("Local LLCP Settings")
info.append(" LLCP Version: {0[0]}.{0[1]}".format(pax.version))
info.append(" Link Timeout: {0} ms".format(pax.lto))
info.append(" Max Inf Unit: {0} octet".format(pax.miu))
info.append(" Service List: {0:016b}".format(pax.wks))
pax = ProtocolDataUnit.from_string("\x00\x40" + str(gb[3:]))
info.append("Remote LLCP Settings")
info.append(" LLCP Version: {0[0]}.{0[1]}".format(pax.version))
info.append(" Link Timeout: {0} ms".format(pax.lto))
info.append(" Max Inf Unit: {0} octet".format(pax.miu))
info.append(" Service List: {0:016b}".format(pax.wks))
log.info('\n'.join(info))
self.cfg['rcvd-ver'] = pax.version
self.cfg['send-miu'] = pax.miu
self.cfg['recv-lto'] = pax.lto
self.cfg['send-wks'] = pax.wks
self.cfg['send-lsc'] = pax.lsc
log.debug("llc cfg {0}".format(self.cfg))
if type(mac) == nfc.dep.Initiator and mac.rwt is not None:
max_rwt = 4096/13.56E6 * 2**10
if mac.rwt > max_rwt:
log.warning("NFC-DEP RWT {0:.3f} exceeds max {1:.3f} sec"
.format(mac.rwt, max_rwt))
self.mac = mac
return bool(self.mac)
def terminate(self, reason):
log.debug("llcp link termination caused by {0}".format(reason))
if reason == "local choice":
self.exchange(Disconnect(0, 0), timeout=0.1)
self.mac.deactivate()
elif reason == "remote choice":
self.mac.deactivate()
# shutdown local services
for i in range(63, -1, -1):
if not self.sap[i] is None:
log.debug("closing service access point %d" % i)
self.sap[i].shutdown()
self.sap[i] = None
def exchange(self, pdu, timeout):
if not isinstance(pdu, Symmetry) or self.cfg.get('symm-log') is True:
log.debug("SEND {0}".format(pdu))
data = pdu.to_string() if pdu else None
try:
data = self.mac.exchange(data, timeout)
if data is None: return None
except nfc.clf.DigitalProtocolError as error:
log.debug("{0!r}".format(error))
return None
pdu = ProtocolDataUnit.from_string(data)
if not isinstance(pdu, Symmetry) or self.cfg.get('symm-log') is True:
log.debug("RECV {0}".format(pdu))
return pdu
def run_as_initiator(self, terminate=lambda: False):
recv_timeout = 1E-3 * (self.cfg['recv-lto'] + 10)
symm = 0
try:
pdu = self.collect(delay=0.01)
while not terminate():
if pdu is None: pdu = Symmetry()
pdu = self.exchange(pdu, recv_timeout)
if pdu is None:
return self.terminate(reason="link disruption")
if pdu == Disconnect(0, 0):
return self.terminate(reason="remote choice")
symm = symm + 1 if type(pdu) == Symmetry else 0
self.dispatch(pdu)
pdu = self.collect(delay=0.001)
if pdu is None and symm >= 10:
pdu = self.collect(delay=0.05)
else:
self.terminate(reason="local choice")
except KeyboardInterrupt:
print # move to new line
self.terminate(reason="local choice")
raise KeyboardInterrupt
except IOError:
self.terminate(reason="input/output error")
raise SystemExit
finally:
log.debug("llc run loop terminated on initiator")
def run_as_target(self, terminate=lambda: False):
recv_timeout = 1E-3 * (self.cfg['recv-lto'] + 10)
symm = 0
try:
pdu = None
while not terminate():
pdu = self.exchange(pdu, recv_timeout)
if pdu is None:
return self.terminate(reason="link disruption")
if pdu == Disconnect(0, 0):
return self.terminate(reason="remote choice")
symm = symm + 1 if type(pdu) == Symmetry else 0
self.dispatch(pdu)
pdu = self.collect(delay=0.001)
if pdu is None and symm >= 10:
pdu = self.collect(delay=0.05)
if pdu is None: pdu = Symmetry()
else:
self.terminate(reason="local choice")
except KeyboardInterrupt:
print # move to new line
self.terminate(reason="local choice")
raise KeyboardInterrupt
except IOError:
self.terminate(reason="input/output error")
raise SystemExit
finally:
log.debug("llc run loop terminated on target")
def collect(self, delay=None):
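        # Gather at most one PDU per active SAP, then poll data link
        # connections for pending acknowledgements; if aggregation is enabled
        # and several PDUs fit within the send MIU, they are wrapped into a
        # single AggregatedFrame.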
if delay: time.sleep(delay)
pdu_list = list()
max_data = None
with self.lock:
active_sap_list = [sap for sap in self.sap if sap is not None]
for sap in active_sap_list:
#log.debug("query sap {0}, max_data={1}"
# .format(sap, max_data))
pdu = sap.dequeue(max_data if max_data else 2179)
if pdu is not None:
if self.cfg['send-agf'] == False:
return pdu
pdu_list.append(pdu)
if max_data is None:
max_data = self.cfg["send-miu"] + 2
max_data -= len(pdu)
if max_data < bool(len(pdu_list)==1) * 2 + 2 + 2:
break
else: max_data = self.cfg["send-miu"] + 2
for sap in active_sap_list:
if sap.mode == DATA_LINK_CONNECTION:
pdu = sap.sendack(max_data)
if not pdu is None:
if self.cfg['send-agf'] == False:
return pdu
pdu_list.append(pdu)
max_data -= len(pdu)
if max_data < bool(len(pdu_list)==1) * 2 + 2 + 3:
break
if len(pdu_list) > 1:
return AggregatedFrame(aggregate=pdu_list)
if len(pdu_list) == 1:
return pdu_list[0]
return None
def dispatch(self, pdu):
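        # Route an inbound PDU: ignore symmetry PDUs, unwrap aggregated
        # frames recursively, translate connect-by-name to the resolved SAP
        # address, and hand everything else to the destination SAP's queue.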
if isinstance(pdu, Symmetry):
return
if isinstance(pdu, AggregatedFrame):
if pdu.dsap == 0 and pdu.ssap == 0:
[log.debug(" " + str(p)) for p in pdu]
[self.dispatch(p) for p in pdu]
return
if isinstance(pdu, Connect) and pdu.dsap == 1:
# connect-by-name
addr = self.snl.get(pdu.sn)
if not addr or self.sap[addr] is None:
log.debug("no service named '{0}'".format(pdu.sn))
pdu = DisconnectedMode(pdu.ssap, 1, reason=2)
self.sap[1].dmpdu.append(pdu)
return
pdu = Connect(dsap=addr, ssap=pdu.ssap, rw=pdu.rw, miu=pdu.miu)
with self.lock:
sap = self.sap[pdu.dsap]
if sap:
sap.enqueue(pdu)
return
log.debug("discard PDU {0}".format(str(pdu)))
return
def resolve(self, name):
return self.sap[1].resolve(name)
def socket(self, socket_type):
if socket_type == RAW_ACCESS_POINT:
return RawAccessPoint(recv_miu=self.cfg["recv-miu"])
if socket_type == LOGICAL_DATA_LINK:
return LogicalDataLink(recv_miu=self.cfg["recv-miu"])
if socket_type == DATA_LINK_CONNECTION:
return DataLinkConnection(recv_miu=128, recv_win=1)
def setsockopt(self, socket, option, value):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if option == SO_RCVMIU:
value = min(value, self.cfg['recv-miu'])
socket.setsockopt(option, value)
return socket.getsockopt(option)
def getsockopt(self, socket, option):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if isinstance(socket, LogicalDataLink):
# FIXME: set socket send miu when activated
socket.send_miu = self.cfg['send-miu']
if isinstance(socket, RawAccessPoint):
# FIXME: set socket send miu when activated
socket.send_miu = self.cfg['send-miu']
return socket.getsockopt(option)
def bind(self, socket, addr_or_name=None):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if not socket.addr is None:
raise Error(errno.EINVAL)
if addr_or_name is None:
self._bind_by_none(socket)
elif type(addr_or_name) is IntType:
self._bind_by_addr(socket, addr_or_name)
elif type(addr_or_name) is StringType:
self._bind_by_name(socket, addr_or_name)
else: raise Error(errno.EFAULT)
def _bind_by_none(self, socket):
with self.lock:
try: addr = 32 + self.sap[32:64].index(None)
except ValueError: raise Error(errno.EAGAIN)
else:
socket.bind(addr)
self.sap[addr] = ServiceAccessPoint(addr, self)
self.sap[addr].insert_socket(socket)
def _bind_by_addr(self, socket, addr):
with self.lock:
if addr in range(32, 64):
if self.sap[addr] is None:
socket.bind(addr)
self.sap[addr] = ServiceAccessPoint(addr, self)
self.sap[addr].insert_socket(socket)
else: raise Error(errno.EADDRINUSE)
else: raise Error(errno.EACCES)
def _bind_by_name(self, socket, name):
if not (name.startswith("urn:nfc:sn") or
name.startswith("urn:nfc:xsn") or
name == "com.android.npp"): # invalid name but legacy
raise Error(errno.EFAULT)
with self.lock:
if self.snl.get(name) != None:
raise Error(errno.EADDRINUSE)
addr = wks_map.get(name)
if addr is None:
try: addr = 16 + self.sap[16:32].index(None)
except ValueError: raise Error(errno.EADDRNOTAVAIL)
socket.bind(addr)
self.sap[addr] = ServiceAccessPoint(addr, self)
self.sap[addr].insert_socket(socket)
self.snl[name] = addr
def connect(self, socket, dest):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if not socket.is_bound:
self.bind(socket)
socket.connect(dest)
log.debug("connected ({0} ===> {1})".format(socket.addr, socket.peer))
def listen(self, socket, backlog):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if not isinstance(socket, DataLinkConnection):
raise Error(errno.EOPNOTSUPP)
if not type(backlog) == IntType:
raise TypeError("backlog must be integer")
if backlog < 0:
raise ValueError("backlog mmust not be negative")
backlog = min(backlog, 16)
if not socket.is_bound:
self.bind(socket)
socket.listen(backlog)
def accept(self, socket):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if not isinstance(socket, DataLinkConnection):
raise Error(errno.EOPNOTSUPP)
while True:
client = socket.accept()
if not client.is_bound:
self.bind(client)
if self.sap[client.addr].insert_socket(client):
log.debug("new data link connection ({0} <=== {1})"
.format(client.addr, client.peer))
return client
else:
pdu = DisconnectedMode(client.peer, socket.addr, reason=0x20)
super(DataLinkConnection, socket).send(pdu)
def send(self, socket, message):
return self.sendto(socket, message, socket.peer)
def sendto(self, socket, message, dest):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if isinstance(socket, RawAccessPoint):
if not isinstance(message, ProtocolDataUnit):
raise TypeError("message must be a pdu on raw access point")
if not socket.is_bound:
self.bind(socket)
# FIXME: set socket send miu when activated
socket.send_miu = self.cfg['send-miu']
return socket.send(message)
if not type(message) == StringType:
raise TypeError("sendto() argument *message* must be a string")
if isinstance(socket, LogicalDataLink):
if dest is None:
raise Error(errno.EDESTADDRREQ)
if not socket.is_bound:
self.bind(socket)
# FIXME: set socket send miu when activated
socket.send_miu = self.cfg['send-miu']
return socket.sendto(message, dest)
if isinstance(socket, DataLinkConnection):
return socket.send(message)
def recv(self, socket):
message, sender = self.recvfrom(socket)
return message
def recvfrom(self, socket):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if not (socket.addr and self.sap[socket.addr]):
raise Error(errno.EBADF)
if isinstance(socket, RawAccessPoint):
return (socket.recv(), None)
if isinstance(socket, LogicalDataLink):
return socket.recvfrom()
if isinstance(socket, DataLinkConnection):
return (socket.recv(), socket.peer)
def poll(self, socket, event, timeout=None):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if not (socket.addr and self.sap[socket.addr]):
raise Error(errno.EBADF)
return socket.poll(event, timeout)
def close(self, socket):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if socket.is_bound:
self.sap[socket.addr].remove_socket(socket)
else: socket.close()
def getsockname(self, socket):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
return socket.addr
def getpeername(self, socket):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
return socket.peer
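# A minimal usage sketch, not part of the original module: it assumes an
# already activated LLCP link (an nfc.dep MAC object named `mac` whose run
# loop executes in a separate thread) and a peer offering "urn:nfc:sn:snep".
# Only methods defined above are used.
#
#   llc = LogicalLinkController()
#   if llc.activate(mac):                       # exchange PAX parameters
#       addr = llc.resolve("urn:nfc:sn:snep")   # service name lookup
#       sock = llc.socket(DATA_LINK_CONNECTION)
#       llc.connect(sock, addr)                 # CONNECT / CC handshake
#       llc.send(sock, "Hello")                 # I-PDU on the connection
#       llc.close(sock)
#       llc.terminate(reason="local choice")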
| 38.011146
| 79
| 0.548071
|
import logging
log = logging.getLogger(__name__)
import time
from types import *
import threading
import collections
import random
import nfc.clf
import nfc.dep
from tco import *
from pdu import *
from err import *
from opt import *
RAW_ACCESS_POINT, LOGICAL_DATA_LINK, DATA_LINK_CONNECTION = range(3)
wks_map = {
"urn:nfc:sn:sdp" : 1,
"urn:nfc:sn:ip" : 2,
"urn:nfc:sn:obex": 3,
"urn:nfc:sn:snep": 4}
class ServiceAccessPoint(object):
def __init__(self, addr, llc):
self.llc = llc
self.addr = addr
self.sock_list = collections.deque()
self.send_list = collections.deque()
def __str__(self):
return "SAP {0:>2}".format(self.addr)
@property
def mode(self):
with self.llc.lock:
try:
if isinstance(self.sock_list[0], RawAccessPoint):
return RAW_ACCESS_POINT
if isinstance(self.sock_list[0], LogicalDataLink):
return LOGICAL_DATA_LINK
if isinstance(self.sock_list[0], DataLinkConnection):
return DATA_LINK_CONNECTION
except IndexError: return 0
def insert_socket(self, socket):
with self.llc.lock:
try: insertable = type(socket) == type(self.sock_list[0])
except IndexError: insertable = True
if insertable:
socket.bind(self.addr)
self.sock_list.appendleft(socket)
else: log.error("can't insert socket of different type")
return insertable
def remove_socket(self, socket):
assert socket.addr == self.addr
socket.close()
with self.llc.lock:
try: self.sock_list.remove(socket)
except ValueError: pass
if len(self.sock_list) == 0:
# completely remove this sap
self.llc.sap[self.addr] = None
def send(self, pdu):
self.send_list.append(pdu)
def shutdown(self):
while True:
try: socket = self.sock_list.pop()
except IndexError: return
log.debug("shutdown socket %s" % str(socket))
socket.bind(None); socket.close()
#
# enqueue() and dequeue() are called from llc run thread
#
def enqueue(self, pdu):
with self.llc.lock:
if isinstance(pdu, Connect):
for socket in self.sock_list:
if socket.state.LISTEN:
socket.enqueue(pdu)
return
else:
for socket in self.sock_list:
if pdu.ssap == socket.peer or socket.peer is None:
socket.enqueue(pdu)
return
if pdu.type in connection_mode_pdu_types:
self.send(DisconnectedMode(pdu.ssap, pdu.dsap, reason=1))
def dequeue(self, max_size):
with self.llc.lock:
for socket in self.sock_list:
#print "dequeue from", socket
pdu = socket.dequeue(max_size)
if pdu: return pdu
else:
try: return self.send_list.popleft()
except IndexError: pass
def sendack(self, max_size):
with self.llc.lock:
for socket in self.sock_list:
pdu = socket.sendack(max_size)
if pdu: return pdu
class ServiceDiscovery(object):
def __init__(self, llc):
self.llc = llc
self.snl = dict()
self.tids = range(256)
self.resp = threading.Condition(self.llc.lock)
self.sent = dict()
self.sdreq = collections.deque()
self.sdres = collections.deque()
self.dmpdu = collections.deque()
def __str__(self):
return "SAP 1"
@property
def mode(self):
return LOGICAL_DATA_LINK
def resolve(self, name):
with self.resp:
if self.snl is None: return None
log.debug("resolve service name '{0}'".format(name))
try: return self.snl[name]
except KeyError: pass
tid = random.choice(self.tids)
self.tids.remove(tid)
self.sdreq.append((tid, name))
while not self.snl is None and not name in self.snl:
self.resp.wait()
return None if self.snl is None else self.snl[name]
#
# enqueue() and dequeue() are called from llc run thread
#
def enqueue(self, pdu):
with self.llc.lock:
if isinstance(pdu, ServiceNameLookup) and not self.snl is None:
for tid, sap in pdu.sdres:
try: name = self.sent[tid]
except KeyError: pass
else:
log.debug("resolved '{0}' to remote addr {1}"
.format(name, sap))
self.snl[name] = sap
self.tids.append(tid)
self.resp.notify_all()
for tid, name in pdu.sdreq:
try: sap = self.llc.snl[name]
except KeyError: sap = 0
self.sdres.append((tid, sap))
def dequeue(self, max_size):
if max_size < 2:
return None
with self.llc.lock:
if len(self.sdres) > 0 or len(self.sdreq) > 0:
pdu = ServiceNameLookup(dsap=1, ssap=1)
max_size -= len(pdu)
while max_size > 0:
try: pdu.sdres.append(self.sdres.popleft())
except IndexError: break
for i in range(len(self.sdreq)):
tid, name = self.sdreq[0]
if 1 + len(name) > max_size:
self.sdreq.rotate(-1)
else:
pdu.sdreq.append(self.sdreq.popleft())
self.sent[tid] = name
return pdu
if len(self.dmpdu) > 0 and max_size >= 2:
return self.dmpdu.popleft()
def shutdown(self):
with self.llc.lock:
self.snl = None
self.resp.notify_all()
class LogicalLinkController(object):
def __init__(self, recv_miu=248, send_lto=500, send_agf=True,
symm_log=True):
self.lock = threading.RLock()
self.cfg = dict()
self.cfg['recv-miu'] = recv_miu
self.cfg['send-lto'] = send_lto
self.cfg['send-agf'] = send_agf
self.cfg['symm-log'] = symm_log
self.snl = dict({"urn:nfc:sn:sdp" : 1})
self.sap = 64 * [None]
self.sap[0] = ServiceAccessPoint(0, self)
self.sap[1] = ServiceDiscovery(self)
def __str__(self):
local = "Local(MIU={miu}, LTO={lto}ms)".format(
miu=self.cfg.get('recv-miu'), lto=self.cfg.get('send-lto'))
remote = "Remote(MIU={miu}, LTO={lto}ms)".format(
miu=self.cfg.get('send-miu'), lto=self.cfg.get('recv-lto'))
return "LLC: {local} {remote}".format(local=local, remote=remote)
def activate(self, mac):
assert type(mac) in (nfc.dep.Initiator, nfc.dep.Target)
self.mac = None
miu = self.cfg['recv-miu']
lto = self.cfg['send-lto']
wks = 1+sum(sorted([1<<sap for sap in self.snl.values() if sap < 15]))
pax = ParameterExchange(version=(1,1), miu=miu, lto=lto, wks=wks)
if type(mac) == nfc.dep.Initiator:
gb = mac.activate(gbi='Ffm'+pax.to_string()[2:])
self.run = self.run_as_initiator
role = "Initiator"
if type(mac) == nfc.dep.Target:
gb = mac.activate(gbt='Ffm'+pax.to_string()[2:], wt=9)
self.run = self.run_as_target
role = "Target"
if gb is not None and gb.startswith('Ffm') and len(gb) >= 6:
info = ["LLCP Link established as NFC-DEP {0}".format(role)]
info.append("Local LLCP Settings")
info.append(" LLCP Version: {0[0]}.{0[1]}".format(pax.version))
info.append(" Link Timeout: {0} ms".format(pax.lto))
info.append(" Max Inf Unit: {0} octet".format(pax.miu))
info.append(" Service List: {0:016b}".format(pax.wks))
pax = ProtocolDataUnit.from_string("\x00\x40" + str(gb[3:]))
info.append("Remote LLCP Settings")
info.append(" LLCP Version: {0[0]}.{0[1]}".format(pax.version))
info.append(" Link Timeout: {0} ms".format(pax.lto))
info.append(" Max Inf Unit: {0} octet".format(pax.miu))
info.append(" Service List: {0:016b}".format(pax.wks))
log.info('\n'.join(info))
self.cfg['rcvd-ver'] = pax.version
self.cfg['send-miu'] = pax.miu
self.cfg['recv-lto'] = pax.lto
self.cfg['send-wks'] = pax.wks
self.cfg['send-lsc'] = pax.lsc
log.debug("llc cfg {0}".format(self.cfg))
if type(mac) == nfc.dep.Initiator and mac.rwt is not None:
max_rwt = 4096/13.56E6 * 2**10
if mac.rwt > max_rwt:
log.warning("NFC-DEP RWT {0:.3f} exceeds max {1:.3f} sec"
.format(mac.rwt, max_rwt))
self.mac = mac
return bool(self.mac)
def terminate(self, reason):
log.debug("llcp link termination caused by {0}".format(reason))
if reason == "local choice":
self.exchange(Disconnect(0, 0), timeout=0.1)
self.mac.deactivate()
elif reason == "remote choice":
self.mac.deactivate()
# shutdown local services
for i in range(63, -1, -1):
if not self.sap[i] is None:
log.debug("closing service access point %d" % i)
self.sap[i].shutdown()
self.sap[i] = None
def exchange(self, pdu, timeout):
if not isinstance(pdu, Symmetry) or self.cfg.get('symm-log') is True:
log.debug("SEND {0}".format(pdu))
data = pdu.to_string() if pdu else None
try:
data = self.mac.exchange(data, timeout)
if data is None: return None
except nfc.clf.DigitalProtocolError as error:
log.debug("{0!r}".format(error))
return None
pdu = ProtocolDataUnit.from_string(data)
if not isinstance(pdu, Symmetry) or self.cfg.get('symm-log') is True:
log.debug("RECV {0}".format(pdu))
return pdu
def run_as_initiator(self, terminate=lambda: False):
recv_timeout = 1E-3 * (self.cfg['recv-lto'] + 10)
symm = 0
try:
pdu = self.collect(delay=0.01)
while not terminate():
if pdu is None: pdu = Symmetry()
pdu = self.exchange(pdu, recv_timeout)
if pdu is None:
return self.terminate(reason="link disruption")
if pdu == Disconnect(0, 0):
return self.terminate(reason="remote choice")
symm = symm + 1 if type(pdu) == Symmetry else 0
self.dispatch(pdu)
pdu = self.collect(delay=0.001)
if pdu is None and symm >= 10:
pdu = self.collect(delay=0.05)
else:
self.terminate(reason="local choice")
except KeyboardInterrupt:
print # move to new line
self.terminate(reason="local choice")
raise KeyboardInterrupt
except IOError:
self.terminate(reason="input/output error")
raise SystemExit
finally:
log.debug("llc run loop terminated on initiator")
def run_as_target(self, terminate=lambda: False):
recv_timeout = 1E-3 * (self.cfg['recv-lto'] + 10)
symm = 0
try:
pdu = None
while not terminate():
pdu = self.exchange(pdu, recv_timeout)
if pdu is None:
return self.terminate(reason="link disruption")
if pdu == Disconnect(0, 0):
return self.terminate(reason="remote choice")
symm = symm + 1 if type(pdu) == Symmetry else 0
self.dispatch(pdu)
pdu = self.collect(delay=0.001)
if pdu is None and symm >= 10:
pdu = self.collect(delay=0.05)
if pdu is None: pdu = Symmetry()
else:
self.terminate(reason="local choice")
except KeyboardInterrupt:
print # move to new line
self.terminate(reason="local choice")
raise KeyboardInterrupt
except IOError:
self.terminate(reason="input/output error")
raise SystemExit
finally:
log.debug("llc run loop terminated on target")
def collect(self, delay=None):
if delay: time.sleep(delay)
pdu_list = list()
max_data = None
with self.lock:
active_sap_list = [sap for sap in self.sap if sap is not None]
for sap in active_sap_list:
#log.debug("query sap {0}, max_data={1}"
# .format(sap, max_data))
pdu = sap.dequeue(max_data if max_data else 2179)
if pdu is not None:
if self.cfg['send-agf'] == False:
return pdu
pdu_list.append(pdu)
if max_data is None:
max_data = self.cfg["send-miu"] + 2
max_data -= len(pdu)
if max_data < bool(len(pdu_list)==1) * 2 + 2 + 2:
break
else: max_data = self.cfg["send-miu"] + 2
for sap in active_sap_list:
if sap.mode == DATA_LINK_CONNECTION:
pdu = sap.sendack(max_data)
if not pdu is None:
if self.cfg['send-agf'] == False:
return pdu
pdu_list.append(pdu)
max_data -= len(pdu)
if max_data < bool(len(pdu_list)==1) * 2 + 2 + 3:
break
if len(pdu_list) > 1:
return AggregatedFrame(aggregate=pdu_list)
if len(pdu_list) == 1:
return pdu_list[0]
return None
def dispatch(self, pdu):
if isinstance(pdu, Symmetry):
return
if isinstance(pdu, AggregatedFrame):
if pdu.dsap == 0 and pdu.ssap == 0:
[log.debug(" " + str(p)) for p in pdu]
[self.dispatch(p) for p in pdu]
return
if isinstance(pdu, Connect) and pdu.dsap == 1:
# connect-by-name
addr = self.snl.get(pdu.sn)
if not addr or self.sap[addr] is None:
log.debug("no service named '{0}'".format(pdu.sn))
pdu = DisconnectedMode(pdu.ssap, 1, reason=2)
self.sap[1].dmpdu.append(pdu)
return
pdu = Connect(dsap=addr, ssap=pdu.ssap, rw=pdu.rw, miu=pdu.miu)
with self.lock:
sap = self.sap[pdu.dsap]
if sap:
sap.enqueue(pdu)
return
log.debug("discard PDU {0}".format(str(pdu)))
return
def resolve(self, name):
return self.sap[1].resolve(name)
def socket(self, socket_type):
if socket_type == RAW_ACCESS_POINT:
return RawAccessPoint(recv_miu=self.cfg["recv-miu"])
if socket_type == LOGICAL_DATA_LINK:
return LogicalDataLink(recv_miu=self.cfg["recv-miu"])
if socket_type == DATA_LINK_CONNECTION:
return DataLinkConnection(recv_miu=128, recv_win=1)
def setsockopt(self, socket, option, value):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if option == SO_RCVMIU:
value = min(value, self.cfg['recv-miu'])
socket.setsockopt(option, value)
return socket.getsockopt(option)
def getsockopt(self, socket, option):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if isinstance(socket, LogicalDataLink):
# FIXME: set socket send miu when activated
socket.send_miu = self.cfg['send-miu']
if isinstance(socket, RawAccessPoint):
# FIXME: set socket send miu when activated
socket.send_miu = self.cfg['send-miu']
return socket.getsockopt(option)
def bind(self, socket, addr_or_name=None):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if not socket.addr is None:
raise Error(errno.EINVAL)
if addr_or_name is None:
self._bind_by_none(socket)
elif type(addr_or_name) is IntType:
self._bind_by_addr(socket, addr_or_name)
elif type(addr_or_name) is StringType:
self._bind_by_name(socket, addr_or_name)
else: raise Error(errno.EFAULT)
def _bind_by_none(self, socket):
with self.lock:
try: addr = 32 + self.sap[32:64].index(None)
except ValueError: raise Error(errno.EAGAIN)
else:
socket.bind(addr)
self.sap[addr] = ServiceAccessPoint(addr, self)
self.sap[addr].insert_socket(socket)
def _bind_by_addr(self, socket, addr):
with self.lock:
if addr in range(32, 64):
if self.sap[addr] is None:
socket.bind(addr)
self.sap[addr] = ServiceAccessPoint(addr, self)
self.sap[addr].insert_socket(socket)
else: raise Error(errno.EADDRINUSE)
else: raise Error(errno.EACCES)
def _bind_by_name(self, socket, name):
if not (name.startswith("urn:nfc:sn") or
name.startswith("urn:nfc:xsn") or
name == "com.android.npp"): # invalid name but legacy
raise Error(errno.EFAULT)
with self.lock:
if self.snl.get(name) != None:
raise Error(errno.EADDRINUSE)
addr = wks_map.get(name)
if addr is None:
try: addr = 16 + self.sap[16:32].index(None)
except ValueError: raise Error(errno.EADDRNOTAVAIL)
socket.bind(addr)
self.sap[addr] = ServiceAccessPoint(addr, self)
self.sap[addr].insert_socket(socket)
self.snl[name] = addr
def connect(self, socket, dest):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if not socket.is_bound:
self.bind(socket)
socket.connect(dest)
log.debug("connected ({0} ===> {1})".format(socket.addr, socket.peer))
def listen(self, socket, backlog):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if not isinstance(socket, DataLinkConnection):
raise Error(errno.EOPNOTSUPP)
if not type(backlog) == IntType:
raise TypeError("backlog must be integer")
if backlog < 0:
raise ValueError("backlog mmust not be negative")
backlog = min(backlog, 16)
if not socket.is_bound:
self.bind(socket)
socket.listen(backlog)
def accept(self, socket):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if not isinstance(socket, DataLinkConnection):
raise Error(errno.EOPNOTSUPP)
while True:
client = socket.accept()
if not client.is_bound:
self.bind(client)
if self.sap[client.addr].insert_socket(client):
log.debug("new data link connection ({0} <=== {1})"
.format(client.addr, client.peer))
return client
else:
pdu = DisconnectedMode(client.peer, socket.addr, reason=0x20)
super(DataLinkConnection, socket).send(pdu)
def send(self, socket, message):
return self.sendto(socket, message, socket.peer)
def sendto(self, socket, message, dest):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if isinstance(socket, RawAccessPoint):
if not isinstance(message, ProtocolDataUnit):
raise TypeError("message must be a pdu on raw access point")
if not socket.is_bound:
self.bind(socket)
# FIXME: set socket send miu when activated
socket.send_miu = self.cfg['send-miu']
return socket.send(message)
if not type(message) == StringType:
raise TypeError("sendto() argument *message* must be a string")
if isinstance(socket, LogicalDataLink):
if dest is None:
raise Error(errno.EDESTADDRREQ)
if not socket.is_bound:
self.bind(socket)
# FIXME: set socket send miu when activated
socket.send_miu = self.cfg['send-miu']
return socket.sendto(message, dest)
if isinstance(socket, DataLinkConnection):
return socket.send(message)
def recv(self, socket):
message, sender = self.recvfrom(socket)
return message
def recvfrom(self, socket):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if not (socket.addr and self.sap[socket.addr]):
raise Error(errno.EBADF)
if isinstance(socket, RawAccessPoint):
return (socket.recv(), None)
if isinstance(socket, LogicalDataLink):
return socket.recvfrom()
if isinstance(socket, DataLinkConnection):
return (socket.recv(), socket.peer)
def poll(self, socket, event, timeout=None):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if not (socket.addr and self.sap[socket.addr]):
raise Error(errno.EBADF)
return socket.poll(event, timeout)
def close(self, socket):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
if socket.is_bound:
self.sap[socket.addr].remove_socket(socket)
else: socket.close()
def getsockname(self, socket):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
return socket.addr
def getpeername(self, socket):
if not isinstance(socket, TransmissionControlObject):
raise Error(errno.ENOTSOCK)
return socket.peer
| true
| true
|
79044b868e61cc658326e8d01a1b024997ff170e
| 5,287
|
py
|
Python
|
awx/main/tests/conftest.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 11,396
|
2017-09-07T04:56:02.000Z
|
2022-03-31T13:56:17.000Z
|
awx/main/tests/conftest.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 11,046
|
2017-09-07T09:30:46.000Z
|
2022-03-31T20:28:01.000Z
|
awx/main/tests/conftest.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 3,592
|
2017-09-07T04:14:31.000Z
|
2022-03-31T23:53:09.000Z
|
# Python
import pytest
from unittest import mock
from contextlib import contextmanager
from awx.main.models import Credential, UnifiedJob
from awx.main.tests.factories import (
create_organization,
create_job_template,
create_instance,
create_instance_group,
create_notification_template,
create_survey_spec,
create_workflow_job_template,
)
from django.core.cache import cache
def pytest_addoption(parser):
parser.addoption("--genschema", action="store_true", default=False, help="execute schema validator")
def pytest_configure(config):
import sys
sys._called_from_test = True
def pytest_unconfigure(config):
import sys
del sys._called_from_test
@pytest.fixture
def mock_access():
@contextmanager
def access_given_class(TowerClass):
try:
mock_instance = mock.MagicMock(__name__='foobar')
MockAccess = mock.MagicMock(return_value=mock_instance)
the_patch = mock.patch.dict('awx.main.access.access_registry', {TowerClass: MockAccess}, clear=False)
the_patch.__enter__()
yield mock_instance
finally:
the_patch.__exit__()
return access_given_class
@pytest.fixture
def job_template_factory():
return create_job_template
@pytest.fixture
def organization_factory():
return create_organization
@pytest.fixture
def notification_template_factory():
return create_notification_template
@pytest.fixture
def survey_spec_factory():
return create_survey_spec
@pytest.fixture
def instance_factory():
return create_instance
@pytest.fixture
def instance_group_factory():
return create_instance_group
@pytest.fixture
def default_instance_group(instance_factory, instance_group_factory):
return create_instance_group("default", instances=[create_instance("hostA")])
@pytest.fixture
def controlplane_instance_group(instance_factory, instance_group_factory):
return create_instance_group("controlplane", instances=[create_instance("hostA")])
@pytest.fixture
def job_template_with_survey_passwords_factory(job_template_factory):
def rf(persisted):
"Returns job with linked JT survey with password survey questions"
objects = job_template_factory(
'jt',
organization='org1',
survey=[
{'variable': 'submitter_email', 'type': 'text', 'default': 'foobar@redhat.com'},
{'variable': 'secret_key', 'default': '6kQngg3h8lgiSTvIEb21', 'type': 'password'},
{'variable': 'SSN', 'type': 'password'},
],
persisted=persisted,
)
return objects.job_template
return rf
@pytest.fixture
def job_with_secret_key_unit(job_with_secret_key_factory):
return job_with_secret_key_factory(persisted=False)
@pytest.fixture
def workflow_job_template_factory():
return create_workflow_job_template
@pytest.fixture
def job_template_with_survey_passwords_unit(job_template_with_survey_passwords_factory):
return job_template_with_survey_passwords_factory(persisted=False)
@pytest.fixture
def mock_cache():
class MockCache(object):
cache = {}
def get(self, key, default=None):
return self.cache.get(key, default)
def set(self, key, value, timeout=60):
self.cache[key] = value
def delete(self, key):
del self.cache[key]
return MockCache()
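# A hypothetical usage sketch, not part of the original file: pytest would
# inject the MockCache instance into any test that names the fixture, and
# the object mimics the get/set/delete subset of Django's cache API with a
# plain dict.
def _example_mock_cache_roundtrip(mock_cache):
    mock_cache.set('token', 'abc123', timeout=30)
    assert mock_cache.get('token') == 'abc123'
    mock_cache.delete('token')
    assert mock_cache.get('token') is None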
def pytest_runtest_teardown(item, nextitem):
# clear Django cache at the end of every test ran
# NOTE: this should not be memcache (as it is deprecated), nor should it be redis.
# This is a local test cache, so we want every test to start with an empty cache
cache.clear()
@pytest.fixture(scope='session', autouse=True)
def mock_external_credential_input_sources():
# Credential objects query their related input sources on initialization.
# We mock that behavior out of credentials by default unless we need to
# test it explicitly.
with mock.patch.object(Credential, 'dynamic_input_fields', new=[]) as _fixture:
yield _fixture
@pytest.fixture(scope='session', autouse=True)
def mock_has_unpartitioned_events():
# has_unpartitioned_events determines if there are any events still
# left in the old, unpartitioned job events table. In order to work,
# this method looks up when the partition migration occurred. When
# Django's unit tests run, however, there will be no record of the migration.
# We mock this out to circumvent the migration query.
with mock.patch.object(UnifiedJob, 'has_unpartitioned_events', new=False) as _fixture:
yield _fixture
@pytest.fixture(scope='session', autouse=True)
def mock_get_event_queryset_no_job_created():
"""
    SQLite-friendly since partitions aren't supported. Do not add the faked job_created field to the filter; doing so would result in an SQL query for the
    job_created field, which does not actually exist in a non-partition scenario.
"""
def event_qs(self):
kwargs = {self.event_parent_key: self.id}
return self.event_class.objects.filter(**kwargs)
with mock.patch.object(UnifiedJob, 'get_event_queryset', lambda self: event_qs(self)) as _fixture:
yield _fixture
| 28.89071
| 157
| 0.723284
|
import pytest
from unittest import mock
from contextlib import contextmanager
from awx.main.models import Credential, UnifiedJob
from awx.main.tests.factories import (
create_organization,
create_job_template,
create_instance,
create_instance_group,
create_notification_template,
create_survey_spec,
create_workflow_job_template,
)
from django.core.cache import cache
def pytest_addoption(parser):
parser.addoption("--genschema", action="store_true", default=False, help="execute schema validator")
def pytest_configure(config):
import sys
sys._called_from_test = True
def pytest_unconfigure(config):
import sys
del sys._called_from_test
@pytest.fixture
def mock_access():
@contextmanager
def access_given_class(TowerClass):
try:
mock_instance = mock.MagicMock(__name__='foobar')
MockAccess = mock.MagicMock(return_value=mock_instance)
the_patch = mock.patch.dict('awx.main.access.access_registry', {TowerClass: MockAccess}, clear=False)
the_patch.__enter__()
yield mock_instance
finally:
the_patch.__exit__()
return access_given_class
@pytest.fixture
def job_template_factory():
return create_job_template
@pytest.fixture
def organization_factory():
return create_organization
@pytest.fixture
def notification_template_factory():
return create_notification_template
@pytest.fixture
def survey_spec_factory():
return create_survey_spec
@pytest.fixture
def instance_factory():
return create_instance
@pytest.fixture
def instance_group_factory():
return create_instance_group
@pytest.fixture
def default_instance_group(instance_factory, instance_group_factory):
return create_instance_group("default", instances=[create_instance("hostA")])
@pytest.fixture
def controlplane_instance_group(instance_factory, instance_group_factory):
return create_instance_group("controlplane", instances=[create_instance("hostA")])
@pytest.fixture
def job_template_with_survey_passwords_factory(job_template_factory):
def rf(persisted):
objects = job_template_factory(
'jt',
organization='org1',
survey=[
{'variable': 'submitter_email', 'type': 'text', 'default': 'foobar@redhat.com'},
{'variable': 'secret_key', 'default': '6kQngg3h8lgiSTvIEb21', 'type': 'password'},
{'variable': 'SSN', 'type': 'password'},
],
persisted=persisted,
)
return objects.job_template
return rf
@pytest.fixture
def job_with_secret_key_unit(job_with_secret_key_factory):
return job_with_secret_key_factory(persisted=False)
@pytest.fixture
def workflow_job_template_factory():
return create_workflow_job_template
@pytest.fixture
def job_template_with_survey_passwords_unit(job_template_with_survey_passwords_factory):
return job_template_with_survey_passwords_factory(persisted=False)
@pytest.fixture
def mock_cache():
class MockCache(object):
cache = {}
def get(self, key, default=None):
return self.cache.get(key, default)
def set(self, key, value, timeout=60):
self.cache[key] = value
def delete(self, key):
del self.cache[key]
return MockCache()
def pytest_runtest_teardown(item, nextitem):
cache.clear()
@pytest.fixture(scope='session', autouse=True)
def mock_external_credential_input_sources():
with mock.patch.object(Credential, 'dynamic_input_fields', new=[]) as _fixture:
yield _fixture
@pytest.fixture(scope='session', autouse=True)
def mock_has_unpartitioned_events():
# We mock this out to circumvent the migration query.
with mock.patch.object(UnifiedJob, 'has_unpartitioned_events', new=False) as _fixture:
yield _fixture
@pytest.fixture(scope='session', autouse=True)
def mock_get_event_queryset_no_job_created():
def event_qs(self):
kwargs = {self.event_parent_key: self.id}
return self.event_class.objects.filter(**kwargs)
with mock.patch.object(UnifiedJob, 'get_event_queryset', lambda self: event_qs(self)) as _fixture:
yield _fixture
| true
| true
|
79044bb12a0b11a75934dd8d23e20c3a89850fc7
| 218
|
py
|
Python
|
conf/script/src/build_system/compiler/build_option/sanitizer.py
|
benoit-dubreuil/template-repo-cpp-full-ecosystem
|
f506dd5e2a61cdd311b6a6a4be4abc59567b4b20
|
[
"MIT"
] | null | null | null |
conf/script/src/build_system/compiler/build_option/sanitizer.py
|
benoit-dubreuil/template-repo-cpp-full-ecosystem
|
f506dd5e2a61cdd311b6a6a4be4abc59567b4b20
|
[
"MIT"
] | 113
|
2021-02-15T19:22:36.000Z
|
2021-05-07T15:17:42.000Z
|
conf/script/src/build_system/compiler/build_option/sanitizer.py
|
benoit-dubreuil/template-repo-cpp-full-ecosystem
|
f506dd5e2a61cdd311b6a6a4be4abc59567b4b20
|
[
"MIT"
] | null | null | null |
__all__ = ['CompilerSanitizer']
from enum import Enum, unique
@unique
class CompilerSanitizer(Enum):
NONE = 'none'
ADDRESS = 'address'
THREAD = 'thread'
UNDEFINED = 'undefined'
MEMORY = 'memory'
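# A hypothetical usage sketch, not part of the original file: the member
# values match the identifiers Clang accepts after -fsanitize= (GCC supports
# a subset), so a compiler flag can be derived directly from the enum.
def sanitizer_flag(sanitizer: CompilerSanitizer) -> str:
    if sanitizer is CompilerSanitizer.NONE:
        return ''
    return '-fsanitize=' + sanitizer.value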
| 16.769231
| 31
| 0.66055
|
__all__ = ['CompilerSanitizer']
from enum import Enum, unique
@unique
class CompilerSanitizer(Enum):
NONE = 'none'
ADDRESS = 'address'
THREAD = 'thread'
UNDEFINED = 'undefined'
MEMORY = 'memory'
| true
| true
|
79044c621323ba7c551b2e4d38f01d306299281a
| 1,049
|
py
|
Python
|
azure-mgmt-resource/azure/mgmt/resource/features/__init__.py
|
HydAu/AzureSDKForPython
|
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
|
[
"Apache-2.0"
] | null | null | null |
azure-mgmt-resource/azure/mgmt/resource/features/__init__.py
|
HydAu/AzureSDKForPython
|
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
|
[
"Apache-2.0"
] | null | null | null |
azure-mgmt-resource/azure/mgmt/resource/features/__init__.py
|
HydAu/AzureSDKForPython
|
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .feature_client import FeatureClient
from .version import VERSION
__all__ = ['FeatureClient']
__version__ = VERSION
| 36.172414
| 76
| 0.656816
|
from .feature_client import FeatureClient
from .version import VERSION
__all__ = ['FeatureClient']
__version__ = VERSION
| true
| true
|
79044de0ccade054d72a819f02d293037742a7cd
| 1,458
|
py
|
Python
|
Systems/esh-spring-2015.git/src/plugins/systemInfo_test.py
|
mikefeneley/school
|
5156f4537ca76782e7ad6df3c5ffe7b9fb5038da
|
[
"MIT"
] | 1
|
2021-04-21T16:54:04.000Z
|
2021-04-21T16:54:04.000Z
|
Systems/esh-spring-2015.git/src/plugins/systemInfo_test.py
|
mikefeneley/school
|
5156f4537ca76782e7ad6df3c5ffe7b9fb5038da
|
[
"MIT"
] | null | null | null |
Systems/esh-spring-2015.git/src/plugins/systemInfo_test.py
|
mikefeneley/school
|
5156f4537ca76782e7ad6df3c5ffe7b9fb5038da
|
[
"MIT"
] | null | null | null |
import sys, imp, atexit, os
sys.path.append("/home/courses/cs3214/software/pexpect-dpty/");
import pexpect, shellio, signal, time, os, re, proc_check
# Determine the path this file is in
thisdir = os.path.dirname(os.path.realpath(__file__))
#Ensure the shell process is terminated
def force_shell_termination(shell_process):
    shell_process.close(force=True)
# pulling in the regular expression and other definitions
# this should be the eshoutput.py file of the hosting shell, see usage above
definitions_scriptname = sys.argv[1]
def_module = imp.load_source('', definitions_scriptname)
# you can define logfile=open("log.txt", "w") in your eshoutput.py if you want logging!
logfile = None
if hasattr(def_module, 'logfile'):
logfile = def_module.logfile
#spawn an instance of the shell, note the -p flags
c = pexpect.spawn(def_module.shell, drainpty=True, logfile=logfile, args=['-p', thisdir])
atexit.register(force_shell_termination, shell_process=c)
# set timeout for all following 'expect*' calls to 5 seconds
c.timeout = 5
#############################################################################
#
# Actual Test
assert c.expect(def_module.prompt) == 0, "Shell did not print expected prompt (1)"
c.sendline("systemInfo")
assert c.expect('------------------------------------------------\r\n') == 0, "Shell did not print out expected values"
assert c.expect(def_module.prompt) == 0, "Shell did not print expected prompt (2)"
shellio.success()
| 34.714286
| 120
| 0.687243
|
import sys, imp, atexit, os
sys.path.append("/home/courses/cs3214/software/pexpect-dpty/")
import pexpect, shellio, signal, time, re, proc_check
thisdir = os.path.dirname(os.path.realpath(__file__))
def force_shell_termination(shell_process):
    shell_process.close(force=True)
definitions_scriptname = sys.argv[1]
def_module = imp.load_source('', definitions_scriptname)
logfile = None
if hasattr(def_module, 'logfile'):
logfile = def_module.logfile
c = pexpect.spawn(def_module.shell, drainpty=True, logfile=logfile, args=['-p', thisdir])
atexit.register(force_shell_termination, shell_process=c)
c.timeout = 5
| true
| true
|
79044e86db35a13256fbdc227e39b6fae547182d
| 1,361
|
py
|
Python
|
pier14/opc-client/playlist.py
|
FlamingLotusGirls/soma
|
df0da411d5b4e1f3a37a09fd4bd3ec3407aec7b5
|
[
"Apache-2.0"
] | 1
|
2015-01-15T17:01:29.000Z
|
2015-01-15T17:01:29.000Z
|
pier14/opc-client/playlist.py
|
FlamingLotusGirls/soma
|
df0da411d5b4e1f3a37a09fd4bd3ec3407aec7b5
|
[
"Apache-2.0"
] | null | null | null |
pier14/opc-client/playlist.py
|
FlamingLotusGirls/soma
|
df0da411d5b4e1f3a37a09fd4bd3ec3407aec7b5
|
[
"Apache-2.0"
] | 1
|
2015-10-29T18:15:53.000Z
|
2015-10-29T18:15:53.000Z
|
import random
class Playlist:
"""
A list of routines (aka a list of light effect layer lists, or a list of
flame sequences) all intended for use in a single context (e.g. when the
headset is on). One routine in the playlist is selected at any given time.
"""
def __init__(self, routines, index = 0, shuffle=False):
self.routines = routines
self.selected = index
self.order = range(len(self.routines))
self.shuffle = shuffle
if shuffle:
random.shuffle(self.order)
self.print_selection()
def print_selection(self):
print "Playlist selecting index %d:" % self.selected
for x in self.routines[self.order[self.selected]]:
print " ", repr(x)
def selection(self):
return self.routines[self.order[self.selected]]
def advance(self):
"""
Switch the selected routine to the next one in the list, either
consecutively or randomly depending on whether shuffle is true
"""
if len(self.routines) > 1:
selected = self.selected + 1
if selected >= len(self.routines):
if self.shuffle:
random.shuffle(self.order)
selected = 0
self.selected = selected
self.print_selection()
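# A hypothetical usage sketch, not part of the original file: each routine is
# itself a list (e.g. of light effect layers), and advance() steps through
# them, reshuffling the play order whenever a shuffled playlist wraps around.
#
#   routines = [[layer_a], [layer_b], [layer_c]]   # placeholder routine lists
#   playlist = Playlist(routines, shuffle=True)
#   current = playlist.selection()                 # currently selected routine
#   playlist.advance()                             # move to the next one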
| 34.025
| 78
| 0.588538
|
import random
class Playlist:
"""
A list of routines (aka a list of light effect layer lists, or a list of
flame sequences) all intended for use in a single context (e.g. when the
headset is on). One routine in the playlist is selected at any given time.
"""
def __init__(self, routines, index = 0, shuffle=False):
self.routines = routines
self.selected = index
self.order = range(len(self.routines))
self.shuffle = shuffle
if shuffle:
random.shuffle(self.order)
self.print_selection()
def print_selection(self):
print "Playlist selecting index %d:" % self.selected
for x in self.routines[self.order[self.selected]]:
print " ", repr(x)
def selection(self):
return self.routines[self.order[self.selected]]
def advance(self):
"""
Switch the selected routine to the next one in the list, either
consecutively or randomly depending on whether shuffle is true
"""
if len(self.routines) > 1:
selected = self.selected + 1
if selected >= len(self.routines):
if self.shuffle:
random.shuffle(self.order)
selected = 0
self.selected = selected
self.print_selection()
| false
| true
|
79044e9948c60a74ceb46f9eb7a137dcb97eefec
| 193
|
py
|
Python
|
src/rest_framework_jwt/compat.py
|
nigoroll/django-rest-framework-jwt
|
737464f7a8b546165fdfc870b73c8059ed926327
|
[
"MIT"
] | null | null | null |
src/rest_framework_jwt/compat.py
|
nigoroll/django-rest-framework-jwt
|
737464f7a8b546165fdfc870b73c8059ed926327
|
[
"MIT"
] | null | null | null |
src/rest_framework_jwt/compat.py
|
nigoroll/django-rest-framework-jwt
|
737464f7a8b546165fdfc870b73c8059ed926327
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from django.urls import include, url
except ImportError:
from django.conf.urls import include, url # noqa: F401
| 19.3
| 59
| 0.715026
|
from __future__ import unicode_literals
try:
from django.urls import include, url
except ImportError:
from django.conf.urls import include, url
| true
| true
|
7904504b598977d27811250029d2b25d47407ac1
| 2,758
|
py
|
Python
|
tasks/retriever/mrr.py
|
platiagro/tasks
|
a6103cb101eeed26381cdb170a11d0e1dc53d3ad
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 2
|
2021-02-16T12:39:57.000Z
|
2021-07-21T11:36:39.000Z
|
tasks/retriever/mrr.py
|
platiagro/tasks
|
a6103cb101eeed26381cdb170a11d0e1dc53d3ad
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 20
|
2020-10-26T18:05:27.000Z
|
2021-11-30T19:05:22.000Z
|
tasks/retriever/mrr.py
|
platiagro/tasks
|
a6103cb101eeed26381cdb170a11d0e1dc53d3ad
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 7
|
2020-10-13T18:12:22.000Z
|
2021-08-13T19:16:21.000Z
|
# Import dependencies
# Math/Torch
import numpy as np
import torch.nn as nn
# Typing
from typing import List
# Instantiate class
class MRR(nn.Module):
"""Compute MRR metric (Mean reciprocal rank)"""
def __init__(self, max_rank = 10):
super(MRR, self).__init__()
# Set max mrr rank
self.max_rank = max_rank
def _calculate_reciprocal_rank(self, hypothesis_ids: np.ndarray, reference_id: int) -> float:
"""Calculate the reciprocal rank for a given hypothesis and reference
Params:
            hypothesis_ids: Hypothesis ids (as a numpy array) ordered by relevance
            reference_id: Reference id (as an integer) of the correct response
Returns:
reciprocal rank
"""
# Assure hypothesis_ids is a numpy array
hypothesis_ids = np.asarray(hypothesis_ids)
# Calculate rank
try:
rank = np.where(hypothesis_ids == reference_id)[0][0] + 1
except IndexError:
rank = self.max_rank + 1
        # A rank greater than max_rank is scored as zero
if rank > self.max_rank:
reciprocal_rank = 0.0
else:
# Calculate reciprocal rank
reciprocal_rank = 1. / rank
return reciprocal_rank
def forward(self, batch_hypothesis_ids: List[np.ndarray], batch_reference_id: List[int]) -> float:
"""Score the mean reciprocal rank for the batch
Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank
>>> batch_hypothesis_ids = [[1, 0, 2], [0, 2, 1], [1, 0, 2]]
>>> batch_reference_id = [2, 2, 1]
>>> mrr = MRR()
>>> mrr(batch_hypothesis_ids, batch_reference_id)
0.61111111111111105
Args:
            batch_hypothesis_ids: Batch of hypothesis ids (as numpy arrays) ordered by relevance
            batch_reference_id: Batch of reference ids (as integers) of the correct responses
Returns:
Mean reciprocal rank (MRR)
"""
# Assure batches have same length
assert len(batch_hypothesis_ids) == len(batch_reference_id), "Hypothesis batch and reference batch must have same length."
# Size of batch
batch_size = len(batch_hypothesis_ids)
# MRR to be calculated
mrr = 0
for hypothesis_ids, reference_id in zip(batch_hypothesis_ids, batch_reference_id):
# Calculate reciprocal rank
reciprocal_rank = self._calculate_reciprocal_rank(hypothesis_ids, reference_id)
# Add to MRR
mrr += reciprocal_rank/batch_size
return mrr
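# A small self-check mirroring the docstring example above, not part of the
# original file: the reciprocal ranks are 1/3, 1/2 and 1, so the mean is
# 11/18 ~= 0.6111.
if __name__ == '__main__':
    mrr_metric = MRR()
    score = mrr_metric([[1, 0, 2], [0, 2, 1], [1, 0, 2]], [2, 2, 1])
    assert abs(score - 11.0 / 18.0) < 1e-9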
| 32.447059
| 130
| 0.603698
|
import numpy as np
import torch.nn as nn
from typing import List
class MRR(nn.Module):
def __init__(self, max_rank = 10):
super(MRR, self).__init__()
self.max_rank = max_rank
def _calculate_reciprocal_rank(self, hypothesis_ids: np.ndarray, reference_id: int) -> float:
hypothesis_ids = np.asarray(hypothesis_ids)
try:
rank = np.where(hypothesis_ids == reference_id)[0][0] + 1
except IndexError:
rank = self.max_rank + 1
if rank > self.max_rank:
reciprocal_rank = 0.0
else:
reciprocal_rank = 1. / rank
return reciprocal_rank
def forward(self, batch_hypothesis_ids: List[np.ndarray], batch_reference_id: List[int]) -> float:
assert len(batch_hypothesis_ids) == len(batch_reference_id), "Hypothesis batch and reference batch must have same length."
batch_size = len(batch_hypothesis_ids)
mrr = 0
for hypothesis_ids, reference_id in zip(batch_hypothesis_ids, batch_reference_id):
reciprocal_rank = self._calculate_reciprocal_rank(hypothesis_ids, reference_id)
mrr += reciprocal_rank/batch_size
return mrr
| true
| true
|
7904505524c62cc2235184f189624e0b2645a878
| 8,080
|
py
|
Python
|
userena/contrib/umessages/migrations/0001_initial.py
|
ixc/django-userena
|
5a8c61dcf5133e8b745c25d3b54e5578043222d8
|
[
"BSD-3-Clause"
] | 2
|
2019-02-14T00:44:29.000Z
|
2020-04-28T17:04:05.000Z
|
userena/contrib/umessages/migrations/0001_initial.py
|
barszczmm/django-easy-userena
|
096052d3e0c5ef0442d93fe90e1bd783f1c1dc7c
|
[
"BSD-3-Clause"
] | null | null | null |
userena/contrib/umessages/migrations/0001_initial.py
|
barszczmm/django-easy-userena
|
096052d3e0c5ef0442d93fe90e1bd783f1c1dc7c
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'MessageContact'
db.create_table('umessages_messagecontact', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('from_user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='from_users', to=orm['auth.User'])),
('to_user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='to_users', to=orm['auth.User'])),
('latest_message', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['umessages.Message'])),
))
db.send_create_signal('umessages', ['MessageContact'])
# Adding unique constraint on 'MessageContact', fields ['from_user', 'to_user']
db.create_unique('umessages_messagecontact', ['from_user_id', 'to_user_id'])
# Adding model 'MessageRecipient'
db.create_table('umessages_messagerecipient', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('message', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['umessages.Message'])),
('read_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('umessages', ['MessageRecipient'])
# Adding model 'Message'
db.create_table('umessages_message', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('body', self.gf('django.db.models.fields.TextField')()),
('sender', self.gf('django.db.models.fields.related.ForeignKey')(related_name='sent_messages', to=orm['auth.User'])),
('sent_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('sender_deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('umessages', ['Message'])
def backwards(self, orm):
# Removing unique constraint on 'MessageContact', fields ['from_user', 'to_user']
db.delete_unique('umessages_messagecontact', ['from_user_id', 'to_user_id'])
# Deleting model 'MessageContact'
db.delete_table('umessages_messagecontact')
# Deleting model 'MessageRecipient'
db.delete_table('umessages_messagerecipient')
# Deleting model 'Message'
db.delete_table('umessages_message')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'umessages.message': {
'Meta': {'ordering': "['-sent_at']", 'object_name': 'Message'},
'body': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_messages'", 'symmetrical': 'False', 'through': "orm['umessages.MessageRecipient']", 'to': "orm['auth.User']"}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sent_messages'", 'to': "orm['auth.User']"}),
'sender_deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'umessages.messagecontact': {
'Meta': {'ordering': "['latest_message']", 'unique_together': "(('from_user', 'to_user'),)", 'object_name': 'MessageContact'},
'from_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'from_users'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_message': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['umessages.Message']"}),
'to_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'to_users'", 'to': "orm['auth.User']"})
},
'umessages.messagerecipient': {
'Meta': {'object_name': 'MessageRecipient'},
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['umessages.Message']"}),
'read_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['umessages']
| 65.691057
| 221
| 0.593936
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.create_table('umessages_messagecontact', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('from_user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='from_users', to=orm['auth.User'])),
('to_user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='to_users', to=orm['auth.User'])),
('latest_message', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['umessages.Message'])),
))
db.send_create_signal('umessages', ['MessageContact'])
db.create_unique('umessages_messagecontact', ['from_user_id', 'to_user_id'])
db.create_table('umessages_messagerecipient', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('message', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['umessages.Message'])),
('read_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('umessages', ['MessageRecipient'])
db.create_table('umessages_message', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('body', self.gf('django.db.models.fields.TextField')()),
('sender', self.gf('django.db.models.fields.related.ForeignKey')(related_name='sent_messages', to=orm['auth.User'])),
('sent_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('sender_deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('umessages', ['Message'])
def backwards(self, orm):
db.delete_unique('umessages_messagecontact', ['from_user_id', 'to_user_id'])
db.delete_table('umessages_messagecontact')
db.delete_table('umessages_messagerecipient')
db.delete_table('umessages_message')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'umessages.message': {
'Meta': {'ordering': "['-sent_at']", 'object_name': 'Message'},
'body': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_messages'", 'symmetrical': 'False', 'through': "orm['umessages.MessageRecipient']", 'to': "orm['auth.User']"}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sent_messages'", 'to': "orm['auth.User']"}),
'sender_deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'umessages.messagecontact': {
'Meta': {'ordering': "['latest_message']", 'unique_together': "(('from_user', 'to_user'),)", 'object_name': 'MessageContact'},
'from_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'from_users'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_message': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['umessages.Message']"}),
'to_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'to_users'", 'to': "orm['auth.User']"})
},
'umessages.messagerecipient': {
'Meta': {'object_name': 'MessageRecipient'},
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['umessages.Message']"}),
'read_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['umessages']
| true
| true
|
790451695a306cb7aa4c76c5e3e4cccfba51dab7
| 971
|
py
|
Python
|
test/test_tiktok.py
|
lijemutu/auddit_extension
|
030f965a79fc9d01985d6e00f7fea41083dd6617
|
[
"MIT"
] | null | null | null |
test/test_tiktok.py
|
lijemutu/auddit_extension
|
030f965a79fc9d01985d6e00f7fea41083dd6617
|
[
"MIT"
] | null | null | null |
test/test_tiktok.py
|
lijemutu/auddit_extension
|
030f965a79fc9d01985d6e00f7fea41083dd6617
|
[
"MIT"
] | 1
|
2021-06-04T16:50:36.000Z
|
2021-06-04T16:50:36.000Z
|
import unittest,os
from src.tasks.scrape_reddit.tiktok import dwn_tiktok
from src.tasks.generate_video.task import generate_tiktok
from src.tasks.upload_video.task import upload_video
class TestTiktok(unittest.TestCase):
def setUp(self):
pass
def test_tiktok(self):
context = {
'page':{
'Nombre':"Pagina que hace compilaciones perronas de tiktok",
"thumbnail": False,
'description':['Y a ti te ha pasado eso? \nIngresa mi codigo para que ganes dinero!!\nKwai 848290921'],
'tags':['Amor','Meme','Chistes','Divertido','Reddit'],
"playlist":"Compilaciones TikTok"
},
'video_path':os.getcwd()+"\\"+r"test\test_videos\caption.mp4",
'thumbnail_path':os.getcwd()+"\\"+r"data\thumbnails\8ccd23f7-7292-41d7-a743-b2c9f2b7fd36.png"}
#dwn_tiktok(context)
#generate_tiktok(context)
upload_video(context)
| 37.346154
| 119
| 0.627188
|
import unittest,os
from src.tasks.scrape_reddit.tiktok import dwn_tiktok
from src.tasks.generate_video.task import generate_tiktok
from src.tasks.upload_video.task import upload_video
class TestTiktok(unittest.TestCase):
def setUp(self):
pass
def test_tiktok(self):
context = {
'page':{
'Nombre':"Pagina que hace compilaciones perronas de tiktok",
"thumbnail": False,
'description':['Y a ti te ha pasado eso? \nIngresa mi codigo para que ganes dinero!!\nKwai 848290921'],
'tags':['Amor','Meme','Chistes','Divertido','Reddit'],
"playlist":"Compilaciones TikTok"
},
'video_path':os.getcwd()+"\\"+r"test\test_videos\caption.mp4",
'thumbnail_path':os.getcwd()+"\\"+r"data\thumbnails\8ccd23f7-7292-41d7-a743-b2c9f2b7fd36.png"}
upload_video(context)
| true
| true
|
7904526d4f781baed9cdc8dc4972204a7a65ec51
| 236
|
py
|
Python
|
some-euler/p31.py
|
rik0/rk-exempla
|
811f859a0980b0636bbafa2656893d988c4d0e32
|
[
"MIT"
] | 1
|
2017-02-20T21:04:47.000Z
|
2017-02-20T21:04:47.000Z
|
some-euler/p31.py
|
rik0/rk-exempla
|
811f859a0980b0636bbafa2656893d988c4d0e32
|
[
"MIT"
] | null | null | null |
some-euler/p31.py
|
rik0/rk-exempla
|
811f859a0980b0636bbafa2656893d988c4d0e32
|
[
"MIT"
] | 2
|
2017-02-20T21:04:49.000Z
|
2021-05-18T11:29:16.000Z
|
import constraint
coins = [1, 2, 5, 10, 20, 50, 100, 200]
CSP = constraint.Problem()
for coin in coins:
CSP.addVariable(coin, range(0, 201, coin))
CSP.addConstraint(constraint.ExactSumConstraint(200))
print(len(CSP.getSolutions()))
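# Hedged cross-check (added; not part of the original script): the same count
# computed with plain dynamic programming instead of the constraint solver.
ways = [1] + [0] * 200
for coin in coins:
    for total in range(coin, 201):
        ways[total] += ways[total - coin]
print(ways[200])  # expected to match: 73682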
| 26.222222
| 53
| 0.724576
|
import constraint
coins = [1, 2, 5, 10, 20, 50, 100, 200]
CSP = constraint.Problem()
for coin in coins:
CSP.addVariable(coin, range(0, 201, coin))
CSP.addConstraint(constraint.ExactSumConstraint(200))
print(len(CSP.getSolutions()))
| false
| true
|
790452a76a5127c5bfaf46d2b1df02f5a0a37e16
| 1,533
|
py
|
Python
|
setup.py
|
andersy005/repo2singularity
|
4e0759a625fc856c140c904f87fcdfa891c8bf6b
|
[
"BSD-3-Clause"
] | 3
|
2020-06-25T17:03:44.000Z
|
2022-02-21T17:43:19.000Z
|
setup.py
|
andersy005/repo2singularity
|
4e0759a625fc856c140c904f87fcdfa891c8bf6b
|
[
"BSD-3-Clause"
] | 1
|
2020-05-15T04:49:22.000Z
|
2020-06-30T15:34:30.000Z
|
setup.py
|
andersy005/repo2singularity
|
4e0759a625fc856c140c904f87fcdfa891c8bf6b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""The setup script."""
from setuptools import find_packages, setup
with open('requirements.txt') as f:
INSTALL_REQUIREs = f.read().strip().split('\n')
with open('README.md', encoding='utf8') as f:
LONG_DESCRIPTION = f.read()
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering',
]
setup(
name='repo2singularity',
    description='Repo2singularity: Wrapper around repo2docker producing Jupyter-enabled Singularity images.',
long_description=LONG_DESCRIPTION,
python_requires='>=3.6',
maintainer='Anderson Banihirwe',
classifiers=CLASSIFIERS,
url='https://github.com/andersy005/repo2singularity',
packages=find_packages(exclude=('tests',)),
include_package_data=True,
install_requires=INSTALL_REQUIREs,
license='Apache 2.0',
zip_safe=False,
entry_points={'console_scripts': ['repo2singularity = repo2singularity.core:main']},
keywords='reproducible science environments docker singularity',
use_scm_version={'version_scheme': 'post-release', 'local_scheme': 'dirty-tag'},
setup_requires=['setuptools_scm', 'setuptools>=30.3.0'],
)
| 37.390244
| 119
| 0.700587
|
from setuptools import find_packages, setup
with open('requirements.txt') as f:
INSTALL_REQUIREs = f.read().strip().split('\n')
with open('README.md', encoding='utf8') as f:
LONG_DESCRIPTION = f.read()
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering',
]
setup(
name='repo2singularity',
    description='Repo2singularity: Wrapper around repo2docker producing Jupyter-enabled Singularity images.',
long_description=LONG_DESCRIPTION,
python_requires='>=3.6',
maintainer='Anderson Banihirwe',
classifiers=CLASSIFIERS,
url='https://github.com/andersy005/repo2singularity',
packages=find_packages(exclude=('tests',)),
include_package_data=True,
install_requires=INSTALL_REQUIREs,
license='Apache 2.0',
zip_safe=False,
entry_points={'console_scripts': ['repo2singularity = repo2singularity.core:main']},
keywords='reproducible science environments docker singularity',
use_scm_version={'version_scheme': 'post-release', 'local_scheme': 'dirty-tag'},
setup_requires=['setuptools_scm', 'setuptools>=30.3.0'],
)
| true
| true
|
79045314afbaa88b5ab7373b4af3d0264c122e9c
| 14,093
|
py
|
Python
|
lte/gateway/python/magma/pipelined/qos/qos_tc_impl.py
|
khansiddiquekc/magma
|
891718acbe3b9cb3973ae0376d9bcadb31503905
|
[
"BSD-3-Clause"
] | null | null | null |
lte/gateway/python/magma/pipelined/qos/qos_tc_impl.py
|
khansiddiquekc/magma
|
891718acbe3b9cb3973ae0376d9bcadb31503905
|
[
"BSD-3-Clause"
] | null | null | null |
lte/gateway/python/magma/pipelined/qos/qos_tc_impl.py
|
khansiddiquekc/magma
|
891718acbe3b9cb3973ae0376d9bcadb31503905
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import subprocess
from typing import Optional # noqa
from lte.protos.policydb_pb2 import FlowMatch
from .tc_ops_cmd import TcOpsCmd, argSplit, run_cmd
from .tc_ops_pyroute2 import TcOpsPyRoute2
from .types import QosInfo
from .utils import IdManager
LOG = logging.getLogger('pipelined.qos.qos_tc_impl')
# LOG.setLevel(logging.DEBUG)
# TODO - replace this implementation with pyroute2 tc
ROOT_QID = 65534
DEFAULT_RATE = '80Kbit'
DEFAULT_INTF_SPEED = '1000'
class TrafficClass:
"""
    Creates/deletes queues in Linux, using qdiscs for flow-based
    rate limiting (traffic shaping) of user traffic.
"""
tc_ops = None
@staticmethod
def delete_class(intf: str, qid: int, skip_filter=False) -> int:
qid_hex = hex(qid)
if not skip_filter:
TrafficClass.tc_ops.del_filter(intf, qid_hex, qid_hex)
return TrafficClass.tc_ops.del_htb(intf, qid_hex)
@staticmethod
def create_class(
intf: str, qid: int, max_bw: int, rate=None,
parent_qid=None, skip_filter=False,
) -> int:
if not rate:
rate = DEFAULT_RATE
if not parent_qid:
parent_qid = ROOT_QID
if parent_qid == qid:
# parent qid should only be self for root case, everything else
# should be the child of root class
LOG.error('parent and self qid equal, setting parent_qid to root')
parent_qid = ROOT_QID
qid_hex = hex(qid)
parent_qid_hex = '1:' + hex(parent_qid)
err = TrafficClass.tc_ops.create_htb(intf, qid_hex, max_bw, rate, parent_qid_hex)
if err < 0 or skip_filter:
return err
# add filter
return TrafficClass.tc_ops.create_filter(intf, qid_hex, qid_hex)
@staticmethod
def init_qdisc(
intf: str, show_error=False, enable_pyroute2=False,
default_gbr=DEFAULT_RATE,
) -> int:
# TODO: Convert this class into an object.
if TrafficClass.tc_ops is None:
if enable_pyroute2:
TrafficClass.tc_ops = TcOpsPyRoute2()
else:
TrafficClass.tc_ops = TcOpsCmd()
cmd_list = []
speed = DEFAULT_INTF_SPEED
qid_hex = hex(ROOT_QID)
fn = "/sys/class/net/{intf}/speed".format(intf=intf)
try:
with open(fn, encoding="utf-8") as f:
speed = f.read().strip()
except OSError:
LOG.error('unable to read speed from %s defaulting to %s', fn, speed)
# qdisc does not support replace, so check it before creating the HTB qdisc.
qdisc_type = TrafficClass._get_qdisc_type(intf)
if qdisc_type != "htb":
qdisc_cmd = "tc qdisc add dev {intf} root handle 1: htb".format(intf=intf)
cmd_list.append(qdisc_cmd)
LOG.info("Created root qdisc")
parent_q_cmd = "tc class replace dev {intf} parent 1: classid 1:{root_qid} htb "
parent_q_cmd += "rate {speed}Mbit ceil {speed}Mbit"
parent_q_cmd = parent_q_cmd.format(intf=intf, root_qid=qid_hex, speed=speed)
cmd_list.append(parent_q_cmd)
tc_cmd = "tc class replace dev {intf} parent 1:{root_qid} classid 1:1 htb "
tc_cmd += "rate {rate} ceil {speed}Mbit"
tc_cmd = tc_cmd.format(
intf=intf, root_qid=qid_hex, rate=default_gbr,
speed=speed,
)
cmd_list.append(tc_cmd)
return run_cmd(cmd_list, show_error)
@staticmethod
def read_all_classes(intf: str):
qid_list = []
# example output of this command
# b'class htb 1:1 parent 1:fffe prio 0 rate 12Kbit ceil 1Gbit burst \
# 1599b cburst 1375b \nclass htb 1:fffe root rate 1Gbit ceil 1Gbit \
# burst 1375b cburst 1375b \n'
# we need to parse this output and extract class ids from here
tc_cmd = "tc class show dev {}".format(intf)
args = argSplit(tc_cmd)
try:
output = subprocess.check_output(args)
for ln in output.decode('utf-8').split("\n"):
ln = ln.strip()
if not ln:
continue
tok = ln.split()
if len(tok) < 5:
continue
if tok[1] != "htb":
continue
if tok[3] == 'root':
continue
qid_str = tok[2].split(':')[1]
qid = int(qid_str, 16)
pqid_str = tok[4].split(':')[1]
pqid = int(pqid_str, 16)
qid_list.append((qid, pqid))
LOG.debug("TC-dump: %s qid %d pqid %d", ln, qid, pqid)
except subprocess.CalledProcessError as e:
LOG.error('failed extracting classids from tc %s', e)
return qid_list
@staticmethod
def dump_class_state(intf: str, qid: int):
qid_hex = hex(qid)
tc_cmd = "tc -s -d class show dev {} classid 1:{}".format(
intf,
qid_hex,
)
args = argSplit(tc_cmd)
try:
output = subprocess.check_output(args)
print(output.decode())
except subprocess.CalledProcessError:
print("Exception dumping Qos State for %s", intf)
@staticmethod
def dump_root_class_stats(intf: str):
tc_cmd = "tc -s -s -d q ls dev {}".format(intf)
args = argSplit(tc_cmd)
try:
output = subprocess.check_output(args)
print(output.decode())
except subprocess.CalledProcessError:
print("Exception dumping Qos State for %s", intf)
@staticmethod
def get_class_rate(intf: str, qid: int) -> Optional[str]:
qid_hex = hex(qid)
tc_cmd = "tc class show dev {} classid 1:{}".format(intf, qid_hex)
args = argSplit(tc_cmd)
try:
# output: class htb 1:3 parent 1:2 prio 2 rate 250Kbit ceil 500Kbit burst 1600b cburst 1600b
raw_output = subprocess.check_output(args)
output = raw_output.decode('utf-8')
# return all config from 'rate' onwards
config = output.split("rate")
try:
return config[1]
except IndexError:
LOG.error("could not find rate: %s", output)
except subprocess.CalledProcessError:
LOG.error("Exception dumping Qos State for %s", tc_cmd)
@staticmethod
def _get_qdisc_type(intf: str) -> Optional[str]:
tc_cmd = "tc qdisc show dev {}".format(intf)
args = argSplit(tc_cmd)
try:
# output: qdisc htb 1: root refcnt 2 r2q 10 default 0 direct_packets_stat 314 direct_qlen 1000
raw_output = subprocess.check_output(args)
output = raw_output.decode('utf-8')
config = output.split()
try:
return config[1]
except IndexError:
LOG.error("could not qdisc type: %s", output)
except subprocess.CalledProcessError:
LOG.error("Exception dumping Qos State for %s", tc_cmd)
class TCManager(object):
"""
    Creates/deletes queues in Linux, using qdiscs for flow-based
    rate limiting (traffic shaping) of user traffic.
    Queues are created on an egress interface, and flows in OVS are
    programmed with a qid that steers their traffic to the matching queue.
    Traffic matching a specific flow is filtered to its queue and rate
    limited according to the configured value; traffic from flows with no
    QoS configuration goes to a default queue and is not rate limited.
"""
def __init__(
self,
datapath,
config,
) -> None:
self._datapath = datapath
self._uplink = config['nat_iface']
self._downlink = config['enodeb_iface']
self._max_rate = config["qos"]["max_rate"]
self._gbr_rate = config["qos"].get("gbr_rate", DEFAULT_RATE)
self._enable_pyroute2 = config["qos"].get('enable_pyroute2', False)
self._start_idx, self._max_idx = (
config['qos']['linux_tc']['min_idx'],
config['qos']['linux_tc']['max_idx'],
)
self._id_manager = IdManager(self._start_idx, self._max_idx)
self._initialized = True
LOG.info(
"Init LinuxTC module uplink:%s downlink:%s",
config['nat_iface'], config['enodeb_iface'],
)
def destroy(self):
if not TrafficClass.tc_ops:
LOG.info("TC not initialized, skip destroying existing qos classes")
return
LOG.info("destroying existing leaf qos classes")
# ensure ordering during deletion of classes, children should be deleted
# prior to the parent class ids
p_qids = set()
for intf in [self._uplink, self._downlink]:
qid_list = TrafficClass.read_all_classes(intf)
for qid_tuple in qid_list:
(qid, pqid) = qid_tuple
if self._start_idx <= qid < (self._max_idx - 1):
LOG.info("Attempting to delete class idx %d", qid)
TrafficClass.delete_class(intf, qid)
if self._start_idx <= pqid < (self._max_idx - 1):
p_qids.add((intf, pqid))
LOG.info("destroying existing parent classes")
for p_qid_tuple in p_qids:
(intf, pqid) = p_qid_tuple
LOG.info("Attempting to delete parent class idx %d", pqid)
TrafficClass.delete_class(intf, pqid, skip_filter=True)
LOG.info("destroying All qos classes: done")
def setup(self):
# initialize new qdisc
TrafficClass.init_qdisc(
self._uplink, enable_pyroute2=self._enable_pyroute2,
default_gbr=self._gbr_rate,
)
TrafficClass.init_qdisc(
self._downlink, enable_pyroute2=self._enable_pyroute2,
default_gbr=self._gbr_rate,
)
def get_action_instruction(self, qid: int):
        # return the OpenFlow action, instruction, and qid for this flow
        if qid < self._start_idx or qid > (self._max_idx - 1):
            LOG.error("invalid qid %d, no action/inst returned", qid)
            return None, None, None
parser = self._datapath.ofproto_parser
return parser.OFPActionSetField(pkt_mark=qid), None, qid
def create_class_async(
self, d: FlowMatch.Direction, qos_info: QosInfo,
qid,
parent, skip_filter, cleanup_rule,
):
intf = self._uplink if d == FlowMatch.UPLINK else self._downlink
gbr = qos_info.gbr
if gbr is None:
gbr = self._gbr_rate
err = TrafficClass.create_class(
intf, qid, qos_info.mbr,
rate=gbr,
parent_qid=parent,
skip_filter=skip_filter,
)
# typecast to int to avoid MagicMock related error in unit test
err_no = int(err)
if err_no < 0:
if cleanup_rule:
cleanup_rule()
LOG.error("qos create error: qid %d err %d", qid, err_no)
return
LOG.debug("create done: if: %s qid %d err %s", intf, qid, err_no)
def add_qos(
self, d: FlowMatch.Direction, qos_info: QosInfo,
cleanup_rule=None, parent=None, skip_filter=False,
) -> int:
LOG.debug("add QoS: %s", qos_info)
qid = self._id_manager.allocate_idx()
self.create_class_async(
d, qos_info,
qid, parent, skip_filter, cleanup_rule,
)
LOG.debug("assigned qid: %d", qid)
return qid
def remove_qos(
self, qid: int, d: FlowMatch.Direction,
recovery_mode=False, skip_filter=False,
):
if not self._initialized and not recovery_mode:
return
if qid < self._start_idx or qid > (self._max_idx - 1):
LOG.error("invalid qid %d, removal failed", qid)
return
LOG.debug("deleting qos_handle %s, skip_filter %s", qid, skip_filter)
intf = self._uplink if d == FlowMatch.UPLINK else self._downlink
err = TrafficClass.delete_class(intf, qid, skip_filter)
if err == 0:
self._id_manager.release_idx(qid)
else:
LOG.error('error deleting class %d, not releasing idx', qid)
return
def read_all_state(self):
LOG.debug("read_all_state")
st = {}
apn_qid_list = set()
ul_qid_list = TrafficClass.read_all_classes(self._uplink)
dl_qid_list = TrafficClass.read_all_classes(self._downlink)
for (d, qid_list) in (
(FlowMatch.UPLINK, ul_qid_list),
(FlowMatch.DOWNLINK, dl_qid_list),
):
for qid_tuple in qid_list:
qid, pqid = qid_tuple
if qid < self._start_idx or qid > (self._max_idx - 1):
LOG.debug("qid %d out of range: (%d - %d)", qid, self._start_idx, self._max_idx)
continue
apn_qid = pqid if pqid != self._max_idx else 0
st[qid] = {
'direction': d,
'ambr_qid': apn_qid,
}
if apn_qid != 0:
apn_qid_list.add(apn_qid)
self._id_manager.restore_state(st)
return st, apn_qid_list
def same_qos_config(
self, d: FlowMatch.Direction,
qid1: int, qid2: int,
) -> bool:
intf = self._uplink if d == FlowMatch.UPLINK else self._downlink
config1 = TrafficClass.get_class_rate(intf, qid1)
config2 = TrafficClass.get_class_rate(intf, qid2)
return config1 == config2
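# Added sketch (illustrative; not part of the original module): the HTB
# hierarchy that init_qdisc() and create_class() build per interface, shown
# as roughly equivalent tc commands. Interface name, speed, and the example
# qid are assumptions.
#
#   tc qdisc add dev eth0 root handle 1: htb
#   tc class replace dev eth0 parent 1: classid 1:fffe htb rate 1000Mbit ceil 1000Mbit
#   tc class replace dev eth0 parent 1:fffe classid 1:1 htb rate 80Kbit ceil 1000Mbit
#   # leaf class and fw filter for one flow; OVS marks packets with the qid
#   # via OFPActionSetField(pkt_mark=qid) so the filter steers them here:
#   tc class add dev eth0 parent 1:fffe classid 1:a htb rate 80Kbit ceil <mbr>
#   tc filter add dev eth0 parent 1: protocol ip handle 0xa fw flowid 1:a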
| 36.228792
| 106
| 0.59597
|
import logging
import subprocess
from typing import Optional
from lte.protos.policydb_pb2 import FlowMatch
from .tc_ops_cmd import TcOpsCmd, argSplit, run_cmd
from .tc_ops_pyroute2 import TcOpsPyRoute2
from .types import QosInfo
from .utils import IdManager
LOG = logging.getLogger('pipelined.qos.qos_tc_impl')
ROOT_QID = 65534
DEFAULT_RATE = '80Kbit'
DEFAULT_INTF_SPEED = '1000'
class TrafficClass:
tc_ops = None
@staticmethod
def delete_class(intf: str, qid: int, skip_filter=False) -> int:
qid_hex = hex(qid)
if not skip_filter:
TrafficClass.tc_ops.del_filter(intf, qid_hex, qid_hex)
return TrafficClass.tc_ops.del_htb(intf, qid_hex)
@staticmethod
def create_class(
intf: str, qid: int, max_bw: int, rate=None,
parent_qid=None, skip_filter=False,
) -> int:
if not rate:
rate = DEFAULT_RATE
if not parent_qid:
parent_qid = ROOT_QID
if parent_qid == qid:
LOG.error('parent and self qid equal, setting parent_qid to root')
parent_qid = ROOT_QID
qid_hex = hex(qid)
parent_qid_hex = '1:' + hex(parent_qid)
err = TrafficClass.tc_ops.create_htb(intf, qid_hex, max_bw, rate, parent_qid_hex)
if err < 0 or skip_filter:
return err
return TrafficClass.tc_ops.create_filter(intf, qid_hex, qid_hex)
@staticmethod
def init_qdisc(
intf: str, show_error=False, enable_pyroute2=False,
default_gbr=DEFAULT_RATE,
) -> int:
if TrafficClass.tc_ops is None:
if enable_pyroute2:
TrafficClass.tc_ops = TcOpsPyRoute2()
else:
TrafficClass.tc_ops = TcOpsCmd()
cmd_list = []
speed = DEFAULT_INTF_SPEED
qid_hex = hex(ROOT_QID)
fn = "/sys/class/net/{intf}/speed".format(intf=intf)
try:
with open(fn, encoding="utf-8") as f:
speed = f.read().strip()
except OSError:
LOG.error('unable to read speed from %s defaulting to %s', fn, speed)
qdisc_type = TrafficClass._get_qdisc_type(intf)
if qdisc_type != "htb":
qdisc_cmd = "tc qdisc add dev {intf} root handle 1: htb".format(intf=intf)
cmd_list.append(qdisc_cmd)
LOG.info("Created root qdisc")
parent_q_cmd = "tc class replace dev {intf} parent 1: classid 1:{root_qid} htb "
parent_q_cmd += "rate {speed}Mbit ceil {speed}Mbit"
parent_q_cmd = parent_q_cmd.format(intf=intf, root_qid=qid_hex, speed=speed)
cmd_list.append(parent_q_cmd)
tc_cmd = "tc class replace dev {intf} parent 1:{root_qid} classid 1:1 htb "
tc_cmd += "rate {rate} ceil {speed}Mbit"
tc_cmd = tc_cmd.format(
intf=intf, root_qid=qid_hex, rate=default_gbr,
speed=speed,
)
cmd_list.append(tc_cmd)
return run_cmd(cmd_list, show_error)
@staticmethod
def read_all_classes(intf: str):
qid_list = []
# 1599b cburst 1375b \nclass htb 1:fffe root rate 1Gbit ceil 1Gbit \
# burst 1375b cburst 1375b \n'
tc_cmd = "tc class show dev {}".format(intf)
args = argSplit(tc_cmd)
try:
output = subprocess.check_output(args)
for ln in output.decode('utf-8').split("\n"):
ln = ln.strip()
if not ln:
continue
tok = ln.split()
if len(tok) < 5:
continue
if tok[1] != "htb":
continue
if tok[3] == 'root':
continue
qid_str = tok[2].split(':')[1]
qid = int(qid_str, 16)
pqid_str = tok[4].split(':')[1]
pqid = int(pqid_str, 16)
qid_list.append((qid, pqid))
LOG.debug("TC-dump: %s qid %d pqid %d", ln, qid, pqid)
except subprocess.CalledProcessError as e:
LOG.error('failed extracting classids from tc %s', e)
return qid_list
@staticmethod
def dump_class_state(intf: str, qid: int):
qid_hex = hex(qid)
tc_cmd = "tc -s -d class show dev {} classid 1:{}".format(
intf,
qid_hex,
)
args = argSplit(tc_cmd)
try:
output = subprocess.check_output(args)
print(output.decode())
except subprocess.CalledProcessError:
print("Exception dumping Qos State for %s", intf)
@staticmethod
def dump_root_class_stats(intf: str):
tc_cmd = "tc -s -s -d q ls dev {}".format(intf)
args = argSplit(tc_cmd)
try:
output = subprocess.check_output(args)
print(output.decode())
except subprocess.CalledProcessError:
print("Exception dumping Qos State for %s", intf)
@staticmethod
def get_class_rate(intf: str, qid: int) -> Optional[str]:
qid_hex = hex(qid)
tc_cmd = "tc class show dev {} classid 1:{}".format(intf, qid_hex)
args = argSplit(tc_cmd)
try:
raw_output = subprocess.check_output(args)
output = raw_output.decode('utf-8')
config = output.split("rate")
try:
return config[1]
except IndexError:
LOG.error("could not find rate: %s", output)
except subprocess.CalledProcessError:
LOG.error("Exception dumping Qos State for %s", tc_cmd)
@staticmethod
def _get_qdisc_type(intf: str) -> Optional[str]:
tc_cmd = "tc qdisc show dev {}".format(intf)
args = argSplit(tc_cmd)
try:
raw_output = subprocess.check_output(args)
output = raw_output.decode('utf-8')
config = output.split()
try:
return config[1]
except IndexError:
LOG.error("could not qdisc type: %s", output)
except subprocess.CalledProcessError:
LOG.error("Exception dumping Qos State for %s", tc_cmd)
class TCManager(object):
def __init__(
self,
datapath,
config,
) -> None:
self._datapath = datapath
self._uplink = config['nat_iface']
self._downlink = config['enodeb_iface']
self._max_rate = config["qos"]["max_rate"]
self._gbr_rate = config["qos"].get("gbr_rate", DEFAULT_RATE)
self._enable_pyroute2 = config["qos"].get('enable_pyroute2', False)
self._start_idx, self._max_idx = (
config['qos']['linux_tc']['min_idx'],
config['qos']['linux_tc']['max_idx'],
)
self._id_manager = IdManager(self._start_idx, self._max_idx)
self._initialized = True
LOG.info(
"Init LinuxTC module uplink:%s downlink:%s",
config['nat_iface'], config['enodeb_iface'],
)
def destroy(self):
if not TrafficClass.tc_ops:
LOG.info("TC not initialized, skip destroying existing qos classes")
return
LOG.info("destroying existing leaf qos classes")
p_qids = set()
for intf in [self._uplink, self._downlink]:
qid_list = TrafficClass.read_all_classes(intf)
for qid_tuple in qid_list:
(qid, pqid) = qid_tuple
if self._start_idx <= qid < (self._max_idx - 1):
LOG.info("Attempting to delete class idx %d", qid)
TrafficClass.delete_class(intf, qid)
if self._start_idx <= pqid < (self._max_idx - 1):
p_qids.add((intf, pqid))
LOG.info("destroying existing parent classes")
for p_qid_tuple in p_qids:
(intf, pqid) = p_qid_tuple
LOG.info("Attempting to delete parent class idx %d", pqid)
TrafficClass.delete_class(intf, pqid, skip_filter=True)
LOG.info("destroying All qos classes: done")
def setup(self):
TrafficClass.init_qdisc(
self._uplink, enable_pyroute2=self._enable_pyroute2,
default_gbr=self._gbr_rate,
)
TrafficClass.init_qdisc(
self._downlink, enable_pyroute2=self._enable_pyroute2,
default_gbr=self._gbr_rate,
)
def get_action_instruction(self, qid: int):
if qid < self._start_idx or qid > (self._max_idx - 1):
LOG.error("invalid qid %d, no action/inst returned", qid)
            return None, None, None
parser = self._datapath.ofproto_parser
return parser.OFPActionSetField(pkt_mark=qid), None, qid
def create_class_async(
self, d: FlowMatch.Direction, qos_info: QosInfo,
qid,
parent, skip_filter, cleanup_rule,
):
intf = self._uplink if d == FlowMatch.UPLINK else self._downlink
gbr = qos_info.gbr
if gbr is None:
gbr = self._gbr_rate
err = TrafficClass.create_class(
intf, qid, qos_info.mbr,
rate=gbr,
parent_qid=parent,
skip_filter=skip_filter,
)
err_no = int(err)
if err_no < 0:
if cleanup_rule:
cleanup_rule()
LOG.error("qos create error: qid %d err %d", qid, err_no)
return
LOG.debug("create done: if: %s qid %d err %s", intf, qid, err_no)
def add_qos(
self, d: FlowMatch.Direction, qos_info: QosInfo,
cleanup_rule=None, parent=None, skip_filter=False,
) -> int:
LOG.debug("add QoS: %s", qos_info)
qid = self._id_manager.allocate_idx()
self.create_class_async(
d, qos_info,
qid, parent, skip_filter, cleanup_rule,
)
LOG.debug("assigned qid: %d", qid)
return qid
def remove_qos(
self, qid: int, d: FlowMatch.Direction,
recovery_mode=False, skip_filter=False,
):
if not self._initialized and not recovery_mode:
return
if qid < self._start_idx or qid > (self._max_idx - 1):
LOG.error("invalid qid %d, removal failed", qid)
return
LOG.debug("deleting qos_handle %s, skip_filter %s", qid, skip_filter)
intf = self._uplink if d == FlowMatch.UPLINK else self._downlink
err = TrafficClass.delete_class(intf, qid, skip_filter)
if err == 0:
self._id_manager.release_idx(qid)
else:
LOG.error('error deleting class %d, not releasing idx', qid)
return
def read_all_state(self):
LOG.debug("read_all_state")
st = {}
apn_qid_list = set()
ul_qid_list = TrafficClass.read_all_classes(self._uplink)
dl_qid_list = TrafficClass.read_all_classes(self._downlink)
for (d, qid_list) in (
(FlowMatch.UPLINK, ul_qid_list),
(FlowMatch.DOWNLINK, dl_qid_list),
):
for qid_tuple in qid_list:
qid, pqid = qid_tuple
if qid < self._start_idx or qid > (self._max_idx - 1):
LOG.debug("qid %d out of range: (%d - %d)", qid, self._start_idx, self._max_idx)
continue
apn_qid = pqid if pqid != self._max_idx else 0
st[qid] = {
'direction': d,
'ambr_qid': apn_qid,
}
if apn_qid != 0:
apn_qid_list.add(apn_qid)
self._id_manager.restore_state(st)
return st, apn_qid_list
def same_qos_config(
self, d: FlowMatch.Direction,
qid1: int, qid2: int,
) -> bool:
intf = self._uplink if d == FlowMatch.UPLINK else self._downlink
config1 = TrafficClass.get_class_rate(intf, qid1)
config2 = TrafficClass.get_class_rate(intf, qid2)
return config1 == config2
| true
| true
|
790453b226512eb02d4203b856bf40e88e19d5cb
| 348
|
py
|
Python
|
standard/client.py
|
Lasx/gb688_downloader
|
7e9711e7784c15bcd15a6129ab1fef99c8d44f23
|
[
"Apache-2.0"
] | 119
|
2020-02-27T04:27:15.000Z
|
2022-03-01T07:02:34.000Z
|
standard/client.py
|
Lasx/gb688_downloader
|
7e9711e7784c15bcd15a6129ab1fef99c8d44f23
|
[
"Apache-2.0"
] | 12
|
2020-03-26T04:50:13.000Z
|
2021-11-24T04:00:08.000Z
|
standard/client.py
|
Lasx/gb688_downloader
|
7e9711e7784c15bcd15a6129ab1fef99c8d44f23
|
[
"Apache-2.0"
] | 24
|
2020-03-11T22:50:24.000Z
|
2022-03-25T08:13:56.000Z
|
from . import GB, HDB
from typing import Literal
class Client:
def __init__(self, t: Literal["gb", "hbba", "dbba"]):
self.type = t
def create(self):
if self.type == "gb":
return GB()
elif self.type == "hb":
return HDB("hbba")
elif self.type == "db":
return HDB("dbba")
| 21.75
| 57
| 0.511494
|
from . import GB, HDB
from typing import Literal
class Client:
def __init__(self, t: Literal["gb", "hbba", "dbba"]):
self.type = t
def create(self):
if self.type == "gb":
return GB()
elif self.type == "hb":
return HDB("hbba")
elif self.type == "db":
return HDB("dbba")
| true
| true
|
7904549cf9a9e4c5cbe12df270bf0e7f8f9916c6
| 490
|
py
|
Python
|
scripts/run_pyright.py
|
gentle-knight-13/auto-derby
|
70593fea2c3d803487e6e0d2ce0c40d60bc6304d
|
[
"MIT"
] | null | null | null |
scripts/run_pyright.py
|
gentle-knight-13/auto-derby
|
70593fea2c3d803487e6e0d2ce0c40d60bc6304d
|
[
"MIT"
] | null | null | null |
scripts/run_pyright.py
|
gentle-knight-13/auto-derby
|
70593fea2c3d803487e6e0d2ce0c40d60bc6304d
|
[
"MIT"
] | null | null | null |
# -*- coding=UTF-8 -*-
# pyright: strict
from __future__ import annotations
import os
import sys
import subprocess
def main():
subprocess.call(
["npx", "pyright"],
env={
**os.environ,
"PATH": os.path.pathsep.join(
(
os.path.dirname(sys.executable),
os.getenv("PATH") or "",
)
),
},
shell=True,
)
if __name__ == "__main__":
main()
| 15.806452
| 52
| 0.453061
|
from __future__ import annotations
import os
import sys
import subprocess
def main():
subprocess.call(
["npx", "pyright"],
env={
**os.environ,
"PATH": os.path.pathsep.join(
(
os.path.dirname(sys.executable),
os.getenv("PATH") or "",
)
),
},
shell=True,
)
if __name__ == "__main__":
main()
| true
| true
|
790454bd666d9188fb3392f897386bc556ccd494
| 1,006
|
py
|
Python
|
17_process_thread/46_why_need_ThreadLocal.py
|
hemuke/python
|
bc99f2b5aee997083ae31f59a2b33db48c8255f3
|
[
"Apache-2.0"
] | null | null | null |
17_process_thread/46_why_need_ThreadLocal.py
|
hemuke/python
|
bc99f2b5aee997083ae31f59a2b33db48c8255f3
|
[
"Apache-2.0"
] | null | null | null |
17_process_thread/46_why_need_ThreadLocal.py
|
hemuke/python
|
bc99f2b5aee997083ae31f59a2b33db48c8255f3
|
[
"Apache-2.0"
] | null | null | null |
"""
多线程操作共享的全局变量是不安全的,多线程操作局部 只归某个线程私有,其他线程是不能访问的
"""
import threading
def do_sth(arg1, arg2, arg3):
local_var1 = arg1
local_var2 = arg2
local_var3 = arg3
fun1(local_var1, local_var2, local_var3)
fun2(local_var1, local_var2, local_var3)
fun3(local_var1, local_var2, local_var3)
def fun1(local_var1, local_var2, local_var3):
print('%s: %s -- %s -- %s' % (threading.current_thread().name, local_var1,
local_var2, local_var3))
def fun2(local_var1, local_var2, local_var3):
print('%s: %s -- %s -- %s' % (threading.current_thread().name, local_var1,
local_var2, local_var3))
def fun3(local_var1, local_var2, local_var3):
print('%s: %s -- %s -- %s' % (threading.current_thread().name, local_var1,
local_var2, local_var3))
t1 = threading.Thread(target=do_sth, args=('a', 'b', 'c'))
t2 = threading.Thread(target=do_sth, args=('d', 'e', 'f'))
t1.start()
t2.start()
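# Added sketch (an assumption about the follow-up lesson; not part of the
# original file): threading.local() gives each thread a private copy of an
# attribute, so per-thread state no longer has to be passed through every call.
local_data = threading.local()

def do_sth_local(arg1):
    local_data.var = arg1  # visible only to the current thread
    show_local()

def show_local():
    print('%s: %s' % (threading.current_thread().name, local_data.var))

t3 = threading.Thread(target=do_sth_local, args=('x',))
t4 = threading.Thread(target=do_sth_local, args=('y',))
t3.start()
t4.start()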
| 27.189189
| 78
| 0.61332
|
import threading
def do_sth(arg1, arg2, arg3):
local_var1 = arg1
local_var2 = arg2
local_var3 = arg3
fun1(local_var1, local_var2, local_var3)
fun2(local_var1, local_var2, local_var3)
fun3(local_var1, local_var2, local_var3)
def fun1(local_var1, local_var2, local_var3):
print('%s: %s -- %s -- %s' % (threading.current_thread().name, local_var1,
local_var2, local_var3))
def fun2(local_var1, local_var2, local_var3):
print('%s: %s -- %s -- %s' % (threading.current_thread().name, local_var1,
local_var2, local_var3))
def fun3(local_var1, local_var2, local_var3):
print('%s: %s -- %s -- %s' % (threading.current_thread().name, local_var1,
local_var2, local_var3))
t1 = threading.Thread(target=do_sth, args=('a', 'b', 'c'))
t2 = threading.Thread(target=do_sth, args=('d', 'e', 'f'))
t1.start()
t2.start()
| true
| true
|
79045531c936fead982568606c85276efcfde4ca
| 2,393
|
py
|
Python
|
tests/test_it.py
|
eugenehp/jingtrang
|
245df07d40773fc09fa51def13149efe5aa53e4f
|
[
"MIT"
] | null | null | null |
tests/test_it.py
|
eugenehp/jingtrang
|
245df07d40773fc09fa51def13149efe5aa53e4f
|
[
"MIT"
] | null | null | null |
tests/test_it.py
|
eugenehp/jingtrang
|
245df07d40773fc09fa51def13149efe5aa53e4f
|
[
"MIT"
] | null | null | null |
import subprocess
import py
import pytest
@pytest.fixture(
params=["tests/dataset-rdstmc", "tests/dataset-wiki", "tests/dataset-rntutor"]
)
def datasetdir(request):
return py.path.local(request.param)
@pytest.fixture
def messages(datasetdir):
msgdir = datasetdir.join("messages")
return msgdir.listdir(fil="*.xml")
@pytest.fixture
def rncdir(datasetdir):
return datasetdir.join("schemas")
@pytest.fixture
def rootrnc(rncdir):
return rncdir.join("root.rnc")
@pytest.fixture
def rncschemas(rootrnc):
return rootrnc.dirpath().listdir("*.rnc")
def test_validate_by_rnc_onemsg(rootrnc, messages):
cmd = ["pyjing", "-c"]
cmd.append(rootrnc.strpath)
cmd.append(messages[0].strpath)
subprocess.check_call(cmd)
def test_validate_by_rnc_allmsgs(rootrnc, messages):
cmd = ["pyjing", "-c"]
cmd.append(rootrnc.strpath)
cmd.extend(map(str, messages))
subprocess.check_call(cmd)
def test_rnc2rng(rootrnc, tmpdir, rncschemas):
cmd = ["pytrang"]
rngname = rootrnc.new(dirname=tmpdir, ext=".rng")
cmd.append(rootrnc.strpath)
cmd.append(rngname.strpath)
subprocess.check_call(cmd)
rngnames = tmpdir.listdir(fil="*.rng")
assert len(rngnames) == len(rncschemas)
for rnc, rng in zip(sorted(rngnames), sorted(rncschemas)):
assert rnc.purebasename == rng.purebasename
"""RNG section ========================
"""
@pytest.fixture
def rngschemas(rootrnc, tmpdir, rncschemas):
cmd = ["pytrang"]
rngname = rootrnc.new(dirname=tmpdir, ext=".rng")
cmd.append(rootrnc.strpath)
cmd.append(rngname.strpath)
subprocess.check_call(cmd)
rngnames = tmpdir.listdir(fil="*.rng")
assert len(rngnames) == len(rncschemas)
for rnc, rng in zip(sorted(rngnames), sorted(rncschemas)):
assert rnc.purebasename == rng.purebasename
return rngnames
@pytest.fixture
def rootrng(rngschemas):
rootschema = rngschemas[0].new(basename="root.rng")
assert rootschema in rngschemas
rootschema.ensure()
return rootschema
def test_validate_by_rng_onemsg(rootrng, messages):
cmd = ["pyjing"]
cmd.append(rootrng.strpath)
cmd.append(messages[0].strpath)
subprocess.check_call(cmd)
def test_validate_by_rng_allmsgs(rootrng, messages):
cmd = ["pyjing"]
cmd.append(rootrng.strpath)
cmd.extend(map(str, messages))
subprocess.check_call(cmd)
| 23.93
| 82
| 0.695779
|
import subprocess
import py
import pytest
@pytest.fixture(
params=["tests/dataset-rdstmc", "tests/dataset-wiki", "tests/dataset-rntutor"]
)
def datasetdir(request):
return py.path.local(request.param)
@pytest.fixture
def messages(datasetdir):
msgdir = datasetdir.join("messages")
return msgdir.listdir(fil="*.xml")
@pytest.fixture
def rncdir(datasetdir):
return datasetdir.join("schemas")
@pytest.fixture
def rootrnc(rncdir):
return rncdir.join("root.rnc")
@pytest.fixture
def rncschemas(rootrnc):
return rootrnc.dirpath().listdir("*.rnc")
def test_validate_by_rnc_onemsg(rootrnc, messages):
cmd = ["pyjing", "-c"]
cmd.append(rootrnc.strpath)
cmd.append(messages[0].strpath)
subprocess.check_call(cmd)
def test_validate_by_rnc_allmsgs(rootrnc, messages):
cmd = ["pyjing", "-c"]
cmd.append(rootrnc.strpath)
cmd.extend(map(str, messages))
subprocess.check_call(cmd)
def test_rnc2rng(rootrnc, tmpdir, rncschemas):
cmd = ["pytrang"]
rngname = rootrnc.new(dirname=tmpdir, ext=".rng")
cmd.append(rootrnc.strpath)
cmd.append(rngname.strpath)
subprocess.check_call(cmd)
rngnames = tmpdir.listdir(fil="*.rng")
assert len(rngnames) == len(rncschemas)
for rnc, rng in zip(sorted(rngnames), sorted(rncschemas)):
assert rnc.purebasename == rng.purebasename
@pytest.fixture
def rngschemas(rootrnc, tmpdir, rncschemas):
cmd = ["pytrang"]
rngname = rootrnc.new(dirname=tmpdir, ext=".rng")
cmd.append(rootrnc.strpath)
cmd.append(rngname.strpath)
subprocess.check_call(cmd)
rngnames = tmpdir.listdir(fil="*.rng")
assert len(rngnames) == len(rncschemas)
for rnc, rng in zip(sorted(rngnames), sorted(rncschemas)):
assert rnc.purebasename == rng.purebasename
return rngnames
@pytest.fixture
def rootrng(rngschemas):
rootschema = rngschemas[0].new(basename="root.rng")
assert rootschema in rngschemas
rootschema.ensure()
return rootschema
def test_validate_by_rng_onemsg(rootrng, messages):
cmd = ["pyjing"]
cmd.append(rootrng.strpath)
cmd.append(messages[0].strpath)
subprocess.check_call(cmd)
def test_validate_by_rng_allmsgs(rootrng, messages):
cmd = ["pyjing"]
cmd.append(rootrng.strpath)
cmd.extend(map(str, messages))
subprocess.check_call(cmd)
| true
| true
|
7904569a26f2b9f75916725b719f6cab3b016a5f
| 2,363
|
py
|
Python
|
examples/AdAccountAdCreativesPostCreateCarouselCallToActionAppInstall.py
|
MyrikLD/facebook-python-business-sdk
|
a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814
|
[
"CNRI-Python"
] | 576
|
2018-05-01T19:09:32.000Z
|
2022-03-31T11:45:11.000Z
|
examples/AdAccountAdCreativesPostCreateCarouselCallToActionAppInstall.py
|
MyrikLD/facebook-python-business-sdk
|
a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814
|
[
"CNRI-Python"
] | 217
|
2018-05-03T07:31:59.000Z
|
2022-03-29T14:19:52.000Z
|
examples/AdAccountAdCreativesPostCreateCarouselCallToActionAppInstall.py
|
MyrikLD/facebook-python-business-sdk
|
a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814
|
[
"CNRI-Python"
] | 323
|
2018-05-01T20:32:26.000Z
|
2022-03-29T07:05:12.000Z
|
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.adaccount import AdAccount
from facebook_business.adobjects.adcreative import AdCreative
from facebook_business.api import FacebookAdsApi
access_token = '<ACCESS_TOKEN>'
app_secret = '<APP_SECRET>'
app_id = '<APP_ID>'
id = '<AD_ACCOUNT_ID>'
FacebookAdsApi.init(access_token=access_token)
fields = [
]
params = {
'name': 'Carousel app ad',
    'object_story_spec': {'page_id':'<pageID>','link_data':{'message':'My message','link':'http://www.example.com/appstoreurl','caption':'WWW.ITUNES.COM','name':'The link name','description':'The link description','child_attachments':[{'link':'http://www.example.com/appstoreurl','image_hash':'<imageHash>','call_to_action':{'type':'USE_MOBILE_APP','value':{'app_link':'<deepLink>'}}},{'link':'http://www.example.com/appstoreurl','image_hash':'<imageHash>','call_to_action':{'type':'USE_MOBILE_APP','value':{'app_link':'<deepLink>'}}},{'link':'http://www.example.com/appstoreurl','image_hash':'<imageHash>','call_to_action':{'type':'USE_MOBILE_APP','value':{'app_link':'<deepLink>'}}},{'link':'http://www.example.com/appstoreurl','image_hash':'<imageHash>','call_to_action':{'type':'USE_MOBILE_APP','value':{'app_link':'<deepLink>'}}}],'multi_share_optimized':True}},
}
print(AdAccount(id).create_ad_creative(
    fields=fields,
    params=params,
))
| 59.075
| 865
| 0.752857
|
from facebook_business.adobjects.adaccount import AdAccount
from facebook_business.adobjects.adcreative import AdCreative
from facebook_business.api import FacebookAdsApi
access_token = '<ACCESS_TOKEN>'
app_secret = '<APP_SECRET>'
app_id = '<APP_ID>'
id = '<AD_ACCOUNT_ID>'
FacebookAdsApi.init(access_token=access_token)
fields = [
]
params = {
'name': 'Carousel app ad',
    'object_story_spec': {'page_id':'<pageID>','link_data':{'message':'My message','link':'http://www.example.com/appstoreurl','caption':'WWW.ITUNES.COM','name':'The link name','description':'The link description','child_attachments':[{'link':'http://www.example.com/appstoreurl','image_hash':'<imageHash>','call_to_action':{'type':'USE_MOBILE_APP','value':{'app_link':'<deepLink>'}}},{'link':'http://www.example.com/appstoreurl','image_hash':'<imageHash>','call_to_action':{'type':'USE_MOBILE_APP','value':{'app_link':'<deepLink>'}}},{'link':'http://www.example.com/appstoreurl','image_hash':'<imageHash>','call_to_action':{'type':'USE_MOBILE_APP','value':{'app_link':'<deepLink>'}}},{'link':'http://www.example.com/appstoreurl','image_hash':'<imageHash>','call_to_action':{'type':'USE_MOBILE_APP','value':{'app_link':'<deepLink>'}}}],'multi_share_optimized':True}},
}
print(AdAccount(id).create_ad_creative(
    fields=fields,
    params=params,
))
| false
| true
|
79045746d76c6cba54bceacab6b61a83a5825c4e
| 1,328
|
py
|
Python
|
components/contrib/_converters/KerasModelHdf5/to_TensorflowSavedModel/component.py
|
Iuiu1234/pipelines
|
1e032f550ce23cd40bfb6827b995248537b07d08
|
[
"Apache-2.0"
] | 2,860
|
2018-05-24T04:55:01.000Z
|
2022-03-31T13:49:56.000Z
|
components/contrib/_converters/KerasModelHdf5/to_TensorflowSavedModel/component.py
|
Iuiu1234/pipelines
|
1e032f550ce23cd40bfb6827b995248537b07d08
|
[
"Apache-2.0"
] | 7,331
|
2018-05-16T09:03:26.000Z
|
2022-03-31T23:22:04.000Z
|
components/contrib/_converters/KerasModelHdf5/to_TensorflowSavedModel/component.py
|
Iuiu1234/pipelines
|
1e032f550ce23cd40bfb6827b995248537b07d08
|
[
"Apache-2.0"
] | 1,359
|
2018-05-15T11:05:41.000Z
|
2022-03-31T09:42:09.000Z
|
from kfp.components import create_component_from_func, InputPath, OutputPath
def keras_convert_hdf5_model_to_tf_saved_model(
model_path: InputPath('KerasModelHdf5'),
converted_model_path: OutputPath('TensorflowSavedModel'),
):
'''Converts Keras HDF5 model to Tensorflow SavedModel format.
Args:
model_path: Keras model in HDF5 format.
converted_model_path: Keras model in Tensorflow SavedModel format.
Annotations:
author: Alexey Volkov <alexey.volkov@ark-kun.com>
'''
from pathlib import Path
from tensorflow import keras
model = keras.models.load_model(filepath=model_path)
keras.models.save_model(model=model, filepath=converted_model_path, save_format='tf')
if __name__ == '__main__':
keras_convert_hdf5_model_to_tf_saved_model_op = create_component_from_func(
keras_convert_hdf5_model_to_tf_saved_model,
base_image='tensorflow/tensorflow:2.3.0',
packages_to_install=['h5py==2.10.0'],
output_component_file='component.yaml',
annotations={
"author": "Alexey Volkov <alexey.volkov@ark-kun.com>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/KerasModelHdf5/to_TensorflowSavedModel/component.yaml",
},
)
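# Hedged usage sketch (added for illustration; the file names are assumptions):
# the conversion function can also be called directly, outside a KFP pipeline:
#   keras_convert_hdf5_model_to_tf_saved_model(
#       model_path='my_model.h5',
#       converted_model_path='my_saved_model_dir',
#   )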
| 39.058824
| 182
| 0.734187
|
from kfp.components import create_component_from_func, InputPath, OutputPath
def keras_convert_hdf5_model_to_tf_saved_model(
model_path: InputPath('KerasModelHdf5'),
converted_model_path: OutputPath('TensorflowSavedModel'),
):
from pathlib import Path
from tensorflow import keras
model = keras.models.load_model(filepath=model_path)
keras.models.save_model(model=model, filepath=converted_model_path, save_format='tf')
if __name__ == '__main__':
keras_convert_hdf5_model_to_tf_saved_model_op = create_component_from_func(
keras_convert_hdf5_model_to_tf_saved_model,
base_image='tensorflow/tensorflow:2.3.0',
packages_to_install=['h5py==2.10.0'],
output_component_file='component.yaml',
annotations={
"author": "Alexey Volkov <alexey.volkov@ark-kun.com>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/KerasModelHdf5/to_TensorflowSavedModel/component.yaml",
},
)
| true
| true
|
79045777ea1ab9f9218effad7ad0135b3b82b730
| 254
|
py
|
Python
|
src/packages/tests/test_company_details.py
|
fbeltrao/az-func-gh-deployment
|
92d3bdfc3f9e3c554dc76a46f9dde28d5e04b98e
|
[
"MIT"
] | null | null | null |
src/packages/tests/test_company_details.py
|
fbeltrao/az-func-gh-deployment
|
92d3bdfc3f9e3c554dc76a46f9dde28d5e04b98e
|
[
"MIT"
] | 1
|
2021-12-26T15:30:40.000Z
|
2021-12-26T15:30:40.000Z
|
src/packages/tests/test_company_details.py
|
fbeltrao/az-func-gh-deployment
|
92d3bdfc3f9e3c554dc76a46f9dde28d5e04b98e
|
[
"MIT"
] | null | null | null |
import pytest
from contoso import get_company_name, get_company_address
def test_get_company_name():
assert get_company_name() == "Contoso"
def test_get_company_address():
assert get_company_address() == "Contosostrasse 1, Zurich, Switzerland"
| 28.222222
| 75
| 0.791339
|
import pytest
from contoso import get_company_name, get_company_address
def test_get_company_name():
assert get_company_name() == "Contoso"
def test_get_company_address():
assert get_company_address() == "Contosostrasse 1, Zurich, Switzerland"
| true
| true
|
790457b446819e1456316a24dadb693fe44bdd5f
| 7,761
|
py
|
Python
|
Projects/1_Sudoku/solution.py
|
justinlnx/artificial-intelligence
|
5d742d49bc43adf5e6a17ba57a8fef5831ba2ed4
|
[
"MIT"
] | null | null | null |
Projects/1_Sudoku/solution.py
|
justinlnx/artificial-intelligence
|
5d742d49bc43adf5e6a17ba57a8fef5831ba2ed4
|
[
"MIT"
] | null | null | null |
Projects/1_Sudoku/solution.py
|
justinlnx/artificial-intelligence
|
5d742d49bc43adf5e6a17ba57a8fef5831ba2ed4
|
[
"MIT"
] | null | null | null |
from utils import *
row_units = [cross(r, cols) for r in rows]
column_units = [cross(rows, c) for c in cols]
square_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]
unitlist = row_units + column_units + square_units
# TODO: Update the unit list to add the new diagonal units
diagonal1 = [['A1', 'B2', 'C3', 'D4', 'E5', 'F6', 'G7', 'H8', 'I9']]
diagonal2 = [['A9', 'B8', 'C7', 'D6', 'E5', 'F4', 'G3', 'H2', 'I1']]
unitlist = unitlist + diagonal1 + diagonal2
# Must be called after all units (including diagonals) are added to the unitlist
units = extract_units(unitlist, boxes)
peers = extract_peers(units, boxes)
def naked_twins(values):
"""Eliminate values using the naked twins strategy.
The naked twins strategy says that if you have two or more unallocated boxes
in a unit and there are only two digits that can go in those two boxes, then
those two digits can be eliminated from the possible assignments of all other
boxes in the same unit.
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict
The values dictionary with the naked twins eliminated from peers
Notes
-----
Your solution can either process all pairs of naked twins from the input once,
or it can continue processing pairs of naked twins until there are no such
pairs remaining -- the project assistant test suite will accept either
convention. However, it will not accept code that does not process all pairs
of naked twins from the original input. (For example, if you start processing
pairs of twins and eliminate another pair of twins before the second pair
is processed then your code will fail the PA test suite.)
The first convention is preferred for consistency with the other strategies,
and because it is simpler (since the reduce_puzzle function already calls this
strategy repeatedly).
See Also
--------
Pseudocode for this algorithm on github:
https://github.com/udacity/artificial-intelligence/blob/master/Projects/1_Sudoku/pseudocode.md
"""
"""
out = values.copy()
len_2_boxes = [box for box in values if len(values[box]) == 2]
for boxA in len_2_boxes:
boxAPeers = peers[boxA]
for boxB in boxAPeers:
if values[boxA] == values[boxB]:
intersect = [val for val in boxAPeers if val in peers[boxB]]
for peer in intersect:
out[peer] = out[peer].replace(values[boxA], '')
return out
"""
out = values.copy()
for boxA in values:
boxAPeers = peers[boxA]
for boxB in boxAPeers:
if values[boxA] == values[boxB] and len(values[boxA]) == 2:
intersect = [val for val in boxAPeers if val in peers[boxB]]
for peer in intersect:
for digit in values[boxA]:
out[peer] = out[peer].replace(digit, '')
return out
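# ---------------------------------------------------------------------------
# Editor's note: a tiny worked example of the elimination above (box names
# and candidate strings are hypothetical). If 'A1' and 'A2' share a unit and
# both hold exactly '23', then '2' and '3' are removed from every common peer:
#   before: {'A1': '23', 'A2': '23', 'A3': '1234', ...}
#   after:  {'A1': '23', 'A2': '23', 'A3': '14',   ...}
# ---------------------------------------------------------------------------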
def eliminate(values):
"""Apply the eliminate strategy to a Sudoku puzzle
The eliminate strategy says that if a box has a value assigned, then none
of the peers of that box can have the same value.
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict
The values dictionary with the assigned values eliminated from peers
"""
solved_values = [box for box in values.keys() if len(values[box]) == 1]
for box in solved_values:
digit = values[box]
for peer in peers[box]:
values[peer] = values[peer].replace(digit,'')
return values
def only_choice(values):
"""Apply the only choice strategy to a Sudoku puzzle
The only choice strategy says that if only one box in a unit allows a certain
digit, then that box must be assigned that digit.
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict
The values dictionary with all single-valued boxes assigned
Notes
-----
You should be able to complete this function by copying your code from the classroom
"""
for unit in unitlist:
for digit in '123456789':
dplaces = [box for box in unit if digit in values[box]]
if len(dplaces) == 1:
values[dplaces[0]] = digit
return values
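# Editor's note: a small illustration (hypothetical values). If '7' appears
# in only one box's candidates within a unit, that box is assigned '7':
#   before: {'B1': '79', 'B2': '12', 'B3': '129', ...}   # '7' fits only B1
#   after:  {'B1': '7',  'B2': '12', 'B3': '129', ...}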
def reduce_puzzle(values):
"""Reduce a Sudoku puzzle by repeatedly applying all constraint strategies
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict or False
The values dictionary after continued application of the constraint strategies
no longer produces any changes, or False if the puzzle is unsolvable
"""
solved_values = [box for box in values.keys() if len(values[box]) == 1]
stalled = False
while not stalled:
solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])
values = eliminate(values)
values = only_choice(values)
values = naked_twins(values)
solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])
stalled = solved_values_before == solved_values_after
if len([box for box in values.keys() if len(values[box]) == 0]):
return False
return values
def search(values):
"""Apply depth first search to solve Sudoku puzzles in order to solve puzzles
that cannot be solved by repeated reduction alone.
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict or False
The values dictionary with all boxes assigned or False
Notes
-----
You should be able to complete this function by copying your code from the classroom
and extending it to call the naked twins strategy.
"""
"Using depth-first search and propagation, try all possible values."
# First, reduce the puzzle using the previous function
values = reduce_puzzle(values)
if values is False:
return False ## Failed earlier
if all(len(values[s]) == 1 for s in boxes):
return values ## Solved!
# Choose one of the unfilled squares with the fewest possibilities
n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)
# Now use recurrence to solve each one of the resulting sudokus, and
for value in values[s]:
new_sudoku = values.copy()
new_sudoku[s] = value
attempt = search(new_sudoku)
if attempt:
return attempt
def solve(grid):
"""Find the solution to a Sudoku puzzle using search and constraint propagation
Parameters
----------
grid(string)
a string representing a sudoku grid.
Ex. '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
Returns
-------
dict or False
The dictionary representation of the final sudoku grid or False if no solution exists.
"""
values = grid2values(grid)
values = search(values)
return values
if __name__ == "__main__":
diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
display(grid2values(diag_sudoku_grid))
result = solve(diag_sudoku_grid)
display(result)
try:
import PySudoku
PySudoku.play(grid2values(diag_sudoku_grid), result, history)
except SystemExit:
pass
except:
print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
| 33.309013
| 113
| 0.633552
|
from utils import *
row_units = [cross(r, cols) for r in rows]
column_units = [cross(rows, c) for c in cols]
square_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]
unitlist = row_units + column_units + square_units
diagonal1 = [['A1', 'B2', 'C3', 'D4', 'E5', 'F6', 'G7', 'H8', 'I9']]
diagonal2 = [['A9', 'B8', 'C7', 'D6', 'E5', 'F4', 'G3', 'H2', 'I1']]
unitlist = unitlist + diagonal1 + diagonal2
units = extract_units(unitlist, boxes)
peers = extract_peers(units, boxes)
def naked_twins(values):
out = values.copy()
for boxA in values:
boxAPeers = peers[boxA]
for boxB in boxAPeers:
if values[boxA] == values[boxB] and len(values[boxA]) == 2:
intersect = [val for val in boxAPeers if val in peers[boxB]]
for peer in intersect:
for digit in values[boxA]:
out[peer] = out[peer].replace(digit, '')
return out
def eliminate(values):
solved_values = [box for box in values.keys() if len(values[box]) == 1]
for box in solved_values:
digit = values[box]
for peer in peers[box]:
values[peer] = values[peer].replace(digit,'')
return values
def only_choice(values):
for unit in unitlist:
for digit in '123456789':
dplaces = [box for box in unit if digit in values[box]]
if len(dplaces) == 1:
values[dplaces[0]] = digit
return values
def reduce_puzzle(values):
solved_values = [box for box in values.keys() if len(values[box]) == 1]
stalled = False
while not stalled:
solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])
values = eliminate(values)
values = only_choice(values)
values = naked_twins(values)
solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])
stalled = solved_values_before == solved_values_after
if len([box for box in values.keys() if len(values[box]) == 0]):
return False
return values
def search(values):
values = reduce_puzzle(values)
if values is False:
        return False
    if all(len(values[s]) == 1 for s in boxes):
        return values
    n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)
for value in values[s]:
new_sudoku = values.copy()
new_sudoku[s] = value
attempt = search(new_sudoku)
if attempt:
return attempt
def solve(grid):
values = grid2values(grid)
values = search(values)
return values
if __name__ == "__main__":
diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
display(grid2values(diag_sudoku_grid))
result = solve(diag_sudoku_grid)
display(result)
try:
import PySudoku
PySudoku.play(grid2values(diag_sudoku_grid), result, history)
except SystemExit:
pass
except:
print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
| true
| true
|
790457e163f06b17aa5b58b9b26ab399be037c71
| 7,258
|
py
|
Python
|
tests/accsr/test_remote_storage.py
|
AnesBenmerzoug/accsr
|
0485509854fb8ff4919a1d321f57773850a4c98e
|
[
"MIT"
] | null | null | null |
tests/accsr/test_remote_storage.py
|
AnesBenmerzoug/accsr
|
0485509854fb8ff4919a1d321f57773850a4c98e
|
[
"MIT"
] | null | null | null |
tests/accsr/test_remote_storage.py
|
AnesBenmerzoug/accsr
|
0485509854fb8ff4919a1d321f57773850a4c98e
|
[
"MIT"
] | null | null | null |
import logging
import os
from typing import Generator
import pytest
@pytest.fixture(scope="module", autouse=True)
def change_to_resources_dir(test_resources, request):
os.chdir(test_resources)
yield
os.chdir(request.config.invocation_dir)
@pytest.fixture()
def test_filename(
change_to_resources_dir, storage, request
) -> Generator[str, None, None]:
"""Pushes a file to remote storage, yields its filename and then deletes it from remote storage"""
filename = request.param
storage.push_file(filename)
yield filename
storage.delete(filename)
NAME_COLLISIONS_DIR_NAME = "storage_name_collisions"
@pytest.fixture()
def setup_name_collision(change_to_resources_dir, storage):
"""
Pushes files and dirs with colliding names to remote storage, yields files pushed
and deletes everything at cleanup
"""
pushed_objects = storage.push(NAME_COLLISIONS_DIR_NAME)
yield pushed_objects
storage.delete(NAME_COLLISIONS_DIR_NAME)
@pytest.fixture()
def test_dirname(
change_to_resources_dir, storage, request
) -> Generator[str, None, None]:
"""Pushes a directory to remote storage, yields its name and then deletes it from remote storage"""
dirname = request.param
storage.push_directory(dirname)
yield dirname
storage.delete(dirname)
def test_delete_no_matches(storage, caplog):
with caplog.at_level(logging.WARNING):
deleted_files = storage.delete("there is no such file")
assert len(deleted_files) == 0
assert "Not deleting anything" in caplog.text
def test_delete_file(storage):
storage.push_file("sample.txt", overwrite_existing=True)
assert len(storage.list_objects("sample.txt")) == 1
deleted_objects = storage.delete("sample.txt")
assert len(deleted_objects) == 1
assert len(storage.list_objects("sample.txt")) == 0
def test_delete_with_base_path(storage):
base_path = "base_path"
storage.set_remote_base_path(base_path)
storage.push_file("sample.txt", overwrite_existing=True)
assert len(storage.list_objects("sample.txt")) == 1
deleted_objects = storage.delete("sample.txt")
assert len(deleted_objects) == 1
assert deleted_objects[0].name == f"{base_path}/sample.txt"
def test_delete_dir(storage):
storage.push_directory("sample_dir", overwrite_existing=True)
assert len(storage.list_objects("sample_dir")) == 2
deleted_objects = storage.delete("sample_dir")
assert len(deleted_objects) == 2
assert len(storage.list_objects("sample_dir")) == 0
@pytest.mark.parametrize(
"test_filename",
["sample.txt"],
indirect=["test_filename"],
)
def test_push_file_empty_base_path(storage, test_filename):
remote_objects = storage.push(test_filename)
assert len(remote_objects) == 1
# we need lstrip because s3 paths (and names) start with "/" while google storage paths start without it...
assert remote_objects[0].name.lstrip("/") == test_filename
@pytest.mark.parametrize(
"test_filename",
["sample.txt"],
indirect=["test_filename"],
)
def test_push_file_nonempty_base_path(storage, test_filename):
base_path = "base_path"
storage.set_remote_base_path(base_path)
remote_objects = storage.push(test_filename)
assert len(remote_objects) == 1
assert remote_objects[0].name.lstrip("/") == f"{base_path}/{test_filename}"
@pytest.mark.parametrize(
"test_dirname",
["sample_dir"],
indirect=["test_dirname"],
)
def test_push_directory(storage, test_dirname):
remote_objects = storage.push(test_dirname)
assert len(remote_objects) == 2
assert len(storage.list_objects(test_dirname)) == 2
@pytest.mark.parametrize(
"file_or_dir_name", ["non_existing_file.txt", "non_existing_dir"]
)
def test_push_non_existing(storage, file_or_dir_name):
with pytest.raises(
FileNotFoundError, match="does not refer to a file or directory"
):
storage.push(file_or_dir_name)
@pytest.mark.parametrize(
"test_filename",
["sample.txt"],
indirect=["test_filename"],
)
def test_pull_file(storage, test_filename, tmpdir):
local_base_dir = tmpdir.mkdir("remote_storage")
storage.pull(test_filename, local_base_dir=local_base_dir)
assert os.path.isfile(os.path.join(local_base_dir, test_filename))
pulled_files = storage.pull(test_filename)
assert len(pulled_files) == 0
@pytest.mark.parametrize(
"test_filename",
["sample.txt"],
indirect=["test_filename"],
)
def test_pull_file_to_existing_dir_path(storage, test_filename, tmpdir):
local_base_dir = tmpdir.mkdir("remote_storage")
local_base_dir.mkdir(test_filename)
with pytest.raises(
FileExistsError,
match="Cannot pull file to a path which is an existing directory:",
):
storage.pull(test_filename, local_base_dir=local_base_dir)
@pytest.mark.parametrize(
"test_dirname",
["sample_dir"],
indirect=["test_dirname"],
)
def test_pull_dir(storage, test_dirname, tmpdir):
local_base_dir = tmpdir.mkdir("remote_storage")
storage.pull(test_dirname, local_base_dir=local_base_dir)
assert os.path.isdir(os.path.join(local_base_dir, test_dirname))
assert len(os.listdir(os.path.join(local_base_dir, test_dirname))) == 2
pulled_files = storage.pull(test_dirname)
assert len(pulled_files) == 0
@pytest.mark.parametrize(
"file_or_dir_name", ["non_existing_file.txt", "non_existing_dir"]
)
def test_pull_non_existing(storage, file_or_dir_name, caplog):
with caplog.at_level(logging.WARNING):
pulled_files = storage.pull(file_or_dir_name)
assert len(pulled_files) == 0
assert "No such remote file or directory" in caplog.text
def test_name_collisions_pulling_properly(setup_name_collision, storage, tmpdir):
storage.set_remote_base_path(NAME_COLLISIONS_DIR_NAME)
local_base_dir = tmpdir.mkdir("remote_storage")
colliding_file_name = "file.txt.collision"
colliding_dir_name = "dir_name_collision"
storage.pull("file.txt", local_base_dir=local_base_dir)
storage.pull("dir_name", local_base_dir=local_base_dir)
assert not os.path.isfile(os.path.join(local_base_dir, colliding_file_name))
assert os.path.isfile(os.path.join(local_base_dir, "file.txt"))
assert not os.path.isdir(os.path.join(local_base_dir, colliding_dir_name))
assert os.path.isdir(os.path.join(local_base_dir, "dir_name"))
storage.pull(colliding_file_name, local_base_dir=local_base_dir)
assert os.path.isfile(os.path.join(local_base_dir, colliding_file_name))
storage.pull(colliding_dir_name, local_base_dir=local_base_dir)
assert os.path.isfile(os.path.join(local_base_dir, colliding_dir_name, "file.txt"))
def test_name_collisions_deleting_properly(setup_name_collision, storage):
storage.set_remote_base_path(NAME_COLLISIONS_DIR_NAME)
storage.delete("file.txt")
remaining_object_names = [
obj.name.lstrip("/").lstrip(f"{NAME_COLLISIONS_DIR_NAME}/")
for obj in storage.list_objects("")
]
assert "file.txt" not in remaining_object_names
assert "file.txt.collision" in remaining_object_names
assert "dir_name/file.txt" in remaining_object_names
# TODO or not TODO: many cases are missing - checking names, testing overwriting.
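# ---------------------------------------------------------------------------
# Editor's note: these tests depend on `storage` and `test_resources`
# fixtures defined outside this file (typically in a conftest.py). A
# hypothetical minimal sketch is shown below; the concrete storage backend,
# credentials and resource path are all assumptions.
# ---------------------------------------------------------------------------
# conftest.py (sketch)
# import pytest
# from accsr.remote_storage import RemoteStorage, RemoteStorageConfig  # assumed API
#
# @pytest.fixture(scope="module")
# def test_resources():
#     return "tests/resources"          # assumed location of sample files
#
# @pytest.fixture()
# def storage():
#     config = RemoteStorageConfig(
#         provider="google_storage",    # assumption: any supported provider
#         key="...", secret="...", bucket="test-bucket")
#     return RemoteStorage(config)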
| 33.601852
| 111
| 0.74194
|
import logging
import os
from typing import Generator
import pytest
@pytest.fixture(scope="module", autouse=True)
def change_to_resources_dir(test_resources, request):
os.chdir(test_resources)
yield
os.chdir(request.config.invocation_dir)
@pytest.fixture()
def test_filename(
change_to_resources_dir, storage, request
) -> Generator[str, None, None]:
filename = request.param
storage.push_file(filename)
yield filename
storage.delete(filename)
NAME_COLLISIONS_DIR_NAME = "storage_name_collisions"
@pytest.fixture()
def setup_name_collision(change_to_resources_dir, storage):
pushed_objects = storage.push(NAME_COLLISIONS_DIR_NAME)
yield pushed_objects
storage.delete(NAME_COLLISIONS_DIR_NAME)
@pytest.fixture()
def test_dirname(
change_to_resources_dir, storage, request
) -> Generator[str, None, None]:
dirname = request.param
storage.push_directory(dirname)
yield dirname
storage.delete(dirname)
def test_delete_no_matches(storage, caplog):
with caplog.at_level(logging.WARNING):
deleted_files = storage.delete("there is no such file")
assert len(deleted_files) == 0
assert "Not deleting anything" in caplog.text
def test_delete_file(storage):
storage.push_file("sample.txt", overwrite_existing=True)
assert len(storage.list_objects("sample.txt")) == 1
deleted_objects = storage.delete("sample.txt")
assert len(deleted_objects) == 1
assert len(storage.list_objects("sample.txt")) == 0
def test_delete_with_base_path(storage):
base_path = "base_path"
storage.set_remote_base_path(base_path)
storage.push_file("sample.txt", overwrite_existing=True)
assert len(storage.list_objects("sample.txt")) == 1
deleted_objects = storage.delete("sample.txt")
assert len(deleted_objects) == 1
assert deleted_objects[0].name == f"{base_path}/sample.txt"
def test_delete_dir(storage):
storage.push_directory("sample_dir", overwrite_existing=True)
assert len(storage.list_objects("sample_dir")) == 2
deleted_objects = storage.delete("sample_dir")
assert len(deleted_objects) == 2
assert len(storage.list_objects("sample_dir")) == 0
@pytest.mark.parametrize(
"test_filename",
["sample.txt"],
indirect=["test_filename"],
)
def test_push_file_empty_base_path(storage, test_filename):
remote_objects = storage.push(test_filename)
assert len(remote_objects) == 1
assert remote_objects[0].name.lstrip("/") == test_filename
@pytest.mark.parametrize(
"test_filename",
["sample.txt"],
indirect=["test_filename"],
)
def test_push_file_nonempty_base_path(storage, test_filename):
base_path = "base_path"
storage.set_remote_base_path(base_path)
remote_objects = storage.push(test_filename)
assert len(remote_objects) == 1
assert remote_objects[0].name.lstrip("/") == f"{base_path}/{test_filename}"
@pytest.mark.parametrize(
"test_dirname",
["sample_dir"],
indirect=["test_dirname"],
)
def test_push_directory(storage, test_dirname):
remote_objects = storage.push(test_dirname)
assert len(remote_objects) == 2
assert len(storage.list_objects(test_dirname)) == 2
@pytest.mark.parametrize(
"file_or_dir_name", ["non_existing_file.txt", "non_existing_dir"]
)
def test_push_non_existing(storage, file_or_dir_name):
with pytest.raises(
FileNotFoundError, match="does not refer to a file or directory"
):
storage.push(file_or_dir_name)
@pytest.mark.parametrize(
"test_filename",
["sample.txt"],
indirect=["test_filename"],
)
def test_pull_file(storage, test_filename, tmpdir):
local_base_dir = tmpdir.mkdir("remote_storage")
storage.pull(test_filename, local_base_dir=local_base_dir)
assert os.path.isfile(os.path.join(local_base_dir, test_filename))
pulled_files = storage.pull(test_filename)
assert len(pulled_files) == 0
@pytest.mark.parametrize(
"test_filename",
["sample.txt"],
indirect=["test_filename"],
)
def test_pull_file_to_existing_dir_path(storage, test_filename, tmpdir):
local_base_dir = tmpdir.mkdir("remote_storage")
local_base_dir.mkdir(test_filename)
with pytest.raises(
FileExistsError,
match="Cannot pull file to a path which is an existing directory:",
):
storage.pull(test_filename, local_base_dir=local_base_dir)
@pytest.mark.parametrize(
"test_dirname",
["sample_dir"],
indirect=["test_dirname"],
)
def test_pull_dir(storage, test_dirname, tmpdir):
local_base_dir = tmpdir.mkdir("remote_storage")
storage.pull(test_dirname, local_base_dir=local_base_dir)
assert os.path.isdir(os.path.join(local_base_dir, test_dirname))
assert len(os.listdir(os.path.join(local_base_dir, test_dirname))) == 2
pulled_files = storage.pull(test_dirname)
assert len(pulled_files) == 0
@pytest.mark.parametrize(
"file_or_dir_name", ["non_existing_file.txt", "non_existing_dir"]
)
def test_pull_non_existing(storage, file_or_dir_name, caplog):
with caplog.at_level(logging.WARNING):
pulled_files = storage.pull(file_or_dir_name)
assert len(pulled_files) == 0
assert "No such remote file or directory" in caplog.text
def test_name_collisions_pulling_properly(setup_name_collision, storage, tmpdir):
storage.set_remote_base_path(NAME_COLLISIONS_DIR_NAME)
local_base_dir = tmpdir.mkdir("remote_storage")
colliding_file_name = "file.txt.collision"
colliding_dir_name = "dir_name_collision"
storage.pull("file.txt", local_base_dir=local_base_dir)
storage.pull("dir_name", local_base_dir=local_base_dir)
assert not os.path.isfile(os.path.join(local_base_dir, colliding_file_name))
assert os.path.isfile(os.path.join(local_base_dir, "file.txt"))
assert not os.path.isdir(os.path.join(local_base_dir, colliding_dir_name))
assert os.path.isdir(os.path.join(local_base_dir, "dir_name"))
storage.pull(colliding_file_name, local_base_dir=local_base_dir)
assert os.path.isfile(os.path.join(local_base_dir, colliding_file_name))
storage.pull(colliding_dir_name, local_base_dir=local_base_dir)
assert os.path.isfile(os.path.join(local_base_dir, colliding_dir_name, "file.txt"))
def test_name_collisions_deleting_properly(setup_name_collision, storage):
storage.set_remote_base_path(NAME_COLLISIONS_DIR_NAME)
storage.delete("file.txt")
remaining_object_names = [
obj.name.lstrip("/").lstrip(f"{NAME_COLLISIONS_DIR_NAME}/")
for obj in storage.list_objects("")
]
assert "file.txt" not in remaining_object_names
assert "file.txt.collision" in remaining_object_names
assert "dir_name/file.txt" in remaining_object_names
| true
| true
|
7904580d93de3c3f61a13af6030eb25dbfa0ba0e
| 16,297
|
py
|
Python
|
cptac/pancan/file_download.py
|
PayneLab/cptac
|
531ec27a618270a2405bf876443fa58d0362b3c2
|
[
"Apache-2.0"
] | 53
|
2019-05-30T02:05:04.000Z
|
2022-03-16T00:38:58.000Z
|
cptac/pancan/file_download.py
|
PayneLab/cptac
|
531ec27a618270a2405bf876443fa58d0362b3c2
|
[
"Apache-2.0"
] | 20
|
2020-02-16T23:50:43.000Z
|
2021-09-26T10:07:59.000Z
|
cptac/pancan/file_download.py
|
PayneLab/cptac
|
531ec27a618270a2405bf876443fa58d0362b3c2
|
[
"Apache-2.0"
] | 17
|
2019-09-27T20:55:09.000Z
|
2021-10-19T07:18:06.000Z
|
# Copyright 2018 Samuel Payne sam_payne@byu.edu
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import requests
import shutil
import warnings
import cptac
from cptac.file_download import get_box_token
from cptac.exceptions import DatasetAlreadyInstalledWarning, InvalidParameterError, NoInternetError, PdcDownloadError
from .pancanbrca import SOURCES as BRCA_SOURCES
from .pancanccrcc import SOURCES as CCRCC_SOURCES
from .pancancoad import SOURCES as COAD_SOURCES
from .pancangbm import SOURCES as GBM_SOURCES
from .pancanhnscc import SOURCES as HNSCC_SOURCES
from .pancanlscc import SOURCES as LSCC_SOURCES
from .pancanluad import SOURCES as LUAD_SOURCES
from .pancanov import SOURCES as OV_SOURCES
from .pancanucec import SOURCES as UCEC_SOURCES
from .pancanpdac import SOURCES as PDAC_SOURCES
STUDY_IDS_MAP = {
"pdcbrca": {
"acetylome": "PDC000239", # Prospective Breast BI Acetylome
"phosphoproteome": "PDC000121", # Prospective BRCA Phosphoproteome S039-2
"proteome": "PDC000120", # Prospective BRCA Proteome S039-1
},
"pdcccrcc": {
"phosphoproteome": "PDC000128", # CPTAC CCRCC Discovery Study - Phosphoproteme S044-2
"proteome": "PDC000127", # CPTAC CCRCC Discovery Study - Proteome S044-1
},
"pdccoad": {
"phosphoproteome": "PDC000117", # Prospective COAD Phosphoproteome S037-3
"proteome": "PDC000116", # Prospective COAD Proteome S037-2
},
"pdcgbm": {
"acetylome": "PDC000245", # CPTAC GBM Discovery Study - Acetylome
"phosphoproteome": "PDC000205", # CPTAC GBM Discovery Study - Phosphoproteome
"proteome": "PDC000204", # CPTAC GBM Discovery Study - Proteome
},
"pdchnscc": {
"phosphoproteome": "PDC000222", # CPTAC HNSCC Discovery Study - Phosphoproteome
"proteome": "PDC000221", # CPTAC HNSCC Discovery Study - Proteome
},
"pdclscc": {
"acetylome": "PDC000233", # CPTAC LSCC Discovery Study - Acetylome
"phosphoproteome": "PDC000232", # CPTAC LSCC Discovery Study - Phosphoproteome
"proteome": "PDC000234", # CPTAC LSCC Discovery Study - Proteome
"ubiquitylome": "PDC000237", # CPTAC LSCC Discovery Study - Ubiquitylome
},
"pdcluad": {
"acetylome": "PDC000224", # CPTAC LUAD Discovery Study - Acetylome
"phosphoproteome": "PDC000149", # CPTAC LUAD Discovery Study - Phosphoproteome
"proteome": "PDC000153", # CPTAC LUAD Discovery Study - Proteome
},
"pdcov": {
"phosphoproteome": "PDC000119", # Prospective OV Phosphoproteome S038-3
"proteome": "PDC000118", # Prospective OV Proteome S038-2
},
"pdcpdac": {
"proteome": "PDC000270", # CPTAC PDAC Discovery Study - Proteome
"phosphoproteome": "PDC000271", # CPTAC PDAC Discovery Study - Phosphoproteome
},
"pdcucec": {
"acetylome": "PDC000226", # CPTAC UCEC Discovery Study - Acetylome
"phosphoproteome": "PDC000126", # UCEC Discovery - Phosphoproteome S043-2
"proteome": "PDC000125", # UCEC Discovery - Proteome S043-1
},
}
def download(dataset, version="latest", redownload=False):
dataset = dataset.lower()
if dataset.startswith("pdc"):
box_token = get_box_token()
if dataset != 'pdcbrca': # pdcbrca is the only dataset that doesn't need a mapping file for PDC
mapping = cptac.download(dataset, version=version, redownload=redownload, _box_auth=True, _box_token=box_token) # download helper file for mapping aliquots to patient IDs
omics = _pdc_download(dataset, version=version, redownload=redownload)
if omics and mapping:
return True
else:
return False
else: # pdcbrca only needs omics
omics = _pdc_download(dataset, version=version, redownload=redownload)
if omics:
return True
else:
return False
elif dataset.startswith("pancan") or dataset == "all":
box_token = get_box_token()
if dataset == "pancanbrca":
sources = BRCA_SOURCES
elif dataset == "pancanccrcc":
sources = CCRCC_SOURCES
elif dataset == "pancancoad":
sources = COAD_SOURCES
elif dataset == "pancangbm":
sources = GBM_SOURCES
elif dataset == "pancanhnscc":
sources = HNSCC_SOURCES
elif dataset == "pancanlscc":
sources = LSCC_SOURCES
elif dataset == "pancanluad":
sources = LUAD_SOURCES
elif dataset == "pancanov":
sources = OV_SOURCES
elif dataset == "pancanucec":
sources = UCEC_SOURCES
elif dataset == "pancanpdac":
sources = PDAC_SOURCES
elif dataset == "all":
sources = sorted(set(BRCA_SOURCES + CCRCC_SOURCES + COAD_SOURCES + GBM_SOURCES + HNSCC_SOURCES + LSCC_SOURCES + LUAD_SOURCES + OV_SOURCES + UCEC_SOURCES + PDAC_SOURCES))
else:
raise InvalidParameterError(f"{dataset} is not a valid dataset.")
overall_success = True
for source in sources:
if source.startswith("pdc"):
single_success = download(source, version=version, redownload=redownload)
else:
single_success = cptac.download(source, version=version, redownload=redownload, _box_auth=True, _box_token=box_token)
if not single_success:
overall_success = False
return overall_success
else:
return cptac.download(dataset, version=version, redownload=redownload, _box_auth=True)
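# ---------------------------------------------------------------------------
# Editor's note: a hedged usage sketch for the download() dispatcher above
# (assuming the package re-exports it, e.g. as cptac.pancan.download).
# ---------------------------------------------------------------------------
# import cptac.pancan
# cptac.pancan.download("pdcgbm")     # one PDC dataset (all its data types)
# cptac.pancan.download("pancangbm")  # every GBM source, PDC- and Box-hosted
# cptac.pancan.download("all")        # union of all pan-cancer sources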
def download_pdc_id(pdc_id, _download_msg=True):
"""Download a PDC dataset by its PDC study id.
Returns:
pandas.DataFrame: The clinical table for the study id.
pandas.DataFrame: The quantitative table for the study id.
"""
if _download_msg:
clin_msg = f"Downloading clinical table for {pdc_id}..."
print(clin_msg, end="\r")
# Download the clinical table
clin = _download_study_clin(pdc_id).\
set_index("case_submitter_id").\
sort_index()
if _download_msg:
print(" " * len(clin_msg), end="\r")
bio_msg = f"Downloading biospecimenPerStudy table for {pdc_id}..."
print(bio_msg, end="\r")
    # Get the biospecimenPerStudy table, which has both patient IDs and aliquot IDs
bio = _download_study_biospecimen(pdc_id).\
set_index("aliquot_submitter_id").\
sort_index()
if _download_msg:
print(" " * len(bio_msg), end="\r")
quant_msg = f"Downloading quantitative table for {pdc_id}..."
print(quant_msg, end="\r")
# Get the quantitative data table
quant = _download_study_quant(pdc_id)
if _download_msg:
print(" " * len(quant_msg), end="\r")
format_msg = f"Formatting tables for {pdc_id}..."
print(format_msg, end="\r")
# Join the patient IDs from the biospecimenPerStudy table into the quant table
quant = quant.\
assign(aliquot_submitter_id=quant.iloc[:, 0].str.split(":", n=1, expand=True)[1]).\
drop(columns=quant.columns[0]).\
set_index("aliquot_submitter_id").\
sort_index()
quant = bio.\
join(quant, how="inner").\
reset_index().\
set_index(["case_submitter_id", "aliquot_submitter_id"]).\
sort_index()
# Clear message
if _download_msg:
print(" " * len(format_msg), end="\r")
return clin, quant
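# Editor's note: an illustrative call (the study ID is taken from
# STUDY_IDS_MAP above). Returns two DataFrames:
# clin, quant = download_pdc_id("PDC000204")  # CPTAC GBM Discovery proteome
# clin is indexed by case_submitter_id; quant by (case, aliquot) submitter IDs.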
def list_pdc_datasets():
for dataset in STUDY_IDS_MAP.keys():
print(f"Pdc{dataset[3:].title()}:")
for data_type in STUDY_IDS_MAP[dataset].keys():
print(f"\t{data_type}: {STUDY_IDS_MAP[dataset][data_type]}")
# Helper functions
def _pdc_download(dataset, version, redownload):
"""Download data for the specified cancer type from the PDC."""
dataset = str.lower(dataset)
if dataset == "pdcall":
overall_result = True
for dataset in STUDY_IDS_MAP.keys():
            if not _pdc_download(dataset, version, redownload):
overall_result = False
return overall_result
if not dataset.startswith("pdc"):
raise InvalidParameterError(f"pdc_download function can only be used for PDC datasets, which start with the prefix 'pdc'. You tried to download '{dataset}'.")
if dataset not in STUDY_IDS_MAP.keys():
raise InvalidParameterError(f"PDC dataset must be one of the following:\n{list(STUDY_IDS_MAP.keys())}\nYou passed '{dataset}'.")
dataset_ids = STUDY_IDS_MAP[dataset]
# Get the directory to where to store the data, and see if it exists
path_here = os.path.abspath(os.path.dirname(__file__))
cancer_dir = os.path.join(path_here, f"data_{dataset}")
if os.path.isdir(cancer_dir):
index_path = os.path.join(cancer_dir, "index.txt")
# Check that they also have the index
if not os.path.isfile(index_path):
redownload = True
else:
# The PDC doesn't have a versioning scheme for the tables they serve, so originally we just called it version 0.0 but later decided it would be better to call it 1.0. So, check if theirs is called 0.0; if so, replace it with 1.0.
with open(index_path, "r") as index_file:
first_line = index_file.readline()
if first_line.startswith("#0.0"):
redownload=True
if redownload:
shutil.rmtree(cancer_dir)
else:
return True
os.mkdir(cancer_dir)
data_dir = os.path.join(cancer_dir, f"{dataset}_v1.0")
os.mkdir(data_dir)
# We'll combine all the clinical tables in case there are differences
master_clin = pd.DataFrame()
for data_type in dataset_ids.keys():
# Print an update
download_msg = f"Downloading {dataset} {data_type} files..."
print(download_msg, end="\r")
# Get the clinical and quantitative tables for the study ID
clin, quant = download_pdc_id(dataset_ids[data_type], _download_msg=False)
# Print a new update
print(" " * len(download_msg), end="\r")
save_msg = f"Saving {dataset} {data_type} files..."
print(save_msg, end="\r")
# Append the clinical dataframe
master_clin = master_clin.append(clin)
# Save the quantitative table
quant.to_csv(os.path.join(data_dir, f"{data_type}.tsv.gz"), sep="\t")
# Erase update
print(" " * len(save_msg), end="\r")
# Print an update
save_msg = f"Saving {dataset} clinical file..."
print(save_msg, end="\r")
# Drop any duplicated rows in combined clinical table, then save it too
master_clin = master_clin.drop_duplicates(keep="first")
master_clin.to_csv(os.path.join(data_dir, "clinical.tsv.gz"), sep="\t")
# Write a dummy index with just version numbers
index_path = os.path.join(cancer_dir, "index.txt")
with open(index_path, "w") as index_file:
index_file.write("#1.0\n")
# Erase update
print(" " * len(save_msg), end="\r")
return True
def _download_study_clin(pdc_study_id):
"""Download PDC clinical data for a particular study."""
clinical_query = '''
query {
clinicalPerStudy(pdc_study_id: "''' + pdc_study_id + '''", acceptDUA: true) {
age_at_diagnosis, ajcc_clinical_m, ajcc_clinical_n, ajcc_clinical_stage, ajcc_clinical_t, ajcc_pathologic_m,
ajcc_pathologic_n, ajcc_pathologic_stage, ajcc_pathologic_t, ann_arbor_b_symptoms, ann_arbor_clinical_stage,
ann_arbor_extranodal_involvement, ann_arbor_pathologic_stage, best_overall_response, burkitt_lymphoma_clinical_variant,
case_id, case_submitter_id, cause_of_death, circumferential_resection_margin, classification_of_tumor, colon_polyps_history,
days_to_best_overall_response, days_to_birth, days_to_death, days_to_diagnosis, days_to_hiv_diagnosis, days_to_last_follow_up,
days_to_last_known_disease_status, days_to_new_event, days_to_recurrence, demographic_id, demographic_submitter_id,
diagnosis_id, diagnosis_submitter_id, disease_type, ethnicity, figo_stage, gender, hiv_positive, hpv_positive_type, hpv_status,
icd_10_code, iss_stage, last_known_disease_status, laterality, ldh_level_at_diagnosis, ldh_normal_range_upper,
lymphatic_invasion_present, lymph_nodes_positive, method_of_diagnosis, morphology, new_event_anatomic_site, new_event_type,
overall_survival, perineural_invasion_present, primary_diagnosis, primary_site, prior_malignancy, prior_treatment,
progression_free_survival, progression_free_survival_event, progression_or_recurrence, race, residual_disease,
site_of_resection_or_biopsy, status, synchronous_malignancy, tissue_or_organ_of_origin, tumor_cell_content, tumor_grade,
tumor_stage, vascular_invasion_present, vital_status, year_of_birth, year_of_death, year_of_diagnosis
}
}
'''
result_json = _query_pdc(clinical_query)
result_df = pd.\
DataFrame(result_json["data"]["clinicalPerStudy"])
return result_df
def _download_study_biospecimen(pdc_study_id):
"""Download PDC biospecimen data for a particular study."""
biospecimen_query = '''
query {
biospecimenPerStudy(pdc_study_id: "''' + pdc_study_id + '''", acceptDUA: true) {
aliquot_submitter_id
case_submitter_id
}
}
'''
result_json = _query_pdc(biospecimen_query)
result_df = pd.\
DataFrame(result_json["data"]["biospecimenPerStudy"])
return result_df
def _download_study_quant(pdc_study_id):
"""Download PDC quantitative data for a particular study."""
proteome_query = '''
query {
quantDataMatrix(pdc_study_id: "''' + pdc_study_id + '''", data_type: "log2_ratio", acceptDUA: true)
}
'''
result_json = _query_pdc(proteome_query)
result_df = pd.DataFrame(result_json["data"]["quantDataMatrix"])
if result_df.shape[1] != 0:
result_df = result_df.set_index(result_df.columns[0]).transpose()
else:
raise PdcDownloadError(f"quantDataMatrix table returned for PDC study ID {pdc_study_id} was empty.")
return result_df
def _query_pdc(query):
"""Send a GraphQL query to the PDC and return the results."""
url = 'https://pdc.cancer.gov/graphql'
try:
response = requests.post(url, json={'query': query})
response.raise_for_status() # Raises a requests.HTTPError if the response code was unsuccessful
except requests.RequestException: # Parent class for all exceptions in the requests module
raise NoInternetError("Insufficient internet. Check your internet connection.") from None
return response.json()
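# Editor's note: an illustrative direct call, reusing the biospecimen query
# shape defined above (the study ID is an example from STUDY_IDS_MAP).
# result_json = _query_pdc('''
#     query {
#       biospecimenPerStudy(pdc_study_id: "PDC000125", acceptDUA: true) {
#         aliquot_submitter_id
#         case_submitter_id
#       }
#     }
# ''')
# result_json["data"]["biospecimenPerStudy"] is then a list of dicts.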
def _check_ids_match(ids_map):
"""Check that the ids in the download function's STUDY_IDS_MAP match up."""
for cancer in ids_map.values():
for data in cancer.values():
pdc_study_id = data["pdc_study_id"]
study_submitter_id = data["study_submitter_id"]
query = '''
query {
study (pdc_study_id: "''' + pdc_study_id + '''" acceptDUA: true) {
pdc_study_id,
study_submitter_id
}
}
'''
idres = _query_pdc(query)
server_psi = idres["data"]["study"][0]["pdc_study_id"]
server_ssi = idres["data"]["study"][0]["study_submitter_id"]
assert server_psi == pdc_study_id
assert server_ssi == study_submitter_id
print(f"{server_psi} == {pdc_study_id}")
print(f"{server_ssi} == {study_submitter_id}")
print()
| 38.894988
| 241
| 0.666196
|
import os
import pandas as pd
import requests
import shutil
import warnings
import cptac
from cptac.file_download import get_box_token
from cptac.exceptions import DatasetAlreadyInstalledWarning, InvalidParameterError, NoInternetError, PdcDownloadError
from .pancanbrca import SOURCES as BRCA_SOURCES
from .pancanccrcc import SOURCES as CCRCC_SOURCES
from .pancancoad import SOURCES as COAD_SOURCES
from .pancangbm import SOURCES as GBM_SOURCES
from .pancanhnscc import SOURCES as HNSCC_SOURCES
from .pancanlscc import SOURCES as LSCC_SOURCES
from .pancanluad import SOURCES as LUAD_SOURCES
from .pancanov import SOURCES as OV_SOURCES
from .pancanucec import SOURCES as UCEC_SOURCES
from .pancanpdac import SOURCES as PDAC_SOURCES
STUDY_IDS_MAP = {
"pdcbrca": {
"acetylome": "PDC000239",
"phosphoproteome": "PDC000121",
"proteome": "PDC000120",
},
"pdcccrcc": {
"phosphoproteome": "PDC000128",
"proteome": "PDC000127",
},
"pdccoad": {
"phosphoproteome": "PDC000117",
"proteome": "PDC000116",
},
"pdcgbm": {
"acetylome": "PDC000245",
"phosphoproteome": "PDC000205",
"proteome": "PDC000204",
},
"pdchnscc": {
"phosphoproteome": "PDC000222",
"proteome": "PDC000221",
},
"pdclscc": {
"acetylome": "PDC000233",
"phosphoproteome": "PDC000232",
"proteome": "PDC000234",
"ubiquitylome": "PDC000237",
},
"pdcluad": {
"acetylome": "PDC000224",
"phosphoproteome": "PDC000149",
"proteome": "PDC000153",
},
"pdcov": {
"phosphoproteome": "PDC000119",
"proteome": "PDC000118",
},
"pdcpdac": {
"proteome": "PDC000270",
"phosphoproteome": "PDC000271",
},
"pdcucec": {
"acetylome": "PDC000226",
"phosphoproteome": "PDC000126",
"proteome": "PDC000125",
},
}
def download(dataset, version="latest", redownload=False):
dataset = dataset.lower()
if dataset.startswith("pdc"):
box_token = get_box_token()
if dataset != 'pdcbrca':
mapping = cptac.download(dataset, version=version, redownload=redownload, _box_auth=True, _box_token=box_token) # download helper file for mapping aliquots to patient IDs
omics = _pdc_download(dataset, version=version, redownload=redownload)
if omics and mapping:
return True
else:
return False
else: # pdcbrca only needs omics
omics = _pdc_download(dataset, version=version, redownload=redownload)
if omics:
return True
else:
return False
elif dataset.startswith("pancan") or dataset == "all":
box_token = get_box_token()
if dataset == "pancanbrca":
sources = BRCA_SOURCES
elif dataset == "pancanccrcc":
sources = CCRCC_SOURCES
elif dataset == "pancancoad":
sources = COAD_SOURCES
elif dataset == "pancangbm":
sources = GBM_SOURCES
elif dataset == "pancanhnscc":
sources = HNSCC_SOURCES
elif dataset == "pancanlscc":
sources = LSCC_SOURCES
elif dataset == "pancanluad":
sources = LUAD_SOURCES
elif dataset == "pancanov":
sources = OV_SOURCES
elif dataset == "pancanucec":
sources = UCEC_SOURCES
elif dataset == "pancanpdac":
sources = PDAC_SOURCES
elif dataset == "all":
sources = sorted(set(BRCA_SOURCES + CCRCC_SOURCES + COAD_SOURCES + GBM_SOURCES + HNSCC_SOURCES + LSCC_SOURCES + LUAD_SOURCES + OV_SOURCES + UCEC_SOURCES + PDAC_SOURCES))
else:
raise InvalidParameterError(f"{dataset} is not a valid dataset.")
overall_success = True
for source in sources:
if source.startswith("pdc"):
single_success = download(source, version=version, redownload=redownload)
else:
single_success = cptac.download(source, version=version, redownload=redownload, _box_auth=True, _box_token=box_token)
if not single_success:
overall_success = False
return overall_success
else:
return cptac.download(dataset, version=version, redownload=redownload, _box_auth=True)
def download_pdc_id(pdc_id, _download_msg=True):
if _download_msg:
clin_msg = f"Downloading clinical table for {pdc_id}..."
print(clin_msg, end="\r")
# Download the clinical table
clin = _download_study_clin(pdc_id).\
set_index("case_submitter_id").\
sort_index()
if _download_msg:
print(" " * len(clin_msg), end="\r")
bio_msg = f"Downloading biospecimenPerStudy table for {pdc_id}..."
print(bio_msg, end="\r")
    # Get the biospecimenPerStudy table, which has both patient IDs and aliquot IDs
bio = _download_study_biospecimen(pdc_id).\
set_index("aliquot_submitter_id").\
sort_index()
if _download_msg:
print(" " * len(bio_msg), end="\r")
quant_msg = f"Downloading quantitative table for {pdc_id}..."
print(quant_msg, end="\r")
# Get the quantitative data table
quant = _download_study_quant(pdc_id)
if _download_msg:
print(" " * len(quant_msg), end="\r")
format_msg = f"Formatting tables for {pdc_id}..."
print(format_msg, end="\r")
# Join the patient IDs from the biospecimenPerStudy table into the quant table
quant = quant.\
assign(aliquot_submitter_id=quant.iloc[:, 0].str.split(":", n=1, expand=True)[1]).\
drop(columns=quant.columns[0]).\
set_index("aliquot_submitter_id").\
sort_index()
quant = bio.\
join(quant, how="inner").\
reset_index().\
set_index(["case_submitter_id", "aliquot_submitter_id"]).\
sort_index()
# Clear message
if _download_msg:
print(" " * len(format_msg), end="\r")
return clin, quant
def list_pdc_datasets():
for dataset in STUDY_IDS_MAP.keys():
print(f"Pdc{dataset[3:].title()}:")
for data_type in STUDY_IDS_MAP[dataset].keys():
print(f"\t{data_type}: {STUDY_IDS_MAP[dataset][data_type]}")
# Helper functions
def _pdc_download(dataset, version, redownload):
dataset = str.lower(dataset)
if dataset == "pdcall":
overall_result = True
for dataset in STUDY_IDS_MAP.keys():
            if not _pdc_download(dataset, version, redownload):
overall_result = False
return overall_result
if not dataset.startswith("pdc"):
raise InvalidParameterError(f"pdc_download function can only be used for PDC datasets, which start with the prefix 'pdc'. You tried to download '{dataset}'.")
if dataset not in STUDY_IDS_MAP.keys():
raise InvalidParameterError(f"PDC dataset must be one of the following:\n{list(STUDY_IDS_MAP.keys())}\nYou passed '{dataset}'.")
dataset_ids = STUDY_IDS_MAP[dataset]
# Get the directory to where to store the data, and see if it exists
path_here = os.path.abspath(os.path.dirname(__file__))
cancer_dir = os.path.join(path_here, f"data_{dataset}")
if os.path.isdir(cancer_dir):
index_path = os.path.join(cancer_dir, "index.txt")
# Check that they also have the index
if not os.path.isfile(index_path):
redownload = True
else:
# The PDC doesn't have a versioning scheme for the tables they serve, so originally we just called it version 0.0 but later decided it would be better to call it 1.0. So, check if theirs is called 0.0; if so, replace it with 1.0.
with open(index_path, "r") as index_file:
first_line = index_file.readline()
if first_line.startswith("#0.0"):
redownload=True
if redownload:
shutil.rmtree(cancer_dir)
else:
return True
os.mkdir(cancer_dir)
data_dir = os.path.join(cancer_dir, f"{dataset}_v1.0")
os.mkdir(data_dir)
master_clin = pd.DataFrame()
for data_type in dataset_ids.keys():
# Print an update
download_msg = f"Downloading {dataset} {data_type} files..."
print(download_msg, end="\r")
# Get the clinical and quantitative tables for the study ID
clin, quant = download_pdc_id(dataset_ids[data_type], _download_msg=False)
# Print a new update
print(" " * len(download_msg), end="\r")
save_msg = f"Saving {dataset} {data_type} files..."
print(save_msg, end="\r")
# Append the clinical dataframe
master_clin = master_clin.append(clin)
# Save the quantitative table
quant.to_csv(os.path.join(data_dir, f"{data_type}.tsv.gz"), sep="\t")
# Erase update
print(" " * len(save_msg), end="\r")
# Print an update
save_msg = f"Saving {dataset} clinical file..."
print(save_msg, end="\r")
# Drop any duplicated rows in combined clinical table, then save it too
master_clin = master_clin.drop_duplicates(keep="first")
master_clin.to_csv(os.path.join(data_dir, "clinical.tsv.gz"), sep="\t")
# Write a dummy index with just version numbers
index_path = os.path.join(cancer_dir, "index.txt")
with open(index_path, "w") as index_file:
index_file.write("#1.0\n")
# Erase update
print(" " * len(save_msg), end="\r")
return True
def _download_study_clin(pdc_study_id):
clinical_query = '''
query {
clinicalPerStudy(pdc_study_id: "''' + pdc_study_id + '''", acceptDUA: true) {
age_at_diagnosis, ajcc_clinical_m, ajcc_clinical_n, ajcc_clinical_stage, ajcc_clinical_t, ajcc_pathologic_m,
ajcc_pathologic_n, ajcc_pathologic_stage, ajcc_pathologic_t, ann_arbor_b_symptoms, ann_arbor_clinical_stage,
ann_arbor_extranodal_involvement, ann_arbor_pathologic_stage, best_overall_response, burkitt_lymphoma_clinical_variant,
case_id, case_submitter_id, cause_of_death, circumferential_resection_margin, classification_of_tumor, colon_polyps_history,
days_to_best_overall_response, days_to_birth, days_to_death, days_to_diagnosis, days_to_hiv_diagnosis, days_to_last_follow_up,
days_to_last_known_disease_status, days_to_new_event, days_to_recurrence, demographic_id, demographic_submitter_id,
diagnosis_id, diagnosis_submitter_id, disease_type, ethnicity, figo_stage, gender, hiv_positive, hpv_positive_type, hpv_status,
icd_10_code, iss_stage, last_known_disease_status, laterality, ldh_level_at_diagnosis, ldh_normal_range_upper,
lymphatic_invasion_present, lymph_nodes_positive, method_of_diagnosis, morphology, new_event_anatomic_site, new_event_type,
overall_survival, perineural_invasion_present, primary_diagnosis, primary_site, prior_malignancy, prior_treatment,
progression_free_survival, progression_free_survival_event, progression_or_recurrence, race, residual_disease,
site_of_resection_or_biopsy, status, synchronous_malignancy, tissue_or_organ_of_origin, tumor_cell_content, tumor_grade,
tumor_stage, vascular_invasion_present, vital_status, year_of_birth, year_of_death, year_of_diagnosis
}
}
'''
result_json = _query_pdc(clinical_query)
result_df = pd.\
DataFrame(result_json["data"]["clinicalPerStudy"])
return result_df
def _download_study_biospecimen(pdc_study_id):
biospecimen_query = '''
query {
biospecimenPerStudy(pdc_study_id: "''' + pdc_study_id + '''", acceptDUA: true) {
aliquot_submitter_id
case_submitter_id
}
}
'''
result_json = _query_pdc(biospecimen_query)
result_df = pd.\
DataFrame(result_json["data"]["biospecimenPerStudy"])
return result_df
def _download_study_quant(pdc_study_id):
proteome_query = '''
query {
quantDataMatrix(pdc_study_id: "''' + pdc_study_id + '''", data_type: "log2_ratio", acceptDUA: true)
}
'''
result_json = _query_pdc(proteome_query)
result_df = pd.DataFrame(result_json["data"]["quantDataMatrix"])
if result_df.shape[1] != 0:
result_df = result_df.set_index(result_df.columns[0]).transpose()
else:
raise PdcDownloadError(f"quantDataMatrix table returned for PDC study ID {pdc_study_id} was empty.")
return result_df
def _query_pdc(query):
url = 'https://pdc.cancer.gov/graphql'
try:
response = requests.post(url, json={'query': query})
response.raise_for_status() # Raises a requests.HTTPError if the response code was unsuccessful
except requests.RequestException: # Parent class for all exceptions in the requests module
raise NoInternetError("Insufficient internet. Check your internet connection.") from None
return response.json()
def _check_ids_match(ids_map):
for cancer in ids_map.values():
for data in cancer.values():
pdc_study_id = data["pdc_study_id"]
study_submitter_id = data["study_submitter_id"]
query = '''
query {
study (pdc_study_id: "''' + pdc_study_id + '''" acceptDUA: true) {
pdc_study_id,
study_submitter_id
}
}
'''
idres = _query_pdc(query)
server_psi = idres["data"]["study"][0]["pdc_study_id"]
server_ssi = idres["data"]["study"][0]["study_submitter_id"]
assert server_psi == pdc_study_id
assert server_ssi == study_submitter_id
print(f"{server_psi} == {pdc_study_id}")
print(f"{server_ssi} == {study_submitter_id}")
print()
| true
| true
|
790459598df3387a1374776d11f59ba20fc062de
| 14,433
|
py
|
Python
|
astropy/units/quantity_helper/helpers.py
|
PriyankaH21/astropy
|
159fb9637ce4acdc60329d20517ed3dc7ba79581
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/units/quantity_helper/helpers.py
|
PriyankaH21/astropy
|
159fb9637ce4acdc60329d20517ed3dc7ba79581
|
[
"BSD-3-Clause"
] | 1
|
2018-11-14T14:18:55.000Z
|
2020-01-21T10:36:05.000Z
|
astropy/units/quantity_helper/helpers.py
|
PriyankaH21/astropy
|
159fb9637ce4acdc60329d20517ed3dc7ba79581
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# The idea for this module (but no code) was borrowed from the
# quantities (http://pythonhosted.org/quantities/) package.
"""Helper functions for Quantity.
In particular, this implements the logic that determines scaling and result
units for a given ufunc, given input units.
"""
from fractions import Fraction
import numpy as np
from . import UFUNC_HELPERS, UNSUPPORTED_UFUNCS
from ..core import (UnitsError, UnitConversionError, UnitTypeError,
dimensionless_unscaled, get_current_unit_registry)
def _d(unit):
if unit is None:
return dimensionless_unscaled
else:
return unit
def get_converter(from_unit, to_unit):
"""Like Unit._get_converter, except returns None if no scaling is needed,
i.e., if the inferred scale is unity."""
try:
scale = from_unit._to(to_unit)
except UnitsError:
return from_unit._apply_equivalencies(
from_unit, to_unit, get_current_unit_registry().equivalencies)
except AttributeError:
raise UnitTypeError("Unit '{0}' cannot be converted to '{1}'"
.format(from_unit, to_unit))
if scale == 1.:
return None
else:
return lambda val: scale * val
def get_converters_and_unit(f, unit1, unit2):
converters = [None, None]
# By default, we try adjusting unit2 to unit1, so that the result will
# be unit1 as well. But if there is no second unit, we have to try
# adjusting unit1 (to dimensionless, see below).
if unit2 is None:
if unit1 is None:
# No units for any input -- e.g., np.add(a1, a2, out=q)
return converters, dimensionless_unscaled
changeable = 0
# swap units.
unit2 = unit1
unit1 = None
elif unit2 is unit1:
# ensure identical units is fast ("==" is slow, so avoid that).
return converters, unit1
else:
changeable = 1
# Try to get a converter from unit2 to unit1.
if unit1 is None:
try:
converters[changeable] = get_converter(unit2,
dimensionless_unscaled)
except UnitsError:
# special case: would be OK if unitless number is zero, inf, nan
converters[1-changeable] = False
return converters, unit2
else:
return converters, dimensionless_unscaled
else:
try:
converters[changeable] = get_converter(unit2, unit1)
except UnitsError:
raise UnitConversionError(
"Can only apply '{0}' function to quantities "
"with compatible dimensions"
.format(f.__name__))
return converters, unit1
# SINGLE ARGUMENT UFUNC HELPERS
#
# The functions below take a single argument, which is the quantity upon which
# the ufunc is being used. The output of the helper function should be two
# values: a list with a single converter to be used to scale the input before
# it is being passed to the ufunc (or None if no conversion is needed), and
# the unit the output will be in.
def helper_onearg_test(f, unit):
return ([None], None)
def helper_invariant(f, unit):
return ([None], _d(unit))
def helper_square(f, unit):
return ([None], unit ** 2 if unit is not None else dimensionless_unscaled)
def helper_reciprocal(f, unit):
return ([None], unit ** -1 if unit is not None else dimensionless_unscaled)
one_half = 0.5 # faster than Fraction(1, 2)
one_third = Fraction(1, 3)
def helper_sqrt(f, unit):
return ([None], unit ** one_half if unit is not None
else dimensionless_unscaled)
def helper_cbrt(f, unit):
return ([None], (unit ** one_third if unit is not None
else dimensionless_unscaled))
def helper_modf(f, unit):
if unit is None:
return [None], (dimensionless_unscaled, dimensionless_unscaled)
try:
return ([get_converter(unit, dimensionless_unscaled)],
(dimensionless_unscaled, dimensionless_unscaled))
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper__ones_like(f, unit):
return [None], dimensionless_unscaled
def helper_dimensionless_to_dimensionless(f, unit):
if unit is None:
return [None], dimensionless_unscaled
try:
return ([get_converter(unit, dimensionless_unscaled)],
dimensionless_unscaled)
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_dimensionless_to_radian(f, unit):
from ..si import radian
if unit is None:
return [None], radian
try:
return [get_converter(unit, dimensionless_unscaled)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_degree_to_radian(f, unit):
from ..si import degree, radian
try:
return [get_converter(unit, degree)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_radian_to_degree(f, unit):
from ..si import degree, radian
try:
return [get_converter(unit, radian)], degree
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_radian_to_dimensionless(f, unit):
from ..si import radian
try:
return [get_converter(unit, radian)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_frexp(f, unit):
if not unit.is_unity():
raise UnitTypeError("Can only apply '{0}' function to "
"unscaled dimensionless quantities"
.format(f.__name__))
return [None], (None, None)
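# ---------------------------------------------------------------------------
# Editor's note: an illustrative check of the one-argument helper convention
# described above (a sketch; `u` stands for astropy.units).
# ---------------------------------------------------------------------------
# from astropy import units as u
# converters, result_unit = helper_square(np.square, u.m)
# assert converters == [None] and result_unit == u.m ** 2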
# TWO ARGUMENT UFUNC HELPERS
#
# The functions below take a two arguments. The output of the helper function
# should be two values: a tuple of two converters to be used to scale the
# inputs before being passed to the ufunc (None if no conversion is needed),
# and the unit the output will be in.
def helper_multiplication(f, unit1, unit2):
return [None, None], _d(unit1) * _d(unit2)
def helper_division(f, unit1, unit2):
return [None, None], _d(unit1) / _d(unit2)
def helper_power(f, unit1, unit2):
# TODO: find a better way to do this, currently need to signal that one
# still needs to raise power of unit1 in main code
if unit2 is None:
return [None, None], False
try:
return [None, get_converter(unit2, dimensionless_unscaled)], False
except UnitsError:
raise UnitTypeError("Can only raise something to a "
"dimensionless quantity")
def helper_ldexp(f, unit1, unit2):
if unit2 is not None:
raise TypeError("Cannot use ldexp with a quantity "
"as second argument.")
else:
return [None, None], _d(unit1)
def helper_copysign(f, unit1, unit2):
# if first arg is not a quantity, just return plain array
if unit1 is None:
return [None, None], None
else:
return [None, None], unit1
def helper_heaviside(f, unit1, unit2):
try:
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply 'heaviside' function with a "
"dimensionless second argument.")
return ([None, converter2], dimensionless_unscaled)
def helper_two_arg_dimensionless(f, unit1, unit2):
try:
converter1 = (get_converter(unit1, dimensionless_unscaled)
if unit1 is not None else None)
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
return ([converter1, converter2], dimensionless_unscaled)
# This used to be a separate function that just called get_converters_and_unit.
# Using it directly saves a few us; keeping the clearer name.
helper_twoarg_invariant = get_converters_and_unit
def helper_twoarg_comparison(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, None
def helper_twoarg_invtrig(f, unit1, unit2):
from ..si import radian
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, radian
def helper_twoarg_floor_divide(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, dimensionless_unscaled
def helper_divmod(f, unit1, unit2):
converters, result_unit = get_converters_and_unit(f, unit1, unit2)
return converters, (dimensionless_unscaled, result_unit)
# list of ufuncs:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html#available-ufuncs
UNSUPPORTED_UFUNCS |= {
np.bitwise_and, np.bitwise_or, np.bitwise_xor, np.invert, np.left_shift,
np.right_shift, np.logical_and, np.logical_or, np.logical_xor,
np.logical_not}
for name in 'isnat', 'gcd', 'lcm':
# isnat was introduced in numpy 1.14, gcd+lcm in 1.15
ufunc = getattr(np, name, None)
if isinstance(ufunc, np.ufunc):
UNSUPPORTED_UFUNCS |= {ufunc}
# SINGLE ARGUMENT UFUNCS
# ufuncs that return a boolean and do not care about the unit
onearg_test_ufuncs = (np.isfinite, np.isinf, np.isnan, np.sign, np.signbit)
for ufunc in onearg_test_ufuncs:
UFUNC_HELPERS[ufunc] = helper_onearg_test
# ufuncs that return a value with the same unit as the input
invariant_ufuncs = (np.absolute, np.fabs, np.conj, np.conjugate, np.negative,
np.spacing, np.rint, np.floor, np.ceil, np.trunc,
np.positive)
for ufunc in invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_invariant
# ufuncs that require dimensionless input and give dimensionless output
dimensionless_to_dimensionless_ufuncs = (np.exp, np.expm1, np.exp2, np.log,
np.log10, np.log2, np.log1p)
# As found out in gh-7058, some numpy 1.13 conda installations also provide
# np.erf, even though upstream doesn't have it. We include it if present.
if isinstance(getattr(np.core.umath, 'erf', None), np.ufunc):
dimensionless_to_dimensionless_ufuncs += (np.core.umath.erf,)
for ufunc in dimensionless_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless
# ufuncs that require dimensionless input and give output in radians
dimensionless_to_radian_ufuncs = (np.arccos, np.arcsin, np.arctan, np.arccosh,
np.arcsinh, np.arctanh)
for ufunc in dimensionless_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_radian
# ufuncs that require input in degrees and give output in radians
degree_to_radian_ufuncs = (np.radians, np.deg2rad)
for ufunc in degree_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_degree_to_radian
# ufuncs that require input in radians and give output in degrees
radian_to_degree_ufuncs = (np.degrees, np.rad2deg)
for ufunc in radian_to_degree_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_degree
# ufuncs that require input in radians and give dimensionless output
radian_to_dimensionless_ufuncs = (np.cos, np.sin, np.tan, np.cosh, np.sinh,
np.tanh)
for ufunc in radian_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_dimensionless
# ufuncs handled as special cases
UFUNC_HELPERS[np.sqrt] = helper_sqrt
UFUNC_HELPERS[np.square] = helper_square
UFUNC_HELPERS[np.reciprocal] = helper_reciprocal
UFUNC_HELPERS[np.cbrt] = helper_cbrt
UFUNC_HELPERS[np.core.umath._ones_like] = helper__ones_like
UFUNC_HELPERS[np.modf] = helper_modf
UFUNC_HELPERS[np.frexp] = helper_frexp
# TWO ARGUMENT UFUNCS
# two argument ufuncs that require dimensionless input and give
# dimensionless output
two_arg_dimensionless_ufuncs = (np.logaddexp, np.logaddexp2)
for ufunc in two_arg_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless
# two argument ufuncs that return a value with the same unit as the input
twoarg_invariant_ufuncs = (np.add, np.subtract, np.hypot, np.maximum,
np.minimum, np.fmin, np.fmax, np.nextafter,
np.remainder, np.mod, np.fmod)
for ufunc in twoarg_invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invariant
# two argument ufuncs that need compatible inputs and return a boolean
twoarg_comparison_ufuncs = (np.greater, np.greater_equal, np.less,
np.less_equal, np.not_equal, np.equal)
for ufunc in twoarg_comparison_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_comparison
# two argument ufuncs that do inverse trigonometry
twoarg_invtrig_ufuncs = (np.arctan2,)
# another private function in numpy; use getattr in case it disappears
if isinstance(getattr(np.core.umath, '_arg', None), np.ufunc):
twoarg_invtrig_ufuncs += (np.core.umath._arg,)
for ufunc in twoarg_invtrig_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invtrig
# ufuncs handled as special cases
UFUNC_HELPERS[np.multiply] = helper_multiplication
UFUNC_HELPERS[np.divide] = helper_division
UFUNC_HELPERS[np.true_divide] = helper_division
UFUNC_HELPERS[np.power] = helper_power
UFUNC_HELPERS[np.ldexp] = helper_ldexp
UFUNC_HELPERS[np.copysign] = helper_copysign
UFUNC_HELPERS[np.floor_divide] = helper_twoarg_floor_divide
UFUNC_HELPERS[np.heaviside] = helper_heaviside
UFUNC_HELPERS[np.float_power] = helper_power
UFUNC_HELPERS[np.divmod] = helper_divmod
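# --- Hedged usage sketch (editor's addition, not part of the module) ---
# The registry built above is meant to be consulted per-ufunc: fetch the
# helper, pass in the input units, apply any returned converters to the raw
# values, run the ufunc, then attach the returned unit. The unit names below
# are assumptions chosen only for the example:
#
#     >>> from astropy.units import m, km
#     >>> converters, result_unit = UFUNC_HELPERS[np.add](np.add, m, km)
#     >>> converters[1](1.0), result_unit    # km input rescaled into m
#     (1000.0, Unit("m"))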
| 35.461916
| 79
| 0.677406
|
from fractions import Fraction
import numpy as np
from . import UFUNC_HELPERS, UNSUPPORTED_UFUNCS
from ..core import (UnitsError, UnitConversionError, UnitTypeError,
dimensionless_unscaled, get_current_unit_registry)
def _d(unit):
if unit is None:
return dimensionless_unscaled
else:
return unit
def get_converter(from_unit, to_unit):
try:
scale = from_unit._to(to_unit)
except UnitsError:
return from_unit._apply_equivalencies(
from_unit, to_unit, get_current_unit_registry().equivalencies)
except AttributeError:
raise UnitTypeError("Unit '{0}' cannot be converted to '{1}'"
.format(from_unit, to_unit))
if scale == 1.:
return None
else:
return lambda val: scale * val
def get_converters_and_unit(f, unit1, unit2):
converters = [None, None]
if unit2 is None:
if unit1 is None:
return converters, dimensionless_unscaled
changeable = 0
unit2 = unit1
unit1 = None
elif unit2 is unit1:
return converters, unit1
else:
changeable = 1
if unit1 is None:
try:
converters[changeable] = get_converter(unit2,
dimensionless_unscaled)
except UnitsError:
converters[1-changeable] = False
return converters, unit2
else:
return converters, dimensionless_unscaled
else:
try:
converters[changeable] = get_converter(unit2, unit1)
except UnitsError:
raise UnitConversionError(
"Can only apply '{0}' function to quantities "
"with compatible dimensions"
.format(f.__name__))
return converters, unit1
def helper_onearg_test(f, unit):
return ([None], None)
def helper_invariant(f, unit):
return ([None], _d(unit))
def helper_square(f, unit):
return ([None], unit ** 2 if unit is not None else dimensionless_unscaled)
def helper_reciprocal(f, unit):
return ([None], unit ** -1 if unit is not None else dimensionless_unscaled)
one_half = 0.5
one_third = Fraction(1, 3)
def helper_sqrt(f, unit):
return ([None], unit ** one_half if unit is not None
else dimensionless_unscaled)
def helper_cbrt(f, unit):
return ([None], (unit ** one_third if unit is not None
else dimensionless_unscaled))
def helper_modf(f, unit):
if unit is None:
return [None], (dimensionless_unscaled, dimensionless_unscaled)
try:
return ([get_converter(unit, dimensionless_unscaled)],
(dimensionless_unscaled, dimensionless_unscaled))
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper__ones_like(f, unit):
return [None], dimensionless_unscaled
def helper_dimensionless_to_dimensionless(f, unit):
if unit is None:
return [None], dimensionless_unscaled
try:
return ([get_converter(unit, dimensionless_unscaled)],
dimensionless_unscaled)
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_dimensionless_to_radian(f, unit):
from ..si import radian
if unit is None:
return [None], radian
try:
return [get_converter(unit, dimensionless_unscaled)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_degree_to_radian(f, unit):
from ..si import degree, radian
try:
return [get_converter(unit, degree)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_radian_to_degree(f, unit):
from ..si import degree, radian
try:
return [get_converter(unit, radian)], degree
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_radian_to_dimensionless(f, unit):
from ..si import radian
try:
return [get_converter(unit, radian)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_frexp(f, unit):
if not unit.is_unity():
raise UnitTypeError("Can only apply '{0}' function to "
"unscaled dimensionless quantities"
.format(f.__name__))
return [None], (None, None)
def helper_multiplication(f, unit1, unit2):
return [None, None], _d(unit1) * _d(unit2)
def helper_division(f, unit1, unit2):
return [None, None], _d(unit1) / _d(unit2)
def helper_power(f, unit1, unit2):
if unit2 is None:
return [None, None], False
try:
return [None, get_converter(unit2, dimensionless_unscaled)], False
except UnitsError:
raise UnitTypeError("Can only raise something to a "
"dimensionless quantity")
def helper_ldexp(f, unit1, unit2):
if unit2 is not None:
raise TypeError("Cannot use ldexp with a quantity "
"as second argument.")
else:
return [None, None], _d(unit1)
def helper_copysign(f, unit1, unit2):
if unit1 is None:
return [None, None], None
else:
return [None, None], unit1
def helper_heaviside(f, unit1, unit2):
try:
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply 'heaviside' function with a "
"dimensionless second argument.")
return ([None, converter2], dimensionless_unscaled)
def helper_two_arg_dimensionless(f, unit1, unit2):
try:
converter1 = (get_converter(unit1, dimensionless_unscaled)
if unit1 is not None else None)
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
return ([converter1, converter2], dimensionless_unscaled)
helper_twoarg_invariant = get_converters_and_unit
def helper_twoarg_comparison(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, None
def helper_twoarg_invtrig(f, unit1, unit2):
from ..si import radian
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, radian
def helper_twoarg_floor_divide(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, dimensionless_unscaled
def helper_divmod(f, unit1, unit2):
converters, result_unit = get_converters_and_unit(f, unit1, unit2)
return converters, (dimensionless_unscaled, result_unit)
UNSUPPORTED_UFUNCS |= {
np.bitwise_and, np.bitwise_or, np.bitwise_xor, np.invert, np.left_shift,
np.right_shift, np.logical_and, np.logical_or, np.logical_xor,
np.logical_not}
for name in 'isnat', 'gcd', 'lcm':
ufunc = getattr(np, name, None)
if isinstance(ufunc, np.ufunc):
UNSUPPORTED_UFUNCS |= {ufunc}
onearg_test_ufuncs = (np.isfinite, np.isinf, np.isnan, np.sign, np.signbit)
for ufunc in onearg_test_ufuncs:
UFUNC_HELPERS[ufunc] = helper_onearg_test
invariant_ufuncs = (np.absolute, np.fabs, np.conj, np.conjugate, np.negative,
np.spacing, np.rint, np.floor, np.ceil, np.trunc,
np.positive)
for ufunc in invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_invariant
dimensionless_to_dimensionless_ufuncs = (np.exp, np.expm1, np.exp2, np.log,
np.log10, np.log2, np.log1p)
if isinstance(getattr(np.core.umath, 'erf', None), np.ufunc):
dimensionless_to_dimensionless_ufuncs += (np.core.umath.erf,)
for ufunc in dimensionless_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless
# ufuncs that require dimensionless input and give output in radians
dimensionless_to_radian_ufuncs = (np.arccos, np.arcsin, np.arctan, np.arccosh,
np.arcsinh, np.arctanh)
for ufunc in dimensionless_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_radian
# ufuncs that require input in degrees and give output in radians
degree_to_radian_ufuncs = (np.radians, np.deg2rad)
for ufunc in degree_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_degree_to_radian
# ufuncs that require input in radians and give output in degrees
radian_to_degree_ufuncs = (np.degrees, np.rad2deg)
for ufunc in radian_to_degree_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_degree
# ufuncs that require input in radians and give dimensionless output
radian_to_dimensionless_ufuncs = (np.cos, np.sin, np.tan, np.cosh, np.sinh,
np.tanh)
for ufunc in radian_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_dimensionless
# ufuncs handled as special cases
UFUNC_HELPERS[np.sqrt] = helper_sqrt
UFUNC_HELPERS[np.square] = helper_square
UFUNC_HELPERS[np.reciprocal] = helper_reciprocal
UFUNC_HELPERS[np.cbrt] = helper_cbrt
UFUNC_HELPERS[np.core.umath._ones_like] = helper__ones_like
UFUNC_HELPERS[np.modf] = helper_modf
UFUNC_HELPERS[np.frexp] = helper_frexp
# TWO ARGUMENT UFUNCS
# two argument ufuncs that require dimensionless input and give
# dimensionless output
two_arg_dimensionless_ufuncs = (np.logaddexp, np.logaddexp2)
for ufunc in two_arg_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless
# two argument ufuncs that return a value with the same unit as the input
twoarg_invariant_ufuncs = (np.add, np.subtract, np.hypot, np.maximum,
np.minimum, np.fmin, np.fmax, np.nextafter,
np.remainder, np.mod, np.fmod)
for ufunc in twoarg_invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invariant
# two argument ufuncs that need compatible inputs and return a boolean
twoarg_comparison_ufuncs = (np.greater, np.greater_equal, np.less,
np.less_equal, np.not_equal, np.equal)
for ufunc in twoarg_comparison_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_comparison
# two argument ufuncs that do inverse trigonometry
twoarg_invtrig_ufuncs = (np.arctan2,)
# another private function in numpy; use getattr in case it disappears
if isinstance(getattr(np.core.umath, '_arg', None), np.ufunc):
twoarg_invtrig_ufuncs += (np.core.umath._arg,)
for ufunc in twoarg_invtrig_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invtrig
# ufuncs handled as special cases
UFUNC_HELPERS[np.multiply] = helper_multiplication
UFUNC_HELPERS[np.divide] = helper_division
UFUNC_HELPERS[np.true_divide] = helper_division
UFUNC_HELPERS[np.power] = helper_power
UFUNC_HELPERS[np.ldexp] = helper_ldexp
UFUNC_HELPERS[np.copysign] = helper_copysign
UFUNC_HELPERS[np.floor_divide] = helper_twoarg_floor_divide
UFUNC_HELPERS[np.heaviside] = helper_heaviside
UFUNC_HELPERS[np.float_power] = helper_power
UFUNC_HELPERS[np.divmod] = helper_divmod
| true
| true
|
79045986de02b0ab0ac2765c45bafb29950cbc19
| 12,353
|
py
|
Python
|
wintersdeep_postcode/postcode_types/standard_postcode/standard_postcode_validator.py
|
WintersDeep/wintersdeep_postcode
|
b5f53484d2209d48919a4db663f05c9d39a396fa
|
[
"MIT"
] | 2
|
2021-12-03T11:41:05.000Z
|
2022-01-31T13:33:29.000Z
|
wintersdeep_postcode/postcode_types/standard_postcode/standard_postcode_validator.py
|
WintersDeep/wintersdeep_postcode
|
b5f53484d2209d48919a4db663f05c9d39a396fa
|
[
"MIT"
] | null | null | null |
wintersdeep_postcode/postcode_types/standard_postcode/standard_postcode_validator.py
|
WintersDeep/wintersdeep_postcode
|
b5f53484d2209d48919a4db663f05c9d39a396fa
|
[
"MIT"
] | null | null | null |
# python3 imports
from re import compile as compile_regex
from gettext import gettext as _
# project imports
from wintersdeep_postcode.postcode import Postcode
from wintersdeep_postcode.exceptions.validation_fault import ValidationFault
## A wrapper for validation of standard postcodes
# @remarks see \ref wintersdeep_postcode.postcode_types.standard_postcode
class StandardPostcodeValidator(object):
## Areas that only have single digit districts (ignoring sub-divisions)
# @remarks loaded from JSON file 'standard_postcode_validator.json'
AreasWithOnlySingleDigitDistricts = []
## Checks if a postcode is in an area with only single digit districts and if
# so - that the district specified is only a single digit.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithOnlySingleDigitDistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_district >= 10:
single_digit_districts = cls.AreasWithOnlySingleDigitDistricts
impacted_by_rule = postcode.outward_area in single_digit_districts
return impacted_by_rule
## Areas that only have double digit districts (ignoring sub-divisions)
# @remarks loaded from JSON file 'standard_postcode_validator.json'
AreasWithOnlyDoubleDigitDistricts = []
## Checks if a postcode is in an area with only double digit districts and
# if so - that the district specified has two digits as required.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithOnlyDoubleDigitDistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_district <= 9:
double_digit_districts = cls.AreasWithOnlyDoubleDigitDistricts
impacted_by_rule = postcode.outward_area in double_digit_districts
return impacted_by_rule
## Areas that have a district zero.
# @remarks loaded from JSON file 'standard_postcode_validator.json'
AreasWithDistrictZero = []
## Checks if a postcode has a district zero if it specified one.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithDistrictZero(cls, postcode):
impacted_by_rule = False
if postcode.outward_district == 0:
areas_with_district_zero = cls.AreasWithDistrictZero
impacted_by_rule = not postcode.outward_area in areas_with_district_zero
return impacted_by_rule
## Areas that do not have a district 10
# @remarks loaded from JSON file 'standard_postcode_validator.json'
AreasWithoutDistrictTen = []
## Checks if a postcode has a district ten if it specified one.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithoutDistrictTen(cls, postcode):
impacted_by_rule = False
if postcode.outward_district == 10:
areas_without_district_ten = cls.AreasWithoutDistrictTen
impacted_by_rule = postcode.outward_area in areas_without_district_ten
return impacted_by_rule
## Only a few areas have subdivided districts
# @remarks loaded from JSON file 'standard_postcode_validator.json'
AreasWithSubdistricts = {}
## If a postcode has subdistricts, check it's supposed to have them.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithSubdistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_subdistrict:
areas_with_subdistricts = cls.AreasWithSubdistricts
impacted_by_rule = not postcode.outward_area in areas_with_subdistricts
if not impacted_by_rule:
subdivided_districts_in_area = areas_with_subdistricts[postcode.outward_area]
if subdivided_districts_in_area:
impacted_by_rule = not postcode.outward_district in subdivided_districts_in_area
return impacted_by_rule
## If a postcode has a limited selection of subdistricts, make sure any that are set are in scope.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithSpecificSubdistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_subdistrict:
areas_with_subdistricts = cls.AreasWithSubdistricts
subdivided_districts_in_area = areas_with_subdistricts.get(postcode.outward_area, {})
specific_subdistrict_codes = subdivided_districts_in_area.get(postcode.outward_district, None)
impacted_by_rule = specific_subdistrict_codes and \
not postcode.outward_subdistrict in specific_subdistrict_codes
return impacted_by_rule
## Characters that are not used in the first position.
# @remarks loaded from JSON file 'standard_postcode_validator.json'
FirstPositionExcludes = []
## Checks that a postcode does not include unused characters in the first position.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckFirstPositionExcludes(cls, postcode):
first_postion_char = postcode.outward_area[0]
impacted_by_rule = first_postion_char in cls.FirstPositionExcludes
return impacted_by_rule
## Characters that are not used in the second position.
# @remarks loaded from JSON file 'standard_postcode_validator.json'
SecondPositionExcludes = []
## Checks that a postcode does not include unused characters in the second position.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckSecondPositionExcludes(cls, postcode):
impacted_by_rule = False
if len(postcode.outward_area) > 1:
second_postion_char = postcode.outward_area[1]
impacted_by_rule = second_postion_char in cls.SecondPositionExcludes
return impacted_by_rule
## Characters that are used in the third alpha position (for single digit areas).
# @remarks loaded from JSON file 'standard_postcode_validator.json'
SingleDigitAreaSubdistricts = []
## Checks that a postcode does not include unused subdistricts for single digit areas.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckSingleDigitAreaSubdistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_subdistrict:
if len(postcode.outward_area) == 1:
allowed_subdistricts = cls.SingleDigitAreaSubdistricts
subdistrict = postcode.outward_subdistrict
impacted_by_rule = not subdistrict in allowed_subdistricts
return impacted_by_rule
## Characters that are used in the fourth alpha position (for double digit areas).
# @remarks loaded from JSON file 'standard_postcode_validator.json'
DoubleDigitAreaSubdistricts = []
## Checks that a postcode does not include unused subdistricts for double digit areas.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckDoubleDigitAreaSubdistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_subdistrict:
if len(postcode.outward_area) == 2:
allowed_subdistricts = cls.DoubleDigitAreaSubdistricts
subdistrict = postcode.outward_subdistrict
impacted_by_rule = not subdistrict in allowed_subdistricts
return impacted_by_rule
## Characters that are not used in the unit string.
# @remarks loaded from JSON file 'standard_postcode_validator.json'
UnitExcludes = []
## Checks that the first character of the unit string is not an unused character.
# @remarks we check the first/second unit characters separately to provide more comprehensive errors.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckFirstUnitCharacterExcludes(cls, postcode):
character = postcode.inward_unit[0]
impacted_by_rule = character in cls.UnitExcludes
return impacted_by_rule
## Checks that the second character of the unit string is not an unused character.
# @remarks we check the first/second unit characters separately to provide more comprehensive errors.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckSecondUnitCharacterExcludes(cls, postcode):
character = postcode.inward_unit[1]
impacted_by_rule = character in cls.UnitExcludes
return impacted_by_rule
## Loads various static members used for validation of standard postcodes from
# a JSON file - this is expected to be co-located with this class.
def load_validator_params_from_json():
from json import load
from os.path import dirname, join
json_configuration_file = join( dirname(__file__), "standard_postcode_validator.json" )
with open(json_configuration_file, 'r') as file_handle:
config_json = load(file_handle)
StandardPostcodeValidator.AreasWithDistrictZero = config_json['has-district-zero']
StandardPostcodeValidator.AreasWithoutDistrictTen = config_json['no-district-ten']
StandardPostcodeValidator.AreasWithOnlyDoubleDigitDistricts = config_json['double-digit-districts']
StandardPostcodeValidator.AreasWithOnlySingleDigitDistricts = config_json['single-digit-districts']
StandardPostcodeValidator.SingleDigitAreaSubdistricts = config_json['single-digit-area-subdistricts']
StandardPostcodeValidator.DoubleDigitAreaSubdistricts = config_json['double-digit-area-subdistricts']
StandardPostcodeValidator.SecondPositionExcludes = config_json['second-position-excludes']
StandardPostcodeValidator.FirstPositionExcludes = config_json['first-position-excludes']
StandardPostcodeValidator.UnitExcludes = config_json['unit-excludes']
subdivision_map = config_json["subdivided-districts"]
StandardPostcodeValidator.AreasWithSubdistricts = { k: {
int(k1): v1 for k1, v1 in v.items()
} for k, v in subdivision_map.items() }
load_validator_params_from_json()
if __name__ == "__main__":
##
## If this is the main entry point - someone might be a little lost?
##
print(f"{__file__} ran, but doesn't do anything on its own.")
print(f"Check 'https://www.github.com/wintersdeep/wintersdeep_postcode' for usage.")
| 47.148855
| 117
| 0.727273
|
from re import compile as compile_regex
from gettext import gettext as _
from wintersdeep_postcode.postcode import Postcode
from wintersdeep_postcode.exceptions.validation_fault import ValidationFault
class StandardPostcodeValidator(object):
    AreasWithOnlySingleDigitDistricts = []
    @classmethod
    def CheckAreasWithOnlySingleDigitDistricts(cls, postcode):
        impacted_by_rule = False
        if postcode.outward_district >= 10:
            single_digit_districts = cls.AreasWithOnlySingleDigitDistricts
            impacted_by_rule = postcode.outward_area in single_digit_districts
        return impacted_by_rule
    AreasWithOnlyDoubleDigitDistricts = []
    @classmethod
    def CheckAreasWithOnlyDoubleDigitDistricts(cls, postcode):
        impacted_by_rule = False
        if postcode.outward_district <= 9:
            double_digit_districts = cls.AreasWithOnlyDoubleDigitDistricts
            impacted_by_rule = postcode.outward_area in double_digit_districts
        return impacted_by_rule
    AreasWithDistrictZero = []
    @classmethod
    def CheckAreasWithDistrictZero(cls, postcode):
        impacted_by_rule = False
        if postcode.outward_district == 0:
            areas_with_district_zero = cls.AreasWithDistrictZero
            impacted_by_rule = not postcode.outward_area in areas_with_district_zero
        return impacted_by_rule
    AreasWithoutDistrictTen = []
    @classmethod
    def CheckAreasWithoutDistrictTen(cls, postcode):
        impacted_by_rule = False
        if postcode.outward_district == 10:
            areas_without_district_ten = cls.AreasWithoutDistrictTen
            impacted_by_rule = postcode.outward_area in areas_without_district_ten
        return impacted_by_rule
    AreasWithSubdistricts = {}
    @classmethod
    def CheckAreasWithSubdistricts(cls, postcode):
        impacted_by_rule = False
        if postcode.outward_subdistrict:
            areas_with_subdistricts = cls.AreasWithSubdistricts
            impacted_by_rule = not postcode.outward_area in areas_with_subdistricts
            if not impacted_by_rule:
                subdivided_districts_in_area = areas_with_subdistricts[postcode.outward_area]
                if subdivided_districts_in_area:
                    impacted_by_rule = not postcode.outward_district in subdivided_districts_in_area
        return impacted_by_rule
    @classmethod
    def CheckAreasWithSpecificSubdistricts(cls, postcode):
        impacted_by_rule = False
        if postcode.outward_subdistrict:
            areas_with_subdistricts = cls.AreasWithSubdistricts
            subdivided_districts_in_area = areas_with_subdistricts.get(postcode.outward_area, {})
            specific_subdistrict_codes = subdivided_districts_in_area.get(postcode.outward_district, None)
            impacted_by_rule = specific_subdistrict_codes and \
                not postcode.outward_subdistrict in specific_subdistrict_codes
        return impacted_by_rule
    FirstPositionExcludes = []
    @classmethod
    def CheckFirstPositionExcludes(cls, postcode):
        first_postion_char = postcode.outward_area[0]
        impacted_by_rule = first_postion_char in cls.FirstPositionExcludes
        return impacted_by_rule
    SecondPositionExcludes = []
    @classmethod
    def CheckSecondPositionExcludes(cls, postcode):
        impacted_by_rule = False
        if len(postcode.outward_area) > 1:
            second_postion_char = postcode.outward_area[1]
            impacted_by_rule = second_postion_char in cls.SecondPositionExcludes
        return impacted_by_rule
    SingleDigitAreaSubdistricts = []
    @classmethod
    def CheckSingleDigitAreaSubdistricts(cls, postcode):
        impacted_by_rule = False
        if postcode.outward_subdistrict:
            if len(postcode.outward_area) == 1:
                allowed_subdistricts = cls.SingleDigitAreaSubdistricts
                subdistrict = postcode.outward_subdistrict
                impacted_by_rule = not subdistrict in allowed_subdistricts
        return impacted_by_rule
    DoubleDigitAreaSubdistricts = []
    @classmethod
    def CheckDoubleDigitAreaSubdistricts(cls, postcode):
        impacted_by_rule = False
        if postcode.outward_subdistrict:
            if len(postcode.outward_area) == 2:
                allowed_subdistricts = cls.DoubleDigitAreaSubdistricts
                subdistrict = postcode.outward_subdistrict
                impacted_by_rule = not subdistrict in allowed_subdistricts
        return impacted_by_rule
    UnitExcludes = []
    @classmethod
    def CheckFirstUnitCharacterExcludes(cls, postcode):
        character = postcode.inward_unit[0]
        impacted_by_rule = character in cls.UnitExcludes
        return impacted_by_rule
    @classmethod
    def CheckSecondUnitCharacterExcludes(cls, postcode):
        character = postcode.inward_unit[1]
        impacted_by_rule = character in cls.UnitExcludes
        return impacted_by_rule
def load_validator_params_from_json():
    from json import load
    from os.path import dirname, join
json_configuration_file = join( dirname(__file__), "standard_postcode_validator.json" )
with open(json_configuration_file, 'r') as file_handle:
config_json = load(file_handle)
StandardPostcodeValidator.AreasWithDistrictZero = config_json['has-district-zero']
StandardPostcodeValidator.AreasWithoutDistrictTen = config_json['no-district-ten']
StandardPostcodeValidator.AreasWithOnlyDoubleDigitDistricts = config_json['double-digit-districts']
StandardPostcodeValidator.AreasWithOnlySingleDigitDistricts = config_json['single-digit-districts']
StandardPostcodeValidator.SingleDigitAreaSubdistricts = config_json['single-digit-area-subdistricts']
StandardPostcodeValidator.DoubleDigitAreaSubdistricts = config_json['double-digit-area-subdistricts']
StandardPostcodeValidator.SecondPositionExcludes = config_json['second-position-excludes']
StandardPostcodeValidator.FirstPositionExcludes = config_json['first-position-excludes']
StandardPostcodeValidator.UnitExcludes = config_json['unit-excludes']
subdivision_map = config_json["subdivided-districts"]
StandardPostcodeValidator.AreasWithSubdistricts = { k: {
int(k1): v1 for k1, v1 in v.items()
} for k, v in subdivision_map.items() }
load_validator_params_from_json()
if __name__ == "__main__":
.")
print(f"Check 'https://www.github.com/wintersdeep/wintersdeep_postcode' for usage.")
| true
| true
|
79045ab4b5e373ef0c67cc5edf88b799cf876f33
| 4,654
|
py
|
Python
|
cellularcaves.py
|
nmmarzano/CellularCaves.py
|
34135c0ae9260df18c9bdd723265122398893947
|
[
"MIT"
] | 3
|
2020-05-20T02:48:10.000Z
|
2022-01-13T12:28:28.000Z
|
cellularcaves.py
|
nmmarzano/CellularCaves.py
|
34135c0ae9260df18c9bdd723265122398893947
|
[
"MIT"
] | null | null | null |
cellularcaves.py
|
nmmarzano/CellularCaves.py
|
34135c0ae9260df18c9bdd723265122398893947
|
[
"MIT"
] | 1
|
2022-02-10T22:25:49.000Z
|
2022-02-10T22:25:49.000Z
|
import sys
import random
from collections import deque
def printGrid(grid, wallChar, emptyChar):
finalstr = ""
finalstr += "\n"
for i in range(len(grid[0])):
for j in range(len(grid)):
if grid[j][i]==1:
finalstr += wallChar
else:
finalstr += emptyChar
finalstr += "\n"
finalstr += "\n"
print(finalstr)
def makeGrid(width, height):
newgrid = [[0 for x in range(height)] for y in range(width)]
for i in range(len(newgrid)):
for j in range(len(newgrid[i])):
if i==0 or j==0 or i==len(newgrid)-1 or j==len(newgrid[0])-1:
newgrid[i][j]=1
return newgrid
def populateGrid(grid, chance):
for i in range(len(grid)): # reminder to test with: for index, value in enumerate(grid)
for j in range(len(grid[0])):
if(random.randint(0,100)<=chance): # test with list comprehension instead??
grid[i][j]=1
return grid
def automataIteration(grid, minCount, makePillars):
new_grid = [row[:] for row in grid]
for i in range(1, len(grid)-1):
for j in range(1, len(grid[0])-1):
count = 0
for k in range(-1,2):
for l in range(-1,2):
if grid[i+k][j+l]==1:
count+=1
if count>=minCount or (count==0 and makePillars==1):
new_grid[i][j]=1
else:
new_grid[i][j]=0
return new_grid
def floodFindEmpty(grid, tries, goal):
times_remade = 0
percentage = 0
while times_remade<tries and percentage<goal:
copy_grid = [row[:] for row in grid]
open_count = 0
times_remade+=1
unvisited = deque([])
new_grid = [[1 for x in range(len(grid[0]))] for y in range(len(grid))]
#find a random empty space, hope it's the biggest cave
randx = random.randint(0,len(grid)-1)
randy = random.randint(0,len(grid[0])-1)
while(grid[randx][randy] == 1):
randx = random.randint(0,len(grid)-1)
randy = random.randint(0,len(grid[0])-1)
unvisited.append([randx, randy])
while len(unvisited)>0:
current = unvisited.popleft()
new_grid[current[0]][current[1]] = 0
for k in range(-1,2):
for l in range(-1,2):
if current[0]+k >= 0 and current[0]+k<len(grid) and current[1]+l >= 0 and current[1]+l < len(grid[0]): #if we're not out of bounds
if copy_grid[current[0]+k][current[1]+l]==0: #if it's an empty space
copy_grid[current[0]+k][current[1]+l]=2 #mark visited
open_count += 1
unvisited.append([current[0]+k, current[1]+l])
percentage = open_count*100/(len(grid)*len(grid[0]))
print("counted {0}, {1}%...".format(open_count,percentage))
return new_grid, percentage
def main():
width = int(input("Enter the width: "))
height = int(input("Enter the height: "))
#chance = 100 - int(input("Enter the percentage chance of randomly generating a wall: "))
#count = int(input("Enter the min count of surrounding walls for the automata rules: "))
chance = 40
count = 5
iterations = int(input("Enter the number of regular iterations: "))
pillarIterations = int(input("Enter the number of pillar-generating iterations: "))
floodTries = 5
goalPercentage = 30 # above 30% seems to be a good target
grid = makeGrid(width, height)
print("\nRandomly populated grid:")
grid = populateGrid(grid, chance)
printGrid(grid, '# ', '· ')
for i in range(pillarIterations):
print("{0} iteration(s) of automata with pillars:".format(i+1))
grid = automataIteration(grid, count, 1)
printGrid(grid, '# ', '· ')
for i in range(iterations):
print("{0} iteration(s) of regular automata:".format(i+1))
grid = automataIteration(grid, count, 0)
printGrid(grid, '# ', '· ')
print("\nAfter flood algorithm to find the biggest cave:")
grid, percentage = floodFindEmpty(grid, floodTries, goalPercentage)
if percentage<goalPercentage:
print("Failed to produce a big enough cave after {0} tries...".format(floodTries))
else:
print("Percentage of open space: {0}%".format(percentage))
printGrid(grid, '# ', '· ')
# self reminder to try checking map size https://stackoverflow.com/questions/1331471/in-memory-size-of-a-python-structure
print("")
if __name__ == "__main__":
main()
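# --- Hedged example (editor's addition, not part of the original script) ---
# automataIteration implements a simple smoothing rule: an interior cell
# becomes a wall when at least minCount of the nine cells in its 3x3
# neighbourhood (itself included) are walls. A non-interactive sketch with
# assumed parameter values:
#
#     demo = populateGrid(makeGrid(20, 10), 40)
#     for _ in range(3):
#         demo = automataIteration(demo, 5, 0)
#     printGrid(demo, '# ', '· ')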
| 37.532258
| 150
| 0.572841
|
import sys
import random
from collections import deque
def printGrid(grid, wallChar, emptyChar):
finalstr = ""
finalstr += "\n"
for i in range(len(grid[0])):
for j in range(len(grid)):
if grid[j][i]==1:
finalstr += wallChar
else:
finalstr += emptyChar
finalstr += "\n"
finalstr += "\n"
print(finalstr)
def makeGrid(width, height):
newgrid = [[0 for x in range(height)] for y in range(width)]
for i in range(len(newgrid)):
for j in range(len(newgrid[i])):
if i==0 or j==0 or i==len(newgrid)-1 or j==len(newgrid[0])-1:
newgrid[i][j]=1
return newgrid
def populateGrid(grid, chance):
for i in range(len(grid)):
for j in range(len(grid[0])):
if(random.randint(0,100)<=chance):
grid[i][j]=1
return grid
def automataIteration(grid, minCount, makePillars):
new_grid = [row[:] for row in grid]
for i in range(1, len(grid)-1):
for j in range(1, len(grid[0])-1):
count = 0
for k in range(-1,2):
for l in range(-1,2):
if grid[i+k][j+l]==1:
count+=1
if count>=minCount or (count==0 and makePillars==1):
new_grid[i][j]=1
else:
new_grid[i][j]=0
return new_grid
def floodFindEmpty(grid, tries, goal):
times_remade = 0
percentage = 0
while times_remade<tries and percentage<goal:
copy_grid = [row[:] for row in grid]
open_count = 0
times_remade+=1
unvisited = deque([])
new_grid = [[1 for x in range(len(grid[0]))] for y in range(len(grid))]
randx = random.randint(0,len(grid)-1)
randy = random.randint(0,len(grid[0])-1)
while(grid[randx][randy] == 1):
randx = random.randint(0,len(grid)-1)
randy = random.randint(0,len(grid[0])-1)
unvisited.append([randx, randy])
while len(unvisited)>0:
current = unvisited.popleft()
new_grid[current[0]][current[1]] = 0
for k in range(-1,2):
for l in range(-1,2):
if current[0]+k >= 0 and current[0]+k<len(grid) and current[1]+l >= 0 and current[1]+l < len(grid[0]): #if we're not out of bounds
if copy_grid[current[0]+k][current[1]+l]==0:
copy_grid[current[0]+k][current[1]+l]=2 #mark visited
open_count += 1
unvisited.append([current[0]+k, current[1]+l])
percentage = open_count*100/(len(grid)*len(grid[0]))
print("counted {0}, {1}%...".format(open_count,percentage))
return new_grid, percentage
def main():
width = int(input("Enter the width: "))
height = int(input("Enter the height: "))
#chance = 100 - int(input("Enter the percentage chance of randomly generating a wall: "))
#count = int(input("Enter the min count of surrounding walls for the automata rules: "))
chance = 40
count = 5
iterations = int(input("Enter the number of regular iterations: "))
pillarIterations = int(input("Enter the number of pillar-generating iterations: "))
floodTries = 5
goalPercentage = 30 # above 30% seems to be a good target
grid = makeGrid(width, height)
print("\nRandomly populated grid:")
grid = populateGrid(grid, chance)
    printGrid(grid, '# ', '· ')
for i in range(pillarIterations):
print("{0} iteration(s) of automata with pillars:".format(i+1))
grid = automataIteration(grid, count, 1)
        printGrid(grid, '# ', '· ')
for i in range(iterations):
print("{0} iteration(s) of regular automata:".format(i+1))
grid = automataIteration(grid, count, 0)
        printGrid(grid, '# ', '· ')
print("\nAfter flood algorithm to find the biggest cave:")
grid, percentage = floodFindEmpty(grid, floodTries, goalPercentage)
if percentage<goalPercentage:
print("Failed to produce a big enough cave after {0} tries...".format(floodTries))
else:
print("Percentage of open space: {0}%".format(percentage))
        printGrid(grid, '# ', '· ')
# self reminder to try checking map size https://stackoverflow.com/questions/1331471/in-memory-size-of-a-python-structure
print("")
if __name__ == "__main__":
main()
| true
| true
|
79045b035efe97fd4687cc0734f65bd708a1ae46
| 6,691
|
py
|
Python
|
packages/python/plotly/plotly/validators/splom/_marker.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/splom/_marker.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/splom/_marker.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="marker", parent_name="splom", **kwargs):
super(MarkerValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Marker"),
data_docs=kwargs.pop(
"data_docs",
"""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if in `marker.color`is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color`is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
only if in `marker.color`is set to a numerical
array. Value should have the same units as in
`marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
            Sets the marker color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.splom.marker.Color
Bar` instance or dict with compatible
properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use`marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may
be a palette name string of the following list:
Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
line
:class:`plotly.graph_objects.splom.marker.Line`
instance or dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud
for `opacity`.
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.color`is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
in `marker.color`is set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px)
of the rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the
data in `size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points.
Use with `sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
symbol
Sets the marker symbol type. Adding 100 is
equivalent to appending "-open" to a symbol
name. Adding 200 is equivalent to appending
"-dot" to a symbol name. Adding 300 is
equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud
for `symbol`.
""",
),
**kwargs,
)
| 47.792857
| 76
| 0.54521
|
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="marker", parent_name="splom", **kwargs):
super(MarkerValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Marker"),
data_docs=kwargs.pop(
"data_docs",
"""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if in `marker.color`is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color`is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
only if in `marker.color`is set to a numerical
array. Value should have the same units as in
`marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
            Sets the marker color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.splom.marker.Color
Bar` instance or dict with compatible
properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use`marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may
be a palette name string of the following list:
Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
line
:class:`plotly.graph_objects.splom.marker.Line`
instance or dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud
for `opacity`.
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.color`is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
in `marker.color`is set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px)
of the rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the
data in `size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points.
Use with `sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
symbol
Sets the marker symbol type. Adding 100 is
equivalent to appending "-open" to a symbol
name. Adding 200 is equivalent to appending
"-dot" to a symbol name. Adding 300 is
equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud
for `symbol`.
""",
),
**kwargs,
)
| true
| true
|
79045be7ad9a966185639451d30dc7256170507a
| 581
|
py
|
Python
|
wildlifecompliance/migrations/0147_returntype_return_type.py
|
preranaandure/wildlifecompliance
|
bc19575f7bccf7e19adadbbaf5d3eda1d1aee4b5
|
[
"Apache-2.0"
] | 1
|
2020-12-07T17:12:40.000Z
|
2020-12-07T17:12:40.000Z
|
wildlifecompliance/migrations/0147_returntype_return_type.py
|
preranaandure/wildlifecompliance
|
bc19575f7bccf7e19adadbbaf5d3eda1d1aee4b5
|
[
"Apache-2.0"
] | 14
|
2020-01-08T08:08:26.000Z
|
2021-03-19T22:59:46.000Z
|
wildlifecompliance/migrations/0147_returntype_return_type.py
|
preranaandure/wildlifecompliance
|
bc19575f7bccf7e19adadbbaf5d3eda1d1aee4b5
|
[
"Apache-2.0"
] | 15
|
2020-01-08T08:02:28.000Z
|
2021-11-03T06:48:32.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-03-18 04:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0146_auto_20190308_1626'),
]
operations = [
migrations.AddField(
model_name='returntype',
name='return_type',
field=models.CharField(choices=[('sheet', 'Sheet'), ('question', 'Question'), ('data', 'Data')], default='sheet', max_length=30, verbose_name='Type'),
),
]
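# --- Hedged note (editor's addition, not part of the generated migration) ---
# The AddField operation above corresponds to a model field roughly like the
# following; the surrounding model body is an assumption reconstructed from
# the operation, not taken from the app's models.py:
#
#     class ReturnType(models.Model):
#         return_type = models.CharField(
#             choices=[('sheet', 'Sheet'), ('question', 'Question'),
#                      ('data', 'Data')],
#             default='sheet', max_length=30, verbose_name='Type')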
| 27.666667
| 162
| 0.628227
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0146_auto_20190308_1626'),
]
operations = [
migrations.AddField(
model_name='returntype',
name='return_type',
field=models.CharField(choices=[('sheet', 'Sheet'), ('question', 'Question'), ('data', 'Data')], default='sheet', max_length=30, verbose_name='Type'),
),
]
| true
| true
|
79045c37c7d32490269243291686615239285ef8
| 7,534
|
py
|
Python
|
test/functional/txn_clone.py
|
plc-ultima/plcu
|
d99eb669ac339c4d0dcedb77bc68ccd0dfe29d4f
|
[
"MIT"
] | 1
|
2022-03-28T02:13:10.000Z
|
2022-03-28T02:13:10.000Z
|
test/functional/txn_clone.py
|
plc-ultima/plcu
|
d99eb669ac339c4d0dcedb77bc68ccd0dfe29d4f
|
[
"MIT"
] | null | null | null |
test/functional/txn_clone.py
|
plc-ultima/plcu
|
d99eb669ac339c4d0dcedb77bc68ccd0dfe29d4f
|
[
"MIT"
] | 2
|
2022-03-26T23:59:01.000Z
|
2022-03-31T13:27:08.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs."""
import struct
from test_framework.test_framework import BitcoinTestFramework
from test_framework.mininode import *
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
super(TxnMallTest, self).setup_network()
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
def run_test(self):
miner_reward = Decimal('0.005')
# All nodes should start with starting_balance:
starting_balance = BASE_CB_AMOUNT * 25
for i in range(self.num_nodes):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
self.nodes[0].settxfee(.001)
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
burn_foo = -find_burned_amount_in_tx(fund_foo_tx)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
burn_bar = -find_burned_amount_in_tx(fund_bar_tx)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"] - burn_foo - burn_bar)
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# Send tx1, and another transaction tx2 that won't be cloned
txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
outputs_count = 4 # dest, change, burn1, burn2
assert_equal(len(rawtx1['vout']), outputs_count)
tx1_cl = CTransaction()
tx1_cl.nVersion = 2
tx1_cl.vin = [CTxIn(COutPoint(int(rawtx1['vin'][0]['txid'], 16), rawtx1['vin'][0]['vout']), b'', 0xFFFFFFFE)]
for out in rawtx1['vout']:
tx1_cl.vout.append(CTxOut(ToSatoshi(out['value']), hex_str_to_bytes(out['scriptPubKey']['hex'])))
tx1_cl.nLockTime = rawtx1['locktime']
clone_raw = bytes_to_hex_str(tx1_cl.serialize())
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
# Have node0 mine a block, if requested:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50 PLCU for another
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"] - burn_foo - burn_bar
if self.options.mine_block: expected += miner_reward
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
burned1 = -find_burned_amount_in_tx(tx1)
burned2 = -find_burned_amount_in_tx(tx2)
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]) - burned1 - burned2)
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, + miner_reward * 2 PLCU for 2 matured,
# less possible orphaned matured subsidy
expected += miner_reward * 2
if (self.options.mine_block):
expected -= miner_reward
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*", 0), expected)
# Check node0's individual account balances.
# "foo" should have been debited by the equivalent clone of tx1
assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"])
# "bar" should have been debited by (possibly unconfirmed) tx2
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
# "" should have starting balance, less funding txes, plus subsidies
assert_equal(self.nodes[0].getbalance("", 0), starting_balance
- 1219
+ fund_foo_tx["fee"] - burn_foo
- 29
+ fund_bar_tx["fee"] - burn_bar
+ miner_reward * 2)
# Node1's "from0" account balance
burned1 = -find_burned_amount_in_tx(tx1)
burned2 = -find_burned_amount_in_tx(tx2)
assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]) - burned1 - burned2)
if __name__ == '__main__':
TxnMallTest().main()
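# Why the clone gets a different txid (a sketch of the mechanism, not asserted
# by the test itself): re-signing the same inputs and outputs with the
# "ALL|ANYONECANPAY" sighash type yields a different scriptSig, and a txid is
# the hash of the whole serialized transaction including scriptSigs, so the
# clone spends the same coins under a different id. This is the classic
# pre-segwit malleability the test exercises.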
| 47.683544
| 117
| 0.623706
|
import struct
from test_framework.test_framework import BitcoinTestFramework
from test_framework.mininode import *
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
super(TxnMallTest, self).setup_network()
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
def run_test(self):
miner_reward = Decimal('0.005')
starting_balance = BASE_CB_AMOUNT * 25
for i in range(self.num_nodes):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("")
self.nodes[0].settxfee(.001)
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
burn_foo = -find_burned_amount_in_tx(fund_foo_tx)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
burn_bar = -find_burned_amount_in_tx(fund_bar_tx)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"] - burn_foo - burn_bar)
node1_address = self.nodes[1].getnewaddress("from0")
txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
outputs_count = 4 # dest, change, burn1, burn2
assert_equal(len(rawtx1['vout']), outputs_count)
tx1_cl = CTransaction()
tx1_cl.nVersion = 2
tx1_cl.vin = [CTxIn(COutPoint(int(rawtx1['vin'][0]['txid'], 16), rawtx1['vin'][0]['vout']), b'', 0xFFFFFFFE)]
for out in rawtx1['vout']:
tx1_cl.vout.append(CTxOut(ToSatoshi(out['value']), hex_str_to_bytes(out['scriptPubKey']['hex'])))
tx1_cl.nLockTime = rawtx1['locktime']
clone_raw = bytes_to_hex_str(tx1_cl.serialize())
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"] - burn_foo - burn_bar
if self.options.mine_block: expected += miner_reward
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
burned1 = -find_burned_amount_in_tx(tx1)
burned2 = -find_burned_amount_in_tx(tx2)
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]) - burned1 - burned2)
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
self.nodes[2].generate(1)
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1)
sync_blocks(self.nodes)
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# less possible orphaned matured subsidy
expected += miner_reward * 2
if (self.options.mine_block):
expected -= miner_reward
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*", 0), expected)
# Check node0's individual account balances.
assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
assert_equal(self.nodes[0].getbalance("", 0), starting_balance
- 1219
+ fund_foo_tx["fee"] - burn_foo
- 29
+ fund_bar_tx["fee"] - burn_bar
+ miner_reward * 2)
burned1 = -find_burned_amount_in_tx(tx1)
burned2 = -find_burned_amount_in_tx(tx2)
assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]) - burned1 - burned2)
if __name__ == '__main__':
TxnMallTest().main()
| true
| true
|
79045c5821bbd8e25e9af3fc644f6d43a278d730
| 229
|
py
|
Python
|
toBusUsege/service_module/service_core/__init__.py
|
sherry0429/tobus
|
aa694024e18bc977c4f8c45dda4b2c1708aaef06
|
[
"Apache-2.0"
] | 2
|
2018-01-30T02:28:28.000Z
|
2018-01-30T02:28:32.000Z
|
toBusUsege/service_module/service_core/__init__.py
|
sherry0429/tobus
|
aa694024e18bc977c4f8c45dda4b2c1708aaef06
|
[
"Apache-2.0"
] | null | null | null |
toBusUsege/service_module/service_core/__init__.py
|
sherry0429/tobus
|
aa694024e18bc977c4f8c45dda4b2c1708aaef06
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 tianyou pan <sherry0429 at SOAPython>
"""
from engine import ServiceEngineModule
from template import ServiceParamTemplate
__all__ = ['ServiceEngineModule', 'ServiceParamTemplate']
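# A minimal usage sketch (hypothetical caller, assuming the package is imported
# as service_module.service_core per the repo layout above):
#
#     from service_module.service_core import *
#     # the star-import binds exactly the two names listed in __all__:
#     print(ServiceEngineModule, ServiceParamTemplate)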
| 25.444444
| 57
| 0.759825
|
from engine import ServiceEngineModule
from template import ServiceParamTemplate
__all__ = ['ServiceEngineModule', 'ServiceParamTemplate']
| true
| true
|
79045c662b4bb442f9bac1c70677d6b9400c64e9
| 6,563
|
py
|
Python
|
samples/openapi3/client/petstore/python-experimental/petstore_api/model/outer_enum_integer_default_value.py
|
jetbridge/openapi-generator
|
e4701ed5288ee6b7015ff5a88c60d320c9af8ac2
|
[
"Apache-2.0"
] | null | null | null |
samples/openapi3/client/petstore/python-experimental/petstore_api/model/outer_enum_integer_default_value.py
|
jetbridge/openapi-generator
|
e4701ed5288ee6b7015ff5a88c60d320c9af8ac2
|
[
"Apache-2.0"
] | null | null | null |
samples/openapi3/client/petstore/python-experimental/petstore_api/model/outer_enum_integer_default_value.py
|
jetbridge/openapi-generator
|
e4701ed5288ee6b7015ff5a88c60d320c9af8ac2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class OuterEnumIntegerDefaultValue(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self; this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
}
@cached_property
def discriminator():
return None
attribute_map = {
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""OuterEnumIntegerDefaultValue - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in six.iteritems(kwargs):
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 38.380117
| 174
| 0.590279
|
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class OuterEnumIntegerDefaultValue(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self; this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
}
@cached_property
def discriminator():
return None
attribute_map = {
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""OuterEnumIntegerDefaultValue - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in six.iteritems(kwargs):
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| false
| true
|
79045cac6b1256f61939708bfa576d89374ae0e4
| 2,243
|
py
|
Python
|
src/models.py
|
DhenryD/CrowdCount-mcnn
|
a44bcbfd25ca681f7b57e2f92f10b06f602dd93f
|
[
"MIT"
] | 2
|
2019-06-11T02:16:01.000Z
|
2019-09-20T12:01:12.000Z
|
src/models.py
|
DhenryD/CrowdCount-mcnn
|
a44bcbfd25ca681f7b57e2f92f10b06f602dd93f
|
[
"MIT"
] | 1
|
2019-05-16T08:05:03.000Z
|
2019-05-16T08:09:53.000Z
|
src/models.py
|
DhenryD/CrowdCount-mcnn
|
a44bcbfd25ca681f7b57e2f92f10b06f602dd93f
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from src.network import Conv2d
class MCNN(nn.Module):
def __init__(self, bn=False):
super(MCNN, self).__init__()
self.branch1 = nn.Sequential(Conv2d(1, 16, 9, same_padding=True, bn=bn),
nn.MaxPool2d(2),
Conv2d(16, 32, 7, same_padding=True, bn=bn),
nn.MaxPool2d(2),
Conv2d(32, 16, 7, same_padding=True, bn=bn),
Conv2d(16, 8, 7, same_padding=True, bn=bn))
self.branch2 = nn.Sequential(Conv2d(1, 20, 7, same_padding=True, bn=bn),
nn.MaxPool2d(2),
Conv2d(20, 40, 5, same_padding=True, bn=bn),
nn.MaxPool2d(2),
Conv2d(40, 20, 5, same_padding=True, bn=bn),
Conv2d(20, 10, 5, same_padding=True, bn=bn))
self.branch3 = nn.Sequential(Conv2d(1, 24, 5, same_padding=True, bn=bn),
nn.MaxPool2d(2),
Conv2d(24, 48, 3, same_padding=True, bn=bn),
nn.MaxPool2d(2),
Conv2d(48, 24, 3, same_padding=True, bn=bn),
Conv2d(24, 12, 3, same_padding=True, bn=bn))
self.branch4 = nn.Sequential(Conv2d(1, 28, 3, same_padding=True, bn=bn),
nn.MaxPool2d(2),
Conv2d(28, 56, 1, same_padding=True, bn=bn),
nn.MaxPool2d(2),
Conv2d(56, 28, 1, same_padding=True, bn=bn),
Conv2d(28, 14, 1, same_padding=True, bn=bn))
self.fuse = nn.Sequential(Conv2d(44, 1, 1, same_padding=True, bn=bn))
def forward(self, im_data):
x1 = self.branch1(im_data)
x2 = self.branch2(im_data)
x3 = self.branch3(im_data)
x4 = self.branch4(im_data)
x = torch.cat((x1, x2, x3, x4), 1)
x = self.fuse(x)
return x
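# A minimal smoke-test sketch (assumes src.network.Conv2d from this repo is
# importable; the 256x256 input size is an arbitrary example). Each branch
# applies two MaxPool2d(2) layers, so the fused density map is downsampled
# 4x in each dimension, and the four branch outputs (8+10+12+14 channels)
# are concatenated into the 44 channels the fuse layer expects.
if __name__ == "__main__":
    net = MCNN(bn=False)
    dummy = torch.randn(1, 1, 256, 256)  # one single-channel (grayscale) image
    density = net(dummy)
    print(density.shape)         # torch.Size([1, 1, 64, 64])
    print(float(density.sum()))  # estimated crowd count = integral of the map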
| 45.77551
| 81
| 0.436469
|
import torch
import torch.nn as nn
from src.network import Conv2d
class MCNN(nn.Module):
def __init__(self, bn=False):
super(MCNN, self).__init__()
self.branch1 = nn.Sequential(Conv2d(1, 16, 9, same_padding=True, bn=bn),
nn.MaxPool2d(2),
Conv2d(16, 32, 7, same_padding=True, bn=bn),
nn.MaxPool2d(2),
Conv2d(32, 16, 7, same_padding=True, bn=bn),
Conv2d(16, 8, 7, same_padding=True, bn=bn))
self.branch2 = nn.Sequential(Conv2d(1, 20, 7, same_padding=True, bn=bn),
nn.MaxPool2d(2),
Conv2d(20, 40, 5, same_padding=True, bn=bn),
nn.MaxPool2d(2),
Conv2d(40, 20, 5, same_padding=True, bn=bn),
Conv2d(20, 10, 5, same_padding=True, bn=bn))
self.branch3 = nn.Sequential(Conv2d(1, 24, 5, same_padding=True, bn=bn),
nn.MaxPool2d(2),
Conv2d(24, 48, 3, same_padding=True, bn=bn),
nn.MaxPool2d(2),
Conv2d(48, 24, 3, same_padding=True, bn=bn),
Conv2d(24, 12, 3, same_padding=True, bn=bn))
self.branch4 = nn.Sequential(Conv2d(1, 28, 3, same_padding=True, bn=bn),
nn.MaxPool2d(2),
Conv2d(28, 56, 1, same_padding=True, bn=bn),
nn.MaxPool2d(2),
Conv2d(56, 28, 1, same_padding=True, bn=bn),
Conv2d(28, 14, 1, same_padding=True, bn=bn))
self.fuse = nn.Sequential(Conv2d(44, 1, 1, same_padding=True, bn=bn))
def forward(self, im_data):
x1 = self.branch1(im_data)
x2 = self.branch2(im_data)
x3 = self.branch3(im_data)
x4 = self.branch4(im_data)
x = torch.cat((x1, x2, x3, x4), 1)
x = self.fuse(x)
return x
| true
| true
|
79045cb5ac0082d3e9a772647c2cb6fbc430de27
| 857
|
py
|
Python
|
main.py
|
kymotsujason/crossybot
|
68f585ee21b68394e1b09a63f39f8d2b54ac5f0c
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
kymotsujason/crossybot
|
68f585ee21b68394e1b09a63f39f8d2b54ac5f0c
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
kymotsujason/crossybot
|
68f585ee21b68394e1b09a63f39f8d2b54ac5f0c
|
[
"Apache-2.0"
] | null | null | null |
import cv2
from PIL import ImageGrab
import numpy as np
def main():
while True:
# bbox specifies specific region (bbox= x,y,width,height)
img = ImageGrab.grab(bbox=(0, 40, 1075, 640))
vanilla = img_np = np.array(img)
img_np = np.array(img)
gray = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(
binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
image = cv2.drawContours(img_np, contours, -1, (0, 255, 0), 2)
cv2.imshow("test", image)
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
print("test")
break
else:
cv2.waitKey(1)
# cv2.waitKey(0)
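# A tiny self-contained sketch of the THRESH_BINARY_INV step used above:
# pixels at or below the threshold of 100 become 255 (foreground) and brighter
# pixels become 0, so findContours traces dark shapes on the light background.
def _threshold_demo():
    gradient = np.tile(np.arange(256, dtype=np.uint8), (8, 1))  # synthetic grayscale ramp
    _, inv = cv2.threshold(gradient, 100, 255, cv2.THRESH_BINARY_INV)
    assert inv[0, 50] == 255 and inv[0, 200] == 0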
if __name__ == "__main__":
main()
| 29.551724
| 72
| 0.57993
|
import cv2
from PIL import ImageGrab
import numpy as np
def main():
while True:
img = ImageGrab.grab(bbox=(0, 40, 1075, 640))
vanilla = img_np = np.array(img)
img_np = np.array(img)
gray = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(
binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
image = cv2.drawContours(img_np, contours, -1, (0, 255, 0), 2)
cv2.imshow("test", image)
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
print("test")
break
else:
cv2.waitKey(1)
if __name__ == "__main__":
main()
| true
| true
|
79045cc5021493b8d26f6e1ca37c30e05ff0efa6
| 5,115
|
py
|
Python
|
import_and_model.py
|
kimhyuns91/bird_call
|
eea20c6e305a2ac322a94f90075d489742e7295c
|
[
"Apache-2.0"
] | 1
|
2020-10-21T20:45:25.000Z
|
2020-10-21T20:45:25.000Z
|
import_and_model.py
|
kimhyuns91/bird_call
|
eea20c6e305a2ac322a94f90075d489742e7295c
|
[
"Apache-2.0"
] | null | null | null |
import_and_model.py
|
kimhyuns91/bird_call
|
eea20c6e305a2ac322a94f90075d489742e7295c
|
[
"Apache-2.0"
] | 1
|
2020-12-12T03:58:01.000Z
|
2020-12-12T03:58:01.000Z
|
import pandas as pd
import numpy as np
import wave
from scipy.io import wavfile
import os
import librosa
import pydub
import ffmpeg
from librosa.feature import melspectrogram
import warnings
from sklearn.utils import shuffle
from sklearn.utils import class_weight
from PIL import Image
import sklearn
import sklearn.preprocessing  # "import sklearn" alone does not load the preprocessing submodule used below
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras import Input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Flatten, Dropout, Activation
from tensorflow.keras.layers import BatchNormalization, GlobalAveragePooling2D
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Dense, Flatten, Dropout, Activation, LSTM, SimpleRNN, Conv1D, Input, BatchNormalization, GlobalAveragePooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import EfficientNetB0
from keras.models import load_model
import boto3
import botocore
def model_input():
# Load the trained model
model = load_model("best_model.h5")
#Access S3 Bucket and Download the audio file
BUCKET_NAME = 'thunderstruck-duck' # replace with your bucket name
KEY = "sample_mp3.mp3" # replace with your object key
s3 = boto3.client('s3',
aws_access_key_id='AKIAISITTOGCJRNF46HQ',
aws_secret_access_key= 'bq/VRAme7BxDMqf3hgEMLZdrJNVvrtdQ4VmoGAdB',
)
BUCKET_NAME = "thunderstruck-duck"
try:
s3.download_file(BUCKET_NAME, KEY, "sample_mp3.mp3")
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
# else:
# raise
#Load the audio data using librosa
wave_data, wave_rate = librosa.load("sample_mp3.mp3")
wave_data, _ = librosa.effects.trim(wave_data)
#only take 5s samples and add them to the dataframe
song_sample = []
sample_length = 5*wave_rate
#The variable below is chosen mainly to create a 216x216 image
N_mels=216
for idx in range(0,len(wave_data),sample_length):
song_sample = wave_data[idx:idx+sample_length]
if len(song_sample)>=sample_length:
mel = melspectrogram(song_sample, n_mels=N_mels)
db = librosa.power_to_db(mel)
normalised_db = sklearn.preprocessing.minmax_scale(db)
filename = "sample_mel.tif"
db_array = (np.asarray(normalised_db)*255).astype(np.uint8)
db_image = Image.fromarray(np.array([db_array, db_array, db_array]).T)
db_image.save("{}{}".format("upload_mel/",filename))
#Create a DF that will take the created Melspectogram directory
data_df = pd.DataFrame([{'bird': "sample bird", 'song_sample': f"/app/upload_mel/{filename}"}])
# Users/HyunsooKim/Desktop/Boot_Camp/Homework/BIRD_CALL/upload_mel/{filename}"}])
#Compile the model
callbacks = [ReduceLROnPlateau(monitor='val_loss', patience=2, verbose=1, factor=0.7),
EarlyStopping(monitor='val_loss', patience=5),
ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', save_best_only=True)]
model.compile(loss="categorical_crossentropy", optimizer='adam')
#Since we only have 1 melspectogram passing into the model, set batch size to 1 and the size of that image so the model can take the image file.
validation_batch_size_full = 1
target_size = (216,216)
train_datagen_full = ImageDataGenerator(
rescale=1. / 255
)
#Pass the columns into the model
validation_datagen_full = ImageDataGenerator(rescale=1. / 255)
validation_generator_full = validation_datagen_full.flow_from_dataframe(
dataframe = data_df,
x_col='song_sample',
y_col='bird',
directory='/',
target_size=target_size,
shuffle=False,
batch_size=validation_batch_size_full,
class_mode='categorical')
#Run the model
preds = model.predict_generator(validation_generator_full)
#We want to find the "INDEX" of maximum value within the pred, a numpy array. Use np.argmax and index into 0th element.
result = np.argmax(preds[0])
#load in the index dataframe, so we can find the name of the bird that matches the index of our result
index_df = pd.read_csv('xeno-canto_ca-nv_index.csv')
#rename the english_cname to birds for better access and clarity
bird_list = pd.DataFrame(index_df.english_cname.unique())
bird_list.columns = ["birds"]
#We are almost done. Save the percentage and the name of the bird into a variable and print it out!
percentage = preds[0][result]
Name_of_bird = bird_list['birds'][result]
print(f"This bird is {percentage} likely {Name_of_bird}")
final_data = {"likelihood": percentage, "name_of_bird": Name_of_bird}
return final_data
if __name__ == "__main__":
print(model_input())
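# A back-of-envelope sketch of why N_mels=216 yields a roughly square image
# (assumes librosa defaults: sr=22050 from librosa.load, hop_length=512 and
# center=True, so frames = 1 + n_samples // hop_length):
#
#     1 + (5 * 22050) // 512 == 216 time frames, matching the 216 mel bands,
#     hence the ~216x216 melspectrogram image saved for the model above.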
| 39.045802
| 148
| 0.714761
|
import pandas as pd
import numpy as np
import wave
from scipy.io import wavfile
import os
import librosa
import pydub
import ffmpeg
from librosa.feature import melspectrogram
import warnings
from sklearn.utils import shuffle
from sklearn.utils import class_weight
from PIL import Image
import sklearn
import sklearn.preprocessing
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras import Input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Flatten, Dropout, Activation
from tensorflow.keras.layers import BatchNormalization, GlobalAveragePooling2D
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Dense, Flatten, Dropout, Activation, LSTM, SimpleRNN, Conv1D, Input, BatchNormalization, GlobalAveragePooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import EfficientNetB0
from keras.models import load_model
import boto3
import botocore
def model_input():
model = load_model("best_model.h5")
BUCKET_NAME = 'thunderstruck-duck'
KEY = "sample_mp3.mp3"
s3 = boto3.client('s3',
aws_access_key_id='AKIAISITTOGCJRNF46HQ',
aws_secret_access_key= 'bq/VRAme7BxDMqf3hgEMLZdrJNVvrtdQ4VmoGAdB',
)
BUCKET_NAME = "thunderstruck-duck"
try:
s3.download_file(BUCKET_NAME, KEY, "sample_mp3.mp3")
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
wave_data, wave_rate = librosa.load("sample_mp3.mp3")
wave_data, _ = librosa.effects.trim(wave_data)
song_sample = []
sample_length = 5*wave_rate
N_mels=216
for idx in range(0,len(wave_data),sample_length):
song_sample = wave_data[idx:idx+sample_length]
if len(song_sample)>=sample_length:
mel = melspectrogram(song_sample, n_mels=N_mels)
db = librosa.power_to_db(mel)
normalised_db = sklearn.preprocessing.minmax_scale(db)
filename = "sample_mel.tif"
db_array = (np.asarray(normalised_db)*255).astype(np.uint8)
db_image = Image.fromarray(np.array([db_array, db_array, db_array]).T)
db_image.save("{}{}".format("upload_mel/",filename))
data_df = pd.DataFrame([{'bird': "sample bird", 'song_sample': f"/app/upload_mel/{filename}"}])
#Compile the model
callbacks = [ReduceLROnPlateau(monitor='val_loss', patience=2, verbose=1, factor=0.7),
EarlyStopping(monitor='val_loss', patience=5),
ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', save_best_only=True)]
model.compile(loss="categorical_crossentropy", optimizer='adam')
#Since we only have 1 melspectogram passing into the model, set batch size to 1 and the size of that image so the model can take the image file.
validation_batch_size_full = 1
target_size = (216,216)
train_datagen_full = ImageDataGenerator(
rescale=1. / 255
)
#Pass the columns into the model
validation_datagen_full = ImageDataGenerator(rescale=1. / 255)
validation_generator_full = validation_datagen_full.flow_from_dataframe(
dataframe = data_df,
x_col='song_sample',
y_col='bird',
directory='/',
target_size=target_size,
shuffle=False,
batch_size=validation_batch_size_full,
class_mode='categorical')
#Run the model
preds = model.predict_generator(validation_generator_full)
#We want to find the "INDEX" of maximum value within the pred, a numpy array. Use np.argmax and index into 0th element.
result = np.argmax(preds[0])
#load in the index dataframe, so we can find the name of the bird that matches the index of our result
index_df = pd.read_csv('xeno-canto_ca-nv_index.csv')
#rename the english_cname to birds for better access and clarity
bird_list = pd.DataFrame(index_df.english_cname.unique())
bird_list.columns = ["birds"]
#We are almost done. Save the percentage and the name of the bird into a variable and print it out!
percentage = preds[0][result]
Name_of_bird = bird_list['birds'][result]
print(f"This bird is {percentage} likely {Name_of_bird}")
final_data = {"likelihood": percentage, "name_of_bird": Name_of_bird}
return final_data
if __name__ == "__main__":
print(model_input())
| true
| true
|
79045d44774a4846bb1582b88e414a7c94219c8a
| 7,156
|
py
|
Python
|
bioconda_utils/githubhandler.py
|
sndrtj/bioconda-utils
|
c10c9ae9055380b36114c0db65415787f0ad3785
|
[
"MIT"
] | null | null | null |
bioconda_utils/githubhandler.py
|
sndrtj/bioconda-utils
|
c10c9ae9055380b36114c0db65415787f0ad3785
|
[
"MIT"
] | null | null | null |
bioconda_utils/githubhandler.py
|
sndrtj/bioconda-utils
|
c10c9ae9055380b36114c0db65415787f0ad3785
|
[
"MIT"
] | null | null | null |
"""Highlevel API for managing PRs on Github"""
import abc
import logging
from copy import copy
from enum import Enum
from typing import Any, Dict, List, Optional
import gidgethub
import gidgethub.aiohttp
import aiohttp
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
#: State for Github Issues
IssueState = Enum("IssueState", "open closed all") # pylint: disable=invalid-name
class GitHubHandler:
"""Handles interaction with GitHub
Arguments:
token: OAUTH token granting permissions to GH
dry_run: Don't actually modify things if set
to_user: Target User/Org for PRs
to_repo: Target repository within **to_user**
"""
PULLS = "/repos/{user}/{repo}/pulls{/number}{?head,base,state}"
ISSUES = "/repos/{user}/{repo}/issues{/number}"
ORG_MEMBERS = "/orgs/{user}/members{/username}"
STATE = IssueState
def __init__(self, token: str,
dry_run: bool = False,
to_user: str = "bioconda",
to_repo: str = "bioconda-recipes") -> None:
self.token = token
self.dry_run = dry_run
self.var_default = {'user': to_user,
'repo': to_repo}
# filled in by login():
self.api: gidgethub.abc.GitHubAPI = None
self.username: str = None
@abc.abstractmethod
def create_api_object(self, *args, **kwargs):
"""Create API object"""
def get_file_relurl(self, path: str, branch_name: str = "master") -> str:
"""Format domain relative url for **path** on **branch_name**"""
return "/{user}/{repo}/tree/{branch_name}/{path}".format(
branch_name=branch_name, path=path, **self.var_default)
async def login(self, *args, **kwargs):
"""Log into API (fills `self.username`)"""
self.create_api_object(*args, **kwargs)
if not self.token:
self.username = "UNKNOWN [no token]"
else:
user = await self.api.getitem("/user")
self.username = user["login"]
async def is_member(self, username) -> bool:
"""Check if **username** is member of current org"""
if not username:
return False
var_data = copy(self.var_default)
var_data['username'] = username
try:
await self.api.getitem(self.ORG_MEMBERS, var_data)
except gidgethub.BadRequest:
logger.debug("User %s is not a member of %s", username, var_data['user'])
return False
logger.debug("User %s IS a member of %s", username, var_data['user'])
return True
# pylint: disable=too-many-arguments
async def get_prs(self,
from_branch: Optional[str] = None,
from_user: Optional[str] = None,
to_branch: Optional[str] = None,
number: Optional[int] = None,
state: Optional[IssueState] = None) -> List[Dict[Any, Any]]:
"""Retrieve list of PRs matching parameters
Arguments:
from_branch: Name of branch from which PR asks to pull
from_user: Name of user/org from which to pull
(default: from auth)
to_branch: Name of branch into which to pull (default: master)
number: PR number
"""
var_data = copy(self.var_default)
if not from_user:
from_user = self.username
if from_branch:
if from_user:
var_data['head'] = f"{from_user}:{from_branch}"
else:
var_data['head'] = from_branch
if to_branch:
var_data['base'] = to_branch
if number:
var_data['number'] = str(number)
if state:
var_data['state'] = state.name.lower()
return await self.api.getitem(self.PULLS, var_data)
# pylint: disable=too-many-arguments
async def create_pr(self, title: str,
from_branch: Optional[str] = None,
from_user: Optional[str] = None,
to_branch: Optional[str] = "master",
body: Optional[str] = None,
maintainer_can_modify: bool = True) -> Dict[Any, Any]:
"""Create new PR
Arguments:
title: Title of new PR
from_branch: Name of branch from which PR asks to pull
from_user: Name of user/org from which to pull
to_branch: Name of branch into which to pull (default: master)
body: Body text of PR
maintainer_can_modify: Whether to allow maintainer to modify from_branch
"""
var_data = copy(self.var_default)
if not from_user:
from_user = self.username
data: Dict[str, Any] = {'title': title,
'body': '',
'maintainer_can_modify': maintainer_can_modify}
if body:
data['body'] += body
if from_branch:
if from_user and from_user != self.username:
data['head'] = f"{from_user}:{from_branch}"
else:
data['head'] = from_branch
if to_branch:
data['base'] = to_branch
logger.debug("PR data %s", data)
if self.dry_run:
logger.info("Would create PR '%s'", title)
return {'number': -1}
logger.info("Creating PR '%s'", title)
return await self.api.post(self.PULLS, var_data, data=data)
async def modify_issue(self, number: int,
labels: Optional[List[str]] = None,
title: Optional[str] = None,
body: Optional[str] = None) -> Dict[Any, Any]:
"""Modify existing issue (PRs are issues)
Arguments:
labels: list of labels to assign to issue
title: new title
body: new body
"""
var_data = copy(self.var_default)
var_data["number"] = str(number)
data: Dict[str, Any] = {}
if labels:
data['labels'] = labels
if title:
data['title'] = title
if body:
data['body'] = body
if self.dry_run:
logger.info("Would modify PR %s", number)
if title:
logger.info("New title: %s", title)
if labels:
logger.info("New labels: %s", labels)
if body:
logger.info("New Body:\n%s\n", body)
return {'number': number}
logger.info("Modifying PR %s", number)
return await self.api.patch(self.ISSUES, var_data, data=data)
class AiohttpGitHubHandler(GitHubHandler):
"""GitHubHandler using Aiohttp for HTTP requests
Arguments:
session: Aiohttp Client Session object
requester: Identify self (e.g. user agent)
"""
def create_api_object(self, session: aiohttp.ClientSession,
requester: str, *args, **kwargs) -> None:
self.api = gidgethub.aiohttp.GitHubAPI(
session, requester, oauth_token=self.token
)
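# A minimal usage sketch (runs offline: an empty token makes login() skip the
# /user lookup, and dry_run=True makes create_pr() log and return a stub
# instead of POSTing; "my-bot/0.1" is just an example requester string):
if __name__ == "__main__":
    import asyncio

    async def _demo():
        async with aiohttp.ClientSession() as session:
            handler = AiohttpGitHubHandler("", dry_run=True)
            await handler.login(session, "my-bot/0.1")
            return await handler.create_pr("Update recipe", from_branch="bump-foo")

    print(asyncio.get_event_loop().run_until_complete(_demo()))  # {'number': -1}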
| 35.078431
| 85
| 0.55981
|
import abc
import logging
from copy import copy
from enum import Enum
from typing import Any, Dict, List, Optional
import gidgethub
import gidgethub.aiohttp
import aiohttp
logger = logging.getLogger(__name__)
IssueState = Enum("IssueState", "open closed all")
class GitHubHandler:
PULLS = "/repos/{user}/{repo}/pulls{/number}{?head,base,state}"
ISSUES = "/repos/{user}/{repo}/issues{/number}"
ORG_MEMBERS = "/orgs/{user}/members{/username}"
STATE = IssueState
def __init__(self, token: str,
dry_run: bool = False,
to_user: str = "bioconda",
to_repo: str = "bioconda-recipes") -> None:
self.token = token
self.dry_run = dry_run
self.var_default = {'user': to_user,
'repo': to_repo}
self.api: gidgethub.abc.GitHubAPI = None
self.username: str = None
@abc.abstractmethod
def create_api_object(self, *args, **kwargs):
    pass
def get_file_relurl(self, path: str, branch_name: str = "master") -> str:
return "/{user}/{repo}/tree/{branch_name}/{path}".format(
branch_name=branch_name, path=path, **self.var_default)
async def login(self, *args, **kwargs):
self.create_api_object(*args, **kwargs)
if not self.token:
self.username = "UNKNOWN [no token]"
else:
user = await self.api.getitem("/user")
self.username = user["login"]
async def is_member(self, username) -> bool:
if not username:
return False
var_data = copy(self.var_default)
var_data['username'] = username
try:
await self.api.getitem(self.ORG_MEMBERS, var_data)
except gidgethub.BadRequest:
logger.debug("User %s is not a member of %s", username, var_data['user'])
return False
logger.debug("User %s IS a member of %s", username, var_data['user'])
return True
async def get_prs(self,
from_branch: Optional[str] = None,
from_user: Optional[str] = None,
to_branch: Optional[str] = None,
number: Optional[int] = None,
state: Optional[IssueState] = None) -> List[Dict[Any, Any]]:
var_data = copy(self.var_default)
if not from_user:
from_user = self.username
if from_branch:
if from_user:
var_data['head'] = f"{from_user}:{from_branch}"
else:
var_data['head'] = from_branch
if to_branch:
var_data['base'] = to_branch
if number:
var_data['number'] = str(number)
if state:
var_data['state'] = state.name.lower()
return await self.api.getitem(self.PULLS, var_data)
async def create_pr(self, title: str,
from_branch: Optional[str] = None,
from_user: Optional[str] = None,
to_branch: Optional[str] = "master",
body: Optional[str] = None,
maintainer_can_modify: bool = True) -> Dict[Any, Any]:
var_data = copy(self.var_default)
if not from_user:
from_user = self.username
data: Dict[str, Any] = {'title': title,
'body': '',
'maintainer_can_modify': maintainer_can_modify}
if body:
data['body'] += body
if from_branch:
if from_user and from_user != self.username:
data['head'] = f"{from_user}:{from_branch}"
else:
data['head'] = from_branch
if to_branch:
data['base'] = to_branch
logger.debug("PR data %s", data)
if self.dry_run:
logger.info("Would create PR '%s'", title)
return {'number': -1}
logger.info("Creating PR '%s'", title)
return await self.api.post(self.PULLS, var_data, data=data)
async def modify_issue(self, number: int,
labels: Optional[List[str]] = None,
title: Optional[str] = None,
body: Optional[str] = None) -> Dict[Any, Any]:
var_data = copy(self.var_default)
var_data["number"] = str(number)
data: Dict[str, Any] = {}
if labels:
data['labels'] = labels
if title:
data['title'] = title
if body:
data['body'] = body
if self.dry_run:
logger.info("Would modify PR %s", number)
if title:
logger.info("New title: %s", title)
if labels:
logger.info("New labels: %s", labels)
if body:
logger.info("New Body:\n%s\n", body)
return {'number': number}
logger.info("Modifying PR %s", number)
return await self.api.patch(self.ISSUES, var_data, data=data)
class AiohttpGitHubHandler(GitHubHandler):
def create_api_object(self, session: aiohttp.ClientSession,
requester: str, *args, **kwargs) -> None:
self.api = gidgethub.aiohttp.GitHubAPI(
session, requester, oauth_token=self.token
)
| true
| true
|
79045d4b8dee7f78fb0980dfb14ddea8470a6b4c
| 5,411
|
py
|
Python
|
restapi/utilities/meta.py
|
rapydo/http-api
|
ef0a299173195145303069534d45d446ea4da93a
|
[
"MIT"
] | 8
|
2018-07-04T09:54:46.000Z
|
2022-03-17T08:21:06.000Z
|
restapi/utilities/meta.py
|
rapydo/http-api
|
ef0a299173195145303069534d45d446ea4da93a
|
[
"MIT"
] | 19
|
2018-04-18T07:24:55.000Z
|
2022-03-04T01:03:15.000Z
|
restapi/utilities/meta.py
|
rapydo/http-api
|
ef0a299173195145303069534d45d446ea4da93a
|
[
"MIT"
] | 7
|
2018-07-03T12:17:50.000Z
|
2021-05-05T04:33:32.000Z
|
"""
Meta thinking: python objects & introspection
useful documentation:
http://python-3-patterns-idioms-test.readthedocs.org/en/latest/Metaprogramming.html
"""
import inspect
import pkgutil
from importlib import import_module
from types import ModuleType
from typing import Any, Callable, Dict, List, Optional, Type
from restapi.config import BACKEND_PACKAGE, CUSTOM_PACKAGE
from restapi.utilities import print_and_exit
from restapi.utilities.logs import log
class Meta:
"""Utilities with meta in mind"""
@staticmethod
def get_classes_from_module(module: ModuleType) -> Dict[str, Type[Any]]:
"""
Find classes inside a python module file.
"""
try:
return {
name: cls
for name, cls in module.__dict__.items()
if isinstance(cls, type)
}
except AttributeError:
log.warning("Could not find any class in module {}", module)
return {}
@staticmethod
def get_new_classes_from_module(module: ModuleType) -> Dict[str, Type[Any]]:
"""
Skip classes that did not originate inside the module.
"""
classes = {}
for name, value in Meta.get_classes_from_module(module).items():
if module.__name__ in value.__module__:
classes[name] = value
return classes
# Returns the imported module, or None if the import fails (unless exit_on_fail)
@staticmethod
def get_module_from_string(
modulestring: str, exit_on_fail: bool = False
) -> Optional[ModuleType]:
"""
Import a module whose dotted path is stored as a string in a variable
"""
try:
return import_module(modulestring)
except ModuleNotFoundError as e:
if exit_on_fail:
log.error(e)
raise e
return None
except Exception as e: # pragma: no cover
if exit_on_fail:
log.error(e)
raise e
log.error("Module {} not found.\nError: {}", modulestring, e)
return None
@staticmethod
def get_self_reference_from_args(*args: Any) -> Optional[Any]:
"""
Useful in decorators:
being able to call the internal method by getting
the 'self' reference from the decorated method
(when it's there)
"""
if len(args) > 0:
candidate_as_self = args[0]
cls_attribute = getattr(candidate_as_self, "__class__", None)
if cls_attribute is not None and inspect.isclass(cls_attribute):
return args[0]
return None
@staticmethod
def import_models(
name: str, package: str, mandatory: bool = False
) -> Dict[str, Type[Any]]:
if package == BACKEND_PACKAGE:
module_name = f"{package}.connectors.{name}.models"
else:
module_name = f"{package}.models.{name}"
try:
module = Meta.get_module_from_string(module_name, exit_on_fail=True)
except Exception as e:
module = None
if mandatory:
log.critical(e)
if not module:
if mandatory:
print_and_exit("Cannot load {} models from {}", name, module_name)
return {}
return Meta.get_new_classes_from_module(module)
@staticmethod
def get_celery_tasks(package_name: str) -> List[Callable[..., Any]]:
"""
Extract all celery tasks from a module.
Celery tasks are functions decorated by @CeleryExt.celery_app.task(...)
This decorator transforms the function into an instance of a subclass of
celery.local.PromiseProxy
"""
tasks: List[Callable[..., Any]] = []
# package = tasks folder
package = Meta.get_module_from_string(package_name)
if package is None:
return tasks
# get all modules in package (i.e. py files)
path = package.__path__
for _, module_name, ispkg in pkgutil.iter_modules(path):
# skip modules (i.e. subfolders)
if ispkg: # pragma: no cover
continue
module_path = f"{package_name}.{module_name}"
log.debug("Loading module '{}'", module_path)
# convert file name in submodule, i.e.
# tasks.filename
submodule = Meta.get_module_from_string(
module_path,
exit_on_fail=True,
)
# get all functions in py file
functions = inspect.getmembers(submodule)
for func in functions:
obj_type = type(func[1])
if obj_type.__module__ != "celery.local":
continue
# This was a dict name => func
# tasks[func[0]] = func[1]
# Now it is a list
tasks.append(func[1])
return tasks
@staticmethod
def get_class(module_relpath: str, class_name: str) -> Optional[Any]:
abspath = f"{CUSTOM_PACKAGE}.{module_relpath}"
module = Meta.get_module_from_string(abspath)
if module is None:
log.debug("{} path does not exist", abspath)
return None
if not hasattr(module, class_name):
return None
return getattr(module, class_name)
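# A minimal sketch of the module helpers above (assumes the restapi package is
# importable; the stdlib "json" module is used purely as a stand-in target):
if __name__ == "__main__":
    mod = Meta.get_module_from_string("json")
    print("JSONDecoder" in Meta.get_classes_from_module(mod))  # True
    print(Meta.get_module_from_string("no_such_module"))       # None, not an exception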
| 30.398876
| 83
| 0.582887
|
import inspect
import pkgutil
from importlib import import_module
from types import ModuleType
from typing import Any, Callable, Dict, List, Optional, Type
from restapi.config import BACKEND_PACKAGE, CUSTOM_PACKAGE
from restapi.utilities import print_and_exit
from restapi.utilities.logs import log
class Meta:
@staticmethod
def get_classes_from_module(module: ModuleType) -> Dict[str, Type[Any]]:
try:
return {
name: cls
for name, cls in module.__dict__.items()
if isinstance(cls, type)
}
except AttributeError:
log.warning("Could not find any class in module {}", module)
return {}
@staticmethod
def get_new_classes_from_module(module: ModuleType) -> Dict[str, Type[Any]]:
classes = {}
for name, value in Meta.get_classes_from_module(module).items():
if module.__name__ in value.__module__:
classes[name] = value
return classes
@staticmethod
def get_module_from_string(
modulestring: str, exit_on_fail: bool = False
) -> Optional[ModuleType]:
try:
return import_module(modulestring)
except ModuleNotFoundError as e:
if exit_on_fail:
log.error(e)
raise e
return None
except Exception as e:
if exit_on_fail:
log.error(e)
raise e
log.error("Module {} not found.\nError: {}", modulestring, e)
return None
@staticmethod
def get_self_reference_from_args(*args: Any) -> Optional[Any]:
if len(args) > 0:
candidate_as_self = args[0]
cls_attribute = getattr(candidate_as_self, "__class__", None)
if cls_attribute is not None and inspect.isclass(cls_attribute):
return args[0]
return None
@staticmethod
def import_models(
name: str, package: str, mandatory: bool = False
) -> Dict[str, Type[Any]]:
if package == BACKEND_PACKAGE:
module_name = f"{package}.connectors.{name}.models"
else:
module_name = f"{package}.models.{name}"
try:
module = Meta.get_module_from_string(module_name, exit_on_fail=True)
except Exception as e:
module = None
if mandatory:
log.critical(e)
if not module:
if mandatory:
print_and_exit("Cannot load {} models from {}", name, module_name)
return {}
return Meta.get_new_classes_from_module(module)
@staticmethod
def get_celery_tasks(package_name: str) -> List[Callable[..., Any]]:
tasks: List[Callable[..., Any]] = []
package = Meta.get_module_from_string(package_name)
if package is None:
return tasks
path = package.__path__
for _, module_name, ispkg in pkgutil.iter_modules(path):
if ispkg:
continue
module_path = f"{package_name}.{module_name}"
log.debug("Loading module '{}'", module_path)
submodule = Meta.get_module_from_string(
module_path,
exit_on_fail=True,
)
functions = inspect.getmembers(submodule)
for func in functions:
obj_type = type(func[1])
if obj_type.__module__ != "celery.local":
continue
tasks.append(func[1])
return tasks
@staticmethod
def get_class(module_relpath: str, class_name: str) -> Optional[Any]:
abspath = f"{CUSTOM_PACKAGE}.{module_relpath}"
module = Meta.get_module_from_string(abspath)
if module is None:
log.debug("{} path does not exist", abspath)
return None
if not hasattr(module, class_name):
return None
return getattr(module, class_name)
| true
| true
|
79045e05f9262916be303b3c10f61de600268472
| 767
|
py
|
Python
|
ruobr_api/__init__.py
|
raitonoberu/ruobr_api
|
bd78be0cb020990a4e039000ab93d495e8569341
|
[
"Apache-2.0"
] | 10
|
2020-05-05T17:47:12.000Z
|
2022-02-13T15:31:40.000Z
|
ruobr_api/__init__.py
|
raitonoberu/ruobr_api
|
bd78be0cb020990a4e039000ab93d495e8569341
|
[
"Apache-2.0"
] | null | null | null |
ruobr_api/__init__.py
|
raitonoberu/ruobr_api
|
bd78be0cb020990a4e039000ab93d495e8569341
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
ruobr.ru/api
~~~~~~~~~~~~
A library for accessing the e-diary (electronic school diary) API.
Example:
>>> from ruobr_api import Ruobr
>>> r = Ruobr('username', 'password')
>>> r.getUser()
User(id=7592904, status='child', first_name='Иван', last_name='Иванов', middle_name='Иванович', school='69-МБОУ "СОШ №69"', school_is_tourniquet=False, readonly=False, school_is_food=True, group='10А', gps_tracker=False)
:authors: raitonoberu
:license: Apache License, Version 2.0, see LICENSE file
:copyright: (c) 2021 raitonoberu
"""
from .__main__ import (
Ruobr,
AsyncRuobr,
AuthenticationException,
NoSuccessException,
NoChildrenException,
)
__author__ = "raitonoberu"
__version__ = "1.2.1"
__email__ = "raitonoberu@mail.ru"
| 25.566667
| 223
| 0.694915
|
from .__main__ import (
Ruobr,
AsyncRuobr,
AuthenticationException,
NoSuccessException,
NoChildrenException,
)
__author__ = "raitonoberu"
__version__ = "1.2.1"
__email__ = "raitonoberu@mail.ru"
| true
| true
|
79045f85c18da745ed09eda968f5bc480749c4e5
| 1,988
|
py
|
Python
|
research/cv/resnext152_64x4d/postprocess.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 77
|
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/resnext152_64x4d/postprocess.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 3
|
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/resnext152_64x4d/postprocess.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 24
|
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""post process for 310 inference"""
import os
import json
import argparse
import numpy as np
parser = argparse.ArgumentParser(description="resnet inference")
parser.add_argument("--result_path", type=str, required=True, help="result files path.")
parser.add_argument("--label_path", type=str, required=True, help="image file path.")
args = parser.parse_args()
batch_size = 1
num_classes = 1000
def get_result(result_path, label_path):
"""calculate the result"""
files = os.listdir(result_path)
with open(label_path, "r") as label:
labels = json.load(label)
top1 = 0
top5 = 0
total_data = len(files)
for file in files:
img_ids_name = file.split('_0.')[0]
data_path = os.path.join(result_path, img_ids_name + "_0.bin")
result = np.fromfile(data_path, dtype=np.float16).reshape(batch_size, num_classes)
for batch in range(batch_size):
predict = np.argsort(-result[batch], axis=-1)
if labels[img_ids_name+".JPEG"] == predict[0]:
top1 += 1
if labels[img_ids_name+".JPEG"] in predict[:5]:
top5 += 1
print(f"Total data: {total_data}, top1 accuracy: {top1/total_data}, top5 accuracy: {top5/total_data}.")
if __name__ == '__main__':
get_result(args.result_path, args.label_path)
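# A tiny self-contained sketch of the ranking logic in get_result above:
# np.argsort(-x) orders class indices from highest to lowest score, so
# predict[0] is the top-1 class and predict[:5] are the top-5 candidates.
#
#     logits = np.array([0.1, 0.7, 0.05, 0.9, 0.3], dtype=np.float16)
#     np.argsort(-logits, axis=-1)  # -> array([3, 1, 4, 0, 2]); class 3 is top-1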
| 37.509434
| 107
| 0.666499
|
import os
import json
import argparse
import numpy as np
parser = argparse.ArgumentParser(description="resnet inference")
parser.add_argument("--result_path", type=str, required=True, help="result files path.")
parser.add_argument("--label_path", type=str, required=True, help="image file path.")
args = parser.parse_args()
batch_size = 1
num_classes = 1000
def get_result(result_path, label_path):
files = os.listdir(result_path)
with open(label_path, "r") as label:
labels = json.load(label)
top1 = 0
top5 = 0
total_data = len(files)
for file in files:
img_ids_name = file.split('_0.')[0]
data_path = os.path.join(result_path, img_ids_name + "_0.bin")
result = np.fromfile(data_path, dtype=np.float16).reshape(batch_size, num_classes)
for batch in range(batch_size):
predict = np.argsort(-result[batch], axis=-1)
if labels[img_ids_name+".JPEG"] == predict[0]:
top1 += 1
if labels[img_ids_name+".JPEG"] in predict[:5]:
top5 += 1
print(f"Total data: {total_data}, top1 accuracy: {top1/total_data}, top5 accuracy: {top5/total_data}.")
if __name__ == '__main__':
get_result(args.result_path, args.label_path)
| true
| true
|
79045f9cd5e72a52096e8052bb76b0d7b07f58cd
| 22,693
|
py
|
Python
|
credstash.py
|
traveloka/credstash
|
85a1f93ebeebffc89539aaa4b3e653c02f0da39b
|
[
"Apache-2.0"
] | null | null | null |
credstash.py
|
traveloka/credstash
|
85a1f93ebeebffc89539aaa4b3e653c02f0da39b
|
[
"Apache-2.0"
] | null | null | null |
credstash.py
|
traveloka/credstash
|
85a1f93ebeebffc89539aaa4b3e653c02f0da39b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2015 Luminal, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import json
import operator
import os
import os.path
import sys
import time
import re
import boto3
import botocore.exceptions
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
import yaml
NO_YAML = False
except ImportError:
NO_YAML = True
from base64 import b64encode, b64decode
from boto3.dynamodb.conditions import Attr
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto.Hash.HMAC import HMAC
from Crypto.Util import Counter
DEFAULT_REGION = "us-east-1"
PAD_LEN = 19 # number of digits in sys.maxint
WILDCARD_CHAR = "*"
class KmsError(Exception):
def __init__(self, value=""):
self.value = "KMS ERROR: " + value if value != "" else "KMS ERROR"
def __str__(self):
return self.value
class IntegrityError(Exception):
def __init__(self, value=""):
self.value = "INTEGRITY ERROR: " + value if value != "" else \
"INTEGRITY ERROR"
def __str__(self):
return self.value
class ItemNotFound(Exception):
pass
class KeyValueToDictionary(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace,
self.dest,
dict((x[0], x[1]) for x in values))
def printStdErr(s):
sys.stderr.write(str(s))
sys.stderr.write("\n")
def key_value_pair(string):
output = string.split('=')
if len(output) != 2:
msg = "%r is not the form of \"key=value\"" % string
raise argparse.ArgumentTypeError(msg)
return output
def expand_wildcard(string, secrets):
prog = re.compile('^' + string.replace(WILDCARD_CHAR, '.*') + '$')
output = []
for secret in secrets:
if prog.search(secret) is not None:
output.append(secret)
return output
def value_or_filename(string):
# argparse running on old versions of python (<2.7) will pass an empty
# string to this function before it passes the actual value.
# If an empty string is passed in, just return an empty string
if string == "":
return ""
if string[0] == "@":
filename = string[1:]
try:
with open(os.path.expanduser(filename)) as f:
output = f.read()
        except IOError:
            raise argparse.ArgumentTypeError("Unable to read file %s" %
                                             filename)
else:
output = string
return output
def csv_dump(dictionary):
csvfile = StringIO()
csvwriter = csv.writer(csvfile)
for key in dictionary:
csvwriter.writerow([key, dictionary[key]])
return csvfile.getvalue()
def paddedInt(i):
'''
return a string that contains `i`, left-padded with 0's up to PAD_LEN digits
'''
i_str = str(i)
pad = PAD_LEN - len(i_str)
return (pad * "0") + i_str
def getHighestVersion(name, region="us-east-1", table="credential-store"):
'''
Return the highest version of `name` in the table
'''
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
response = secrets.query(Limit=1,
ScanIndexForward=False,
ConsistentRead=True,
KeyConditionExpression=boto3.dynamodb.conditions.Key("name").eq(name),
ProjectionExpression="version")
if response["Count"] == 0:
return 0
return response["Items"][0]["version"]
def listSecrets(region="us-east-1", table="credential-store"):
'''
do a full-table scan of the credential-store,
and return the names and versions of every credential
'''
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
response = secrets.scan(ProjectionExpression="#N, version",
ExpressionAttributeNames={"#N": "name"})
return response["Items"]
def putSecret(name, secret, version, kms_key="alias/credstash",
region="us-east-1", table="credential-store", context=None):
'''
put a secret called `name` into the secret-store,
protected by the key kms_key
'''
if not context:
context = {}
kms = boto3.client('kms', region_name=region)
    # generate a 64-byte key;
    # half will be used for data encryption, the other half for HMAC
try:
kms_response = kms.generate_data_key(KeyId=kms_key, EncryptionContext=context, NumberOfBytes=64)
    except Exception:
raise KmsError("Could not generate key using KMS key %s" % kms_key)
data_key = kms_response['Plaintext'][:32]
hmac_key = kms_response['Plaintext'][32:]
wrapped_key = kms_response['CiphertextBlob']
enc_ctr = Counter.new(128)
encryptor = AES.new(data_key, AES.MODE_CTR, counter=enc_ctr)
c_text = encryptor.encrypt(secret)
# compute an HMAC using the hmac key and the ciphertext
hmac = HMAC(hmac_key, msg=c_text, digestmod=SHA256)
b64hmac = hmac.hexdigest()
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
data = {}
data['name'] = name
data['version'] = version if version != "" else paddedInt(1)
data['key'] = b64encode(wrapped_key).decode('utf-8')
data['contents'] = b64encode(c_text).decode('utf-8')
data['hmac'] = b64hmac
return secrets.put_item(Item=data, ConditionExpression=Attr('name').not_exists())
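# A hedged usage sketch (the credential name, value and context are
# illustrative, not from the source):
#
#   putSecret("db.password", "hunter2", version="",
#             kms_key="alias/credstash", region="us-east-1",
#             table="credential-store", context={"env": "prod"})
#
# The conditional put fails if an item with the same name and version already
# exists, which is what makes repeated puts safe.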
def getAllSecrets(version="", region="us-east-1",
table="credential-store", context=None):
'''
fetch and decrypt all secrets
'''
output = {}
secrets = listSecrets(region, table)
for credential in set([x["name"] for x in secrets]):
try:
output[credential] = getSecret(credential,
version,
region,
table,
context)
        except Exception:
            pass
return output
def getSecret(name, version="", region="us-east-1",
table="credential-store", context=None):
'''
fetch and decrypt the secret called `name`
'''
if not context:
context = {}
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
if version == "":
# do a consistent fetch of the credential with the highest version
response = secrets.query(Limit=1,
ScanIndexForward=False,
ConsistentRead=True,
KeyConditionExpression=boto3.dynamodb.conditions.Key("name").eq(name))
if response["Count"] == 0:
raise ItemNotFound("Item {'name': '%s'} couldn't be found." % name)
material = response["Items"][0]
else:
response = secrets.get_item(Key={"name": name, "version": version})
if "Item" not in response:
raise ItemNotFound("Item {'name': '%s', 'version': '%s'} couldn't be found." % (name, version))
material = response["Item"]
kms = boto3.client('kms', region_name=region)
# Check the HMAC before we decrypt to verify ciphertext integrity
try:
kms_response = kms.decrypt(CiphertextBlob=b64decode(material['key']), EncryptionContext=context)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "InvalidCiphertextException":
if context is None:
msg = ("Could not decrypt hmac key with KMS. The credential may "
"require that an encryption context be provided to decrypt "
"it.")
else:
msg = ("Could not decrypt hmac key with KMS. The encryption "
"context provided may not match the one used when the "
"credential was stored.")
else:
msg = "Decryption error %s" % e
raise KmsError(msg)
except Exception as e:
raise KmsError("Decryption error %s" % e)
key = kms_response['Plaintext'][:32]
hmac_key = kms_response['Plaintext'][32:]
hmac = HMAC(hmac_key, msg=b64decode(material['contents']),
digestmod=SHA256)
if hmac.hexdigest() != material['hmac']:
raise IntegrityError("Computed HMAC on %s does not match stored HMAC"
% name)
dec_ctr = Counter.new(128)
decryptor = AES.new(key, AES.MODE_CTR, counter=dec_ctr)
plaintext = decryptor.decrypt(b64decode(material['contents'])).decode("utf-8")
return plaintext
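# A hedged usage sketch (the credential name and context are illustrative):
#
#   password = getSecret("db.password", context={"env": "prod"})
#
# Omitting `version` fetches the highest version via a consistent query, and
# the HMAC check above verifies the ciphertext before it is decrypted.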
def deleteSecrets(name, region="us-east-1", table="credential-store"):
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
response = secrets.scan(FilterExpression=boto3.dynamodb.conditions.Attr("name").eq(name),
ProjectionExpression="#N, version",
ExpressionAttributeNames={"#N": "name"})
for secret in response["Items"]:
print("Deleting %s -- version %s" % (secret["name"], secret["version"]))
secrets.delete_item(Key=secret)
def createDdbTable(region="us-east-1", table="credential-store"):
'''
create the secret store table in DDB in the specified region
'''
dynamodb = boto3.resource("dynamodb", region_name=region)
if table in (t.name for t in dynamodb.tables.all()):
print("Credential Store table already exists")
return
print("Creating table...")
response = dynamodb.create_table(
TableName=table,
KeySchema=[
{
"AttributeName": "name",
"KeyType": "HASH",
},
{
"AttributeName": "version",
"KeyType": "RANGE",
}
],
AttributeDefinitions=[
{
"AttributeName": "name",
"AttributeType": "S",
},
{
"AttributeName": "version",
"AttributeType": "S",
},
],
ProvisionedThroughput={
"ReadCapacityUnits": 1,
"WriteCapacityUnits": 1,
}
)
print("Waiting for table to be created...")
client = boto3.client("dynamodb", region_name=region)
client.get_waiter("table_exists").wait(TableName=table)
print("Table has been created. "
"Go read the README about how to create your KMS key")
def main():
parsers = {}
parsers['super'] = argparse.ArgumentParser(
description="A credential/secret storage system")
parsers['super'].add_argument("-r", "--region",
help="the AWS region in which to operate."
"If a region is not specified, credstash "
"will use the value of the "
"AWS_DEFAULT_REGION env variable, "
"or if that is not set, us-east-1")
parsers['super'].add_argument("-t", "--table", default="credential-store",
help="DynamoDB table to use for "
"credential storage")
subparsers = parsers['super'].add_subparsers(help='Try commands like '
'"{name} get -h" or "{name}'
'put --help" to get each'
'sub command\'s options'
.format(name=os.path.basename(
__file__)))
action = 'delete'
    parsers[action] = subparsers.add_parser(action,
                                            help="Delete a credential "
                                                 "from the store")
parsers[action].add_argument("credential", type=str,
help="the name of the credential to delete")
parsers[action].set_defaults(action=action)
action = 'get'
parsers[action] = subparsers.add_parser(action, help="Get a credential "
"from the store")
parsers[action].add_argument("credential", type=str,
help="the name of the credential to get."
"Using the wildcard character '%s' will "
"search for credentials that match the "
"pattern" % WILDCARD_CHAR)
parsers[action].add_argument("context", type=key_value_pair,
action=KeyValueToDictionary, nargs='*',
help="encryption context key/value pairs "
"associated with the credential in the form "
"of \"key=value\"")
parsers[action].add_argument("-n", "--noline", action="store_true",
help="Don't append newline to returned "
"value (useful in scripts or with "
"binary files)")
parsers[action].add_argument("-v", "--version", default="",
help="Get a specific version of the "
"credential (defaults to the latest version)")
parsers[action].set_defaults(action=action)
action = 'getall'
parsers[action] = subparsers.add_parser(action,
help="Get all credentials from "
"the store")
parsers[action].add_argument("context", type=key_value_pair,
action=KeyValueToDictionary, nargs='*',
help="encryption context key/value pairs "
"associated with the credential in the form "
"of \"key=value\"")
parsers[action].add_argument("-v", "--version", default="",
help="Get a specific version of the "
"credential (defaults to the latest version)")
parsers[action].add_argument("-f", "--format", default="json",
choices=["json", "csv"] +
([] if NO_YAML else ["yaml"]),
help="Output format. json(default) " +
("" if NO_YAML else "yaml ") + "or csv.")
parsers[action].set_defaults(action=action)
action = 'list'
parsers[action] = subparsers.add_parser(action,
help="list credentials and "
"their versions")
parsers[action].set_defaults(action=action)
action = 'put'
parsers[action] = subparsers.add_parser(action,
help="Put a credential into "
"the store")
parsers[action].add_argument("credential", type=str,
help="the name of the credential to store")
parsers[action].add_argument("value", type=value_or_filename,
help="the value of the credential to store "
"or, if beginning with the \"@\" character, "
"the filename of the file containing "
"the value", default="")
parsers[action].add_argument("context", type=key_value_pair,
action=KeyValueToDictionary, nargs='*',
help="encryption context key/value pairs "
"associated with the credential in the form "
"of \"key=value\"")
parsers[action].add_argument("-k", "--key", default="alias/credstash",
help="the KMS key-id of the master key "
"to use. See the README for more "
"information. Defaults to alias/credstash")
parsers[action].add_argument("-v", "--version", default="",
help="Put a specific version of the "
"credential (update the credential; "
"defaults to version `1`).")
parsers[action].add_argument("-a", "--autoversion", action="store_true",
help="Automatically increment the version of "
"the credential to be stored. This option "
"causes the `-v` flag to be ignored. "
"(This option will fail if the currently stored "
"version is not numeric.)")
parsers[action].set_defaults(action=action)
action = 'setup'
parsers[action] = subparsers.add_parser(action,
help='setup the credential store')
parsers[action].set_defaults(action=action)
args = parsers['super'].parse_args()
region = os.getenv(
"AWS_DEFAULT_REGION", DEFAULT_REGION) if not args.region \
else args.region
if "action" in vars(args):
if args.action == "delete":
deleteSecrets(args.credential, region=region, table=args.table)
return
if args.action == "list":
credential_list = listSecrets(region=region, table=args.table)
if credential_list:
# print list of credential names and versions,
# sorted by name and then by version
max_len = max([len(x["name"]) for x in credential_list])
for cred in sorted(credential_list,
key=operator.itemgetter("name", "version")):
print("{0:{1}} -- version {2:>}".format(
cred["name"], max_len, cred["version"]))
else:
return
if args.action == "put":
if args.autoversion:
latestVersion = getHighestVersion(args.credential, region,
args.table)
try:
version = paddedInt(int(latestVersion) + 1)
except ValueError:
printStdErr("Can not autoincrement version. The current "
"version: %s is not an int" % latestVersion)
return
else:
version = args.version
try:
if putSecret(args.credential, args.value, version,
kms_key=args.key, region=region, table=args.table,
context=args.context):
print("{0} has been stored".format(args.credential))
except KmsError as e:
printStdErr(e)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "ConditionalCheckFailedException":
latestVersion = getHighestVersion(args.credential, region,
args.table)
printStdErr("%s version %s is already in the credential store. "
"Use the -v flag to specify a new version" %
(args.credential, latestVersion))
return
if args.action == "get":
try:
if WILDCARD_CHAR in args.credential:
names = expand_wildcard(args.credential,
[x["name"]
for x
in listSecrets(region=region,
table=args.table)])
print(json.dumps(dict((name,
getSecret(name,
args.version,
region=region,
table=args.table,
context=args.context))
for name in names)))
else:
sys.stdout.write(getSecret(args.credential, args.version,
region=region, table=args.table,
context=args.context))
if not args.noline:
sys.stdout.write("\n")
except ItemNotFound as e:
printStdErr(e)
except KmsError as e:
printStdErr(e)
except IntegrityError as e:
printStdErr(e)
return
if args.action == "getall":
secrets = getAllSecrets(args.version,
region=region,
table=args.table,
context=args.context)
if args.format == "json":
output_func = json.dumps
output_args = {"sort_keys": True,
"indent": 4,
"separators": (',', ': ')}
elif not NO_YAML and args.format == "yaml":
output_func = yaml.dump
output_args = {"default_flow_style": False}
elif args.format == 'csv':
output_func = csv_dump
output_args = {}
print(output_func(secrets, **output_args))
return
if args.action == "setup":
createDdbTable(region=region, table=args.table)
return
else:
parsers['super'].print_help()
if __name__ == '__main__':
main()
| 40.668459
| 107
| 0.524567
|
import argparse
import csv
import json
import operator
import os
import os.path
import sys
import time
import re
import boto3
import botocore.exceptions
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
import yaml
NO_YAML = False
except ImportError:
NO_YAML = True
from base64 import b64encode, b64decode
from boto3.dynamodb.conditions import Attr
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto.Hash.HMAC import HMAC
from Crypto.Util import Counter
DEFAULT_REGION = "us-east-1"
PAD_LEN = 19
WILDCARD_CHAR = "*"
class KmsError(Exception):
def __init__(self, value=""):
self.value = "KMS ERROR: " + value if value is not "" else "KMS ERROR"
def __str__(self):
return self.value
class IntegrityError(Exception):
def __init__(self, value=""):
self.value = "INTEGRITY ERROR: " + value if value is not "" else \
"INTEGRITY ERROR"
def __str__(self):
return self.value
class ItemNotFound(Exception):
pass
class KeyValueToDictionary(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace,
self.dest,
dict((x[0], x[1]) for x in values))
def printStdErr(s):
sys.stderr.write(str(s))
sys.stderr.write("\n")
def key_value_pair(string):
output = string.split('=')
if len(output) != 2:
msg = "%r is not the form of \"key=value\"" % string
raise argparse.ArgumentTypeError(msg)
return output
def expand_wildcard(string, secrets):
prog = re.compile('^' + string.replace(WILDCARD_CHAR, '.*') + '$')
output = []
for secret in secrets:
if prog.search(secret) is not None:
output.append(secret)
return output
def value_or_filename(string):
if string == "":
return ""
if string[0] == "@":
filename = string[1:]
try:
with open(os.path.expanduser(filename)) as f:
output = f.read()
except IOError as e:
raise argparse.ArgumentTypeError("Unable to read file %s" %
filename)
else:
output = string
return output
def csv_dump(dictionary):
csvfile = StringIO()
csvwriter = csv.writer(csvfile)
for key in dictionary:
csvwriter.writerow([key, dictionary[key]])
return csvfile.getvalue()
def paddedInt(i):
i_str = str(i)
pad = PAD_LEN - len(i_str)
return (pad * "0") + i_str
def getHighestVersion(name, region="us-east-1", table="credential-store"):
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
response = secrets.query(Limit=1,
ScanIndexForward=False,
ConsistentRead=True,
KeyConditionExpression=boto3.dynamodb.conditions.Key("name").eq(name),
ProjectionExpression="version")
if response["Count"] == 0:
return 0
return response["Items"][0]["version"]
def listSecrets(region="us-east-1", table="credential-store"):
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
response = secrets.scan(ProjectionExpression="#N, version",
ExpressionAttributeNames={"#N": "name"})
return response["Items"]
def putSecret(name, secret, version, kms_key="alias/credstash",
region="us-east-1", table="credential-store", context=None):
if not context:
context = {}
kms = boto3.client('kms', region_name=region)
try:
kms_response = kms.generate_data_key(KeyId=kms_key, EncryptionContext=context, NumberOfBytes=64)
    except Exception:
raise KmsError("Could not generate key using KMS key %s" % kms_key)
data_key = kms_response['Plaintext'][:32]
hmac_key = kms_response['Plaintext'][32:]
wrapped_key = kms_response['CiphertextBlob']
enc_ctr = Counter.new(128)
encryptor = AES.new(data_key, AES.MODE_CTR, counter=enc_ctr)
c_text = encryptor.encrypt(secret)
hmac = HMAC(hmac_key, msg=c_text, digestmod=SHA256)
b64hmac = hmac.hexdigest()
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
data = {}
data['name'] = name
data['version'] = version if version != "" else paddedInt(1)
data['key'] = b64encode(wrapped_key).decode('utf-8')
data['contents'] = b64encode(c_text).decode('utf-8')
data['hmac'] = b64hmac
return secrets.put_item(Item=data, ConditionExpression=Attr('name').not_exists())
def getAllSecrets(version="", region="us-east-1",
table="credential-store", context=None):
output = {}
secrets = listSecrets(region, table)
for credential in set([x["name"] for x in secrets]):
try:
output[credential] = getSecret(credential,
version,
region,
table,
context)
        except Exception:
            pass
return output
def getSecret(name, version="", region="us-east-1",
table="credential-store", context=None):
if not context:
context = {}
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
if version == "":
response = secrets.query(Limit=1,
ScanIndexForward=False,
ConsistentRead=True,
KeyConditionExpression=boto3.dynamodb.conditions.Key("name").eq(name))
if response["Count"] == 0:
raise ItemNotFound("Item {'name': '%s'} couldn't be found." % name)
material = response["Items"][0]
else:
response = secrets.get_item(Key={"name": name, "version": version})
if "Item" not in response:
raise ItemNotFound("Item {'name': '%s', 'version': '%s'} couldn't be found." % (name, version))
material = response["Item"]
kms = boto3.client('kms', region_name=region)
try:
kms_response = kms.decrypt(CiphertextBlob=b64decode(material['key']), EncryptionContext=context)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "InvalidCiphertextException":
if context is None:
msg = ("Could not decrypt hmac key with KMS. The credential may "
"require that an encryption context be provided to decrypt "
"it.")
else:
msg = ("Could not decrypt hmac key with KMS. The encryption "
"context provided may not match the one used when the "
"credential was stored.")
else:
msg = "Decryption error %s" % e
raise KmsError(msg)
except Exception as e:
raise KmsError("Decryption error %s" % e)
key = kms_response['Plaintext'][:32]
hmac_key = kms_response['Plaintext'][32:]
hmac = HMAC(hmac_key, msg=b64decode(material['contents']),
digestmod=SHA256)
if hmac.hexdigest() != material['hmac']:
raise IntegrityError("Computed HMAC on %s does not match stored HMAC"
% name)
dec_ctr = Counter.new(128)
decryptor = AES.new(key, AES.MODE_CTR, counter=dec_ctr)
plaintext = decryptor.decrypt(b64decode(material['contents'])).decode("utf-8")
return plaintext
def deleteSecrets(name, region="us-east-1", table="credential-store"):
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
response = secrets.scan(FilterExpression=boto3.dynamodb.conditions.Attr("name").eq(name),
ProjectionExpression="#N, version",
ExpressionAttributeNames={"#N": "name"})
for secret in response["Items"]:
print("Deleting %s -- version %s" % (secret["name"], secret["version"]))
secrets.delete_item(Key=secret)
def createDdbTable(region="us-east-1", table="credential-store"):
dynamodb = boto3.resource("dynamodb", region_name=region)
if table in (t.name for t in dynamodb.tables.all()):
print("Credential Store table already exists")
return
print("Creating table...")
response = dynamodb.create_table(
TableName=table,
KeySchema=[
{
"AttributeName": "name",
"KeyType": "HASH",
},
{
"AttributeName": "version",
"KeyType": "RANGE",
}
],
AttributeDefinitions=[
{
"AttributeName": "name",
"AttributeType": "S",
},
{
"AttributeName": "version",
"AttributeType": "S",
},
],
ProvisionedThroughput={
"ReadCapacityUnits": 1,
"WriteCapacityUnits": 1,
}
)
print("Waiting for table to be created...")
client = boto3.client("dynamodb", region_name=region)
client.get_waiter("table_exists").wait(TableName=table)
print("Table has been created. "
"Go read the README about how to create your KMS key")
def main():
parsers = {}
parsers['super'] = argparse.ArgumentParser(
description="A credential/secret storage system")
parsers['super'].add_argument("-r", "--region",
help="the AWS region in which to operate."
"If a region is not specified, credstash "
"will use the value of the "
"AWS_DEFAULT_REGION env variable, "
"or if that is not set, us-east-1")
parsers['super'].add_argument("-t", "--table", default="credential-store",
help="DynamoDB table to use for "
"credential storage")
subparsers = parsers['super'].add_subparsers(help='Try commands like '
'"{name} get -h" or "{name}'
'put --help" to get each'
'sub command\'s options'
.format(name=os.path.basename(
__file__)))
action = 'delete'
    parsers[action] = subparsers.add_parser(action,
                                            help="Delete a credential "
                                                 "from the store")
parsers[action].add_argument("credential", type=str,
help="the name of the credential to delete")
parsers[action].set_defaults(action=action)
action = 'get'
parsers[action] = subparsers.add_parser(action, help="Get a credential "
"from the store")
parsers[action].add_argument("credential", type=str,
help="the name of the credential to get."
"Using the wildcard character '%s' will "
"search for credentials that match the "
"pattern" % WILDCARD_CHAR)
parsers[action].add_argument("context", type=key_value_pair,
action=KeyValueToDictionary, nargs='*',
help="encryption context key/value pairs "
"associated with the credential in the form "
"of \"key=value\"")
parsers[action].add_argument("-n", "--noline", action="store_true",
help="Don't append newline to returned "
"value (useful in scripts or with "
"binary files)")
parsers[action].add_argument("-v", "--version", default="",
help="Get a specific version of the "
"credential (defaults to the latest version)")
parsers[action].set_defaults(action=action)
action = 'getall'
parsers[action] = subparsers.add_parser(action,
help="Get all credentials from "
"the store")
parsers[action].add_argument("context", type=key_value_pair,
action=KeyValueToDictionary, nargs='*',
help="encryption context key/value pairs "
"associated with the credential in the form "
"of \"key=value\"")
parsers[action].add_argument("-v", "--version", default="",
help="Get a specific version of the "
"credential (defaults to the latest version)")
parsers[action].add_argument("-f", "--format", default="json",
choices=["json", "csv"] +
([] if NO_YAML else ["yaml"]),
help="Output format. json(default) " +
("" if NO_YAML else "yaml ") + "or csv.")
parsers[action].set_defaults(action=action)
action = 'list'
parsers[action] = subparsers.add_parser(action,
help="list credentials and "
"their versions")
parsers[action].set_defaults(action=action)
action = 'put'
parsers[action] = subparsers.add_parser(action,
help="Put a credential into "
"the store")
parsers[action].add_argument("credential", type=str,
help="the name of the credential to store")
parsers[action].add_argument("value", type=value_or_filename,
help="the value of the credential to store "
"or, if beginning with the \"@\" character, "
"the filename of the file containing "
"the value", default="")
parsers[action].add_argument("context", type=key_value_pair,
action=KeyValueToDictionary, nargs='*',
help="encryption context key/value pairs "
"associated with the credential in the form "
"of \"key=value\"")
parsers[action].add_argument("-k", "--key", default="alias/credstash",
help="the KMS key-id of the master key "
"to use. See the README for more "
"information. Defaults to alias/credstash")
parsers[action].add_argument("-v", "--version", default="",
help="Put a specific version of the "
"credential (update the credential; "
"defaults to version `1`).")
parsers[action].add_argument("-a", "--autoversion", action="store_true",
help="Automatically increment the version of "
"the credential to be stored. This option "
"causes the `-v` flag to be ignored. "
"(This option will fail if the currently stored "
"version is not numeric.)")
parsers[action].set_defaults(action=action)
action = 'setup'
parsers[action] = subparsers.add_parser(action,
help='setup the credential store')
parsers[action].set_defaults(action=action)
args = parsers['super'].parse_args()
region = os.getenv(
"AWS_DEFAULT_REGION", DEFAULT_REGION) if not args.region \
else args.region
if "action" in vars(args):
if args.action == "delete":
deleteSecrets(args.credential, region=region, table=args.table)
return
if args.action == "list":
credential_list = listSecrets(region=region, table=args.table)
if credential_list:
max_len = max([len(x["name"]) for x in credential_list])
for cred in sorted(credential_list,
key=operator.itemgetter("name", "version")):
print("{0:{1}} -- version {2:>}".format(
cred["name"], max_len, cred["version"]))
else:
return
if args.action == "put":
if args.autoversion:
latestVersion = getHighestVersion(args.credential, region,
args.table)
try:
version = paddedInt(int(latestVersion) + 1)
except ValueError:
printStdErr("Can not autoincrement version. The current "
"version: %s is not an int" % latestVersion)
return
else:
version = args.version
try:
if putSecret(args.credential, args.value, version,
kms_key=args.key, region=region, table=args.table,
context=args.context):
print("{0} has been stored".format(args.credential))
except KmsError as e:
printStdErr(e)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "ConditionalCheckFailedException":
latestVersion = getHighestVersion(args.credential, region,
args.table)
printStdErr("%s version %s is already in the credential store. "
"Use the -v flag to specify a new version" %
(args.credential, latestVersion))
return
if args.action == "get":
try:
if WILDCARD_CHAR in args.credential:
names = expand_wildcard(args.credential,
[x["name"]
for x
in listSecrets(region=region,
table=args.table)])
print(json.dumps(dict((name,
getSecret(name,
args.version,
region=region,
table=args.table,
context=args.context))
for name in names)))
else:
sys.stdout.write(getSecret(args.credential, args.version,
region=region, table=args.table,
context=args.context))
if not args.noline:
sys.stdout.write("\n")
except ItemNotFound as e:
printStdErr(e)
except KmsError as e:
printStdErr(e)
except IntegrityError as e:
printStdErr(e)
return
if args.action == "getall":
secrets = getAllSecrets(args.version,
region=region,
table=args.table,
context=args.context)
if args.format == "json":
output_func = json.dumps
output_args = {"sort_keys": True,
"indent": 4,
"separators": (',', ': ')}
elif not NO_YAML and args.format == "yaml":
output_func = yaml.dump
output_args = {"default_flow_style": False}
elif args.format == 'csv':
output_func = csv_dump
output_args = {}
print(output_func(secrets, **output_args))
return
if args.action == "setup":
createDdbTable(region=region, table=args.table)
return
else:
parsers['super'].print_help()
if __name__ == '__main__':
main()
| true
| true
|
7904600ba07e0805f4a8c9ab8409b728dbe04859
| 1,069
|
py
|
Python
|
src/cluster/sort_dataset_by_column/test.py
|
maximumSHOT-HSE/CurriculumLearning
|
bf5291812a9ec3feb083d3d84b579329781c8a6a
|
[
"MIT"
] | null | null | null |
src/cluster/sort_dataset_by_column/test.py
|
maximumSHOT-HSE/CurriculumLearning
|
bf5291812a9ec3feb083d3d84b579329781c8a6a
|
[
"MIT"
] | null | null | null |
src/cluster/sort_dataset_by_column/test.py
|
maximumSHOT-HSE/CurriculumLearning
|
bf5291812a9ec3feb083d3d84b579329781c8a6a
|
[
"MIT"
] | null | null | null |
import argparse
import datasets
import matplotlib.pyplot as plt
import numpy as np
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, required=True, help='Path to the directory with input dataset')
return parser.parse_args()
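# A hedged invocation sketch (the dataset path is illustrative):
#
#   python3 test.py --input ./data/sorted_by_tse
#
# For each split this plots the empirical CDF of the 'tse' column over the
# first ~10000 shuffled examples and saves it as '<split>.png'.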
if __name__ == '__main__':
args = parse_args()
dataset = datasets.load_from_disk(args.input).shuffle()
for part in dataset:
print()
print('part', part)
xs = []
ys = []
for i, x in enumerate(dataset[part]):
print(x['tse'], len(x['input_ids']))
xs.append(len(x['input_ids']))
ys.append(x['tse'])
if i >= 10000:
break
plt.clf()
plt.cla()
plt.title(f'{part} CDF')
# plt.xlabel('len')
# plt.ylabel('tse / len')
# plt.scatter(xs, ys)
# plt.hist(ys, bins=5000)
ys.sort()
ys = np.array(ys)
plt.plot(ys, np.arange(len(ys)))
plt.savefig(f'{part}.png')
| 27.410256
| 109
| 0.529467
|
import argparse
import datasets
import matplotlib.pyplot as plt
import numpy as np
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, required=True, help='Path to the directory with input dataset')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
dataset = datasets.load_from_disk(args.input).shuffle()
for part in dataset:
print()
print('part', part)
xs = []
ys = []
for i, x in enumerate(dataset[part]):
print(x['tse'], len(x['input_ids']))
xs.append(len(x['input_ids']))
ys.append(x['tse'])
if i >= 10000:
break
plt.clf()
plt.cla()
plt.title(f'{part} CDF')
ys.sort()
ys = np.array(ys)
plt.plot(ys, np.arange(len(ys)))
plt.savefig(f'{part}.png')
| true
| true
|
7904633fa6b03cf05460ce54575952cbcbeed1ec
| 16,245
|
py
|
Python
|
tools/infer/utility.py
|
xiaolao/PaddleOCR
|
21b9bd63646fdca95f63062d94fd62f35cfa61cc
|
[
"Apache-2.0"
] | 1
|
2021-08-12T16:59:40.000Z
|
2021-08-12T16:59:40.000Z
|
tools/infer/utility.py
|
xiaolao/PaddleOCR
|
21b9bd63646fdca95f63062d94fd62f35cfa61cc
|
[
"Apache-2.0"
] | null | null | null |
tools/infer/utility.py
|
xiaolao/PaddleOCR
|
21b9bd63646fdca95f63062d94fd62f35cfa61cc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import cv2
import numpy as np
import json
from PIL import Image, ImageDraw, ImageFont
import math
from paddle import inference
def parse_args():
def str2bool(v):
return v.lower() in ("true", "t", "1")
parser = argparse.ArgumentParser()
# params for prediction engine
parser.add_argument("--use_gpu", type=str2bool, default=True)
parser.add_argument("--ir_optim", type=str2bool, default=True)
parser.add_argument("--use_tensorrt", type=str2bool, default=False)
parser.add_argument("--use_fp16", type=str2bool, default=False)
parser.add_argument("--gpu_mem", type=int, default=500)
# params for text detector
parser.add_argument("--image_dir", type=str)
parser.add_argument("--det_algorithm", type=str, default='DB')
parser.add_argument("--det_model_dir", type=str)
parser.add_argument("--det_limit_side_len", type=float, default=960)
parser.add_argument("--det_limit_type", type=str, default='max')
    # DB params
parser.add_argument("--det_db_thresh", type=float, default=0.3)
parser.add_argument("--det_db_box_thresh", type=float, default=0.6)
parser.add_argument("--det_db_unclip_ratio", type=float, default=1.5)
parser.add_argument("--max_batch_size", type=int, default=10)
parser.add_argument("--use_dilation", type=bool, default=False)
parser.add_argument("--det_db_score_mode", type=str, default="fast")
    # EAST params
parser.add_argument("--det_east_score_thresh", type=float, default=0.8)
parser.add_argument("--det_east_cover_thresh", type=float, default=0.1)
parser.add_argument("--det_east_nms_thresh", type=float, default=0.2)
    # SAST params
parser.add_argument("--det_sast_score_thresh", type=float, default=0.5)
parser.add_argument("--det_sast_nms_thresh", type=float, default=0.2)
parser.add_argument("--det_sast_polygon", type=bool, default=False)
# params for text recognizer
parser.add_argument("--rec_algorithm", type=str, default='CRNN')
parser.add_argument("--rec_model_dir", type=str)
parser.add_argument("--rec_image_shape", type=str, default="3, 32, 320")
parser.add_argument("--rec_char_type", type=str, default='ch')
parser.add_argument("--rec_batch_num", type=int, default=6)
parser.add_argument("--max_text_length", type=int, default=25)
parser.add_argument(
"--rec_char_dict_path",
type=str,
default="./ppocr/utils/ppocr_keys_v1.txt")
parser.add_argument("--use_space_char", type=str2bool, default=True)
parser.add_argument(
"--vis_font_path", type=str, default="./doc/fonts/simfang.ttf")
parser.add_argument("--drop_score", type=float, default=0.5)
# params for e2e
parser.add_argument("--e2e_algorithm", type=str, default='PGNet')
parser.add_argument("--e2e_model_dir", type=str)
parser.add_argument("--e2e_limit_side_len", type=float, default=768)
parser.add_argument("--e2e_limit_type", type=str, default='max')
    # PGNet params
parser.add_argument("--e2e_pgnet_score_thresh", type=float, default=0.5)
parser.add_argument(
"--e2e_char_dict_path", type=str, default="./ppocr/utils/ic15_dict.txt")
parser.add_argument("--e2e_pgnet_valid_set", type=str, default='totaltext')
parser.add_argument("--e2e_pgnet_polygon", type=bool, default=True)
parser.add_argument("--e2e_pgnet_mode", type=str, default='fast')
# params for text classifier
parser.add_argument("--use_angle_cls", type=str2bool, default=False)
parser.add_argument("--cls_model_dir", type=str)
parser.add_argument("--cls_image_shape", type=str, default="3, 48, 192")
parser.add_argument("--label_list", type=list, default=['0', '180'])
parser.add_argument("--cls_batch_num", type=int, default=6)
parser.add_argument("--cls_thresh", type=float, default=0.9)
parser.add_argument("--enable_mkldnn", type=str2bool, default=False)
parser.add_argument("--cpu_threads", type=int, default=10)
parser.add_argument("--use_pdserving", type=str2bool, default=False)
parser.add_argument("--use_mp", type=str2bool, default=False)
parser.add_argument("--total_process_num", type=int, default=1)
parser.add_argument("--process_id", type=int, default=0)
return parser.parse_args()
def create_predictor(args, mode, logger):
if mode == "det":
model_dir = args.det_model_dir
elif mode == 'cls':
model_dir = args.cls_model_dir
elif mode == 'rec':
model_dir = args.rec_model_dir
else:
model_dir = args.e2e_model_dir
if model_dir is None:
logger.info("not find {} model file path {}".format(mode, model_dir))
sys.exit(0)
model_file_path = model_dir + "/inference.pdmodel"
params_file_path = model_dir + "/inference.pdiparams"
if not os.path.exists(model_file_path):
logger.info("not find model file path {}".format(model_file_path))
sys.exit(0)
if not os.path.exists(params_file_path):
logger.info("not find params file path {}".format(params_file_path))
sys.exit(0)
config = inference.Config(model_file_path, params_file_path)
if args.use_gpu:
config.enable_use_gpu(args.gpu_mem, 0)
if args.use_tensorrt:
config.enable_tensorrt_engine(
precision_mode=inference.PrecisionType.Half
if args.use_fp16 else inference.PrecisionType.Float32,
max_batch_size=args.max_batch_size)
else:
config.disable_gpu()
cpu_threads = args.cpu_threads if hasattr(args, "cpu_threads") else 10
config.set_cpu_math_library_num_threads(cpu_threads)
if args.enable_mkldnn:
# cache 10 different shapes for mkldnn to avoid memory leak
config.set_mkldnn_cache_capacity(10)
config.enable_mkldnn()
# enable memory optim
config.enable_memory_optim()
config.disable_glog_info()
config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
config.switch_use_feed_fetch_ops(False)
# create predictor
predictor = inference.create_predictor(config)
input_names = predictor.get_input_names()
for name in input_names:
input_tensor = predictor.get_input_handle(name)
output_names = predictor.get_output_names()
output_tensors = []
for output_name in output_names:
output_tensor = predictor.get_output_handle(output_name)
output_tensors.append(output_tensor)
return predictor, input_tensor, output_tensors
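# A hedged usage sketch (args would come from parse_args() above; the logger
# is assumed to be any object with an .info() method, and img_batch a
# preprocessed np.ndarray):
#
#   args = parse_args()
#   predictor, input_tensor, output_tensors = create_predictor(
#       args, mode="det", logger=logger)
#   input_tensor.copy_from_cpu(img_batch)
#   predictor.run()
#   outputs = [t.copy_to_cpu() for t in output_tensors]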
def draw_e2e_res(dt_boxes, strs, img_path):
src_im = cv2.imread(img_path)
for box, str in zip(dt_boxes, strs):
box = box.astype(np.int32).reshape((-1, 1, 2))
cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)
cv2.putText(
src_im,
str,
org=(int(box[0, 0, 0]), int(box[0, 0, 1])),
fontFace=cv2.FONT_HERSHEY_COMPLEX,
fontScale=0.7,
color=(0, 255, 0),
thickness=1)
return src_im
def draw_text_det_res(dt_boxes, img_path):
src_im = cv2.imread(img_path)
for box in dt_boxes:
box = np.array(box).astype(np.int32).reshape(-1, 2)
cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)
return src_im
def resize_img(img, input_size=600):
"""
resize img and limit the longest side of the image to input_size
"""
img = np.array(img)
im_shape = img.shape
im_size_max = np.max(im_shape[0:2])
im_scale = float(input_size) / float(im_size_max)
img = cv2.resize(img, None, None, fx=im_scale, fy=im_scale)
return img
def draw_ocr(image,
boxes,
txts=None,
scores=None,
drop_score=0.5,
font_path="./doc/simfang.ttf"):
"""
Visualize the results of OCR detection and recognition
args:
image(Image|array): RGB image
boxes(list): boxes with shape(N, 4, 2)
txts(list): the texts
        scores(list): the scores corresponding to txts
        drop_score(float): only scores greater than drop_score will be visualized
font_path: the path of font which is used to draw text
return(array):
the visualized img
"""
if scores is None:
scores = [1] * len(boxes)
box_num = len(boxes)
for i in range(box_num):
if scores is not None and (scores[i] < drop_score or
math.isnan(scores[i])):
continue
box = np.reshape(np.array(boxes[i]), [-1, 1, 2]).astype(np.int64)
image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)
if txts is not None:
img = np.array(resize_img(image, input_size=600))
txt_img = text_visual(
txts,
scores,
img_h=img.shape[0],
img_w=600,
threshold=drop_score,
font_path=font_path)
img = np.concatenate([np.array(img), np.array(txt_img)], axis=1)
return img
return image
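# A hedged usage sketch (the image path is illustrative; boxes, txts and
# scores are assumed to come from the detector and recognizer elsewhere in
# this toolkit):
#
#   image = Image.open("sample.jpg").convert("RGB")
#   vis = draw_ocr(image, boxes, txts=txts, scores=scores, drop_score=0.5)
#   cv2.imwrite("vis.jpg", vis[:, :, ::-1])  # RGB -> BGR for OpenCV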
def draw_ocr_box_txt(image,
boxes,
txts,
scores=None,
drop_score=0.5,
font_path="./doc/simfang.ttf"):
h, w = image.height, image.width
img_left = image.copy()
img_right = Image.new('RGB', (w, h), (255, 255, 255))
import random
random.seed(0)
draw_left = ImageDraw.Draw(img_left)
draw_right = ImageDraw.Draw(img_right)
for idx, (box, txt) in enumerate(zip(boxes, txts)):
if scores is not None and scores[idx] < drop_score:
continue
color = (random.randint(0, 255), random.randint(0, 255),
random.randint(0, 255))
draw_left.polygon(box, fill=color)
draw_right.polygon(
[
box[0][0], box[0][1], box[1][0], box[1][1], box[2][0],
box[2][1], box[3][0], box[3][1]
],
outline=color)
box_height = math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][
1])**2)
box_width = math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][
1])**2)
if box_height > 2 * box_width:
font_size = max(int(box_width * 0.9), 10)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
cur_y = box[0][1]
for c in txt:
char_size = font.getsize(c)
draw_right.text(
(box[0][0] + 3, cur_y), c, fill=(0, 0, 0), font=font)
cur_y += char_size[1]
else:
font_size = max(int(box_height * 0.8), 10)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
draw_right.text(
[box[0][0], box[0][1]], txt, fill=(0, 0, 0), font=font)
img_left = Image.blend(image, img_left, 0.5)
img_show = Image.new('RGB', (w * 2, h), (255, 255, 255))
img_show.paste(img_left, (0, 0, w, h))
img_show.paste(img_right, (w, 0, w * 2, h))
return np.array(img_show)
def str_count(s):
"""
Count the number of Chinese characters,
a single English character and a single number
equal to half the length of Chinese characters.
args:
s(string): the input of string
return(int):
the number of Chinese characters
"""
import string
count_zh = count_pu = 0
s_len = len(s)
en_dg_count = 0
for c in s:
if c in string.ascii_letters or c.isdigit() or c.isspace():
en_dg_count += 1
elif c.isalpha():
count_zh += 1
else:
count_pu += 1
return s_len - math.ceil(en_dg_count / 2)
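# Worked example: str_count("ab12") == 4 - ceil(4 / 2) == 2, while a
# 4-character Chinese string keeps its full length of 4.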
def text_visual(texts,
scores,
img_h=400,
img_w=600,
threshold=0.,
font_path="./doc/simfang.ttf"):
"""
create new blank img and draw txt on it
args:
texts(list): the text will be draw
scores(list|None): corresponding score of each txt
img_h(int): the height of blank img
img_w(int): the width of blank img
font_path: the path of font which is used to draw text
    return(array):
        the rendered text image
    """
if scores is not None:
assert len(texts) == len(
scores), "The number of txts and corresponding scores must match"
def create_blank_img():
        blank_img = np.ones(shape=[img_h, img_w], dtype=np.uint8) * 255
blank_img[:, img_w - 1:] = 0
blank_img = Image.fromarray(blank_img).convert("RGB")
draw_txt = ImageDraw.Draw(blank_img)
return blank_img, draw_txt
blank_img, draw_txt = create_blank_img()
font_size = 20
txt_color = (0, 0, 0)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
gap = font_size + 5
txt_img_list = []
count, index = 1, 0
for idx, txt in enumerate(texts):
index += 1
if scores[idx] < threshold or math.isnan(scores[idx]):
index -= 1
continue
first_line = True
while str_count(txt) >= img_w // font_size - 4:
tmp = txt
txt = tmp[:img_w // font_size - 4]
if first_line:
new_txt = str(index) + ': ' + txt
first_line = False
else:
new_txt = ' ' + txt
draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
txt = tmp[img_w // font_size - 4:]
if count >= img_h // gap - 1:
txt_img_list.append(np.array(blank_img))
blank_img, draw_txt = create_blank_img()
count = 0
count += 1
if first_line:
new_txt = str(index) + ': ' + txt + ' ' + '%.3f' % (scores[idx])
else:
new_txt = " " + txt + " " + '%.3f' % (scores[idx])
draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
        # decide whether to start a new blank image
if count >= img_h // gap - 1 and idx + 1 < len(texts):
txt_img_list.append(np.array(blank_img))
blank_img, draw_txt = create_blank_img()
count = 0
count += 1
txt_img_list.append(np.array(blank_img))
if len(txt_img_list) == 1:
blank_img = np.array(txt_img_list[0])
else:
blank_img = np.concatenate(txt_img_list, axis=1)
return np.array(blank_img)
def base64_to_cv2(b64str):
import base64
data = base64.b64decode(b64str.encode('utf8'))
    data = np.frombuffer(data, np.uint8)
data = cv2.imdecode(data, cv2.IMREAD_COLOR)
return data
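# A hedged round-trip sketch (the image path is illustrative):
#
#   import base64
#   with open("sample.jpg", "rb") as fh:
#       b64 = base64.b64encode(fh.read()).decode("utf8")
#   img = base64_to_cv2(b64)   # BGR ndarray, as returned by cv2.imdecode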
def draw_boxes(image, boxes, scores=None, drop_score=0.5):
if scores is None:
scores = [1] * len(boxes)
for (box, score) in zip(boxes, scores):
if score < drop_score:
continue
box = np.reshape(np.array(box), [-1, 1, 2]).astype(np.int64)
image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)
return image
if __name__ == '__main__':
test_img = "./doc/test_v2"
predict_txt = "./doc/predict.txt"
    with open(predict_txt, 'r') as f:
        data = f.readlines()
img_path, anno = data[0].strip().split('\t')
img_name = os.path.basename(img_path)
img_path = os.path.join(test_img, img_name)
image = Image.open(img_path)
data = json.loads(anno)
boxes, txts, scores = [], [], []
for dic in data:
boxes.append(dic['points'])
txts.append(dic['transcription'])
scores.append(round(dic['scores'], 3))
new_img = draw_ocr(image, boxes, txts, scores)
cv2.imwrite(img_name, new_img)
| 37.004556
| 85
| 0.620499
|
import argparse
import os
import sys
import cv2
import numpy as np
import json
from PIL import Image, ImageDraw, ImageFont
import math
from paddle import inference
def parse_args():
def str2bool(v):
return v.lower() in ("true", "t", "1")
parser = argparse.ArgumentParser()
parser.add_argument("--use_gpu", type=str2bool, default=True)
parser.add_argument("--ir_optim", type=str2bool, default=True)
parser.add_argument("--use_tensorrt", type=str2bool, default=False)
parser.add_argument("--use_fp16", type=str2bool, default=False)
parser.add_argument("--gpu_mem", type=int, default=500)
parser.add_argument("--image_dir", type=str)
parser.add_argument("--det_algorithm", type=str, default='DB')
parser.add_argument("--det_model_dir", type=str)
parser.add_argument("--det_limit_side_len", type=float, default=960)
parser.add_argument("--det_limit_type", type=str, default='max')
parser.add_argument("--det_db_thresh", type=float, default=0.3)
parser.add_argument("--det_db_box_thresh", type=float, default=0.6)
parser.add_argument("--det_db_unclip_ratio", type=float, default=1.5)
parser.add_argument("--max_batch_size", type=int, default=10)
parser.add_argument("--use_dilation", type=bool, default=False)
parser.add_argument("--det_db_score_mode", type=str, default="fast")
parser.add_argument("--det_east_score_thresh", type=float, default=0.8)
parser.add_argument("--det_east_cover_thresh", type=float, default=0.1)
parser.add_argument("--det_east_nms_thresh", type=float, default=0.2)
parser.add_argument("--det_sast_score_thresh", type=float, default=0.5)
parser.add_argument("--det_sast_nms_thresh", type=float, default=0.2)
parser.add_argument("--det_sast_polygon", type=bool, default=False)
parser.add_argument("--rec_algorithm", type=str, default='CRNN')
parser.add_argument("--rec_model_dir", type=str)
parser.add_argument("--rec_image_shape", type=str, default="3, 32, 320")
parser.add_argument("--rec_char_type", type=str, default='ch')
parser.add_argument("--rec_batch_num", type=int, default=6)
parser.add_argument("--max_text_length", type=int, default=25)
parser.add_argument(
"--rec_char_dict_path",
type=str,
default="./ppocr/utils/ppocr_keys_v1.txt")
parser.add_argument("--use_space_char", type=str2bool, default=True)
parser.add_argument(
"--vis_font_path", type=str, default="./doc/fonts/simfang.ttf")
parser.add_argument("--drop_score", type=float, default=0.5)
parser.add_argument("--e2e_algorithm", type=str, default='PGNet')
parser.add_argument("--e2e_model_dir", type=str)
parser.add_argument("--e2e_limit_side_len", type=float, default=768)
parser.add_argument("--e2e_limit_type", type=str, default='max')
parser.add_argument("--e2e_pgnet_score_thresh", type=float, default=0.5)
parser.add_argument(
"--e2e_char_dict_path", type=str, default="./ppocr/utils/ic15_dict.txt")
parser.add_argument("--e2e_pgnet_valid_set", type=str, default='totaltext')
parser.add_argument("--e2e_pgnet_polygon", type=bool, default=True)
parser.add_argument("--e2e_pgnet_mode", type=str, default='fast')
parser.add_argument("--use_angle_cls", type=str2bool, default=False)
parser.add_argument("--cls_model_dir", type=str)
parser.add_argument("--cls_image_shape", type=str, default="3, 48, 192")
parser.add_argument("--label_list", type=list, default=['0', '180'])
parser.add_argument("--cls_batch_num", type=int, default=6)
parser.add_argument("--cls_thresh", type=float, default=0.9)
parser.add_argument("--enable_mkldnn", type=str2bool, default=False)
parser.add_argument("--cpu_threads", type=int, default=10)
parser.add_argument("--use_pdserving", type=str2bool, default=False)
parser.add_argument("--use_mp", type=str2bool, default=False)
parser.add_argument("--total_process_num", type=int, default=1)
parser.add_argument("--process_id", type=int, default=0)
return parser.parse_args()
def create_predictor(args, mode, logger):
if mode == "det":
model_dir = args.det_model_dir
elif mode == 'cls':
model_dir = args.cls_model_dir
elif mode == 'rec':
model_dir = args.rec_model_dir
else:
model_dir = args.e2e_model_dir
if model_dir is None:
logger.info("not find {} model file path {}".format(mode, model_dir))
sys.exit(0)
model_file_path = model_dir + "/inference.pdmodel"
params_file_path = model_dir + "/inference.pdiparams"
if not os.path.exists(model_file_path):
logger.info("not find model file path {}".format(model_file_path))
sys.exit(0)
if not os.path.exists(params_file_path):
logger.info("not find params file path {}".format(params_file_path))
sys.exit(0)
config = inference.Config(model_file_path, params_file_path)
if args.use_gpu:
config.enable_use_gpu(args.gpu_mem, 0)
if args.use_tensorrt:
config.enable_tensorrt_engine(
precision_mode=inference.PrecisionType.Half
if args.use_fp16 else inference.PrecisionType.Float32,
max_batch_size=args.max_batch_size)
else:
config.disable_gpu()
cpu_threads = args.cpu_threads if hasattr(args, "cpu_threads") else 10
config.set_cpu_math_library_num_threads(cpu_threads)
if args.enable_mkldnn:
config.set_mkldnn_cache_capacity(10)
config.enable_mkldnn()
config.enable_memory_optim()
config.disable_glog_info()
config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
config.switch_use_feed_fetch_ops(False)
predictor = inference.create_predictor(config)
input_names = predictor.get_input_names()
for name in input_names:
input_tensor = predictor.get_input_handle(name)
output_names = predictor.get_output_names()
output_tensors = []
for output_name in output_names:
output_tensor = predictor.get_output_handle(output_name)
output_tensors.append(output_tensor)
return predictor, input_tensor, output_tensors
def draw_e2e_res(dt_boxes, strs, img_path):
src_im = cv2.imread(img_path)
for box, str in zip(dt_boxes, strs):
box = box.astype(np.int32).reshape((-1, 1, 2))
cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)
cv2.putText(
src_im,
str,
org=(int(box[0, 0, 0]), int(box[0, 0, 1])),
fontFace=cv2.FONT_HERSHEY_COMPLEX,
fontScale=0.7,
color=(0, 255, 0),
thickness=1)
return src_im
def draw_text_det_res(dt_boxes, img_path):
src_im = cv2.imread(img_path)
for box in dt_boxes:
box = np.array(box).astype(np.int32).reshape(-1, 2)
cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)
return src_im
def resize_img(img, input_size=600):
img = np.array(img)
im_shape = img.shape
im_size_max = np.max(im_shape[0:2])
im_scale = float(input_size) / float(im_size_max)
img = cv2.resize(img, None, None, fx=im_scale, fy=im_scale)
return img
def draw_ocr(image,
boxes,
txts=None,
scores=None,
drop_score=0.5,
font_path="./doc/simfang.ttf"):
if scores is None:
scores = [1] * len(boxes)
box_num = len(boxes)
for i in range(box_num):
if scores is not None and (scores[i] < drop_score or
math.isnan(scores[i])):
continue
box = np.reshape(np.array(boxes[i]), [-1, 1, 2]).astype(np.int64)
image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)
if txts is not None:
img = np.array(resize_img(image, input_size=600))
txt_img = text_visual(
txts,
scores,
img_h=img.shape[0],
img_w=600,
threshold=drop_score,
font_path=font_path)
img = np.concatenate([np.array(img), np.array(txt_img)], axis=1)
return img
return image
def draw_ocr_box_txt(image,
boxes,
txts,
scores=None,
drop_score=0.5,
font_path="./doc/simfang.ttf"):
h, w = image.height, image.width
img_left = image.copy()
img_right = Image.new('RGB', (w, h), (255, 255, 255))
import random
random.seed(0)
draw_left = ImageDraw.Draw(img_left)
draw_right = ImageDraw.Draw(img_right)
for idx, (box, txt) in enumerate(zip(boxes, txts)):
if scores is not None and scores[idx] < drop_score:
continue
color = (random.randint(0, 255), random.randint(0, 255),
random.randint(0, 255))
draw_left.polygon(box, fill=color)
draw_right.polygon(
[
box[0][0], box[0][1], box[1][0], box[1][1], box[2][0],
box[2][1], box[3][0], box[3][1]
],
outline=color)
box_height = math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][
1])**2)
box_width = math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][
1])**2)
if box_height > 2 * box_width:
font_size = max(int(box_width * 0.9), 10)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
cur_y = box[0][1]
for c in txt:
char_size = font.getsize(c)
draw_right.text(
(box[0][0] + 3, cur_y), c, fill=(0, 0, 0), font=font)
cur_y += char_size[1]
else:
font_size = max(int(box_height * 0.8), 10)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
draw_right.text(
[box[0][0], box[0][1]], txt, fill=(0, 0, 0), font=font)
img_left = Image.blend(image, img_left, 0.5)
img_show = Image.new('RGB', (w * 2, h), (255, 255, 255))
img_show.paste(img_left, (0, 0, w, h))
img_show.paste(img_right, (w, 0, w * 2, h))
return np.array(img_show)
def str_count(s):
import string
count_zh = count_pu = 0
s_len = len(s)
en_dg_count = 0
for c in s:
if c in string.ascii_letters or c.isdigit() or c.isspace():
en_dg_count += 1
elif c.isalpha():
count_zh += 1
else:
count_pu += 1
return s_len - math.ceil(en_dg_count / 2)
def text_visual(texts,
scores,
img_h=400,
img_w=600,
threshold=0.,
font_path="./doc/simfang.ttf"):
if scores is not None:
assert len(texts) == len(
scores), "The number of txts and corresponding scores must match"
def create_blank_img():
        blank_img = np.ones(shape=[img_h, img_w], dtype=np.uint8) * 255
blank_img[:, img_w - 1:] = 0
blank_img = Image.fromarray(blank_img).convert("RGB")
draw_txt = ImageDraw.Draw(blank_img)
return blank_img, draw_txt
blank_img, draw_txt = create_blank_img()
font_size = 20
txt_color = (0, 0, 0)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
gap = font_size + 5
txt_img_list = []
count, index = 1, 0
for idx, txt in enumerate(texts):
index += 1
if scores[idx] < threshold or math.isnan(scores[idx]):
index -= 1
continue
first_line = True
while str_count(txt) >= img_w // font_size - 4:
tmp = txt
txt = tmp[:img_w // font_size - 4]
if first_line:
new_txt = str(index) + ': ' + txt
first_line = False
else:
new_txt = ' ' + txt
draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
txt = tmp[img_w // font_size - 4:]
if count >= img_h // gap - 1:
txt_img_list.append(np.array(blank_img))
blank_img, draw_txt = create_blank_img()
count = 0
count += 1
if first_line:
new_txt = str(index) + ': ' + txt + ' ' + '%.3f' % (scores[idx])
else:
new_txt = " " + txt + " " + '%.3f' % (scores[idx])
draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
if count >= img_h // gap - 1 and idx + 1 < len(texts):
txt_img_list.append(np.array(blank_img))
blank_img, draw_txt = create_blank_img()
count = 0
count += 1
txt_img_list.append(np.array(blank_img))
if len(txt_img_list) == 1:
blank_img = np.array(txt_img_list[0])
else:
blank_img = np.concatenate(txt_img_list, axis=1)
return np.array(blank_img)
def base64_to_cv2(b64str):
import base64
data = base64.b64decode(b64str.encode('utf8'))
    data = np.frombuffer(data, np.uint8)
data = cv2.imdecode(data, cv2.IMREAD_COLOR)
return data
def draw_boxes(image, boxes, scores=None, drop_score=0.5):
if scores is None:
scores = [1] * len(boxes)
for (box, score) in zip(boxes, scores):
if score < drop_score:
continue
box = np.reshape(np.array(box), [-1, 1, 2]).astype(np.int64)
image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)
return image
if __name__ == '__main__':
test_img = "./doc/test_v2"
predict_txt = "./doc/predict.txt"
    with open(predict_txt, 'r') as f:
        data = f.readlines()
img_path, anno = data[0].strip().split('\t')
img_name = os.path.basename(img_path)
img_path = os.path.join(test_img, img_name)
image = Image.open(img_path)
data = json.loads(anno)
boxes, txts, scores = [], [], []
for dic in data:
boxes.append(dic['points'])
txts.append(dic['transcription'])
scores.append(round(dic['scores'], 3))
new_img = draw_ocr(image, boxes, txts, scores)
cv2.imwrite(img_name, new_img)
| true
| true
|
790463736501d4403b193052a2d4cff7625baeeb
| 13,560
|
py
|
Python
|
edb/lang/ir/inference/types.py
|
mcaramma/edgedb
|
53b18dbaf7407617ca135d1f8a5047bda6414654
|
[
"Apache-2.0"
] | null | null | null |
edb/lang/ir/inference/types.py
|
mcaramma/edgedb
|
53b18dbaf7407617ca135d1f8a5047bda6414654
|
[
"Apache-2.0"
] | null | null | null |
edb/lang/ir/inference/types.py
|
mcaramma/edgedb
|
53b18dbaf7407617ca135d1f8a5047bda6414654
|
[
"Apache-2.0"
] | null | null | null |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2015-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import functools
import typing
from edb.lang.common import ast
from edb.lang.schema import basetypes as s_basetypes
from edb.lang.schema import inheriting as s_inh
from edb.lang.schema import name as s_name
from edb.lang.schema import objects as s_obj
from edb.lang.schema import types as s_types
from edb.lang.schema import utils as s_utils
from edb.lang.edgeql import ast as qlast
from edb.lang.edgeql import errors as ql_errors
from edb.lang.ir import ast as irast
def is_polymorphic_type(t):
if isinstance(t, s_types.Collection):
return any(is_polymorphic_type(st) for st in t.get_subtypes())
else:
return t.name == 'std::any'
def amend_empty_set_type(es: irast.EmptySet, t: s_obj.Object, schema) -> None:
alias = es.path_id.target.name.name
scls_name = s_name.Name(module='__expr__', name=alias)
scls = t.__class__(name=scls_name, bases=[t])
scls.acquire_ancestor_inheritance(schema)
es.path_id = irast.PathId(scls)
es.scls = t
def _infer_common_type(irs: typing.List[irast.Base], schema):
if not irs:
raise ql_errors.EdgeQLError(
'cannot determine common type of an empty set',
context=irs[0].context)
col_type = None
arg_types = []
empties = []
for i, arg in enumerate(irs):
if isinstance(arg, irast.EmptySet) and arg.scls is None:
empties.append(i)
continue
arg_type = infer_type(arg, schema)
arg_types.append(arg_type)
if isinstance(arg_type, s_types.Collection):
col_type = arg_type
if not arg_types:
raise ql_errors.EdgeQLError(
'cannot determine common type of an empty set',
context=irs[0].context)
if col_type is not None:
if not all(col_type.issubclass(t) for t in arg_types):
raise ql_errors.EdgeQLError(
'cannot determine common type',
context=irs[0].context)
common_type = col_type
else:
common_type = s_utils.get_class_nearest_common_ancestor(arg_types)
for i in empties:
amend_empty_set_type(irs[i], common_type, schema)
return common_type
@functools.singledispatch
def _infer_type(ir, schema):
return
@_infer_type.register(type(None))
def __infer_none(ir, schema):
# Here for debugging purposes.
raise ValueError('invalid infer_type(None, schema) call')
@_infer_type.register(irast.Statement)
def __infer_statement(ir, schema):
return infer_type(ir.expr, schema)
@_infer_type.register(irast.Set)
def __infer_set(ir, schema):
return ir.scls
@_infer_type.register(irast.FunctionCall)
def __infer_func_call(ir, schema):
rtype = ir.func.returntype
if is_polymorphic_type(rtype):
# Polymorphic function, determine the result type from
# the argument type.
if isinstance(rtype, s_types.Tuple):
for i, arg in enumerate(ir.args):
if is_polymorphic_type(ir.func.paramtypes[i]):
arg_type = infer_type(arg, schema)
stypes = collections.OrderedDict(rtype.element_types)
for sn, st in stypes.items():
if is_polymorphic_type(st):
stypes[sn] = arg_type
break
return rtype.from_subtypes(stypes, rtype.get_typemods())
elif isinstance(rtype, s_types.Collection):
for i, arg in enumerate(ir.args):
if is_polymorphic_type(ir.func.paramtypes[i]):
arg_type = infer_type(arg, schema)
stypes = list(rtype.get_subtypes())
for si, st in enumerate(stypes):
if is_polymorphic_type(st):
stypes[si] = arg_type
break
return rtype.from_subtypes(stypes, rtype.get_typemods())
else:
for i, arg in enumerate(ir.args):
if is_polymorphic_type(ir.func.paramtypes[i]):
arg_type = infer_type(arg, schema)
if isinstance(arg_type, s_types.Collection):
stypes = list(arg_type.get_subtypes())
return stypes[-1]
else:
return rtype
@_infer_type.register(irast.Constant)
@_infer_type.register(irast.Parameter)
def __infer_const_or_param(ir, schema):
return ir.type
@_infer_type.register(irast.Coalesce)
def __infer_coalesce(ir, schema):
result = _infer_common_type([ir.left, ir.right], schema)
if result is None:
raise ql_errors.EdgeQLError(
'coalescing operator must have operands of related types',
context=ir.context)
return result
@_infer_type.register(irast.SetOp)
def __infer_setop(ir, schema):
left_type = infer_type(ir.left, schema).material_type()
right_type = infer_type(ir.right, schema).material_type()
# for purposes of type inference UNION and UNION ALL work almost
# the same way
if ir.op == qlast.UNION:
if left_type.issubclass(right_type):
result = left_type
elif right_type.issubclass(left_type):
result = right_type
else:
result = s_inh.create_virtual_parent(
schema, [left_type, right_type])
else:
result = infer_type(ir.left, schema)
# create_virtual_parent will raise if types are incompatible.
s_inh.create_virtual_parent(schema, [left_type, right_type])
return result
@_infer_type.register(irast.DistinctOp)
def __infer_distinctop(ir, schema):
result = infer_type(ir.expr, schema)
return result
def _infer_binop_args(left, right, schema):
if not isinstance(left, irast.EmptySet) or left.scls is not None:
left_type = infer_type(left, schema)
else:
left_type = None
if not isinstance(right, irast.EmptySet) or right.scls is not None:
right_type = infer_type(right, schema)
else:
right_type = None
if left_type is None and right_type is None:
raise ql_errors.EdgeQLError(
'cannot determine the type of an empty set',
context=left.context)
elif left_type is None:
amend_empty_set_type(left, right_type, schema)
left_type = right_type
elif right_type is None:
amend_empty_set_type(right, left_type, schema)
right_type = left_type
return left_type, right_type
@_infer_type.register(irast.BinOp)
def __infer_binop(ir, schema):
left_type, right_type = _infer_binop_args(ir.left, ir.right, schema)
if isinstance(ir.op, (ast.ops.ComparisonOperator,
ast.ops.MembershipOperator)):
result = schema.get('std::bool')
else:
result = s_basetypes.TypeRules.get_result(
ir.op, (left_type, right_type), schema)
if result is None:
result = s_basetypes.TypeRules.get_result(
(ir.op, 'reversed'), (right_type, left_type), schema)
if result is None:
if right_type.implicitly_castable_to(left_type, schema):
right_type = left_type
elif left_type.implicitly_castable_to(right_type, schema):
left_type = right_type
result = s_basetypes.TypeRules.get_result(
(ir.op, 'reversed'), (right_type, left_type), schema)
if result is None:
raise ql_errors.EdgeQLError(
f'binary operator `{ir.op.upper()}` is not defined for types '
f'{left_type.name} and {right_type.name}',
context=ir.left.context)
return result
@_infer_type.register(irast.EquivalenceOp)
def __infer_equivop(ir, schema):
left_type, right_type = _infer_binop_args(ir.left, ir.right, schema)
return schema.get('std::bool')
@_infer_type.register(irast.TypeCheckOp)
def __infer_typecheckop(ir, schema):
left_type, right_type = _infer_binop_args(ir.left, ir.right, schema)
return schema.get('std::bool')
@_infer_type.register(irast.UnaryOp)
def __infer_unaryop(ir, schema):
result = None
operand_type = infer_type(ir.expr, schema)
if ir.op == ast.ops.NOT:
if operand_type.name == 'std::bool':
result = operand_type
else:
if ir.op not in {ast.ops.UPLUS, ast.ops.UMINUS}:
raise ql_errors.EdgeQLError(
f'unknown unary operator: {ir.op}',
context=ir.context)
result = s_basetypes.TypeRules.get_result(
ir.op, (operand_type,), schema)
if result is None:
raise ql_errors.EdgeQLError(
f'unary operator `{ir.op.upper()}` is not defined '
f'for type {operand_type.name}',
context=ir.context)
return result
@_infer_type.register(irast.IfElseExpr)
def __infer_ifelse(ir, schema):
if_expr_type = infer_type(ir.if_expr, schema)
else_expr_type = infer_type(ir.else_expr, schema)
result = s_utils.get_class_nearest_common_ancestor(
[if_expr_type, else_expr_type])
if result is None:
raise ql_errors.EdgeQLError(
'if/else clauses must be of related types, got: {}/{}'.format(
if_expr_type.name, else_expr_type.name),
context=ir.if_expr.context)
return result
@_infer_type.register(irast.TypeRef)
def __infer_typeref(ir, schema):
if ir.subtypes:
coll = s_types.Collection.get_class(ir.maintype)
result = coll.from_subtypes(
[infer_type(t, schema) for t in ir.subtypes])
else:
result = schema.get(ir.maintype)
return result
@_infer_type.register(irast.TypeCast)
def __infer_typecast(ir, schema):
return infer_type(ir.type, schema)
@_infer_type.register(irast.Stmt)
def __infer_stmt(ir, schema):
return infer_type(ir.result, schema)
@_infer_type.register(irast.ExistPred)
def __infer_exist(ir, schema):
bool_t = schema.get('std::bool')
if isinstance(ir.expr, irast.EmptySet) and ir.expr.scls is None:
amend_empty_set_type(ir.expr, bool_t, schema=schema)
return bool_t
@_infer_type.register(irast.SliceIndirection)
def __infer_slice(ir, schema):
return infer_type(ir.expr, schema)
@_infer_type.register(irast.IndexIndirection)
def __infer_index(ir, schema):
node_type = infer_type(ir.expr, schema)
index_type = infer_type(ir.index, schema)
str_t = schema.get('std::str')
int_t = schema.get('std::int64')
result = None
if node_type.issubclass(str_t):
if not index_type.issubclass(int_t):
raise ql_errors.EdgeQLError(
f'cannot index string by {index_type.name}, '
f'{int_t.name} was expected',
context=ir.index.context)
result = str_t
elif isinstance(node_type, s_types.Array):
if not index_type.issubclass(int_t):
raise ql_errors.EdgeQLError(
f'cannot index array by {index_type.name}, '
f'{int_t.name} was expected',
context=ir.index.context)
result = node_type.element_type
return result
@_infer_type.register(irast.Array)
def __infer_array(ir, schema):
if ir.elements:
element_type = _infer_common_type(ir.elements, schema)
if element_type is None:
raise ql_errors.EdgeQLError('could not determine array type',
context=ir.context)
else:
raise ql_errors.EdgeQLError(
'could not determine type of empty array',
context=ir.context)
return s_types.Array(element_type=element_type)
@_infer_type.register(irast.Tuple)
def __infer_struct(ir, schema):
element_types = {el.name: infer_type(el.val, schema) for el in ir.elements}
return s_types.Tuple(element_types=element_types, named=ir.named)
@_infer_type.register(irast.TupleIndirection)
def __infer_struct_indirection(ir, schema):
struct_type = infer_type(ir.expr, schema)
result = struct_type.element_types.get(ir.name)
if result is None:
raise ql_errors.EdgeQLError('could not determine struct element type',
context=ir.context)
return result
def infer_type(ir, schema):
try:
return ir._inferred_type_
except AttributeError:
pass
result = _infer_type(ir, schema)
if (result is not None and
not isinstance(result, (s_obj.Object, s_obj.ObjectMeta))):
raise ql_errors.EdgeQLError(
f'infer_type({ir!r}) returned {result!r} instead of an Object',
context=ir.context)
if result is None or result.name == 'std::any':
raise ql_errors.EdgeQLError('could not determine expression type',
context=ir.context)
ir._inferred_type_ = result
return result
| 30.609481
| 79
| 0.652212
|
import collections
import functools
import typing
from edb.lang.common import ast
from edb.lang.schema import basetypes as s_basetypes
from edb.lang.schema import inheriting as s_inh
from edb.lang.schema import name as s_name
from edb.lang.schema import objects as s_obj
from edb.lang.schema import types as s_types
from edb.lang.schema import utils as s_utils
from edb.lang.edgeql import ast as qlast
from edb.lang.edgeql import errors as ql_errors
from edb.lang.ir import ast as irast
def is_polymorphic_type(t):
if isinstance(t, s_types.Collection):
return any(is_polymorphic_type(st) for st in t.get_subtypes())
else:
return t.name == 'std::any'
def amend_empty_set_type(es: irast.EmptySet, t: s_obj.Object, schema) -> None:
alias = es.path_id.target.name.name
scls_name = s_name.Name(module='__expr__', name=alias)
scls = t.__class__(name=scls_name, bases=[t])
scls.acquire_ancestor_inheritance(schema)
es.path_id = irast.PathId(scls)
es.scls = t
def _infer_common_type(irs: typing.List[irast.Base], schema):
if not irs:
raise ql_errors.EdgeQLError(
'cannot determine common type of an empty set',
context=irs[0].context)
col_type = None
arg_types = []
empties = []
for i, arg in enumerate(irs):
if isinstance(arg, irast.EmptySet) and arg.scls is None:
empties.append(i)
continue
arg_type = infer_type(arg, schema)
arg_types.append(arg_type)
if isinstance(arg_type, s_types.Collection):
col_type = arg_type
if not arg_types:
raise ql_errors.EdgeQLError(
'cannot determine common type of an empty set',
context=irs[0].context)
if col_type is not None:
if not all(col_type.issubclass(t) for t in arg_types):
raise ql_errors.EdgeQLError(
'cannot determine common type',
context=irs[0].context)
common_type = col_type
else:
common_type = s_utils.get_class_nearest_common_ancestor(arg_types)
for i in empties:
amend_empty_set_type(irs[i], common_type, schema)
return common_type
@functools.singledispatch
def _infer_type(ir, schema):
return
@_infer_type.register(type(None))
def __infer_none(ir, schema):
raise ValueError('invalid infer_type(None, schema) call')
@_infer_type.register(irast.Statement)
def __infer_statement(ir, schema):
return infer_type(ir.expr, schema)
@_infer_type.register(irast.Set)
def __infer_set(ir, schema):
return ir.scls
@_infer_type.register(irast.FunctionCall)
def __infer_func_call(ir, schema):
rtype = ir.func.returntype
if is_polymorphic_type(rtype):
if isinstance(rtype, s_types.Tuple):
for i, arg in enumerate(ir.args):
if is_polymorphic_type(ir.func.paramtypes[i]):
arg_type = infer_type(arg, schema)
stypes = collections.OrderedDict(rtype.element_types)
for sn, st in stypes.items():
if is_polymorphic_type(st):
stypes[sn] = arg_type
break
return rtype.from_subtypes(stypes, rtype.get_typemods())
elif isinstance(rtype, s_types.Collection):
for i, arg in enumerate(ir.args):
if is_polymorphic_type(ir.func.paramtypes[i]):
arg_type = infer_type(arg, schema)
stypes = list(rtype.get_subtypes())
for si, st in enumerate(stypes):
if is_polymorphic_type(st):
stypes[si] = arg_type
break
return rtype.from_subtypes(stypes, rtype.get_typemods())
else:
for i, arg in enumerate(ir.args):
if is_polymorphic_type(ir.func.paramtypes[i]):
arg_type = infer_type(arg, schema)
if isinstance(arg_type, s_types.Collection):
stypes = list(arg_type.get_subtypes())
return stypes[-1]
else:
return rtype
@_infer_type.register(irast.Constant)
@_infer_type.register(irast.Parameter)
def __infer_const_or_param(ir, schema):
return ir.type
@_infer_type.register(irast.Coalesce)
def __infer_coalesce(ir, schema):
result = _infer_common_type([ir.left, ir.right], schema)
if result is None:
raise ql_errors.EdgeQLError(
'coalescing operator must have operands of related types',
context=ir.context)
return result
@_infer_type.register(irast.SetOp)
def __infer_setop(ir, schema):
left_type = infer_type(ir.left, schema).material_type()
right_type = infer_type(ir.right, schema).material_type()
if ir.op == qlast.UNION:
if left_type.issubclass(right_type):
result = left_type
elif right_type.issubclass(left_type):
result = right_type
else:
result = s_inh.create_virtual_parent(
schema, [left_type, right_type])
else:
result = infer_type(ir.left, schema)
s_inh.create_virtual_parent(schema, [left_type, right_type])
return result
@_infer_type.register(irast.DistinctOp)
def __infer_distinctop(ir, schema):
result = infer_type(ir.expr, schema)
return result
def _infer_binop_args(left, right, schema):
if not isinstance(left, irast.EmptySet) or left.scls is not None:
left_type = infer_type(left, schema)
else:
left_type = None
if not isinstance(right, irast.EmptySet) or right.scls is not None:
right_type = infer_type(right, schema)
else:
right_type = None
if left_type is None and right_type is None:
raise ql_errors.EdgeQLError(
'cannot determine the type of an empty set',
context=left.context)
elif left_type is None:
amend_empty_set_type(left, right_type, schema)
left_type = right_type
elif right_type is None:
amend_empty_set_type(right, left_type, schema)
right_type = left_type
return left_type, right_type
@_infer_type.register(irast.BinOp)
def __infer_binop(ir, schema):
left_type, right_type = _infer_binop_args(ir.left, ir.right, schema)
if isinstance(ir.op, (ast.ops.ComparisonOperator,
ast.ops.MembershipOperator)):
result = schema.get('std::bool')
else:
result = s_basetypes.TypeRules.get_result(
ir.op, (left_type, right_type), schema)
if result is None:
result = s_basetypes.TypeRules.get_result(
(ir.op, 'reversed'), (right_type, left_type), schema)
if result is None:
if right_type.implicitly_castable_to(left_type, schema):
right_type = left_type
elif left_type.implicitly_castable_to(right_type, schema):
left_type = right_type
result = s_basetypes.TypeRules.get_result(
(ir.op, 'reversed'), (right_type, left_type), schema)
if result is None:
raise ql_errors.EdgeQLError(
f'binary operator `{ir.op.upper()}` is not defined for types '
f'{left_type.name} and {right_type.name}',
context=ir.left.context)
return result
@_infer_type.register(irast.EquivalenceOp)
def __infer_equivop(ir, schema):
left_type, right_type = _infer_binop_args(ir.left, ir.right, schema)
return schema.get('std::bool')
@_infer_type.register(irast.TypeCheckOp)
def __infer_typecheckop(ir, schema):
left_type, right_type = _infer_binop_args(ir.left, ir.right, schema)
return schema.get('std::bool')
@_infer_type.register(irast.UnaryOp)
def __infer_unaryop(ir, schema):
result = None
operand_type = infer_type(ir.expr, schema)
if ir.op == ast.ops.NOT:
if operand_type.name == 'std::bool':
result = operand_type
else:
if ir.op not in {ast.ops.UPLUS, ast.ops.UMINUS}:
raise ql_errors.EdgeQLError(
f'unknown unary operator: {ir.op}',
context=ir.context)
result = s_basetypes.TypeRules.get_result(
ir.op, (operand_type,), schema)
if result is None:
raise ql_errors.EdgeQLError(
f'unary operator `{ir.op.upper()}` is not defined '
f'for type {operand_type.name}',
context=ir.context)
return result
@_infer_type.register(irast.IfElseExpr)
def __infer_ifelse(ir, schema):
if_expr_type = infer_type(ir.if_expr, schema)
else_expr_type = infer_type(ir.else_expr, schema)
result = s_utils.get_class_nearest_common_ancestor(
[if_expr_type, else_expr_type])
if result is None:
raise ql_errors.EdgeQLError(
'if/else clauses must be of related types, got: {}/{}'.format(
if_expr_type.name, else_expr_type.name),
context=ir.if_expr.context)
return result
@_infer_type.register(irast.TypeRef)
def __infer_typeref(ir, schema):
if ir.subtypes:
coll = s_types.Collection.get_class(ir.maintype)
result = coll.from_subtypes(
[infer_type(t, schema) for t in ir.subtypes])
else:
result = schema.get(ir.maintype)
return result
@_infer_type.register(irast.TypeCast)
def __infer_typecast(ir, schema):
return infer_type(ir.type, schema)
@_infer_type.register(irast.Stmt)
def __infer_stmt(ir, schema):
return infer_type(ir.result, schema)
@_infer_type.register(irast.ExistPred)
def __infer_exist(ir, schema):
bool_t = schema.get('std::bool')
if isinstance(ir.expr, irast.EmptySet) and ir.expr.scls is None:
amend_empty_set_type(ir.expr, bool_t, schema=schema)
return bool_t
@_infer_type.register(irast.SliceIndirection)
def __infer_slice(ir, schema):
return infer_type(ir.expr, schema)
@_infer_type.register(irast.IndexIndirection)
def __infer_index(ir, schema):
node_type = infer_type(ir.expr, schema)
index_type = infer_type(ir.index, schema)
str_t = schema.get('std::str')
int_t = schema.get('std::int64')
result = None
if node_type.issubclass(str_t):
if not index_type.issubclass(int_t):
raise ql_errors.EdgeQLError(
f'cannot index string by {index_type.name}, '
f'{int_t.name} was expected',
context=ir.index.context)
result = str_t
elif isinstance(node_type, s_types.Array):
if not index_type.issubclass(int_t):
raise ql_errors.EdgeQLError(
f'cannot index array by {index_type.name}, '
f'{int_t.name} was expected',
context=ir.index.context)
result = node_type.element_type
return result
@_infer_type.register(irast.Array)
def __infer_array(ir, schema):
if ir.elements:
element_type = _infer_common_type(ir.elements, schema)
if element_type is None:
raise ql_errors.EdgeQLError('could not determine array type',
context=ir.context)
else:
raise ql_errors.EdgeQLError(
'could not determine type of empty array',
context=ir.context)
return s_types.Array(element_type=element_type)
@_infer_type.register(irast.Tuple)
def __infer_struct(ir, schema):
element_types = {el.name: infer_type(el.val, schema) for el in ir.elements}
return s_types.Tuple(element_types=element_types, named=ir.named)
@_infer_type.register(irast.TupleIndirection)
def __infer_struct_indirection(ir, schema):
struct_type = infer_type(ir.expr, schema)
result = struct_type.element_types.get(ir.name)
if result is None:
raise ql_errors.EdgeQLError('could not determine struct element type',
context=ir.context)
return result
def infer_type(ir, schema):
try:
return ir._inferred_type_
except AttributeError:
pass
result = _infer_type(ir, schema)
if (result is not None and
not isinstance(result, (s_obj.Object, s_obj.ObjectMeta))):
raise ql_errors.EdgeQLError(
f'infer_type({ir!r}) returned {result!r} instead of an Object',
context=ir.context)
if result is None or result.name == 'std::any':
raise ql_errors.EdgeQLError('could not determine expression type',
context=ir.context)
ir._inferred_type_ = result
return result
| true
| true
|
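The inference module above leans on functools.singledispatch: one generic _infer_type plus a registration per IR node class, with the result memoized on the node as _inferred_type_. A stripped-down sketch of the same dispatch pattern, using toy node classes rather than EdgeDB's:

import functools

class IntLiteral: ...
class StrLiteral: ...

@functools.singledispatch
def infer(node):
    raise TypeError(f"no inference rule for {type(node).__name__}")

@infer.register(IntLiteral)
def _(node):
    return "std::int64"

@infer.register(StrLiteral)
def _(node):
    return "std::str"

print(infer(IntLiteral()))   # std::int64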
7904640078814a724b5f91b401860607bf111587
| 3,568
|
py
|
Python
|
2021/day20/day20.py
|
tcmitchell/AdventOfCode
|
caaac1aa37c999d4804f9f4154bf7033a06e98af
|
[
"MIT"
] | null | null | null |
2021/day20/day20.py
|
tcmitchell/AdventOfCode
|
caaac1aa37c999d4804f9f4154bf7033a06e98af
|
[
"MIT"
] | null | null | null |
2021/day20/day20.py
|
tcmitchell/AdventOfCode
|
caaac1aa37c999d4804f9f4154bf7033a06e98af
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import argparse
import logging
from typing import TextIO
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("input", type=argparse.FileType('r'),
metavar="PUZZLE_INPUT")
parser.add_argument('-d', '--debug', action='store_true')
args = parser.parse_args(args)
return args
def init_logging(debug=False):
msg_format = '%(asctime)s %(levelname)s %(message)s'
date_format = '%m/%d/%Y %H:%M:%S'
level = logging.INFO
if debug:
level = logging.DEBUG
logging.basicConfig(format=msg_format, datefmt=date_format, level=level)
class Image:
def __init__(self, pixels: dict[tuple[int, int], str], void_pixel: str):
self.pixels = pixels
self.void_pixel = void_pixel
def __getitem__(self, key: tuple[int, int]) -> str:
try:
return self.pixels[key]
except KeyError:
return self.void_pixel
@staticmethod
def from_grid(grid: list[list[str]]) -> Image:
pixels = Image.grid2pixel(grid)
return Image(pixels, '.')
@staticmethod
def grid2pixel(grid: list[list[str]]) -> dict[tuple[int, int], str]:
image = {}
for y in range(len(grid)):
for x in range(len(grid[0])):
image[(x, y)] = grid[y][x]
return image
@staticmethod
def neighbors(pixel: tuple[int, int]) -> list[tuple[int, int]]:
x = pixel[0]
y = pixel[1]
return [(x - 1, y - 1), (x, y - 1), (x + 1, y - 1),
(x - 1, y), (x, y), (x + 1, y),
(x - 1, y + 1), (x, y + 1), (x + 1, y + 1)]
def pixel2idx(self, pixel: str) -> int:
bin_rep = pixel.replace('#', '1').replace('.', '0')
return int(bin_rep, base=2)
def enhance_pixel(self, iea: str, pixel: tuple[int, int]) -> str:
surround = [self[n] for n in self.neighbors(pixel)]
idx = self.pixel2idx(''.join(surround))
return iea[idx]
def bounds(self) -> tuple[int, ...]:
x_values = [p[0] for p in self.pixels]
y_values = [p[1] for p in self.pixels]
return min(x_values), min(y_values), max(x_values), max(y_values)
def enhance(self, iea: str) -> Image:
new_pixels = {}
min_x, min_y, max_x, max_y = self.bounds()
for x in range(min_x - 2, max_x + 2):
for y in range(min_y - 2, max_y + 2):
new_pixels[(x, y)] = self.enhance_pixel(iea, (x, y))
void_pixel = iea[self.pixel2idx(self.void_pixel * 9)]
return Image(new_pixels, void_pixel)
def lit_count(self):
return len([v for v in self.pixels.values() if v == '#'])
def load_input(fp: TextIO):
data = fp.read().strip().split('\n\n')
iea = data[0]
assert len(iea) == 512
grid = []
for line in data[1].strip().split('\n'):
grid.append(list(line))
image = Image.from_grid(grid)
return iea, image
def puzzle1(iea: str, image: Image) -> int:
for i in range(2):
image = image.enhance(iea)
return image.lit_count()
def puzzle2(iea, image) -> int:
for i in range(50):
image = image.enhance(iea)
return image.lit_count()
def main(argv=None):
args = parse_args(argv)
# Init logging
init_logging(args.debug)
iea, image = load_input(args.input)
answer = puzzle1(iea, image)
logging.info('Puzzle 1: %d', answer)
answer = puzzle2(iea, image)
logging.info('Puzzle 2: %d', answer)
if __name__ == '__main__':
main()
| 29.00813
| 76
| 0.575673
|
from __future__ import annotations
import argparse
import logging
from typing import TextIO
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("input", type=argparse.FileType('r'),
metavar="PUZZLE_INPUT")
parser.add_argument('-d', '--debug', action='store_true')
args = parser.parse_args(args)
return args
def init_logging(debug=False):
msg_format = '%(asctime)s %(levelname)s %(message)s'
date_format = '%m/%d/%Y %H:%M:%S'
level = logging.INFO
if debug:
level = logging.DEBUG
logging.basicConfig(format=msg_format, datefmt=date_format, level=level)
class Image:
def __init__(self, pixels: dict[tuple[int, int], str], void_pixel: str):
self.pixels = pixels
self.void_pixel = void_pixel
def __getitem__(self, key: tuple[int, int]) -> str:
try:
return self.pixels[key]
except KeyError:
return self.void_pixel
@staticmethod
def from_grid(grid: list[list[str]]) -> Image:
pixels = Image.grid2pixel(grid)
return Image(pixels, '.')
@staticmethod
def grid2pixel(grid: list[list[str]]) -> dict[tuple[int, int], str]:
image = {}
for y in range(len(grid)):
for x in range(len(grid[0])):
image[(x, y)] = grid[y][x]
return image
@staticmethod
def neighbors(pixel: tuple[int, int]) -> list[tuple[int, int]]:
x = pixel[0]
y = pixel[1]
return [(x - 1, y - 1), (x, y - 1), (x + 1, y - 1),
(x - 1, y), (x, y), (x + 1, y),
(x - 1, y + 1), (x, y + 1), (x + 1, y + 1)]
def pixel2idx(self, pixel: str) -> int:
bin_rep = pixel.replace('#', '1').replace('.', '0')
return int(bin_rep, base=2)
def enhance_pixel(self, iea: str, pixel: tuple[int, int]) -> str:
surround = [self[n] for n in self.neighbors(pixel)]
idx = self.pixel2idx(''.join(surround))
return iea[idx]
def bounds(self) -> tuple[int, ...]:
x_values = [p[0] for p in self.pixels]
y_values = [p[1] for p in self.pixels]
return min(x_values), min(y_values), max(x_values), max(y_values)
def enhance(self, iea: str) -> Image:
new_pixels = {}
min_x, min_y, max_x, max_y = self.bounds()
for x in range(min_x - 2, max_x + 2):
for y in range(min_y - 2, max_y + 2):
new_pixels[(x, y)] = self.enhance_pixel(iea, (x, y))
void_pixel = iea[self.pixel2idx(self.void_pixel * 9)]
return Image(new_pixels, void_pixel)
def lit_count(self):
return len([v for v in self.pixels.values() if v == '#'])
def load_input(fp: TextIO):
data = fp.read().strip().split('\n\n')
iea = data[0]
assert len(iea) == 512
grid = []
for line in data[1].strip().split('\n'):
grid.append(list(line))
image = Image.from_grid(grid)
return iea, image
def puzzle1(iea: str, image: Image) -> int:
for i in range(2):
image = image.enhance(iea)
return image.lit_count()
def puzzle2(iea, image) -> int:
for i in range(50):
image = image.enhance(iea)
return image.lit_count()
def main(argv=None):
args = parse_args(argv)
init_logging(args.debug)
iea, image = load_input(args.input)
answer = puzzle1(iea, image)
logging.info('Puzzle 1: %d', answer)
answer = puzzle2(iea, image)
logging.info('Puzzle 2: %d', answer)
if __name__ == '__main__':
main()
| true
| true
|
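The heart of the solution above is reading a pixel's 3x3 neighborhood (row-major, in the order neighbors() returns) as a 9-bit binary index into the 512-character algorithm string. A worked check, assuming Image from the file above is importable:

img = Image({}, '.')
# neighborhood string '...#...#.' -> binary 000100010 -> index 34
assert img.pixel2idx('...#...#.') == 34
# enhance() also tracks the infinite background: if iea[0] == '#', the
# all-dark void flips every step, which is why the new void_pixel is
# derived from iea[self.pixel2idx(self.void_pixel * 9)]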
790464bacf98a71049b09f7ad26f6dff004f3be9
| 5,314
|
py
|
Python
|
kubernetes/client/models/io_cert_manager_acme_v1_challenge_spec_solver_dns01_cloudflare_api_token_secret_ref.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/io_cert_manager_acme_v1_challenge_spec_solver_dns01_cloudflare_api_token_secret_ref.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/io_cert_manager_acme_v1_challenge_spec_solver_dns01_cloudflare_api_token_secret_ref.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'key': 'str',
'name': 'str'
}
attribute_map = {
'key': 'key',
'name': 'name'
}
def __init__(self, key=None, name=None, local_vars_configuration=None): # noqa: E501
"""IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._key = None
self._name = None
self.discriminator = None
if key is not None:
self.key = key
self.name = name
@property
def key(self):
"""Gets the key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. # noqa: E501
:return: The key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.
The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. # noqa: E501
:param key: The key of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
:type: str
"""
self._key = key
@property
def name(self):
"""Gets the name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
:return: The name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef.
Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
:param name: The name of this IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef):
return True
return self.to_dict() != other.to_dict()
| 34.960526
| 169
| 0.641325
|
import pprint
import re
import six
from kubernetes.client.configuration import Configuration
class IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef(object):
openapi_types = {
'key': 'str',
'name': 'str'
}
attribute_map = {
'key': 'key',
'name': 'name'
}
def __init__(self, key=None, name=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._key = None
self._name = None
self.discriminator = None
if key is not None:
self.key = key
self.name = name
@property
def key(self):
return self._key
@key.setter
def key(self, key):
self._key = key
@property
def name(self):
return self._name
@name.setter
def name(self, name):
if self.local_vars_configuration.client_side_validation and name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef):
return True
return self.to_dict() != other.to_dict()
| true
| true
|
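A quick usage sketch for the generated model above (the key and secret name are placeholders):

ref = IoCertManagerAcmeV1ChallengeSpecSolverDns01CloudflareApiTokenSecretRef(
    key="api-token", name="cloudflare-api-token-secret")
print(ref.to_dict())  # {'key': 'api-token', 'name': 'cloudflare-api-token-secret'}
# `name` is required: with client-side validation enabled (the default
# Configuration), assigning None to it raises ValueError.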
7904669d97c9b36099795440d324d684fc85b945
| 462
|
py
|
Python
|
analyze_results.py
|
HamidSajjadi/slr_query_formulation
|
5164d7ecd1a798089df284459a451e1e3d1e20e5
|
[
"MIT"
] | null | null | null |
analyze_results.py
|
HamidSajjadi/slr_query_formulation
|
5164d7ecd1a798089df284459a451e1e3d1e20e5
|
[
"MIT"
] | null | null | null |
analyze_results.py
|
HamidSajjadi/slr_query_formulation
|
5164d7ecd1a798089df284459a451e1e3d1e20e5
|
[
"MIT"
] | null | null | null |
import pandas as pd
import plotly.express as px
df = pd.read_csv('data/query_result.csv')
max_df = df.groupby(by='topic_id').max().reset_index()
df = df[df['topic_id'].isin(max_df[max_df['recall'] > 0]['topic_id'].to_list())]
for t in df['topic_id'].unique().tolist():
temp_df = df[df['topic_id'] == t]
fig = px.box(temp_df, x="topic_id", y="recall")
fig.update_traces(quartilemethod="exclusive") # or "inclusive", or "linear" by default
fig.show()
| 38.5
| 91
| 0.668831
|
import pandas as pd
import plotly.express as px
df = pd.read_csv('data/query_result.csv')
max_df = df.groupby(by='topic_id').max().reset_index()
df = df[df['topic_id'].isin(max_df[max_df['recall'] > 0]['topic_id'].to_list())]
for t in df['topic_id'].unique().tolist():
temp_df = df[df['topic_id'] == t]
fig = px.box(df, x="topic_id", y="recall")
fig.update_traces(quartilemethod="exclusive")
fig.show()
| true
| true
|
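With the per-topic filter actually applied (see the temp_df fix above), the loop draws one figure per topic. An alternative sketch that keeps a single figure and lets plotly facet by topic instead (same dataframe columns assumed):

fig = px.box(df, y="recall", facet_col="topic_id", facet_col_wrap=4)
fig.update_traces(quartilemethod="exclusive")
fig.show()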
790466d394c7d2b5d9c6dba66e00e134d898c559
| 2,077
|
py
|
Python
|
internal/twirptest/service_method_same_name/service_method_same_name_pb2.py
|
guide-century/twirp
|
4728e5128a7c4d5f21b431e3e53ff2b3f755c124
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-05-30T21:55:33.000Z
|
2019-05-30T21:55:33.000Z
|
internal/twirptest/service_method_same_name/service_method_same_name_pb2.py
|
guide-century/twirp
|
4728e5128a7c4d5f21b431e3e53ff2b3f755c124
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-05-24T10:00:04.000Z
|
2021-05-24T10:00:04.000Z
|
internal/twirptest/service_method_same_name/service_method_same_name_pb2.py
|
h2oai/twirp
|
f779546e4b627ff0ef33a15caa4668a415124b9f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: service_method_same_name.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='service_method_same_name.proto',
package='',
syntax='proto3',
serialized_options=_b('Z\030service_method_same_name'),
serialized_pb=_b('\n\x1eservice_method_same_name.proto\"\x05\n\x03Msg2\x1c\n\x04\x45\x63ho\x12\x14\n\x04\x45\x63ho\x12\x04.Msg\x1a\x04.Msg\"\x00\x42\x1aZ\x18service_method_same_nameb\x06proto3')
)
_MSG = _descriptor.Descriptor(
name='Msg',
full_name='Msg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=34,
serialized_end=39,
)
DESCRIPTOR.message_types_by_name['Msg'] = _MSG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Msg = _reflection.GeneratedProtocolMessageType('Msg', (_message.Message,), dict(
DESCRIPTOR = _MSG,
__module__ = 'service_method_same_name_pb2'
# @@protoc_insertion_point(class_scope:Msg)
))
_sym_db.RegisterMessage(Msg)
DESCRIPTOR._options = None
_ECHO = _descriptor.ServiceDescriptor(
name='Echo',
full_name='Echo',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=41,
serialized_end=69,
methods=[
_descriptor.MethodDescriptor(
name='Echo',
full_name='Echo.Echo',
index=0,
containing_service=None,
input_type=_MSG,
output_type=_MSG,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_ECHO)
DESCRIPTOR.services_by_name['Echo'] = _ECHO
# @@protoc_insertion_point(module_scope)
| 23.602273
| 196
| 0.755416
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='service_method_same_name.proto',
package='',
syntax='proto3',
serialized_options=_b('Z\030service_method_same_name'),
serialized_pb=_b('\n\x1eservice_method_same_name.proto\"\x05\n\x03Msg2\x1c\n\x04\x45\x63ho\x12\x14\n\x04\x45\x63ho\x12\x04.Msg\x1a\x04.Msg\"\x00\x42\x1aZ\x18service_method_same_nameb\x06proto3')
)
_MSG = _descriptor.Descriptor(
name='Msg',
full_name='Msg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=34,
serialized_end=39,
)
DESCRIPTOR.message_types_by_name['Msg'] = _MSG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Msg = _reflection.GeneratedProtocolMessageType('Msg', (_message.Message,), dict(
DESCRIPTOR = _MSG,
__module__ = 'service_method_same_name_pb2'
))
_sym_db.RegisterMessage(Msg)
DESCRIPTOR._options = None
_ECHO = _descriptor.ServiceDescriptor(
name='Echo',
full_name='Echo',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=41,
serialized_end=69,
methods=[
_descriptor.MethodDescriptor(
name='Echo',
full_name='Echo.Echo',
index=0,
containing_service=None,
input_type=_MSG,
output_type=_MSG,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_ECHO)
DESCRIPTOR.services_by_name['Echo'] = _ECHO
| true
| true
|
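A minimal round-trip with the generated message above (Msg declares no fields, so its wire form is empty bytes; the import assumes the generated module is on sys.path):

from service_method_same_name_pb2 import Msg

m = Msg()
payload = m.SerializeToString()   # b'' for a field-less message
assert Msg.FromString(payload) == m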
790467499ef4dd809fbfbac7e1f50238519438c1
| 648
|
py
|
Python
|
checkout/migrations/0009_auto_20201105_1324.py
|
GBrachetta/guillermo
|
39e3ae082d2844482adf46143598edd2339447af
|
[
"PostgreSQL"
] | null | null | null |
checkout/migrations/0009_auto_20201105_1324.py
|
GBrachetta/guillermo
|
39e3ae082d2844482adf46143598edd2339447af
|
[
"PostgreSQL"
] | 8
|
2021-06-09T18:23:43.000Z
|
2022-03-12T00:56:01.000Z
|
checkout/migrations/0009_auto_20201105_1324.py
|
GBrachetta/guillermo
|
39e3ae082d2844482adf46143598edd2339447af
|
[
"PostgreSQL"
] | 1
|
2020-11-10T21:33:31.000Z
|
2020-11-10T21:33:31.000Z
|
# Generated by Django 3.1.2 on 2020-11-05 12:24
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('checkout', '0008_order_user_profile'),
]
operations = [
migrations.AlterModelOptions(
name='order',
options={'ordering': ['date']},
),
migrations.AlterField(
model_name='orderlineitem',
name='quantity',
field=models.IntegerField(default=0, validators=[django.core.validators.MaxValueValidator(99), django.core.validators.MinValueValidator(1)]),
),
]
| 27
| 153
| 0.62963
|
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('checkout', '0008_order_user_profile'),
]
operations = [
migrations.AlterModelOptions(
name='order',
options={'ordering': ['date']},
),
migrations.AlterField(
model_name='orderlineitem',
name='quantity',
field=models.IntegerField(default=0, validators=[django.core.validators.MaxValueValidator(99), django.core.validators.MinValueValidator(1)]),
),
]
| true
| true
|
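The migration above attaches range validators to quantity. Django validators are plain callables that raise ValidationError when a value is out of range; they run during Model.full_clean() and form validation, not on save(). A sketch of that behavior (assumes a configured Django project):

from django.core.exceptions import ValidationError
from django.core.validators import MaxValueValidator, MinValueValidator

check_min, check_max = MinValueValidator(1), MaxValueValidator(99)
check_min(5); check_max(5)        # in range: both return silently
try:
    check_min(0)                  # below the minimum: raises
except ValidationError:
    print("quantity out of range")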
790467607d660c81d2051ed7b65b4ec4fa4657ca
| 6,390
|
py
|
Python
|
src/blockchain/block.py
|
thewh1teagle/yoyocoin
|
588ec462e29ed9881fb3e0e413ff0636df06ebe9
|
[
"MIT"
] | null | null | null |
src/blockchain/block.py
|
thewh1teagle/yoyocoin
|
588ec462e29ed9881fb3e0e413ff0636df06ebe9
|
[
"MIT"
] | null | null | null |
src/blockchain/block.py
|
thewh1teagle/yoyocoin
|
588ec462e29ed9881fb3e0e413ff0636df06ebe9
|
[
"MIT"
] | null | null | null |
from typing import List
import json
import hashlib
from time import time
from base64 import b64decode, b64encode
import ecdsa
from config import ECDSA_CURVE
from .constants import BLOCK_COUNT_FREEZE_WALLET_LOTTERY_AFTER_WIN, DEVELOPER_KEY
from .transaction import Transaction
from .exceptions import (
ValidationError,
NonLotteryMemberError,
WalletLotteryFreezeError,
GenesisIsNotValidError,
NonSequentialBlockIndexError,
NonMatchingHashError
)
class Block:
def __init__(
self,
index,
previous_hash,
timestamp=None,
forger=None,
transactions: List[Transaction] = None,
signature=None,
**kwargs,
):
"""
Create block
:param index: the block index in the chain (0 for the genesis block, and so on)
:param previous_hash: hash of previous block
:param timestamp: block creation time
:param forger: public_address of forger wallet
:param transactions: list of transactions
:param signature: signature of the block hash by the forger
"""
if timestamp is None:
timestamp = time()
if transactions is None:
transactions = []
self.index = index
self.previous_hash = previous_hash
self.timestamp = timestamp
self.forger = forger
self.transactions = transactions
self.signature = signature
@property
def forger_public_key(self) -> ecdsa.VerifyingKey:
forger_public_key_string = bytes.fromhex(self.forger)
return ecdsa.VerifyingKey.from_string(forger_public_key_string, curve=ECDSA_CURVE)
def _raw_data(self):
return {
"index": self.index,
"timestamp": self.timestamp,
"transactions": sorted([
transaction.to_dict() for transaction in self.transactions
], key=lambda t: t["nonce"]),
"previous_hash": self.previous_hash,
"forger": self.forger,
}
def hash(self):
"""
Calculate the block hash (block number, previous hash, transactions)
:return: String hash of block data (hex)
"""
block_dict = self._raw_data()
# We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes
block_string = json.dumps(block_dict, sort_keys=True).encode()
return hashlib.sha256(block_string).hexdigest()
def to_dict(self):
return {
**self._raw_data(),
"hash": self.hash(),
"signature": b64encode(self.signature).decode(),
}
def add_transaction(self, transaction: Transaction):
"""
Add transaction to block
:param transaction: Transaction object (see transaction.py)
:note: the transaction is appended as-is; validity is checked later by validate()
:return: None
"""
self.transactions.append(transaction)
def is_signature_verified(self) -> bool:
"""
Check if block signature is valid
:return: bool
"""
try:
return self.forger_public_key.verify(self.signature, self.hash().encode())
except ecdsa.BadSignatureError:
return False
def create_signature(self, forger_private_address: str):
"""
Create block signature for this block
:param forger_private_address: hex-encoded private key of the forger wallet
:return: None
"""
forger_private_key_string = bytes.fromhex(forger_private_address)
forger_private_key = ecdsa.SigningKey.from_string(forger_private_key_string, curve=ECDSA_CURVE)
if forger_private_key.get_verifying_key() != self.forger_public_key:
raise ValueError("The forger is not the one signing")
self.signature = self.sign(forger_private_key)
def sign(self, forger_private_key: ecdsa.SigningKey):
return forger_private_key.sign(self.hash().encode())
def validate(self, blockchain_state, is_test_net=False):
"""
Validate block
1. check block index (is the next block in the blockchain state)
2. check previous hash (is the hash of the previous block)
3. check forger wallet (is lottery member?)
4. check block signature
5. validate transactions
:param is_test_net: if True ignore InsufficientBalanceError and NonLotteryMemberError
:param blockchain_state: Blockchain state object
:raises ValidationError
:return: None
"""
if self.index == 0 and blockchain_state.length == 0:
genesis_is_valid = self.forger == DEVELOPER_KEY and self.is_signature_verified()
if not genesis_is_valid:
raise GenesisIsNotValidError()
return
# TODO: check in production if hash if equal to hard coded hash
if self.index != blockchain_state.length:
raise NonSequentialBlockIndexError(
f"block index not sequential index: {self.index} chain: {blockchain_state.length}"
)
if self.previous_hash != blockchain_state.last_block_hash:
raise NonMatchingHashError("previous hash not match previous block hash")
forger_wallet = blockchain_state.wallets.get(self.forger, None)
if forger_wallet is None or forger_wallet.balance < 100:
if not is_test_net:
raise NonLotteryMemberError()
if not self.is_signature_verified():
raise ValidationError("invalid signature")
for transaction in self.transactions:
transaction.validate(
blockchain_state=blockchain_state, is_test_net=is_test_net
) # raises ValidationError
# TODO: Add timestamp validation
@classmethod
def from_dict(
cls,
index: int,
previous_hash,
forger,
transactions: dict,
signature: str,
**kwargs,
):
transactions = list(map(lambda t: Transaction.from_dict(**t), transactions))
signature = b64decode(signature.encode())
return cls(
index=index,
previous_hash=previous_hash,
forger=forger,
transactions=transactions,
signature=signature,
**kwargs,
)
def __getitem__(self, item):
return getattr(self, item)
| 35.10989
| 103
| 0.637715
|
from typing import List
import json
import hashlib
from time import time
from base64 import b64decode, b64encode
import ecdsa
from config import ECDSA_CURVE
from .constants import BLOCK_COUNT_FREEZE_WALLET_LOTTERY_AFTER_WIN, DEVELOPER_KEY
from .transaction import Transaction
from .exceptions import (
ValidationError,
NonLotteryMemberError,
WalletLotteryFreezeError,
GenesisIsNotValidError,
NonSequentialBlockIndexError,
NonMatchingHashError
)
class Block:
def __init__(
self,
index,
previous_hash,
timestamp=None,
forger=None,
transactions: List[Transaction] = None,
signature=None,
**kwargs,
):
if timestamp is None:
timestamp = time()
if transactions is None:
transactions = []
self.index = index
self.previous_hash = previous_hash
self.timestamp = timestamp
self.forger = forger
self.transactions = transactions
self.signature = signature
@property
def forger_public_key(self) -> ecdsa.VerifyingKey:
forger_public_key_string = bytes.fromhex(self.forger)
return ecdsa.VerifyingKey.from_string(forger_public_key_string, curve=ECDSA_CURVE)
def _raw_data(self):
return {
"index": self.index,
"timestamp": self.timestamp,
"transactions": sorted([
transaction.to_dict() for transaction in self.transactions
], key=lambda t: t["nonce"]),
"previous_hash": self.previous_hash,
"forger": self.forger,
}
def hash(self):
block_dict = self._raw_data()
block_string = json.dumps(block_dict, sort_keys=True).encode()
return hashlib.sha256(block_string).hexdigest()
def to_dict(self):
return {
**self._raw_data(),
"hash": self.hash(),
"signature": b64encode(self.signature).decode(),
}
def add_transaction(self, transaction: Transaction):
self.transactions.append(transaction)
def is_signature_verified(self) -> bool:
try:
return self.forger_public_key.verify(self.signature, self.hash().encode())
except ecdsa.BadSignatureError:
return False
def create_signature(self, forger_private_address: str):
forger_private_key_string = bytes.fromhex(forger_private_address)
forger_private_key = ecdsa.SigningKey.from_string(forger_private_key_string, curve=ECDSA_CURVE)
if forger_private_key.get_verifying_key() != self.forger_public_key:
raise ValueError("The forger is not the one signing")
self.signature = self.sign(forger_private_key)
def sign(self, forger_private_key: ecdsa.SigningKey):
return forger_private_key.sign(self.hash().encode())
def validate(self, blockchain_state, is_test_net=False):
if self.index == 0 and blockchain_state.length == 0:
genesis_is_valid = self.forger == DEVELOPER_KEY and self.is_signature_verified()
if not genesis_is_valid:
raise GenesisIsNotValidError()
return
# TODO: check in production if hash if equal to hard coded hash
if self.index != blockchain_state.length:
raise NonSequentialBlockIndexError(
f"block index not sequential index: {self.index} chain: {blockchain_state.length}"
)
if self.previous_hash != blockchain_state.last_block_hash:
raise NonMatchingHashError("previous hash not match previous block hash")
forger_wallet = blockchain_state.wallets.get(self.forger, None)
if forger_wallet is None or forger_wallet.balance < 100:
if not is_test_net:
raise NonLotteryMemberError()
if not self.is_signature_verified():
raise ValidationError("invalid signature")
for transaction in self.transactions:
transaction.validate(
blockchain_state=blockchain_state, is_test_net=is_test_net
) # raises ValidationError
# TODO: Add timestamp validation
@classmethod
def from_dict(
cls,
index: int,
previous_hash,
forger,
transactions: dict,
signature: str,
**kwargs,
):
transactions = list(map(lambda t: Transaction.from_dict(**t), transactions))
signature = b64decode(signature.encode())
return cls(
index=index,
previous_hash=previous_hash,
forger=forger,
transactions=transactions,
signature=signature,
**kwargs,
)
def __getitem__(self, item):
return getattr(self, item)
| true
| true
|
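A signing round-trip for the Block class above, using sign() directly (a sketch: NIST256p stands in for the project's ECDSA_CURVE and must match it for forger_public_key to parse; validate() is skipped since it needs a blockchain state):

import ecdsa

sk = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)   # stand-in for config.ECDSA_CURVE
forger = sk.get_verifying_key().to_string().hex()
block = Block(index=1, previous_hash="0" * 64, forger=forger)
block.signature = block.sign(sk)
assert block.is_signature_verified()
print(block.to_dict()["hash"])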
7904677ccbc32aa8ea206966e99aefa4e699566f
| 403
|
py
|
Python
|
enCount/__init__.py
|
mstrazar/enCount
|
dcff565ce96afe37aa8a41995637d00cce02360d
|
[
"MIT"
] | null | null | null |
enCount/__init__.py
|
mstrazar/enCount
|
dcff565ce96afe37aa8a41995637d00cce02360d
|
[
"MIT"
] | null | null | null |
enCount/__init__.py
|
mstrazar/enCount
|
dcff565ce96afe37aa8a41995637d00cce02360d
|
[
"MIT"
] | null | null | null |
"""
enCount tasks and analyses.
enCount is a Python library for processing RNA-Seq data from ENCODE.
"""
# from ._version import __version__
from . import config # load from myconfig.py if it exists
from . import db
from . import queues
from . import encode
from . import externals
from . import gtfs
from . import fastqs
from . import experiments
from . import mappings
from . import integration
| 19.190476
| 68
| 0.756824
|
from . import config
from . import db
from . import queues
from . import encode
from . import externals
from . import gtfs
from . import fastqs
from . import experiments
from . import mappings
from . import integration
| true
| true
|
7904679cdbd5df8dee25365f35073d4646a7898a
| 3,401
|
py
|
Python
|
inference/tf_inference.py
|
videetparekh/latentai-sdk-examples
|
2104c097045105957ef7403b09b5a2c114677147
|
[
"Apache-2.0"
] | null | null | null |
inference/tf_inference.py
|
videetparekh/latentai-sdk-examples
|
2104c097045105957ef7403b09b5a2c114677147
|
[
"Apache-2.0"
] | null | null | null |
inference/tf_inference.py
|
videetparekh/latentai-sdk-examples
|
2104c097045105957ef7403b09b5a2c114677147
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019 by LatentAI Inc.
# All rights reserved.
# This file is part of the LEIP(tm) SDK,
# and is released under the "LatentAI Commercial Software License".
# Please see the LICENSE file that should have been included as part of
# this package.
#
# @file tf_inference.py
#
# @author Videet Parekh
#
# @date Wed 16 Dec 20
#
# @brief TF inference engine designed with the same interface as leip_inference for parallel comparison
# from time import time
# import tensorflow as tf
import glob
import os
import logging
import utils.common_utils as utils
import argparse
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
# tf.debugging.set_log_device_placement(True)
class TFModel():
def __init__(self, base_path, context, config):
self.context = context
self.config = config
self.load(base_path)
def load(self, base):
h5_path = glob.glob(os.path.join(base, '*.h5'))[0]
self.model = utils.load_keras_model(h5_path)
def infer(self, data):
# Here's how you may measure runtime speed
# start = time()
output_data = self.model.predict(data)
# end = time()
pred = {'label': output_data}
return pred
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input_path', type=str, default=None, required=True, help='Path to model directory.')
parser.add_argument('--test_path', type=str, default=None, required=True, help='Path to output test file')
parser.add_argument('--class_names', type=str, default=None, required=True, help='Path to class names list.')
parser.add_argument('--data_type', type=str, default="float32", required=False, help='Data Type.')
parser.add_argument('--preprocessor', type=str, default="none", required=False, help='Preprocessor function')
parser.add_argument('--inference_context', type=str, default="none", required=False, help='cpu/gpu/cuda.')
parser.add_argument('--loglevel', type=str, default="WARNING", required=False, help='Logging verbosity.')
args = parser.parse_args()
base = args.input_path
test_path = args.test_path
class_names = args.class_names
data_type = args.data_type
preprocessor = args.preprocessor
context = args.inference_context
loglevel = args.loglevel
# Set Logger Parameters
logging.basicConfig(level=utils.get_numeric_loglevel(loglevel))
# Get class_names for model
with open(class_names) as f:
synset = f.readlines()
config = utils.load_json(os.path.join(base, 'model_schema.json'))
config['input_shapes'] = utils.parse_input_shapes(config['input_shapes'])
# Load dataset and collect preprocessor function
data_index = utils.load_index(test_path)
preprocessor = utils.collect_preprocessor(preprocessor)
# Create model object for inference
model = TFModel(base, context, config)
acc = 0
# Loop over data and call infer()
for data in data_index:
# Load and preprocess image
img = utils.collect_image(data[0], data_type, preprocessor, config['input_shapes'])
# Infer
pred = model.infer(img)
pred_label = np.argmax(pred['label'])
acc += 1 if pred_label == data[1] else 0
print(acc*100/len(data_index))
| 33.673267
| 113
| 0.69362
|
import glob
import os
import logging
import utils.common_utils as utils
import argparse
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class TFModel():
def __init__(self, base_path, context, config):
self.context = context
self.config = config
self.load(base_path)
def load(self, base):
h5_path = glob.glob(os.path.join(base, '*.h5'))[0]
self.model = utils.load_keras_model(h5_path)
def infer(self, data):
# start = time()
output_data = self.model.predict(data)
# end = time()
pred = {'label': output_data}
return pred
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input_path', type=str, default=None, required=True, help='Path to model directory.')
parser.add_argument('--test_path', type=str, default=None, required=True, help='Path to output test file')
parser.add_argument('--class_names', type=str, default=None, required=True, help='Path to class names list.')
    parser.add_argument('--data_type', type=str, default="float32", required=False, help='Input data type (e.g. float32).')
    parser.add_argument('--preprocessor', type=str, default="none", required=False, help='Preprocessor function name.')
    parser.add_argument('--inference_context', type=str, default="none", required=False, help='Inference context (cpu/gpu/cuda).')
parser.add_argument('--loglevel', type=str, default="WARNING", required=False, help='Logging verbosity.')
args = parser.parse_args()
base = args.input_path
test_path = args.test_path
class_names = args.class_names
data_type = args.data_type
preprocessor = args.preprocessor
context = args.inference_context
loglevel = args.loglevel
# Set Logger Parameters
logging.basicConfig(level=utils.get_numeric_loglevel(loglevel))
# Get class_names for model
with open(class_names) as f:
synset = f.readlines()
config = utils.load_json(os.path.join(base, 'model_schema.json'))
config['input_shapes'] = utils.parse_input_shapes(config['input_shapes'])
# Load dataset and collect preprocessor function
data_index = utils.load_index(test_path)
preprocessor = utils.collect_preprocessor(preprocessor)
# Create model object for inference
model = TFModel(base, context, config)
acc = 0
# Loop over data and call infer()
for data in data_index:
# Load and preprocess image
img = utils.collect_image(data[0], data_type, preprocessor, config['input_shapes'])
# Infer
pred = model.infer(img)
pred_label = np.argmax(pred['label'])
acc += 1 if pred_label == data[1] else 0
print(acc*100/len(data_index))
| true
| true
|
79046849a3b562767b04a1500aec39c3e349f734
| 3,386
|
py
|
Python
|
src/cldfviz/colormap.py
|
cldf/cldfviz
|
c222a735a161b61b755584f62eb1ba1c64f797c0
|
[
"Apache-2.0"
] | 2
|
2021-09-09T19:53:12.000Z
|
2022-01-15T23:25:36.000Z
|
src/cldfviz/colormap.py
|
cldf/cldfviz
|
c222a735a161b61b755584f62eb1ba1c64f797c0
|
[
"Apache-2.0"
] | 20
|
2021-08-12T07:50:40.000Z
|
2022-03-31T12:56:20.000Z
|
src/cldfviz/colormap.py
|
cldf/cldfviz
|
c222a735a161b61b755584f62eb1ba1c64f797c0
|
[
"Apache-2.0"
] | 1
|
2022-02-08T10:12:05.000Z
|
2022-02-08T10:12:05.000Z
|
import json
import typing
import collections
from matplotlib import cm
from matplotlib.colors import Normalize, to_hex, CSS4_COLORS, BASE_COLORS
import matplotlib.pyplot as plt
from clldutils.color import qualitative_colors, sequential_colors, rgb_as_hex
from cldfviz.multiparameter import CONTINUOUS, CATEGORICAL, Parameter
__all__ = ['COLORMAPS', 'hextriplet', 'Colormap']
COLORMAPS = {
CATEGORICAL: ['boynton', 'tol', 'base', 'seq'],
CONTINUOUS: [cm for cm in plt.colormaps() if not cm.endswith('_r')],
}
def hextriplet(s):
"""
Wrap clldutils.color.rgb_as_hex to provide unified error handling.
"""
if s in BASE_COLORS:
return rgb_as_hex([float(d) for d in BASE_COLORS[s]])
if s in CSS4_COLORS:
return CSS4_COLORS[s]
try:
return rgb_as_hex(s)
except (AssertionError, ValueError) as e:
raise ValueError('Invalid color spec: "{}" ({})'.format(s, str(e)))
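# For example, hextriplet('r') should resolve the matplotlib base color to
# '#ff0000', while an unrecognized spec such as 'no-such-color' raises
# ValueError with a unified message.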
class Colormap:
def __init__(self, parameter: Parameter, name: typing.Optional[str] = None, novalue=None):
domain = parameter.domain
self.explicit_cm = None
if name and name.startswith('{'):
self.explicit_cm = collections.OrderedDict()
raw = json.loads(name, object_pairs_hook=collections.OrderedDict)
if novalue:
raw.setdefault('None', novalue)
label_to_code = {v: k for k, v in parameter.domain.items()}
for v, c in raw.items():
if (v not in parameter.value_to_code) and v not in label_to_code:
raise ValueError('Colormap value "{}" not in domain {}'.format(
v, list(parameter.value_to_code.keys())))
v = parameter.value_to_code.get(v, label_to_code.get(v))
self.explicit_cm[v] = hextriplet(c)
vals = list(parameter.value_to_code)
if len(vals) > len(self.explicit_cm):
raise ValueError('Colormap {} does not cover all values {}!'.format(
dict(raw), vals))
name = None
# reorder the domain of the parameter (and prune it to valid values):
parameter.domain = collections.OrderedDict(
(c, l) for c, l in sorted(
[i for i in parameter.domain.items() if i[0] in self.explicit_cm],
key=lambda i: list(self.explicit_cm.keys()).index(i[0]))
)
self.novalue = hextriplet(novalue) if novalue else None
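        # 'yyy' is a deliberately invalid attribute name: when no usable
        # matplotlib colormap name is given, getattr falls back to cm.jet.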
self._cm = getattr(cm, name or 'yyy', cm.jet)
if isinstance(domain, tuple):
assert not self.explicit_cm
# Initialize matplotlib colormap and normalizer:
norm = Normalize(domain[0], domain[1])
self.cm = lambda v: to_hex(self._cm(norm(float(v))))
else:
if self.explicit_cm:
self.cm = lambda v: self.explicit_cm[v]
else:
if name == 'seq':
colors = sequential_colors(len(domain))
else:
colors = qualitative_colors(len(domain), set=name)
self.cm = lambda v: dict(zip(domain, colors))[v]
def scalar_mappable(self):
return cm.ScalarMappable(norm=None, cmap=self._cm)
def __call__(self, value):
if value is None:
return self.novalue
return self.cm(value)
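# Hedged usage sketch (editor's addition; Parameter construction is assumed):
#   cmap = Colormap(parameter, name='viridis')  # continuous (min, max) domain
#   cmap(0.5)   # -> '#...' hex string via the matplotlib colormap
#   cmap(None)  # -> the novalue color, or None if unset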
| 39.835294
| 94
| 0.599232
|
import json
import typing
import collections
from matplotlib import cm
from matplotlib.colors import Normalize, to_hex, CSS4_COLORS, BASE_COLORS
import matplotlib.pyplot as plt
from clldutils.color import qualitative_colors, sequential_colors, rgb_as_hex
from cldfviz.multiparameter import CONTINUOUS, CATEGORICAL, Parameter
__all__ = ['COLORMAPS', 'hextriplet', 'Colormap']
COLORMAPS = {
CATEGORICAL: ['boynton', 'tol', 'base', 'seq'],
CONTINUOUS: [cm for cm in plt.colormaps() if not cm.endswith('_r')],
}
def hextriplet(s):
if s in BASE_COLORS:
return rgb_as_hex([float(d) for d in BASE_COLORS[s]])
if s in CSS4_COLORS:
return CSS4_COLORS[s]
try:
return rgb_as_hex(s)
except (AssertionError, ValueError) as e:
raise ValueError('Invalid color spec: "{}" ({})'.format(s, str(e)))
class Colormap:
def __init__(self, parameter: Parameter, name: typing.Optional[str] = None, novalue=None):
domain = parameter.domain
self.explicit_cm = None
if name and name.startswith('{'):
self.explicit_cm = collections.OrderedDict()
raw = json.loads(name, object_pairs_hook=collections.OrderedDict)
if novalue:
raw.setdefault('None', novalue)
label_to_code = {v: k for k, v in parameter.domain.items()}
for v, c in raw.items():
if (v not in parameter.value_to_code) and v not in label_to_code:
raise ValueError('Colormap value "{}" not in domain {}'.format(
v, list(parameter.value_to_code.keys())))
v = parameter.value_to_code.get(v, label_to_code.get(v))
self.explicit_cm[v] = hextriplet(c)
vals = list(parameter.value_to_code)
if len(vals) > len(self.explicit_cm):
raise ValueError('Colormap {} does not cover all values {}!'.format(
dict(raw), vals))
name = None
parameter.domain = collections.OrderedDict(
(c, l) for c, l in sorted(
[i for i in parameter.domain.items() if i[0] in self.explicit_cm],
key=lambda i: list(self.explicit_cm.keys()).index(i[0]))
)
self.novalue = hextriplet(novalue) if novalue else None
self._cm = getattr(cm, name or 'yyy', cm.jet)
if isinstance(domain, tuple):
assert not self.explicit_cm
norm = Normalize(domain[0], domain[1])
self.cm = lambda v: to_hex(self._cm(norm(float(v))))
else:
if self.explicit_cm:
self.cm = lambda v: self.explicit_cm[v]
else:
if name == 'seq':
colors = sequential_colors(len(domain))
else:
colors = qualitative_colors(len(domain), set=name)
self.cm = lambda v: dict(zip(domain, colors))[v]
def scalar_mappable(self):
return cm.ScalarMappable(norm=None, cmap=self._cm)
def __call__(self, value):
if value is None:
return self.novalue
return self.cm(value)
| true
| true
|
7904696be5f1977c8538edd6b795c265ad816b85
| 18,288
|
py
|
Python
|
src/twisted/web/test/test_distrib.py
|
mathieui/twisted
|
35546d2b50742a32edba54719ce3e752dc50dd2a
|
[
"MIT",
"Unlicense"
] | 9,953
|
2019-04-03T23:41:04.000Z
|
2022-03-31T11:54:44.000Z
|
stackoverflow/venv/lib/python3.6/site-packages/twisted/web/test/test_distrib.py
|
W4LKURE/learn_python3_spider
|
98dd354a41598b31302641f9a0ea49d1ecfa0fb1
|
[
"MIT"
] | 44
|
2019-05-27T10:59:29.000Z
|
2022-03-31T14:14:29.000Z
|
stackoverflow/venv/lib/python3.6/site-packages/twisted/web/test/test_distrib.py
|
W4LKURE/learn_python3_spider
|
98dd354a41598b31302641f9a0ea49d1ecfa0fb1
|
[
"MIT"
] | 2,803
|
2019-04-06T13:15:33.000Z
|
2022-03-31T07:42:01.000Z
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.distrib}.
"""
from os.path import abspath
from xml.dom.minidom import parseString
try:
import pwd
except ImportError:
pwd = None
from zope.interface.verify import verifyObject
from twisted.python import filepath, failure
from twisted.internet import reactor, defer
from twisted.trial import unittest
from twisted.spread import pb
from twisted.spread.banana import SIZE_LIMIT
from twisted.web import distrib, client, resource, static, server
from twisted.web.test.test_web import DummyRequest, DummyChannel
from twisted.web.test._util import _render
from twisted.test import proto_helpers
from twisted.web.http_headers import Headers
from twisted.logger import globalLogPublisher
class MySite(server.Site):
pass
class PBServerFactory(pb.PBServerFactory):
"""
A PB server factory which keeps track of the most recent protocol it
created.
@ivar proto: L{None} or the L{Broker} instance most recently returned
from C{buildProtocol}.
"""
proto = None
def buildProtocol(self, addr):
self.proto = pb.PBServerFactory.buildProtocol(self, addr)
return self.proto
class ArbitraryError(Exception):
"""
An exception for this test.
"""
class DistribTests(unittest.TestCase):
port1 = None
port2 = None
sub = None
f1 = None
def tearDown(self):
"""
        Clean up all the event sources left behind, either directly by
        test methods or indirectly via some distrib API.
"""
dl = [defer.Deferred(), defer.Deferred()]
if self.f1 is not None and self.f1.proto is not None:
self.f1.proto.notifyOnDisconnect(lambda: dl[0].callback(None))
else:
dl[0].callback(None)
if self.sub is not None and self.sub.publisher is not None:
self.sub.publisher.broker.notifyOnDisconnect(
lambda: dl[1].callback(None))
self.sub.publisher.broker.transport.loseConnection()
else:
dl[1].callback(None)
if self.port1 is not None:
dl.append(self.port1.stopListening())
if self.port2 is not None:
dl.append(self.port2.stopListening())
return defer.gatherResults(dl)
def testDistrib(self):
# site1 is the publisher
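        # Wiring (editor's note):
        #   HTTP client --> site2 (ResourceSubscription at /here) --PB-->
        #   site1 (ResourcePublisher), so a GET for /here/there on port2 is
        #   proxied over Perspective Broker to the "there" resource on port1.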
r1 = resource.Resource()
r1.putChild(b"there", static.Data(b"root", "text/plain"))
site1 = server.Site(r1)
self.f1 = PBServerFactory(distrib.ResourcePublisher(site1))
self.port1 = reactor.listenTCP(0, self.f1)
self.sub = distrib.ResourceSubscription("127.0.0.1",
self.port1.getHost().port)
r2 = resource.Resource()
r2.putChild(b"here", self.sub)
f2 = MySite(r2)
self.port2 = reactor.listenTCP(0, f2)
agent = client.Agent(reactor)
url = "http://127.0.0.1:{}/here/there".format(
self.port2.getHost().port)
url = url.encode("ascii")
d = agent.request(b"GET", url)
d.addCallback(client.readBody)
d.addCallback(self.assertEqual, b'root')
return d
def _setupDistribServer(self, child):
"""
Set up a resource on a distrib site using L{ResourcePublisher}.
@param child: The resource to publish using distrib.
@return: A tuple consisting of the host and port on which to contact
the created site.
"""
distribRoot = resource.Resource()
distribRoot.putChild(b"child", child)
distribSite = server.Site(distribRoot)
self.f1 = distribFactory = PBServerFactory(
distrib.ResourcePublisher(distribSite))
distribPort = reactor.listenTCP(
0, distribFactory, interface="127.0.0.1")
self.addCleanup(distribPort.stopListening)
addr = distribPort.getHost()
self.sub = mainRoot = distrib.ResourceSubscription(
addr.host, addr.port)
mainSite = server.Site(mainRoot)
mainPort = reactor.listenTCP(0, mainSite, interface="127.0.0.1")
self.addCleanup(mainPort.stopListening)
mainAddr = mainPort.getHost()
return mainPort, mainAddr
def _requestTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{Agent.request} when
requesting the resource.
@return: A L{Deferred} which fires with the result of the request.
"""
mainPort, mainAddr = self._setupDistribServer(child)
agent = client.Agent(reactor)
url = "http://%s:%s/child" % (mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = agent.request(b"GET", url, **kwargs)
d.addCallback(client.readBody)
return d
def _requestAgentTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{Agent.request} when
requesting the resource.
@return: A L{Deferred} which fires with a tuple consisting of a
L{twisted.test.proto_helpers.AccumulatingProtocol} containing the
body of the response and an L{IResponse} with the response itself.
"""
mainPort, mainAddr = self._setupDistribServer(child)
url = "http://{}:{}/child".format(mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = client.Agent(reactor).request(b"GET", url, **kwargs)
def cbCollectBody(response):
protocol = proto_helpers.AccumulatingProtocol()
response.deliverBody(protocol)
d = protocol.closedDeferred = defer.Deferred()
d.addCallback(lambda _: (protocol, response))
return d
d.addCallback(cbCollectBody)
return d
def test_requestHeaders(self):
"""
The request headers are available on the request object passed to a
distributed resource's C{render} method.
"""
requestHeaders = {}
logObserver = proto_helpers.EventLoggingObserver()
globalLogPublisher.addObserver(logObserver)
req = [None]
class ReportRequestHeaders(resource.Resource):
def render(self, request):
req[0] = request
requestHeaders.update(dict(
request.requestHeaders.getAllRawHeaders()))
return b""
def check_logs():
msgs = [e["log_format"] for e in logObserver]
self.assertIn('connected to publisher', msgs)
self.assertIn(
"could not connect to distributed web service: {msg}",
msgs
)
self.assertIn(req[0], msgs)
globalLogPublisher.removeObserver(logObserver)
request = self._requestTest(
ReportRequestHeaders(), headers=Headers({'foo': ['bar']}))
def cbRequested(result):
self.f1.proto.notifyOnDisconnect(check_logs)
self.assertEqual(requestHeaders[b'Foo'], [b'bar'])
request.addCallback(cbRequested)
return request
def test_requestResponseCode(self):
"""
The response code can be set by the request object passed to a
distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200)
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"OK")
request.addCallback(cbRequested)
return request
def test_requestResponseCodeMessage(self):
"""
The response code and message can be set by the request object passed to
a distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200, b"some-message")
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"some-message")
request.addCallback(cbRequested)
return request
def test_largeWrite(self):
"""
If a string longer than the Banana size limit is passed to the
L{distrib.Request} passed to the remote resource, it is broken into
smaller strings to be transported over the PB connection.
"""
class LargeWrite(resource.Resource):
def render(self, request):
request.write(b'x' * SIZE_LIMIT + b'y')
request.finish()
return server.NOT_DONE_YET
request = self._requestTest(LargeWrite())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
def test_largeReturn(self):
"""
Like L{test_largeWrite}, but for the case where C{render} returns a
long string rather than explicitly passing it to L{Request.write}.
"""
class LargeReturn(resource.Resource):
def render(self, request):
return b'x' * SIZE_LIMIT + b'y'
request = self._requestTest(LargeReturn())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
def test_connectionLost(self):
"""
If there is an error issuing the request to the remote publisher, an
error response is returned.
"""
# Using pb.Root as a publisher will cause request calls to fail with an
# error every time. Just what we want to test.
self.f1 = serverFactory = PBServerFactory(pb.Root())
self.port1 = serverPort = reactor.listenTCP(0, serverFactory)
self.sub = subscription = distrib.ResourceSubscription(
"127.0.0.1", serverPort.getHost().port)
request = DummyRequest([b''])
d = _render(subscription, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 500)
# This is the error we caused the request to fail with. It should
# have been logged.
errors = self.flushLoggedErrors(pb.NoSuchMethod)
self.assertEqual(len(errors), 1)
# The error page is rendered as HTML.
expected = [
b'',
b'<html>',
b' <head><title>500 - Server Connection Lost</title></head>',
b' <body>',
b' <h1>Server Connection Lost</h1>',
b' <p>Connection to distributed server lost:'
b'<pre>'
b'[Failure instance: Traceback from remote host -- '
b'twisted.spread.flavors.NoSuchMethod: '
b'No such method: remote_request',
b']</pre></p>',
b' </body>',
b'</html>',
b''
]
self.assertEqual([b'\n'.join(expected)], request.written)
d.addCallback(cbRendered)
return d
def test_logFailed(self):
"""
When a request fails, the string form of the failure is logged.
"""
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
f = failure.Failure(ArbitraryError())
request = DummyRequest([b''])
issue = distrib.Issue(request)
issue.failed(f)
        self.assertEqual(1, len(logObserver))
self.assertIn(
"Failure instance",
logObserver[0]["log_format"]
)
def test_requestFail(self):
"""
When L{twisted.web.distrib.Request}'s fail is called, the failure
is logged.
"""
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
err = ArbitraryError()
f = failure.Failure(err)
req = distrib.Request(DummyChannel())
req.fail(f)
self.flushLoggedErrors(ArbitraryError)
        self.assertEqual(1, len(logObserver))
self.assertIs(logObserver[0]["log_failure"], f)
class _PasswordDatabase:
def __init__(self, users):
self._users = users
def getpwall(self):
return iter(self._users)
def getpwnam(self, username):
for user in self._users:
if user[0] == username:
return user
raise KeyError()
class UserDirectoryTests(unittest.TestCase):
"""
Tests for L{UserDirectory}, a resource for listing all user resources
available on a system.
"""
def setUp(self):
self.alice = ('alice', 'x', 123, 456, 'Alice,,,', self.mktemp(), '/bin/sh')
self.bob = ('bob', 'x', 234, 567, 'Bob,,,', self.mktemp(), '/bin/sh')
self.database = _PasswordDatabase([self.alice, self.bob])
self.directory = distrib.UserDirectory(self.database)
def test_interface(self):
"""
L{UserDirectory} instances provide L{resource.IResource}.
"""
self.assertTrue(verifyObject(resource.IResource, self.directory))
def _404Test(self, name):
"""
Verify that requesting the C{name} child of C{self.directory} results
in a 404 response.
"""
request = DummyRequest([name])
result = self.directory.getChild(name, request)
d = _render(result, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_getInvalidUser(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which does not correspond to any known
user.
"""
return self._404Test('carol')
def test_getUserWithoutResource(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which corresponds to a known user who has
neither a user directory nor a user distrib socket.
"""
return self._404Test('alice')
def test_getPublicHTMLChild(self):
"""
L{UserDirectory.getChild} returns a L{static.File} instance when passed
the name of a user with a home directory containing a I{public_html}
directory.
"""
home = filepath.FilePath(self.bob[-2])
public_html = home.child('public_html')
public_html.makedirs()
request = DummyRequest(['bob'])
result = self.directory.getChild('bob', request)
self.assertIsInstance(result, static.File)
self.assertEqual(result.path, public_html.path)
def test_getDistribChild(self):
"""
L{UserDirectory.getChild} returns a L{ResourceSubscription} instance
when passed the name of a user suffixed with C{".twistd"} who has a
home directory containing a I{.twistd-web-pb} socket.
"""
home = filepath.FilePath(self.bob[-2])
home.makedirs()
web = home.child('.twistd-web-pb')
request = DummyRequest(['bob'])
result = self.directory.getChild('bob.twistd', request)
self.assertIsInstance(result, distrib.ResourceSubscription)
self.assertEqual(result.host, 'unix')
self.assertEqual(abspath(result.port), web.path)
def test_invalidMethod(self):
"""
L{UserDirectory.render} raises L{UnsupportedMethod} in response to a
non-I{GET} request.
"""
request = DummyRequest([''])
request.method = 'POST'
self.assertRaises(
server.UnsupportedMethod, self.directory.render, request)
def test_render(self):
"""
L{UserDirectory} renders a list of links to available user content
in response to a I{GET} request.
"""
public_html = filepath.FilePath(self.alice[-2]).child('public_html')
public_html.makedirs()
web = filepath.FilePath(self.bob[-2])
web.makedirs()
# This really only works if it's a unix socket, but the implementation
# doesn't currently check for that. It probably should someday, and
# then skip users with non-sockets.
web.child('.twistd-web-pb').setContent(b"")
request = DummyRequest([''])
result = _render(self.directory, request)
def cbRendered(ignored):
document = parseString(b''.join(request.written))
# Each user should have an li with a link to their page.
[alice, bob] = document.getElementsByTagName('li')
self.assertEqual(alice.firstChild.tagName, 'a')
self.assertEqual(alice.firstChild.getAttribute('href'), 'alice/')
self.assertEqual(alice.firstChild.firstChild.data, 'Alice (file)')
self.assertEqual(bob.firstChild.tagName, 'a')
self.assertEqual(bob.firstChild.getAttribute('href'), 'bob.twistd/')
self.assertEqual(bob.firstChild.firstChild.data, 'Bob (twistd)')
result.addCallback(cbRendered)
return result
def test_passwordDatabase(self):
"""
If L{UserDirectory} is instantiated with no arguments, it uses the
L{pwd} module as its password database.
"""
directory = distrib.UserDirectory()
self.assertIdentical(directory._pwd, pwd)
if pwd is None:
test_passwordDatabase.skip = "pwd module required"
| 34.636364
| 83
| 0.61308
|
from os.path import abspath
from xml.dom.minidom import parseString
try:
import pwd
except ImportError:
pwd = None
from zope.interface.verify import verifyObject
from twisted.python import filepath, failure
from twisted.internet import reactor, defer
from twisted.trial import unittest
from twisted.spread import pb
from twisted.spread.banana import SIZE_LIMIT
from twisted.web import distrib, client, resource, static, server
from twisted.web.test.test_web import DummyRequest, DummyChannel
from twisted.web.test._util import _render
from twisted.test import proto_helpers
from twisted.web.http_headers import Headers
from twisted.logger import globalLogPublisher
class MySite(server.Site):
pass
class PBServerFactory(pb.PBServerFactory):
proto = None
def buildProtocol(self, addr):
self.proto = pb.PBServerFactory.buildProtocol(self, addr)
return self.proto
class ArbitraryError(Exception):
class DistribTests(unittest.TestCase):
port1 = None
port2 = None
sub = None
f1 = None
def tearDown(self):
dl = [defer.Deferred(), defer.Deferred()]
if self.f1 is not None and self.f1.proto is not None:
self.f1.proto.notifyOnDisconnect(lambda: dl[0].callback(None))
else:
dl[0].callback(None)
if self.sub is not None and self.sub.publisher is not None:
self.sub.publisher.broker.notifyOnDisconnect(
lambda: dl[1].callback(None))
self.sub.publisher.broker.transport.loseConnection()
else:
dl[1].callback(None)
if self.port1 is not None:
dl.append(self.port1.stopListening())
if self.port2 is not None:
dl.append(self.port2.stopListening())
return defer.gatherResults(dl)
def testDistrib(self):
r1 = resource.Resource()
r1.putChild(b"there", static.Data(b"root", "text/plain"))
site1 = server.Site(r1)
self.f1 = PBServerFactory(distrib.ResourcePublisher(site1))
self.port1 = reactor.listenTCP(0, self.f1)
self.sub = distrib.ResourceSubscription("127.0.0.1",
self.port1.getHost().port)
r2 = resource.Resource()
r2.putChild(b"here", self.sub)
f2 = MySite(r2)
self.port2 = reactor.listenTCP(0, f2)
agent = client.Agent(reactor)
url = "http://127.0.0.1:{}/here/there".format(
self.port2.getHost().port)
url = url.encode("ascii")
d = agent.request(b"GET", url)
d.addCallback(client.readBody)
d.addCallback(self.assertEqual, b'root')
return d
def _setupDistribServer(self, child):
distribRoot = resource.Resource()
distribRoot.putChild(b"child", child)
distribSite = server.Site(distribRoot)
self.f1 = distribFactory = PBServerFactory(
distrib.ResourcePublisher(distribSite))
distribPort = reactor.listenTCP(
0, distribFactory, interface="127.0.0.1")
self.addCleanup(distribPort.stopListening)
addr = distribPort.getHost()
self.sub = mainRoot = distrib.ResourceSubscription(
addr.host, addr.port)
mainSite = server.Site(mainRoot)
mainPort = reactor.listenTCP(0, mainSite, interface="127.0.0.1")
self.addCleanup(mainPort.stopListening)
mainAddr = mainPort.getHost()
return mainPort, mainAddr
def _requestTest(self, child, **kwargs):
mainPort, mainAddr = self._setupDistribServer(child)
agent = client.Agent(reactor)
url = "http://%s:%s/child" % (mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = agent.request(b"GET", url, **kwargs)
d.addCallback(client.readBody)
return d
def _requestAgentTest(self, child, **kwargs):
mainPort, mainAddr = self._setupDistribServer(child)
url = "http://{}:{}/child".format(mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = client.Agent(reactor).request(b"GET", url, **kwargs)
def cbCollectBody(response):
protocol = proto_helpers.AccumulatingProtocol()
response.deliverBody(protocol)
d = protocol.closedDeferred = defer.Deferred()
d.addCallback(lambda _: (protocol, response))
return d
d.addCallback(cbCollectBody)
return d
def test_requestHeaders(self):
requestHeaders = {}
logObserver = proto_helpers.EventLoggingObserver()
globalLogPublisher.addObserver(logObserver)
req = [None]
class ReportRequestHeaders(resource.Resource):
def render(self, request):
req[0] = request
requestHeaders.update(dict(
request.requestHeaders.getAllRawHeaders()))
return b""
def check_logs():
msgs = [e["log_format"] for e in logObserver]
self.assertIn('connected to publisher', msgs)
self.assertIn(
"could not connect to distributed web service: {msg}",
msgs
)
self.assertIn(req[0], msgs)
globalLogPublisher.removeObserver(logObserver)
request = self._requestTest(
ReportRequestHeaders(), headers=Headers({'foo': ['bar']}))
def cbRequested(result):
self.f1.proto.notifyOnDisconnect(check_logs)
self.assertEqual(requestHeaders[b'Foo'], [b'bar'])
request.addCallback(cbRequested)
return request
def test_requestResponseCode(self):
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200)
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"OK")
request.addCallback(cbRequested)
return request
def test_requestResponseCodeMessage(self):
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200, b"some-message")
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"some-message")
request.addCallback(cbRequested)
return request
def test_largeWrite(self):
class LargeWrite(resource.Resource):
def render(self, request):
request.write(b'x' * SIZE_LIMIT + b'y')
request.finish()
return server.NOT_DONE_YET
request = self._requestTest(LargeWrite())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
def test_largeReturn(self):
class LargeReturn(resource.Resource):
def render(self, request):
return b'x' * SIZE_LIMIT + b'y'
request = self._requestTest(LargeReturn())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
def test_connectionLost(self):
self.f1 = serverFactory = PBServerFactory(pb.Root())
self.port1 = serverPort = reactor.listenTCP(0, serverFactory)
self.sub = subscription = distrib.ResourceSubscription(
"127.0.0.1", serverPort.getHost().port)
request = DummyRequest([b''])
d = _render(subscription, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 500)
errors = self.flushLoggedErrors(pb.NoSuchMethod)
self.assertEqual(len(errors), 1)
expected = [
b'',
b'<html>',
b' <head><title>500 - Server Connection Lost</title></head>',
b' <body>',
b' <h1>Server Connection Lost</h1>',
b' <p>Connection to distributed server lost:'
b'<pre>'
b'[Failure instance: Traceback from remote host -- '
b'twisted.spread.flavors.NoSuchMethod: '
b'No such method: remote_request',
b']</pre></p>',
b' </body>',
b'</html>',
b''
]
self.assertEqual([b'\n'.join(expected)], request.written)
d.addCallback(cbRendered)
return d
def test_logFailed(self):
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
f = failure.Failure(ArbitraryError())
request = DummyRequest([b''])
issue = distrib.Issue(request)
issue.failed(f)
        self.assertEqual(1, len(logObserver))
self.assertIn(
"Failure instance",
logObserver[0]["log_format"]
)
def test_requestFail(self):
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
err = ArbitraryError()
f = failure.Failure(err)
req = distrib.Request(DummyChannel())
req.fail(f)
self.flushLoggedErrors(ArbitraryError)
        self.assertEqual(1, len(logObserver))
self.assertIs(logObserver[0]["log_failure"], f)
class _PasswordDatabase:
def __init__(self, users):
self._users = users
def getpwall(self):
return iter(self._users)
def getpwnam(self, username):
for user in self._users:
if user[0] == username:
return user
raise KeyError()
class UserDirectoryTests(unittest.TestCase):
def setUp(self):
self.alice = ('alice', 'x', 123, 456, 'Alice,,,', self.mktemp(), '/bin/sh')
self.bob = ('bob', 'x', 234, 567, 'Bob,,,', self.mktemp(), '/bin/sh')
self.database = _PasswordDatabase([self.alice, self.bob])
self.directory = distrib.UserDirectory(self.database)
def test_interface(self):
self.assertTrue(verifyObject(resource.IResource, self.directory))
def _404Test(self, name):
request = DummyRequest([name])
result = self.directory.getChild(name, request)
d = _render(result, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_getInvalidUser(self):
return self._404Test('carol')
def test_getUserWithoutResource(self):
return self._404Test('alice')
def test_getPublicHTMLChild(self):
home = filepath.FilePath(self.bob[-2])
public_html = home.child('public_html')
public_html.makedirs()
request = DummyRequest(['bob'])
result = self.directory.getChild('bob', request)
self.assertIsInstance(result, static.File)
self.assertEqual(result.path, public_html.path)
def test_getDistribChild(self):
home = filepath.FilePath(self.bob[-2])
home.makedirs()
web = home.child('.twistd-web-pb')
request = DummyRequest(['bob'])
result = self.directory.getChild('bob.twistd', request)
self.assertIsInstance(result, distrib.ResourceSubscription)
self.assertEqual(result.host, 'unix')
self.assertEqual(abspath(result.port), web.path)
def test_invalidMethod(self):
request = DummyRequest([''])
request.method = 'POST'
self.assertRaises(
server.UnsupportedMethod, self.directory.render, request)
def test_render(self):
public_html = filepath.FilePath(self.alice[-2]).child('public_html')
public_html.makedirs()
web = filepath.FilePath(self.bob[-2])
web.makedirs()
# doesn't currently check for that. It probably should someday, and
web.child('.twistd-web-pb').setContent(b"")
request = DummyRequest([''])
result = _render(self.directory, request)
def cbRendered(ignored):
document = parseString(b''.join(request.written))
[alice, bob] = document.getElementsByTagName('li')
self.assertEqual(alice.firstChild.tagName, 'a')
self.assertEqual(alice.firstChild.getAttribute('href'), 'alice/')
self.assertEqual(alice.firstChild.firstChild.data, 'Alice (file)')
self.assertEqual(bob.firstChild.tagName, 'a')
self.assertEqual(bob.firstChild.getAttribute('href'), 'bob.twistd/')
self.assertEqual(bob.firstChild.firstChild.data, 'Bob (twistd)')
result.addCallback(cbRendered)
return result
def test_passwordDatabase(self):
directory = distrib.UserDirectory()
self.assertIdentical(directory._pwd, pwd)
if pwd is None:
test_passwordDatabase.skip = "pwd module required"
| true
| true
|
79046ad8537cf3f46e1a1b3bd28c929bb8644a08
| 2,387
|
py
|
Python
|
project/snowapp/snow.py
|
FLY-CODE77/opencv
|
5644e6c1ef43d81efb54ccde6c06f1adf000fb96
|
[
"MIT"
] | 1
|
2020-10-23T14:29:24.000Z
|
2020-10-23T14:29:24.000Z
|
project/snowapp/snow.py
|
FLY-CODE77/opencv
|
5644e6c1ef43d81efb54ccde6c06f1adf000fb96
|
[
"MIT"
] | null | null | null |
project/snowapp/snow.py
|
FLY-CODE77/opencv
|
5644e6c1ef43d81efb54ccde6c06f1adf000fb96
|
[
"MIT"
] | null | null | null |
import sys
import numpy as np
import cv2
def overlay(img, glasses, pos):
sx = pos[0]
ex = pos[0] + glasses.shape[1]
sy = pos[1]
ey = pos[1] + glasses.shape[0]
if sx < 0 or sy < 0 or ex > img.shape[1] or ey > img.shape[0]:
return
img1 = img[sy:ey, sx:ex]
img2 = glasses[:, :, 0:3]
alpha = 1. - (glasses[:, :, 3] / 255.)
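    # alpha is the *background* weight (1.0 where the PNG is transparent),
    # so the per-channel blend below keeps the camera frame where the overlay
    # is transparent and shows the glasses where it is opaque.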
img1[..., 0] = (img1[..., 0] * alpha + img2[..., 0] * (1. - alpha)).astype(np.uint8)
img1[..., 1] = (img1[..., 1] * alpha + img2[..., 1] * (1. - alpha)).astype(np.uint8)
img1[..., 2] = (img1[..., 2] * alpha + img2[..., 2] * (1. - alpha)).astype(np.uint8)
# cam open
cap = cv2.VideoCapture(0)
if not cap.isOpened():
print('cam not opened')
sys.exit()
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('output.avi', fourcc, 30, (w, h))
# XML file load
face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
eye_classifier = cv2.CascadeClassifier('haarcascade_eye.xml')
if face_classifier.empty() or eye_classifier.empty():
print('xml load error')
sys.exit()
glasses = cv2.imread('glasses.png', cv2.IMREAD_UNCHANGED)
if glasses is None:
print('png file load error')
sys.exit()
eh, ew = glasses.shape[:2]  # shape is (height, width); both are unused below
ex1, ey1 = 240, 300
ex2, ey2 = 660, 300
# Video process
while True:
ret, frame = cap.read()
if not ret:
break
    faces = face_classifier.detectMultiScale(frame, scaleFactor=1.2, minSize=(100, 100), maxSize=(400, 400))
for (x, y, w, h) in faces:
faceROI = frame[y: y+h//2, x: x+w]
eyes = eye_classifier.detectMultiScale(faceROI)
if len(eyes) != 2:
continue
x1 = x + eyes[0][0] + (eyes[0][2] // 2)
y1 = y + eyes[0][1] + (eyes[0][3] // 2)
x2 = x + eyes[1][0] + (eyes[1][2] // 2)
y2 = y + eyes[1][1] + (eyes[1][3] // 2)
if x1 > x2:
x1, y1, x2, y2 = x2, y2, x1, y1
fx = (x2 - x1) / (ex2 - ex1)
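        # Scale the overlay so the distance between the detected eye centres
        # matches the eye positions (ex1, ex2) hard-coded for glasses.png.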
glasses2 = cv2.resize(glasses, (0, 0), fx=fx, fy=fx, interpolation=cv2.INTER_AREA)
pos = (x1 - int(ex1 * fx), y1 - int(ey1 * fx))
overlay(frame, glasses2, pos)
out.write(frame)
cv2.imshow('frame', frame)
if cv2.waitKey(1) == 27:
break
cap.release()
out.release()
cv2.destroyAllWindows()
| 25.666667
| 106
| 0.566401
|
import sys
import numpy as np
import cv2
def overlay(img, glasses, pos):
sx = pos[0]
ex = pos[0] + glasses.shape[1]
sy = pos[1]
ey = pos[1] + glasses.shape[0]
if sx < 0 or sy < 0 or ex > img.shape[1] or ey > img.shape[0]:
return
img1 = img[sy:ey, sx:ex]
img2 = glasses[:, :, 0:3]
alpha = 1. - (glasses[:, :, 3] / 255.)
img1[..., 0] = (img1[..., 0] * alpha + img2[..., 0] * (1. - alpha)).astype(np.uint8)
img1[..., 1] = (img1[..., 1] * alpha + img2[..., 1] * (1. - alpha)).astype(np.uint8)
img1[..., 2] = (img1[..., 2] * alpha + img2[..., 2] * (1. - alpha)).astype(np.uint8)
cap = cv2.VideoCapture(0)
if not cap.isOpened():
print('cam not opened')
sys.exit()
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('output.avi', fourcc, 30, (w, h))
face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
eye_classifier = cv2.CascadeClassifier('haarcascade_eye.xml')
if face_classifier.empty() or eye_classifier.empty():
print('xml load error')
sys.exit()
glasses = cv2.imread('glasses.png', cv2.IMREAD_UNCHANGED)
if glasses is None:
print('png file load error')
sys.exit()
eh, ew = glasses.shape[:2]
ex1, ey1 = 240, 300
ex2, ey2 = 660, 300
while True:
ret, frame = cap.read()
if not ret:
break
    faces = face_classifier.detectMultiScale(frame, scaleFactor=1.2, minSize=(100, 100), maxSize=(400, 400))
for (x, y, w, h) in faces:
faceROI = frame[y: y+h//2, x: x+w]
eyes = eye_classifier.detectMultiScale(faceROI)
if len(eyes) != 2:
continue
x1 = x + eyes[0][0] + (eyes[0][2] // 2)
y1 = y + eyes[0][1] + (eyes[0][3] // 2)
x2 = x + eyes[1][0] + (eyes[1][2] // 2)
y2 = y + eyes[1][1] + (eyes[1][3] // 2)
if x1 > x2:
x1, y1, x2, y2 = x2, y2, x1, y1
fx = (x2 - x1) / (ex2 - ex1)
glasses2 = cv2.resize(glasses, (0, 0), fx=fx, fy=fx, interpolation=cv2.INTER_AREA)
pos = (x1 - int(ex1 * fx), y1 - int(ey1 * fx))
overlay(frame, glasses2, pos)
out.write(frame)
cv2.imshow('frame', frame)
if cv2.waitKey(1) == 27:
break
cap.release()
out.release()
cv2.destroyAllWindows()
| true
| true
|
79046ae99b3e80ed5be1615069f7dbeda67658f6
| 237
|
py
|
Python
|
src-server/app/forms/pages/about.py
|
mjmcconnell/sra
|
ff1c2563dead2fe4d81cda3d431482defd0f6a62
|
[
"Apache-2.0"
] | null | null | null |
src-server/app/forms/pages/about.py
|
mjmcconnell/sra
|
ff1c2563dead2fe4d81cda3d431482defd0f6a62
|
[
"Apache-2.0"
] | null | null | null |
src-server/app/forms/pages/about.py
|
mjmcconnell/sra
|
ff1c2563dead2fe4d81cda3d431482defd0f6a62
|
[
"Apache-2.0"
] | null | null | null |
"""Form definitions, allow easy validation of input and rendering of forms
"""
# future imports
from __future__ import absolute_import
# local imports
from app.forms.pages.base import PageForm
class AboutPageForm(PageForm):
pass
| 18.230769
| 74
| 0.780591
|
from __future__ import absolute_import
from app.forms.pages.base import PageForm
class AboutPageForm(PageForm):
pass
| true
| true
|
79046b7a3f5b56818ff01c6981882fca24aaa637
| 1,555
|
py
|
Python
|
component/tile/param_tile.py
|
12rambau/weplan
|
16833d5775b4434a7804f7ee6ae419ee79b7c6cf
|
[
"MIT"
] | null | null | null |
component/tile/param_tile.py
|
12rambau/weplan
|
16833d5775b4434a7804f7ee6ae419ee79b7c6cf
|
[
"MIT"
] | null | null | null |
component/tile/param_tile.py
|
12rambau/weplan
|
16833d5775b4434a7804f7ee6ae419ee79b7c6cf
|
[
"MIT"
] | null | null | null |
from sepal_ui import sepalwidgets as sw
from ipywidgets import dlink
from component import parameter as cp
class ParamTile(sw.Card):
def __init__(self, model):
# read the model
self.model = model
# add the base widgets
self.close = sw.Icon(children=["mdi-close"], small=True)
self.title = sw.CardTitle(
class_="pa-0 ma-0", children=[sw.Spacer(), self.close]
)
# create the widgets
self.w_target = sw.Select(
small=True,
items=[{"text": f"{i+1}0%", "value": i + 1} for i in range(cp.nb_target)],
v_model=model.target,
label="target",
dense=True,
)
self.w_weight = sw.Select(
small=True,
items=[i + 1 for i in range(cp.nb_weight)],
v_model=model.weight,
label="weight",
dense=True,
)
# link the widgets to the model
self.model.bind(self.w_target, "target").bind(self.w_weight, "weight")
# create the object
super().__init__(
max_width="500px",
class_="pa-1",
children=[self.title, self.w_target, self.w_weight],
viz=False,
disabled=False,
)
# add javascript events
self.close.on_event("click", lambda *args: self.hide())
dlink((self, "disabled"), (self, "loading"))
def reset(self):
self.w_target.v_model = None
self.w_weight.v_model = None
self.hide()
return
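# Hedged usage sketch (editor's addition; `model` is assumed to be a sepal_ui
# Model exposing `target` and `weight` traits):
#   tile = ParamTile(model)
#   tile.show()  # display the card; the close icon calls hide() again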
| 26.355932
| 86
| 0.540836
|
from sepal_ui import sepalwidgets as sw
from ipywidgets import dlink
from component import parameter as cp
class ParamTile(sw.Card):
def __init__(self, model):
self.model = model
self.close = sw.Icon(children=["mdi-close"], small=True)
self.title = sw.CardTitle(
class_="pa-0 ma-0", children=[sw.Spacer(), self.close]
)
self.w_target = sw.Select(
small=True,
items=[{"text": f"{i+1}0%", "value": i + 1} for i in range(cp.nb_target)],
v_model=model.target,
label="target",
dense=True,
)
self.w_weight = sw.Select(
small=True,
items=[i + 1 for i in range(cp.nb_weight)],
v_model=model.weight,
label="weight",
dense=True,
)
self.model.bind(self.w_target, "target").bind(self.w_weight, "weight")
super().__init__(
max_width="500px",
class_="pa-1",
children=[self.title, self.w_target, self.w_weight],
viz=False,
disabled=False,
)
self.close.on_event("click", lambda *args: self.hide())
dlink((self, "disabled"), (self, "loading"))
def reset(self):
self.w_target.v_model = None
self.w_weight.v_model = None
self.hide()
return
| true
| true
|
79046b8615e3e038faee049cd39f833066f66a55
| 52,814
|
py
|
Python
|
Lib/test/test_compile.py
|
sarvex/cpython
|
81d968b7c30d5b41f3f28b297b7ee5345d569509
|
[
"0BSD"
] | 5
|
2021-12-03T23:11:53.000Z
|
2022-01-08T21:02:50.000Z
|
Lib/test/test_compile.py
|
Seanpm2001-ASP-DOT-NET-lang/cpython
|
47cca0492b3c379823d4bdb600be56a633e5bb88
|
[
"0BSD"
] | 6
|
2021-09-12T08:15:12.000Z
|
2022-03-05T09:26:40.000Z
|
Lib/test/test_compile.py
|
Seanpm2001-ASP-DOT-NET-lang/cpython
|
47cca0492b3c379823d4bdb600be56a633e5bb88
|
[
"0BSD"
] | 1
|
2019-04-02T05:38:49.000Z
|
2019-04-02T05:38:49.000Z
|
import dis
import math
import os
import unittest
import sys
import ast
import _ast
import tempfile
import types
import textwrap
from test import support
from test.support import script_helper, requires_debug_ranges
from test.support.os_helper import FakePath
class TestSpecifics(unittest.TestCase):
def compile_single(self, source):
compile(source, "<single>", "single")
def assertInvalidSingle(self, source):
self.assertRaises(SyntaxError, self.compile_single, source)
def test_no_ending_newline(self):
compile("hi", "<test>", "exec")
compile("hi\r", "<test>", "exec")
def test_empty(self):
compile("", "<test>", "exec")
def test_other_newlines(self):
compile("\r\n", "<test>", "exec")
compile("\r", "<test>", "exec")
compile("hi\r\nstuff\r\ndef f():\n pass\r", "<test>", "exec")
compile("this_is\rreally_old_mac\rdef f():\n pass", "<test>", "exec")
def test_debug_assignment(self):
# catch assignments to __debug__
self.assertRaises(SyntaxError, compile, '__debug__ = 1', '?', 'single')
import builtins
prev = builtins.__debug__
setattr(builtins, '__debug__', 'sure')
self.assertEqual(__debug__, prev)
setattr(builtins, '__debug__', prev)
def test_argument_handling(self):
# detect duplicate positional and keyword arguments
self.assertRaises(SyntaxError, eval, 'lambda a,a:0')
self.assertRaises(SyntaxError, eval, 'lambda a,a=1:0')
self.assertRaises(SyntaxError, eval, 'lambda a=1,a=1:0')
self.assertRaises(SyntaxError, exec, 'def f(a, a): pass')
self.assertRaises(SyntaxError, exec, 'def f(a = 0, a = 1): pass')
self.assertRaises(SyntaxError, exec, 'def f(a): global a; a = 1')
def test_syntax_error(self):
self.assertRaises(SyntaxError, compile, "1+*3", "filename", "exec")
def test_none_keyword_arg(self):
self.assertRaises(SyntaxError, compile, "f(None=1)", "<string>", "exec")
def test_duplicate_global_local(self):
self.assertRaises(SyntaxError, exec, 'def f(a): global a; a = 1')
def test_exec_with_general_mapping_for_locals(self):
class M:
"Test mapping interface versus possible calls from eval()."
def __getitem__(self, key):
if key == 'a':
return 12
raise KeyError
def __setitem__(self, key, value):
self.results = (key, value)
def keys(self):
return list('xyz')
m = M()
g = globals()
exec('z = a', g, m)
self.assertEqual(m.results, ('z', 12))
try:
exec('z = b', g, m)
except NameError:
pass
else:
self.fail('Did not detect a KeyError')
exec('z = dir()', g, m)
self.assertEqual(m.results, ('z', list('xyz')))
exec('z = globals()', g, m)
self.assertEqual(m.results, ('z', g))
exec('z = locals()', g, m)
self.assertEqual(m.results, ('z', m))
self.assertRaises(TypeError, exec, 'z = b', m)
class A:
"Non-mapping"
pass
m = A()
self.assertRaises(TypeError, exec, 'z = a', g, m)
# Verify that dict subclasses work as well
class D(dict):
def __getitem__(self, key):
if key == 'a':
return 12
return dict.__getitem__(self, key)
d = D()
exec('z = a', g, d)
self.assertEqual(d['z'], 12)
def test_extended_arg(self):
longexpr = 'x = x or ' + '-x' * 2500
g = {}
code = '''
def f(x):
%s
%s
%s
%s
%s
%s
%s
%s
%s
%s
# the expressions above have no effect, x == argument
while x:
x -= 1
# EXTENDED_ARG/JUMP_ABSOLUTE here
return x
''' % ((longexpr,)*10)
exec(code, g)
self.assertEqual(g['f'](5), 0)
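        # (The ten copies of longexpr inflate the bytecode enough that jump
        # offsets no longer fit in one byte, forcing EXTENDED_ARG prefixes;
        # the loop then verifies the jump target is still computed correctly.)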
def test_argument_order(self):
self.assertRaises(SyntaxError, exec, 'def f(a=1, b): pass')
def test_float_literals(self):
# testing bad float literals
self.assertRaises(SyntaxError, eval, "2e")
self.assertRaises(SyntaxError, eval, "2.0e+")
self.assertRaises(SyntaxError, eval, "1e-")
self.assertRaises(SyntaxError, eval, "3-4e/21")
def test_indentation(self):
# testing compile() of indented block w/o trailing newline"
s = """
if 1:
if 2:
pass"""
compile(s, "<string>", "exec")
# This test is probably specific to CPython and may not generalize
# to other implementations. We are trying to ensure that when
    # the first line of code starts after line 256, correct line numbers
# in tracebacks are still produced.
def test_leading_newlines(self):
s256 = "".join(["\n"] * 256 + ["spam"])
co = compile(s256, 'fn', 'exec')
self.assertEqual(co.co_firstlineno, 1)
self.assertEqual(list(co.co_lines()), [(0, 2, None), (2, 10, 257)])
def test_literals_with_leading_zeroes(self):
for arg in ["077787", "0xj", "0x.", "0e", "090000000000000",
"080000000000000", "000000000000009", "000000000000008",
"0b42", "0BADCAFE", "0o123456789", "0b1.1", "0o4.2",
"0b101j", "0o153j", "0b100e1", "0o777e1", "0777",
"000777", "000000000000007"]:
self.assertRaises(SyntaxError, eval, arg)
self.assertEqual(eval("0xff"), 255)
self.assertEqual(eval("0777."), 777)
self.assertEqual(eval("0777.0"), 777)
self.assertEqual(eval("000000000000000000000000000000000000000000000000000777e0"), 777)
self.assertEqual(eval("0777e1"), 7770)
self.assertEqual(eval("0e0"), 0)
self.assertEqual(eval("0000e-012"), 0)
self.assertEqual(eval("09.5"), 9.5)
self.assertEqual(eval("0777j"), 777j)
self.assertEqual(eval("000"), 0)
self.assertEqual(eval("00j"), 0j)
self.assertEqual(eval("00.0"), 0)
self.assertEqual(eval("0e3"), 0)
self.assertEqual(eval("090000000000000."), 90000000000000.)
self.assertEqual(eval("090000000000000.0000000000000000000000"), 90000000000000.)
self.assertEqual(eval("090000000000000e0"), 90000000000000.)
self.assertEqual(eval("090000000000000e-0"), 90000000000000.)
self.assertEqual(eval("090000000000000j"), 90000000000000j)
self.assertEqual(eval("000000000000008."), 8.)
self.assertEqual(eval("000000000000009."), 9.)
self.assertEqual(eval("0b101010"), 42)
self.assertEqual(eval("-0b000000000010"), -2)
self.assertEqual(eval("0o777"), 511)
self.assertEqual(eval("-0o0000010"), -8)
def test_unary_minus(self):
# Verify treatment of unary minus on negative numbers SF bug #660455
if sys.maxsize == 2147483647:
# 32-bit machine
all_one_bits = '0xffffffff'
self.assertEqual(eval(all_one_bits), 4294967295)
self.assertEqual(eval("-" + all_one_bits), -4294967295)
elif sys.maxsize == 9223372036854775807:
# 64-bit machine
all_one_bits = '0xffffffffffffffff'
self.assertEqual(eval(all_one_bits), 18446744073709551615)
self.assertEqual(eval("-" + all_one_bits), -18446744073709551615)
else:
self.fail("How many bits *does* this machine have???")
# Verify treatment of constant folding on -(sys.maxsize+1)
# i.e. -2147483648 on 32 bit platforms. Should return int.
self.assertIsInstance(eval("%s" % (-sys.maxsize - 1)), int)
self.assertIsInstance(eval("%s" % (-sys.maxsize - 2)), int)
if sys.maxsize == 9223372036854775807:
def test_32_63_bit_values(self):
a = +4294967296 # 1 << 32
b = -4294967296 # 1 << 32
c = +281474976710656 # 1 << 48
d = -281474976710656 # 1 << 48
e = +4611686018427387904 # 1 << 62
f = -4611686018427387904 # 1 << 62
g = +9223372036854775807 # 1 << 63 - 1
h = -9223372036854775807 # 1 << 63 - 1
for variable in self.test_32_63_bit_values.__code__.co_consts:
if variable is not None:
self.assertIsInstance(variable, int)
def test_sequence_unpacking_error(self):
# Verify sequence packing/unpacking with "or". SF bug #757818
i,j = (1, -1) or (-1, 1)
self.assertEqual(i, 1)
self.assertEqual(j, -1)
def test_none_assignment(self):
stmts = [
'None = 0',
'None += 0',
'__builtins__.None = 0',
'def None(): pass',
'class None: pass',
'(a, None) = 0, 0',
'for None in range(10): pass',
'def f(None): pass',
'import None',
'import x as None',
'from x import None',
'from x import y as None'
]
for stmt in stmts:
stmt += "\n"
self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'single')
self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'exec')
def test_import(self):
succeed = [
'import sys',
'import os, sys',
'import os as bar',
'import os.path as bar',
'from __future__ import nested_scopes, generators',
'from __future__ import (nested_scopes,\ngenerators)',
'from __future__ import (nested_scopes,\ngenerators,)',
'from sys import stdin, stderr, stdout',
'from sys import (stdin, stderr,\nstdout)',
'from sys import (stdin, stderr,\nstdout,)',
'from sys import (stdin\n, stderr, stdout)',
'from sys import (stdin\n, stderr, stdout,)',
'from sys import stdin as si, stdout as so, stderr as se',
'from sys import (stdin as si, stdout as so, stderr as se)',
'from sys import (stdin as si, stdout as so, stderr as se,)',
]
fail = [
'import (os, sys)',
'import (os), (sys)',
'import ((os), (sys))',
'import (sys',
'import sys)',
'import (os,)',
'import os As bar',
'import os.path a bar',
'from sys import stdin As stdout',
'from sys import stdin a stdout',
'from (sys) import stdin',
'from __future__ import (nested_scopes',
'from __future__ import nested_scopes)',
'from __future__ import nested_scopes,\ngenerators',
'from sys import (stdin',
'from sys import stdin)',
'from sys import stdin, stdout,\nstderr',
'from sys import stdin si',
'from sys import stdin,',
'from sys import (*)',
'from sys import (stdin,, stdout, stderr)',
'from sys import (stdin, stdout),',
]
for stmt in succeed:
compile(stmt, 'tmp', 'exec')
for stmt in fail:
self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'exec')
def test_for_distinct_code_objects(self):
# SF bug 1048870
def f():
f1 = lambda x=1: x
f2 = lambda x=2: x
return f1, f2
f1, f2 = f()
self.assertNotEqual(id(f1.__code__), id(f2.__code__))
def test_lambda_doc(self):
l = lambda: "foo"
self.assertIsNone(l.__doc__)
def test_encoding(self):
code = b'# -*- coding: badencoding -*-\npass\n'
self.assertRaises(SyntaxError, compile, code, 'tmp', 'exec')
code = '# -*- coding: badencoding -*-\n"\xc2\xa4"\n'
compile(code, 'tmp', 'exec')
self.assertEqual(eval(code), '\xc2\xa4')
code = '"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xc2\xa4')
code = b'"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xa4')
code = b'# -*- coding: latin1 -*-\n"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xc2\xa4')
code = b'# -*- coding: utf-8 -*-\n"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xa4')
code = b'# -*- coding: iso8859-15 -*-\n"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xc2\u20ac')
code = '"""\\\n# -*- coding: iso8859-15 -*-\n\xc2\xa4"""\n'
self.assertEqual(eval(code), '# -*- coding: iso8859-15 -*-\n\xc2\xa4')
code = b'"""\\\n# -*- coding: iso8859-15 -*-\n\xc2\xa4"""\n'
self.assertEqual(eval(code), '# -*- coding: iso8859-15 -*-\n\xa4')
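        # (For bytes source the coding cookie controls decoding; a bogus
        # cookie raises SyntaxError. A str source is already decoded, so its
        # cookie is effectively ignored.)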
def test_subscripts(self):
# SF bug 1448804
# Class to make testing subscript results easy
class str_map(object):
def __init__(self):
self.data = {}
def __getitem__(self, key):
return self.data[str(key)]
def __setitem__(self, key, value):
self.data[str(key)] = value
def __delitem__(self, key):
del self.data[str(key)]
def __contains__(self, key):
return str(key) in self.data
d = str_map()
# Index
d[1] = 1
self.assertEqual(d[1], 1)
d[1] += 1
self.assertEqual(d[1], 2)
del d[1]
self.assertNotIn(1, d)
# Tuple of indices
d[1, 1] = 1
self.assertEqual(d[1, 1], 1)
d[1, 1] += 1
self.assertEqual(d[1, 1], 2)
del d[1, 1]
self.assertNotIn((1, 1), d)
# Simple slice
d[1:2] = 1
self.assertEqual(d[1:2], 1)
d[1:2] += 1
self.assertEqual(d[1:2], 2)
del d[1:2]
self.assertNotIn(slice(1, 2), d)
# Tuple of simple slices
d[1:2, 1:2] = 1
self.assertEqual(d[1:2, 1:2], 1)
d[1:2, 1:2] += 1
self.assertEqual(d[1:2, 1:2], 2)
del d[1:2, 1:2]
self.assertNotIn((slice(1, 2), slice(1, 2)), d)
# Extended slice
d[1:2:3] = 1
self.assertEqual(d[1:2:3], 1)
d[1:2:3] += 1
self.assertEqual(d[1:2:3], 2)
del d[1:2:3]
self.assertNotIn(slice(1, 2, 3), d)
# Tuple of extended slices
d[1:2:3, 1:2:3] = 1
self.assertEqual(d[1:2:3, 1:2:3], 1)
d[1:2:3, 1:2:3] += 1
self.assertEqual(d[1:2:3, 1:2:3], 2)
del d[1:2:3, 1:2:3]
self.assertNotIn((slice(1, 2, 3), slice(1, 2, 3)), d)
# Ellipsis
d[...] = 1
self.assertEqual(d[...], 1)
d[...] += 1
self.assertEqual(d[...], 2)
del d[...]
self.assertNotIn(Ellipsis, d)
# Tuple of Ellipses
d[..., ...] = 1
self.assertEqual(d[..., ...], 1)
d[..., ...] += 1
self.assertEqual(d[..., ...], 2)
del d[..., ...]
self.assertNotIn((Ellipsis, Ellipsis), d)
def test_annotation_limit(self):
# more than 255 annotations, should compile ok
s = "def f(%s): pass"
s %= ', '.join('a%d:%d' % (i,i) for i in range(300))
compile(s, '?', 'exec')
def test_mangling(self):
class A:
def f():
__mangled = 1
__not_mangled__ = 2
import __mangled_mod
import __package__.module
self.assertIn("_A__mangled", A.f.__code__.co_varnames)
self.assertIn("__not_mangled__", A.f.__code__.co_varnames)
self.assertIn("_A__mangled_mod", A.f.__code__.co_varnames)
self.assertIn("__package__", A.f.__code__.co_varnames)
def test_compile_ast(self):
fname = __file__
if fname.lower().endswith('pyc'):
fname = fname[:-1]
with open(fname, encoding='utf-8') as f:
fcontents = f.read()
sample_code = [
['<assign>', 'x = 5'],
['<ifblock>', """if True:\n pass\n"""],
['<forblock>', """for n in [1, 2, 3]:\n print(n)\n"""],
['<deffunc>', """def foo():\n pass\nfoo()\n"""],
[fname, fcontents],
]
for fname, code in sample_code:
co1 = compile(code, '%s1' % fname, 'exec')
ast = compile(code, '%s2' % fname, 'exec', _ast.PyCF_ONLY_AST)
self.assertTrue(type(ast) == _ast.Module)
co2 = compile(ast, '%s3' % fname, 'exec')
self.assertEqual(co1, co2)
# the code object's filename comes from the second compilation step
self.assertEqual(co2.co_filename, '%s3' % fname)
# raise exception when node type doesn't match with compile mode
co1 = compile('print(1)', '<string>', 'exec', _ast.PyCF_ONLY_AST)
self.assertRaises(TypeError, compile, co1, '<ast>', 'eval')
# raise exception when node type is no start node
self.assertRaises(TypeError, compile, _ast.If(), '<ast>', 'exec')
# raise exception when node has invalid children
ast = _ast.Module()
ast.body = [_ast.BoolOp()]
self.assertRaises(TypeError, compile, ast, '<ast>', 'exec')
def test_dict_evaluation_order(self):
i = 0
def f():
nonlocal i
i += 1
return i
d = {f(): f(), f(): f()}
self.assertEqual(d, {1: 2, 3: 4})
def test_compile_filename(self):
for filename in 'file.py', b'file.py':
code = compile('pass', filename, 'exec')
self.assertEqual(code.co_filename, 'file.py')
for filename in bytearray(b'file.py'), memoryview(b'file.py'):
with self.assertWarns(DeprecationWarning):
code = compile('pass', filename, 'exec')
self.assertEqual(code.co_filename, 'file.py')
self.assertRaises(TypeError, compile, 'pass', list(b'file.py'), 'exec')
@support.cpython_only
def test_same_filename_used(self):
s = """def f(): pass\ndef g(): pass"""
c = compile(s, "myfile", "exec")
for obj in c.co_consts:
if isinstance(obj, types.CodeType):
self.assertIs(obj.co_filename, c.co_filename)
def test_single_statement(self):
self.compile_single("1 + 2")
self.compile_single("\n1 + 2")
self.compile_single("1 + 2\n")
self.compile_single("1 + 2\n\n")
self.compile_single("1 + 2\t\t\n")
self.compile_single("1 + 2\t\t\n ")
self.compile_single("1 + 2 # one plus two")
self.compile_single("1; 2")
self.compile_single("import sys; sys")
self.compile_single("def f():\n pass")
self.compile_single("while False:\n pass")
self.compile_single("if x:\n f(x)")
self.compile_single("if x:\n f(x)\nelse:\n g(x)")
self.compile_single("class T:\n pass")
self.compile_single("c = '''\na=1\nb=2\nc=3\n'''")
def test_bad_single_statement(self):
self.assertInvalidSingle('1\n2')
self.assertInvalidSingle('def f(): pass')
self.assertInvalidSingle('a = 13\nb = 187')
self.assertInvalidSingle('del x\ndel y')
self.assertInvalidSingle('f()\ng()')
self.assertInvalidSingle('f()\n# blah\nblah()')
self.assertInvalidSingle('f()\nxy # blah\nblah()')
self.assertInvalidSingle('x = 5 # comment\nx = 6\n')
self.assertInvalidSingle("c = '''\nd=1\n'''\na = 1\n\nb = 2\n")
def test_particularly_evil_undecodable(self):
# Issue 24022
src = b'0000\x00\n00000000000\n\x00\n\x9e\n'
with tempfile.TemporaryDirectory() as tmpd:
fn = os.path.join(tmpd, "bad.py")
with open(fn, "wb") as fp:
fp.write(src)
res = script_helper.run_python_until_end(fn)[0]
self.assertIn(b"Non-UTF-8", res.err)
def test_yet_more_evil_still_undecodable(self):
# Issue #25388
src = b"#\x00\n#\xfd\n"
with tempfile.TemporaryDirectory() as tmpd:
fn = os.path.join(tmpd, "bad.py")
with open(fn, "wb") as fp:
fp.write(src)
res = script_helper.run_python_until_end(fn)[0]
self.assertIn(b"Non-UTF-8", res.err)
@support.cpython_only
def test_compiler_recursion_limit(self):
# Expected limit is sys.getrecursionlimit() * the scaling factor
# in symtable.c (currently 3)
# We expect to fail *at* that limit, because we use up some of
# the stack depth limit in the test suite code
# So we check the expected limit and 75% of that
# XXX (ncoghlan): duplicating the scaling factor here is a little
# ugly. Perhaps it should be exposed somewhere...
fail_depth = sys.getrecursionlimit() * 3
crash_depth = sys.getrecursionlimit() * 300
success_depth = int(fail_depth * 0.75)
def check_limit(prefix, repeated, mode="single"):
expect_ok = prefix + repeated * success_depth
compile(expect_ok, '<test>', mode)
for depth in (fail_depth, crash_depth):
broken = prefix + repeated * depth
details = "Compiling ({!r} + {!r} * {})".format(
prefix, repeated, depth)
with self.assertRaises(RecursionError, msg=details):
compile(broken, '<test>', mode)
check_limit("a", "()")
check_limit("a", ".b")
check_limit("a", "[0]")
check_limit("a", "*a")
# XXX Crashes in the parser.
# check_limit("a", " if a else a")
# check_limit("if a: pass", "\nelif a: pass", mode="exec")
def test_null_terminated(self):
# The source code is null-terminated internally, but bytes-like
# objects are accepted, which might not be null-terminated.
with self.assertRaisesRegex(ValueError, "cannot contain null"):
compile("123\x00", "<dummy>", "eval")
with self.assertRaisesRegex(ValueError, "cannot contain null"):
compile(memoryview(b"123\x00"), "<dummy>", "eval")
code = compile(memoryview(b"123\x00")[1:-1], "<dummy>", "eval")
self.assertEqual(eval(code), 23)
code = compile(memoryview(b"1234")[1:-1], "<dummy>", "eval")
self.assertEqual(eval(code), 23)
code = compile(memoryview(b"$23$")[1:-1], "<dummy>", "eval")
self.assertEqual(eval(code), 23)
# Also test when eval() and exec() do the compilation step
self.assertEqual(eval(memoryview(b"1234")[1:-1]), 23)
namespace = dict()
exec(memoryview(b"ax = 123")[1:-1], namespace)
self.assertEqual(namespace['x'], 12)
def check_constant(self, func, expected):
for const in func.__code__.co_consts:
if repr(const) == repr(expected):
break
else:
self.fail("unable to find constant %r in %r"
% (expected, func.__code__.co_consts))
# Merging equal constants is not a strict requirement for the Python
# semantics; it's more of an implementation detail.
@support.cpython_only
def test_merge_constants(self):
# Issue #25843: compile() must merge constants which are equal
# and have the same type.
def check_same_constant(const):
ns = {}
code = "f1, f2 = lambda: %r, lambda: %r" % (const, const)
exec(code, ns)
f1 = ns['f1']
f2 = ns['f2']
self.assertIs(f1.__code__, f2.__code__)
self.check_constant(f1, const)
self.assertEqual(repr(f1()), repr(const))
check_same_constant(None)
check_same_constant(0)
check_same_constant(0.0)
check_same_constant(b'abc')
check_same_constant('abc')
# Note: "lambda: ..." emits "LOAD_CONST Ellipsis",
# whereas "lambda: Ellipsis" emits "LOAD_GLOBAL Ellipsis"
f1, f2 = lambda: ..., lambda: ...
self.assertIs(f1.__code__, f2.__code__)
self.check_constant(f1, Ellipsis)
self.assertEqual(repr(f1()), repr(Ellipsis))
# Merge constants in tuple or frozenset
f1, f2 = lambda: "not a name", lambda: ("not a name",)
f3 = lambda x: x in {("not a name",)}
self.assertIs(f1.__code__.co_consts[1],
f2.__code__.co_consts[1][0])
self.assertIs(next(iter(f3.__code__.co_consts[1])),
f2.__code__.co_consts[1])
# {0} is converted to a constant frozenset({0}) by the peephole
# optimizer
f1, f2 = lambda x: x in {0}, lambda x: x in {0}
self.assertIs(f1.__code__, f2.__code__)
self.check_constant(f1, frozenset({0}))
self.assertTrue(f1(0))
# Merging equal co_linetable and co_code is not a strict requirement
# for the Python semantics; it's more of an implementation detail.
@support.cpython_only
def test_merge_code_attrs(self):
# See https://bugs.python.org/issue42217
f1 = lambda x: x.y.z
f2 = lambda a: a.b.c
self.assertIs(f1.__code__.co_linetable, f2.__code__.co_linetable)
self.assertIs(f1.__code__.co_code, f2.__code__.co_code)
# Stripping unused constants is not a strict requirement for the
# Python semantics; it's more of an implementation detail.
@support.cpython_only
def test_strip_unused_consts(self):
# Python 3.10rc1 appended None to co_consts when None is not used
# at all. See bpo-45056.
def f1():
"docstring"
return 42
self.assertEqual(f1.__code__.co_consts, ("docstring", 42))
# This is a regression test for a CPython-specific peephole optimizer
# implementation bug present in a few releases. Its assertion verifies
# that peephole optimization was actually done, though that isn't an
# indication of the bug's presence or not (crashing is).
@support.cpython_only
def test_peephole_opt_unreachable_code_array_access_in_bounds(self):
"""Regression test for issue35193 when run under clang msan."""
def unused_code_at_end():
return 3
raise RuntimeError("unreachable")
# The above function definition will trigger the out of bounds
# bug in the peephole optimizer as it scans opcodes past the
# RETURN_VALUE opcode. This does not always crash an interpreter.
# When you build with the clang memory sanitizer it reliably aborts.
self.assertEqual(
'RETURN_VALUE',
list(dis.get_instructions(unused_code_at_end))[-1].opname)
def test_dont_merge_constants(self):
# Issue #25843: compile() must not merge constants which are equal
# but have a different type.
def check_different_constants(const1, const2):
ns = {}
exec("f1, f2 = lambda: %r, lambda: %r" % (const1, const2), ns)
f1 = ns['f1']
f2 = ns['f2']
self.assertIsNot(f1.__code__, f2.__code__)
self.assertNotEqual(f1.__code__, f2.__code__)
self.check_constant(f1, const1)
self.check_constant(f2, const2)
self.assertEqual(repr(f1()), repr(const1))
self.assertEqual(repr(f2()), repr(const2))
check_different_constants(0, 0.0)
check_different_constants(+0.0, -0.0)
check_different_constants((0,), (0.0,))
check_different_constants('a', b'a')
check_different_constants(('a',), (b'a',))
# check_different_constants() cannot be used because repr(-0j) is
# '(-0-0j)', but '(-0-0j)' evaluates back to 0j: we lose the sign.
f1, f2 = lambda: +0.0j, lambda: -0.0j
self.assertIsNot(f1.__code__, f2.__code__)
self.check_constant(f1, +0.0j)
self.check_constant(f2, -0.0j)
self.assertEqual(repr(f1()), repr(+0.0j))
self.assertEqual(repr(f2()), repr(-0.0j))
# {0} is converted to a constant frozenset({0}) by the peephole
# optimizer
f1, f2 = lambda x: x in {0}, lambda x: x in {0.0}
self.assertIsNot(f1.__code__, f2.__code__)
self.check_constant(f1, frozenset({0}))
self.check_constant(f2, frozenset({0.0}))
self.assertTrue(f1(0))
self.assertTrue(f2(0.0))
def test_path_like_objects(self):
# An implicit test for PyUnicode_FSDecoder().
compile("42", FakePath("test_compile_pathlike"), "single")
def test_stack_overflow(self):
# bpo-31113: Stack overflow when compile a long sequence of
# complex statements.
compile("if a: b\n" * 200000, "<dummy>", "exec")
# Multiple users rely on the fact that CPython does not generate
# bytecode for dead code blocks. See bpo-37500 for more context.
@support.cpython_only
def test_dead_blocks_do_not_generate_bytecode(self):
def unused_block_if():
if 0:
return 42
def unused_block_while():
while 0:
return 42
def unused_block_if_else():
if 1:
return None
else:
return 42
def unused_block_while_else():
while 1:
return None
else:
return 42
funcs = [unused_block_if, unused_block_while,
unused_block_if_else, unused_block_while_else]
for func in funcs:
opcodes = list(dis.get_instructions(func))
self.assertLessEqual(len(opcodes), 4)
self.assertEqual('LOAD_CONST', opcodes[-2].opname)
self.assertEqual(None, opcodes[-2].argval)
self.assertEqual('RETURN_VALUE', opcodes[-1].opname)
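# --- Editor's sketch: inspecting the dead-code elimination checked above
# with dis. The `if 0:` branch leaves no trace in the emitted bytecode;
# only the implicit `return None` remains. Names are illustrative.
import dis as _dis
def _dead():
    if 0:
        return 42
for _ins in _dis.get_instructions(_dead):
    assert _ins.argval != 42   # the constant 42 was never emitted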
def test_false_while_loop(self):
def break_in_while():
while False:
break
def continue_in_while():
while False:
continue
funcs = [break_in_while, continue_in_while]
# Check that we did not raise but we also don't generate bytecode
for func in funcs:
opcodes = list(dis.get_instructions(func))
self.assertEqual(3, len(opcodes))
self.assertEqual('LOAD_CONST', opcodes[1].opname)
self.assertEqual(None, opcodes[1].argval)
self.assertEqual('RETURN_VALUE', opcodes[2].opname)
def test_consts_in_conditionals(self):
def and_true(x):
return True and x
def and_false(x):
return False and x
def or_true(x):
return True or x
def or_false(x):
return False or x
funcs = [and_true, and_false, or_true, or_false]
# Check that condition is removed.
for func in funcs:
with self.subTest(func=func):
opcodes = list(dis.get_instructions(func))
self.assertLessEqual(len(opcodes), 3)
self.assertIn('LOAD_', opcodes[-2].opname)
self.assertEqual('RETURN_VALUE', opcodes[-1].opname)
def test_imported_load_method(self):
sources = [
"""\
import os
def foo():
return os.uname()
""",
"""\
import os as operating_system
def foo():
return operating_system.uname()
""",
"""\
from os import path
def foo(x):
return path.join(x)
""",
"""\
from os import path as os_path
def foo(x):
return os_path.join(x)
"""
]
for source in sources:
namespace = {}
exec(textwrap.dedent(source), namespace)
func = namespace['foo']
with self.subTest(func=func.__name__):
opcodes = list(dis.get_instructions(func))
instructions = [opcode.opname for opcode in opcodes]
self.assertNotIn('LOAD_METHOD', instructions)
self.assertIn('LOAD_ATTR', instructions)
self.assertIn('PRECALL', instructions)
def test_lineno_procedure_call(self):
def call():
(
print()
)
line1 = call.__code__.co_firstlineno + 1
assert line1 not in [line for (_, _, line) in call.__code__.co_lines()]
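# --- Editor's sketch: co_lines(), used throughout these tests, yields
# (start_offset, end_offset, line) triples that map bytecode ranges to
# source lines; line is None for ranges with no line information.
def _f():
    a = 1
    b = 2
    return a + b
for _start, _end, _line in _f.__code__.co_lines():
    assert _start <= _end
    assert _line is None or _line >= _f.__code__.co_firstlineno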
def test_lineno_after_implicit_return(self):
TRUE = True
# Don't use constant True or False, as the compiler will remove the test
def if1(x):
x()
if TRUE:
pass
def if2(x):
x()
if TRUE:
pass
else:
pass
def if3(x):
x()
if TRUE:
pass
else:
return None
def if4(x):
x()
if not TRUE:
pass
funcs = [ if1, if2, if3, if4]
lastlines = [ 3, 3, 3, 2]
frame = None
def save_caller_frame():
nonlocal frame
frame = sys._getframe(1)
for func, lastline in zip(funcs, lastlines, strict=True):
with self.subTest(func=func):
func(save_caller_frame)
self.assertEqual(frame.f_lineno-frame.f_code.co_firstlineno, lastline)
def test_lineno_after_no_code(self):
def no_code1():
"doc string"
def no_code2():
a: int
for func in (no_code1, no_code2):
with self.subTest(func=func):
code = func.__code__
lines = list(code.co_lines())
self.assertEqual(len(lines), 1)
start, end, line = lines[0]
self.assertEqual(start, 0)
self.assertEqual(end, len(code.co_code))
self.assertEqual(line, code.co_firstlineno)
def test_lineno_attribute(self):
def load_attr():
return (
o.
a
)
load_attr_lines = [ 0, 2, 3, 1 ]
def load_method():
return (
o.
m(
0
)
)
load_method_lines = [ 0, 2, 3, 4, 3, 1 ]
def store_attr():
(
o.
a
) = (
v
)
store_attr_lines = [ 0, 5, 2, 3 ]
def aug_store_attr():
(
o.
a
) += (
v
)
aug_store_attr_lines = [ 0, 2, 3, 5, 1, 3 ]
funcs = [ load_attr, load_method, store_attr, aug_store_attr]
func_lines = [ load_attr_lines, load_method_lines,
store_attr_lines, aug_store_attr_lines]
for func, lines in zip(funcs, func_lines, strict=True):
with self.subTest(func=func):
code_lines = [ line-func.__code__.co_firstlineno
for (_, _, line) in func.__code__.co_lines()
if line is not None ]
self.assertEqual(lines, code_lines)
def test_line_number_genexp(self):
def return_genexp():
return (1
for
x
in
y)
genexp_lines = [1, 3, 1]
genexp_code = return_genexp.__code__.co_consts[1]
code_lines = [ None if line is None else line-return_genexp.__code__.co_firstlineno
for (_, _, line) in genexp_code.co_lines() ]
self.assertEqual(genexp_lines, code_lines)
def test_line_number_implicit_return_after_async_for(self):
async def test(aseq):
async for i in aseq:
body
expected_lines = [0, 1, 2, 1]
code_lines = [ None if line is None else line-test.__code__.co_firstlineno
for (_, _, line) in test.__code__.co_lines() ]
self.assertEqual(expected_lines, code_lines)
def test_big_dict_literal(self):
# The compiler has a flushing point in "compiler_dict" that compiles
# a portion of the dictionary literal when the loop that iterates over the
# items reaches 0xFFFF elements, but the code was not including the boundary
# element, dropping the key at position 0xFFFF. See bpo-41531 for more information.
dict_size = 0xFFFF + 1
the_dict = "{" + ",".join(f"{x}:{x}" for x in range(dict_size)) + "}"
self.assertEqual(len(eval(the_dict)), dict_size)
def test_redundant_jump_in_if_else_break(self):
# Check that no bytecode containing jumps that simply point to the next
# instruction is generated around if-else-break style structures. See bpo-42615.
def if_else_break():
val = 1
while True:
if val > 0:
val -= 1
else:
break
val = -1
INSTR_SIZE = 2
HANDLED_JUMPS = (
'POP_JUMP_IF_FALSE',
'POP_JUMP_IF_TRUE',
'JUMP_ABSOLUTE',
'JUMP_FORWARD',
)
for line, instr in enumerate(
dis.Bytecode(if_else_break, show_caches=True)
):
if instr.opname == 'JUMP_FORWARD':
self.assertNotEqual(instr.arg, 0)
elif instr.opname in HANDLED_JUMPS:
self.assertNotEqual(instr.arg, (line + 1)*INSTR_SIZE)
def test_no_wraparound_jump(self):
# See https://bugs.python.org/issue46724
def while_not_chained(a, b, c):
while not (a < b < c):
pass
for instr in dis.Bytecode(while_not_chained):
self.assertNotEqual(instr.opname, "EXTENDED_ARG")
@requires_debug_ranges()
class TestSourcePositions(unittest.TestCase):
# Ensure that compiled code snippets have correct line and column numbers
# in `co_positions()`.
def check_positions_against_ast(self, snippet):
# Basic check that makes sure each line and column is at least present
# in one of the AST nodes of the source code.
code = compile(snippet, 'test_compile.py', 'exec')
ast_tree = compile(snippet, 'test_compile.py', 'exec', _ast.PyCF_ONLY_AST)
self.assertTrue(type(ast_tree) == _ast.Module)
# Use an AST visitor that notes all the offsets.
lines, end_lines, columns, end_columns = set(), set(), set(), set()
class SourceOffsetVisitor(ast.NodeVisitor):
def generic_visit(self, node):
super().generic_visit(node)
if not isinstance(node, ast.expr) and not isinstance(node, ast.stmt):
return
lines.add(node.lineno)
end_lines.add(node.end_lineno)
columns.add(node.col_offset)
end_columns.add(node.end_col_offset)
SourceOffsetVisitor().visit(ast_tree)
# Check against the positions in the code object.
for (line, end_line, col, end_col) in code.co_positions():
# If the offset is not None (None indicates missing data), ensure that
# it was part of one of the AST nodes.
if line is not None:
self.assertIn(line, lines)
if end_line is not None:
self.assertIn(end_line, end_lines)
if col is not None:
self.assertIn(col, columns)
if end_col is not None:
self.assertIn(end_col, end_columns)
return code, ast_tree
def assertOpcodeSourcePositionIs(self, code, opcode,
line, end_line, column, end_column, occurrence=1):
for instr, position in zip(
dis.Bytecode(code, show_caches=True), code.co_positions(), strict=True
):
if instr.opname == opcode:
occurrence -= 1
if not occurrence:
self.assertEqual(position[0], line)
self.assertEqual(position[1], end_line)
self.assertEqual(position[2], column)
self.assertEqual(position[3], end_column)
return
self.fail(f"Opcode {opcode} not found in code")
def test_simple_assignment(self):
snippet = "x = 1"
self.check_positions_against_ast(snippet)
def test_compiles_to_extended_op_arg(self):
# Make sure we still have valid positions when the code compiles to an
# EXTENDED_ARG by performing a loop which needs a JUMP_ABSOLUTE after
# a bunch of opcodes.
snippet = "x = x\n" * 10_000
snippet += ("while x != 0:\n"
" x -= 1\n"
"while x != 0:\n"
" x += 1\n"
)
compiled_code, _ = self.check_positions_against_ast(snippet)
self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP',
line=10_000 + 2, end_line=10_000 + 2,
column=2, end_column=8, occurrence=1)
self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP',
line=10_000 + 4, end_line=10_000 + 4,
column=2, end_column=9, occurrence=2)
def test_multiline_expression(self):
snippet = """\
f(
1, 2, 3, 4
)
"""
compiled_code, _ = self.check_positions_against_ast(snippet)
self.assertOpcodeSourcePositionIs(compiled_code, 'CALL',
line=1, end_line=3, column=0, end_column=1)
def test_very_long_line_end_offset(self):
# Make sure we get None when the column offset is too large to
# store in a byte.
long_string = "a" * 1000
snippet = f"g('{long_string}')"
compiled_code, _ = self.check_positions_against_ast(snippet)
self.assertOpcodeSourcePositionIs(compiled_code, 'CALL',
line=1, end_line=1, column=None, end_column=None)
def test_complex_single_line_expression(self):
snippet = "a - b @ (c * x['key'] + 23)"
compiled_code, _ = self.check_positions_against_ast(snippet)
self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_SUBSCR',
line=1, end_line=1, column=13, end_column=21)
self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP',
line=1, end_line=1, column=9, end_column=21, occurrence=1)
self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP',
line=1, end_line=1, column=9, end_column=26, occurrence=2)
self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP',
line=1, end_line=1, column=4, end_column=27, occurrence=3)
self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP',
line=1, end_line=1, column=0, end_column=27, occurrence=4)
class TestExpressionStackSize(unittest.TestCase):
# These tests check that the computed stack size for a code object
# stays within reasonable bounds (see issue #21523 for an example
# dysfunction).
N = 100
def check_stack_size(self, code):
# To assert that the alleged stack size is not O(N), we
# check that it is smaller than log(N).
if isinstance(code, str):
code = compile(code, "<foo>", "single")
max_size = math.ceil(math.log(len(code.co_code)))
self.assertLessEqual(code.co_stacksize, max_size)
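# --- Editor's sketch: why the logarithmic bound above is plausible.
# Short-circuit chains compile to code that keeps only a couple of values
# on the stack at once, independent of the chain's length.
_short = compile("x and x", "<stack-demo>", "eval")
_long = compile("x and " * 100 + "x", "<stack-demo>", "eval")
assert _long.co_stacksize < 10   # does not grow with the chain length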
def test_and(self):
self.check_stack_size("x and " * self.N + "x")
def test_or(self):
self.check_stack_size("x or " * self.N + "x")
def test_and_or(self):
self.check_stack_size("x and x or " * self.N + "x")
def test_chained_comparison(self):
self.check_stack_size("x < " * self.N + "x")
def test_if_else(self):
self.check_stack_size("x if x else " * self.N + "x")
def test_binop(self):
self.check_stack_size("x + " * self.N + "x")
def test_list(self):
self.check_stack_size("[" + "x, " * self.N + "x]")
def test_tuple(self):
self.check_stack_size("(" + "x, " * self.N + "x)")
def test_set(self):
self.check_stack_size("{" + "x, " * self.N + "x}")
def test_dict(self):
self.check_stack_size("{" + "x:x, " * self.N + "x:x}")
def test_func_args(self):
self.check_stack_size("f(" + "x, " * self.N + ")")
def test_func_kwargs(self):
kwargs = (f'a{i}=x' for i in range(self.N))
self.check_stack_size("f(" + ", ".join(kwargs) + ")")
def test_meth_args(self):
self.check_stack_size("o.m(" + "x, " * self.N + ")")
def test_meth_kwargs(self):
kwargs = (f'a{i}=x' for i in range(self.N))
self.check_stack_size("o.m(" + ", ".join(kwargs) + ")")
def test_func_and(self):
code = "def f(x):\n"
code += " x and x\n" * self.N
self.check_stack_size(code)
class TestStackSizeStability(unittest.TestCase):
# Check that repeating certain snippets doesn't increase the stack size
# beyond what a single snippet requires.
def check_stack_size(self, snippet, async_=False):
def compile_snippet(i):
ns = {}
script = """def func():\n""" + i * snippet
if async_:
script = "async " + script
code = compile(script, "<script>", "exec")
exec(code, ns, ns)
return ns['func'].__code__
sizes = [compile_snippet(i).co_stacksize for i in range(2, 5)]
if len(set(sizes)) != 1:
import dis, io
out = io.StringIO()
dis.dis(compile_snippet(1), file=out)
self.fail("stack sizes diverge with # of consecutive snippets: "
"%s\n%s\n%s" % (sizes, snippet, out.getvalue()))
def test_if(self):
snippet = """
if x:
a
"""
self.check_stack_size(snippet)
def test_if_else(self):
snippet = """
if x:
a
elif y:
b
else:
c
"""
self.check_stack_size(snippet)
def test_try_except_bare(self):
snippet = """
try:
a
except:
b
"""
self.check_stack_size(snippet)
def test_try_except_qualified(self):
snippet = """
try:
a
except ImportError:
b
except:
c
else:
d
"""
self.check_stack_size(snippet)
def test_try_except_as(self):
snippet = """
try:
a
except ImportError as e:
b
except:
c
else:
d
"""
self.check_stack_size(snippet)
def test_try_except_star_qualified(self):
snippet = """
try:
a
except* ImportError:
b
else:
c
"""
self.check_stack_size(snippet)
def test_try_except_star_as(self):
snippet = """
try:
a
except* ImportError as e:
b
else:
c
"""
self.check_stack_size(snippet)
def test_try_except_star_finally(self):
snippet = """
try:
a
except* A:
b
finally:
c
"""
self.check_stack_size(snippet)
def test_try_finally(self):
snippet = """
try:
a
finally:
b
"""
self.check_stack_size(snippet)
def test_with(self):
snippet = """
with x as y:
a
"""
self.check_stack_size(snippet)
def test_while_else(self):
snippet = """
while x:
a
else:
b
"""
self.check_stack_size(snippet)
def test_for(self):
snippet = """
for x in y:
a
"""
self.check_stack_size(snippet)
def test_for_else(self):
snippet = """
for x in y:
a
else:
b
"""
self.check_stack_size(snippet)
def test_for_break_continue(self):
snippet = """
for x in y:
if z:
break
elif u:
continue
else:
a
else:
b
"""
self.check_stack_size(snippet)
def test_for_break_continue_inside_try_finally_block(self):
snippet = """
for x in y:
try:
if z:
break
elif u:
continue
else:
a
finally:
f
else:
b
"""
self.check_stack_size(snippet)
def test_for_break_continue_inside_finally_block(self):
snippet = """
for x in y:
try:
t
finally:
if z:
break
elif u:
continue
else:
a
else:
b
"""
self.check_stack_size(snippet)
def test_for_break_continue_inside_except_block(self):
snippet = """
for x in y:
try:
t
except:
if z:
break
elif u:
continue
else:
a
else:
b
"""
self.check_stack_size(snippet)
def test_for_break_continue_inside_with_block(self):
snippet = """
for x in y:
with c:
if z:
break
elif u:
continue
else:
a
else:
b
"""
self.check_stack_size(snippet)
def test_return_inside_try_finally_block(self):
snippet = """
try:
if z:
return
else:
a
finally:
f
"""
self.check_stack_size(snippet)
def test_return_inside_finally_block(self):
snippet = """
try:
t
finally:
if z:
return
else:
a
"""
self.check_stack_size(snippet)
def test_return_inside_except_block(self):
snippet = """
try:
t
except:
if z:
return
else:
a
"""
self.check_stack_size(snippet)
def test_return_inside_with_block(self):
snippet = """
with c:
if z:
return
else:
a
"""
self.check_stack_size(snippet)
def test_async_with(self):
snippet = """
async with x as y:
a
"""
self.check_stack_size(snippet, async_=True)
def test_async_for(self):
snippet = """
async for x in y:
a
"""
self.check_stack_size(snippet, async_=True)
def test_async_for_else(self):
snippet = """
async for x in y:
a
else:
b
"""
self.check_stack_size(snippet, async_=True)
def test_for_break_continue_inside_async_with_block(self):
snippet = """
for x in y:
async with c:
if z:
break
elif u:
continue
else:
a
else:
b
"""
self.check_stack_size(snippet, async_=True)
def test_return_inside_async_with_block(self):
snippet = """
async with c:
if z:
return
else:
a
"""
self.check_stack_size(snippet, async_=True)
if __name__ == "__main__":
unittest.main()
| 34.496408
| 95
| 0.533211
|
import dis
import math
import os
import unittest
import sys
import ast
import _ast
import tempfile
import types
import textwrap
from test import support
from test.support import script_helper, requires_debug_ranges
from test.support.os_helper import FakePath
class TestSpecifics(unittest.TestCase):
def compile_single(self, source):
compile(source, "<single>", "single")
def assertInvalidSingle(self, source):
self.assertRaises(SyntaxError, self.compile_single, source)
def test_no_ending_newline(self):
compile("hi", "<test>", "exec")
compile("hi\r", "<test>", "exec")
def test_empty(self):
compile("", "<test>", "exec")
def test_other_newlines(self):
compile("\r\n", "<test>", "exec")
compile("\r", "<test>", "exec")
compile("hi\r\nstuff\r\ndef f():\n pass\r", "<test>", "exec")
compile("this_is\rreally_old_mac\rdef f():\n pass", "<test>", "exec")
def test_debug_assignment(self):
self.assertRaises(SyntaxError, compile, '__debug__ = 1', '?', 'single')
import builtins
prev = builtins.__debug__
setattr(builtins, '__debug__', 'sure')
self.assertEqual(__debug__, prev)
setattr(builtins, '__debug__', prev)
def test_argument_handling(self):
self.assertRaises(SyntaxError, eval, 'lambda a,a:0')
self.assertRaises(SyntaxError, eval, 'lambda a,a=1:0')
self.assertRaises(SyntaxError, eval, 'lambda a=1,a=1:0')
self.assertRaises(SyntaxError, exec, 'def f(a, a): pass')
self.assertRaises(SyntaxError, exec, 'def f(a = 0, a = 1): pass')
self.assertRaises(SyntaxError, exec, 'def f(a): global a; a = 1')
def test_syntax_error(self):
self.assertRaises(SyntaxError, compile, "1+*3", "filename", "exec")
def test_none_keyword_arg(self):
self.assertRaises(SyntaxError, compile, "f(None=1)", "<string>", "exec")
def test_duplicate_global_local(self):
self.assertRaises(SyntaxError, exec, 'def f(a): global a; a = 1')
def test_exec_with_general_mapping_for_locals(self):
class M:
def __getitem__(self, key):
if key == 'a':
return 12
raise KeyError
def __setitem__(self, key, value):
self.results = (key, value)
def keys(self):
return list('xyz')
m = M()
g = globals()
exec('z = a', g, m)
self.assertEqual(m.results, ('z', 12))
try:
exec('z = b', g, m)
except NameError:
pass
else:
self.fail('Did not detect a KeyError')
exec('z = dir()', g, m)
self.assertEqual(m.results, ('z', list('xyz')))
exec('z = globals()', g, m)
self.assertEqual(m.results, ('z', g))
exec('z = locals()', g, m)
self.assertEqual(m.results, ('z', m))
self.assertRaises(TypeError, exec, 'z = b', m)
class A:
pass
m = A()
self.assertRaises(TypeError, exec, 'z = a', g, m)
class D(dict):
def __getitem__(self, key):
if key == 'a':
return 12
return dict.__getitem__(self, key)
d = D()
exec('z = a', g, d)
self.assertEqual(d['z'], 12)
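# --- Editor's sketch: the minimal mapping protocol exec() needs for a locals
# namespace, per the test above: __getitem__ for reads (a missing key must
# surface as KeyError, which exec reports as NameError) and __setitem__ for
# writes; globals, by contrast, must be a real dict. Names are illustrative.
class _MiniLocals:
    def __init__(self):
        self.store = {}
    def __getitem__(self, key):
        return self.store[key]
    def __setitem__(self, key, value):
        self.store[key] = value

_loc = _MiniLocals()
exec("answer = 6 * 7", {}, _loc)
assert _loc.store["answer"] == 42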
def test_extended_arg(self):
longexpr = 'x = x or ' + '-x' * 2500
g = {}
code = '''
def f(x):
%s
%s
%s
%s
%s
%s
%s
%s
%s
%s
# the expressions above have no effect, x == argument
while x:
x -= 1
# EXTENDED_ARG/JUMP_ABSOLUTE here
return x
''' % ((longexpr,)*10)
exec(code, g)
self.assertEqual(g['f'](5), 0)
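# --- Editor's sketch (CPython-specific): what EXTENDED_ARG is. Each
# instruction carries a one-byte argument; larger arguments, like the jump
# targets in the huge function above or name indices past 255, are built up
# by prefixing EXTENDED_ARG instructions. Names are illustrative.
import dis as _dis
_src = "[" + ", ".join("a%d" % i for i in range(300)) + "]"
_code = compile(_src, "<ea-demo>", "eval")
_opcodes = _code.co_code[::2]   # opcode bytes in CPython wordcode
assert _dis.opmap["EXTENDED_ARG"] in _opcodes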
def test_argument_order(self):
self.assertRaises(SyntaxError, exec, 'def f(a=1, b): pass')
def test_float_literals(self):
self.assertRaises(SyntaxError, eval, "2e")
self.assertRaises(SyntaxError, eval, "2.0e+")
self.assertRaises(SyntaxError, eval, "1e-")
self.assertRaises(SyntaxError, eval, "3-4e/21")
def test_indentation(self):
s = """
if 1:
if 2:
pass"""
compile(s, "<string>", "exec")
# This test is probably specific to CPython and may not generalize
# to other implementations. We are trying to ensure that when
# the first line of code starts after line 256, correct line numbers
# in tracebacks are still produced.
def test_leading_newlines(self):
s256 = "".join(["\n"] * 256 + ["spam"])
co = compile(s256, 'fn', 'exec')
self.assertEqual(co.co_firstlineno, 1)
self.assertEqual(list(co.co_lines()), [(0, 2, None), (2, 10, 257)])
def test_literals_with_leading_zeroes(self):
for arg in ["077787", "0xj", "0x.", "0e", "090000000000000",
"080000000000000", "000000000000009", "000000000000008",
"0b42", "0BADCAFE", "0o123456789", "0b1.1", "0o4.2",
"0b101j", "0o153j", "0b100e1", "0o777e1", "0777",
"000777", "000000000000007"]:
self.assertRaises(SyntaxError, eval, arg)
self.assertEqual(eval("0xff"), 255)
self.assertEqual(eval("0777."), 777)
self.assertEqual(eval("0777.0"), 777)
self.assertEqual(eval("000000000000000000000000000000000000000000000000000777e0"), 777)
self.assertEqual(eval("0777e1"), 7770)
self.assertEqual(eval("0e0"), 0)
self.assertEqual(eval("0000e-012"), 0)
self.assertEqual(eval("09.5"), 9.5)
self.assertEqual(eval("0777j"), 777j)
self.assertEqual(eval("000"), 0)
self.assertEqual(eval("00j"), 0j)
self.assertEqual(eval("00.0"), 0)
self.assertEqual(eval("0e3"), 0)
self.assertEqual(eval("090000000000000."), 90000000000000.)
self.assertEqual(eval("090000000000000.0000000000000000000000"), 90000000000000.)
self.assertEqual(eval("090000000000000e0"), 90000000000000.)
self.assertEqual(eval("090000000000000e-0"), 90000000000000.)
self.assertEqual(eval("090000000000000j"), 90000000000000j)
self.assertEqual(eval("000000000000008."), 8.)
self.assertEqual(eval("000000000000009."), 9.)
self.assertEqual(eval("0b101010"), 42)
self.assertEqual(eval("-0b000000000010"), -2)
self.assertEqual(eval("0o777"), 511)
self.assertEqual(eval("-0o0000010"), -8)
def test_unary_minus(self):
# Verify treatment of unary minus on negative numbers SF bug #660455
if sys.maxsize == 2147483647:
# 32-bit machine
all_one_bits = '0xffffffff'
self.assertEqual(eval(all_one_bits), 4294967295)
self.assertEqual(eval("-" + all_one_bits), -4294967295)
elif sys.maxsize == 9223372036854775807:
# 64-bit machine
all_one_bits = '0xffffffffffffffff'
self.assertEqual(eval(all_one_bits), 18446744073709551615)
self.assertEqual(eval("-" + all_one_bits), -18446744073709551615)
else:
self.fail("How many bits *does* this machine have???")
# Verify treatment of constant folding on -(sys.maxsize+1)
# i.e. -2147483648 on 32 bit platforms. Should return int.
self.assertIsInstance(eval("%s" % (-sys.maxsize - 1)), int)
self.assertIsInstance(eval("%s" % (-sys.maxsize - 2)), int)
if sys.maxsize == 9223372036854775807:
def test_32_63_bit_values(self):
a = +4294967296 # 1 << 32
b = -4294967296 # 1 << 32
c = +281474976710656 # 1 << 48
d = -281474976710656 # 1 << 48
e = +4611686018427387904 # 1 << 62
f = -4611686018427387904 # 1 << 62
g = +9223372036854775807 # 1 << 63 - 1
h = -9223372036854775807 # 1 << 63 - 1
for variable in self.test_32_63_bit_values.__code__.co_consts:
if variable is not None:
self.assertIsInstance(variable, int)
def test_sequence_unpacking_error(self):
# Verify sequence packing/unpacking with "or". SF bug #757818
i,j = (1, -1) or (-1, 1)
self.assertEqual(i, 1)
self.assertEqual(j, -1)
def test_none_assignment(self):
stmts = [
'None = 0',
'None += 0',
'__builtins__.None = 0',
'def None(): pass',
'class None: pass',
'(a, None) = 0, 0',
'for None in range(10): pass',
'def f(None): pass',
'import None',
'import x as None',
'from x import None',
'from x import y as None'
]
for stmt in stmts:
stmt += "\n"
self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'single')
self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'exec')
def test_import(self):
succeed = [
'import sys',
'import os, sys',
'import os as bar',
'import os.path as bar',
'from __future__ import nested_scopes, generators',
'from __future__ import (nested_scopes,\ngenerators)',
'from __future__ import (nested_scopes,\ngenerators,)',
'from sys import stdin, stderr, stdout',
'from sys import (stdin, stderr,\nstdout)',
'from sys import (stdin, stderr,\nstdout,)',
'from sys import (stdin\n, stderr, stdout)',
'from sys import (stdin\n, stderr, stdout,)',
'from sys import stdin as si, stdout as so, stderr as se',
'from sys import (stdin as si, stdout as so, stderr as se)',
'from sys import (stdin as si, stdout as so, stderr as se,)',
]
fail = [
'import (os, sys)',
'import (os), (sys)',
'import ((os), (sys))',
'import (sys',
'import sys)',
'import (os,)',
'import os As bar',
'import os.path a bar',
'from sys import stdin As stdout',
'from sys import stdin a stdout',
'from (sys) import stdin',
'from __future__ import (nested_scopes',
'from __future__ import nested_scopes)',
'from __future__ import nested_scopes,\ngenerators',
'from sys import (stdin',
'from sys import stdin)',
'from sys import stdin, stdout,\nstderr',
'from sys import stdin si',
'from sys import stdin,',
'from sys import (*)',
'from sys import (stdin,, stdout, stderr)',
'from sys import (stdin, stdout),',
]
for stmt in succeed:
compile(stmt, 'tmp', 'exec')
for stmt in fail:
self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'exec')
def test_for_distinct_code_objects(self):
# SF bug 1048870
def f():
f1 = lambda x=1: x
f2 = lambda x=2: x
return f1, f2
f1, f2 = f()
self.assertNotEqual(id(f1.__code__), id(f2.__code__))
def test_lambda_doc(self):
l = lambda: "foo"
self.assertIsNone(l.__doc__)
def test_encoding(self):
code = b'# -*- coding: badencoding -*-\npass\n'
self.assertRaises(SyntaxError, compile, code, 'tmp', 'exec')
code = '# -*- coding: badencoding -*-\n"\xc2\xa4"\n'
compile(code, 'tmp', 'exec')
self.assertEqual(eval(code), '\xc2\xa4')
code = '"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xc2\xa4')
code = b'"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xa4')
code = b'# -*- coding: latin1 -*-\n"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xc2\xa4')
code = b'# -*- coding: utf-8 -*-\n"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xa4')
code = b'# -*- coding: iso8859-15 -*-\n"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xc2\u20ac')
code = '"""\\\n# -*- coding: iso8859-15 -*-\n\xc2\xa4"""\n'
self.assertEqual(eval(code), '# -*- coding: iso8859-15 -*-\n\xc2\xa4')
code = b'"""\\\n# -*- coding: iso8859-15 -*-\n\xc2\xa4"""\n'
self.assertEqual(eval(code), '# -*- coding: iso8859-15 -*-\n\xa4')
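# --- Editor's sketch: how the coding cookie steers decoding of bytes source,
# as exercised above. The same byte \xa4 decodes differently under latin-1
# and iso8859-15. Names are illustrative.
_latin = eval(b'# -*- coding: latin1 -*-\n"\xa4"\n')
_euro = eval(b'# -*- coding: iso8859-15 -*-\n"\xa4"\n')
assert _latin == '\xa4'     # CURRENCY SIGN in latin-1
assert _euro == '\u20ac'    # EURO SIGN in iso8859-15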
def test_subscripts(self):
# SF bug 1448804
# Class to make testing subscript results easy
class str_map(object):
def __init__(self):
self.data = {}
def __getitem__(self, key):
return self.data[str(key)]
def __setitem__(self, key, value):
self.data[str(key)] = value
def __delitem__(self, key):
del self.data[str(key)]
def __contains__(self, key):
return str(key) in self.data
d = str_map()
# Index
d[1] = 1
self.assertEqual(d[1], 1)
d[1] += 1
self.assertEqual(d[1], 2)
del d[1]
self.assertNotIn(1, d)
# Tuple of indices
d[1, 1] = 1
self.assertEqual(d[1, 1], 1)
d[1, 1] += 1
self.assertEqual(d[1, 1], 2)
del d[1, 1]
self.assertNotIn((1, 1), d)
# Simple slice
d[1:2] = 1
self.assertEqual(d[1:2], 1)
d[1:2] += 1
self.assertEqual(d[1:2], 2)
del d[1:2]
self.assertNotIn(slice(1, 2), d)
# Tuple of simple slices
d[1:2, 1:2] = 1
self.assertEqual(d[1:2, 1:2], 1)
d[1:2, 1:2] += 1
self.assertEqual(d[1:2, 1:2], 2)
del d[1:2, 1:2]
self.assertNotIn((slice(1, 2), slice(1, 2)), d)
# Extended slice
d[1:2:3] = 1
self.assertEqual(d[1:2:3], 1)
d[1:2:3] += 1
self.assertEqual(d[1:2:3], 2)
del d[1:2:3]
self.assertNotIn(slice(1, 2, 3), d)
# Tuple of extended slices
d[1:2:3, 1:2:3] = 1
self.assertEqual(d[1:2:3, 1:2:3], 1)
d[1:2:3, 1:2:3] += 1
self.assertEqual(d[1:2:3, 1:2:3], 2)
del d[1:2:3, 1:2:3]
self.assertNotIn((slice(1, 2, 3), slice(1, 2, 3)), d)
# Ellipsis
d[...] = 1
self.assertEqual(d[...], 1)
d[...] += 1
self.assertEqual(d[...], 2)
del d[...]
self.assertNotIn(Ellipsis, d)
# Tuple of Ellipses
d[..., ...] = 1
self.assertEqual(d[..., ...], 1)
d[..., ...] += 1
self.assertEqual(d[..., ...], 2)
del d[..., ...]
self.assertNotIn((Ellipsis, Ellipsis), d)
def test_annotation_limit(self):
# more than 255 annotations, should compile ok
s = "def f(%s): pass"
s %= ', '.join('a%d:%d' % (i,i) for i in range(300))
compile(s, '?', 'exec')
def test_mangling(self):
class A:
def f():
__mangled = 1
__not_mangled__ = 2
import __mangled_mod
import __package__.module
self.assertIn("_A__mangled", A.f.__code__.co_varnames)
self.assertIn("__not_mangled__", A.f.__code__.co_varnames)
self.assertIn("_A__mangled_mod", A.f.__code__.co_varnames)
self.assertIn("__package__", A.f.__code__.co_varnames)
def test_compile_ast(self):
fname = __file__
if fname.lower().endswith('pyc'):
fname = fname[:-1]
with open(fname, encoding='utf-8') as f:
fcontents = f.read()
sample_code = [
['<assign>', 'x = 5'],
['<ifblock>', """if True:\n pass\n"""],
['<forblock>', """for n in [1, 2, 3]:\n print(n)\n"""],
['<deffunc>', """def foo():\n pass\nfoo()\n"""],
[fname, fcontents],
]
for fname, code in sample_code:
co1 = compile(code, '%s1' % fname, 'exec')
ast = compile(code, '%s2' % fname, 'exec', _ast.PyCF_ONLY_AST)
self.assertTrue(type(ast) == _ast.Module)
co2 = compile(ast, '%s3' % fname, 'exec')
self.assertEqual(co1, co2)
# the code object's filename comes from the second compilation step
self.assertEqual(co2.co_filename, '%s3' % fname)
# raise exception when node type doesn't match with compile mode
co1 = compile('print(1)', '<string>', 'exec', _ast.PyCF_ONLY_AST)
self.assertRaises(TypeError, compile, co1, '<ast>', 'eval')
# raise exception when node type is no start node
self.assertRaises(TypeError, compile, _ast.If(), '<ast>', 'exec')
# raise exception when node has invalid children
ast = _ast.Module()
ast.body = [_ast.BoolOp()]
self.assertRaises(TypeError, compile, ast, '<ast>', 'exec')
def test_dict_evaluation_order(self):
i = 0
def f():
nonlocal i
i += 1
return i
d = {f(): f(), f(): f()}
self.assertEqual(d, {1: 2, 3: 4})
def test_compile_filename(self):
for filename in 'file.py', b'file.py':
code = compile('pass', filename, 'exec')
self.assertEqual(code.co_filename, 'file.py')
for filename in bytearray(b'file.py'), memoryview(b'file.py'):
with self.assertWarns(DeprecationWarning):
code = compile('pass', filename, 'exec')
self.assertEqual(code.co_filename, 'file.py')
self.assertRaises(TypeError, compile, 'pass', list(b'file.py'), 'exec')
@support.cpython_only
def test_same_filename_used(self):
s = """def f(): pass\ndef g(): pass"""
c = compile(s, "myfile", "exec")
for obj in c.co_consts:
if isinstance(obj, types.CodeType):
self.assertIs(obj.co_filename, c.co_filename)
def test_single_statement(self):
self.compile_single("1 + 2")
self.compile_single("\n1 + 2")
self.compile_single("1 + 2\n")
self.compile_single("1 + 2\n\n")
self.compile_single("1 + 2\t\t\n")
self.compile_single("1 + 2\t\t\n ")
self.compile_single("1 + 2
self.compile_single("1; 2")
self.compile_single("import sys; sys")
self.compile_single("def f():\n pass")
self.compile_single("while False:\n pass")
self.compile_single("if x:\n f(x)")
self.compile_single("if x:\n f(x)\nelse:\n g(x)")
self.compile_single("class T:\n pass")
self.compile_single("c = '''\na=1\nb=2\nc=3\n'''")
def test_bad_single_statement(self):
self.assertInvalidSingle('1\n2')
self.assertInvalidSingle('def f(): pass')
self.assertInvalidSingle('a = 13\nb = 187')
self.assertInvalidSingle('del x\ndel y')
self.assertInvalidSingle('f()\ng()')
self.assertInvalidSingle('f()\n# blah\nblah()')
self.assertInvalidSingle('f()\nxy # blah\nblah()')
self.assertInvalidSingle('x = 5 # comment\nx = 6\n')
self.assertInvalidSingle("c = '''\nd=1\n'''\na = 1\n\nb = 2\n")
def test_particularly_evil_undecodable(self):
# Issue 24022
src = b'0000\x00\n00000000000\n\x00\n\x9e\n'
with tempfile.TemporaryDirectory() as tmpd:
fn = os.path.join(tmpd, "bad.py")
with open(fn, "wb") as fp:
fp.write(src)
res = script_helper.run_python_until_end(fn)[0]
self.assertIn(b"Non-UTF-8", res.err)
def test_yet_more_evil_still_undecodable(self):
# Issue #25388
src = b" with tempfile.TemporaryDirectory() as tmpd:
fn = os.path.join(tmpd, "bad.py")
with open(fn, "wb") as fp:
fp.write(src)
res = script_helper.run_python_until_end(fn)[0]
self.assertIn(b"Non-UTF-8", res.err)
@support.cpython_only
def test_compiler_recursion_limit(self):
# Expected limit is sys.getrecursionlimit() * the scaling factor
# in symtable.c (currently 3)
# We expect to fail *at* that limit, because we use up some of
# the stack depth limit in the test suite code
# So we check the expected limit and 75% of that
# XXX (ncoghlan): duplicating the scaling factor here is a little
# ugly. Perhaps it should be exposed somewhere...
fail_depth = sys.getrecursionlimit() * 3
crash_depth = sys.getrecursionlimit() * 300
success_depth = int(fail_depth * 0.75)
def check_limit(prefix, repeated, mode="single"):
expect_ok = prefix + repeated * success_depth
compile(expect_ok, '<test>', mode)
for depth in (fail_depth, crash_depth):
broken = prefix + repeated * depth
details = "Compiling ({!r} + {!r} * {})".format(
prefix, repeated, depth)
with self.assertRaises(RecursionError, msg=details):
compile(broken, '<test>', mode)
check_limit("a", "()")
check_limit("a", ".b")
check_limit("a", "[0]")
check_limit("a", "*a")
# XXX Crashes in the parser.
# check_limit("a", " if a else a")
# check_limit("if a: pass", "\nelif a: pass", mode="exec")
def test_null_terminated(self):
# The source code is null-terminated internally, but bytes-like
# objects are accepted, which might not be null-terminated.
with self.assertRaisesRegex(ValueError, "cannot contain null"):
compile("123\x00", "<dummy>", "eval")
with self.assertRaisesRegex(ValueError, "cannot contain null"):
compile(memoryview(b"123\x00"), "<dummy>", "eval")
code = compile(memoryview(b"123\x00")[1:-1], "<dummy>", "eval")
self.assertEqual(eval(code), 23)
code = compile(memoryview(b"1234")[1:-1], "<dummy>", "eval")
self.assertEqual(eval(code), 23)
code = compile(memoryview(b"$23$")[1:-1], "<dummy>", "eval")
self.assertEqual(eval(code), 23)
# Also test when eval() and exec() do the compilation step
self.assertEqual(eval(memoryview(b"1234")[1:-1]), 23)
namespace = dict()
exec(memoryview(b"ax = 123")[1:-1], namespace)
self.assertEqual(namespace['x'], 12)
def check_constant(self, func, expected):
for const in func.__code__.co_consts:
if repr(const) == repr(expected):
break
else:
self.fail("unable to find constant %r in %r"
% (expected, func.__code__.co_consts))
# Merging equal constants is not a strict requirement for the Python
# semantics; it's more of an implementation detail.
@support.cpython_only
def test_merge_constants(self):
# Issue #25843: compile() must merge constants which are equal
# and have the same type.
def check_same_constant(const):
ns = {}
code = "f1, f2 = lambda: %r, lambda: %r" % (const, const)
exec(code, ns)
f1 = ns['f1']
f2 = ns['f2']
self.assertIs(f1.__code__, f2.__code__)
self.check_constant(f1, const)
self.assertEqual(repr(f1()), repr(const))
check_same_constant(None)
check_same_constant(0)
check_same_constant(0.0)
check_same_constant(b'abc')
check_same_constant('abc')
# Note: "lambda: ..." emits "LOAD_CONST Ellipsis",
# whereas "lambda: Ellipsis" emits "LOAD_GLOBAL Ellipsis"
f1, f2 = lambda: ..., lambda: ...
self.assertIs(f1.__code__, f2.__code__)
self.check_constant(f1, Ellipsis)
self.assertEqual(repr(f1()), repr(Ellipsis))
# Merge constants in tuple or frozenset
f1, f2 = lambda: "not a name", lambda: ("not a name",)
f3 = lambda x: x in {("not a name",)}
self.assertIs(f1.__code__.co_consts[1],
f2.__code__.co_consts[1][0])
self.assertIs(next(iter(f3.__code__.co_consts[1])),
f2.__code__.co_consts[1])
# {0} is converted to a constant frozenset({0}) by the peephole
# optimizer
f1, f2 = lambda x: x in {0}, lambda x: x in {0}
self.assertIs(f1.__code__, f2.__code__)
self.check_constant(f1, frozenset({0}))
self.assertTrue(f1(0))
# Merging equal co_linetable and co_code is not a strict requirement
# for the Python semantics; it's more of an implementation detail.
@support.cpython_only
def test_merge_code_attrs(self):
# See https://bugs.python.org/issue42217
f1 = lambda x: x.y.z
f2 = lambda a: a.b.c
self.assertIs(f1.__code__.co_linetable, f2.__code__.co_linetable)
self.assertIs(f1.__code__.co_code, f2.__code__.co_code)
# Stripping unused constants is not a strict requirement for the
# Python semantics; it's more of an implementation detail.
@support.cpython_only
def test_strip_unused_consts(self):
# Python 3.10rc1 appended None to co_consts when None is not used
# at all. See bpo-45056.
def f1():
"docstring"
return 42
self.assertEqual(f1.__code__.co_consts, ("docstring", 42))
# This is a regression test for a CPython-specific peephole optimizer
# implementation bug present in a few releases. Its assertion verifies
# that peephole optimization was actually done, though that isn't an
# indication of the bug's presence or not (crashing is).
@support.cpython_only
def test_peephole_opt_unreachable_code_array_access_in_bounds(self):
def unused_code_at_end():
return 3
raise RuntimeError("unreachable")
# The above function definition will trigger the out of bounds
# bug in the peephole optimizer as it scans opcodes past the
# RETURN_VALUE opcode. This does not always crash an interpreter.
# When you build with the clang memory sanitizer it reliably aborts.
self.assertEqual(
'RETURN_VALUE',
list(dis.get_instructions(unused_code_at_end))[-1].opname)
def test_dont_merge_constants(self):
# Issue #25843: compile() must not merge constants which are equal
# but have a different type.
def check_different_constants(const1, const2):
ns = {}
exec("f1, f2 = lambda: %r, lambda: %r" % (const1, const2), ns)
f1 = ns['f1']
f2 = ns['f2']
self.assertIsNot(f1.__code__, f2.__code__)
self.assertNotEqual(f1.__code__, f2.__code__)
self.check_constant(f1, const1)
self.check_constant(f2, const2)
self.assertEqual(repr(f1()), repr(const1))
self.assertEqual(repr(f2()), repr(const2))
check_different_constants(0, 0.0)
check_different_constants(+0.0, -0.0)
check_different_constants((0,), (0.0,))
check_different_constants('a', b'a')
check_different_constants(('a',), (b'a',))
# check_different_constants() cannot be used because repr(-0j) is
# '(-0-0j)', but '(-0-0j)' evaluates back to 0j: we lose the sign.
f1, f2 = lambda: +0.0j, lambda: -0.0j
self.assertIsNot(f1.__code__, f2.__code__)
self.check_constant(f1, +0.0j)
self.check_constant(f2, -0.0j)
self.assertEqual(repr(f1()), repr(+0.0j))
self.assertEqual(repr(f2()), repr(-0.0j))
# {0} is converted to a constant frozenset({0}) by the peephole
# optimizer
f1, f2 = lambda x: x in {0}, lambda x: x in {0.0}
self.assertIsNot(f1.__code__, f2.__code__)
self.check_constant(f1, frozenset({0}))
self.check_constant(f2, frozenset({0.0}))
self.assertTrue(f1(0))
self.assertTrue(f2(0.0))
def test_path_like_objects(self):
# An implicit test for PyUnicode_FSDecoder().
compile("42", FakePath("test_compile_pathlike"), "single")
def test_stack_overflow(self):
# bpo-31113: Stack overflow when compile a long sequence of
# complex statements.
compile("if a: b\n" * 200000, "<dummy>", "exec")
# Multiple users rely on the fact that CPython does not generate
# bytecode for dead code blocks. See bpo-37500 for more context.
@support.cpython_only
def test_dead_blocks_do_not_generate_bytecode(self):
def unused_block_if():
if 0:
return 42
def unused_block_while():
while 0:
return 42
def unused_block_if_else():
if 1:
return None
else:
return 42
def unused_block_while_else():
while 1:
return None
else:
return 42
funcs = [unused_block_if, unused_block_while,
unused_block_if_else, unused_block_while_else]
for func in funcs:
opcodes = list(dis.get_instructions(func))
self.assertLessEqual(len(opcodes), 4)
self.assertEqual('LOAD_CONST', opcodes[-2].opname)
self.assertEqual(None, opcodes[-2].argval)
self.assertEqual('RETURN_VALUE', opcodes[-1].opname)
def test_false_while_loop(self):
def break_in_while():
while False:
break
def continue_in_while():
while False:
continue
funcs = [break_in_while, continue_in_while]
# Check that we did not raise but we also don't generate bytecode
for func in funcs:
opcodes = list(dis.get_instructions(func))
self.assertEqual(3, len(opcodes))
self.assertEqual('LOAD_CONST', opcodes[1].opname)
self.assertEqual(None, opcodes[1].argval)
self.assertEqual('RETURN_VALUE', opcodes[2].opname)
def test_consts_in_conditionals(self):
def and_true(x):
return True and x
def and_false(x):
return False and x
def or_true(x):
return True or x
def or_false(x):
return False or x
funcs = [and_true, and_false, or_true, or_false]
# Check that condition is removed.
for func in funcs:
with self.subTest(func=func):
opcodes = list(dis.get_instructions(func))
self.assertLessEqual(len(opcodes), 3)
self.assertIn('LOAD_', opcodes[-2].opname)
self.assertEqual('RETURN_VALUE', opcodes[-1].opname)
def test_imported_load_method(self):
sources = [
"""\
import os
def foo():
return os.uname()
""",
"""\
import os as operating_system
def foo():
return operating_system.uname()
""",
"""\
from os import path
def foo(x):
return path.join(x)
""",
"""\
from os import path as os_path
def foo(x):
return os_path.join(x)
"""
]
for source in sources:
namespace = {}
exec(textwrap.dedent(source), namespace)
func = namespace['foo']
with self.subTest(func=func.__name__):
opcodes = list(dis.get_instructions(func))
instructions = [opcode.opname for opcode in opcodes]
self.assertNotIn('LOAD_METHOD', instructions)
self.assertIn('LOAD_ATTR', instructions)
self.assertIn('PRECALL', instructions)
def test_lineno_procedure_call(self):
def call():
(
print()
)
line1 = call.__code__.co_firstlineno + 1
assert line1 not in [line for (_, _, line) in call.__code__.co_lines()]
def test_lineno_after_implicit_return(self):
TRUE = True
# Don't use constant True or False, as the compiler will remove the test
def if1(x):
x()
if TRUE:
pass
def if2(x):
x()
if TRUE:
pass
else:
pass
def if3(x):
x()
if TRUE:
pass
else:
return None
def if4(x):
x()
if not TRUE:
pass
funcs = [ if1, if2, if3, if4]
lastlines = [ 3, 3, 3, 2]
frame = None
def save_caller_frame():
nonlocal frame
frame = sys._getframe(1)
for func, lastline in zip(funcs, lastlines, strict=True):
with self.subTest(func=func):
func(save_caller_frame)
self.assertEqual(frame.f_lineno-frame.f_code.co_firstlineno, lastline)
def test_lineno_after_no_code(self):
def no_code1():
"doc string"
def no_code2():
a: int
for func in (no_code1, no_code2):
with self.subTest(func=func):
code = func.__code__
lines = list(code.co_lines())
self.assertEqual(len(lines), 1)
start, end, line = lines[0]
self.assertEqual(start, 0)
self.assertEqual(end, len(code.co_code))
self.assertEqual(line, code.co_firstlineno)
def test_lineno_attribute(self):
def load_attr():
return (
o.
a
)
load_attr_lines = [ 0, 2, 3, 1 ]
def load_method():
return (
o.
m(
0
)
)
load_method_lines = [ 0, 2, 3, 4, 3, 1 ]
def store_attr():
(
o.
a
) = (
v
)
store_attr_lines = [ 0, 5, 2, 3 ]
def aug_store_attr():
(
o.
a
) += (
v
)
aug_store_attr_lines = [ 0, 2, 3, 5, 1, 3 ]
funcs = [ load_attr, load_method, store_attr, aug_store_attr]
func_lines = [ load_attr_lines, load_method_lines,
store_attr_lines, aug_store_attr_lines]
for func, lines in zip(funcs, func_lines, strict=True):
with self.subTest(func=func):
code_lines = [ line-func.__code__.co_firstlineno
for (_, _, line) in func.__code__.co_lines()
if line is not None ]
self.assertEqual(lines, code_lines)
def test_line_number_genexp(self):
def return_genexp():
return (1
for
x
in
y)
genexp_lines = [1, 3, 1]
genexp_code = return_genexp.__code__.co_consts[1]
code_lines = [ None if line is None else line-return_genexp.__code__.co_firstlineno
for (_, _, line) in genexp_code.co_lines() ]
self.assertEqual(genexp_lines, code_lines)
def test_line_number_implicit_return_after_async_for(self):
async def test(aseq):
async for i in aseq:
body
expected_lines = [0, 1, 2, 1]
code_lines = [ None if line is None else line-test.__code__.co_firstlineno
for (_, _, line) in test.__code__.co_lines() ]
self.assertEqual(expected_lines, code_lines)
def test_big_dict_literal(self):
# The compiler has a flushing point in "compiler_dict" that compiles
# a portion of the dictionary literal when the loop that iterates over the
# items reaches 0xFFFF elements, but the code was not including the boundary
# element, dropping the key at position 0xFFFF. See bpo-41531 for more information.
dict_size = 0xFFFF + 1
the_dict = "{" + ",".join(f"{x}:{x}" for x in range(dict_size)) + "}"
self.assertEqual(len(eval(the_dict)), dict_size)
def test_redundant_jump_in_if_else_break(self):
# Check that no bytecode containing jumps that simply point to the next
# instruction is generated around if-else-break style structures. See bpo-42615.
def if_else_break():
val = 1
while True:
if val > 0:
val -= 1
else:
break
val = -1
INSTR_SIZE = 2
HANDLED_JUMPS = (
'POP_JUMP_IF_FALSE',
'POP_JUMP_IF_TRUE',
'JUMP_ABSOLUTE',
'JUMP_FORWARD',
)
for line, instr in enumerate(
dis.Bytecode(if_else_break, show_caches=True)
):
if instr.opname == 'JUMP_FORWARD':
self.assertNotEqual(instr.arg, 0)
elif instr.opname in HANDLED_JUMPS:
self.assertNotEqual(instr.arg, (line + 1)*INSTR_SIZE)
def test_no_wraparound_jump(self):
# See https://bugs.python.org/issue46724
def while_not_chained(a, b, c):
while not (a < b < c):
pass
for instr in dis.Bytecode(while_not_chained):
self.assertNotEqual(instr.opname, "EXTENDED_ARG")
@requires_debug_ranges()
class TestSourcePositions(unittest.TestCase):
# Ensure that compiled code snippets have correct line and column numbers
# in `co_positions()`.
def check_positions_against_ast(self, snippet):
# Basic check that makes sure each line and column is at least present
# in one of the AST nodes of the source code.
code = compile(snippet, 'test_compile.py', 'exec')
ast_tree = compile(snippet, 'test_compile.py', 'exec', _ast.PyCF_ONLY_AST)
self.assertTrue(type(ast_tree) == _ast.Module)
# Use an AST visitor that notes all the offsets.
lines, end_lines, columns, end_columns = set(), set(), set(), set()
class SourceOffsetVisitor(ast.NodeVisitor):
def generic_visit(self, node):
super().generic_visit(node)
if not isinstance(node, ast.expr) and not isinstance(node, ast.stmt):
return
lines.add(node.lineno)
end_lines.add(node.end_lineno)
columns.add(node.col_offset)
end_columns.add(node.end_col_offset)
SourceOffsetVisitor().visit(ast_tree)
# Check against the positions in the code object.
for (line, end_line, col, end_col) in code.co_positions():
# If the offset is not None (None indicates missing data), ensure that
# it was part of one of the AST nodes.
if line is not None:
self.assertIn(line, lines)
if end_line is not None:
self.assertIn(end_line, end_lines)
if col is not None:
self.assertIn(col, columns)
if end_col is not None:
self.assertIn(end_col, end_columns)
return code, ast_tree
def assertOpcodeSourcePositionIs(self, code, opcode,
line, end_line, column, end_column, occurrence=1):
for instr, position in zip(
dis.Bytecode(code, show_caches=True), code.co_positions(), strict=True
):
if instr.opname == opcode:
occurrence -= 1
if not occurrence:
self.assertEqual(position[0], line)
self.assertEqual(position[1], end_line)
self.assertEqual(position[2], column)
self.assertEqual(position[3], end_column)
return
self.fail(f"Opcode {opcode} not found in code")
def test_simple_assignment(self):
snippet = "x = 1"
self.check_positions_against_ast(snippet)
def test_compiles_to_extended_op_arg(self):
# Make sure we still have valid positions when the code compiles to an
# EXTENDED_ARG by performing a loop which needs a JUMP_ABSOLUTE after
# a bunch of opcodes.
snippet = "x = x\n" * 10_000
snippet += ("while x != 0:\n"
" x -= 1\n"
"while x != 0:\n"
" x += 1\n"
)
compiled_code, _ = self.check_positions_against_ast(snippet)
self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP',
line=10_000 + 2, end_line=10_000 + 2,
column=2, end_column=8, occurrence=1)
self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP',
line=10_000 + 4, end_line=10_000 + 4,
column=2, end_column=9, occurrence=2)
def test_multiline_expression(self):
snippet = """\
f(
1, 2, 3, 4
)
"""
compiled_code, _ = self.check_positions_against_ast(snippet)
self.assertOpcodeSourcePositionIs(compiled_code, 'CALL',
line=1, end_line=3, column=0, end_column=1)
def test_very_long_line_end_offset(self):
# Make sure we get None when the column offset is too large to
# store in a byte.
long_string = "a" * 1000
snippet = f"g('{long_string}')"
compiled_code, _ = self.check_positions_against_ast(snippet)
self.assertOpcodeSourcePositionIs(compiled_code, 'CALL',
line=1, end_line=1, column=None, end_column=None)
def test_complex_single_line_expression(self):
snippet = "a - b @ (c * x['key'] + 23)"
compiled_code, _ = self.check_positions_against_ast(snippet)
self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_SUBSCR',
line=1, end_line=1, column=13, end_column=21)
self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP',
line=1, end_line=1, column=9, end_column=21, occurrence=1)
self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP',
line=1, end_line=1, column=9, end_column=26, occurrence=2)
self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP',
line=1, end_line=1, column=4, end_column=27, occurrence=3)
self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP',
line=1, end_line=1, column=0, end_column=27, occurrence=4)
class TestExpressionStackSize(unittest.TestCase):
# These tests check that the computed stack size for a code object
# stays within reasonable bounds (see issue #21523 for an example
# dysfunction).
N = 100
def check_stack_size(self, code):
# To assert that the alleged stack size is not O(N), we
# check that it is smaller than log(N).
if isinstance(code, str):
code = compile(code, "<foo>", "single")
max_size = math.ceil(math.log(len(code.co_code)))
self.assertLessEqual(code.co_stacksize, max_size)
def test_and(self):
self.check_stack_size("x and " * self.N + "x")
def test_or(self):
self.check_stack_size("x or " * self.N + "x")
def test_and_or(self):
self.check_stack_size("x and x or " * self.N + "x")
def test_chained_comparison(self):
self.check_stack_size("x < " * self.N + "x")
def test_if_else(self):
self.check_stack_size("x if x else " * self.N + "x")
def test_binop(self):
self.check_stack_size("x + " * self.N + "x")
def test_list(self):
self.check_stack_size("[" + "x, " * self.N + "x]")
def test_tuple(self):
self.check_stack_size("(" + "x, " * self.N + "x)")
def test_set(self):
self.check_stack_size("{" + "x, " * self.N + "x}")
def test_dict(self):
self.check_stack_size("{" + "x:x, " * self.N + "x:x}")
def test_func_args(self):
self.check_stack_size("f(" + "x, " * self.N + ")")
def test_func_kwargs(self):
kwargs = (f'a{i}=x' for i in range(self.N))
self.check_stack_size("f(" + ", ".join(kwargs) + ")")
    def test_meth_args(self):
self.check_stack_size("o.m(" + "x, " * self.N + ")")
def test_meth_kwargs(self):
kwargs = (f'a{i}=x' for i in range(self.N))
self.check_stack_size("o.m(" + ", ".join(kwargs) + ")")
def test_func_and(self):
code = "def f(x):\n"
code += " x and x\n" * self.N
self.check_stack_size(code)
class TestStackSizeStability(unittest.TestCase):
# Check that repeating certain snippets doesn't increase the stack size
# beyond what a single snippet requires.
def check_stack_size(self, snippet, async_=False):
def compile_snippet(i):
ns = {}
script = """def func():\n""" + i * snippet
if async_:
script = "async " + script
code = compile(script, "<script>", "exec")
exec(code, ns, ns)
return ns['func'].__code__
sizes = [compile_snippet(i).co_stacksize for i in range(2, 5)]
if len(set(sizes)) != 1:
import dis, io
out = io.StringIO()
dis.dis(compile_snippet(1), file=out)
self.fail("stack sizes diverge with
"%s\n%s\n%s" % (sizes, snippet, out.getvalue()))
def test_if(self):
snippet = """
if x:
a
"""
self.check_stack_size(snippet)
def test_if_else(self):
snippet = """
if x:
a
elif y:
b
else:
c
"""
self.check_stack_size(snippet)
def test_try_except_bare(self):
snippet = """
try:
a
except:
b
"""
self.check_stack_size(snippet)
def test_try_except_qualified(self):
snippet = """
try:
a
except ImportError:
b
except:
c
else:
d
"""
self.check_stack_size(snippet)
def test_try_except_as(self):
snippet = """
try:
a
except ImportError as e:
b
except:
c
else:
d
"""
self.check_stack_size(snippet)
def test_try_except_star_qualified(self):
snippet = """
try:
a
except* ImportError:
b
else:
c
"""
self.check_stack_size(snippet)
def test_try_except_star_as(self):
snippet = """
try:
a
except* ImportError as e:
b
else:
c
"""
self.check_stack_size(snippet)
def test_try_except_star_finally(self):
snippet = """
try:
a
except* A:
b
finally:
c
"""
self.check_stack_size(snippet)
def test_try_finally(self):
snippet = """
try:
a
finally:
b
"""
self.check_stack_size(snippet)
def test_with(self):
snippet = """
with x as y:
a
"""
self.check_stack_size(snippet)
def test_while_else(self):
snippet = """
while x:
a
else:
b
"""
self.check_stack_size(snippet)
def test_for(self):
snippet = """
for x in y:
a
"""
self.check_stack_size(snippet)
def test_for_else(self):
snippet = """
for x in y:
a
else:
b
"""
self.check_stack_size(snippet)
def test_for_break_continue(self):
snippet = """
for x in y:
if z:
break
elif u:
continue
else:
a
else:
b
"""
self.check_stack_size(snippet)
def test_for_break_continue_inside_try_finally_block(self):
snippet = """
for x in y:
try:
if z:
break
elif u:
continue
else:
a
finally:
f
else:
b
"""
self.check_stack_size(snippet)
def test_for_break_continue_inside_finally_block(self):
snippet = """
for x in y:
try:
t
finally:
if z:
break
elif u:
continue
else:
a
else:
b
"""
self.check_stack_size(snippet)
def test_for_break_continue_inside_except_block(self):
snippet = """
for x in y:
try:
t
except:
if z:
break
elif u:
continue
else:
a
else:
b
"""
self.check_stack_size(snippet)
def test_for_break_continue_inside_with_block(self):
snippet = """
for x in y:
with c:
if z:
break
elif u:
continue
else:
a
else:
b
"""
self.check_stack_size(snippet)
def test_return_inside_try_finally_block(self):
snippet = """
try:
if z:
return
else:
a
finally:
f
"""
self.check_stack_size(snippet)
def test_return_inside_finally_block(self):
snippet = """
try:
t
finally:
if z:
return
else:
a
"""
self.check_stack_size(snippet)
def test_return_inside_except_block(self):
snippet = """
try:
t
except:
if z:
return
else:
a
"""
self.check_stack_size(snippet)
def test_return_inside_with_block(self):
snippet = """
with c:
if z:
return
else:
a
"""
self.check_stack_size(snippet)
def test_async_with(self):
snippet = """
async with x as y:
a
"""
self.check_stack_size(snippet, async_=True)
def test_async_for(self):
snippet = """
async for x in y:
a
"""
self.check_stack_size(snippet, async_=True)
def test_async_for_else(self):
snippet = """
async for x in y:
a
else:
b
"""
self.check_stack_size(snippet, async_=True)
def test_for_break_continue_inside_async_with_block(self):
snippet = """
for x in y:
async with c:
if z:
break
elif u:
continue
else:
a
else:
b
"""
self.check_stack_size(snippet, async_=True)
def test_return_inside_async_with_block(self):
snippet = """
async with c:
if z:
return
else:
a
"""
self.check_stack_size(snippet, async_=True)
if __name__ == "__main__":
unittest.main()
| true
| true
|
79046bb7fa59f674883f400dbaf8775cea35d79b
| 7,625
|
py
|
Python
|
scripts/ref_leak_test.py
|
Sekenre/cbor2
|
f789dd080e80607fb5486950e60e700940488c60
|
[
"MIT"
] | 127
|
2016-06-13T17:38:42.000Z
|
2022-02-13T23:38:35.000Z
|
scripts/ref_leak_test.py
|
Sekenre/cbor2
|
f789dd080e80607fb5486950e60e700940488c60
|
[
"MIT"
] | 125
|
2016-09-17T18:06:27.000Z
|
2022-03-15T18:34:09.000Z
|
scripts/ref_leak_test.py
|
Sekenre/cbor2
|
f789dd080e80607fb5486950e60e700940488c60
|
[
"MIT"
] | 43
|
2016-09-09T09:39:58.000Z
|
2021-12-22T20:08:56.000Z
|
#!/usr/bin/env python
"""
This is a crude script for detecting reference leaks in the C-based cbor2
implementation. It is by no means fool-proof and won't pick up all possible ref
leaks, but it is a reasonable "confidence test" that things aren't horribly
wrong. The script assumes you're in an environment with objgraph and cbor2
installed.
The script outputs a nicely formatted table of the tests run, and the number of
"extra" objects that existed after the tests (indicating a ref-leak), or "-" if
no extra objects existed. The ideal output is obviously "-" in all rows.
"""
import sys
import objgraph
import tracemalloc
from datetime import datetime, timezone, timedelta
from fractions import Fraction
from decimal import Decimal
from collections import namedtuple, OrderedDict
def import_cbor2():
# Similar hack to that used in tests/conftest to get separate C and Python
# implementations
import cbor2
import cbor2.types
import cbor2.encoder
import cbor2.decoder
class Module(object):
# Mock module class
pass
py_cbor2 = Module()
for source in (cbor2.types, cbor2.encoder, cbor2.decoder):
for name in dir(source):
setattr(py_cbor2, name, getattr(source, name))
return cbor2, py_cbor2
c_cbor2, py_cbor2 = import_cbor2()
UTC = timezone.utc
TEST_VALUES = [
# label, kwargs, value
('None', {}, None),
('10e0', {}, 1),
('10e12', {}, 1000000000000),
('10e29', {}, 100000000000000000000000000000),
('-10e0', {}, -1),
('-10e12', {}, -1000000000000),
('-10e29', {}, -100000000000000000000000000000),
('float1', {}, 1.0),
('float2', {}, 3.8),
('str', {}, 'foo'),
('bigstr', {}, 'foobarbaz ' * 1000),
('bytes', {}, b'foo'),
('bigbytes', {}, b'foobarbaz\x00' * 1000),
('datetime', {'timezone': UTC}, datetime(2019, 5, 9, 22, 4, 5, 123456)),
('decimal', {}, Decimal('1.1')),
('fraction', {}, Fraction(1, 5)),
('intlist', {}, [1, 2, 3]),
('bigintlist', {}, [1, 2, 3] * 1000),
('strlist', {}, ['foo', 'bar', 'baz']),
('bigstrlist', {}, ['foo', 'bar', 'baz'] * 1000),
('dict', {}, {'a': 1, 'b': 2, 'c': 3}),
('bigdict', {}, {'a' * i: i for i in range(1000)}),
('set', {}, {1, 2, 3}),
('bigset', {}, set(range(1000))),
('bigdictlist', {}, [{'a' * i: i for i in range(100)}] * 100),
('objectdict', {'timezone': UTC},
{'name': 'Foo', 'species': 'cat', 'dob': datetime(2013, 5, 20), 'weight': 4.1}),
('objectdictlist', {'timezone': UTC},
[{'name': 'Foo', 'species': 'cat', 'dob': datetime(2013, 5, 20), 'weight': 4.1}] * 100),
]
Leaks = namedtuple('Leaks', ('count', 'comparison'))
Tests = namedtuple('Test', ('objgraph', 'malloc'))
Result = namedtuple('Result', ('encoding', 'decoding', 'roundtrip'))
peak = {}
def growth():
return objgraph.growth(limit=None, peak_stats=peak)
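# Hedged usage sketch for growth() above (assumes objgraph is importable,
# which this script already requires): the first call primes the shared
# peak_stats baseline, so a second call reports only the object types
# created in between, as (type_name, total, delta) rows.
def _demo_growth():
    growth()                        # prime the baseline
    keep = [[] for _ in range(10)]  # allocate some new list objects
    return keep, growth()           # the second call reports the delta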
def test_malloc(op):
count = 0
start = datetime.now()
# NOTE: Filter pointing to the op() line in the loop below, because we're
# only interested in memory allocated by that line. Naturally, if this file
# is edited, the lineno parameter below must be adjusted!
only_op = tracemalloc.Filter(True, __file__, lineno=102, all_frames=True)
tracemalloc.start(10)
try:
# Perform a pre-run of op so that any one-time memory allocation
# (module imports, etc.) don't affect the later diffs
op()
before = tracemalloc.take_snapshot().filter_traces([only_op])
while True:
count += 1
op()
if datetime.now() - start > timedelta(seconds=0.2):
break
after = tracemalloc.take_snapshot().filter_traces([only_op])
diff = after.compare_to(before, 'traceback')
diff = [entry for entry in diff if entry.size_diff > 0]
return count, diff
finally:
tracemalloc.stop()
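# Minimal sketch of the snapshot/compare pattern test_malloc uses above,
# minus the per-line Filter (which is pinned to a hard-coded lineno):
# tracebacks whose size_diff stays positive across runs are leak candidates.
def _demo_tracemalloc(op):
    tracemalloc.start(10)
    try:
        op()  # warm-up so one-time allocations stay out of the diff
        before = tracemalloc.take_snapshot()
        for _ in range(100):
            op()
        after = tracemalloc.take_snapshot()
        return [entry for entry in after.compare_to(before, 'traceback')
                if entry.size_diff > 0]
    finally:
        tracemalloc.stop()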
def test_objgraph(op):
count = 0
start = datetime.now()
# See notes above
op()
growth()
while True:
count += 1
op()
if datetime.now() - start > timedelta(seconds=0.2):
break
return count, growth()
def test(op):
return Tests(Leaks(*test_objgraph(op)), Leaks(*test_malloc(op)))
def format_leaks(result):
if result.objgraph.comparison:
return '%d objs (/%d)' % (
sum(leak[-1] for leak in result.objgraph.comparison),
result.objgraph.count)
elif result.malloc.comparison and (
result.malloc.count < result.malloc.comparison[0].size_diff):
# Running the loop always results in *some* memory allocation, but as
# long as the bytes allocated are less than the number of loops it's
# unlikely to be an actual leak
return '%d bytes (/%d)' % (
result.malloc.comparison[0].size_diff, result.malloc.count)
else:
return '-'
def output_table(results):
# Build table content
head = ('Test', 'Encoding', 'Decoding', 'Round-trip')
rows = [head] + [
(
label,
format_leaks(result.encoding),
format_leaks(result.decoding),
format_leaks(result.roundtrip),
)
for label, result in results.items()
]
# Format table output
cols = zip(*rows)
col_widths = [max(len(row) for row in col) for col in cols]
sep = ''.join((
'+-',
'-+-'.join('-' * width for width in col_widths),
'-+',
))
print(sep)
print(''.join((
'| ',
' | '.join(
'{value:<{width}}'.format(value=value, width=width)
for value, width in zip(head, col_widths)
),
' |',
)))
print(sep)
for row in rows[1:]:
print(''.join((
'| ',
' | '.join(
'{value:<{width}}'.format(value=value, width=width)
for value, width in zip(row, col_widths)
),
' |',
)))
print(sep)
print()
print("""\
There *will* be false positives in the table above. Ignore leaks involving a
tiny number of objects (e.g. 1) or a small number of bytes (e.g. < 8Kb) as such
allocations are quite normal.
In the case of a ref-leak of an object that can reference others (lists, sets,
dicts, or anything with a __dict__), expect to see 100s or 1000s of "objs"
leaked. In the case of a ref-leak of a simple object (int, str, bytes, etc.),
expect to see a few hundred Kb allocated.
If leaks occur across the board, it's likely to be in something universal like
dump/load. If it's restricted to a type, check the encoding and decoding
methods for that type.
""")
def main():
results = OrderedDict()
sys.stderr.write("Testing")
sys.stderr.flush()
for name, kwargs, value in TEST_VALUES:
encoded = py_cbor2.dumps(value, **kwargs)
results[name] = Result(
encoding=test(lambda: c_cbor2.dumps(value, **kwargs)),
decoding=test(lambda: c_cbor2.loads(encoded)),
roundtrip=test(lambda: c_cbor2.loads(c_cbor2.dumps(value, **kwargs))),
)
sys.stderr.write(".")
sys.stderr.flush()
sys.stderr.write("\n")
sys.stderr.write("\n")
output_table(results)
sys.stderr.write("\n")
if __name__ == '__main__':
main()
| 33.738938
| 93
| 0.570361
|
import sys
import objgraph
import tracemalloc
from datetime import datetime, timezone, timedelta
from fractions import Fraction
from decimal import Decimal
from collections import namedtuple, OrderedDict
def import_cbor2():
import cbor2
import cbor2.types
import cbor2.encoder
import cbor2.decoder
class Module(object):
pass
py_cbor2 = Module()
for source in (cbor2.types, cbor2.encoder, cbor2.decoder):
for name in dir(source):
setattr(py_cbor2, name, getattr(source, name))
return cbor2, py_cbor2
c_cbor2, py_cbor2 = import_cbor2()
UTC = timezone.utc
TEST_VALUES = [
('None', {}, None),
('10e0', {}, 1),
('10e12', {}, 1000000000000),
('10e29', {}, 100000000000000000000000000000),
('-10e0', {}, -1),
('-10e12', {}, -1000000000000),
('-10e29', {}, -100000000000000000000000000000),
('float1', {}, 1.0),
('float2', {}, 3.8),
('str', {}, 'foo'),
('bigstr', {}, 'foobarbaz ' * 1000),
('bytes', {}, b'foo'),
('bigbytes', {}, b'foobarbaz\x00' * 1000),
('datetime', {'timezone': UTC}, datetime(2019, 5, 9, 22, 4, 5, 123456)),
('decimal', {}, Decimal('1.1')),
('fraction', {}, Fraction(1, 5)),
('intlist', {}, [1, 2, 3]),
('bigintlist', {}, [1, 2, 3] * 1000),
('strlist', {}, ['foo', 'bar', 'baz']),
('bigstrlist', {}, ['foo', 'bar', 'baz'] * 1000),
('dict', {}, {'a': 1, 'b': 2, 'c': 3}),
('bigdict', {}, {'a' * i: i for i in range(1000)}),
('set', {}, {1, 2, 3}),
('bigset', {}, set(range(1000))),
('bigdictlist', {}, [{'a' * i: i for i in range(100)}] * 100),
('objectdict', {'timezone': UTC},
{'name': 'Foo', 'species': 'cat', 'dob': datetime(2013, 5, 20), 'weight': 4.1}),
('objectdictlist', {'timezone': UTC},
[{'name': 'Foo', 'species': 'cat', 'dob': datetime(2013, 5, 20), 'weight': 4.1}] * 100),
]
Leaks = namedtuple('Leaks', ('count', 'comparison'))
Tests = namedtuple('Test', ('objgraph', 'malloc'))
Result = namedtuple('Result', ('encoding', 'decoding', 'roundtrip'))
peak = {}
def growth():
return objgraph.growth(limit=None, peak_stats=peak)
def test_malloc(op):
count = 0
start = datetime.now()
# only interested in memory allocated by that line. Naturally, if this file
# is edited, the lineno parameter below must be adjusted!
only_op = tracemalloc.Filter(True, __file__, lineno=102, all_frames=True)
tracemalloc.start(10)
try:
# Perform a pre-run of op so that any one-time memory allocation
# (module imports, etc.) don't affect the later diffs
op()
before = tracemalloc.take_snapshot().filter_traces([only_op])
while True:
count += 1
op()
if datetime.now() - start > timedelta(seconds=0.2):
break
after = tracemalloc.take_snapshot().filter_traces([only_op])
diff = after.compare_to(before, 'traceback')
diff = [entry for entry in diff if entry.size_diff > 0]
return count, diff
finally:
tracemalloc.stop()
def test_objgraph(op):
count = 0
start = datetime.now()
op()
growth()
while True:
count += 1
op()
if datetime.now() - start > timedelta(seconds=0.2):
break
return count, growth()
def test(op):
return Tests(Leaks(*test_objgraph(op)), Leaks(*test_malloc(op)))
def format_leaks(result):
if result.objgraph.comparison:
return '%d objs (/%d)' % (
sum(leak[-1] for leak in result.objgraph.comparison),
result.objgraph.count)
elif result.malloc.comparison and (
result.malloc.count < result.malloc.comparison[0].size_diff):
# unlikely to be an actual leak
return '%d bytes (/%d)' % (
result.malloc.comparison[0].size_diff, result.malloc.count)
else:
return '-'
def output_table(results):
# Build table content
head = ('Test', 'Encoding', 'Decoding', 'Round-trip')
rows = [head] + [
(
label,
format_leaks(result.encoding),
format_leaks(result.decoding),
format_leaks(result.roundtrip),
)
for label, result in results.items()
]
# Format table output
cols = zip(*rows)
col_widths = [max(len(row) for row in col) for col in cols]
sep = ''.join((
'+-',
'-+-'.join('-' * width for width in col_widths),
'-+',
))
print(sep)
print(''.join((
'| ',
' | '.join(
'{value:<{width}}'.format(value=value, width=width)
for value, width in zip(head, col_widths)
),
' |',
)))
print(sep)
for row in rows[1:]:
print(''.join((
'| ',
' | '.join(
'{value:<{width}}'.format(value=value, width=width)
for value, width in zip(row, col_widths)
),
' |',
)))
print(sep)
print()
print("""\
There *will* be false positives in the table above. Ignore leaks involving a
tiny number of objects (e.g. 1) or a small number of bytes (e.g. < 8Kb) as such
allocations are quite normal.
In the case of a ref-leak of an object that can reference others (lists, sets,
dicts, or anything with a __dict__), expect to see 100s or 1000s of "objs"
leaked. In the case of a ref-leak of a simple object (int, str, bytes, etc.),
expect to see a few hundred Kb allocated.
If leaks occur across the board, it's likely to be in something universal like
dump/load. If it's restricted to a type, check the encoding and decoding
methods for that type.
""")
def main():
results = OrderedDict()
sys.stderr.write("Testing")
sys.stderr.flush()
for name, kwargs, value in TEST_VALUES:
encoded = py_cbor2.dumps(value, **kwargs)
results[name] = Result(
encoding=test(lambda: c_cbor2.dumps(value, **kwargs)),
decoding=test(lambda: c_cbor2.loads(encoded)),
roundtrip=test(lambda: c_cbor2.loads(c_cbor2.dumps(value, **kwargs))),
)
sys.stderr.write(".")
sys.stderr.flush()
sys.stderr.write("\n")
sys.stderr.write("\n")
output_table(results)
sys.stderr.write("\n")
if __name__ == '__main__':
main()
| true
| true
|
79046c7d31f55452e393c3f704929ce34e69eb05
| 3,817
|
py
|
Python
|
neurokit2/complexity/entropy_distribution.py
|
danibene/NeuroKit
|
df0ab6696e7418cf8b8dcd3ed82dbf879fa61b3a
|
[
"MIT"
] | null | null | null |
neurokit2/complexity/entropy_distribution.py
|
danibene/NeuroKit
|
df0ab6696e7418cf8b8dcd3ed82dbf879fa61b3a
|
[
"MIT"
] | null | null | null |
neurokit2/complexity/entropy_distribution.py
|
danibene/NeuroKit
|
df0ab6696e7418cf8b8dcd3ed82dbf879fa61b3a
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import scipy.stats
from .utils_complexity_embedding import complexity_embedding
from .entropy_shannon import entropy_shannon
def entropy_distribution(signal=None, delay=1, dimension=3, bins="Sturges", base=2):
"""**Distribution Entropy (DistrEn)**
Distribution Entropy (**DistrEn**, more commonly known as **DistEn**).
Parameters
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
delay : int
Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
See :func:`complexity_delay` to estimate the optimal value for this parameter.
dimension : int
Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
:func:`complexity_dimension` to estimate the optimal value for this parameter.
bins : int or str
Method to find the number of bins. Can be a number, or one of ``"Sturges"``, ``"Rice"``,
``"Doane"``, or ``"sqrt"``.
base : int
The logarithmic base to use for :func:`entropy_shannon`.
Returns
--------
distren : float
        The Distribution Entropy (DistrEn) of the signal.
info : dict
A dictionary containing additional information regarding the parameters used.
See Also
--------
entropy_shannon
Examples
----------
.. ipython:: python
import neurokit2 as nk
signal = nk.signal_simulate(duration=2, frequency=5)
distren, info = nk.entropy_distribution(signal)
distren
References
-----------
* Li, P., Liu, C., Li, K., Zheng, D., Liu, C., & Hou, Y. (2015). Assessing the complexity of
short-term heartbeat interval series by distribution entropy. Medical & biological
engineering & computing, 53(1), 77-87.
"""
# Sanity checks
if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
raise ValueError(
"Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
)
# Store parameters
info = {
"Dimension": dimension,
"Delay": delay,
"Bins": bins,
}
# Time-delay embedding
embedded = complexity_embedding(signal, delay=delay, dimension=dimension)
# Compute distance
n = len(embedded)
d = np.zeros(round(n * (n - 1) / 2))
for k in range(1, n):
Ix = (int((k - 1) * (n - k / 2)), int(k * (n - ((k + 1) / 2))))
d[Ix[0] : Ix[1]] = np.max(
abs(np.tile(embedded[k - 1, :], (n - k, 1)) - embedded[k:, :]), axis=1
)
# TODO: "D is symmetrical. Only the upper or lower triangular matrix will actually be adequate
# for the estimation of the ePDF, which can be used to facilitate its fast calculation."
n_d = len(d)
# Number of bins
if isinstance(bins, str):
bins = bins.lower()
if bins == "sturges":
n_bins = np.ceil(np.log2(n_d) + 1)
elif bins == "rice":
n_bins = np.ceil(2 * (n_d ** (1 / 3)))
elif bins == "sqrt":
n_bins = np.ceil(np.sqrt(n_d))
elif bins == "doanes":
sigma = np.sqrt(6 * (n_d - 2) / ((n_d + 1) * (n_d + 3)))
n_bins = np.ceil(1 + np.log2(n_d) + np.log2(1 + abs(scipy.stats.skew(d) / sigma)))
else:
raise Exception("Please enter a valid binning method")
else:
n_bins = bins
# Get probability
freq, _ = np.histogram(d, int(n_bins))
freq = freq / freq.sum()
# Compute Shannon Entropy
distren, _ = entropy_shannon(freq=freq, base=base)
# Normalize by number of bins (so that the range should be within [0, 1])
distren = distren / (np.log(n_bins) / np.log(base))
return distren, info
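def _demo_bin_rules(n_d=1000):
    # Hedged sketch, not part of NeuroKit2: the three closed-form bin-count
    # rules used above; for the illustrative default n_d=1000 they return
    # (11, 20, 32) for Sturges, Rice and sqrt respectively.
    sturges = int(np.ceil(np.log2(n_d) + 1))
    rice = int(np.ceil(2 * n_d ** (1 / 3)))
    sqrt_rule = int(np.ceil(np.sqrt(n_d)))
    return sturges, rice, sqrt_rule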
| 32.905172
| 98
| 0.596804
|
import numpy as np
import pandas as pd
import scipy.stats
from .utils_complexity_embedding import complexity_embedding
from .entropy_shannon import entropy_shannon
def entropy_distribution(signal=None, delay=1, dimension=3, bins="Sturges", base=2):
if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
raise ValueError(
"Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
)
info = {
"Dimension": dimension,
"Delay": delay,
"Bins": bins,
}
embedded = complexity_embedding(signal, delay=delay, dimension=dimension)
n = len(embedded)
d = np.zeros(round(n * (n - 1) / 2))
for k in range(1, n):
Ix = (int((k - 1) * (n - k / 2)), int(k * (n - ((k + 1) / 2))))
d[Ix[0] : Ix[1]] = np.max(
abs(np.tile(embedded[k - 1, :], (n - k, 1)) - embedded[k:, :]), axis=1
)
# for the estimation of the ePDF, which can be used to facilitate its fast calculation."
n_d = len(d)
if isinstance(bins, str):
bins = bins.lower()
if bins == "sturges":
n_bins = np.ceil(np.log2(n_d) + 1)
elif bins == "rice":
n_bins = np.ceil(2 * (n_d ** (1 / 3)))
elif bins == "sqrt":
n_bins = np.ceil(np.sqrt(n_d))
elif bins == "doanes":
sigma = np.sqrt(6 * (n_d - 2) / ((n_d + 1) * (n_d + 3)))
n_bins = np.ceil(1 + np.log2(n_d) + np.log2(1 + abs(scipy.stats.skew(d) / sigma)))
else:
raise Exception("Please enter a valid binning method")
else:
n_bins = bins
freq, _ = np.histogram(d, int(n_bins))
freq = freq / freq.sum()
distren, _ = entropy_shannon(freq=freq, base=base)
distren = distren / (np.log(n_bins) / np.log(base))
return distren, info
| true
| true
|
79046caa8581e0899b3236005dd8cef53003a204
| 909
|
py
|
Python
|
test_model/utils/caption2txt.py
|
lyp2333/External-Attention-pytorch
|
08be6baff82bf22d48b1746fd668446280365b9b
|
[
"MIT"
] | 1
|
2022-03-15T11:25:34.000Z
|
2022-03-15T11:25:34.000Z
|
test_model/utils/caption2txt.py
|
lyp2333/External-Attention-pytorch
|
08be6baff82bf22d48b1746fd668446280365b9b
|
[
"MIT"
] | null | null | null |
test_model/utils/caption2txt.py
|
lyp2333/External-Attention-pytorch
|
08be6baff82bf22d48b1746fd668446280365b9b
|
[
"MIT"
] | null | null | null |
import json
import os
srt_path = '/home/lyp/桌面/MAE_论文逐段精读【论文精读】.457423264.zh-CN.srt'
json_path = '/home/lyp/桌面/caption.json'
txt_path = '/home/lyp/桌面'
def srt2txt(path):
out_path= os.path.join(txt_path,path.split('.')[0]+'.txt')
with open(path,'r+') as f:
with open(out_path, 'w+') as out:
for index,lines in enumerate(f.readlines()):
if(index%5 == 2):
out.write(lines.split('>')[1].split('<')[0]+'\n')
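# Hedged note on the SRT layout srt2txt assumes (the example is
# illustrative): captions arrive in fixed 5-line blocks, so index % 5 == 2
# selects the markup line, e.g.
#   <font color="#FFFFFF">caption text</font>
# from which split('>')[1].split('<')[0] recovers just "caption text".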
def json2txt(path):
    out_path = os.path.join(txt_path, path.split('.')[0] + '.txt')
    with open(out_path, 'w+') as out:
        with open(path, 'r+') as f:
caption_dict = json.load(f)
# print(len(caption_dict['body']))
for content_dict in caption_dict['body']:
out.write(content_dict['content']+'\n')
if __name__ == '__main__':
srt2txt(srt_path)
json2txt(json_path)
| 36.36
| 73
| 0.585259
|
import json
import os
srt_path = '/home/lyp/桌面/MAE_论文逐段精读【论文精读】.457423264.zh-CN.srt'
json_path = '/home/lyp/桌面/caption.json'
txt_path = '/home/lyp/桌面'
def srt2txt(path):
out_path= os.path.join(txt_path,path.split('.')[0]+'.txt')
with open(path,'r+') as f:
with open(out_path, 'w+') as out:
for index,lines in enumerate(f.readlines()):
if(index%5 == 2):
out.write(lines.split('>')[1].split('<')[0]+'\n')
def json2txt(path):
    out_path = os.path.join(txt_path, path.split('.')[0] + '.txt')
    with open(out_path, 'w+') as out:
        with open(path, 'r+') as f:
caption_dict = json.load(f)
for content_dict in caption_dict['body']:
out.write(content_dict['content']+'\n')
if __name__ == '__main__':
srt2txt(srt_path)
json2txt(json_path)
| true
| true
|
79046d31ed91366118a39dcf7f70c3237ed795ac
| 39
|
py
|
Python
|
a1d05eba1/utils/__init__.py
|
dorey/a1d05eba1
|
eb6f66a946f3c417ab6bf9047ba9715be071967c
|
[
"0BSD"
] | null | null | null |
a1d05eba1/utils/__init__.py
|
dorey/a1d05eba1
|
eb6f66a946f3c417ab6bf9047ba9715be071967c
|
[
"0BSD"
] | 28
|
2020-06-23T19:00:58.000Z
|
2021-03-26T22:13:07.000Z
|
a1d05eba1/utils/__init__.py
|
dorey/a1d05eba1
|
eb6f66a946f3c417ab6bf9047ba9715be071967c
|
[
"0BSD"
] | null | null | null |
from .kfrozendict import kassertfrozen
| 19.5
| 38
| 0.871795
|
from .kfrozendict import kassertfrozen
| true
| true
|
79046db2ac5bf7ab7c68b2186b55ab8aa0c5d8ee
| 2,177
|
py
|
Python
|
data_utils.py
|
shivgahlout/DenseNet-pytorch
|
8fd286d9f718d164a4583eebd100dff127263891
|
[
"MIT"
] | 3
|
2018-08-20T04:55:04.000Z
|
2019-05-18T02:10:31.000Z
|
data_utils.py
|
gahshiv/DenseNet-pytorch
|
8fd286d9f718d164a4583eebd100dff127263891
|
[
"MIT"
] | null | null | null |
data_utils.py
|
gahshiv/DenseNet-pytorch
|
8fd286d9f718d164a4583eebd100dff127263891
|
[
"MIT"
] | 1
|
2019-07-17T01:10:10.000Z
|
2019-07-17T01:10:10.000Z
|
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from PIL import Image
def plots(epochs, train_acc, test_acc, train_loss, test_loss, train_error, test_error,filename):
plt.style.use('bmh')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_acc, 'r', epochs,test_acc, 'g')
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train_acc', 'test_acc'], loc='upper left')
fig.savefig(filename + '_accuracy.png')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_loss, 'r', epochs,test_loss, 'g')
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train_loss', 'test_loss'], loc='upper left')
fig.savefig(filename + '_loss.png')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_error, 'r', epochs,test_error, 'g')
plt.title('model error rate')
plt.ylabel('error rate')
plt.xlabel('epoch')
plt.legend(['train_error', 'test_error'], loc='upper left')
fig.savefig(filename + '_error.png')
plt.close('all')
def write_csv(filename, train_acc,test_acc,train_loss,test_loss,train_error,test_error,epoch):
if epoch==0:
with open(filename, 'w') as f:
f.write('train_acc,test_acc,train_loss, test_loss, train_error, test_error\n')
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],\
test_acc[-1],\
train_loss[-1],\
test_loss[-1],\
train_error[-1],\
test_error[-1]))
else:
with open(filename, 'a') as f:
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],\
test_acc[-1],\
train_loss[-1],\
test_loss[-1],\
train_error[-1],\
test_error[-1]))
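# Hedged usage sketch (the filename is illustrative): call once per epoch
# with the running metric lists; epoch 0 writes the CSV header, later
# epochs append the newest values:
#   write_csv('log.csv', train_acc, test_acc, train_loss, test_loss,
#             train_error, test_error, epoch)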
| 35.112903
| 96
| 0.509417
|
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from PIL import Image
def plots(epochs, train_acc, test_acc, train_loss, test_loss, train_error, test_error,filename):
plt.style.use('bmh')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_acc, 'r', epochs,test_acc, 'g')
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train_acc', 'test_acc'], loc='upper left')
fig.savefig(filename + '_accuracy.png')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_loss, 'r', epochs,test_loss, 'g')
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train_loss', 'test_loss'], loc='upper left')
fig.savefig(filename + '_loss.png')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_error, 'r', epochs,test_error, 'g')
plt.title('model error rate')
plt.ylabel('error rate')
plt.xlabel('epoch')
plt.legend(['train_error', 'test_error'], loc='upper left')
fig.savefig(filename + '_error.png')
plt.close('all')
def write_csv(filename, train_acc,test_acc,train_loss,test_loss,train_error,test_error,epoch):
if epoch==0:
with open(filename, 'w') as f:
f.write('train_acc,test_acc,train_loss, test_loss, train_error, test_error\n')
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],\
test_acc[-1],\
train_loss[-1],\
test_loss[-1],\
train_error[-1],\
test_error[-1]))
else:
with open(filename, 'a') as f:
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],\
test_acc[-1],\
train_loss[-1],\
test_loss[-1],\
train_error[-1],\
test_error[-1]))
| true
| true
|
79046e72c3b7ce43e36846ffb09e38e0ed9245f9
| 6,677
|
py
|
Python
|
unit_test/docker_container_test.py
|
bjtucker/cibuildwheel
|
856f59d5031f42bb27aa4af28e4019add9bbcb89
|
[
"BSD-2-Clause"
] | 1
|
2019-07-28T12:51:30.000Z
|
2019-07-28T12:51:30.000Z
|
unit_test/docker_container_test.py
|
bjtucker/cibuildwheel
|
856f59d5031f42bb27aa4af28e4019add9bbcb89
|
[
"BSD-2-Clause"
] | 1
|
2021-05-25T20:07:09.000Z
|
2021-05-25T20:07:09.000Z
|
unit_test/docker_container_test.py
|
pradyunsg/cibuildwheel
|
61af5e461581d9b315b27cb6db1d0cc56979cc67
|
[
"BSD-2-Clause"
] | null | null | null |
import platform
import random
import shutil
import subprocess
import textwrap
from pathlib import Path, PurePath
import pytest
from cibuildwheel.docker_container import DockerContainer
from cibuildwheel.environment import EnvironmentAssignment
# for these tests we use manylinux2014 images, because they're available on
# multi architectures and include python3.8
pm = platform.machine()
if pm == "x86_64":
DEFAULT_IMAGE = "quay.io/pypa/manylinux2014_x86_64:2020-05-17-2f8ac3b"
elif pm == "aarch64":
DEFAULT_IMAGE = "quay.io/pypa/manylinux2014_aarch64:2020-05-17-2f8ac3b"
elif pm == "ppc64le":
DEFAULT_IMAGE = "quay.io/pypa/manylinux2014_ppc64le:2020-05-17-2f8ac3b"
elif pm == "s390x":
DEFAULT_IMAGE = "quay.io/pypa/manylinux2014_s390x:2020-05-17-2f8ac3b"
@pytest.mark.docker
def test_simple():
with DockerContainer(DEFAULT_IMAGE) as container:
assert container.call(["echo", "hello"], capture_output=True) == "hello\n"
@pytest.mark.docker
def test_no_lf():
with DockerContainer(DEFAULT_IMAGE) as container:
assert container.call(["printf", "hello"], capture_output=True) == "hello"
@pytest.mark.docker
def test_environment():
with DockerContainer(DEFAULT_IMAGE) as container:
assert (
container.call(
["sh", "-c", "echo $TEST_VAR"], env={"TEST_VAR": "1"}, capture_output=True
)
== "1\n"
)
@pytest.mark.docker
def test_cwd():
with DockerContainer(DEFAULT_IMAGE, cwd="/cibuildwheel/working_directory") as container:
assert container.call(["pwd"], capture_output=True) == "/cibuildwheel/working_directory\n"
assert container.call(["pwd"], capture_output=True, cwd="/opt") == "/opt\n"
@pytest.mark.docker
def test_container_removed():
with DockerContainer(DEFAULT_IMAGE) as container:
docker_containers_listing = subprocess.run(
"docker container ls",
shell=True,
check=True,
stdout=subprocess.PIPE,
universal_newlines=True,
).stdout
assert container.name in docker_containers_listing
old_container_name = container.name
docker_containers_listing = subprocess.run(
"docker container ls",
shell=True,
check=True,
stdout=subprocess.PIPE,
universal_newlines=True,
).stdout
assert old_container_name not in docker_containers_listing
@pytest.mark.docker
def test_large_environment():
# max environment variable size is 128kB
long_env_var_length = 127 * 1024
large_environment = {
"a": "0" * long_env_var_length,
"b": "0" * long_env_var_length,
"c": "0" * long_env_var_length,
"d": "0" * long_env_var_length,
}
with DockerContainer(DEFAULT_IMAGE) as container:
# check the length of d
assert (
container.call(["sh", "-c", "echo ${#d}"], env=large_environment, capture_output=True)
== f"{long_env_var_length}\n"
)
@pytest.mark.docker
def test_binary_output():
with DockerContainer(DEFAULT_IMAGE) as container:
# note: the below embedded snippets are in python2
        # check that we can pass through arbitrary binary data without erroring
container.call(
[
"/usr/bin/python2",
"-c",
textwrap.dedent(
"""
import sys
sys.stdout.write(''.join(chr(n) for n in range(0, 256)))
"""
),
]
)
# check that we can capture arbitrary binary data
output = container.call(
[
"/usr/bin/python2",
"-c",
textwrap.dedent(
"""
import sys
sys.stdout.write(''.join(chr(n % 256) for n in range(0, 512)))
"""
),
],
capture_output=True,
)
data = bytes(output, encoding="utf8", errors="surrogateescape")
for i in range(512):
assert data[i] == i % 256
# check that environment variables can carry binary data, except null characters
# (https://www.gnu.org/software/libc/manual/html_node/Environment-Variables.html)
binary_data = bytes(n for n in range(1, 256))
binary_data_string = str(binary_data, encoding="utf8", errors="surrogateescape")
output = container.call(
["python2", "-c", 'import os, sys; sys.stdout.write(os.environ["TEST_VAR"])'],
env={"TEST_VAR": binary_data_string},
capture_output=True,
)
assert output == binary_data_string
@pytest.mark.docker
def test_file_operations(tmp_path: Path):
with DockerContainer(DEFAULT_IMAGE) as container:
# test copying a file in
test_binary_data = bytes(random.randrange(256) for _ in range(1000))
original_test_file = tmp_path / "test.dat"
original_test_file.write_bytes(test_binary_data)
dst_file = PurePath("/tmp/test.dat")
container.copy_into(original_test_file, dst_file)
output = container.call(["cat", dst_file], capture_output=True)
assert test_binary_data == bytes(output, encoding="utf8", errors="surrogateescape")
@pytest.mark.docker
def test_dir_operations(tmp_path: Path):
with DockerContainer(DEFAULT_IMAGE) as container:
test_binary_data = bytes(random.randrange(256) for _ in range(1000))
original_test_file = tmp_path / "test.dat"
original_test_file.write_bytes(test_binary_data)
# test copying a dir in
test_dir = tmp_path / "test_dir"
test_dir.mkdir()
test_file = test_dir / "test.dat"
shutil.copyfile(original_test_file, test_file)
dst_dir = PurePath("/tmp/test_dir")
dst_file = dst_dir / "test.dat"
container.copy_into(test_dir, dst_dir)
output = container.call(["cat", dst_file], capture_output=True)
assert test_binary_data == bytes(output, encoding="utf8", errors="surrogateescape")
# test glob
assert container.glob(dst_dir, "*.dat") == [dst_file]
# test copy dir out
new_test_dir = tmp_path / "test_dir_new"
container.copy_out(dst_dir, new_test_dir)
assert test_binary_data == (new_test_dir / "test.dat").read_bytes()
@pytest.mark.docker
def test_environment_executor():
with DockerContainer(DEFAULT_IMAGE) as container:
assignment = EnvironmentAssignment("TEST=$(echo 42)")
assert assignment.evaluated_value({}, container.environment_executor) == "42"
| 33.385
| 98
| 0.634417
|
import platform
import random
import shutil
import subprocess
import textwrap
from pathlib import Path, PurePath
import pytest
from cibuildwheel.docker_container import DockerContainer
from cibuildwheel.environment import EnvironmentAssignment
# multi architectures and include python3.8
pm = platform.machine()
if pm == "x86_64":
DEFAULT_IMAGE = "quay.io/pypa/manylinux2014_x86_64:2020-05-17-2f8ac3b"
elif pm == "aarch64":
DEFAULT_IMAGE = "quay.io/pypa/manylinux2014_aarch64:2020-05-17-2f8ac3b"
elif pm == "ppc64le":
DEFAULT_IMAGE = "quay.io/pypa/manylinux2014_ppc64le:2020-05-17-2f8ac3b"
elif pm == "s390x":
DEFAULT_IMAGE = "quay.io/pypa/manylinux2014_s390x:2020-05-17-2f8ac3b"
@pytest.mark.docker
def test_simple():
with DockerContainer(DEFAULT_IMAGE) as container:
assert container.call(["echo", "hello"], capture_output=True) == "hello\n"
@pytest.mark.docker
def test_no_lf():
with DockerContainer(DEFAULT_IMAGE) as container:
assert container.call(["printf", "hello"], capture_output=True) == "hello"
@pytest.mark.docker
def test_environment():
with DockerContainer(DEFAULT_IMAGE) as container:
assert (
container.call(
["sh", "-c", "echo $TEST_VAR"], env={"TEST_VAR": "1"}, capture_output=True
)
== "1\n"
)
@pytest.mark.docker
def test_cwd():
with DockerContainer(DEFAULT_IMAGE, cwd="/cibuildwheel/working_directory") as container:
assert container.call(["pwd"], capture_output=True) == "/cibuildwheel/working_directory\n"
assert container.call(["pwd"], capture_output=True, cwd="/opt") == "/opt\n"
@pytest.mark.docker
def test_container_removed():
with DockerContainer(DEFAULT_IMAGE) as container:
docker_containers_listing = subprocess.run(
"docker container ls",
shell=True,
check=True,
stdout=subprocess.PIPE,
universal_newlines=True,
).stdout
assert container.name in docker_containers_listing
old_container_name = container.name
docker_containers_listing = subprocess.run(
"docker container ls",
shell=True,
check=True,
stdout=subprocess.PIPE,
universal_newlines=True,
).stdout
assert old_container_name not in docker_containers_listing
@pytest.mark.docker
def test_large_environment():
# max environment variable size is 128kB
long_env_var_length = 127 * 1024
large_environment = {
"a": "0" * long_env_var_length,
"b": "0" * long_env_var_length,
"c": "0" * long_env_var_length,
"d": "0" * long_env_var_length,
}
with DockerContainer(DEFAULT_IMAGE) as container:
# check the length of d
assert (
container.call(["sh", "-c", "echo ${#d}"], env=large_environment, capture_output=True)
== f"{long_env_var_length}\n"
)
@pytest.mark.docker
def test_binary_output():
with DockerContainer(DEFAULT_IMAGE) as container:
# note: the below embedded snippets are in python2
        # check that we can pass through arbitrary binary data without erroring
container.call(
[
"/usr/bin/python2",
"-c",
textwrap.dedent(
"""
import sys
sys.stdout.write(''.join(chr(n) for n in range(0, 256)))
"""
),
]
)
# check that we can capture arbitrary binary data
output = container.call(
[
"/usr/bin/python2",
"-c",
textwrap.dedent(
"""
import sys
sys.stdout.write(''.join(chr(n % 256) for n in range(0, 512)))
"""
),
],
capture_output=True,
)
data = bytes(output, encoding="utf8", errors="surrogateescape")
for i in range(512):
assert data[i] == i % 256
# check that environment variables can carry binary data, except null characters
# (https://www.gnu.org/software/libc/manual/html_node/Environment-Variables.html)
binary_data = bytes(n for n in range(1, 256))
binary_data_string = str(binary_data, encoding="utf8", errors="surrogateescape")
output = container.call(
["python2", "-c", 'import os, sys; sys.stdout.write(os.environ["TEST_VAR"])'],
env={"TEST_VAR": binary_data_string},
capture_output=True,
)
assert output == binary_data_string
@pytest.mark.docker
def test_file_operations(tmp_path: Path):
with DockerContainer(DEFAULT_IMAGE) as container:
# test copying a file in
test_binary_data = bytes(random.randrange(256) for _ in range(1000))
original_test_file = tmp_path / "test.dat"
original_test_file.write_bytes(test_binary_data)
dst_file = PurePath("/tmp/test.dat")
container.copy_into(original_test_file, dst_file)
output = container.call(["cat", dst_file], capture_output=True)
assert test_binary_data == bytes(output, encoding="utf8", errors="surrogateescape")
@pytest.mark.docker
def test_dir_operations(tmp_path: Path):
with DockerContainer(DEFAULT_IMAGE) as container:
test_binary_data = bytes(random.randrange(256) for _ in range(1000))
original_test_file = tmp_path / "test.dat"
original_test_file.write_bytes(test_binary_data)
# test copying a dir in
test_dir = tmp_path / "test_dir"
test_dir.mkdir()
test_file = test_dir / "test.dat"
shutil.copyfile(original_test_file, test_file)
dst_dir = PurePath("/tmp/test_dir")
dst_file = dst_dir / "test.dat"
container.copy_into(test_dir, dst_dir)
output = container.call(["cat", dst_file], capture_output=True)
assert test_binary_data == bytes(output, encoding="utf8", errors="surrogateescape")
# test glob
assert container.glob(dst_dir, "*.dat") == [dst_file]
# test copy dir out
new_test_dir = tmp_path / "test_dir_new"
container.copy_out(dst_dir, new_test_dir)
assert test_binary_data == (new_test_dir / "test.dat").read_bytes()
@pytest.mark.docker
def test_environment_executor():
with DockerContainer(DEFAULT_IMAGE) as container:
assignment = EnvironmentAssignment("TEST=$(echo 42)")
assert assignment.evaluated_value({}, container.environment_executor) == "42"
| true
| true
|
79046e9ef7bef36d5dc67989725dd7e5a8af1cb7
| 42,375
|
py
|
Python
|
tests/integration/test_fscmds.py
|
mensago/mensagod
|
3b62e4038691ee3c9db618b024edf62467d402f5
|
[
"MIT"
] | null | null | null |
tests/integration/test_fscmds.py
|
mensago/mensagod
|
3b62e4038691ee3c9db618b024edf62467d402f5
|
[
"MIT"
] | null | null | null |
tests/integration/test_fscmds.py
|
mensago/mensagod
|
3b62e4038691ee3c9db618b024edf62467d402f5
|
[
"MIT"
] | 1
|
2021-03-06T18:46:48.000Z
|
2021-03-06T18:46:48.000Z
|
# pylint: disable=too-many-lines
import os
import random
import shutil
import time
import uuid
from retval import RetVal
from pycryptostring import CryptoString
from pymensago.encryption import EncryptionPair
from pymensago.hash import blake2hash
from pymensago.serverconn import ServerConnection
from integration_setup import login_admin, regcode_admin, setup_test, init_server, init_user, \
init_user2, reset_top_dir
from tests.integration.integration_setup import funcname
server_response = {
'title' : 'Mensago Server Response',
'type' : 'object',
'required' : [ 'Code', 'Status', 'Info', 'Data' ],
'properties' : {
'Code' : {
'type' : 'integer'
},
'Status' : {
'type' : 'string'
},
'Info' : {
'type' : 'string'
},
'Data' : {
'type' : 'object'
}
}
}
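# A message matching the schema above looks like (values illustrative):
#   {'Code': 200, 'Status': 'OK', 'Info': '', 'Data': {}}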
def make_test_file(path: str, file_size=-1, file_name='') -> RetVal:
'''Generate a test file containing nothing but zeroes. If the file size is negative, a random
size between 1 and 10 Kb will be chosen. If the file name is empty, a random one will be
generated.
Returns:
name: (str) name of the test file generated
size: (int) size of the test file generated
'''
if file_size < 0:
file_size = random.randint(1,10) * 1024
if file_name == '' or not file_name:
file_name = f"{int(time.time())}.{file_size}.{str(uuid.uuid4())}"
try:
fhandle = open(os.path.join(path, file_name), 'w')
except Exception as e:
return RetVal().wrap_exception(e)
fhandle.write('0' * file_size)
fhandle.close()
return RetVal().set_values({ 'name':file_name, 'size':file_size })
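# Hedged usage sketch for make_test_file (values illustrative):
#   status = make_test_file('/tmp', file_size=1024)
#   status['name'] -> '1700000000.1024.<uuid4>'   # time.size.uuid4
#   status['size'] -> 1024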
def setup_testdir(name) -> str:
'''Creates a test folder for holding files'''
topdir = os.path.join(os.path.dirname(os.path.realpath(__file__)),'testfiles')
if not os.path.exists(topdir):
os.mkdir(topdir)
testdir = os.path.join(topdir, name)
while os.path.exists(testdir):
try:
shutil.rmtree(testdir)
except:
print("Waiting a second for test folder to unlock")
time.sleep(1.0)
os.mkdir(testdir)
return testdir
def test_copy():
'''Tests the COPY command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Set up the directory hierarchy
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')
os.mkdir(inner_dir)
# Subtest #1: Nonexistent source file
conn.send_message({
'Action': 'COPY',
'Data': {
'SourceFile': '/ wsp ' + dbdata['admin_wid'] + ' 1.1.01234567-89ab-cdef-0123-456789abcdef',
'DestDir': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_copy: #1 failed to handle nonexistent source file'
# Subtest #2: Nonexistent destination directory
# By making this 1MB + 1byte, the file's mere existence will put us over the limit of the 1MB
# disk quota
status = make_test_file(admin_dir, file_size=0x10_0001)
assert not status.error(), 'test_copy: #2 failed to create a test file'
testfile1 = status['name']
conn.send_message({
'Action': 'COPY',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_copy: #2 failed to handle nonexistent destination dir'
# Subtest #3: Source path is a directory
conn.send_message({
'Action': 'COPY',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_copy: #3 failed to handle directory as source'
# Subtest #4: Destination is file path
# Normally each file on the system has a unique name, but having a duplicate in this case
# won't matter
status = make_test_file(inner_dir, 102400, testfile1)
conn.send_message({
'Action': 'COPY',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_copy: #4 failed to handle file as destination'
# Subtest #5: Insufficient quota remaining
# The administrator normally can't have a quota. We'll just fix that just for this one test
# *heh*
# We actually have to do an update instead of an insert because the quota checks in earlier
# calls ensure that there is a quota record for admin in the database
cur = dbconn.cursor()
cur.execute(f"UPDATE quotas SET quota=1 WHERE wid='{dbdata['admin_wid']}'")
dbconn.commit()
conn.send_message({
'Action': 'COPY',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 409, 'test_copy: #5 failed to handle quota limit'
# We need this to be unlimited for later tests
cur = dbconn.cursor()
cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'")
dbconn.commit()
# Subtest #6: Actual success
conn.send_message({
'Action': 'COPY',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_copy: #6 failed to succeed'
conn.disconnect()
def test_delete():
'''Test the DELETE command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Subtest #1: Bad path
conn.send_message({
'Action': 'DELETE',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} some_dir_name"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, f"{funcname()}: failed to handle bad path"
# Subtest #2: Directory doesn't exist
conn.send_message({
'Action': 'DELETE',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, f"{funcname()}: #2 failed to handle nonexistent file"
# Subtest #3: Actual success
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert not status.error(), f"{funcname()}: #3 failed to create test file"
filename = status["name"]
conn.send_message({
'Action': 'DELETE',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {filename}"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, f"{funcname()}: #3 failed to delete file"
def test_download():
'''This tests the command DOWNLOAD'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
# Subtest #1: Missing parameters
conn.send_message({'Action': 'DOWNLOAD','Data': {}})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_download: #1 failed to handle missing parameter'
# Subtest #2: Non-existent path
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222' +
' 1000.1000.22222222-2222-2222-2222-222222222222'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_download: #2 failed to handle non-existent path'
# Subtest #3: Actual success
status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid']), file_size=1000)
assert not status.error(), f"test_download: #3 failed to create test file: {status.info}"
testname = status['name']
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {testname}"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, 'test_download: #3 failed to proceed to file download'
assert 'Size' in response['Data'] and response['Data']['Size'] == '1000', \
'test_download: #3 server failed to respond with file size'
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
'Size': '1000'
}
})
rawdata = conn.read()
assert len(rawdata) == 1000, 'test_download: #3 downloaded file had wrong length'
# Set up an 'interrupted' transfer
status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid']), file_size=1000)
assert not status.error(), f"test_download: #4 failed to create test file: {status.info}"
testname = status['name']
	# Subtest #4: Resume offset larger than size of data stored server-side
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
'Offset': '2500'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_download: #4 failed to handle offset > file size'
# Subtest #5: Resume interrupted transfer - exact match
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
'Offset': '500'
}
})
response = conn.read_response(server_response)
	assert response['Code'] == 100, 'test_download: #5 failed to proceed to file download'
assert 'Size' in response['Data'] and response['Data']['Size'] == '1000', \
'test_download: #5 server failed to respond with file size'
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
'Offset': '500',
'Size': '1000'
}
})
rawdata = conn.read()
assert len(rawdata) == 500, 'test_download: #5 resumed data had wrong length'
	assert blake2hash((('0' * 500) + rawdata).encode()) == \
		'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', \
		'test_download: #5 resumed file hash failure'
conn.disconnect()
def test_getquotainfo():
'''This tests the command GETQUOTAINFO, which gets both the quota for the workspace and the
disk usage'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid']), file_size=1000)
assert not status.error(), f"Failed to create test workspace file: {status.info}"
conn.send_message({ 'Action': 'GETQUOTAINFO', 'Data': {} })
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_getquotainfo: failed to get quota information'
assert response['Data']['DiskUsage'] == '1000', 'test_getquotainfo: disk usage was incorrect'
assert response['Data']['QuotaSize'] == '0', \
"test_getquotainfo: admin quota wasn't unlimited"
conn.disconnect()
def test_list():
'''Tests the LIST command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Subtest #1: Nonexistent path
conn.send_message({
'Action': 'LIST',
'Data': {
'Path': '/ 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_list: #1 failed to handle missing path'
# Subtest #2: Path is a file
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert not status.error(), "test_list: #2 failed to create test file"
conn.send_message({
'Action': 'LIST',
'Data': {
'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_list: #2 failed to handle path as file'
# Subtest #3: Empty directory
os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
conn.send_message({
'Action': 'LIST',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_list: #3 failed to handle empty directory'
assert 'Files' in response['Data'] and len(response['Data']['Files']) == 0, \
'test_list: #3 failed to have empty response for empty directory'
# Subtest #4: A list of files
for i in range(1,6):
tempname = '.'.join([str(1000 * i), '500', str(uuid.uuid4())])
try:
fhandle = open(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111',
tempname), 'w')
except Exception as e:
			assert False, 'test_list: #4 failed to create test files: ' + str(e)
fhandle.write('0' * 500)
fhandle.close()
conn.send_message({
'Action': 'LIST',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_list: #4 failed to handle non-empty directory'
assert 'Files' in response['Data'] and len(response['Data']['Files']) == 5, \
'test_list: #4 failed to list all files in directory'
# Subtest #5: A list of files with time specifier
conn.send_message({
'Action': 'LIST',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111',
'Time': '3000'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_list: #5 failed to handle non-empty directory'
assert 'Files' in response['Data'] and len(response['Data']['Files']) == 3, \
'test_list: #5 failed to filter files'
conn.disconnect()
def test_listdirs():
'''Tests the LISTDIRS command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Subtest #1: Nonexistent path
conn.send_message({
'Action': 'LISTDIRS',
'Data': {
'Path': '/ 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_listdirs: #1 failed to handle missing path'
# Subtest #2: Path is a file
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert not status.error(), "test_listdirs: #2 failed to create test file"
conn.send_message({
		'Action': 'LISTDIRS',
'Data': {
'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_listdirs: #2 failed to handle path as file'
# Subtest #3: Empty directory
os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
conn.send_message({
'Action': 'LISTDIRS',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_listdirs: #3 failed to handle empty directory'
assert 'Directories' in response['Data'] and len(response['Data']['Directories']) == 0, \
'test_listdirs: #3 failed to have empty response for empty directory'
# Subtest #4: A list of directories
for i in range(2,7):
tempname = '-'.join([(str(i) * 8), (str(i) * 4), (str(i) * 4), (str(i) * 4), (str(i) * 12)])
try:
os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111', tempname))
except Exception as e:
			assert False, 'test_listdirs: #4 failed to create test directories: ' + str(e)
make_test_file(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
conn.send_message({
'Action': 'LISTDIRS',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_listdirs: #4 failed to handle non-empty directory'
assert 'Directories' in response['Data'] and len(response['Data']['Directories']) == 5, \
		'test_listdirs: #4 failed to list all subdirectories'
conn.disconnect()
def test_mkdir():
'''Tests the MKDIR command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Subtest #1: Bad directory name
conn.send_message({
'Action': 'MKDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' some_dir_name'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_mkdir: #1 failed to handle bad path'
# Subtest #2: Actual success - 1 directory
conn.send_message({
'Action': 'MKDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_mkdir: #2 failed to create legitimate directory'
# Subtest #3: Directory already exists
conn.send_message({
'Action': 'MKDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 408, 'test_mkdir: #3 failed to handle existing directory'
# Subtest #4: Actual success - nested directories
multipath = ' '.join(['/', dbdata['admin_wid'],
'22222222-2222-2222-2222-222222222222',
'33333333-3333-3333-3333-333333333333',
'44444444-4444-4444-4444-444444444444',
'55555555-5555-5555-5555-555555555555'
])
conn.send_message({
'Action': 'MKDIR',
'Data': {
'Path': multipath
}
})
response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_mkdir: #4 failed to create nested directories'
conn.disconnect()
def test_move():
'''Tests the MOVE command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Set up the directory hierarchy
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')
os.mkdir(inner_dir)
# Subtest #1: Nonexistent source file
conn.send_message({
'Action': 'MOVE',
'Data': {
'SourceFile': '/ ' + dbdata['admin_wid'] + ' 1.1.01234567-89ab-cdef-0123-456789abcdef',
'DestDir': '/ ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_move: #1 failed to handle nonexistent source file'
# Subtest #2: Nonexistent destination directory
status = make_test_file(admin_dir)
assert not status.error(), 'test_move: #2 failed to create a test file'
testfile1 = status['name']
conn.send_message({
'Action': 'MOVE',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_move: #2 failed to handle nonexistent destination dir'
# Subtest #3: Source path is a directory
conn.send_message({
'Action': 'MOVE',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_move: #3 failed to handle directory as source'
# Subtest #4: Destination is file path
# Normally each file on the system has a unique name, but having a duplicate in this case
# won't matter
status = make_test_file(inner_dir, 102400, testfile1)
conn.send_message({
'Action': 'MOVE',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}"
}
})
response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_move: #4 failed to handle file as destination'
os.remove(os.path.join(inner_dir, status['name']))
# Subtest #5: Actual success
conn.send_message({
'Action': 'MOVE',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_move: #5 failed to succeed'
conn.disconnect()
def test_replace():
'''Test the REPLACE command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
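	# REPLACE behaves like an UPLOAD that supersedes an existing file: the client names
	# the file to replace, the destination path, and the size and hash of the new data,
	# then streams the data once the server answers with code 100.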
# Subtest #1: Bad old file path
conn.send_message({
'Action': 'REPLACE',
'Data': {
'OldPath': f"/ wsp {dbdata['admin_wid']} some_dir_name",
'NewPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111",
'Size': "1234",
'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, f"{funcname()}: #1 failed to handle bad old file path"
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
	status = make_test_file(admin_dir)
	assert not status.error(), f"{funcname()}: #2 failed to create test file"
	filename = status['name']
# Subtest #2: Bad new file path
conn.send_message({
'Action': 'REPLACE',
'Data': {
'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}",
'NewPath': f"/ wsp {dbdata['admin_wid']} some_dir_name",
'Size': "1234",
'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, f"{funcname()}: #2 failed to handle bad new file path"
# Subtest #4: Destination directory doesn't exist
conn.send_message({
'Action': 'REPLACE',
'Data': {
'OldPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111",
'NewPath': "/ wsp 11111111-1111-1111-1111-111111111111",
'Size': "4321",
'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, f"{funcname()}: #4 failed to handle nonexistent destination dir"
# Subtest #5: Actual success
status = make_test_file(admin_dir)
assert not status.error(), f"{funcname()}: #3 failed to create test file"
filename = status["name"]
conn.send_message({
'Action': 'REPLACE',
'Data': {
'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}",
'NewPath': f"/ wsp {dbdata['admin_wid']}",
'Size': "1000",
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp'
}
})
response = conn.read_response(server_response)
	assert response['Code'] == 100, f'{funcname()}: #5 failed to proceed to file upload'
conn.write('0' * 1000)
response = conn.read_response(server_response)
	assert response['Code'] == 200, f'{funcname()}: #5 failed to replace file'
conn.disconnect()
def test_rmdir():
'''Tests the RMDIR command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Subtest #1: Bad directory name
conn.send_message({
'Action': 'RMDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' some_dir_name',
'Recursive': 'False'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_rmdir: #1 failed to handle bad path'
# Subtest #2: Directory doesn't exist
conn.send_message({
'Action': 'RMDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111',
'Recursive': 'False'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_rmdir: #2 failed to handle nonexistent directory'
# Subtest #3: Call fails because of non-empty directory
multipath = ' '.join(['/ wsp', dbdata['admin_wid'],
'22222222-2222-2222-2222-222222222222',
'33333333-3333-3333-3333-333333333333',
'44444444-4444-4444-4444-444444444444',
'55555555-5555-5555-5555-555555555555'
])
conn.send_message({
'Action': 'MKDIR',
'Data': {
'Path': multipath
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_rmdir: #3 failed to create test hierarchy'
conn.send_message({
'Action': 'RMDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222',
'Recursive': 'False'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 408, 'test_rmdir: #3 failed to handle non-empty directory'
# Subtest #4: Actual success - non-recursively remove an empty directory
conn.send_message({
'Action': 'RMDIR',
'Data': {
'Path': multipath
}
})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_rmdir: #4 failed to remove an empty directory'
	conn.disconnect()
def test_select():
'''Tests the SELECT command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Subtest #1: Nonexistent path
conn.send_message({
'Action': 'SELECT',
'Data': {
'Path': '/ 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_select: #1 failed to handle missing path'
# Subtest #2: Path is a file
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert not status.error(), "test_select: #2 failed to create test file"
conn.send_message({
'Action': 'SELECT',
'Data': {
'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_select: #2 failed to handle path as file'
# Subtest #3: Actual success
innerpath = ' '.join(['/ wsp', dbdata['admin_wid'], '22222222-2222-2222-2222-222222222222'])
conn.send_message({
'Action': 'MKDIR',
'Data': {
'Path': innerpath
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_select: #3 failed to create test directory'
conn.send_message({
'Action': 'SELECT',
'Data': {
'Path': innerpath
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_select: #3 failed to work correctly'
conn.disconnect()
def test_setquota():
'''Tests the SETQUOTA command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
init_user2(dbdata, conn)
# Subtest #1: Bad sizes
conn.send_message({
'Action': 'SETQUOTA',
'Data': {
'Size': '0',
'Workspaces': '33333333-3333-3333-3333-333333333333'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_setquota: failed to handle bad size value'
conn.send_message({
'Action': 'SETQUOTA',
'Data': {
'Size': "Real programmers don't eat quiche ;)",
'Workspaces': '33333333-3333-3333-3333-333333333333'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_setquota: failed to handle bad size data type'
# Subtest #2: Bad workspace list
conn.send_message({
'Action': 'SETQUOTA',
'Data': {
'Size': "4096",
'Workspaces': '33333333-3333-3333-3333-333333333333,'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_setquota: failed to handle bad workspace list'
# Subtest #3: Actual success
conn.send_message({
'Action': 'SETQUOTA',
'Data': {
'Size': "4096",
'Workspaces': '33333333-3333-3333-3333-333333333333, ' \
'44444444-4444-4444-4444-444444444444'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_setquota: failed to handle actual success'
conn.disconnect()
def test_upload():
'''Tests the UPLOAD command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
# Subtest #1: Missing parameters
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': '1000',
# Hash parameter is missing
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_upload: #1 failed to handle missing parameter'
# Subtest #2: Non-existent path
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': '1000',
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_upload: #2 failed to handle non-existent path'
# Subtest #3: Size too big
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(0x4000_0000 * 200), # 200GiB isn't all that big :P
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
assert response['Code'] == 414, 'test_upload: #3 failed to handle file too big'
# Subtest #4: Insufficient quota remaining
	# The administrator normally can't have a quota. We'll override that just for this
	# one test. *heh*
# Normally in Python direct string substitution is a recipe for SQL injection. We're not
# bringing in any insecure code here, so it's only a little bit bad.
cur = dbconn.cursor()
cur.execute(f"INSERT INTO quotas(wid, usage, quota) VALUES('{dbdata['admin_wid']}', 5100 , 5120)")
dbconn.commit()
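	# A parameterized form of the statement above would sidestep the substitution
	# entirely. A sketch, assuming a psycopg2-style driver that accepts '%s'
	# placeholders (not used here because the values are trusted test constants):
	#   cur.execute("INSERT INTO quotas(wid, usage, quota) VALUES(%s, %s, %s)",
	#               (dbdata['admin_wid'], 5100, 5120))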
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(0x10_0000 * 30), # 30MiB
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
assert response['Code'] == 409, 'test_upload: #4 quota check failed'
# We need this to be unlimited for later tests
cur = dbconn.cursor()
cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'")
dbconn.commit()
# Subtest #5: Hash mismatch
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:5(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, 'test_upload: #5 failed to proceed to file upload'
conn.write('0' * 1000)
response = conn.read_response(server_response)
assert response['Code'] == 410, 'test_upload: #5 failed to handle file hash mismatch'
# Subtest #6: Actual success
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, 'test_upload: #6 failed to proceed to file upload'
conn.write('0' * 1000)
response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_upload: #6 failed to complete the upload'
# Set up an interrupted transfer
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
	response = conn.read_response(server_response)
	assert response['Code'] == 100, 'test_upload: failed to begin interrupted transfer'
	tempFileName = response['Data']['TempName']
	assert tempFileName != '', 'test_upload: server failed to return temp file name'
conn.write('0' * 500)
del conn
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
login_admin(dbdata, conn)
# Subtest #7: Resume offset larger than size of data stored server-side
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid'],
'TempName': tempFileName,
'Offset': '2000'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_upload: #7 failed to handle offset > file size'
# Subtest #8: Resume interrupted transfer - exact match
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid'],
'TempName': tempFileName,
'Offset': '500'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, 'test_upload: #8 failed to proceed to file upload'
conn.write('0' * 500)
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_upload: #8 failed to resume with exact offset match'
# Set up one last interrupted transfer
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
	response = conn.read_response(server_response)
	assert response['Code'] == 100, 'test_upload: failed to begin final interrupted transfer'
	tempFileName = response['Data']['TempName']
	assert tempFileName != '', 'test_upload: server failed to return temp file name'
conn.write('0' * 500)
del conn
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
login_admin(dbdata, conn)
# Subtest #9: Overlapping resume
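	# The server already holds the first 500 bytes; resuming from offset 400 re-sends
	# bytes 400-999, so the server must reconcile the 100-byte overlap when it
	# assembles the final file.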
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid'],
'TempName': tempFileName,
'Offset': '400'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, 'test_upload: #9 failed to proceed to file upload'
conn.write('0' * 600)
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_upload: #9 failed to resume with overlapping offset'
conn.disconnect()
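# For reference, the resume flow exercised in subtests #7-#9 reduces to the
# client-side sketch below. It is illustrative only and is never called by the
# tests; the helper name and its control flow are assumptions, not part of the
# pymensago API.
def _sketch_resume_upload(conn: ServerConnection, path: str, data: str, tempname: str,
	offset: int) -> dict:
	'''Resume an interrupted upload of `data` from `offset` bytes (illustrative only).'''
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(len(data)),
			'Hash': blake2hash(data.encode()),
			'Path': path,
			'TempName': tempname,
			'Offset': str(offset)
		}
	})
	response = conn.read_response(server_response)
	if response['Code'] == 100:
		# The server accepted the resume point; send only the remaining bytes.
		conn.write(data[offset:])
		response = conn.read_response(server_response)
	return response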
if __name__ == '__main__':
# test_copy()
# test_delete()
# test_download()
# test_getquotainfo()
# test_list()
# test_listdirs()
# test_mkdir()
# test_move()
test_replace()
# test_rmdir()
# test_setquota()
# test_select()
# test_upload()
import os
import random
import shutil
import time
import uuid
from retval import RetVal
from pycryptostring import CryptoString
from pymensago.encryption import EncryptionPair
from pymensago.hash import blake2hash
from pymensago.serverconn import ServerConnection
from integration_setup import login_admin, regcode_admin, setup_test, init_server, init_user, \
init_user2, reset_top_dir
from tests.integration.integration_setup import funcname
server_response = {
'title' : 'Mensago Server Response',
'type' : 'object',
'required' : [ 'Code', 'Status', 'Info', 'Data' ],
'properties' : {
'Code' : {
'type' : 'integer'
},
'Status' : {
'type' : 'string'
},
'Info' : {
'type' : 'string'
},
'Data' : {
'type' : 'object'
}
}
}
def make_test_file(path: str, file_size=-1, file_name='') -> RetVal:
if file_size < 0:
file_size = random.randint(1,10) * 1024
if file_name == '' or not file_name:
file_name = f"{int(time.time())}.{file_size}.{str(uuid.uuid4())}"
try:
fhandle = open(os.path.join(path, file_name), 'w')
except Exception as e:
return RetVal().wrap_exception(e)
fhandle.write('0' * file_size)
fhandle.close()
return RetVal().set_values({ 'name':file_name, 'size':file_size })
def setup_testdir(name) -> str:
topdir = os.path.join(os.path.dirname(os.path.realpath(__file__)),'testfiles')
if not os.path.exists(topdir):
os.mkdir(topdir)
testdir = os.path.join(topdir, name)
while os.path.exists(testdir):
try:
shutil.rmtree(testdir)
except:
print("Waiting a second for test folder to unlock")
time.sleep(1.0)
os.mkdir(testdir)
return testdir
def test_copy():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')
os.mkdir(inner_dir)
ction': 'COPY',
'Data': {
'SourceFile': '/ wsp ' + dbdata['admin_wid'] + ' 1.1.01234567-89ab-cdef-0123-456789abcdef',
'DestDir': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_copy: #1 failed to handle nonexistent source file'
t_file(admin_dir, file_size=0x10_0001)
assert not status.error(), 'test_copy:
testfile1 = status['name']
conn.send_message({
'Action': 'COPY',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_copy:
# Subtest #3: Source path is a directory
conn.send_message({
'Action': 'COPY',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_copy:
# Subtest #4: Destination is file path
# Normally each file on the system has a unique name, but having a duplicate in this case
# won't matter
status = make_test_file(inner_dir, 102400, testfile1)
conn.send_message({
'Action': 'COPY',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_copy: #4 failed to handle file as destination'
)
cur.execute(f"UPDATE quotas SET quota=1 WHERE wid='{dbdata['admin_wid']}'")
dbconn.commit()
conn.send_message({
'Action': 'COPY',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 409, 'test_copy: #5 failed to handle quota limit'
cur = dbconn.cursor()
cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'")
dbconn.commit()
ge({
'Action': 'COPY',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_copy: #6 failed to succeed'
conn.disconnect()
def test_delete():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
_message({
'Action': 'DELETE',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} some_dir_name"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, f"{funcname()}: failed to handle bad path"
ction': 'DELETE',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, f"{funcname()}: #2 failed to handle nonexistent file"
# Subtest #3: Actual success
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert not status.error(), f"{funcname()}: #3 failed to create test file"
filename = status["name"]
conn.send_message({
'Action': 'DELETE',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {filename}"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, f"{funcname()}: #3 failed to delete file"
def test_download():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
# Subtest #1: Missing parameters
conn.send_message({'Action': 'DOWNLOAD','Data': {}})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_download:
# Subtest #2: Non-existent path
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222' +
' 1000.1000.22222222-2222-2222-2222-222222222222'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_download:
# Subtest #3: Actual success
status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid']), file_size=1000)
assert not status.error(), f"test_download: #3 failed to create test file: {status.info}"
testname = status['name']
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {testname}"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, 'test_download:
assert 'Size' in response['Data'] and response['Data']['Size'] == '1000', \
'test_download:
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
'Size': '1000'
}
})
rawdata = conn.read()
assert len(rawdata) == 1000, 'test_download:
# Set up an 'interrupted' transfer
status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid']), file_size=1000)
assert not status.error(), f"test_download: #4 failed to create test file: {status.info}"
testname = status['name']
# Subtest #7: Resume offset larger than size of data stored server-side
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
'Offset': '2500'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_download:
# Subtest #5: Resume interrupted transfer - exact match
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
'Offset': '500'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, 'test_download:
assert 'Size' in response['Data'] and response['Data']['Size'] == '1000', \
'test_download:
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
'Offset': '500',
'Size': '1000'
}
})
rawdata = conn.read()
assert len(rawdata) == 500, 'test_download:
assert blake2hash((('0' * 500) + rawdata).encode()) == \
'BLAKE2B-256:4(8V*JuSdLH
'test_download:
conn.disconnect()
def test_getquotainfo():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid']), file_size=1000)
assert not status.error(), f"Failed to create test workspace file: {status.info}"
conn.send_message({ 'Action': 'GETQUOTAINFO', 'Data': {} })
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_getquotainfo: failed to get quota information'
assert response['Data']['DiskUsage'] == '1000', 'test_getquotainfo: disk usage was incorrect'
assert response['Data']['QuotaSize'] == '0', \
"test_getquotainfo: admin quota wasn't unlimited"
conn.disconnect()
def test_list():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
({
'Action': 'LIST',
'Data': {
'Path': '/ 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_list: #1 failed to handle missing path'
path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert not status.error(), "test_list: #2 failed to create test file"
conn.send_message({
'Action': 'LIST',
'Data': {
'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_list: #2 failed to handle path as file'
.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
conn.send_message({
'Action': 'LIST',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_list: #3 failed to handle empty directory'
assert 'Files' in response['Data'] and len(response['Data']['Files']) == 0, \
'test_list: #3 failed to have empty response for empty directory'
,6):
tempname = '.'.join([str(1000 * i), '500', str(uuid.uuid4())])
try:
fhandle = open(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111',
tempname), 'w')
except Exception as e:
assert False, 'test_list: #4 failed to create test files: ' + e
fhandle.write('0' * 500)
fhandle.close()
conn.send_message({
'Action': 'LIST',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_list: #4 failed to handle non-empty directory'
assert 'Files' in response['Data'] and len(response['Data']['Files']) == 5, \
'test_list: #4 failed to list all files in directory'
T',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111',
'Time': '3000'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_list: #5 failed to handle non-empty directory'
assert 'Files' in response['Data'] and len(response['Data']['Files']) == 3, \
'test_list: #5 failed to filter files'
conn.disconnect()
def test_listdirs():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
({
'Action': 'LISTDIRS',
'Data': {
'Path': '/ 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_listdirs: #1 failed to handle missing path'
path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert not status.error(), "test_listdirs: #2 failed to create test file"
conn.send_message({
'Action': 'LIST',
'Data': {
'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_listdirs: #2 failed to handle path as file'
.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
conn.send_message({
'Action': 'LISTDIRS',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_listdirs: #3 failed to handle empty directory'
assert 'Directories' in response['Data'] and len(response['Data']['Directories']) == 0, \
'test_listdirs: #3 failed to have empty response for empty directory'
tempname = '-'.join([(str(i) * 8), (str(i) * 4), (str(i) * 4), (str(i) * 4), (str(i) * 12)])
try:
os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111', tempname))
except Exception as e:
assert False, 'test_listdirs: #4 failed to create test directories: ' + e
make_test_file(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
conn.send_message({
'Action': 'LISTDIRS',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_listdirs: #4 failed to handle non-empty directory'
assert 'Directories' in response['Data'] and len(response['Data']['Directories']) == 5, \
'test_list: #4 failed to list all subdirectories'
conn.disconnect()
def test_mkdir():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
'Action': 'MKDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' some_dir_name'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_mkdir: #1 failed to handle bad path'
': 'MKDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_mkdir: #2 failed to create legitimate directory'
tion': 'MKDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 408, 'test_mkdir: #3 failed to handle existing directory'
dmin_wid'],
'22222222-2222-2222-2222-222222222222',
'33333333-3333-3333-3333-333333333333',
'44444444-4444-4444-4444-444444444444',
'55555555-5555-5555-5555-555555555555'
])
conn.send_message({
'Action': 'MKDIR',
'Data': {
'Path': multipath
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_mkdir: #2 failed to create legitimate directory'
conn.disconnect()
def test_move():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')
os.mkdir(inner_dir)
ction': 'MOVE',
'Data': {
'SourceFile': '/ ' + dbdata['admin_wid'] + ' 1.1.01234567-89ab-cdef-0123-456789abcdef',
'DestDir': '/ ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_move: #1 failed to handle nonexistent source file'
)
assert not status.error(), 'test_move: #2 failed to create a test file'
testfile1 = status['name']
conn.send_message({
'Action': 'MOVE',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_move: #2 failed to handle nonexistent destination dir'
on': 'MOVE',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_move: #3 failed to handle directory as source'
le(inner_dir, 102400, testfile1)
conn.send_message({
'Action': 'MOVE',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_copy:
os.remove(os.path.join(inner_dir, status['name']))
# Subtest #5: Actual success
conn.send_message({
'Action': 'MOVE',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_copy:
conn.disconnect()
def test_replace():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Subtest #1: Bad old file path
conn.send_message({
'Action': 'REPLACE',
'Data': {
'OldPath': f"/ wsp {dbdata['admin_wid']} some_dir_name",
'NewPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111",
'Size': "1234",
'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, f"{funcname()}: #1 failed to handle bad old file path"
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
status = make_test_file(admin_dir)
filename = status['name']
# Subtest #2: Bad new file path
conn.send_message({
'Action': 'REPLACE',
'Data': {
'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}",
'NewPath': f"/ wsp {dbdata['admin_wid']} some_dir_name",
'Size': "1234",
'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, f"{funcname()}: #2 failed to handle bad new file path"
# Subtest #4: Destination directory doesn't exist
conn.send_message({
'Action': 'REPLACE',
'Data': {
'OldPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111",
'NewPath': "/ wsp 11111111-1111-1111-1111-111111111111",
'Size': "4321",
'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, f"{funcname()}: #4 failed to handle nonexistent destination dir"
est_file(admin_dir)
assert not status.error(), f"{funcname()}: #3 failed to create test file"
filename = status["name"]
conn.send_message({
'Action': 'REPLACE',
'Data': {
'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}",
'NewPath': f"/ wsp {dbdata['admin_wid']}",
'Size': "1000",
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, f'{funcname()}: #6 failed to proceed to file upload'
conn.write('0' * 1000)
response = conn.read_response(server_response)
assert response['Code'] == 200, f'{funcname()}: #6 failed to replace file'
conn.disconnect()
def test_rmdir():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
'Action': 'RMDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' some_dir_name',
'Recursive': 'False'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_rmdir: #1 failed to handle bad path'
ction': 'RMDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111',
'Recursive': 'False'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_rmdir:
# Subtest #3: Call fails because of non-empty directory
multipath = ' '.join(['/ wsp', dbdata['admin_wid'],
'22222222-2222-2222-2222-222222222222',
'33333333-3333-3333-3333-333333333333',
'44444444-4444-4444-4444-444444444444',
'55555555-5555-5555-5555-555555555555'
])
conn.send_message({
'Action': 'MKDIR',
'Data': {
'Path': multipath
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_rmdir:
conn.send_message({
'Action': 'RMDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222',
'Recursive': 'False'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 408, 'test_rmdir:
# Subtest #4: Actual success - non-recursively remove an empty directory
conn.send_message({
'Action': 'RMDIR',
'Data': {
'Path': multipath
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_rmdir:
def test_select():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Subtest #1: Nonexistent path
conn.send_message({
'Action': 'SELECT',
'Data': {
'Path': '/ 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_select:
# Subtest #2: Path is a file
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert not status.error(), "test_select: #2 failed to create test file"
conn.send_message({
'Action': 'SELECT',
'Data': {
'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_select:
# Subtest #3: Actual success
innerpath = ' '.join(['/ wsp', dbdata['admin_wid'], '22222222-2222-2222-2222-222222222222'])
conn.send_message({
'Action': 'MKDIR',
'Data': {
'Path': innerpath
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_select:
conn.send_message({
'Action': 'SELECT',
'Data': {
'Path': innerpath
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_select:
conn.disconnect()
def test_setquota():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
init_user2(dbdata, conn)
# Subtest #1: Bad sizes
conn.send_message({
'Action': 'SETQUOTA',
'Data': {
'Size': '0',
'Workspaces': '33333333-3333-3333-3333-333333333333'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_setquota: failed to handle bad size value'
conn.send_message({
'Action': 'SETQUOTA',
'Data': {
'Size': "Real programmers don't eat quiche ;)",
'Workspaces': '33333333-3333-3333-3333-333333333333'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_setquota: failed to handle bad size data type'
'Action': 'SETQUOTA',
'Data': {
'Size': "4096",
'Workspaces': '33333333-3333-3333-3333-333333333333,'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_setquota: failed to handle bad workspace list'
ge({
'Action': 'SETQUOTA',
'Data': {
'Size': "4096",
'Workspaces': '33333333-3333-3333-3333-333333333333, ' \
'44444444-4444-4444-4444-444444444444'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_setquota: failed to handle actual success'
conn.disconnect()
def test_upload():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
{
'Action': 'UPLOAD',
'Data': {
'Size': '1000',
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_upload: #1 failed to handle missing parameter'
{
'Action': 'UPLOAD',
'Data': {
'Size': '1000',
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_upload: #2 failed to handle non-existent path'
sage({
'Action': 'UPLOAD',
'Data': {
'Size': str(0x4000_0000 * 200),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
assert response['Code'] == 414, 'test_upload:
# Subtest #4: Insufficient quota remaining
# The administrator normally can't have a quota. We'll just fix that just for this one test
# *heh*
# Normally in Python direct string substitution is a recipe for SQL injection. We're not
cur = dbconn.cursor()
cur.execute(f"INSERT INTO quotas(wid, usage, quota) VALUES('{dbdata['admin_wid']}', 5100 , 5120)")
dbconn.commit()
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(0x10_0000 * 30), # 30MiB
'Hash': r'BLAKE2B-256:4(8V*JuSdLH
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
assert response['Code'] == 409, 'test_upload:
# We need this to be unlimited for later tests
cur = dbconn.cursor()
cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'")
dbconn.commit()
# Subtest #5: Hash mismatch
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:5(8V*JuSdLH
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, 'test_upload:
conn.write('0' * 1000)
response = conn.read_response(server_response)
assert response['Code'] == 410, 'test_upload:
# Subtest #6: Actual success
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, 'test_upload:
conn.write('0' * 1000)
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_upload: #6 failed to handle actual success'
# Set up an interrupted transfer
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
tempFileName = response['Data']['TempName']
assert response['Code'] == 100, 'test_upload: failed to proceed with interrupted upload'
assert tempFileName != '', 'test_upload: server did not return a temp file name'
conn.write('0' * 500)
del conn
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
login_admin(dbdata, conn)
# Subtest #7: Resume offset larger than size of data stored server-side
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid'],
'TempName': tempFileName,
'Offset': '2000'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_upload: #7 failed to handle offset larger than stored data'
# Subtest #8: Resume interrupted transfer - exact match
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid'],
'TempName': tempFileName,
'Offset': '500'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, 'test_upload: #8 failed to proceed with resumed upload'
conn.write('0' * 500)
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_upload: #8 failed to complete resumed upload'
# Set up one last interrupted transfer
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
tempFileName = response['Data']['TempName']
assert response['Code'] == 100, 'test_upload: failed to proceed with interrupted upload'
assert tempFileName != '', 'test_upload: server did not return a temp file name'
conn.write('0' * 500)
del conn
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
login_admin(dbdata, conn)
# Subtest #9: Overlapping resume
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid'],
'TempName': tempFileName,
'Offset': '400'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, 'test_upload: #9 failed to proceed with overlapping resume'
conn.write('0' * 600)
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_upload: #9 failed to complete overlapping resume'
conn.disconnect()
if __name__ == '__main__':
# test_copy()
# test_delete()
# test_download()
# test_getquotainfo()
# test_list()
# test_listdirs()
# test_mkdir()
# test_move()
test_replace()
# test_rmdir()
# test_setquota()
# test_select()
# test_upload()
| true
| true
|
79046f5b495206bc0d904acfd0a5c0fac1401b4a
| 15,554
|
py
|
Python
|
semanticmapping/sspspace.py
|
nsdumont/SemanticMapping
|
af97a452b3de30a9670536c7fa92c28a70fae44d
|
[
"MIT"
] | null | null | null |
semanticmapping/sspspace.py
|
nsdumont/SemanticMapping
|
af97a452b3de30a9670536c7fa92c28a70fae44d
|
[
"MIT"
] | null | null | null |
semanticmapping/sspspace.py
|
nsdumont/SemanticMapping
|
af97a452b3de30a9670536c7fa92c28a70fae44d
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy
from scipy.stats import qmc
from scipy.stats import special_ortho_group
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import warnings
from .ssp import SSP
class SSPSpace:
def __init__(self, domain_dim: int, ssp_dim: int, axis_matrix=None, phase_matrix=None,
domain_bounds=None, length_scale=1):
self.sample_points = None
self.sample_ssps = None
self.domain_dim = domain_dim
self.ssp_dim = ssp_dim
if not isinstance(length_scale, np.ndarray) or length_scale.size == 1:
self.length_scale = length_scale * np.ones((self.domain_dim,))
else:
assert length_scale.size == self.domain_dim
self.length_scale = np.asarray(length_scale)
if domain_bounds is not None:
assert domain_bounds.shape[0] == domain_dim
self.domain_bounds = domain_bounds
if (axis_matrix is None) & (phase_matrix is None):
raise RuntimeError("SSP spaces must be defined by either an axis matrix or a phase matrix. Use subclasses to construct spaces with predefined axes.")
elif (phase_matrix is None):
assert axis_matrix.shape[0] == ssp_dim, f'Expected ssp_dim {ssp_dim}, got {axis_matrix.shape[0]}.'
assert axis_matrix.shape[1] == domain_dim
self.axis_matrix = axis_matrix
self.phase_matrix = (-1.j*np.log(np.fft.fft(axis_matrix,axis=0))).real
elif (axis_matrix is None):
assert phase_matrix.shape[0] == ssp_dim
assert phase_matrix.shape[1] == domain_dim
self.phase_matrix = phase_matrix
self.axis_matrix = np.fft.ifft(np.exp(1.j*phase_matrix), axis=0).real
def update_lengthscale(self, scale):
if not isinstance(scale, np.ndarray) or scale.size == 1:
self.length_scale = scale * np.ones((self.domain_dim,))
else:
assert scale.size == self.domain_dim
self.length_scale = scale
assert self.length_scale.size == self.domain_dim
def encode(self,x):
assert x.shape[0] == self.domain_dim
ls_mat = np.atleast_2d(np.diag(1/self.length_scale.flatten()))
scaled_x = ls_mat @ x
data = np.fft.ifft( np.exp( 1.j * self.phase_matrix @ scaled_x ), axis=0 ).real
return data
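# Note (added): encode implements SSP fractional binding: with phase matrix A
# and length scales l, S(x) = IDFT{exp(i * A @ (x / l))}, applied column-wise,
# so each column of x yields one real-valued SSP vector of length ssp_dim.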
def encode_and_deriv(self,x):
ls_mat = np.atleast_2d(np.diag(1 / self.length_scale))
scaled_x = x @ ls_mat
fdata = np.exp( 1.j * self.phase_matrix @ scaled_x.T )
data = np.fft.ifft( fdata, axis=0 ).real
ddata = np.fft.ifft( 1.j * np.stack([np.diag(fdata[:,j]) for j in range(x.shape[0])]) @ self.phase_matrix @ ls_mat, axis=0 ).real
return data.T, ddata.T
def encode_fourier(self,x):
assert x.shape[0] == self.domain_dim
ls_mat = np.atleast_2d(np.diag(1/self.length_scale.flatten()))
scaled_x = ls_mat @ x
data = np.exp( 1.j * self.phase_matrix @ scaled_x )
return data
def encode_as_SSP(self,x):
assert x.shape[0] == self.domain_dim
return SSP(self.encode(x),self)
def decode(self,ssp,method='from-set', num_sample_pts=10000,from_set_method='grid',num_init_pts=10):
if method=='least-squares':
# problems due to complex log
x = np.linalg.lstsq(self.phase_matrix, (1.j*np.log(np.fft.fft(ssp,axis=0))).real)[0]
#raise NotImplementedError()
#fssp = np.fft.fft(ssp,axis=0)
#x = np.linalg.lstsq(np.tile(self.phase_matrix,(2,1)), np.hstack([np.arccos(fssp.real), np.arcsin(fssp.imag)]))
return x
elif method=='from-set':
sample_ssps, sample_points = self.get_sample_ssps(num_sample_pts,method=from_set_method)
sims = sample_ssps.T @ ssp
return sample_points[:,np.argmax(sims)]
elif method=='direct-optim':
x0 = self.decode(ssp, method='from-set',num_sample_pts=num_init_pts)
def min_func(x,target=ssp):
x_ssp = self.encode(np.atleast_2d(x))
return -np.inner(x_ssp, target).flatten()
soln = minimize(min_func, x0, method='L-BFGS-B')
return soln.x
elif method=='grad_descent':
x = self.decode(ssp, method='from-set',num_sample_pts=num_init_pts)
fssp = np.fft.fft(ssp,axis=0)
ls_mat = np.diag(1/self.length_scale.flatten())
for j in range(10):
scaled_x = ls_mat @ x
x_enc = np.exp(1.j * self.phase_matrix @ scaled_x)
grad_mat = (1.j * (self.phase_matrix @ ls_mat).T * x_enc)
grad = (grad_mat @ fssp.T).flatten()
x = x - 0.1*grad.real
return x
elif method=='nonlin-reg':
x = self.decode(ssp, method='from-set',num_sample_pts=num_init_pts)
fssp = np.fft.fft(ssp,axis=0)
dy = np.hstack([fssp.real, fssp.imag])
ls_mat = np.diag(1/self.length_scale.flatten())
for j in range(10):
J = np.vstack([self.phase_matrix * np.sin(self.phase_matrix @ x @ ls_mat).reshape(1,-1),
-self.phase_matrix * np.cos(self.phase_matrix @ x @ ls_mat).reshape(1,-1)])
soln = np.linalg.pinv(J.T @ J) @ J.T @ dy
x = x + soln
return x
else:
raise NotImplementedError()
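# Note (added): the decode strategies above trade accuracy for cost.
# 'from-set' is a nearest-neighbour lookup over sampled SSPs, 'direct-optim'
# refines that guess with L-BFGS-B, and 'grad_descent'/'nonlin-reg' run ten
# fixed Fourier-domain iterations (gradient steps and Gauss-Newton updates,
# respectively).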
def clean_up(self,ssp,**kwargs):
x = self.decode(ssp,**kwargs)
return self.encode(x)
def get_sample_points(self,num_points,method='grid'):
if self.domain_bounds is None:
bounds = np.vstack([-10*np.ones(self.domain_dim), 10*np.ones(self.domain_dim)]).T
else:
bounds = self.domain_bounds
if method=='grid':
n_per_dim = int(num_points**(1/self.domain_dim))
if n_per_dim**self.domain_dim != num_points:
warnings.warn((f'Evenly distributing points over a '
f'{self.domain_dim} grid requires numbers '
f'of samples to be powers of {self.domain_dim}.'
f'Requested {num_points} samples, returning '
f'{n_per_dim**self.domain_dim}'), RuntimeWarning)
### end if
xs = np.linspace(bounds[:,0],bounds[:,1],n_per_dim)
xxs = np.meshgrid(*[xs[:,i] for i in range(self.domain_dim)])
sample_points = np.array([x.reshape(-1) for x in xxs])
return sample_points
elif method=='sobol':
sampler = qmc.Sobol(d=self.domain_dim)
lbounds = bounds[:,0]
ubounds = bounds[:,1]
u_sample_points = sampler.random(num_points)
sample_points = qmc.scale(u_sample_points, lbounds, ubounds)
return sample_points.T
else:
raise NotImplementedError()
def get_sample_ssps(self,num_points,**kwargs): # make new if num_pts different than what's stored?
sample_points = self.get_sample_points(num_points,**kwargs)
sample_ssps = self.encode(sample_points)
return sample_ssps, sample_points
def identity(self):
s = np.zeros(self.ssp_dim)
s[0] = 1
return s
def bind(self,a,b):
return np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)).real
def invert(self,a):
return a[-np.arange(len(a))]
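# Note (added): invert returns a[0], a[-1], a[-2], ..., the involution that
# acts as the pseudo-inverse under circular convolution, so for a unitary
# vector a, bind(a, self.invert(a)) recovers the identity vector.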
def normalize(self,ssp):
return ssp/np.max([1e-6,np.sqrt(np.sum(ssp**2))])
def unitary(self,ssp):
fssp = np.fft.fft(ssp)
fssp = fssp/np.sqrt(fssp.real**2 + fssp.imag**2)
return np.fft.ifft(fssp).real
def unitary_fourier(self,fssp):
fssp = fssp/np.sqrt(fssp.real**2 + fssp.imag**2)
return fssp
def decode_path(self, ssp_path, N_ma=None, n_samples = 10000):
sample_ssps, sample_points = self.get_sample_ssps(n_samples)
path = np.zeros((ssp_path.shape[0], self.domain_dim))
max_sims = np.zeros(ssp_path.shape[0])
for i in range(ssp_path.shape[0]):
sims = sample_ssps.T @ ssp_path[i,:]
max_sims[i] = np.max(sims)
path[i,:] = sample_points[:,np.argmax(sims)]
return path, max_sims
def similarity_plot(self,ssp,n_grid=100,plot_type='heatmap',cmap="YlGnBu",ax=None,**kwargs):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
if self.domain_dim == 1:
xs = np.linspace(self.domain_bounds[0,0],self.domain_bounds[0,1], n_grid)
im=ax.plot(xs, self.encode(xs.reshape(1,-1)).T @ ssp)
ax.set_xlim(self.domain_bounds[0,0],self.domain_bounds[0,1])
elif self.domain_dim == 2:
xs = np.linspace(self.domain_bounds[0,0],self.domain_bounds[0,1], n_grid)
ys = np.linspace(self.domain_bounds[1,0],self.domain_bounds[1,1], n_grid)
X,Y = np.meshgrid(xs,ys)
sims = self.encode(np.vstack([X.reshape(-1),Y.reshape(-1)])).T @ ssp
if plot_type=='heatmap':
im=ax.pcolormesh(X,Y,sims.reshape(X.shape),cmap=cmap,**kwargs)
elif plot_type=='contour':
im=ax.contour(X,Y,sims.reshape(X.shape),cmap=cmap,**kwargs)
elif plot_type=='contourf':
im=ax.contourf(X,Y,sims.reshape(X.shape),cmap=cmap,**kwargs)
ax.set_xlim(self.domain_bounds[0,0],self.domain_bounds[0,1])
ax.set_ylim(self.domain_bounds[1,0],self.domain_bounds[1,1])
else:
raise NotImplementedError()
return im
class RandomSSPSpace(SSPSpace):
def __init__(self, domain_dim: int, ssp_dim: int, domain_bounds=None, length_scale=1, rng=np.random.default_rng()):
partial_phases = rng.random((ssp_dim//2,domain_dim))*2*np.pi - np.pi
axis_matrix = _constructaxisfromphases(partial_phases)
super().__init__(domain_dim,ssp_dim,axis_matrix=axis_matrix,
domain_bounds=domain_bounds,length_scale=length_scale)
class HexagonalSSPSpace(SSPSpace):
def __init__(self, domain_dim:int,ssp_dim: int=151, n_rotates:int=5, n_scales:int=5,
scale_min=2*np.pi/np.sqrt(6) - 0.5, scale_max=2*np.pi/np.sqrt(6) + 0.5,
domain_bounds=None, length_scale=1):
if (n_rotates==5) & (n_scales==5) & (ssp_dim != 151):
n_rotates = int(np.max([1,np.sqrt((ssp_dim-1)/(2*(domain_dim+1)))]))
n_scales = n_rotates
phases_hex = np.hstack([np.sqrt(1+ 1/domain_dim)*np.identity(domain_dim) - (domain_dim**(-3/2))*(np.sqrt(domain_dim+1) + 1),
(domain_dim**(-1/2))*np.ones((domain_dim,1))]).T
self.grid_basis_dim = domain_dim + 1
self.num_grids = n_rotates*n_scales
scales = np.linspace(scale_min,scale_max,n_scales)
phases_scaled = np.vstack([phases_hex*i for i in scales])
if (n_rotates==1):
phases_scaled_rotated = phases_scaled
elif (domain_dim==1):
scales = np.linspace(scale_min,scale_max,n_scales+n_rotates)
phases_scaled_rotated = np.vstack([phases_hex*i for i in scales])
elif (domain_dim == 2):
angles = np.linspace(0,2*np.pi/3,n_rotates)
R_mats = np.stack([np.stack([np.cos(angles), -np.sin(angles)],axis=1),
np.stack([np.sin(angles), np.cos(angles)], axis=1)], axis=1)
phases_scaled_rotated = (R_mats @ phases_scaled.T).transpose(0,2,1).reshape(-1,domain_dim)
else:
R_mats = special_ortho_group.rvs(domain_dim, size=n_rotates)
phases_scaled_rotated = (R_mats @ phases_scaled.T).transpose(0,2,1).reshape(-1,domain_dim)
axis_matrix = _constructaxisfromphases(phases_scaled_rotated)
ssp_dim = axis_matrix.shape[0]
super().__init__(domain_dim,ssp_dim,axis_matrix=axis_matrix,
domain_bounds=domain_bounds,length_scale=length_scale)
def sample_grid_encoders(self, n):
sample_pts = self.get_sample_points(n,method='sobol')
N = self.num_grids
if N < n:
sorts = np.hstack([np.arange(N), np.random.randint(0, N - 1, size = n - N)])
else:
sorts = np.arange(n)
encoders = np.zeros((self.ssp_dim,n))
for i in range(n):
sub_mat = _get_sub_SSP(sorts[i],N,sublen=self.grid_basis_dim)
proj_mat = _proj_sub_SSP(sorts[i],N,sublen=self.grid_basis_dim)
sub_space = SSPSpace(self.domain_dim,2*self.grid_basis_dim + 1, axis_matrix= sub_mat @ self.axis_matrix)
encoders[:,i] = N * proj_mat @ sub_space.encode(sample_pts[:,i])
return encoders
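# Note (added): each encoder slices a single grid module out of the full
# representation with a selection matrix A_n (_get_sub_SSP) and projects it
# back into the full space with B_n (_proj_sub_SSP); both are defined below.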
def _constructaxisfromphases(K):
d = K.shape[0]
n = K.shape[1]
axes = np.ones((d*2 + 1,n))
for i in range(n):
F = np.ones((d*2 + 1,), dtype="complex")
F[0:d] = np.exp(1.j*K[:,i])
F[-d:] = np.flip(np.conj(F[0:d]))
F = np.fft.ifftshift(F)
axes[:,i] = np.fft.ifft(F).real
return axes
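# Note (added): F is assembled with conjugate symmetry (the last d entries
# are flipped conjugates of the first d, around a real zero-frequency term),
# which is what makes np.fft.ifft(F) real up to rounding error.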
def _get_sub_FourierSSP(n, N, sublen=3):
# Return a matrix, \bar{A}_n
# Consider the multi scale representation (S_{total}) and sub vectors (S_n) described in the paper
# Then
# \bar{A}_n F{S_{total}} = F{S_n}
# i.e. pick out the sub vector in the Fourier domain
tot_len = 2*sublen*N + 1
FA = np.zeros((2*sublen + 1, tot_len))
FA[0:sublen, sublen*n:sublen*(n+1)] = np.eye(sublen)
FA[sublen, sublen*N] = 1
FA[sublen+1:, tot_len - np.arange(sublen*(n+1),sublen*n,-1)] = np.eye(sublen)
return FA
def _get_sub_SSP(n,N,sublen=3):
# Return a matrix, A_n
# Consider the multi scale representation (S_{total}) and sub vectors (S_n) described in the paper
# Then
# A_n S_{total} = S_n
# i.e. pick out the sub vector in the time domain
tot_len = 2*sublen*N + 1
FA = _get_sub_FourierSSP(n,N,sublen=sublen)
W = np.fft.fft(np.eye(tot_len))
invW = np.fft.ifft(np.eye(2*sublen + 1))
A = invW @ np.fft.ifftshift(FA) @ W
return A.real
def _proj_sub_FourierSSP(n,N,sublen=3):
# Return a matrix, \bar{B}_n
# Consider the multi scale representation (S_{total}) and sub vectors (S_n) described in the paper
# Then
# \sum_n \bar{B}_n F{S_{n}} = F{S_{total}}
# i.e. project the sub vector in the Fourier domain such that summing all such projections gives the full vector in Fourier domain
tot_len = 2*sublen*N + 1
FB = np.zeros((2*sublen + 1, tot_len))
FB[0:sublen, sublen*n:sublen*(n+1)] = np.eye(sublen)
FB[sublen, sublen*N] = 1/N # all sub vectors have a "1" zero freq term so scale it so full vector will have 1
FB[sublen+1:, tot_len - np.arange(sublen*(n+1),sublen*n,-1)] = np.eye(sublen)
return FB.T
def _proj_sub_SSP(n,N,sublen=3):
# Return a matrix, B_n
# Consider the multi scale representation (S_{total}) and sub vectors (S_n) described in the paper
# Then
# \sum_n B_n S_{n} = S_{total}
# i.e. project the sub vector in the time domain such that summing all such projections gives the full vector
tot_len = 2*sublen*N + 1
FB = _proj_sub_FourierSSP(n,N,sublen=sublen)
invW = np.fft.ifft(np.eye(tot_len))
W = np.fft.fft(np.eye(2*sublen + 1))
B = invW @ np.fft.ifftshift(FB) @ W
return B.real
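# Usage sketch (added; hedged, parameter values below are illustrative only):
#
#   space = HexagonalSSPSpace(domain_dim=2, ssp_dim=151,
#                             domain_bounds=np.array([[-5, 5], [-5, 5]]))
#   phi = space.encode(np.array([[1.0], [2.0]]))        # one (ssp_dim, 1) SSP
#   x_hat = space.decode(phi[:, 0], method='from-set')  # approximate inverse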
| 44.44
| 158
| 0.594317
|
import numpy as np
import scipy
from scipy.stats import qmc
from scipy.stats import special_ortho_group
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import warnings
from .ssp import SSP
class SSPSpace:
def __init__(self, domain_dim: int, ssp_dim: int, axis_matrix=None, phase_matrix=None,
domain_bounds=None, length_scale=1):
self.sample_points = None
self.sample_ssps = None
self.domain_dim = domain_dim
self.ssp_dim = ssp_dim
if not isinstance(length_scale, np.ndarray) or length_scale.size == 1:
self.length_scale = length_scale * np.ones((self.domain_dim,))
else:
assert length_scale.size == self.domain_dim
self.length_scale = np.asarray(length_scale)
if domain_bounds is not None:
assert domain_bounds.shape[0] == domain_dim
self.domain_bounds = domain_bounds
if (axis_matrix is None) & (phase_matrix is None):
raise RuntimeError("SSP spaces must be defined by either an axis matrix or a phase matrix. Use subclasses to construct spaces with predefined axes.")
elif (phase_matrix is None):
assert axis_matrix.shape[0] == ssp_dim, f'Expected ssp_dim {ssp_dim}, got {axis_matrix.shape[0]}.'
assert axis_matrix.shape[1] == domain_dim
self.axis_matrix = axis_matrix
self.phase_matrix = (-1.j*np.log(np.fft.fft(axis_matrix,axis=0))).real
elif (axis_matrix is None):
assert phase_matrix.shape[0] == ssp_dim
assert phase_matrix.shape[1] == domain_dim
self.phase_matrix = phase_matrix
self.axis_matrix = np.fft.ifft(np.exp(1.j*phase_matrix), axis=0).real
def update_lengthscale(self, scale):
if not isinstance(scale, np.ndarray) or scale.size == 1:
self.length_scale = scale * np.ones((self.domain_dim,))
else:
assert scale.size == self.domain_dim
self.length_scale = scale
assert self.length_scale.size == self.domain_dim
def encode(self,x):
assert x.shape[0] == self.domain_dim
ls_mat = np.atleast_2d(np.diag(1/self.length_scale.flatten()))
scaled_x = ls_mat @ x
data = np.fft.ifft( np.exp( 1.j * self.phase_matrix @ scaled_x ), axis=0 ).real
return data
def encode_and_deriv(self,x):
ls_mat = np.atleast_2d(np.diag(1 / self.length_scale))
scaled_x = x @ ls_mat
fdata = np.exp( 1.j * self.phase_matrix @ scaled_x.T )
data = np.fft.ifft( fdata, axis=0 ).real
ddata = np.fft.ifft( 1.j * np.stack([np.diag(fdata[:,j]) for j in range(x.shape[0])]) @ self.phase_matrix @ ls_mat, axis=0 ).real
return data.T, ddata.T
def encode_fourier(self,x):
assert x.shape[0] == self.domain_dim
ls_mat = np.atleast_2d(np.diag(1/self.length_scale.flatten()))
scaled_x = ls_mat @ x
data = np.exp( 1.j * self.phase_matrix @ scaled_x )
return data
def encode_as_SSP(self,x):
assert x.shape[0] == self.domain_dim
return SSP(self.encode(x),self)
def decode(self,ssp,method='from-set', num_sample_pts=10000,from_set_method='grid',num_init_pts=10):
if method=='least-squares':
x = np.linalg.lstsq(self.phase_matrix, (1.j*np.log(np.fft.fft(ssp,axis=0))).real)[0]
return x
elif method=='from-set':
sample_ssps, sample_points = self.get_sample_ssps(num_sample_pts,method=from_set_method)
sims = sample_ssps.T @ ssp
return sample_points[:,np.argmax(sims)]
elif method=='direct-optim':
x0 = self.decode(ssp, method='from-set',num_sample_pts=num_init_pts)
def min_func(x,target=ssp):
x_ssp = self.encode(np.atleast_2d(x))
return -np.inner(x_ssp, target).flatten()
soln = minimize(min_func, x0, method='L-BFGS-B')
return soln.x
elif method=='grad_descent':
x = self.decode(ssp, method='from-set',num_sample_pts=num_init_pts)
fssp = np.fft.fft(ssp,axis=0)
ls_mat = np.diag(1/self.length_scale.flatten())
for j in range(10):
scaled_x = ls_mat @ x
x_enc = np.exp(1.j * self.phase_matrix @ scaled_x)
grad_mat = (1.j * (self.phase_matrix @ ls_mat).T * x_enc)
grad = (grad_mat @ fssp.T).flatten()
x = x - 0.1*grad.real
return x
elif method=='nonlin-reg':
x = self.decode(ssp, method='from-set',num_sample_pts=num_init_pts)
fssp = np.fft.fft(ssp,axis=0)
dy = np.hstack([fssp.real, fssp.imag])
ls_mat = np.diag(1/self.length_scale.flatten())
for j in range(10):
J = np.vstack([self.phase_matrix * np.sin(self.phase_matrix @ x @ ls_mat).reshape(1,-1),
-self.phase_matrix * np.cos(self.phase_matrix @ x @ ls_mat).reshape(1,-1)])
soln = np.linalg.pinv(J.T @ J) @ J.T @ dy
x = x + soln
return x
else:
raise NotImplementedError()
def clean_up(self,ssp,**kwargs):
x = self.decode(ssp,**kwargs)
return self.encode(x)
def get_sample_points(self,num_points,method='grid'):
if self.domain_bounds is None:
bounds = np.vstack([-10*np.ones(self.domain_dim), 10*np.ones(self.domain_dim)]).T
else:
bounds = self.domain_bounds
if method=='grid':
n_per_dim = int(num_points**(1/self.domain_dim))
if n_per_dim**self.domain_dim != num_points:
warnings.warn((f'Evenly distributing points over a '
f'{self.domain_dim} grid requires numbers '
f'of samples to be powers of {self.domain_dim}.'
f'Requested {num_points} samples, returning '
f'{n_per_dim**self.domain_dim}'), RuntimeWarning)
xs = np.linspace(bounds[:,0],bounds[:,1],n_per_dim)
xxs = np.meshgrid(*[xs[:,i] for i in range(self.domain_dim)])
sample_points = np.array([x.reshape(-1) for x in xxs])
return sample_points
elif method=='sobol':
sampler = qmc.Sobol(d=self.domain_dim)
lbounds = bounds[:,0]
ubounds = bounds[:,1]
u_sample_points = sampler.random(num_points)
sample_points = qmc.scale(u_sample_points, lbounds, ubounds)
return sample_points.T
else:
raise NotImplementedError()
def get_sample_ssps(self,num_points,**kwargs):
sample_points = self.get_sample_points(num_points,**kwargs)
sample_ssps = self.encode(sample_points)
return sample_ssps, sample_points
def identity(self):
s = np.zeros(self.ssp_dim)
s[0] = 1
return s
def bind(self,a,b):
return np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)).real
def invert(self,a):
return a[-np.arange(len(a))]
def normalize(self,ssp):
return ssp/np.max([1e-6,np.sqrt(np.sum(ssp**2))])
def unitary(self,ssp):
fssp = np.fft.fft(ssp)
fssp = fssp/np.sqrt(fssp.real**2 + fssp.imag**2)
return np.fft.ifft(fssp).real
def unitary_fourier(self,fssp):
fssp = fssp/np.sqrt(fssp.real**2 + fssp.imag**2)
return fssp
def decode_path(self, ssp_path, N_ma=None, n_samples = 10000):
sample_ssps, sample_points = self.get_sample_ssps(n_samples)
path = np.zeros((ssp_path.shape[0], self.domain_dim))
max_sims = np.zeros(ssp_path.shape[0])
for i in range(ssp_path.shape[0]):
sims = sample_ssps.T @ ssp_path[i,:]
max_sims[i] = np.max(sims)
path[i,:] = sample_points[:,np.argmax(sims)]
return path, max_sims
def similarity_plot(self,ssp,n_grid=100,plot_type='heatmap',cmap="YlGnBu",ax=None,**kwargs):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
if self.domain_dim == 1:
xs = np.linspace(self.domain_bounds[0,0],self.domain_bounds[0,1], n_grid)
im=ax.plot(xs, self.encode(xs.reshape(1,-1)).T @ ssp)
ax.set_xlim(self.domain_bounds[0,0],self.domain_bounds[0,1])
elif self.domain_dim == 2:
xs = np.linspace(self.domain_bounds[0,0],self.domain_bounds[0,1], n_grid)
ys = np.linspace(self.domain_bounds[1,0],self.domain_bounds[1,1], n_grid)
X,Y = np.meshgrid(xs,ys)
sims = self.encode(np.vstack([X.reshape(-1),Y.reshape(-1)])).T @ ssp
if plot_type=='heatmap':
im=ax.pcolormesh(X,Y,sims.reshape(X.shape),cmap=cmap,**kwargs)
elif plot_type=='contour':
im=ax.contour(X,Y,sims.reshape(X.shape),cmap=cmap,**kwargs)
elif plot_type=='contourf':
im=ax.contourf(X,Y,sims.reshape(X.shape),cmap=cmap,**kwargs)
ax.set_xlim(self.domain_bounds[0,0],self.domain_bounds[0,1])
ax.set_ylim(self.domain_bounds[1,0],self.domain_bounds[1,1])
else:
raise NotImplementedError()
return im
class RandomSSPSpace(SSPSpace):
def __init__(self, domain_dim: int, ssp_dim: int, domain_bounds=None, length_scale=1, rng=np.random.default_rng()):
partial_phases = rng.random((ssp_dim//2,domain_dim))*2*np.pi - np.pi
axis_matrix = _constructaxisfromphases(partial_phases)
super().__init__(domain_dim,ssp_dim,axis_matrix=axis_matrix,
domain_bounds=domain_bounds,length_scale=length_scale)
class HexagonalSSPSpace(SSPSpace):
def __init__(self, domain_dim:int,ssp_dim: int=151, n_rotates:int=5, n_scales:int=5,
scale_min=2*np.pi/np.sqrt(6) - 0.5, scale_max=2*np.pi/np.sqrt(6) + 0.5,
domain_bounds=None, length_scale=1):
if (n_rotates==5) & (n_scales==5) & (ssp_dim != 151):
n_rotates = int(np.max([1,np.sqrt((ssp_dim-1)/(2*(domain_dim+1)))]))
n_scales = n_rotates
phases_hex = np.hstack([np.sqrt(1+ 1/domain_dim)*np.identity(domain_dim) - (domain_dim**(-3/2))*(np.sqrt(domain_dim+1) + 1),
(domain_dim**(-1/2))*np.ones((domain_dim,1))]).T
self.grid_basis_dim = domain_dim + 1
self.num_grids = n_rotates*n_scales
scales = np.linspace(scale_min,scale_max,n_scales)
phases_scaled = np.vstack([phases_hex*i for i in scales])
if (n_rotates==1):
phases_scaled_rotated = phases_scaled
elif (domain_dim==1):
scales = np.linspace(scale_min,scale_max,n_scales+n_rotates)
phases_scaled_rotated = np.vstack([phases_hex*i for i in scales])
elif (domain_dim == 2):
angles = np.linspace(0,2*np.pi/3,n_rotates)
R_mats = np.stack([np.stack([np.cos(angles), -np.sin(angles)],axis=1),
np.stack([np.sin(angles), np.cos(angles)], axis=1)], axis=1)
phases_scaled_rotated = (R_mats @ phases_scaled.T).transpose(0,2,1).reshape(-1,domain_dim)
else:
R_mats = special_ortho_group.rvs(domain_dim, size=n_rotates)
phases_scaled_rotated = (R_mats @ phases_scaled.T).transpose(0,2,1).reshape(-1,domain_dim)
axis_matrix = _constructaxisfromphases(phases_scaled_rotated)
ssp_dim = axis_matrix.shape[0]
super().__init__(domain_dim,ssp_dim,axis_matrix=axis_matrix,
domain_bounds=domain_bounds,length_scale=length_scale)
def sample_grid_encoders(self, n):
sample_pts = self.get_sample_points(n,method='sobol')
N = self.num_grids
if N < n:
sorts = np.hstack([np.arange(N), np.random.randint(0, N - 1, size = n - N)])
else:
sorts = np.arange(n)
encoders = np.zeros((self.ssp_dim,n))
for i in range(n):
sub_mat = _get_sub_SSP(sorts[i],N,sublen=self.grid_basis_dim)
proj_mat = _proj_sub_SSP(sorts[i],N,sublen=self.grid_basis_dim)
sub_space = SSPSpace(self.domain_dim,2*self.grid_basis_dim + 1, axis_matrix= sub_mat @ self.axis_matrix)
encoders[:,i] = N * proj_mat @ sub_space.encode(sample_pts[:,i])
return encoders
def _constructaxisfromphases(K):
d = K.shape[0]
n = K.shape[1]
axes = np.ones((d*2 + 1,n))
for i in range(n):
F = np.ones((d*2 + 1,), dtype="complex")
F[0:d] = np.exp(1.j*K[:,i])
F[-d:] = np.flip(np.conj(F[0:d]))
F = np.fft.ifftshift(F)
axes[:,i] = np.fft.ifft(F).real
return axes
def _get_sub_FourierSSP(n, N, sublen=3):
tot_len = 2*sublen*N + 1
FA = np.zeros((2*sublen + 1, tot_len))
FA[0:sublen, sublen*n:sublen*(n+1)] = np.eye(sublen)
FA[sublen, sublen*N] = 1
FA[sublen+1:, tot_len - np.arange(sublen*(n+1),sublen*n,-1)] = np.eye(sublen)
return FA
def _get_sub_SSP(n,N,sublen=3):
tot_len = 2*sublen*N + 1
FA = _get_sub_FourierSSP(n,N,sublen=sublen)
W = np.fft.fft(np.eye(tot_len))
invW = np.fft.ifft(np.eye(2*sublen + 1))
A = invW @ np.fft.ifftshift(FA) @ W
return A.real
def _proj_sub_FourierSSP(n,N,sublen=3):
tot_len = 2*sublen*N + 1
FB = np.zeros((2*sublen + 1, tot_len))
FB[0:sublen, sublen*n:sublen*(n+1)] = np.eye(sublen)
FB[sublen, sublen*N] = 1/N
FB[sublen+1:, tot_len - np.arange(sublen*(n+1),sublen*n,-1)] = np.eye(sublen)
return FB.T
def _proj_sub_SSP(n,N,sublen=3):
tot_len = 2*sublen*N + 1
FB = _proj_sub_FourierSSP(n,N,sublen=sublen)
invW = np.fft.ifft(np.eye(tot_len))
W = np.fft.fft(np.eye(2*sublen + 1))
B = invW @ np.fft.ifftshift(FB) @ W
return B.real
| true
| true
|
790470298781d11b85153ed42699fa2112365e5e
| 1,497
|
py
|
Python
|
pyvdk/api/categories/utils.py
|
UT1C/pyVDK
|
168177c4006acc7f57be36f189bee8101e10253d
|
[
"MIT"
] | 16
|
2020-11-24T18:27:59.000Z
|
2021-05-14T19:25:44.000Z
|
pyvdk/api/categories/utils.py
|
UT1C/pyVDK
|
168177c4006acc7f57be36f189bee8101e10253d
|
[
"MIT"
] | 1
|
2021-04-21T14:35:55.000Z
|
2021-06-26T04:18:44.000Z
|
pyvdk/api/categories/utils.py
|
UT1C/pyVDK
|
168177c4006acc7f57be36f189bee8101e10253d
|
[
"MIT"
] | 2
|
2020-12-03T16:56:31.000Z
|
2020-12-19T16:28:58.000Z
|
# -*- coding: utf-8 -*-
#
from typing import Optional
from ..category import Category
class Utils(Category):
def check_link(
self,
url: str = None,
**kwargs
) -> dict:
return self._request("checkLink", locals())
def delete_from_last_shortened(
self,
key: str = None,
**kwargs
) -> dict:
return self._request("deleteFromLastShortened", locals())
def get_last_shortened_links(
self,
count: Optional[int] = None,
offset: Optional[int] = None,
**kwargs
) -> dict:
return self._request("getLastShortenedLinks", locals())
def get_link_stats(
self,
key: str = None,
source: Optional[str] = None,
access_key: Optional[str] = None,
interval: Optional[str] = None,
intervals_count: Optional[int] = None,
extended: Optional[bool] = None,
**kwargs
) -> dict:
return self._request("getLinkStats", locals())
def get_server_time(
self,
**kwargs
) -> dict:
return self._request("getServerTime", locals())
def get_short_link(
self,
url: str = None,
private: Optional[bool] = None,
**kwargs
) -> dict:
return self._request("getShortLink", locals())
def resolve_screen_name(
self,
screen_name: str = None,
**kwargs
) -> dict:
return self._request("resolveScreenName", locals())
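# Note (added): every wrapper above hands locals() to Category._request, so a
# method's own arguments become the parameters of the matching VK API call;
# stripping 'self' and folding in **kwargs is assumed to happen inside the
# base Category class, which is not shown in this file.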
| 23.390625
| 65
| 0.560454
|
from typing import Optional
from ..category import Category
class Utils(Category):
def check_link(
self,
url: str = None,
**kwargs
) -> dict:
return self._request("checkLink", locals())
def delete_from_last_shortened(
self,
key: str = None,
**kwargs
) -> dict:
return self._request("deleteFromLastShortened", locals())
def get_last_shortened_links(
self,
count: Optional[int] = None,
offset: Optional[int] = None,
**kwargs
) -> dict:
return self._request("getLastShortenedLinks", locals())
def get_link_stats(
self,
key: str = None,
source: Optional[str] = None,
access_key: Optional[str] = None,
interval: Optional[str] = None,
intervals_count: Optional[int] = None,
extended: Optional[bool] = None,
**kwargs
) -> dict:
return self._request("getLinkStats", locals())
def get_server_time(
self,
**kwargs
) -> dict:
return self._request("getServerTime", locals())
def get_short_link(
self,
url: str = None,
private: Optional[bool] = None,
**kwargs
) -> dict:
return self._request("getShortLink", locals())
def resolve_screen_name(
self,
screen_name: str = None,
**kwargs
) -> dict:
return self._request("resolveScreenName", locals())
| true
| true
|
790471054c1b09baa754315f543aef0cde3d50c8
| 648
|
py
|
Python
|
test/test_contactdeletetest.py
|
winsok/pythonlearning
|
b74e31a2c3e830d0563ca90c5ba32d59eaf4e74d
|
[
"Apache-2.0"
] | null | null | null |
test/test_contactdeletetest.py
|
winsok/pythonlearning
|
b74e31a2c3e830d0563ca90c5ba32d59eaf4e74d
|
[
"Apache-2.0"
] | null | null | null |
test/test_contactdeletetest.py
|
winsok/pythonlearning
|
b74e31a2c3e830d0563ca90c5ba32d59eaf4e74d
|
[
"Apache-2.0"
] | null | null | null |
from model.contact import Contact
import random
def test_delete_some_contact(app, db, check_ui):
if app.contacts.count() == 0:
app.contacts.create_new_contact(Contact(firstname="crab"))
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
app.contacts.delete_contact_by_id(contact.id)
new_contacts = db.get_contact_list()
assert len(old_contacts) - 1 == app.contacts.count()
old_contacts.remove(contact)
assert old_contacts == new_contacts
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contacts.get_contacts_list(), key=Contact.id_or_max)
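# Note (added): the test seeds one contact when the list is empty, deletes a
# randomly chosen contact by id, and verifies the result against the database;
# the slower UI comparison only runs when the check_ui fixture flag is set.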
| 36
| 125
| 0.739198
|
from model.contact import Contact
import random
def test_delete_some_contact(app, db, check_ui):
if app.contacts.count() == 0:
app.contacts.create_new_contact(Contact(firstname="crab"))
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
app.contacts.delete_contact_by_id(contact.id)
new_contacts = db.get_contact_list()
assert len(old_contacts) - 1 == app.contacts.count()
old_contacts.remove(contact)
assert old_contacts == new_contacts
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contacts.get_contacts_list(), key=Contact.id_or_max)
| true
| true
|
79047169f4d03379b0dde84408f6ba416ba2cc8a
| 6,507
|
py
|
Python
|
funkyvalidate/tests/test_interfaces.py
|
OaklandPeters/funkyvalidate
|
10fb69173d7912769721b06794a57a93a03dacde
|
[
"MIT"
] | null | null | null |
funkyvalidate/tests/test_interfaces.py
|
OaklandPeters/funkyvalidate
|
10fb69173d7912769721b06794a57a93a03dacde
|
[
"MIT"
] | null | null | null |
funkyvalidate/tests/test_interfaces.py
|
OaklandPeters/funkyvalidate
|
10fb69173d7912769721b06794a57a93a03dacde
|
[
"MIT"
] | null | null | null |
import os
import unittest
import abc
from funkyvalidate.examples.existing_directory import ExistingDirectory
from funkyvalidate.examples.existing_file import ExistingFile
from funkyvalidate import InterfaceType, meets
form_path = lambda *parts: os.path.abspath(os.path.join(*parts))
test_dir = form_path(__file__, '..')
example_dir = form_path(test_dir, 'test_example_dir')
nonexistant_dir = form_path(test_dir, 'nonexistant')
test_init_file = form_path(test_dir, '__init__.py')
# test_dir = os.path.abspath(os.path.join(__file__, '..', 'tests'))
# example_dir = os.path.abspath(os.path.join(test_dir, 'test_example_dir'))
# nonexistant_dir = os.path.abspath(os.path.join(test_dir, 'nonexistant'))
# test_init_file = os.path.abspath(os.path.join(test))
# Rebase current directory
os.chdir(test_dir)
class ExamplesTests(unittest.TestCase):
def setUp(self):
self.assertTrue(os.path.exists(test_init_file))
def test_existingdirectory(self):
self.assertTrue(isinstance(example_dir, ExistingDirectory))
self.assertFalse(isinstance(nonexistant_dir, ExistingDirectory))
self.assertFalse(isinstance(test_init_file, ExistingDirectory))
# Test constructor
self.assertTrue(ExistingDirectory(example_dir) == example_dir)
self.assertRaises(TypeError, ExistingDirectory, 321.321)
self.assertRaises(TypeError, ExistingDirectory, [example_dir])
self.assertRaises(IOError, ExistingDirectory, nonexistant_dir)
self.assertRaises(IOError, ExistingDirectory, test_init_file)
def test_existingfile(self):
"""Test the value-type interface for existing files."""
self.assertTrue(isinstance(test_init_file, ExistingFile))
self.assertFalse(isinstance(example_dir, ExistingFile))
# Test constructor
self.assertTrue(ExistingFile(test_init_file) == test_init_file)
self.assertRaises(TypeError, ExistingFile, 12)
self.assertRaises(IOError, ExistingFile, 'wargarbl')
self.assertRaises(IOError, ExistingFile, nonexistant_dir)
class MyInterface(InterfaceType):
@abc.abstractproperty
def first_name(self):
pass
class YesClass(object):
def __init__(self):
pass
first_name = "foo"
yes = YesClass()
class AlsoClass(object):
def __init__(self):
self.first_name = "bar"
also = AlsoClass()
class NoClass(object):
pass
no = NoClass()
class WeirdClass(object):
def __init__(self):
self.first_name = abc.abstractmethod(lambda self: NotImplemented)
first_name = "bazinga"
weird = WeirdClass()
class FirstChild(MyInterface):
def __init__(self):
self.other_stuff = "boo"
# can't instantiate FirstChild
class SecondChild(FirstChild):
first_name = "fixed"
second_child = SecondChild()
# class Weirder(MyInterface):
# first_name = abc.abstractmethod(lambda self: NotImplemented)
# def __init__(self):
# self.first_name = abc.abstractmethod(lambda self: NotImplemented)
class CommutativeFirst(InterfaceType):
first_name = abc.abstractmethod(lambda self: NotImplemented)
class CommutativeSecond(CommutativeFirst):
def __init__(self):
pass
first_name = "booo"
commutative = CommutativeSecond()
class CommutativeFails(CommutativeFirst):
"""This cannot be instantiated, even though the instance
overrides first_name. I believe this to be buggy behavior, however,
it is shared by abc.ABCMeta (i.e., it's not my fault).
"""
def __init__(self):
self.first_name = "boo"
class InterfaceTests(unittest.TestCase):
"""These test __instancecheck__ and __subclasscheck__, which depend on the meets function.
"""
def test_myinterface_itself(self):
self.assertFalse(meets(MyInterface, MyInterface))
self.assertFalse(issubclass(MyInterface, MyInterface))
self.assertRaises(TypeError, MyInterface)
def test_also_class(self):
"""
AlsoClass does not meet the interface as a class, but does once instantiated.
"""
self.assertFalse(meets(AlsoClass, MyInterface))
self.assertTrue(meets(also, MyInterface))
self.assertTrue(isinstance(also, MyInterface))
self.assertFalse(issubclass(AlsoClass, MyInterface))
def test_yes_class(self):
"""Meets interface"""
self.assertTrue(meets(YesClass, MyInterface))
self.assertTrue(meets(yes, MyInterface))
self.assertTrue(isinstance(yes, MyInterface))
self.assertTrue(issubclass(YesClass, MyInterface))
def test_no_class(self):
"""Does not meet interface."""
self.assertFalse(meets(NoClass, MyInterface))
self.assertFalse(meets(no, MyInterface))
self.assertFalse(isinstance(no, MyInterface))
self.assertFalse(issubclass(NoClass, MyInterface))
def test_weird_class(self):
"""Meets interface as class, but not as instance.
This is strange - not something that would normally ever happen."""
self.assertTrue(meets(WeirdClass, MyInterface))
self.assertFalse(meets(weird, MyInterface))
self.assertFalse(isinstance(weird, MyInterface))
self.assertTrue(issubclass(WeirdClass, MyInterface))
def test_first_child_class(self):
"""First child inherits MyInterface, but does not implement
it at all - so it can't be implemented."""
self.assertFalse(meets(FirstChild, MyInterface))
self.assertFalse(issubclass(FirstChild, MyInterface))
self.assertRaises(TypeError, FirstChild)
def test_second_child_class(self):
"""Meets the interface inherited from its parent."""
self.assertTrue(meets(SecondChild, MyInterface))
self.assertTrue(meets(second_child, MyInterface))
self.assertTrue(isinstance(second_child, MyInterface))
self.assertTrue(issubclass(SecondChild, MyInterface))
def test_commutative(self):
"""
AlsoClass does not meet the interface as a class, but does once instantiated.
"""
self.assertFalse(meets(CommutativeFirst, MyInterface))
self.assertTrue(meets(CommutativeSecond, MyInterface))
self.assertTrue(meets(commutative, MyInterface))
self.assertTrue(isinstance(commutative, MyInterface))
self.assertFalse(issubclass(CommutativeFirst, MyInterface))
self.assertTrue(issubclass(CommutativeSecond, MyInterface))
self.assertRaises(TypeError, CommutativeFails)
if __name__ == "__main__":
unittest.main()
| 35.950276
| 94
| 0.716306
|
import os
import unittest
import abc
from funkyvalidate.examples.existing_directory import ExistingDirectory
from funkyvalidate.examples.existing_file import ExistingFile
from funkyvalidate import InterfaceType, meets
form_path = lambda *parts: os.path.abspath(os.path.join(*parts))
test_dir = form_path(__file__, '..')
example_dir = form_path(test_dir, 'test_example_dir')
nonexistant_dir = form_path(test_dir, 'nonexistant')
test_init_file = form_path(test_dir, '__init__.py')
os.chdir(test_dir)
class ExamplesTests(unittest.TestCase):
def setUp(self):
self.assertTrue(os.path.exists(test_init_file))
def test_existingdirectory(self):
self.assertTrue(isinstance(example_dir, ExistingDirectory))
self.assertFalse(isinstance(nonexistant_dir, ExistingDirectory))
self.assertFalse(isinstance(test_init_file, ExistingDirectory))
self.assertTrue(ExistingDirectory(example_dir) == example_dir)
self.assertRaises(TypeError, ExistingDirectory, 321.321)
self.assertRaises(TypeError, ExistingDirectory, [example_dir])
self.assertRaises(IOError, ExistingDirectory, nonexistant_dir)
self.assertRaises(IOError, ExistingDirectory, test_init_file)
def test_existingfile(self):
self.assertTrue(isinstance(test_init_file, ExistingFile))
self.assertFalse(isinstance(example_dir, ExistingFile))
self.assertTrue(ExistingFile(test_init_file) == test_init_file)
self.assertRaises(TypeError, ExistingFile, 12)
self.assertRaises(IOError, ExistingFile, 'wargarbl')
self.assertRaises(IOError, ExistingFile, nonexistant_dir)
class MyInterface(InterfaceType):
@abc.abstractproperty
def first_name(self):
pass
class YesClass(object):
def __init__(self):
pass
first_name = "foo"
yes = YesClass()
class AlsoClass(object):
def __init__(self):
self.first_name = "bar"
also = AlsoClass()
class NoClass(object):
pass
no = NoClass()
class WeirdClass(object):
def __init__(self):
self.first_name = abc.abstractmethod(lambda self: NotImplemented)
first_name = "bazinga"
weird = WeirdClass()
class FirstChild(MyInterface):
def __init__(self):
self.other_stuff = "boo"
class SecondChild(FirstChild):
first_name = "fixed"
second_child = SecondChild()
# class Weirder(MyInterface):
# first_name = abc.abstractmethod(lambda self: NotImplemented)
# def __init__(self):
# self.first_name = abc.abstractmethod(lambda self: NotImplemented)
class CommutativeFirst(InterfaceType):
first_name = abc.abstractmethod(lambda self: NotImplemented)
class CommutativeSecond(CommutativeFirst):
def __init__(self):
pass
first_name = "booo"
commutative = CommutativeSecond()
class CommutativeFails(CommutativeFirst):
def __init__(self):
self.first_name = "boo"
class InterfaceTests(unittest.TestCase):
def test_myinterface_itself(self):
self.assertFalse(meets(MyInterface, MyInterface))
self.assertFalse(issubclass(MyInterface, MyInterface))
self.assertRaises(TypeError, MyInterface)
def test_also_class(self):
self.assertFalse(meets(AlsoClass, MyInterface))
self.assertTrue(meets(also, MyInterface))
self.assertTrue(isinstance(also, MyInterface))
self.assertFalse(issubclass(AlsoClass, MyInterface))
def test_yes_class(self):
self.assertTrue(meets(YesClass, MyInterface))
self.assertTrue(meets(yes, MyInterface))
self.assertTrue(isinstance(yes, MyInterface))
self.assertTrue(issubclass(YesClass, MyInterface))
def test_no_class(self):
self.assertFalse(meets(NoClass, MyInterface))
self.assertFalse(meets(no, MyInterface))
self.assertFalse(isinstance(no, MyInterface))
self.assertFalse(issubclass(NoClass, MyInterface))
def test_weird_class(self):
self.assertTrue(meets(WeirdClass, MyInterface))
self.assertFalse(meets(weird, MyInterface))
self.assertFalse(isinstance(weird, MyInterface))
self.assertTrue(issubclass(WeirdClass, MyInterface))
def test_first_child_class(self):
self.assertFalse(meets(FirstChild, MyInterface))
self.assertFalse(issubclass(FirstChild, MyInterface))
self.assertRaises(TypeError, FirstChild)
def test_second_child_class(self):
self.assertTrue(meets(SecondChild, MyInterface))
self.assertTrue(meets(second_child, MyInterface))
self.assertTrue(isinstance(second_child, MyInterface))
self.assertTrue(issubclass(SecondChild, MyInterface))
def test_commutative(self):
self.assertFalse(meets(CommutativeFirst, MyInterface))
self.assertTrue(meets(CommutativeSecond, MyInterface))
self.assertTrue(meets(commutative, MyInterface))
self.assertTrue(isinstance(commutative, MyInterface))
self.assertFalse(issubclass(CommutativeFirst, MyInterface))
self.assertTrue(issubclass(CommutativeSecond, MyInterface))
self.assertRaises(TypeError, CommutativeFails)
if __name__ == "__main__":
unittest.main()
| true
| true
|
790471a34452b433ba41649b7e3a92c0604e5e81
| 39,279
|
py
|
Python
|
pytype/pytd/optimize.py
|
adamcataldo/pytype
|
7163e85880b52d53d58044e53157e2a21988308e
|
[
"Apache-2.0"
] | 2
|
2019-07-25T12:53:02.000Z
|
2019-08-18T16:26:16.000Z
|
pytype/pytd/optimize.py
|
adamcataldo/pytype
|
7163e85880b52d53d58044e53157e2a21988308e
|
[
"Apache-2.0"
] | null | null | null |
pytype/pytd/optimize.py
|
adamcataldo/pytype
|
7163e85880b52d53d58044e53157e2a21988308e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8; python-indent:2; indent-tabs-mode:nil -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for optimizing pytd syntax trees.
pytd files come from various sources, and are typically redundant (duplicate
functions, different signatures saying the same thing, overlong type
disjunctions). The Visitors in this file remove various forms of these
redundancies.
"""
import collections
import logging
from pytype import utils
from pytype.pytd import abc_hierarchy
from pytype.pytd import booleq
from pytype.pytd import mro
from pytype.pytd import pytd
from pytype.pytd import pytd_utils
from pytype.pytd import type_match
from pytype.pytd import visitors
import six
log = logging.getLogger(__name__)
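# Illustrative sketch (added, hedged): these visitors are applied by calling
# Visit() on a pytd AST node, e.g.
#   ast = ast.Visit(RemoveDuplicates())
#   ast = ast.Visit(SimplifyUnions())
# A driver that chains several passes in a sensible order is assumed to exist
# elsewhere in this module; the calls above only show the usage pattern.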
class RenameUnknowns(visitors.Visitor):
"""Give unknowns that map to the same set of concrete types the same name."""
def __init__(self, mapping):
super(RenameUnknowns, self).__init__()
self.name_to_cls = {name: hash(cls) for name, cls in mapping.items()}
self.cls_to_canonical_name = {
cls: name for name, cls in self.name_to_cls.items()}
def VisitClassType(self, node):
if node.name.startswith("~unknown"):
return pytd.ClassType(
self.cls_to_canonical_name[self.name_to_cls[node.name]], None)
else:
return node
class RemoveDuplicates(visitors.Visitor):
"""Remove duplicate function signatures.
For example, this transforms
def f(x: int) -> float
def f(x: int) -> float
to
def f(x: int) -> float
In order to be removed, a signature has to be exactly identical to an
existing one.
"""
def VisitFunction(self, node):
# We remove duplicates, but keep existing entries in the same order.
return node.Replace(
signatures=tuple(pytd_utils.OrderedSet(node.signatures)))
class RemoveRedundantSignatures(visitors.Visitor):
"""Remove duplicate function signatures.
For example, this transforms
def f(x: int) -> float
def f(x: int or float) -> float
to
def f(x: int or float) -> float
In order to be removed, a signature has to be "contained" (a subclass of)
an existing one.
"""
def __init__(self, hierarchy):
super(RemoveRedundantSignatures, self).__init__()
self.match = type_match.TypeMatch(hierarchy.GetSuperClasses(),
any_also_is_bottom=False)
self.subst = {}
def EnterClass(self, cls):
# Preserve the identify of each type parameter, and don't
# allow them to match against anything by themselves.
self.subst = {p.type_param: pytd.NamedType("$" + p.name)
for p in cls.template}
def LeaveClass(self, _):
self.subst = {}
def VisitFunction(self, node):
new_signatures = []
matches = set()
# We keep track of which signature matched which other signatures, purely
# for optimization - that way we don't have to query the reverse direction.
for i, s1 in enumerate(node.signatures):
for j, s2 in enumerate(node.signatures):
if i != j and (j, i) not in matches:
if s1.exceptions or s2.exceptions:
# We don't support matching of exceptions.
continue
if s1.template:
# type_match doesn't support polymorphic functions on the
# left side yet.
continue
if self.match.match(s1, s2, self.subst) == booleq.TRUE:
matches.add((i, j))
break
else:
new_signatures.append(s1)
return node.Replace(signatures=tuple(new_signatures))
class SimplifyUnions(visitors.Visitor):
"""Remove duplicate or redundant entries in union types.
For example, this transforms
a: int or int
b: int or ?
c: int or (int or float)
to
a: int
b: ?
c: int or float
"""
def VisitUnionType(self, union):
return pytd_utils.JoinTypes(union.type_list)
class _ReturnsAndExceptions(object):
"""Mutable class for collecting return types and exceptions of functions.
The collecting is stable: Items are kept in the order in which they were
encountered.
Attributes:
return_types: Return types seen so far.
exceptions: Exceptions seen so far.
"""
def __init__(self):
self.return_types = []
self.exceptions = []
def Update(self, signature):
"""Add the return types / exceptions of a signature to this instance."""
if signature.return_type not in self.return_types:
self.return_types.append(signature.return_type)
self.exceptions.extend(exception
for exception in signature.exceptions
if exception not in self.exceptions)
class CombineReturnsAndExceptions(visitors.Visitor):
"""Group function signatures that only differ in exceptions or return values.
For example, this transforms
def f(x: int) -> float:
raise OverflowError()
def f(x: int) -> int:
raise IndexError()
to
def f(x: int) -> float or int:
raise IndexError()
raise OverflowError()
"""
def _GroupByArguments(self, signatures):
"""Groups signatures by arguments.
Arguments:
signatures: A list of function signatures (Signature instances).
Returns:
A dictionary mapping signatures (without return and exceptions) to
a tuple of return values and exceptions.
"""
groups = collections.OrderedDict() # Signature -> ReturnsAndExceptions
for sig in signatures:
stripped_signature = sig.Replace(return_type=None, exceptions=None)
ret = groups.get(stripped_signature)
if not ret:
ret = _ReturnsAndExceptions()
groups[stripped_signature] = ret
ret.Update(sig)
return groups
def VisitFunction(self, f):
"""Merge signatures of a function.
This groups signatures by arguments and then for each group creates a
single signature that joins the return values / exceptions using "or".
Arguments:
f: A pytd.Function instance
Returns:
Function with simplified / combined signatures.
"""
groups = self._GroupByArguments(f.signatures)
new_signatures = []
for stripped_signature, ret_exc in groups.items():
ret = pytd_utils.JoinTypes(ret_exc.return_types)
exc = tuple(ret_exc.exceptions)
new_signatures.append(
stripped_signature.Replace(return_type=ret, exceptions=exc)
)
return f.Replace(signatures=tuple(new_signatures))
class CombineContainers(visitors.Visitor):
"""Change unions of containers to containers of unions.
For example, this transforms
list[int] or list[float]
to
list[int or float]
.
"""
_CONTAINER_NAMES = {
pytd.TupleType: ("__builtin__.tuple", "typing.Tuple"),
pytd.CallableType: ("typing.Callable",),
}
def _key(self, t):
if isinstance(t, (pytd.CallableType, pytd.TupleType)):
return (t.base_type, len(t.parameters))
else:
return t.base_type
def _should_merge(self, pytd_type, union):
"""Determine whether pytd_type values in the union should be merged.
If the union contains the homogeneous flavor of pytd_type (e.g.,
GenericType(base_type=tuple) when pytd_type is TupleType), or pytd_type
values of different lengths, we want to turn all of the pytd_type values
into homogeneous ones so that they can be merged into a single container.
Args:
pytd_type: The pytd type, either TupleType or CallableType.
union: a pytd.UnionType
Returns:
True if the pytd_type values should be merged, False otherwise.
"""
names = self._CONTAINER_NAMES[pytd_type]
length = None
for t in union.type_list:
if isinstance(t, pytd_type):
if length is None:
length = len(t.parameters)
elif length != len(t.parameters):
return True
elif (isinstance(t, pytd.GenericType) and
t.base_type.name in names):
return True
return False
def VisitUnionType(self, union):
"""Push unions down into containers.
This collects similar container types in unions and merges them into
single instances with the union type pushed down to the element_type level.
Arguments:
union: A pytd.Union instance. Might appear in a parameter, a return type,
a constant type, etc.
Returns:
A simplified pytd.Union.
"""
if not any(isinstance(t, pytd.GenericType) for t in union.type_list):
# Optimization: If we're not going to change anything, return original.
return union
union = pytd_utils.JoinTypes(union.type_list) # flatten
if not isinstance(union, pytd.UnionType):
union = pytd.UnionType((union,))
merge_tuples = self._should_merge(pytd.TupleType, union)
merge_callables = self._should_merge(pytd.CallableType, union)
if merge_tuples or merge_callables:
type_list = []
for t in union.type_list:
if merge_tuples and isinstance(t, pytd.TupleType):
t = pytd.GenericType(base_type=t.base_type,
parameters=(pytd.UnionType(t.parameters),))
elif merge_callables and isinstance(t, pytd.CallableType):
t = pytd.GenericType(base_type=t.base_type,
parameters=(pytd.AnythingType(), t.ret))
type_list.append(t)
union = union.Replace(type_list=tuple(type_list))
collect = {}
has_redundant_base_types = False
for t in union.type_list:
if isinstance(t, pytd.GenericType):
key = self._key(t)
if key in collect:
has_redundant_base_types = True
collect[key] = tuple(
pytd_utils.JoinTypes([p1, p2])
for p1, p2 in zip(collect[key], t.parameters))
else:
collect[key] = t.parameters
if not has_redundant_base_types:
return union
result = pytd.NothingType()
done = set()
for t in union.type_list:
if isinstance(t, pytd.GenericType):
key = self._key(t)
if key in done:
continue # already added
parameters = collect[key]
add = t.Replace(parameters=tuple(p.Visit(CombineContainers())
for p in parameters))
done.add(key)
else:
add = t
result = pytd_utils.JoinTypes([result, add])
return result
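# Example (added): under VisitUnionType, "dict[int, str] or dict[float, str]"
# collapses to "dict[int or float, str]": parameters of generics that share a
# base type (and arity, for tuples and callables) are joined position-wise.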
class Factorize(visitors.Visitor):
"""Opposite of ExpandSignatures. Factorizes cartesian products of functions.
For example, this transforms
def f(x: int, y: int)
def f(x: int, y: float)
def f(x: float, y: int)
def f(x: float, y: float)
to
def f(x: int or float, y: int or float)
"""
def _GroupByOmittedArg(self, signatures, i):
"""Group functions that are identical if you ignore one of the arguments.
Arguments:
signatures: A list of function signatures
i: The index of the argument to ignore during comparison.
Returns:
A list of tuples (signature, types). "signature" is a signature with
argument i omitted, "types" is the list of types that argument was
found to have. signatures that don't have argument i are represented
as (original, None).
"""
groups = collections.OrderedDict()
for sig in signatures:
if i >= len(sig.params):
# We can't omit argument i, because this signature has too few
# arguments. Represent this signature as (original, None).
groups[sig] = None
continue
if sig.params[i].mutated_type is not None:
# We can't group mutable parameters. Leave this signature alone.
groups[sig] = None
continue
# Set type of parameter i to None
params = list(sig.params)
param_i = params[i]
params[i] = param_i.Replace(type=None)
stripped_signature = sig.Replace(params=tuple(params))
existing = groups.get(stripped_signature)
if existing:
existing.append(param_i.type)
else:
groups[stripped_signature] = [param_i.type]
return groups.items()
def VisitFunction(self, f):
"""Shrink a function, by factorizing cartesian products of arguments.
Greedily groups signatures, looking at the arguments from left to right.
This algorithm is *not* optimal. But it does the right thing for the
typical cases.
Arguments:
f: An instance of pytd.Function. If this function has more
than one signature, we will try to combine some of these signatures by
introducing union types.
Returns:
A new, potentially optimized, instance of pytd.Function.
"""
max_argument_count = max(len(s.params) for s in f.signatures)
signatures = f.signatures
for i in six.moves.xrange(max_argument_count):
new_sigs = []
for sig, types in self._GroupByOmittedArg(signatures, i):
if types:
# One or more options for argument <i>:
new_params = list(sig.params)
new_params[i] = sig.params[i].Replace(
type=pytd_utils.JoinTypes(types))
sig = sig.Replace(params=tuple(new_params))
new_sigs.append(sig)
else:
# Signature doesn't have argument <i>, so we store the original:
new_sigs.append(sig)
signatures = new_sigs
return f.Replace(signatures=tuple(signatures))
class ApplyOptionalArguments(visitors.Visitor):
"""Removes functions that are instances of a more specific case.
For example, this reduces
def f(x: int, ...) # [1]
def f(x: int, y: int) # [2]
to just
def f(x: int, ...)
Because "..." makes it possible to pass any additional arguments to [1],
it encompasses both declarations, hence we can omit [2].
"""
def _HasShorterVersion(self, sig, optional_arg_sigs):
"""Find a shorter signature with optional arguments for a longer signature.
Arguments:
sig: The function signature we'd like to shorten
optional_arg_sigs: A set of function signatures with optional arguments
that will be matched against sig.
Returns:
True if there is a shorter signature that generalizes sig, but is not
identical to sig.
"""
param_count = len(sig.params)
if not sig.has_optional:
param_count += 1 # also consider f(x, y, ...) for f(x, y)
for i in six.moves.xrange(param_count):
if sig.params[0:i] in optional_arg_sigs:
return True
return False
def VisitFunction(self, f):
"""Remove all signatures that have a shorter version.
We use signatures with optional argument (has_opt=True) as template
and then match all signatures against those templates, removing those
that match.
Arguments:
f: An instance of pytd.Function
Returns:
A potentially simplified instance of pytd.Function.
"""
# Set of signatures that can replace longer ones. Only used for matching,
# hence we can use an unordered data structure.
optional_arg_sigs = frozenset(s.params
for s in f.signatures
if s.has_optional)
new_signatures = (s for s in f.signatures
if not self._HasShorterVersion(s, optional_arg_sigs))
return f.Replace(signatures=tuple(new_signatures))
class SuperClassHierarchy(object):
"""Utility class for optimizations working with superclasses."""
def __init__(self, superclasses):
self._superclasses = superclasses
self._subclasses = utils.invert_dict(self._superclasses)
def GetSuperClasses(self):
return self._superclasses
def _CollectSuperclasses(self, type_name, collect):
"""Recursively collect super classes for a type.
Arguments:
type_name: A string, the type's name.
collect: A set() of strings, modified to contain all superclasses.
"""
collect.add(type_name)
superclasses = [name
for name in self._superclasses.get(type_name, [])]
# The superclasses might have superclasses of their own, so recurse.
for superclass in superclasses:
self._CollectSuperclasses(superclass, collect)
def ExpandSuperClasses(self, t):
"""Generate a list of all (known) superclasses for a type.
Arguments:
t: A type name. E.g. "int".
Returns:
A set of types. This set includes t as well as all its superclasses. For
example, this will return "bool", "int" and "object" for "bool".
"""
superclasses = set()
self._CollectSuperclasses(t, superclasses)
return superclasses
def ExpandSubClasses(self, t):
"""Generate a set of all (known) subclasses for a type.
Arguments:
t: A type. E.g. NamedType("int").
Returns:
A set of types. This set includes t as well as all its subclasses. For
example, this will return "int" and "bool" for "int".
"""
queue = [t]
seen = set()
while queue:
item = queue.pop()
if item not in seen:
seen.add(item)
queue.extend(self._subclasses[item])
return seen
def HasSubClassInSet(self, cls, known):
"""Queries whether a subclass of a type is present in a given set."""
return any(sub in known
for sub in self._subclasses[cls])
def HasSuperClassInSet(self, cls, known):
"""Queries whether a superclass of a type is present in a given set."""
return any(sub in known
for sub in self._superclasses[cls])
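# Illustrative sketch (assumption, not from the original file): behaviour of
# the hierarchy helpers for a toy superclass mapping.
#   h = SuperClassHierarchy({"bool": ["int"], "int": ["object"]})
#   h.ExpandSuperClasses("bool")  # -> {"bool", "int", "object"}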
class SimplifyUnionsWithSuperclasses(visitors.Visitor):
"""Simplify Unions with superclasses.
E.g., this changes
int or bool
to
int
since bool is a subclass of int.
(Interpreting types as "sets of values", this simplification is sound since
A union B = A, if B is a subset of A.)
"""
def __init__(self, hierarchy):
super(SimplifyUnionsWithSuperclasses, self).__init__()
self.hierarchy = hierarchy
def VisitUnionType(self, union):
c = collections.Counter()
for t in set(union.type_list):
# TODO(rechen): How can we make this work with GenericType?
if isinstance(t, pytd.GENERIC_BASE_TYPE):
c += collections.Counter(self.hierarchy.ExpandSubClasses(str(t)))
    # Below, c[str(t)] can be zero - that's the default for non-existent items
# in collections.Counter. It'll happen for types that are not
# instances of GENERIC_BASE_TYPE, like container types.
new_type_list = [t for t in union.type_list if c[str(t)] <= 1]
return pytd_utils.JoinTypes(new_type_list)
class FindCommonSuperClasses(visitors.Visitor):
"""Find common super classes. Optionally also uses abstract base classes.
E.g., this changes
def f(x: list or tuple, y: frozenset or set) -> int or float
to
def f(x: Sequence, y: Set) -> Real
"""
def __init__(self, hierarchy):
super(FindCommonSuperClasses, self).__init__()
self.hierarchy = hierarchy
def VisitUnionType(self, union):
"""Given a union type, try to find a simplification by using superclasses.
This is a lossy optimization that tries to map a list of types to a common
    base type. For example, int is a common superclass of both int and bool,
    so this would convert "int or bool" to "int".
Arguments:
union: A union type.
Returns:
A simplified type, if available.
"""
intersection = self.hierarchy.ExpandSuperClasses(str(union.type_list[0]))
for t in union.type_list[1:]:
intersection.intersection_update(
self.hierarchy.ExpandSuperClasses(str(t)))
# Remove "redundant" superclasses, by removing everything from the tree
# that's not a leaf. I.e., we don't need "object" if we have more
# specialized types.
new_type_list = tuple(
pytd.NamedType(cls) for cls in intersection
if not self.hierarchy.HasSubClassInSet(cls, intersection))
if not new_type_list:
return union # if types don't intersect, leave them alone
return pytd_utils.JoinTypes(new_type_list)
class CollapseLongUnions(visitors.Visitor):
"""Shortens long unions to object (or "?").
Poor man's version of FindCommonSuperClasses. Shorten types like
"str or unicode or int or float or list" to just "object" or "?".
Additionally, if the union already contains at least one "object", we also
potentially replace the entire union with just "object".
Attributes:
max_length: The maximum number of types to allow in a union. If there are
more types than this, it is shortened.
"""
def __init__(self, max_length=7):
assert isinstance(max_length, six.integer_types)
super(CollapseLongUnions, self).__init__()
self.generic_type = pytd.AnythingType()
self.max_length = max_length
def VisitUnionType(self, union):
if len(union.type_list) > self.max_length:
return self.generic_type
elif self.generic_type in union.type_list:
return self.generic_type
else:
return union
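# Illustrative sketch (hypothetical types, not from the original file): with
# max_length=3, a union of four concrete types collapses to the generic type:
#   str or int or float or complex  ->  ?
# and a union that already contains "?" collapses to "?" as well.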
class AdjustGenericType(visitors.Visitor):
"""Changes the generic type from "object" to "Any"."""
def __init__(self):
super(AdjustGenericType, self).__init__()
self.old_generic_type = pytd.ClassType("__builtin__.object")
self.new_generic_type = pytd.AnythingType()
def VisitClassType(self, t):
if t == self.old_generic_type:
return self.new_generic_type
else:
return t
class AdjustReturnAndConstantGenericType(visitors.Visitor):
"""Changes "object" to "Any" in return and constant types."""
def VisitSignature(self, sig):
return sig.Replace(return_type=sig.return_type.Visit(AdjustGenericType()))
def VisitConstant(self, c):
return c.Replace(type=c.type.Visit(AdjustGenericType()))
class AddInheritedMethods(visitors.Visitor):
"""Copy methods and constants from base classes into their derived classes.
E.g. this changes
class Bar:
[methods and constants of Bar]
class Foo(Bar):
[methods and constants of Foo]
to
class Bar:
[methods and constants of Bar]
class Foo(Bar):
[methods and constants of Bar]
[methods and constants of Foo]
.
This is not an optimization by itself, but it can help with other
optimizations (like signature merging), and is also useful as preprocessor
for type matching.
"""
def VisitLateType(self, _):
raise NotImplementedError("Can't use AddInheritedMethods with LateType.")
def VisitClass(self, cls):
"""Add superclass methods and constants to this Class."""
if any(base for base in cls.parents if isinstance(base, pytd.NamedType)):
raise AssertionError("AddInheritedMethods needs a resolved AST")
# Filter out only the types we can reason about.
    # TODO(kramm): Do we want to handle UnionTypes and GenericTypes at some point?
bases = [base.cls
for base in cls.parents
if isinstance(base, pytd.ClassType)]
    # Don't pull in methods that are named the same as existing methods in
    # this class; local methods override parent class methods.
names = {m.name for m in cls.methods} | {c.name for c in cls.constants}
# TODO(kramm): This should do full-blown MRO.
adjust_self = visitors.AdjustSelf(force=True)
adjust_self.class_types.append(visitors.ClassAsType(cls))
new_methods = list(cls.methods)
for base in bases:
for m in base.methods:
if m.name not in names:
new_methods.append(m.Visit(adjust_self))
new_constants = list(cls.constants)
for base in bases:
for c in base.constants:
if c.name not in names:
new_constants.append(c)
return cls.Replace(methods=tuple(new_methods),
constants=tuple(new_constants))
class RemoveInheritedMethods(visitors.Visitor):
"""Removes methods from classes if they also exist in their superclass.
E.g. this changes
class A:
def f(self, y: int) -> bool
class B(A):
def f(self, y: int) -> bool
to
class A:
def f(self, y: int) -> bool
class B(A):
pass
.
"""
def __init__(self):
super(RemoveInheritedMethods, self).__init__()
self.class_to_stripped_signatures = {}
def _StrippedSignatures(self, t):
"""Given a class, list method name + signature without "self".
Args:
t: A pytd.TYPE.
Returns:
A set of name + signature tuples, with the self parameter of the
signature removed.
"""
if not isinstance(t, pytd.ClassType):
# For union types, generic types etc., inheritance is more complicated.
# Be conservative and default to not removing methods inherited from
# those.
return {}
stripped_signatures = {}
for method in t.cls.methods:
for sig in method.signatures:
if (sig.params and
sig.params[0].name == "self" and
isinstance(sig.params[0].type, pytd.ClassType)):
stripped_signatures[method.name] = (
sig.Replace(params=sig.params[1:]), method.is_abstract)
return stripped_signatures
def _FindNameAndSig(self, classes, name, sig):
"""Find a tuple(name, signature) in all methods of a type/class."""
if classes:
t = classes[0]
classes = classes[1:]
if t not in self.class_to_stripped_signatures:
self.class_to_stripped_signatures[t] = self._StrippedSignatures(t)
if name in self.class_to_stripped_signatures[t]:
return sig == self.class_to_stripped_signatures[t][name]
return self._FindNameAndSig(classes, name, sig)
return False
def _MaybeRemoveSignature(self, name, sig, is_abstract):
"""Visit a Signature and return None if we can remove it."""
if (not sig.params or
sig.params[0].name != "self" or
not isinstance(sig.params[0].type, pytd.ClassType)):
return sig # Not a method
cls = sig.params[0].type.cls
if cls is None:
# TODO(kramm): Remove once pytype stops generating ClassType(name, None).
return sig
try:
if self._FindNameAndSig(
mro.GetBasesInMRO(cls), name,
(sig.Replace(params=sig.params[1:]), is_abstract)):
return None # remove (see VisitFunction)
except mro.MROError:
return sig
return sig
def _MaybeDeleteFunction(self, f):
"""Visit a Function and return None if we can remove it."""
signatures = tuple(self._MaybeRemoveSignature(f.name, sig, f.is_abstract)
for sig in f.signatures)
if any(signatures):
if signatures.count(None):
return f.Replace(
signatures=tuple(s for s in signatures if s is not None))
else:
return f # unchanged
else:
return None # delete function
def VisitClass(self, cls):
methods = tuple(self._MaybeDeleteFunction(m) for m in cls.methods)
if methods.count(None):
return cls.Replace(methods=tuple(m for m in methods if m is not None))
else:
return cls # unchanged
class PullInMethodClasses(visitors.Visitor):
"""Simplifies classes with only a __call__ function to just a method.
This transforms
class Foo:
m: Bar
class Bar:
def __call__(self: Foo, ...)
to
class Foo:
def m(self, ...)
.
"""
def __init__(self):
super(PullInMethodClasses, self).__init__()
self._module = None
self._total_count = collections.defaultdict(int)
self._processed_count = collections.defaultdict(int)
def _MaybeLookup(self, t):
if isinstance(t, pytd.NamedType):
try:
return self._module.Lookup(t.name)
except KeyError:
return None
elif isinstance(t, pytd.ClassType):
return t.cls
else:
return None
def _HasSelf(self, sig):
"""True if a signature has a self parameter.
This only checks for the name, since the type can be too many different
things (type of the method, type of the parent class, object, unknown etc.)
and doesn't carry over to the simplified version, anyway.
Arguments:
sig: Function signature (instance of pytd.Signature)
Returns:
True if the signature has "self".
"""
return sig.params and sig.params[0].name == "self"
def _LookupIfSimpleCall(self, t):
"""Looks up the type if it has only one method, "__call__"."""
if not isinstance(t, (pytd.NamedType, pytd.ClassType)):
# We only do this for simple types.
return None
cls = self._MaybeLookup(t)
if not isinstance(cls, pytd.Class):
# This is not a class or it doesn't exist, so assume it's not a method.
return None
if [f.name for f in cls.methods] != ["__call__"]:
return None
method, = cls.methods
return cls if all(self._HasSelf(sig) for sig in method.signatures) else None
def _CanDelete(self, cls):
"""Checks whether this class can be deleted.
    Returns whether all occurrences of this class as a type were due to
    constants we removed.
Arguments:
cls: A pytd.Class.
Returns:
True if we can delete this class.
"""
if not self._processed_count[cls.name]:
      # Leave standalone classes alone. E.g. the pytd files in
      # pytd/builtins/ define classes not used by anything else.
return False
return self._processed_count[cls.name] == self._total_count[cls.name]
def EnterTypeDeclUnit(self, module):
    # Since modules are hierarchical, we enter TypeDeclUnits multiple times,
# but we only want to record the top-level one.
if not self._module:
self._module = module
def VisitTypeDeclUnit(self, unit):
return unit.Replace(classes=tuple(c for c in unit.classes
if not self._CanDelete(c)))
def VisitClassType(self, t):
self._total_count[t.name] += 1
return t
def VisitNamedType(self, t):
self._total_count[t.name] += 1
return t
def VisitClass(self, cls):
"""Visit a class, and change constants to methods where possible."""
new_constants = []
new_methods = list(cls.methods)
adjust_self = visitors.AdjustSelf(force=True)
adjust_self.class_types.append(visitors.ClassAsType(cls))
for const in cls.constants:
c = self._LookupIfSimpleCall(const.type)
if c:
signatures = c.methods[0].signatures
self._processed_count[c.name] += 1
new_method = pytd.Function(const.name, signatures, c.methods[0].kind)
new_methods.append(new_method.Visit(adjust_self))
else:
new_constants.append(const) # keep
return cls.Replace(constants=tuple(new_constants),
methods=tuple(new_methods))
class AbsorbMutableParameters(visitors.Visitor):
"""Converts mutable parameters to unions. This is lossy.
For example, this will change
def f(x: list[int]):
x = list[int or float]
to
def f(x: list[int] or list[int or float])
.
(Use optimize.CombineContainers to then change x to list[int or float].)
This also works for methods - it will then potentially change the type of
"self". The resulting AST is temporary and needs careful handling.
"""
def VisitParameter(self, p):
if p.mutated_type is None:
return p
else:
return p.Replace(type=pytd_utils.JoinTypes([p.type, p.mutated_type]),
mutated_type=None)
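# Illustrative sketch (hypothetical pytd stubs, not from the original file):
# AbsorbMutableParameters followed by CombineContainers, as noted above.
#   def f(x: list[int]):              # with mutation x = list[int or float]
# becomes, after AbsorbMutableParameters:
#   def f(x: list[int] or list[int or float])
# and after CombineContainers:
#   def f(x: list[int or float])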
class SimplifyContainers(visitors.Visitor):
"""Simplifies containers whose type parameters are all Any.
For example, this will change
def f() -> List[any]
to
def f() -> list
Note that we don't simplify TupleType or CallableType, since they have
variable-length parameters, and the parameter length is meaningful even when
the parameters are all Any.
"""
def _Simplify(self, t):
if all(isinstance(p, pytd.AnythingType) for p in t.parameters):
return t.base_type
else:
return t
def VisitGenericType(self, t):
return self._Simplify(t)
class TypeParameterScope(visitors.Visitor):
"""Common superclass for optimizations that track type parameters."""
def __init__(self):
super(TypeParameterScope, self).__init__()
self.type_params_stack = [{}]
def EnterClass(self, cls):
new = self.type_params_stack[-1].copy()
new.update({t.type_param: cls for t in cls.template})
self.type_params_stack.append(new)
def EnterSignature(self, sig):
new = self.type_params_stack[-1].copy()
new.update({t.type_param: sig for t in sig.template})
self.type_params_stack.append(new)
def IsClassTypeParameter(self, type_param):
class_or_sig = self.type_params_stack[-1].get(type_param)
return isinstance(class_or_sig, pytd.Class)
def IsFunctionTypeParameter(self, type_param):
class_or_sig = self.type_params_stack[-1].get(type_param)
return isinstance(class_or_sig, pytd.Signature)
def LeaveClass(self, _):
self.type_params_stack.pop()
def LeaveSignature(self, _):
self.type_params_stack.pop()
class MergeTypeParameters(TypeParameterScope):
"""Remove all function type parameters in a union with a class type param.
For example, this will change
class A(typing.Generic(T)):
def append(self, T or T2) -> T2
to
class A(typing.Generic(T)):
def append(self, T) -> T
.
Use this visitor after using AbsorbMutableParameters.
As another example, the combination of AbsorbMutableParameters and
MergeTypeParameters transforms
class list(typing.Generic(T)):
def append(self, v: T2) -> NoneType:
self = T or T2
to
class list(typing.Generic(T')):
def append(self, V:T') -> NoneType
by creating a *new* template variable T' that propagates the
mutations to the outermost level (in this example, T' = T or T2)
"""
def __init__(self):
super(MergeTypeParameters, self).__init__()
self.type_param_union = None
def _AppendNew(self, l1, l2):
"""Appends all items to l1 that are not in l2."""
# l1 and l2 are small (2-3 elements), so just use two loops.
for e2 in l2:
if not any(e1 is e2 for e1 in l1):
l1.append(e2)
def EnterSignature(self, node):
# Necessary because TypeParameterScope also defines this function
super(MergeTypeParameters, self).EnterSignature(node)
assert self.type_param_union is None
self.type_param_union = collections.defaultdict(list)
def LeaveSignature(self, node):
# Necessary because TypeParameterScope also defines this function
super(MergeTypeParameters, self).LeaveSignature(node)
self.type_param_union = None
def VisitUnionType(self, u):
type_params = [t for t in u.type_list if isinstance(t, pytd.TypeParameter)]
for t in type_params:
if self.IsFunctionTypeParameter(t):
self._AppendNew(self.type_param_union[t.name], type_params)
return u
def _AllContaining(self, type_param, seen=None):
"""Gets all type parameters that are in a union with the passed one."""
seen = seen or set()
result = [type_param]
for other in self.type_param_union[type_param.name]:
if other in seen:
continue # break cycles
seen.add(other)
self._AppendNew(result, self._AllContaining(other, seen) or [other])
return result
def _ReplaceByOuterIfNecessary(self, item, substitutions):
"""Potentially replace a function type param with a class type param.
Args:
item: A pytd.TemplateItem
substitutions: A dictionary to update with what we replaced.
Returns:
Either [item] or [].
"""
containing_union = self._AllContaining(item.type_param)
if not containing_union:
return [item]
class_type_parameters = [type_param
for type_param in containing_union
if self.IsClassTypeParameter(type_param)]
if class_type_parameters:
substitutions[item.type_param] = pytd_utils.JoinTypes(
class_type_parameters)
return []
else:
# It's a function type parameter that appears in a union with other
# function type parameters.
# TODO(kramm): We could merge those, too.
return [item]
def VisitSignature(self, sig):
new_template = []
substitutions = {k: k for k in self.type_params_stack[-1]}
for item in sig.template:
new_template += self._ReplaceByOuterIfNecessary(item, substitutions)
if sig.template == new_template:
return sig # Nothing changed.
else:
return sig.Replace(template=tuple(new_template)).Visit(
visitors.ReplaceTypeParameters(substitutions)).Visit(SimplifyUnions())
def Optimize(node,
builtins=None,
lossy=False,
use_abcs=False,
max_union=7,
remove_mutable=False,
can_do_lookup=True):
"""Optimize a PYTD tree.
Tries to shrink a PYTD tree by applying various optimizations.
Arguments:
node: A pytd node to be optimized. It won't be modified - this function
will return a new node.
builtins: Definitions of all of the external types in node.
lossy: Allow optimizations that change the meaning of the pytd.
use_abcs: Use abstract base classes to represent unions like
e.g. "float or int" as "Real".
max_union: How many types we allow in a union before we simplify
it to just "object".
remove_mutable: Whether to simplify mutable parameters to normal
parameters.
can_do_lookup: True: We're either allowed to try to resolve NamedType
instances in the AST, or the AST is already resolved. False: Skip any
optimizations that would require NamedTypes to be resolved.
Returns:
An optimized node.
"""
node = node.Visit(RemoveDuplicates())
node = node.Visit(SimplifyUnions())
node = node.Visit(CombineReturnsAndExceptions())
node = node.Visit(Factorize())
node = node.Visit(ApplyOptionalArguments())
node = node.Visit(CombineContainers())
node = node.Visit(SimplifyContainers())
if builtins:
superclasses = builtins.Visit(visitors.ExtractSuperClassesByName())
superclasses.update(node.Visit(visitors.ExtractSuperClassesByName()))
if use_abcs:
superclasses.update(abc_hierarchy.GetSuperClasses())
hierarchy = SuperClassHierarchy(superclasses)
node = node.Visit(SimplifyUnionsWithSuperclasses(hierarchy))
if lossy:
node = node.Visit(FindCommonSuperClasses(hierarchy))
if max_union:
node = node.Visit(CollapseLongUnions(max_union))
node = node.Visit(AdjustReturnAndConstantGenericType())
if remove_mutable:
node = node.Visit(AbsorbMutableParameters())
node = node.Visit(CombineContainers())
node = node.Visit(MergeTypeParameters())
node = node.Visit(visitors.AdjustSelf())
node = node.Visit(SimplifyContainers())
if builtins and can_do_lookup:
node = visitors.LookupClasses(node, builtins, ignore_late_types=True)
node = node.Visit(RemoveInheritedMethods())
node = node.Visit(RemoveRedundantSignatures(hierarchy))
return node
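# Illustrative usage sketch (assumption, not part of the original file):
# `ast` is a pytd.TypeDeclUnit and `builtins_ast` its resolved builtins,
# both obtained elsewhere; only the Optimize() entry point above is used.
#   optimized = Optimize(ast,
#                        builtins=builtins_ast,
#                        lossy=False,
#                        use_abcs=False,
#                        max_union=7,
#                        remove_mutable=False)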
| 33.007563
| 80
| 0.675705
|
import collections
import logging
from pytype import utils
from pytype.pytd import abc_hierarchy
from pytype.pytd import booleq
from pytype.pytd import mro
from pytype.pytd import pytd
from pytype.pytd import pytd_utils
from pytype.pytd import type_match
from pytype.pytd import visitors
import six
log = logging.getLogger(__name__)
class RenameUnknowns(visitors.Visitor):
def __init__(self, mapping):
super(RenameUnknowns, self).__init__()
self.name_to_cls = {name: hash(cls) for name, cls in mapping.items()}
self.cls_to_canonical_name = {
cls: name for name, cls in self.name_to_cls.items()}
def VisitClassType(self, node):
if node.name.startswith("~unknown"):
return pytd.ClassType(
self.cls_to_canonical_name[self.name_to_cls[node.name]], None)
else:
return node
class RemoveDuplicates(visitors.Visitor):
def VisitFunction(self, node):
return node.Replace(
signatures=tuple(pytd_utils.OrderedSet(node.signatures)))
class RemoveRedundantSignatures(visitors.Visitor):
def __init__(self, hierarchy):
super(RemoveRedundantSignatures, self).__init__()
self.match = type_match.TypeMatch(hierarchy.GetSuperClasses(),
any_also_is_bottom=False)
self.subst = {}
def EnterClass(self, cls):
    # Substitute the class's own type parameters with fresh named types and
    # allow them to match against anything by themselves.
self.subst = {p.type_param: pytd.NamedType("$" + p.name)
for p in cls.template}
def LeaveClass(self, _):
self.subst = {}
def VisitFunction(self, node):
new_signatures = []
matches = set()
# We keep track of which signature matched which other signatures, purely
# for optimization - that way we don't have to query the reverse direction.
for i, s1 in enumerate(node.signatures):
for j, s2 in enumerate(node.signatures):
if i != j and (j, i) not in matches:
if s1.exceptions or s2.exceptions:
continue
if s1.template:
            # type_match doesn't support polymorphic functions on the
            # left-hand side yet.
continue
if self.match.match(s1, s2, self.subst) == booleq.TRUE:
matches.add((i, j))
break
else:
new_signatures.append(s1)
return node.Replace(signatures=tuple(new_signatures))
class SimplifyUnions(visitors.Visitor):
def VisitUnionType(self, union):
return pytd_utils.JoinTypes(union.type_list)
class _ReturnsAndExceptions(object):
def __init__(self):
self.return_types = []
self.exceptions = []
def Update(self, signature):
if signature.return_type not in self.return_types:
self.return_types.append(signature.return_type)
self.exceptions.extend(exception
for exception in signature.exceptions
if exception not in self.exceptions)
class CombineReturnsAndExceptions(visitors.Visitor):
def _GroupByArguments(self, signatures):
groups = collections.OrderedDict()
for sig in signatures:
stripped_signature = sig.Replace(return_type=None, exceptions=None)
ret = groups.get(stripped_signature)
if not ret:
ret = _ReturnsAndExceptions()
groups[stripped_signature] = ret
ret.Update(sig)
return groups
def VisitFunction(self, f):
groups = self._GroupByArguments(f.signatures)
new_signatures = []
for stripped_signature, ret_exc in groups.items():
ret = pytd_utils.JoinTypes(ret_exc.return_types)
exc = tuple(ret_exc.exceptions)
new_signatures.append(
stripped_signature.Replace(return_type=ret, exceptions=exc)
)
return f.Replace(signatures=tuple(new_signatures))
class CombineContainers(visitors.Visitor):
_CONTAINER_NAMES = {
pytd.TupleType: ("__builtin__.tuple", "typing.Tuple"),
pytd.CallableType: ("typing.Callable",),
}
def _key(self, t):
if isinstance(t, (pytd.CallableType, pytd.TupleType)):
return (t.base_type, len(t.parameters))
else:
return t.base_type
def _should_merge(self, pytd_type, union):
names = self._CONTAINER_NAMES[pytd_type]
length = None
for t in union.type_list:
if isinstance(t, pytd_type):
if length is None:
length = len(t.parameters)
elif length != len(t.parameters):
return True
elif (isinstance(t, pytd.GenericType) and
t.base_type.name in names):
return True
return False
def VisitUnionType(self, union):
if not any(isinstance(t, pytd.GenericType) for t in union.type_list):
return union
union = pytd_utils.JoinTypes(union.type_list) # flatten
if not isinstance(union, pytd.UnionType):
union = pytd.UnionType((union,))
merge_tuples = self._should_merge(pytd.TupleType, union)
merge_callables = self._should_merge(pytd.CallableType, union)
if merge_tuples or merge_callables:
type_list = []
for t in union.type_list:
if merge_tuples and isinstance(t, pytd.TupleType):
t = pytd.GenericType(base_type=t.base_type,
parameters=(pytd.UnionType(t.parameters),))
elif merge_callables and isinstance(t, pytd.CallableType):
t = pytd.GenericType(base_type=t.base_type,
parameters=(pytd.AnythingType(), t.ret))
type_list.append(t)
union = union.Replace(type_list=tuple(type_list))
collect = {}
has_redundant_base_types = False
for t in union.type_list:
if isinstance(t, pytd.GenericType):
key = self._key(t)
if key in collect:
has_redundant_base_types = True
collect[key] = tuple(
pytd_utils.JoinTypes([p1, p2])
for p1, p2 in zip(collect[key], t.parameters))
else:
collect[key] = t.parameters
if not has_redundant_base_types:
return union
result = pytd.NothingType()
done = set()
for t in union.type_list:
if isinstance(t, pytd.GenericType):
key = self._key(t)
if key in done:
continue # already added
parameters = collect[key]
add = t.Replace(parameters=tuple(p.Visit(CombineContainers())
for p in parameters))
done.add(key)
else:
add = t
result = pytd_utils.JoinTypes([result, add])
return result
class Factorize(visitors.Visitor):
def _GroupByOmittedArg(self, signatures, i):
groups = collections.OrderedDict()
for sig in signatures:
if i >= len(sig.params):
        # We can't omit argument i, because this signature has too few
        # parameters.
groups[sig] = None
continue
if sig.params[i].mutated_type is not None:
groups[sig] = None
continue
# Set type of parameter i to None
params = list(sig.params)
param_i = params[i]
params[i] = param_i.Replace(type=None)
stripped_signature = sig.Replace(params=tuple(params))
existing = groups.get(stripped_signature)
if existing:
existing.append(param_i.type)
else:
groups[stripped_signature] = [param_i.type]
return groups.items()
def VisitFunction(self, f):
max_argument_count = max(len(s.params) for s in f.signatures)
signatures = f.signatures
for i in six.moves.xrange(max_argument_count):
new_sigs = []
for sig, types in self._GroupByOmittedArg(signatures, i):
if types:
# One or more options for argument <i>:
new_params = list(sig.params)
new_params[i] = sig.params[i].Replace(
type=pytd_utils.JoinTypes(types))
sig = sig.Replace(params=tuple(new_params))
new_sigs.append(sig)
else:
# Signature doesn't have argument <i>, so we store the original:
new_sigs.append(sig)
signatures = new_sigs
return f.Replace(signatures=tuple(signatures))
class ApplyOptionalArguments(visitors.Visitor):
def _HasShorterVersion(self, sig, optional_arg_sigs):
param_count = len(sig.params)
if not sig.has_optional:
param_count += 1
for i in six.moves.xrange(param_count):
if sig.params[0:i] in optional_arg_sigs:
return True
return False
def VisitFunction(self, f):
optional_arg_sigs = frozenset(s.params
for s in f.signatures
if s.has_optional)
new_signatures = (s for s in f.signatures
if not self._HasShorterVersion(s, optional_arg_sigs))
return f.Replace(signatures=tuple(new_signatures))
class SuperClassHierarchy(object):
def __init__(self, superclasses):
self._superclasses = superclasses
self._subclasses = utils.invert_dict(self._superclasses)
def GetSuperClasses(self):
return self._superclasses
def _CollectSuperclasses(self, type_name, collect):
collect.add(type_name)
superclasses = [name
for name in self._superclasses.get(type_name, [])]
for superclass in superclasses:
self._CollectSuperclasses(superclass, collect)
def ExpandSuperClasses(self, t):
superclasses = set()
self._CollectSuperclasses(t, superclasses)
return superclasses
def ExpandSubClasses(self, t):
queue = [t]
seen = set()
while queue:
item = queue.pop()
if item not in seen:
seen.add(item)
queue.extend(self._subclasses[item])
return seen
def HasSubClassInSet(self, cls, known):
return any(sub in known
for sub in self._subclasses[cls])
def HasSuperClassInSet(self, cls, known):
return any(sub in known
for sub in self._superclasses[cls])
class SimplifyUnionsWithSuperclasses(visitors.Visitor):
def __init__(self, hierarchy):
super(SimplifyUnionsWithSuperclasses, self).__init__()
self.hierarchy = hierarchy
def VisitUnionType(self, union):
c = collections.Counter()
for t in set(union.type_list):
if isinstance(t, pytd.GENERIC_BASE_TYPE):
c += collections.Counter(self.hierarchy.ExpandSubClasses(str(t)))
    # Below, c[str(t)] can be zero - that's the default for non-existent items
    # in collections.Counter. It'll happen for types that are not
    # instances of GENERIC_BASE_TYPE, like container types.
new_type_list = [t for t in union.type_list if c[str(t)] <= 1]
return pytd_utils.JoinTypes(new_type_list)
class FindCommonSuperClasses(visitors.Visitor):
def __init__(self, hierarchy):
super(FindCommonSuperClasses, self).__init__()
self.hierarchy = hierarchy
def VisitUnionType(self, union):
intersection = self.hierarchy.ExpandSuperClasses(str(union.type_list[0]))
for t in union.type_list[1:]:
intersection.intersection_update(
self.hierarchy.ExpandSuperClasses(str(t)))
new_type_list = tuple(
pytd.NamedType(cls) for cls in intersection
if not self.hierarchy.HasSubClassInSet(cls, intersection))
if not new_type_list:
return union
return pytd_utils.JoinTypes(new_type_list)
class CollapseLongUnions(visitors.Visitor):
def __init__(self, max_length=7):
assert isinstance(max_length, six.integer_types)
super(CollapseLongUnions, self).__init__()
self.generic_type = pytd.AnythingType()
self.max_length = max_length
def VisitUnionType(self, union):
if len(union.type_list) > self.max_length:
return self.generic_type
elif self.generic_type in union.type_list:
return self.generic_type
else:
return union
class AdjustGenericType(visitors.Visitor):
def __init__(self):
super(AdjustGenericType, self).__init__()
self.old_generic_type = pytd.ClassType("__builtin__.object")
self.new_generic_type = pytd.AnythingType()
def VisitClassType(self, t):
if t == self.old_generic_type:
return self.new_generic_type
else:
return t
class AdjustReturnAndConstantGenericType(visitors.Visitor):
def VisitSignature(self, sig):
return sig.Replace(return_type=sig.return_type.Visit(AdjustGenericType()))
def VisitConstant(self, c):
return c.Replace(type=c.type.Visit(AdjustGenericType()))
class AddInheritedMethods(visitors.Visitor):
def VisitLateType(self, _):
raise NotImplementedError("Can't use AddInheritedMethods with LateType.")
def VisitClass(self, cls):
if any(base for base in cls.parents if isinstance(base, pytd.NamedType)):
raise AssertionError("AddInheritedMethods needs a resolved AST")
bases = [base.cls
for base in cls.parents
if isinstance(base, pytd.ClassType)]
    # Don't pull in methods that are named the same as existing methods in
    # this class; local methods override parent class methods.
names = {m.name for m in cls.methods} | {c.name for c in cls.constants}
# TODO(kramm): This should do full-blown MRO.
adjust_self = visitors.AdjustSelf(force=True)
adjust_self.class_types.append(visitors.ClassAsType(cls))
new_methods = list(cls.methods)
for base in bases:
for m in base.methods:
if m.name not in names:
new_methods.append(m.Visit(adjust_self))
new_constants = list(cls.constants)
for base in bases:
for c in base.constants:
if c.name not in names:
new_constants.append(c)
return cls.Replace(methods=tuple(new_methods),
constants=tuple(new_constants))
class RemoveInheritedMethods(visitors.Visitor):
def __init__(self):
super(RemoveInheritedMethods, self).__init__()
self.class_to_stripped_signatures = {}
def _StrippedSignatures(self, t):
if not isinstance(t, pytd.ClassType):
# For union types, generic types etc., inheritance is more complicated.
# Be conservative and default to not removing methods inherited from
# those.
return {}
stripped_signatures = {}
for method in t.cls.methods:
for sig in method.signatures:
if (sig.params and
sig.params[0].name == "self" and
isinstance(sig.params[0].type, pytd.ClassType)):
stripped_signatures[method.name] = (
sig.Replace(params=sig.params[1:]), method.is_abstract)
return stripped_signatures
def _FindNameAndSig(self, classes, name, sig):
if classes:
t = classes[0]
classes = classes[1:]
if t not in self.class_to_stripped_signatures:
self.class_to_stripped_signatures[t] = self._StrippedSignatures(t)
if name in self.class_to_stripped_signatures[t]:
return sig == self.class_to_stripped_signatures[t][name]
return self._FindNameAndSig(classes, name, sig)
return False
def _MaybeRemoveSignature(self, name, sig, is_abstract):
if (not sig.params or
sig.params[0].name != "self" or
not isinstance(sig.params[0].type, pytd.ClassType)):
return sig # Not a method
cls = sig.params[0].type.cls
if cls is None:
# TODO(kramm): Remove once pytype stops generating ClassType(name, None).
return sig
try:
if self._FindNameAndSig(
mro.GetBasesInMRO(cls), name,
(sig.Replace(params=sig.params[1:]), is_abstract)):
return None # remove (see VisitFunction)
except mro.MROError:
return sig
return sig
def _MaybeDeleteFunction(self, f):
signatures = tuple(self._MaybeRemoveSignature(f.name, sig, f.is_abstract)
for sig in f.signatures)
if any(signatures):
if signatures.count(None):
return f.Replace(
signatures=tuple(s for s in signatures if s is not None))
else:
return f # unchanged
else:
return None # delete function
def VisitClass(self, cls):
methods = tuple(self._MaybeDeleteFunction(m) for m in cls.methods)
if methods.count(None):
return cls.Replace(methods=tuple(m for m in methods if m is not None))
else:
return cls # unchanged
class PullInMethodClasses(visitors.Visitor):
def __init__(self):
super(PullInMethodClasses, self).__init__()
self._module = None
self._total_count = collections.defaultdict(int)
self._processed_count = collections.defaultdict(int)
def _MaybeLookup(self, t):
if isinstance(t, pytd.NamedType):
try:
return self._module.Lookup(t.name)
except KeyError:
return None
elif isinstance(t, pytd.ClassType):
return t.cls
else:
return None
def _HasSelf(self, sig):
return sig.params and sig.params[0].name == "self"
def _LookupIfSimpleCall(self, t):
if not isinstance(t, (pytd.NamedType, pytd.ClassType)):
# We only do this for simple types.
return None
cls = self._MaybeLookup(t)
if not isinstance(cls, pytd.Class):
# This is not a class or it doesn't exist, so assume it's not a method.
return None
if [f.name for f in cls.methods] != ["__call__"]:
return None
method, = cls.methods
return cls if all(self._HasSelf(sig) for sig in method.signatures) else None
def _CanDelete(self, cls):
if not self._processed_count[cls.name]:
      # Leave standalone classes alone. E.g. the pytd files in
      # pytd/builtins/ define classes not used by anything else.
return False
return self._processed_count[cls.name] == self._total_count[cls.name]
def EnterTypeDeclUnit(self, module):
    # Since modules are hierarchical, we enter TypeDeclUnits multiple times,
# but we only want to record the top-level one.
if not self._module:
self._module = module
def VisitTypeDeclUnit(self, unit):
return unit.Replace(classes=tuple(c for c in unit.classes
if not self._CanDelete(c)))
def VisitClassType(self, t):
self._total_count[t.name] += 1
return t
def VisitNamedType(self, t):
self._total_count[t.name] += 1
return t
def VisitClass(self, cls):
new_constants = []
new_methods = list(cls.methods)
adjust_self = visitors.AdjustSelf(force=True)
adjust_self.class_types.append(visitors.ClassAsType(cls))
for const in cls.constants:
c = self._LookupIfSimpleCall(const.type)
if c:
signatures = c.methods[0].signatures
self._processed_count[c.name] += 1
new_method = pytd.Function(const.name, signatures, c.methods[0].kind)
new_methods.append(new_method.Visit(adjust_self))
else:
new_constants.append(const) # keep
return cls.Replace(constants=tuple(new_constants),
methods=tuple(new_methods))
class AbsorbMutableParameters(visitors.Visitor):
def VisitParameter(self, p):
if p.mutated_type is None:
return p
else:
return p.Replace(type=pytd_utils.JoinTypes([p.type, p.mutated_type]),
mutated_type=None)
class SimplifyContainers(visitors.Visitor):
def _Simplify(self, t):
if all(isinstance(p, pytd.AnythingType) for p in t.parameters):
return t.base_type
else:
return t
def VisitGenericType(self, t):
return self._Simplify(t)
class TypeParameterScope(visitors.Visitor):
def __init__(self):
super(TypeParameterScope, self).__init__()
self.type_params_stack = [{}]
def EnterClass(self, cls):
new = self.type_params_stack[-1].copy()
new.update({t.type_param: cls for t in cls.template})
self.type_params_stack.append(new)
def EnterSignature(self, sig):
new = self.type_params_stack[-1].copy()
new.update({t.type_param: sig for t in sig.template})
self.type_params_stack.append(new)
def IsClassTypeParameter(self, type_param):
class_or_sig = self.type_params_stack[-1].get(type_param)
return isinstance(class_or_sig, pytd.Class)
def IsFunctionTypeParameter(self, type_param):
class_or_sig = self.type_params_stack[-1].get(type_param)
return isinstance(class_or_sig, pytd.Signature)
def LeaveClass(self, _):
self.type_params_stack.pop()
def LeaveSignature(self, _):
self.type_params_stack.pop()
class MergeTypeParameters(TypeParameterScope):
def __init__(self):
super(MergeTypeParameters, self).__init__()
self.type_param_union = None
def _AppendNew(self, l1, l2):
# l1 and l2 are small (2-3 elements), so just use two loops.
for e2 in l2:
if not any(e1 is e2 for e1 in l1):
l1.append(e2)
def EnterSignature(self, node):
# Necessary because TypeParameterScope also defines this function
super(MergeTypeParameters, self).EnterSignature(node)
assert self.type_param_union is None
self.type_param_union = collections.defaultdict(list)
def LeaveSignature(self, node):
# Necessary because TypeParameterScope also defines this function
super(MergeTypeParameters, self).LeaveSignature(node)
self.type_param_union = None
def VisitUnionType(self, u):
type_params = [t for t in u.type_list if isinstance(t, pytd.TypeParameter)]
for t in type_params:
if self.IsFunctionTypeParameter(t):
self._AppendNew(self.type_param_union[t.name], type_params)
return u
def _AllContaining(self, type_param, seen=None):
seen = seen or set()
result = [type_param]
for other in self.type_param_union[type_param.name]:
if other in seen:
continue # break cycles
seen.add(other)
self._AppendNew(result, self._AllContaining(other, seen) or [other])
return result
def _ReplaceByOuterIfNecessary(self, item, substitutions):
containing_union = self._AllContaining(item.type_param)
if not containing_union:
return [item]
class_type_parameters = [type_param
for type_param in containing_union
if self.IsClassTypeParameter(type_param)]
if class_type_parameters:
substitutions[item.type_param] = pytd_utils.JoinTypes(
class_type_parameters)
return []
else:
      # It's a function type parameter that appears in a union with other
      # function type parameters.
return [item]
def VisitSignature(self, sig):
new_template = []
substitutions = {k: k for k in self.type_params_stack[-1]}
for item in sig.template:
new_template += self._ReplaceByOuterIfNecessary(item, substitutions)
if sig.template == new_template:
return sig
else:
return sig.Replace(template=tuple(new_template)).Visit(
visitors.ReplaceTypeParameters(substitutions)).Visit(SimplifyUnions())
def Optimize(node,
builtins=None,
lossy=False,
use_abcs=False,
max_union=7,
remove_mutable=False,
can_do_lookup=True):
node = node.Visit(RemoveDuplicates())
node = node.Visit(SimplifyUnions())
node = node.Visit(CombineReturnsAndExceptions())
node = node.Visit(Factorize())
node = node.Visit(ApplyOptionalArguments())
node = node.Visit(CombineContainers())
node = node.Visit(SimplifyContainers())
if builtins:
superclasses = builtins.Visit(visitors.ExtractSuperClassesByName())
superclasses.update(node.Visit(visitors.ExtractSuperClassesByName()))
if use_abcs:
superclasses.update(abc_hierarchy.GetSuperClasses())
hierarchy = SuperClassHierarchy(superclasses)
node = node.Visit(SimplifyUnionsWithSuperclasses(hierarchy))
if lossy:
node = node.Visit(FindCommonSuperClasses(hierarchy))
if max_union:
node = node.Visit(CollapseLongUnions(max_union))
node = node.Visit(AdjustReturnAndConstantGenericType())
if remove_mutable:
node = node.Visit(AbsorbMutableParameters())
node = node.Visit(CombineContainers())
node = node.Visit(MergeTypeParameters())
node = node.Visit(visitors.AdjustSelf())
node = node.Visit(SimplifyContainers())
if builtins and can_do_lookup:
node = visitors.LookupClasses(node, builtins, ignore_late_types=True)
node = node.Visit(RemoveInheritedMethods())
node = node.Visit(RemoveRedundantSignatures(hierarchy))
return node
| true
| true
|
790472ee541ced8da5ee14485c34662ec01fd4c1
| 433
|
py
|
Python
|
4° Período/Programação de Computadores/lista 1/CONCEITOS DE LÓGICA DE PROGRAMAÇÃO/Estrutura de seleção/Exercício 14.py
|
sullyvan15/UVV
|
2390cc2881792d036db1d8b098fe366f47cd98c3
|
[
"MIT"
] | null | null | null |
4° Período/Programação de Computadores/lista 1/CONCEITOS DE LÓGICA DE PROGRAMAÇÃO/Estrutura de seleção/Exercício 14.py
|
sullyvan15/UVV
|
2390cc2881792d036db1d8b098fe366f47cd98c3
|
[
"MIT"
] | 1
|
2020-10-07T23:33:21.000Z
|
2020-10-08T01:15:11.000Z
|
4° Período/Programação de Computadores/lista 1/CONCEITOS DE LÓGICA DE PROGRAMAÇÃO/Estrutura de seleção/Exercício 14.py
|
sullyvan15/Universidade-Vila-Velha
|
2390cc2881792d036db1d8b098fe366f47cd98c3
|
[
"MIT"
] | null | null | null |
# Grading exercise: reads two exam grades, total absences and a make-up exam
# grade (prompts kept in Portuguese). Syntax and precedence bugs fixed below.
av1 = float(input('Nota 1° avaliação: '))
av2 = float(input('Nota 2° avaliação: '))
mp = (av1 + av2) / 2  # partial average: parentheses needed, '/' binds tighter than '+'
tf = int(input('Total geral de faltas: '))  # closing ')' was missing
pf = float(input('Prova final: '))
final = (mp + pf) / 2  # final grade after the make-up exam
if tf > 20:
    print("Reprovado por falta")  # failed due to absences
elif mp >= 7:  # '=>' is not valid Python; '>=' was intended
    print("Aprovado")
elif 3 <= mp < 7:
    print("Em recuperação")  # goes to the make-up exam
    if final >= 5:
        print("Aprovado")
    else:
        print("Reprovado")
else:
    print("Reprovado")
| 19.681818
| 41
| 0.558891
|
av1 = float(input('Nota 1° avaliação: '))
av2 = float(input('Nota 2° avaliação: '))
mp = (av1 + av2) / 2
tf = int(input('Total geral de faltas: '))
pf = float(input('Prova final: '))
final = (mp + pf) / 2
if tf > 20:
    print("Reprovado por falta")
elif mp >= 7:
    print("Aprovado")
elif 3 <= mp < 7:
    print("Em recuperação")
    if final >= 5:
        print("Aprovado")
    else:
        print("Reprovado")
else:
    print("Reprovado")
| false
| true
|
7904734f69e8d1103ec2f6c7d5a09c45e4cf3266
| 241
|
py
|
Python
|
json_content.py
|
mell-old/mell-old-echo-bot
|
a59436fe0632254f76c627d52cbb6fd118fb017d
|
[
"MIT"
] | null | null | null |
json_content.py
|
mell-old/mell-old-echo-bot
|
a59436fe0632254f76c627d52cbb6fd118fb017d
|
[
"MIT"
] | null | null | null |
json_content.py
|
mell-old/mell-old-echo-bot
|
a59436fe0632254f76c627d52cbb6fd118fb017d
|
[
"MIT"
] | null | null | null |
import json
def get_text_from_json(file):
with open(file) as f:
json_text = f.read()
text = json.loads(json_text)
return text
content = get_text_from_json('content.json')
test = get_text_from_json('test.json')
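# Illustrative note (assumption, not part of the original file): despite its
# name, get_text_from_json returns the parsed JSON object, not raw text.
# E.g. if content.json holds {"greeting": "hello"}, then:
#   content["greeting"]  # -> "hello"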
| 20.083333
| 44
| 0.672199
|
import json
def get_text_from_json(file):
with open(file) as f:
json_text = f.read()
text = json.loads(json_text)
return text
content = get_text_from_json('content.json')
test = get_text_from_json('test.json')
| true
| true
|
790474c6e3eedd4bf4f4c049f9d76deba7187f54
| 280
|
py
|
Python
|
netbox/ipam/forms/bulk_create.py
|
cybarox/netbox
|
ea197eff5f4fe925bb354d1375912decd81752bd
|
[
"Apache-2.0"
] | 4,994
|
2019-07-01T13:15:44.000Z
|
2022-03-31T19:55:45.000Z
|
netbox/ipam/forms/bulk_create.py
|
cybarox/netbox
|
ea197eff5f4fe925bb354d1375912decd81752bd
|
[
"Apache-2.0"
] | 4,045
|
2019-07-01T14:24:09.000Z
|
2022-03-31T16:07:39.000Z
|
netbox/ipam/forms/bulk_create.py
|
cybarox/netbox
|
ea197eff5f4fe925bb354d1375912decd81752bd
|
[
"Apache-2.0"
] | 1,225
|
2019-07-01T15:34:03.000Z
|
2022-03-31T16:47:09.000Z
|
from django import forms
from utilities.forms import BootstrapMixin, ExpandableIPAddressField
__all__ = (
'IPAddressBulkCreateForm',
)
class IPAddressBulkCreateForm(BootstrapMixin, forms.Form):
pattern = ExpandableIPAddressField(
label='Address pattern'
)
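# Illustrative note (assumption, not part of the original file): an
# ExpandableIPAddressField accepts NetBox-style range patterns that expand
# into many addresses, e.g. a submitted value such as
#   192.0.2.[1-3]/24
# would create 192.0.2.1/24, 192.0.2.2/24 and 192.0.2.3/24.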
| 20
| 68
| 0.764286
|
from django import forms
from utilities.forms import BootstrapMixin, ExpandableIPAddressField
__all__ = (
'IPAddressBulkCreateForm',
)
class IPAddressBulkCreateForm(BootstrapMixin, forms.Form):
pattern = ExpandableIPAddressField(
label='Address pattern'
)
| true
| true
|
790474e882b4a327f4a6aff90f1ba7404b35dcec
| 187
|
py
|
Python
|
ktrade/queue_messages/buy_message.py
|
webclinic017/ktrade
|
49c02b2e93235f925e41c92603040e15b1885b0d
|
[
"MIT"
] | null | null | null |
ktrade/queue_messages/buy_message.py
|
webclinic017/ktrade
|
49c02b2e93235f925e41c92603040e15b1885b0d
|
[
"MIT"
] | null | null | null |
ktrade/queue_messages/buy_message.py
|
webclinic017/ktrade
|
49c02b2e93235f925e41c92603040e15b1885b0d
|
[
"MIT"
] | null | null | null |
from ktrade.queue_messages.queue_message import QueueMessage
class BuyMessage(QueueMessage):
def __init__(self, ticker: str):
super().__init__(type='BUY')
self.ticker = ticker
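# Illustrative usage sketch (assumption, not part of the original file;
# presumes QueueMessage stores the `type` argument as an attribute):
#   message = BuyMessage(ticker="AAPL")
#   message.type    # -> 'BUY'
#   message.ticker  # -> 'AAPL'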
| 26.714286
| 60
| 0.759358
|
from ktrade.queue_messages.queue_message import QueueMessage
class BuyMessage(QueueMessage):
def __init__(self, ticker: str):
super().__init__(type='BUY')
self.ticker = ticker
| true
| true
|
79047555a9cb961132f9f4c9bf637bfcd9bcd92d
| 3,907
|
py
|
Python
|
utils/preprocessing_data.py
|
1Stohk1/tami
|
e0aa902bb767631dd2435ed0eac05209b9bd64ed
|
[
"MIT"
] | 2
|
2020-12-17T20:51:48.000Z
|
2021-02-01T09:28:30.000Z
|
utils/preprocessing_data.py
|
Djack1010/claransom
|
e823a64ed957b37ce5f9bcf77ada1e7097a06fc4
|
[
"MIT"
] | 1
|
2020-12-25T19:48:49.000Z
|
2020-12-25T19:48:49.000Z
|
utils/preprocessing_data.py
|
Djack1010/claransom
|
e823a64ed957b37ce5f9bcf77ada1e7097a06fc4
|
[
"MIT"
] | 2
|
2020-11-02T12:00:27.000Z
|
2021-09-20T09:52:45.000Z
|
import os
import numpy as np
import pickle
import pathlib
from random import shuffle, choice
def get_info_dataset(dataset_path, update=False):
# TODO: Implements some checks to verify edits to the dataset from last pickle.dump(data)
storing_data_path = dataset_path + "/info.txt"
if update and os.path.exists(dataset_path + "/info.txt"):
os.remove(dataset_path + "/info.txt")
if os.path.isfile(storing_data_path):
with open(storing_data_path, 'rb') as filehandle:
data = pickle.load(filehandle)
class_info = data['class_info']
ds_info = data['ds_info']
# CHECKS if the paths stored match the DB
# TODO: This check just pick 3 elements and check existence, can be improved
if not os.path.exists(choice(ds_info['train_paths'])) or not os.path.exists(choice(ds_info['val_paths'])) \
or not os.path.exists(choice(ds_info['test_paths'])):
print(f"Dataset paths seem incorrect, "
f"you should update the dataset info running '-m DATA -d {dataset_path}")
exit()
# Shuffle elements
else:
shuffle(ds_info['train_paths'])
shuffle(ds_info['val_paths'])
shuffle(ds_info['final_training_paths'])
shuffle(ds_info['test_paths'])
else:
# Create dataset filepaths
train_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/training/train")
for file in f if ".png" in file or ".jpg" in file]
val_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/training/val")
for file in f if ".png" in file or ".jpg" in file]
final_training_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/training")
for file in f if ".png" in file or ".jpg" in file]
test_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/test")
for file in f if ".png" in file or ".jpg" in file]
ds_info = {'ds_type': 'images', 'train_paths': train_paths, 'val_paths': val_paths, 'test_paths': test_paths,
'final_training_paths': final_training_paths}
temp_class_names = np.array([item.name for item in pathlib.Path(dataset_path + "/training/train").glob('*')])
        # Sort class_names to keep the same order, which influences training via one-hot encoding, across different machines
class_names = np.sort(temp_class_names, axis=-1)
nclasses = len(class_names)
class_info = {"class_names": class_names, "n_classes": nclasses}
# GENERAL STATS
size_train = len(train_paths)
size_val = len(val_paths)
size_test = len(test_paths)
class_info.update({"train_size": size_train, "val_size": size_val, "test_size": size_test, 'info': {}})
for name in class_names:
size_trainf = sum([len(files) for r, d, files in os.walk(dataset_path + "/training/train/{}".format(name))])
size_valf = sum([len(files) for r, d, files in os.walk(dataset_path + "/training/val/{}".format(name))])
size_testf = sum([len(files) for r, d, files in os.walk(dataset_path + "/test/{}".format(name))])
class_info['info']["{}".format(name)] = {}
class_info['info']["{}".format(name)]['TRAIN'] = size_trainf
class_info['info']["{}".format(name)]['VAL'] = size_valf
class_info['info']["{}".format(name)]['TEST'] = size_testf
class_info['info']["{}".format(name)]['TOT'] = size_testf + size_valf + size_trainf
with open(storing_data_path, 'wb') as filehandle:
data = {'ds_info': ds_info, 'class_info': class_info}
pickle.dump(data, filehandle)
return class_info, ds_info
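# Illustrative usage sketch (assumption, not part of the original file):
# the hypothetical dataset root below must contain training/train,
# training/val and test subfolders of per-class .png/.jpg images.
#   class_info, ds_info = get_info_dataset("DATASETS/my_dataset")
#   print(class_info["n_classes"], class_info["train_size"])
#   first_batch = ds_info["train_paths"][:32]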
| 48.8375
| 120
| 0.604044
|
import os
import numpy as np
import pickle
import pathlib
from random import shuffle, choice
def get_info_dataset(dataset_path, update=False):
storing_data_path = dataset_path + "/info.txt"
if update and os.path.exists(dataset_path + "/info.txt"):
os.remove(dataset_path + "/info.txt")
if os.path.isfile(storing_data_path):
with open(storing_data_path, 'rb') as filehandle:
data = pickle.load(filehandle)
class_info = data['class_info']
ds_info = data['ds_info']
if not os.path.exists(choice(ds_info['train_paths'])) or not os.path.exists(choice(ds_info['val_paths'])) \
or not os.path.exists(choice(ds_info['test_paths'])):
print(f"Dataset paths seem incorrect, "
f"you should update the dataset info running '-m DATA -d {dataset_path}")
exit()
# Shuffle elements
else:
shuffle(ds_info['train_paths'])
shuffle(ds_info['val_paths'])
shuffle(ds_info['final_training_paths'])
shuffle(ds_info['test_paths'])
else:
# Create dataset filepaths
train_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/training/train")
for file in f if ".png" in file or ".jpg" in file]
val_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/training/val")
for file in f if ".png" in file or ".jpg" in file]
final_training_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/training")
for file in f if ".png" in file or ".jpg" in file]
test_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/test")
for file in f if ".png" in file or ".jpg" in file]
ds_info = {'ds_type': 'images', 'train_paths': train_paths, 'val_paths': val_paths, 'test_paths': test_paths,
'final_training_paths': final_training_paths}
temp_class_names = np.array([item.name for item in pathlib.Path(dataset_path + "/training/train").glob('*')])
        # Sort class_names to keep the same order, which influences training via one-hot encoding, across different machines
class_names = np.sort(temp_class_names, axis=-1)
nclasses = len(class_names)
class_info = {"class_names": class_names, "n_classes": nclasses}
# GENERAL STATS
size_train = len(train_paths)
size_val = len(val_paths)
size_test = len(test_paths)
class_info.update({"train_size": size_train, "val_size": size_val, "test_size": size_test, 'info': {}})
for name in class_names:
size_trainf = sum([len(files) for r, d, files in os.walk(dataset_path + "/training/train/{}".format(name))])
size_valf = sum([len(files) for r, d, files in os.walk(dataset_path + "/training/val/{}".format(name))])
size_testf = sum([len(files) for r, d, files in os.walk(dataset_path + "/test/{}".format(name))])
class_info['info']["{}".format(name)] = {}
class_info['info']["{}".format(name)]['TRAIN'] = size_trainf
class_info['info']["{}".format(name)]['VAL'] = size_valf
class_info['info']["{}".format(name)]['TEST'] = size_testf
class_info['info']["{}".format(name)]['TOT'] = size_testf + size_valf + size_trainf
with open(storing_data_path, 'wb') as filehandle:
data = {'ds_info': ds_info, 'class_info': class_info}
pickle.dump(data, filehandle)
return class_info, ds_info
| true
| true
|
7904755a1dd1799e443bdc36e69d4061e983af6d
| 1,572
|
py
|
Python
|
services/engine/webs/core/requests/request.py
|
huang-zp/crawloop
|
3411bfaac3b0a6a534cf6518f5132883477e7442
|
[
"Apache-2.0"
] | 19
|
2021-03-10T03:51:31.000Z
|
2022-03-14T01:05:46.000Z
|
services/engine/webs/core/requests/request.py
|
c89758971/crawloop
|
b9fcc21f7ec712a74cb5952686c1f4cce896207e
|
[
"Apache-2.0"
] | 1
|
2021-03-12T02:14:17.000Z
|
2021-03-12T02:14:17.000Z
|
services/engine/webs/core/requests/request.py
|
c89758971/crawloop
|
b9fcc21f7ec712a74cb5952686c1f4cce896207e
|
[
"Apache-2.0"
] | 5
|
2021-03-23T09:51:23.000Z
|
2022-01-14T08:16:09.000Z
|
# -*- coding: utf-8 -*-
import requests
from webs.api.exceptions.customs import ServerError, InvalidAPIRequest, RecordNotFound, RecordAlreadyExists
class RequestMixin(object):
CODE_EXCEPTION_MSG = {
400: InvalidAPIRequest,
404: RecordNotFound,
409: RecordAlreadyExists,
422: InvalidAPIRequest,
500: ServerError,
}
def __init__(self):
self.session = requests.Session()
@property
def _headers(self):
return {
"Content-Type": "application/json",
}
def request(self, server, method, url, json=None, params=None, timeout=60):
try:
response = self.session.request(
method, url, json=json, params=params,
timeout=timeout, headers=self._headers
)
except requests.exceptions.ConnectTimeout:
            raise self.CODE_EXCEPTION_MSG[500](f"{server}服务器连接超时!")  # "{server}: server connection timed out!"
except requests.exceptions.ConnectionError:
            raise self.CODE_EXCEPTION_MSG[500](f"{server}服务器连接错误!")  # "{server}: server connection error!"
try:
response_data = response.json()
except Exception as e:
            raise ServerError(f"{server}服务器参数解析失败!")  # "{server}: failed to parse the server response!"
if not (200 <= response.status_code < 300):
exception = self.CODE_EXCEPTION_MSG[response.status_code] \
if response.status_code in self.CODE_EXCEPTION_MSG else self.CODE_EXCEPTION_MSG[400]
raise exception(f"{server} Response:{response_data.get('error').get('message')}")
return response_data
web_client = RequestMixin()
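# Illustrative usage sketch (assumption, not part of the original file):
# the server name, URL and payload below are hypothetical.
#   data = web_client.request(
#       server="scheduler",
#       method="POST",
#       url="http://scheduler:8001/api/tasks",
#       json={"url": "https://example.com"},
#       timeout=30,
#   )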
| 30.230769
| 107
| 0.632316
|
import requests
from webs.api.exceptions.customs import ServerError, InvalidAPIRequest, RecordNotFound, RecordAlreadyExists
class RequestMixin(object):
CODE_EXCEPTION_MSG = {
400: InvalidAPIRequest,
404: RecordNotFound,
409: RecordAlreadyExists,
422: InvalidAPIRequest,
500: ServerError,
}
def __init__(self):
self.session = requests.Session()
@property
def _headers(self):
return {
"Content-Type": "application/json",
}
def request(self, server, method, url, json=None, params=None, timeout=60):
try:
response = self.session.request(
method, url, json=json, params=params,
timeout=timeout, headers=self._headers
)
except requests.exceptions.ConnectTimeout:
raise self.CODE_EXCEPTION_MSG[500](f"{server}服务器连接超时!")
except requests.exceptions.ConnectionError:
raise self.CODE_EXCEPTION_MSG[500](f"{server}服务器连接错误!")
try:
response_data = response.json()
except Exception as e:
raise ServerError(f"{server}服务器参数解析失败!")
if not (200 <= response.status_code < 300):
exception = self.CODE_EXCEPTION_MSG[response.status_code] \
if response.status_code in self.CODE_EXCEPTION_MSG else self.CODE_EXCEPTION_MSG[400]
raise exception(f"{server} Response:{response_data.get('error').get('message')}")
return response_data
web_client = RequestMixin()
| true
| true
|
790476bfb1348ab4e0cb7a3cfe0c76769a6bf9e2
| 9,966
|
py
|
Python
|
mmcls/models/backbones/mobilenet_v2.py
|
ChaseMonsterAway/mmclassification
|
85d26b8eb2fc799599c42ca33831c40707311bd7
|
[
"Apache-2.0"
] | null | null | null |
mmcls/models/backbones/mobilenet_v2.py
|
ChaseMonsterAway/mmclassification
|
85d26b8eb2fc799599c42ca33831c40707311bd7
|
[
"Apache-2.0"
] | null | null | null |
mmcls/models/backbones/mobilenet_v2.py
|
ChaseMonsterAway/mmclassification
|
85d26b8eb2fc799599c42ca33831c40707311bd7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from mmcls.models.utils import make_divisible
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
class InvertedResidual(BaseModule):
"""InvertedResidual block for MobileNetV2.
Args:
in_channels (int): The input channels of the InvertedResidual block.
out_channels (int): The output channels of the InvertedResidual block.
stride (int): Stride of the middle (first) 3x3 convolution.
expand_ratio (int): adjusts number of channels of the hidden layer
in InvertedResidual by this amount.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor
"""
def __init__(self,
in_channels,
out_channels,
stride,
expand_ratio,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
with_cp=False,
init_cfg=None):
super(InvertedResidual, self).__init__(init_cfg)
self.stride = stride
        assert stride in [1, 2], f'stride must be in [1, 2]. ' \
            f'But received {stride}.'
self.with_cp = with_cp
self.use_res_connect = self.stride == 1 and in_channels == out_channels
hidden_dim = int(round(in_channels * expand_ratio))
layers = []
if expand_ratio != 1:
layers.append(
ConvModule(
in_channels=in_channels,
out_channels=hidden_dim,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
layers.extend([
ConvModule(
in_channels=hidden_dim,
out_channels=hidden_dim,
kernel_size=3,
stride=stride,
padding=1,
groups=hidden_dim,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
ConvModule(
in_channels=hidden_dim,
out_channels=out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
def _inner_forward(x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
@BACKBONES.register_module()
class MobileNetV2(BaseBackbone):
"""MobileNetV2 backbone.
Args:
widen_factor (float): Width multiplier, multiply number of
channels in each layer by this amount. Default: 1.0.
out_indices (None or Sequence[int]): Output from which stages.
Default: (7, ).
frozen_stages (int): Stages to be frozen (all param fixed).
Default: -1, which means not freezing any parameters.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
# Parameters to build layers. 4 parameters are needed to construct a
# layer, from left to right: expand_ratio, channel, num_blocks, stride.
arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
[6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
[6, 320, 1, 1]]
def __init__(self,
widen_factor=1.,
out_indices=(7, ),
frozen_stages=-1,
deep_stem=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
norm_eval=False,
with_cp=False,
init_cfg=[
dict(type='Kaiming', layer=['Conv2d']),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]):
super(MobileNetV2, self).__init__(init_cfg)
self.widen_factor = widen_factor
self.out_indices = out_indices
for index in out_indices:
if index not in range(0, 8):
                raise ValueError('each item in out_indices must be in '
                                 f'range(0, 8). But received {index}')
if frozen_stages not in range(-1, 8):
raise ValueError('frozen_stages must be in range(-1, 8). '
f'But received {frozen_stages}')
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.in_channels = make_divisible(32 * widen_factor, 8)
if deep_stem:
self.conv0 = ConvModule(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
in_channels_ = 16
else:
in_channels_ = 3
self.conv0 = nn.Sequential()
self.conv1 = ConvModule(
in_channels=in_channels_,
out_channels=self.in_channels,
kernel_size=3,
stride=2,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.layers = []
for i, layer_cfg in enumerate(self.arch_settings):
expand_ratio, channel, num_blocks, stride = layer_cfg
out_channels = make_divisible(channel * widen_factor, 8)
inverted_res_layer = self.make_layer(
out_channels=out_channels,
num_blocks=num_blocks,
stride=stride,
expand_ratio=expand_ratio)
layer_name = f'layer{i + 1}'
self.add_module(layer_name, inverted_res_layer)
self.layers.append(layer_name)
if widen_factor > 1.0:
self.out_channel = int(1280 * widen_factor)
else:
self.out_channel = 1280
layer = ConvModule(
in_channels=self.in_channels,
out_channels=self.out_channel,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.add_module('conv2', layer)
self.layers.append('conv2')
def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
"""Stack InvertedResidual blocks to build a layer for MobileNetV2.
Args:
out_channels (int): out_channels of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
expand_ratio (int): Expand the number of channels of the
hidden layer in InvertedResidual by this ratio. Default: 6.
"""
layers = []
for i in range(num_blocks):
if i >= 1:
stride = 1
layers.append(
InvertedResidual(
self.in_channels,
out_channels,
stride,
expand_ratio=expand_ratio,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
with_cp=self.with_cp))
self.in_channels = out_channels
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv0(x)
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
layer = getattr(self, f'layer{i}')
layer.eval()
for param in layer.parameters():
param.requires_grad = False
def train(self, mode=True):
super(MobileNetV2, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
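A hedged usage sketch (editor addition): constructing the backbone and running a dummy forward pass. It assumes torch and the mmcls modules imported above are available; the input resolution is illustrative.

import torch

model = MobileNetV2(widen_factor=1.0, out_indices=(7, ))
model.init_weights()  # initializer inherited from mmcv's BaseModule
with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))
# out_indices=(7,) selects the final 1x1 conv ('conv2'), so a 224x224 input
# yields a single (1, 1280, 7, 7) feature map here.
print([tuple(f.shape) for f in feats])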
| 36.639706
| 173
| 0.553783
|
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from mmcls.models.utils import make_divisible
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
class InvertedResidual(BaseModule):
def __init__(self,
in_channels,
out_channels,
stride,
expand_ratio,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
with_cp=False,
init_cfg=None):
super(InvertedResidual, self).__init__(init_cfg)
self.stride = stride
        assert stride in [1, 2], f'stride must be in [1, 2]. ' \
            f'But received {stride}.'
self.with_cp = with_cp
self.use_res_connect = self.stride == 1 and in_channels == out_channels
hidden_dim = int(round(in_channels * expand_ratio))
layers = []
if expand_ratio != 1:
layers.append(
ConvModule(
in_channels=in_channels,
out_channels=hidden_dim,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
layers.extend([
ConvModule(
in_channels=hidden_dim,
out_channels=hidden_dim,
kernel_size=3,
stride=stride,
padding=1,
groups=hidden_dim,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
ConvModule(
in_channels=hidden_dim,
out_channels=out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
def _inner_forward(x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
@BACKBONES.register_module()
class MobileNetV2(BaseBackbone):
arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
[6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
[6, 320, 1, 1]]
def __init__(self,
widen_factor=1.,
out_indices=(7, ),
frozen_stages=-1,
deep_stem=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
norm_eval=False,
with_cp=False,
init_cfg=[
dict(type='Kaiming', layer=['Conv2d']),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]):
super(MobileNetV2, self).__init__(init_cfg)
self.widen_factor = widen_factor
self.out_indices = out_indices
for index in out_indices:
if index not in range(0, 8):
                raise ValueError('each item in out_indices must be in '
                                 f'range(0, 8). But received {index}')
if frozen_stages not in range(-1, 8):
raise ValueError('frozen_stages must be in range(-1, 8). '
f'But received {frozen_stages}')
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.in_channels = make_divisible(32 * widen_factor, 8)
if deep_stem:
self.conv0 = ConvModule(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
in_channels_ = 16
else:
in_channels_ = 3
self.conv0 = nn.Sequential()
self.conv1 = ConvModule(
in_channels=in_channels_,
out_channels=self.in_channels,
kernel_size=3,
stride=2,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.layers = []
for i, layer_cfg in enumerate(self.arch_settings):
expand_ratio, channel, num_blocks, stride = layer_cfg
out_channels = make_divisible(channel * widen_factor, 8)
inverted_res_layer = self.make_layer(
out_channels=out_channels,
num_blocks=num_blocks,
stride=stride,
expand_ratio=expand_ratio)
layer_name = f'layer{i + 1}'
self.add_module(layer_name, inverted_res_layer)
self.layers.append(layer_name)
if widen_factor > 1.0:
self.out_channel = int(1280 * widen_factor)
else:
self.out_channel = 1280
layer = ConvModule(
in_channels=self.in_channels,
out_channels=self.out_channel,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.add_module('conv2', layer)
self.layers.append('conv2')
def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
layers = []
for i in range(num_blocks):
if i >= 1:
stride = 1
layers.append(
InvertedResidual(
self.in_channels,
out_channels,
stride,
expand_ratio=expand_ratio,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
with_cp=self.with_cp))
self.in_channels = out_channels
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv0(x)
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
layer = getattr(self, f'layer{i}')
layer.eval()
for param in layer.parameters():
param.requires_grad = False
def train(self, mode=True):
super(MobileNetV2, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
| true
| true
|
790476c58be6046db4315612dc2aed8517f59372
| 5,449
|
py
|
Python
|
mmdet3d/datasets/pipelines/test_time_aug.py
|
Guangyun-Xu/mmdetection3d
|
75c5c6cd590386bd1539a686c5fd2cc45c5480d5
|
[
"Apache-2.0"
] | 2,216
|
2020-07-09T19:10:11.000Z
|
2022-03-31T12:39:26.000Z
|
mmdet3d/datasets/pipelines/test_time_aug.py
|
Guangyun-Xu/mmdetection3d
|
75c5c6cd590386bd1539a686c5fd2cc45c5480d5
|
[
"Apache-2.0"
] | 1,174
|
2020-07-10T07:02:28.000Z
|
2022-03-31T12:38:56.000Z
|
mmdet3d/datasets/pipelines/test_time_aug.py
|
Guangyun-Xu/mmdetection3d
|
75c5c6cd590386bd1539a686c5fd2cc45c5480d5
|
[
"Apache-2.0"
] | 681
|
2020-07-09T19:40:06.000Z
|
2022-03-31T11:02:24.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import warnings
from copy import deepcopy
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import Compose
@PIPELINES.register_module()
class MultiScaleFlipAug3D(object):
"""Test-time augmentation with multiple scales and flipping.
Args:
transforms (list[dict]): Transforms to apply in each augmentation.
        img_scale (tuple | list[tuple]): Image scales for resizing.
pts_scale_ratio (float | list[float]): Points scale ratios for
resizing.
flip (bool): Whether apply flip augmentation. Defaults to False.
flip_direction (str | list[str]): Flip augmentation directions
for images, options are "horizontal" and "vertical".
If flip_direction is list, multiple flip augmentations will
be applied. It has no effect when ``flip == False``.
Defaults to "horizontal".
pcd_horizontal_flip (bool): Whether apply horizontal flip augmentation
to point cloud. Defaults to True. Note that it works only when
'flip' is turned on.
pcd_vertical_flip (bool): Whether apply vertical flip augmentation
to point cloud. Defaults to True. Note that it works only when
'flip' is turned on.
"""
def __init__(self,
transforms,
img_scale,
pts_scale_ratio,
flip=False,
flip_direction='horizontal',
pcd_horizontal_flip=False,
pcd_vertical_flip=False):
self.transforms = Compose(transforms)
self.img_scale = img_scale if isinstance(img_scale,
list) else [img_scale]
        self.pts_scale_ratio = pts_scale_ratio \
            if isinstance(pts_scale_ratio, list) else [float(pts_scale_ratio)]
assert mmcv.is_list_of(self.img_scale, tuple)
assert mmcv.is_list_of(self.pts_scale_ratio, float)
self.flip = flip
self.pcd_horizontal_flip = pcd_horizontal_flip
self.pcd_vertical_flip = pcd_vertical_flip
self.flip_direction = flip_direction if isinstance(
flip_direction, list) else [flip_direction]
assert mmcv.is_list_of(self.flip_direction, str)
if not self.flip and self.flip_direction != ['horizontal']:
warnings.warn(
'flip_direction has no effect when flip is set to False')
if (self.flip and not any([(t['type'] == 'RandomFlip3D'
or t['type'] == 'RandomFlip')
for t in transforms])):
warnings.warn(
'flip has no effect when RandomFlip is not in transforms')
def __call__(self, results):
"""Call function to augment common fields in results.
Args:
results (dict): Result dict contains the data to augment.
Returns:
dict: The result dict contains the data that is augmented with \
different scales and flips.
"""
aug_data = []
# modified from `flip_aug = [False, True] if self.flip else [False]`
# to reduce unnecessary scenes when using double flip augmentation
# during test time
flip_aug = [True] if self.flip else [False]
pcd_horizontal_flip_aug = [False, True] \
if self.flip and self.pcd_horizontal_flip else [False]
pcd_vertical_flip_aug = [False, True] \
if self.flip and self.pcd_vertical_flip else [False]
for scale in self.img_scale:
for pts_scale_ratio in self.pts_scale_ratio:
for flip in flip_aug:
for pcd_horizontal_flip in pcd_horizontal_flip_aug:
for pcd_vertical_flip in pcd_vertical_flip_aug:
for direction in self.flip_direction:
# results.copy will cause bug
# since it is shallow copy
_results = deepcopy(results)
_results['scale'] = scale
_results['flip'] = flip
_results['pcd_scale_factor'] = \
pts_scale_ratio
_results['flip_direction'] = direction
_results['pcd_horizontal_flip'] = \
pcd_horizontal_flip
_results['pcd_vertical_flip'] = \
pcd_vertical_flip
data = self.transforms(_results)
aug_data.append(data)
# list of dict to dict of list
aug_data_dict = {key: [] for key in aug_data[0]}
for data in aug_data:
for key, val in data.items():
aug_data_dict[key].append(val)
return aug_data_dict
def __repr__(self):
"""str: Return a string that describes the module."""
repr_str = self.__class__.__name__
repr_str += f'(transforms={self.transforms}, '
repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
repr_str += f'pts_scale_ratio={self.pts_scale_ratio}, '
repr_str += f'flip_direction={self.flip_direction})'
return repr_str
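A hedged configuration sketch (editor addition): how this wrapper is typically declared in an mmdet3d test pipeline. The inner transform names and the scale values are illustrative assumptions, not taken from this file.

tta_pipeline = dict(
    type='MultiScaleFlipAug3D',
    img_scale=(1333, 800),
    pts_scale_ratio=1.0,
    flip=False,
    transforms=[
        dict(
            type='GlobalRotScaleTrans',
            rot_range=[0, 0],
            scale_ratio_range=[1.0, 1.0],
            translation_std=[0, 0, 0]),
        dict(type='RandomFlip3D'),
    ])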
| 45.033058
| 78
| 0.576436
|
import mmcv
import warnings
from copy import deepcopy
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import Compose
@PIPELINES.register_module()
class MultiScaleFlipAug3D(object):
def __init__(self,
transforms,
img_scale,
pts_scale_ratio,
flip=False,
flip_direction='horizontal',
pcd_horizontal_flip=False,
pcd_vertical_flip=False):
self.transforms = Compose(transforms)
self.img_scale = img_scale if isinstance(img_scale,
list) else [img_scale]
        self.pts_scale_ratio = pts_scale_ratio \
            if isinstance(pts_scale_ratio, list) else [float(pts_scale_ratio)]
assert mmcv.is_list_of(self.img_scale, tuple)
assert mmcv.is_list_of(self.pts_scale_ratio, float)
self.flip = flip
self.pcd_horizontal_flip = pcd_horizontal_flip
self.pcd_vertical_flip = pcd_vertical_flip
self.flip_direction = flip_direction if isinstance(
flip_direction, list) else [flip_direction]
assert mmcv.is_list_of(self.flip_direction, str)
if not self.flip and self.flip_direction != ['horizontal']:
warnings.warn(
'flip_direction has no effect when flip is set to False')
if (self.flip and not any([(t['type'] == 'RandomFlip3D'
or t['type'] == 'RandomFlip')
for t in transforms])):
warnings.warn(
'flip has no effect when RandomFlip is not in transforms')
def __call__(self, results):
aug_data = []
flip_aug = [True] if self.flip else [False]
pcd_horizontal_flip_aug = [False, True] \
if self.flip and self.pcd_horizontal_flip else [False]
pcd_vertical_flip_aug = [False, True] \
if self.flip and self.pcd_vertical_flip else [False]
for scale in self.img_scale:
for pts_scale_ratio in self.pts_scale_ratio:
for flip in flip_aug:
for pcd_horizontal_flip in pcd_horizontal_flip_aug:
for pcd_vertical_flip in pcd_vertical_flip_aug:
for direction in self.flip_direction:
_results = deepcopy(results)
_results['scale'] = scale
_results['flip'] = flip
_results['pcd_scale_factor'] = \
pts_scale_ratio
_results['flip_direction'] = direction
_results['pcd_horizontal_flip'] = \
pcd_horizontal_flip
_results['pcd_vertical_flip'] = \
pcd_vertical_flip
data = self.transforms(_results)
aug_data.append(data)
aug_data_dict = {key: [] for key in aug_data[0]}
for data in aug_data:
for key, val in data.items():
aug_data_dict[key].append(val)
return aug_data_dict
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(transforms={self.transforms}, '
repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
repr_str += f'pts_scale_ratio={self.pts_scale_ratio}, '
repr_str += f'flip_direction={self.flip_direction})'
return repr_str
| true
| true
|
790478398a39a0505d83b637eb44b9874a909f47
| 445
|
py
|
Python
|
Crashkurs Python/03_if.py
|
slogslog/Coding-Kurzgeschichten
|
9b08237038147c6c348d4cf4c69567178e07dd1d
|
[
"Unlicense"
] | 2
|
2020-03-23T14:57:50.000Z
|
2021-03-24T18:12:07.000Z
|
Crashkurs Python/03_if.py
|
slogslog/Coding-Kurzgeschichten
|
9b08237038147c6c348d4cf4c69567178e07dd1d
|
[
"Unlicense"
] | null | null | null |
Crashkurs Python/03_if.py
|
slogslog/Coding-Kurzgeschichten
|
9b08237038147c6c348d4cf4c69567178e07dd1d
|
[
"Unlicense"
] | null | null | null |
# if works (almost) the same as in all other languages
# - Indentation replaces { }. This applies to Python in general!
# - There is an elif instead of an else if
weight = 50 # kg
height = 190 # cm
bmi = weight / (height/100)**2
# bmi < 18.5 : underweight
# bmi > 25   : overweight
# otherwise  : normal weight
if bmi < 18.5:
print("Untergewicht")
print("Mehr essen!")
elif bmi > 25:
print("Übergewicht")
else:
print("Normalgewicht")
| 22.25
| 55
| 0.653933
|
weight = 50
height = 190
bmi = weight / (height/100)**2
if bmi < 18.5:
print("Untergewicht")
print("Mehr essen!")
elif bmi > 25:
print("Übergewicht")
else:
print("Normalgewicht")
| true
| true
|
790478e4b414f4421bbf8c39bb19609c4d28716f
| 556
|
py
|
Python
|
tests/test_croncierge/test_cmd_services.py
|
mburdeev/croncierge
|
b9eb086bdf4f286640e0bdc263f03851f43a13fc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_croncierge/test_cmd_services.py
|
mburdeev/croncierge
|
b9eb086bdf4f286640e0bdc263f03851f43a13fc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_croncierge/test_cmd_services.py
|
mburdeev/croncierge
|
b9eb086bdf4f286640e0bdc263f03851f43a13fc
|
[
"Apache-2.0"
] | null | null | null |
import logging
from croncierge import cmd_services
def log_cmd(cmd_response):
logging.info(f"Command response:\n{cmd_response}")
def test_cmd_stdout():
cmd = "python3 /home/maxim/projects/celecron/tests/test_croncierge/debug_cmd.py"
log_cmd(cmd_services.run_cmd(cmd))
def test_cmd_stderr():
cmd = "python3 tests/test_croncierge/debug_cmd_error.py"
log_cmd(cmd_services.run_cmd(cmd))
if __name__ == '__main__':
logging.basicConfig(
format="%(asctime)s [%(levelname)s] %(message)s",
level=logging.DEBUG)
...
| 25.272727
| 84
| 0.717626
|
import logging
from croncierge import cmd_services
def log_cmd(cmd_response):
logging.info(f"Command response:\n{cmd_response}")
def test_cmd_stdout():
cmd = "python3 /home/maxim/projects/celecron/tests/test_croncierge/debug_cmd.py"
log_cmd(cmd_services.run_cmd(cmd))
def test_cmd_stderr():
cmd = "python3 tests/test_croncierge/debug_cmd_error.py"
log_cmd(cmd_services.run_cmd(cmd))
if __name__ == '__main__':
logging.basicConfig(
format="%(asctime)s [%(levelname)s] %(message)s",
level=logging.DEBUG)
...
| true
| true
|
790478f6b7ae1e647cd65752e4961dacecadecf8
| 2,065
|
py
|
Python
|
User.py
|
imjacksonchen/weightTracker
|
9dfdd9a943cfdc5148a5a15876df2f159b0623cf
|
[
"MIT"
] | null | null | null |
User.py
|
imjacksonchen/weightTracker
|
9dfdd9a943cfdc5148a5a15876df2f159b0623cf
|
[
"MIT"
] | null | null | null |
User.py
|
imjacksonchen/weightTracker
|
9dfdd9a943cfdc5148a5a15876df2f159b0623cf
|
[
"MIT"
] | null | null | null |
# User class to hold a user's name and weight-tracking data
class User:
    ### Instance Variables ###
    # Set per instance in __init__ so the lists are not shared between
    # User objects (class-level mutable defaults would be).
    def __init__(self):
        self.__userName = ""
        self.__validUser = None
        self.__data = []
        self.__weights = []
        self.__notes = []
        self.__dates = []
        self.__intWeights = []
        self.__avgWeight = 0
        self.__minWeight = 0
        self.__maxWeight = 0
    ##########################
### Getters ###
def getUserName(self):
return self.__userName
def getData(self):
return self.__data
def getValidUser(self):
return self.__validUser
def getWeights(self):
return self.__weights
def getNotes(self):
return self.__notes
def getDates(self):
return self.__dates
def getAvgWeight(self):
return str(self.__avgWeight)
def getMinWeight(self):
return str(self.__minWeight)
def getMaxWeight(self):
return str(self.__maxWeight)
################
### Setters ###
def setUserName(self, name):
self.__userName = name
def setData(self, data):
self.__data = data
def setValidUser(self, valid):
self.__validUser = valid
def setWeights(self, weights):
self.__weights = weights
def setNotes(self, notes):
self.__notes = notes
def setDates(self, dates):
self.__dates = dates
################
def addData(self, data):
self.__data.append(data)
def addWeight(self, weight):
self.__weights.append(weight)
def addNote(self, note):
self.__notes.append(note)
def addDate(self, date):
self.__dates.append(date)
def calcAvg(self):
self.__avgWeight = int(sum(self.__intWeights)/len(self.__intWeights))
def calcMaxWeight(self):
self.__maxWeight = max(self.__intWeights)
    def calcMinWeight(self):
self.__minWeight = min(self.__intWeights)
def averageWeightDelta(self, weightData):
pass
def convertWeightList(self, weightData):
for i in range(len(weightData)):
weightData[i] = int(weightData[i])
self.__intWeights = weightData
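A hedged usage sketch (editor addition) exercising the class end to end; the sample weights are illustrative:

user = User()
user.setUserName("alice")
for w in ["180", "179", "181"]:
    user.addWeight(w)
user.convertWeightList(list(user.getWeights()))  # pass a copy to keep the raw strings
user.calcAvg()
user.calcMaxWeight()
user.calcMinWeight()
print(user.getUserName(), user.getAvgWeight(), user.getMinWeight(), user.getMaxWeight())
# -> alice 180 179 181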
| 21.071429
| 77
| 0.595157
|
class User:
    def __init__(self):
        self.__userName = ""
        self.__validUser = None
        self.__data = []
        self.__weights = []
        self.__notes = []
        self.__dates = []
        self.__intWeights = []
        self.__avgWeight = 0
        self.__minWeight = 0
        self.__maxWeight = 0
    def getUserName(self):
        return self.__userName
    def getData(self):
        return self.__data
    def getValidUser(self):
        return self.__validUser
    def getWeights(self):
        return self.__weights
    def getNotes(self):
        return self.__notes
    def getDates(self):
        return self.__dates
    def getAvgWeight(self):
        return str(self.__avgWeight)
    def getMinWeight(self):
        return str(self.__minWeight)
    def getMaxWeight(self):
        return str(self.__maxWeight)
    def setUserName(self, name):
        self.__userName = name
    def setData(self, data):
        self.__data = data
    def setValidUser(self, valid):
        self.__validUser = valid
    def setWeights(self, weights):
        self.__weights = weights
    def setNotes(self, notes):
        self.__notes = notes
    def setDates(self, dates):
        self.__dates = dates
    def addData(self, data):
        self.__data.append(data)
    def addWeight(self, weight):
        self.__weights.append(weight)
    def addNote(self, note):
        self.__notes.append(note)
    def addDate(self, date):
        self.__dates.append(date)
    def calcAvg(self):
        self.__avgWeight = int(sum(self.__intWeights)/len(self.__intWeights))
    def calcMaxWeight(self):
        self.__maxWeight = max(self.__intWeights)
    def calcMinWeight(self):
        self.__minWeight = min(self.__intWeights)
    def averageWeightDelta(self, weightData):
        pass
    def convertWeightList(self, weightData):
        for i in range(len(weightData)):
            weightData[i] = int(weightData[i])
        self.__intWeights = weightData
| true
| true
|
790478ff56f8ca4d2493e0f15bcd8e0a387ff1ec
| 4,017
|
py
|
Python
|
cogs/Matchmaking.py
|
DevvyDont/CraneDuels
|
a6c8ed9f2fd5dbc01cc3419856fae3d4a587bdc4
|
[
"MIT"
] | null | null | null |
cogs/Matchmaking.py
|
DevvyDont/CraneDuels
|
a6c8ed9f2fd5dbc01cc3419856fae3d4a587bdc4
|
[
"MIT"
] | null | null | null |
cogs/Matchmaking.py
|
DevvyDont/CraneDuels
|
a6c8ed9f2fd5dbc01cc3419856fae3d4a587bdc4
|
[
"MIT"
] | null | null | null |
import random
import string
from discord import TextChannel
from discord.ext import commands
from discord.ext.tasks import loop
from discord_components import Button, ButtonStyle
from config import settings
from util.Match import Match
class Matchmaking(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.match_create_channel: TextChannel = None
self.ongoing_matches_channel: TextChannel = None
self.match_results_channel: TextChannel = None
self.match_create_message_id = None
self.queue = []
self.active_matches = {} # Match ID -> Match instance
@commands.Cog.listener()
async def on_ready(self):
self.match_create_channel = self.bot.get_channel(settings.MATCH_CREATE_CHANNEL)
self.ongoing_matches_channel = self.bot.get_channel(settings.ONGOING_MATCHES_CHANNEL)
self.match_results_channel = self.bot.get_channel(settings.MATCH_RESULTS_CHANNEL)
# Clear the match create channel
await self.match_create_channel.purge()
button = [Button(style=ButtonStyle.green, label='Enter Queue', emoji='✅', custom_id=settings.MATCHMAKING_JOIN_QUEUE_CUSTOM_ID)]
# create the queue message
        msg = await self.match_create_channel.send("enter queue msg", components=button)
        self.match_create_message_id = msg.id
# Start the attempt create match loop
self.attempt_create_match.start()
def handle_enter_queue(self, user_id):
if user_id in self.queue:
print(f"tried adding {user_id} to queue but they are already in it")
return
self.queue.append(user_id)
print(f"{user_id} has joined the queue")
async def handle_match_win(self, match, custom_id):
winner_id = None
if custom_id:
winner_id = custom_id.replace(settings.MATCHMAKING_ONGOING_CUSTOM_ID, '')
if winner_id:
msg = await self.match_results_channel.send(content=f"User {winner_id} won match {match.id}!")
del self.active_matches[match.id]
match_msg = self.bot.get_message(self.ongoing_matches_channel, match.message_id)
await self.bot.delete_message(match_msg)
@loop(seconds=settings.MATCHMAKING_CREATE_MATCH_FREQUENCY)
async def attempt_create_match(self):
print(f"[Matchmaking] attempting to create a match with {len(self.queue)} members")
if len(self.queue) <= 1:
print("tried creating match with less than 2 members")
return
        # TODO: split queues later on based on rank/elo
matched_players = random.sample(self.queue, 2)
u1 = matched_players[0]
u2 = matched_players[1]
await self.create_match(u1, u2)
def generate_match_id(self):
avail_chars = string.ascii_uppercase + string.digits
id_list = []
for _ in range(6):
id_list.append(random.choice(avail_chars))
generated_id = ''.join(id_list)
if generated_id not in self.active_matches:
return generated_id
return self.generate_match_id()
def get_match(self, msg_id):
for match in self.active_matches.values():
if msg_id == match.message_id:
return match
return None
async def create_match(self, u1, u2):
match_id = self.generate_match_id()
buttons = [
Button(style=ButtonStyle.grey, label=f"{u1} won", emoji='✅', custom_id=f"{settings.MATCHMAKING_ONGOING_CUSTOM_ID}{u1}"),
Button(style=ButtonStyle.grey, label=f"{u2} won", emoji='✅', custom_id=f"{settings.MATCHMAKING_ONGOING_CUSTOM_ID}{u2}")
]
msg = await self.ongoing_matches_channel.send(content=f"Match between {u1}, {u2}", components=buttons)
self.active_matches[match_id] = Match(match_id, msg.id, [u1, u2])
# remove them from the queue
self.queue.remove(u1)
self.queue.remove(u2)
def setup(bot):
bot.add_cog(Matchmaking(bot))
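A hedged wiring sketch (editor addition): attaching the cog to a bot, assuming the discord.py 1.x API that this code targets. The extension path, command prefix, and token placeholder are assumptions.

from discord.ext import commands

bot = commands.Bot(command_prefix='!')
bot.load_extension('cogs.Matchmaking')  # invokes setup(bot) defined above
# bot.run('YOUR_BOT_TOKEN')  # requires a real token; intentionally commented out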
| 33.475
| 135
| 0.668658
|
import random
import string
from discord import TextChannel
from discord.ext import commands
from discord.ext.tasks import loop
from discord_components import Button, ButtonStyle
from config import settings
from util.Match import Match
class Matchmaking(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.match_create_channel: TextChannel = None
self.ongoing_matches_channel: TextChannel = None
self.match_results_channel: TextChannel = None
self.match_create_message_id = None
self.queue = []
self.active_matches = {}
@commands.Cog.listener()
async def on_ready(self):
self.match_create_channel = self.bot.get_channel(settings.MATCH_CREATE_CHANNEL)
self.ongoing_matches_channel = self.bot.get_channel(settings.ONGOING_MATCHES_CHANNEL)
self.match_results_channel = self.bot.get_channel(settings.MATCH_RESULTS_CHANNEL)
await self.match_create_channel.purge()
button = [Button(style=ButtonStyle.green, label='Enter Queue', emoji='✅', custom_id=settings.MATCHMAKING_JOIN_QUEUE_CUSTOM_ID)]
        msg = await self.match_create_channel.send("enter queue msg", components=button)
        self.match_create_message_id = msg.id
self.attempt_create_match.start()
def handle_enter_queue(self, user_id):
if user_id in self.queue:
print(f"tried adding {user_id} to queue but they are already in it")
return
self.queue.append(user_id)
print(f"{user_id} has joined the queue")
async def handle_match_win(self, match, custom_id):
winner_id = None
if custom_id:
winner_id = custom_id.replace(settings.MATCHMAKING_ONGOING_CUSTOM_ID, '')
if winner_id:
msg = await self.match_results_channel.send(content=f"User {winner_id} won match {match.id}!")
del self.active_matches[match.id]
match_msg = self.bot.get_message(self.ongoing_matches_channel, match.message_id)
await self.bot.delete_message(match_msg)
@loop(seconds=settings.MATCHMAKING_CREATE_MATCH_FREQUENCY)
async def attempt_create_match(self):
print(f"[Matchmaking] attempting to create a match with {len(self.queue)} members")
if len(self.queue) <= 1:
print("tried creating match with less than 2 members")
return
matched_players = random.sample(self.queue, 2)
u1 = matched_players[0]
u2 = matched_players[1]
await self.create_match(u1, u2)
def generate_match_id(self):
avail_chars = string.ascii_uppercase + string.digits
id_list = []
for _ in range(6):
id_list.append(random.choice(avail_chars))
generated_id = ''.join(id_list)
if generated_id not in self.active_matches:
return generated_id
return self.generate_match_id()
def get_match(self, msg_id):
for match in self.active_matches.values():
if msg_id == match.message_id:
return match
return None
async def create_match(self, u1, u2):
match_id = self.generate_match_id()
buttons = [
Button(style=ButtonStyle.grey, label=f"{u1} won", emoji='✅', custom_id=f"{settings.MATCHMAKING_ONGOING_CUSTOM_ID}{u1}"),
Button(style=ButtonStyle.grey, label=f"{u2} won", emoji='✅', custom_id=f"{settings.MATCHMAKING_ONGOING_CUSTOM_ID}{u2}")
]
msg = await self.ongoing_matches_channel.send(content=f"Match between {u1}, {u2}", components=buttons)
self.active_matches[match_id] = Match(match_id, msg.id, [u1, u2])
self.queue.remove(u1)
self.queue.remove(u2)
def setup(bot):
bot.add_cog(Matchmaking(bot))
| true
| true
|
7904792449df4d8709b46025734d4a46c9fe9f94
| 11,585
|
py
|
Python
|
src/sage/modular/abvar/cuspidal_subgroup.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 3
|
2018-09-11T11:16:26.000Z
|
2019-09-10T15:26:37.000Z
|
src/sage/modular/abvar/cuspidal_subgroup.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 2
|
2018-10-30T13:40:20.000Z
|
2020-07-23T12:13:30.000Z
|
src/sage/modular/abvar/cuspidal_subgroup.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 1
|
2020-07-23T10:29:58.000Z
|
2020-07-23T10:29:58.000Z
|
"""
Cuspidal subgroups of modular abelian varieties
AUTHORS:
- William Stein (2007-03, 2008-02)
EXAMPLES: We compute the cuspidal subgroup of `J_1(13)`::
sage: A = J1(13)
sage: C = A.cuspidal_subgroup(); C
Finite subgroup with invariants [19, 19] over QQ of Abelian variety J1(13) of dimension 2
sage: C.gens()
[[(1/19, 0, 0, 9/19)], [(0, 1/19, 1/19, 18/19)]]
sage: C.order()
361
sage: C.invariants()
[19, 19]
We compute the cuspidal subgroup of `J_0(54)`::
sage: A = J0(54)
sage: C = A.cuspidal_subgroup(); C
Finite subgroup with invariants [3, 3, 3, 3, 3, 9] over QQ of Abelian variety J0(54) of dimension 4
sage: C.gens()
[[(1/3, 0, 0, 0, 0, 1/3, 0, 2/3)], [(0, 1/3, 0, 0, 0, 2/3, 0, 1/3)], [(0, 0, 1/9, 1/9, 1/9, 1/9, 1/9, 2/9)], [(0, 0, 0, 1/3, 0, 1/3, 0, 0)], [(0, 0, 0, 0, 1/3, 1/3, 0, 1/3)], [(0, 0, 0, 0, 0, 0, 1/3, 2/3)]]
sage: C.order()
2187
sage: C.invariants()
[3, 3, 3, 3, 3, 9]
We compute the subgroup of the cuspidal subgroup generated by
rational cusps.
::
sage: C = J0(54).rational_cusp_subgroup(); C
Finite subgroup with invariants [3, 3, 9] over QQ of Abelian variety J0(54) of dimension 4
sage: C.gens()
[[(1/3, 0, 0, 1/3, 2/3, 1/3, 0, 1/3)], [(0, 0, 1/9, 1/9, 7/9, 7/9, 1/9, 8/9)], [(0, 0, 0, 0, 0, 0, 1/3, 2/3)]]
sage: C.order()
81
sage: C.invariants()
[3, 3, 9]
This might not give us the exact rational torsion subgroup, since
it might be bigger than order `81`::
sage: J0(54).rational_torsion_subgroup().multiple_of_order()
243
TESTS::
sage: C = J0(54).cuspidal_subgroup()
sage: loads(dumps(C)) == C
True
sage: D = J0(54).rational_cusp_subgroup()
sage: loads(dumps(D)) == D
True
"""
from __future__ import absolute_import
#*****************************************************************************
# Copyright (C) 2007 William Stein <wstein@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from .finite_subgroup import FiniteSubgroup
from sage.rings.all import infinity, QQ, ZZ
from sage.matrix.all import matrix
from sage.modular.arithgroup.all import is_Gamma0
from sage.modular.cusps import Cusp
class CuspidalSubgroup_generic(FiniteSubgroup):
def _compute_lattice(self, rational_only=False, rational_subgroup=False):
r"""
Return a list of vectors that define elements of the rational
homology that generate this finite subgroup.
INPUT:
- ``rational_only`` - bool (default: False); if
``True``, only use rational cusps.
OUTPUT:
- ``list`` - list of vectors
EXAMPLES::
sage: J = J0(37)
sage: C = sage.modular.abvar.cuspidal_subgroup.CuspidalSubgroup(J)
sage: C._compute_lattice()
Free module of degree 4 and rank 4 over Integer Ring
Echelon basis matrix:
[ 1 0 0 0]
[ 0 1 0 0]
[ 0 0 1 0]
[ 0 0 0 1/3]
sage: J = J0(43)
sage: C = sage.modular.abvar.cuspidal_subgroup.CuspidalSubgroup(J)
sage: C._compute_lattice()
Free module of degree 6 and rank 6 over Integer Ring
Echelon basis matrix:
[ 1 0 0 0 0 0]
[ 0 1/7 0 6/7 0 5/7]
[ 0 0 1 0 0 0]
[ 0 0 0 1 0 0]
[ 0 0 0 0 1 0]
[ 0 0 0 0 0 1]
sage: J = J0(22)
sage: C = sage.modular.abvar.cuspidal_subgroup.CuspidalSubgroup(J)
sage: C._compute_lattice()
Free module of degree 4 and rank 4 over Integer Ring
Echelon basis matrix:
[1/5 1/5 4/5 0]
[ 0 1 0 0]
[ 0 0 1 0]
[ 0 0 0 1/5]
sage: J = J1(13)
sage: C = sage.modular.abvar.cuspidal_subgroup.CuspidalSubgroup(J)
sage: C._compute_lattice()
Free module of degree 4 and rank 4 over Integer Ring
Echelon basis matrix:
[ 1/19 0 0 9/19]
[ 0 1/19 1/19 18/19]
[ 0 0 1 0]
[ 0 0 0 1]
We compute with and without the optional
``rational_only`` option.
::
sage: J = J0(27); G = sage.modular.abvar.cuspidal_subgroup.CuspidalSubgroup(J)
sage: G._compute_lattice()
Free module of degree 2 and rank 2 over Integer Ring
Echelon basis matrix:
[1/3 0]
[ 0 1/3]
sage: G._compute_lattice(rational_only=True)
Free module of degree 2 and rank 2 over Integer Ring
Echelon basis matrix:
[1/3 0]
[ 0 1]
"""
A = self.abelian_variety()
Cusp = A.modular_symbols()
Amb = Cusp.ambient_module()
Eis = Amb.eisenstein_submodule()
C = Amb.cusps()
N = Amb.level()
if rational_subgroup:
# QQ-rational subgroup of cuspidal subgroup
assert A.is_ambient()
Q = Cusp.abvarquo_rational_cuspidal_subgroup()
return Q.V()
if rational_only:
# subgroup generated by differences of rational cusps
if not is_Gamma0(A.group()):
raise NotImplementedError('computation of rational cusps only implemented in Gamma0 case.')
if not N.is_squarefree():
data = [n for n in N.coprime_integers(N) if n >= 2]
C = [c for c in C if is_rational_cusp_gamma0(c, N, data)]
v = [Amb([infinity, alpha]).element() for alpha in C]
cusp_matrix = matrix(QQ, len(v), Amb.dimension(), v)
# TODO -- refactor something out here
# Now we project onto the cuspidal part.
B = Cusp.free_module().basis_matrix().stack(Eis.free_module().basis_matrix())
X = B.solve_left(cusp_matrix)
X = X.matrix_from_columns(range(Cusp.dimension()))
lattice = X.row_module(ZZ) + A.lattice()
return lattice
class CuspidalSubgroup(CuspidalSubgroup_generic):
"""
EXAMPLES::
sage: a = J0(65)[2]
sage: t = a.cuspidal_subgroup()
sage: t.order()
6
"""
def _repr_(self):
"""
String representation of the cuspidal subgroup.
EXAMPLES::
sage: G = J0(27).cuspidal_subgroup()
sage: G._repr_()
'Finite subgroup with invariants [3, 3] over QQ of Abelian variety J0(27) of dimension 1'
"""
return "Cuspidal subgroup %sover QQ of %s"%(self._invariants_repr(), self.abelian_variety())
def lattice(self):
"""
        Return a cached tuple of vectors that define elements of the
        rational homology that generate this finite subgroup.
OUTPUT:
- ``tuple`` - cached
EXAMPLES::
sage: J = J0(27)
sage: G = J.cuspidal_subgroup()
sage: G.lattice()
Free module of degree 2 and rank 2 over Integer Ring
Echelon basis matrix:
[1/3 0]
[ 0 1/3]
Test that the result is cached::
sage: G.lattice() is G.lattice()
True
"""
try:
return self.__lattice
except AttributeError:
lattice = self._compute_lattice(rational_only = False)
self.__lattice = lattice
return lattice
class RationalCuspSubgroup(CuspidalSubgroup_generic):
"""
EXAMPLES::
sage: a = J0(65)[2]
sage: t = a.rational_cusp_subgroup()
sage: t.order()
6
"""
def _repr_(self):
"""
String representation of the cuspidal subgroup.
EXAMPLES::
sage: G = J0(27).rational_cusp_subgroup()
sage: G._repr_()
'Finite subgroup with invariants [3] over QQ of Abelian variety J0(27) of dimension 1'
"""
return "Subgroup generated by differences of rational cusps %sover QQ of %s"%(self._invariants_repr(), self.abelian_variety())
def lattice(self):
"""
Return lattice that defines this group.
OUTPUT: lattice
EXAMPLES::
sage: G = J0(27).rational_cusp_subgroup()
sage: G.lattice()
Free module of degree 2 and rank 2 over Integer Ring
Echelon basis matrix:
[1/3 0]
[ 0 1]
Test that the result is cached.
::
sage: G.lattice() is G.lattice()
True
"""
try:
return self.__lattice
except AttributeError:
lattice = self._compute_lattice(rational_only = True)
self.__lattice = lattice
return lattice
class RationalCuspidalSubgroup(CuspidalSubgroup_generic):
"""
EXAMPLES::
sage: a = J0(65)[2]
sage: t = a.rational_cuspidal_subgroup()
sage: t.order()
6
"""
def _repr_(self):
"""
String representation of the cuspidal subgroup.
EXAMPLES::
sage: G = J0(27).rational_cuspidal_subgroup()
sage: G._repr_()
'Finite subgroup with invariants [3] over QQ of Abelian variety J0(27) of dimension 1'
"""
return "Rational cuspidal subgroup %sover QQ of %s"%(self._invariants_repr(), self.abelian_variety())
def lattice(self):
"""
Return lattice that defines this group.
OUTPUT: lattice
EXAMPLES::
sage: G = J0(27).rational_cuspidal_subgroup()
sage: G.lattice()
Free module of degree 2 and rank 2 over Integer Ring
Echelon basis matrix:
[1/3 0]
[ 0 1]
Test that the result is cached.
::
sage: G.lattice() is G.lattice()
True
"""
try:
return self.__lattice
except AttributeError:
lattice = self._compute_lattice(rational_subgroup = True)
self.__lattice = lattice
return lattice
def is_rational_cusp_gamma0(c, N, data):
"""
Return True if the rational number c is a rational cusp of level N.
    This uses remarks in Glenn Stevens' Ph.D. thesis.
INPUT:
- ``c`` - a cusp
- ``N`` - a positive integer
- ``data`` - the list [n for n in range(2,N) if
gcd(n,N) == 1], which is passed in as a parameter purely for
efficiency reasons.
EXAMPLES::
sage: from sage.modular.abvar.cuspidal_subgroup import is_rational_cusp_gamma0
sage: N = 27
sage: data = [n for n in range(2,N) if gcd(n,N) == 1]
sage: is_rational_cusp_gamma0(Cusp(1/3), N, data)
False
sage: is_rational_cusp_gamma0(Cusp(1), N, data)
True
sage: is_rational_cusp_gamma0(Cusp(oo), N, data)
True
sage: is_rational_cusp_gamma0(Cusp(2/9), N, data)
False
"""
num = c.numerator()
den = c.denominator()
return all(c.is_gamma0_equiv(Cusp(num, d * den), N) for d in data)
| 30.248042
| 210
| 0.543979
|
from __future__ import absolute_import
from .finite_subgroup import FiniteSubgroup
from sage.rings.all import infinity, QQ, ZZ
from sage.matrix.all import matrix
from sage.modular.arithgroup.all import is_Gamma0
from sage.modular.cusps import Cusp
class CuspidalSubgroup_generic(FiniteSubgroup):
def _compute_lattice(self, rational_only=False, rational_subgroup=False):
A = self.abelian_variety()
Cusp = A.modular_symbols()
Amb = Cusp.ambient_module()
Eis = Amb.eisenstein_submodule()
C = Amb.cusps()
N = Amb.level()
if rational_subgroup:
assert A.is_ambient()
Q = Cusp.abvarquo_rational_cuspidal_subgroup()
return Q.V()
if rational_only:
if not is_Gamma0(A.group()):
raise NotImplementedError('computation of rational cusps only implemented in Gamma0 case.')
if not N.is_squarefree():
data = [n for n in N.coprime_integers(N) if n >= 2]
C = [c for c in C if is_rational_cusp_gamma0(c, N, data)]
v = [Amb([infinity, alpha]).element() for alpha in C]
cusp_matrix = matrix(QQ, len(v), Amb.dimension(), v)
B = Cusp.free_module().basis_matrix().stack(Eis.free_module().basis_matrix())
X = B.solve_left(cusp_matrix)
X = X.matrix_from_columns(range(Cusp.dimension()))
lattice = X.row_module(ZZ) + A.lattice()
return lattice
class CuspidalSubgroup(CuspidalSubgroup_generic):
def _repr_(self):
return "Cuspidal subgroup %sover QQ of %s"%(self._invariants_repr(), self.abelian_variety())
def lattice(self):
try:
return self.__lattice
except AttributeError:
lattice = self._compute_lattice(rational_only = False)
self.__lattice = lattice
return lattice
class RationalCuspSubgroup(CuspidalSubgroup_generic):
def _repr_(self):
return "Subgroup generated by differences of rational cusps %sover QQ of %s"%(self._invariants_repr(), self.abelian_variety())
def lattice(self):
try:
return self.__lattice
except AttributeError:
lattice = self._compute_lattice(rational_only = True)
self.__lattice = lattice
return lattice
class RationalCuspidalSubgroup(CuspidalSubgroup_generic):
def _repr_(self):
return "Rational cuspidal subgroup %sover QQ of %s"%(self._invariants_repr(), self.abelian_variety())
def lattice(self):
try:
return self.__lattice
except AttributeError:
lattice = self._compute_lattice(rational_subgroup = True)
self.__lattice = lattice
return lattice
def is_rational_cusp_gamma0(c, N, data):
num = c.numerator()
den = c.denominator()
return all(c.is_gamma0_equiv(Cusp(num, d * den), N) for d in data)
| true
| true
|
79047a1921bb946a12fb69de9909965c1f79cd84
| 7,283
|
py
|
Python
|
tests/python/sealapi/test_sanity.py
|
ZSoumia/TenSEAL
|
02697d41f92b834daa1559cc12162ffc8b2bc44d
|
[
"Apache-2.0"
] | null | null | null |
tests/python/sealapi/test_sanity.py
|
ZSoumia/TenSEAL
|
02697d41f92b834daa1559cc12162ffc8b2bc44d
|
[
"Apache-2.0"
] | null | null | null |
tests/python/sealapi/test_sanity.py
|
ZSoumia/TenSEAL
|
02697d41f92b834daa1559cc12162ffc8b2bc44d
|
[
"Apache-2.0"
] | null | null | null |
import sys, os
import pytest
import tenseal.sealapi as sealapi
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from utils import *
@pytest.mark.parametrize(
"compr_type",
[sealapi.COMPR_MODE_TYPE.NONE, sealapi.COMPR_MODE_TYPE.ZLIB, sealapi.COMPR_MODE_TYPE.ZSTD],
)
def test_serialization_compression(compr_type):
assert sealapi.Serialization.IsSupportedComprMode(compr_type) is True
assert sealapi.Serialization.ComprSizeEstimate(8, compr_type) > 0
def test_serialization_sanity():
assert int(sealapi.COMPR_MODE_TYPE.NONE) == 0
assert int(sealapi.COMPR_MODE_TYPE.ZLIB) == 1
assert int(sealapi.COMPR_MODE_TYPE.ZSTD) == 2
header = sealapi.Serialization.SEALHeader()
assert header.magic == 0xA15E
assert header.header_size == 0x10
assert header.version_major == 3
assert header.version_minor == 0x6
assert header.compr_mode == sealapi.COMPR_MODE_TYPE.NONE
assert header.size == 0
assert header.reserved == 0
assert sealapi.Serialization.IsSupportedComprMode(15) is False
header = sealapi.Serialization.SEALHeader()
assert sealapi.Serialization.IsValidHeader(header) is True
header = sealapi.Serialization.SEALHeader()
header.compr_mode = sealapi.COMPR_MODE_TYPE.ZLIB
def save_load(path):
sealapi.Serialization.SaveHeader(header, path)
save_test = sealapi.Serialization.SEALHeader()
sealapi.Serialization.LoadHeader(path, save_test, True)
assert save_test.compr_mode == sealapi.COMPR_MODE_TYPE.ZLIB
sealapi.Serialization.LoadHeader(path, save_test, False)
assert save_test.compr_mode == sealapi.COMPR_MODE_TYPE.ZLIB
tmp_file(save_load)
@pytest.mark.parametrize(
"factory",
[
sealapi.Blake2xbPRNGFactory.DefaultFactory(),
sealapi.Blake2xbPRNGFactory(),
sealapi.Blake2xbPRNGFactory([sealapi.random_uint64() for i in range(8)]),
sealapi.Shake256PRNGFactory.DefaultFactory(),
sealapi.Shake256PRNGFactory(),
sealapi.Shake256PRNGFactory([sealapi.random_uint64() for i in range(8)]),
],
)
def test_randomgen(factory):
assert sealapi.random_uint64() != sealapi.random_uint64()
for generator in [
factory.create(),
factory.create([sealapi.random_uint64() for i in range(8)]),
]:
assert generator.generate() != generator.generate()
adapter = sealapi.RandomToStandardAdapter(generator)
assert adapter() != adapter()
for i in range(1024):
generator.refresh()
generator.generate()
def test_intarray():
testcase = sealapi.Plaintext("3x^3 + 1x^1 + 3")
int_arr = testcase.dyn_array()
assert int_arr[0] == 3
assert int_arr.at(3) == 3
assert int_arr.empty() is False
assert int_arr.max_size() == 2 ** 64 - 1
assert int_arr.size() == 4
assert int_arr.capacity() == 4
def save_load(path):
int_arr.save(path)
save_test = sealapi.DynArray()
save_test.load(path)
assert save_test[0] == 3
tmp_file(save_load)
int_arr.resize(10, True)
assert int_arr.capacity() == 10
assert int_arr.size() == 10
int_arr.reserve(30)
assert int_arr.capacity() == 30
assert int_arr.capacity() == 30
int_arr.shrink_to_fit()
assert int_arr.capacity() == 10
assert int_arr.size() == 10
int_arr.clear()
assert int_arr.size() == 0
assert int_arr.capacity() == 10
assert int_arr.empty() is True
int_arr.release()
assert int_arr.capacity() == 0
def test_plaintext():
testcase = sealapi.Plaintext()
assert testcase.coeff_count() == 0
testcase = sealapi.Plaintext(15)
assert testcase.coeff_count() == 15
testcase = sealapi.Plaintext(100, 15)
assert testcase.coeff_count() == 15
assert testcase.capacity() == 100
testcase = sealapi.Plaintext("7FFx^3 + 1x^1 + 3")
assert testcase.coeff_count() == 4
assert testcase.significant_coeff_count() == 4
assert testcase.capacity() == 4
testcase2 = testcase
assert testcase2.coeff_count() == 4
assert testcase2.capacity() == 4
testcase = sealapi.Plaintext(100, 15)
testcase.reserve(200)
assert testcase.capacity() == 200
testcase = sealapi.Plaintext("7FFx^3 + 1x^1 + 3")
assert testcase.capacity() == 4
testcase.reserve(200)
assert testcase.capacity() == 200
testcase.shrink_to_fit()
assert testcase.capacity() == 4
assert testcase.dyn_array()[3] == 0x7FF
assert testcase.data(3) == 0x7FF
assert testcase.parms_id() == [0, 0, 0, 0]
assert testcase.scale == 1.0
assert testcase[3] == 0x7FF
assert testcase.to_string() == "7FFx^3 + 1x^1 + 3"
testcase.release()
assert testcase.coeff_count() == 0
testcase = sealapi.Plaintext("7FFx^3 + 1x^1 + 3")
assert testcase.coeff_count() == 4
assert testcase.nonzero_coeff_count() == 3
testcase.resize(10)
assert testcase.coeff_count() == 10
testcase.set_zero()
assert testcase.is_zero()
assert testcase.nonzero_coeff_count() == 0
testcase = sealapi.Plaintext("7FFx^3 + 2x^1 + 3")
assert testcase.is_ntt_form() is False
def save_load(path):
testcase = sealapi.Plaintext("7FFx^3 + 2x^1 + 3")
testcase.save(path)
ctx = helper_context_bfv()
save_test = sealapi.Plaintext()
save_test.load(ctx, path)
assert save_test.coeff_count() == 4
tmp_file(save_load)
@pytest.mark.parametrize("testcase", [[1, 2, 3, 4, 5, 6, 7, 8], [i for i in range(200)]])
@pytest.mark.parametrize(
"scheme,ctx",
[
(sealapi.SCHEME_TYPE.BFV, helper_context_bfv()),
(sealapi.SCHEME_TYPE.CKKS, helper_context_ckks()),
],
)
def test_ciphertext(testcase, scheme, ctx):
poly_modulus_degree = helper_poly_modulus_degree(ctx)
ctx_data = ctx.key_context_data()
parms = ctx_data.parms()
coeff_mod_count = len(parms.coeff_modulus())
keygen = sealapi.KeyGenerator(ctx)
ciphertext = sealapi.Ciphertext(ctx)
plaintext = helper_encode(scheme, ctx, testcase)
pk = sealapi.PublicKey()
keygen.create_public_key(pk)
encryptor = sealapi.Encryptor(ctx, pk)
decryptor = sealapi.Decryptor(ctx, keygen.secret_key())
encryptor.encrypt(plaintext, ciphertext)
assert len(ciphertext.parms_id()) > 0
assert ciphertext.scale > 0
assert ciphertext.coeff_modulus_size() == coeff_mod_count - 1
assert ciphertext.poly_modulus_degree() == poly_modulus_degree
assert ciphertext.dyn_array().size() > 0
assert ciphertext.size() == 2
assert ciphertext.size_capacity() == 2
assert ciphertext.is_transparent() is False
assert ciphertext.is_ntt_form() is (scheme == sealapi.SCHEME_TYPE.CKKS)
def save_load(path):
ciphertext.save(path)
save_test = sealapi.Ciphertext(ctx)
save_test.load(ctx, path)
decryptor.decrypt(save_test, plaintext)
decoded = helper_decode(scheme, ctx, plaintext)
is_close_enough(decoded[: len(testcase)], testcase)
tmp_file(save_load)
ciphertext.resize(ctx, 10)
assert ciphertext.size() == 10
assert ciphertext.size_capacity() == 10
ciphertext.reserve(15)
assert ciphertext.size() == 10
assert ciphertext.size_capacity() == 15
| 30.472803
| 95
| 0.679665
|
import sys, os
import pytest
import tenseal.sealapi as sealapi
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from utils import *
@pytest.mark.parametrize(
"compr_type",
[sealapi.COMPR_MODE_TYPE.NONE, sealapi.COMPR_MODE_TYPE.ZLIB, sealapi.COMPR_MODE_TYPE.ZSTD],
)
def test_serialization_compression(compr_type):
assert sealapi.Serialization.IsSupportedComprMode(compr_type) is True
assert sealapi.Serialization.ComprSizeEstimate(8, compr_type) > 0
def test_serialization_sanity():
assert int(sealapi.COMPR_MODE_TYPE.NONE) == 0
assert int(sealapi.COMPR_MODE_TYPE.ZLIB) == 1
assert int(sealapi.COMPR_MODE_TYPE.ZSTD) == 2
header = sealapi.Serialization.SEALHeader()
assert header.magic == 0xA15E
assert header.header_size == 0x10
assert header.version_major == 3
assert header.version_minor == 0x6
assert header.compr_mode == sealapi.COMPR_MODE_TYPE.NONE
assert header.size == 0
assert header.reserved == 0
assert sealapi.Serialization.IsSupportedComprMode(15) is False
header = sealapi.Serialization.SEALHeader()
assert sealapi.Serialization.IsValidHeader(header) is True
header = sealapi.Serialization.SEALHeader()
header.compr_mode = sealapi.COMPR_MODE_TYPE.ZLIB
def save_load(path):
sealapi.Serialization.SaveHeader(header, path)
save_test = sealapi.Serialization.SEALHeader()
sealapi.Serialization.LoadHeader(path, save_test, True)
assert save_test.compr_mode == sealapi.COMPR_MODE_TYPE.ZLIB
sealapi.Serialization.LoadHeader(path, save_test, False)
assert save_test.compr_mode == sealapi.COMPR_MODE_TYPE.ZLIB
tmp_file(save_load)
@pytest.mark.parametrize(
"factory",
[
sealapi.Blake2xbPRNGFactory.DefaultFactory(),
sealapi.Blake2xbPRNGFactory(),
sealapi.Blake2xbPRNGFactory([sealapi.random_uint64() for i in range(8)]),
sealapi.Shake256PRNGFactory.DefaultFactory(),
sealapi.Shake256PRNGFactory(),
sealapi.Shake256PRNGFactory([sealapi.random_uint64() for i in range(8)]),
],
)
def test_randomgen(factory):
assert sealapi.random_uint64() != sealapi.random_uint64()
for generator in [
factory.create(),
factory.create([sealapi.random_uint64() for i in range(8)]),
]:
assert generator.generate() != generator.generate()
adapter = sealapi.RandomToStandardAdapter(generator)
assert adapter() != adapter()
for i in range(1024):
generator.refresh()
generator.generate()
def test_intarray():
testcase = sealapi.Plaintext("3x^3 + 1x^1 + 3")
int_arr = testcase.dyn_array()
assert int_arr[0] == 3
assert int_arr.at(3) == 3
assert int_arr.empty() is False
assert int_arr.max_size() == 2 ** 64 - 1
assert int_arr.size() == 4
assert int_arr.capacity() == 4
def save_load(path):
int_arr.save(path)
save_test = sealapi.DynArray()
save_test.load(path)
assert save_test[0] == 3
tmp_file(save_load)
int_arr.resize(10, True)
assert int_arr.capacity() == 10
assert int_arr.size() == 10
int_arr.reserve(30)
assert int_arr.capacity() == 30
assert int_arr.capacity() == 30
int_arr.shrink_to_fit()
assert int_arr.capacity() == 10
assert int_arr.size() == 10
int_arr.clear()
assert int_arr.size() == 0
assert int_arr.capacity() == 10
assert int_arr.empty() is True
int_arr.release()
assert int_arr.capacity() == 0
def test_plaintext():
testcase = sealapi.Plaintext()
assert testcase.coeff_count() == 0
testcase = sealapi.Plaintext(15)
assert testcase.coeff_count() == 15
testcase = sealapi.Plaintext(100, 15)
assert testcase.coeff_count() == 15
assert testcase.capacity() == 100
testcase = sealapi.Plaintext("7FFx^3 + 1x^1 + 3")
assert testcase.coeff_count() == 4
assert testcase.significant_coeff_count() == 4
assert testcase.capacity() == 4
testcase2 = testcase
assert testcase2.coeff_count() == 4
assert testcase2.capacity() == 4
testcase = sealapi.Plaintext(100, 15)
testcase.reserve(200)
assert testcase.capacity() == 200
testcase = sealapi.Plaintext("7FFx^3 + 1x^1 + 3")
assert testcase.capacity() == 4
testcase.reserve(200)
assert testcase.capacity() == 200
testcase.shrink_to_fit()
assert testcase.capacity() == 4
assert testcase.dyn_array()[3] == 0x7FF
assert testcase.data(3) == 0x7FF
assert testcase.parms_id() == [0, 0, 0, 0]
assert testcase.scale == 1.0
assert testcase[3] == 0x7FF
assert testcase.to_string() == "7FFx^3 + 1x^1 + 3"
testcase.release()
assert testcase.coeff_count() == 0
testcase = sealapi.Plaintext("7FFx^3 + 1x^1 + 3")
assert testcase.coeff_count() == 4
assert testcase.nonzero_coeff_count() == 3
testcase.resize(10)
assert testcase.coeff_count() == 10
testcase.set_zero()
assert testcase.is_zero()
assert testcase.nonzero_coeff_count() == 0
testcase = sealapi.Plaintext("7FFx^3 + 2x^1 + 3")
assert testcase.is_ntt_form() is False
def save_load(path):
testcase = sealapi.Plaintext("7FFx^3 + 2x^1 + 3")
testcase.save(path)
ctx = helper_context_bfv()
save_test = sealapi.Plaintext()
save_test.load(ctx, path)
assert save_test.coeff_count() == 4
tmp_file(save_load)
@pytest.mark.parametrize("testcase", [[1, 2, 3, 4, 5, 6, 7, 8], [i for i in range(200)]])
@pytest.mark.parametrize(
"scheme,ctx",
[
(sealapi.SCHEME_TYPE.BFV, helper_context_bfv()),
(sealapi.SCHEME_TYPE.CKKS, helper_context_ckks()),
],
)
def test_ciphertext(testcase, scheme, ctx):
poly_modulus_degree = helper_poly_modulus_degree(ctx)
ctx_data = ctx.key_context_data()
parms = ctx_data.parms()
coeff_mod_count = len(parms.coeff_modulus())
keygen = sealapi.KeyGenerator(ctx)
ciphertext = sealapi.Ciphertext(ctx)
plaintext = helper_encode(scheme, ctx, testcase)
pk = sealapi.PublicKey()
keygen.create_public_key(pk)
encryptor = sealapi.Encryptor(ctx, pk)
decryptor = sealapi.Decryptor(ctx, keygen.secret_key())
encryptor.encrypt(plaintext, ciphertext)
assert len(ciphertext.parms_id()) > 0
assert ciphertext.scale > 0
assert ciphertext.coeff_modulus_size() == coeff_mod_count - 1
assert ciphertext.poly_modulus_degree() == poly_modulus_degree
assert ciphertext.dyn_array().size() > 0
assert ciphertext.size() == 2
assert ciphertext.size_capacity() == 2
assert ciphertext.is_transparent() is False
assert ciphertext.is_ntt_form() is (scheme == sealapi.SCHEME_TYPE.CKKS)
def save_load(path):
ciphertext.save(path)
save_test = sealapi.Ciphertext(ctx)
save_test.load(ctx, path)
decryptor.decrypt(save_test, plaintext)
decoded = helper_decode(scheme, ctx, plaintext)
is_close_enough(decoded[: len(testcase)], testcase)
tmp_file(save_load)
ciphertext.resize(ctx, 10)
assert ciphertext.size() == 10
assert ciphertext.size_capacity() == 10
ciphertext.reserve(15)
assert ciphertext.size() == 10
assert ciphertext.size_capacity() == 15
| true
| true
|
79047d7fba7698ce873fce1411a0eb4c9c2d536b
| 516
|
py
|
Python
|
vcr/files.py
|
charlax/vcrpy
|
1d3fe5c33ecf06b494fa6cbea4acd62585820687
|
[
"MIT"
] | null | null | null |
vcr/files.py
|
charlax/vcrpy
|
1d3fe5c33ecf06b494fa6cbea4acd62585820687
|
[
"MIT"
] | null | null | null |
vcr/files.py
|
charlax/vcrpy
|
1d3fe5c33ecf06b494fa6cbea4acd62585820687
|
[
"MIT"
] | null | null | null |
import os
import yaml
from .cassette import Cassette
def load_cassette(cassette_path):
    try:
        # safe_load avoids arbitrary object construction, and the context
        # manager closes the file handle promptly.
        with open(cassette_path) as cassette_file:
            pc = yaml.safe_load(cassette_file)
        return Cassette(pc)
    except IOError:
        return None
def save_cassette(cassette_path, cassette):
dirname, filename = os.path.split(cassette_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
    with open(cassette_path, 'w') as cassette_file:
cassette_file.write(yaml.dump(cassette.serialize()))
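A round-trip sketch for the two helpers above (the Cassette interface comes from the import at the top; the path is a placeholder):

path = "fixtures/example.yaml"     # placeholder path
cassette = load_cassette(path)     # returns None if the file does not exist
if cassette is not None:
    save_cassette(path, cassette)  # re-serializes via cassette.serialize()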
| 24.571429
| 60
| 0.689922
|
import os
import yaml
from .cassette import Cassette
def load_cassette(cassette_path):
try:
        with open(cassette_path) as cassette_file:
            pc = yaml.safe_load(cassette_file)
cassette = Cassette(pc)
return cassette
except IOError:
return None
def save_cassette(cassette_path, cassette):
dirname, filename = os.path.split(cassette_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
    with open(cassette_path, 'w') as cassette_file:
cassette_file.write(yaml.dump(cassette.serialize()))
| true
| true
|
79047d8bf589e4ad1e9f31097f15d6aed736d7d8
| 831
|
py
|
Python
|
examples/plot_raw_minimpl.py
|
jrobrien91/ACT
|
604b93d75366d23029f89d88df9053d52825c214
|
[
"BSD-3-Clause"
] | 9
|
2019-03-11T19:41:34.000Z
|
2019-09-17T08:34:19.000Z
|
examples/plot_raw_minimpl.py
|
jrobrien91/ACT
|
604b93d75366d23029f89d88df9053d52825c214
|
[
"BSD-3-Clause"
] | 127
|
2019-03-18T12:24:17.000Z
|
2020-01-06T20:53:06.000Z
|
examples/plot_raw_minimpl.py
|
jrobrien91/ACT
|
604b93d75366d23029f89d88df9053d52825c214
|
[
"BSD-3-Clause"
] | 15
|
2019-03-11T15:30:56.000Z
|
2019-11-01T19:10:11.000Z
|
"""
Read and plot a PPI from raw mini-MPL data
------------------------------------------
Example of how to read in raw data from the mini-MPL
and plot out the PPI by converting it to PyART
Author: Adam Theisen
"""
from matplotlib import pyplot as plt
import act
try:
import pyart
PYART_AVAILABLE = True
except ImportError:
PYART_AVAILABLE = False
# Read in sample mini-MPL data
files = act.tests.sample_files.EXAMPLE_SIGMA_MPLV5
obj = act.io.mpl.read_sigma_mplv5(files)
# Create a PyART Radar Object
radar = act.utils.create_pyart_obj(
obj, azimuth='azimuth_angle', elevation='elevation_angle', range_var='range'
)
# Create Plot Display
if PYART_AVAILABLE:
display = pyart.graph.RadarDisplay(radar)
display.plot('nrb_copol', sweep=0, title_flag=False, vmin=0, vmax=1.0, cmap='jet')
plt.show()
| 22.459459
| 86
| 0.701564
|
from matplotlib import pyplot as plt
import act
try:
import pyart
PYART_AVAILABLE = True
except ImportError:
PYART_AVAILABLE = False
files = act.tests.sample_files.EXAMPLE_SIGMA_MPLV5
obj = act.io.mpl.read_sigma_mplv5(files)
radar = act.utils.create_pyart_obj(
obj, azimuth='azimuth_angle', elevation='elevation_angle', range_var='range'
)
if PYART_AVAILABLE:
display = pyart.graph.RadarDisplay(radar)
display.plot('nrb_copol', sweep=0, title_flag=False, vmin=0, vmax=1.0, cmap='jet')
plt.show()
| true
| true
|
79047e67658193682158b84ee11a1f02342e0e08
| 2,992
|
py
|
Python
|
chemfiles/selection.py
|
chemfiles/Chemharp.py
|
45b8a02b7a0f07d6dcafa52db39df6a39f6f496c
|
[
"BSD-3-Clause"
] | 2
|
2019-04-17T11:13:13.000Z
|
2021-04-28T20:34:49.000Z
|
chemfiles/selection.py
|
chemfiles/Chemharp.py
|
45b8a02b7a0f07d6dcafa52db39df6a39f6f496c
|
[
"BSD-3-Clause"
] | 15
|
2016-02-19T21:51:33.000Z
|
2021-07-21T09:01:52.000Z
|
chemfiles/selection.py
|
chemfiles/Chemharp.py
|
45b8a02b7a0f07d6dcafa52db39df6a39f6f496c
|
[
"BSD-3-Clause"
] | 3
|
2020-06-16T08:41:24.000Z
|
2021-07-22T14:51:33.000Z
|
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from ctypes import c_uint64
import numpy as np
from .utils import CxxPointer, _call_with_growing_buffer
from .ffi import chfl_match
class Selection(CxxPointer):
"""
Select atoms in a :py:class:`Frame` with a selection language.
The selection language is built by combining basic operations. Each basic
operation follows the ``<selector>[(<variable>)] <operator> <value>``
structure, where ``<operator>`` is a comparison operator in
``== != < <= > >=``. Refer to the `full documentation
    <selections-doc_>`_ to know the allowed selectors and how to use them.
    .. _selections-doc: https://chemfiles.org/chemfiles/latest/selections.html
"""
def __init__(self, selection):
"""
Create a new :py:class:`Selection` from the given ``selection`` string.
"""
ptr = self.ffi.chfl_selection(selection.encode("utf8"))
super(Selection, self).__init__(ptr, is_const=False)
def __copy__(self):
return Selection.from_mutable_ptr(None, self.ffi.chfl_selection_copy(self.ptr))
def __repr__(self):
return "Selection('{}')".format(self.string)
@property
def size(self):
"""
Get the size of this :py:class:`Selection`.
The size of a selection is the number of atoms we are selecting
together. This value is 1 for the 'atom' context, 2 for the 'pair' and
        'bond' contexts, 3 for the 'three' and 'angles' contexts and 4 for the
        'four' and 'dihedral' contexts.
"""
size = c_uint64()
self.ffi.chfl_selection_size(self.ptr, size)
return size.value
@property
def string(self):
"""
Get the selection string used to create this :py:class:`Selection`.
"""
return _call_with_growing_buffer(
lambda buffer, size: self.ffi.chfl_selection_string(self.ptr, buffer, size),
initial=128,
)
def evaluate(self, frame):
"""
Evaluate a :py:class:`Selection` for a given :py:class:`Frame`, and
return a list of matching atoms, either as a list of index or a list
of tuples of indexes.
"""
matching = c_uint64()
self.ffi.chfl_selection_evaluate(self.mut_ptr, frame.ptr, matching)
matches = np.zeros(matching.value, chfl_match)
self.ffi.chfl_selection_matches(self.mut_ptr, matches, matching)
size = self.size
result = []
for match in matches:
assert match[0] == size
atoms = match[1]
if size == 1:
result.append(atoms[0])
elif size == 2:
result.append((atoms[0], atoms[1]))
elif size == 3:
result.append((atoms[0], atoms[1], atoms[2]))
elif size == 4:
result.append((atoms[0], atoms[1], atoms[2], atoms[3]))
return result
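A short usage sketch, assuming the public chemfiles package exposes Frame and Atom alongside this class:

from chemfiles import Atom, Frame, Selection

frame = Frame()
frame.add_atom(Atom("H"), (0.0, 0.0, 0.0))
frame.add_atom(Atom("O"), (1.0, 0.0, 0.0))
frame.add_atom(Atom("H"), (2.0, 0.0, 0.0))

# 'atom' context: size == 1, so evaluate() returns plain indexes.
selection = Selection("name H")
assert selection.size == 1
assert selection.evaluate(frame) == [0, 2]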
| 34.390805
| 88
| 0.612634
|
from __future__ import absolute_import, print_function, unicode_literals
from ctypes import c_uint64
import numpy as np
from .utils import CxxPointer, _call_with_growing_buffer
from .ffi import chfl_match
class Selection(CxxPointer):
def __init__(self, selection):
ptr = self.ffi.chfl_selection(selection.encode("utf8"))
super(Selection, self).__init__(ptr, is_const=False)
def __copy__(self):
return Selection.from_mutable_ptr(None, self.ffi.chfl_selection_copy(self.ptr))
def __repr__(self):
return "Selection('{}')".format(self.string)
@property
def size(self):
size = c_uint64()
self.ffi.chfl_selection_size(self.ptr, size)
return size.value
@property
def string(self):
return _call_with_growing_buffer(
lambda buffer, size: self.ffi.chfl_selection_string(self.ptr, buffer, size),
initial=128,
)
def evaluate(self, frame):
matching = c_uint64()
self.ffi.chfl_selection_evaluate(self.mut_ptr, frame.ptr, matching)
matches = np.zeros(matching.value, chfl_match)
self.ffi.chfl_selection_matches(self.mut_ptr, matches, matching)
size = self.size
result = []
for match in matches:
assert match[0] == size
atoms = match[1]
if size == 1:
result.append(atoms[0])
elif size == 2:
result.append((atoms[0], atoms[1]))
elif size == 3:
result.append((atoms[0], atoms[1], atoms[2]))
elif size == 4:
result.append((atoms[0], atoms[1], atoms[2], atoms[3]))
return result
| true
| true
|
79047eb85e32823c2a4d4d6af7c2191e1a040f52
| 3,617
|
py
|
Python
|
osdf/adapters/policy/utils.py
|
onap/optf-osdf
|
2b9e7f4fca3d510a201283a8561f6ff3424f5fd6
|
[
"Apache-2.0"
] | 3
|
2019-04-15T13:33:57.000Z
|
2019-10-21T17:19:19.000Z
|
osdf/adapters/policy/utils.py
|
onap/optf-osdf
|
2b9e7f4fca3d510a201283a8561f6ff3424f5fd6
|
[
"Apache-2.0"
] | null | null | null |
osdf/adapters/policy/utils.py
|
onap/optf-osdf
|
2b9e7f4fca3d510a201283a8561f6ff3424f5fd6
|
[
"Apache-2.0"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
import copy
import json
from collections import defaultdict
import itertools
from osdf.utils.programming_utils import dot_notation, list_flatten
def group_policies_gen(flat_policies, config):
"""Filter policies using the following steps:
1. Apply prioritization among the policies that are sharing the same policy type and resource type
    2. Remove redundant policies that may be applicable across different types of resources
3. Filter policies based on type and return
    :param flat_policies: list of flat policies
    :param config: configuration carrying the prioritization attributes
    :return: Filtered policies
"""
filtered_policies = defaultdict(list)
policy_name = []
policies = [x for x in flat_policies if x[list(x.keys())[0]]["type"]] # drop ones without 'type'
priority = config.get('policy_info', {}).get('prioritization_attributes', {})
aggregated_policies = dict()
for plc in policies:
attrs = [dot_notation(plc[list(plc.keys())[0]], dot_path) for key in priority.keys() for dot_path in priority[key]]
attrs_list = [x if isinstance(x, list) else [x] for x in attrs]
attributes = [list_flatten(x) if isinstance(x, list) else x for x in attrs_list]
for y in itertools.product(*attributes):
aggregated_policies.setdefault(y, [])
aggregated_policies[y].append(plc)
for key in aggregated_policies.keys():
#aggregated_policies[key].sort(key=lambda x: x['priority'], reverse=True)
prioritized_policy = aggregated_policies[key][0]
if list(prioritized_policy.keys())[0] not in policy_name:
# TODO: Check logic here... should policy appear only once across all groups?
filtered_policies[prioritized_policy[list(prioritized_policy.keys())[0]]['type']].append(prioritized_policy)
policy_name.append(list(prioritized_policy.keys())[0])
return filtered_policies
def policy_name_as_regex(policy_name):
"""Get the correct policy name as a regex
(e.g. OOF_HAS_vCPE.cloudAttributePolicy ends up in policy as OOF_HAS_vCPE.Config_MS_cloudAttributePolicy.1.xml
So, for now, we query it as OOF_HAS_vCPE..*aicAttributePolicy.*)
:param policy_name: Example: OOF_HAS_vCPE.aicAttributePolicy
:return: regexp for policy: Example: OOF_HAS_vCPE..*aicAttributePolicy.*
"""
p = policy_name.partition('.')
return p[0] + p[1] + ".*" + p[2] + ".*"
def retrieve_node(req_json, reference):
"""
Get the child node(s) from the dot-notation [reference] and parent [req_json].
For placement and other requests, there are encoded JSONs inside the request or policy,
so we need to expand it and then do a search over the parent plus expanded JSON.
"""
req_json_copy = copy.deepcopy(req_json)
info = dot_notation(req_json_copy, reference)
return list_flatten(info) if isinstance(info, list) else info
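A worked example of policy_name_as_regex, using the values from its docstring:

assert policy_name_as_regex("OOF_HAS_vCPE.aicAttributePolicy") == \
    "OOF_HAS_vCPE..*aicAttributePolicy.*"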
| 46.371795
| 123
| 0.685651
|
import copy
import json
from collections import defaultdict
import itertools
from osdf.utils.programming_utils import dot_notation, list_flatten
def group_policies_gen(flat_policies, config):
filtered_policies = defaultdict(list)
policy_name = []
policies = [x for x in flat_policies if x[list(x.keys())[0]]["type"]]
priority = config.get('policy_info', {}).get('prioritization_attributes', {})
aggregated_policies = dict()
for plc in policies:
attrs = [dot_notation(plc[list(plc.keys())[0]], dot_path) for key in priority.keys() for dot_path in priority[key]]
attrs_list = [x if isinstance(x, list) else [x] for x in attrs]
attributes = [list_flatten(x) if isinstance(x, list) else x for x in attrs_list]
for y in itertools.product(*attributes):
aggregated_policies.setdefault(y, [])
aggregated_policies[y].append(plc)
for key in aggregated_policies.keys():
prioritized_policy = aggregated_policies[key][0]
if list(prioritized_policy.keys())[0] not in policy_name:
filtered_policies[prioritized_policy[list(prioritized_policy.keys())[0]]['type']].append(prioritized_policy)
policy_name.append(list(prioritized_policy.keys())[0])
return filtered_policies
def policy_name_as_regex(policy_name):
p = policy_name.partition('.')
return p[0] + p[1] + ".*" + p[2] + ".*"
def retrieve_node(req_json, reference):
req_json_copy = copy.deepcopy(req_json)
info = dot_notation(req_json_copy, reference)
return list_flatten(info) if isinstance(info, list) else info
| true
| true
|
79047f4ad8f97f702fadf845f7f020c37c878e91
| 3,515
|
py
|
Python
|
userbot/modules/gabut.py
|
abinaya0101/Kampang-Bot
|
281ce0e11a8d3f87b57ff87c0358c30bf9a8d5b5
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/gabut.py
|
abinaya0101/Kampang-Bot
|
281ce0e11a8d3f87b57ff87c0358c30bf9a8d5b5
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/gabut.py
|
abinaya0101/Kampang-Bot
|
281ce0e11a8d3f87b57ff87c0358c30bf9a8d5b5
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
from datetime import datetime
import time
from time import sleep
from platform import uname
from userbot import ALIVE_NAME, CMD_HELP, StartTime
from userbot.events import register
# ================= CONSTANT =================
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else uname().node
# ============================================
async def get_readable_time(seconds: int) -> str:
count = 0
up_time = ""
time_list = []
time_suffix_list = ["Dtk", "Mnt", "Jam", "Hari"]
while count < 4:
count += 1
remainder, result = divmod(
seconds, 60) if count < 3 else divmod(
seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
up_time += time_list.pop() + ", "
time_list.reverse()
up_time += ":".join(time_list)
return up_time
@register(outgoing=True, pattern="^.keping$")
async def pingme(pong):
""" For .ping command, ping the userbot from any chat. """
await get_readable_time((time.time() - StartTime))
start = datetime.now()
await pong.edit("**『⍟𝐊𝐎𝐍𝐓𝐎𝐋』**")
await pong.edit("**◆◈𝐊𝐀𝐌𝐏𝐀𝐍𝐆◈◆**")
await pong.edit("**𝐏𝐄𝐂𝐀𝐇𝐊𝐀𝐍 𝐁𝐈𝐉𝐈 𝐊𝐀𝐔 𝐀𝐒𝐔**")
await pong.edit("**☬𝐒𝐈𝐀𝐏 𝐊𝐀𝐌𝐏𝐀𝐍𝐆 𝐌𝐄𝐍𝐔𝐌𝐁𝐔𝐊 𝐀𝐒𝐔☬**")
end = datetime.now()
duration = (end - start).microseconds / 1000
await pong.edit(f"**✲ 𝙺𝙾𝙽𝚃𝙾𝙻 𝙼𝙴𝙻𝙴𝙳𝚄𝙶** "
f"\n ⫸ Pala ᴷᵒⁿᵗᵒˡ `%sms` \n"
f"**✲ 𝙱𝙸𝙹𝙸 𝙿𝙴𝙻𝙴𝚁** "
f"\n ⫸ Ano <3 ᴷᵃᵐᵖᵃⁿᵍ『`{ALIVE_NAME}`』 \n" % (duration))
@register(outgoing=True, pattern='^kntl(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
sleep(1)
await typew.edit(f"**LU KONTOL**")
sleep(3)
await typew.edit("`KONTOL KONTOL KONTOL!!!`")
sleep(3)
await typew.edit("`DASAR KEPALA KONTOL!!!`")
# Owner @Si_Dian
@register(outgoing=True, pattern='^G(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
sleep(1)
await typew.edit(f"**JAKA SEMBUNG BAWA GOLOK**")
sleep(3)
await typew.edit("`NIMBRUNG GOBLOKK!!!`")
# Owner @Si_Dian
@register(outgoing=True, pattern='^ass(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
sleep(1)
await typew.edit(f"**нαι αρα кαвαя ㋛**")
sleep(3)
await typew.edit("`السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ`")
# Owner @manusiarakitann
@register(outgoing=True, pattern='^wss(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
sleep(1)
await typew.edit(f"**`Jawab Salam Dulu Gaes`**")
sleep(3)
await typew.edit("`وَعَلَيْكُمْ السَّلاَمُ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ`")
# Owner @manusiarakitann
@register(outgoing=True, pattern='^.usange(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
sleep(1)
await typew.edit("`Getting Information...`")
sleep(1)
await typew.edit("**Kampang Usage 🐨**:\n\n╭━━━━━━━━━━━━━━━━━━━━╮\n" f"-> `Penggunaan Kealayan ` **{ALIVE_NAME}**:\n" f" •**0 jam - " f"0 menit - 0%**" "\n ◐━─━─━─━─━──━─━─━─━─━◐\n" "-> `Sisa Alay Bulan Ini`:\n" f" •**9999 jam - 9999 menit " f"- 100%**\n" "╰━━━━━━━━━━━━━━━━━━━━╯"
)
# @mixiologist
CMD_HELP.update({
"fakedyno":
"`.usange`\
\nUsage: tipu tipu anjeeeng.\
\n\n`L`\
\nUsage: Untuk Menjawab Salam."
})
| 29.788136
| 283
| 0.572688
|
from datetime import datetime
import time
from time import sleep
from platform import uname
from userbot import ALIVE_NAME, CMD_HELP, StartTime
from userbot.events import register
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else uname().node
async def get_readable_time(seconds: int) -> str:
count = 0
up_time = ""
time_list = []
time_suffix_list = ["Dtk", "Mnt", "Jam", "Hari"]
while count < 4:
count += 1
remainder, result = divmod(
seconds, 60) if count < 3 else divmod(
seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
up_time += time_list.pop() + ", "
time_list.reverse()
up_time += ":".join(time_list)
return up_time
@register(outgoing=True, pattern="^.keping$")
async def pingme(pong):
await get_readable_time((time.time() - StartTime))
start = datetime.now()
await pong.edit("**『⍟𝐊𝐎𝐍𝐓𝐎𝐋』**")
await pong.edit("**◆◈𝐊𝐀𝐌𝐏𝐀𝐍𝐆◈◆**")
await pong.edit("**𝐏𝐄𝐂𝐀𝐇𝐊𝐀𝐍 𝐁𝐈𝐉𝐈 𝐊𝐀𝐔 𝐀𝐒𝐔**")
await pong.edit("**☬𝐒𝐈𝐀𝐏 𝐊𝐀𝐌𝐏𝐀𝐍𝐆 𝐌𝐄𝐍𝐔𝐌𝐁𝐔𝐊 𝐀𝐒𝐔☬**")
end = datetime.now()
duration = (end - start).microseconds / 1000
await pong.edit(f"**✲ 𝙺𝙾𝙽𝚃𝙾𝙻 𝙼𝙴𝙻𝙴𝙳𝚄𝙶** "
f"\n ⫸ Pala ᴷᵒⁿᵗᵒˡ `%sms` \n"
f"**✲ 𝙱𝙸𝙹𝙸 𝙿𝙴𝙻𝙴𝚁** "
f"\n ⫸ Ano <3 ᴷᵃᵐᵖᵃⁿᵍ『`{ALIVE_NAME}`』 \n" % (duration))
@register(outgoing=True, pattern='^kntl(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
sleep(1)
await typew.edit(f"**LU KONTOL**")
sleep(3)
await typew.edit("`KONTOL KONTOL KONTOL!!!`")
sleep(3)
await typew.edit("`DASAR KEPALA KONTOL!!!`")
@register(outgoing=True, pattern='^G(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
sleep(1)
await typew.edit(f"**JAKA SEMBUNG BAWA GOLOK**")
sleep(3)
await typew.edit("`NIMBRUNG GOBLOKK!!!`")
@register(outgoing=True, pattern='^ass(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
sleep(1)
await typew.edit(f"**нαι αρα кαвαя ㋛**")
sleep(3)
await typew.edit("`السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ`")
@register(outgoing=True, pattern='^wss(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
sleep(1)
await typew.edit(f"**`Jawab Salam Dulu Gaes`**")
sleep(3)
await typew.edit("`وَعَلَيْكُمْ السَّلاَمُ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ`")
@register(outgoing=True, pattern='^.usange(?: |$)(.*)')
async def typewriter(typew):
typew.pattern_match.group(1)
sleep(1)
await typew.edit("`Getting Information...`")
sleep(1)
await typew.edit("**Kampang Usage 🐨**:\n\n╭━━━━━━━━━━━━━━━━━━━━╮\n" f"-> `Penggunaan Kealayan ` **{ALIVE_NAME}**:\n" f" •**0 jam - " f"0 menit - 0%**" "\n ◐━─━─━─━─━──━─━─━─━─━◐\n" "-> `Sisa Alay Bulan Ini`:\n" f" •**9999 jam - 9999 menit " f"- 100%**\n" "╰━━━━━━━━━━━━━━━━━━━━╯"
)
CMD_HELP.update({
"fakedyno":
"`.usange`\
\nUsage: tipu tipu anjeeeng.\
\n\n`L`\
\nUsage: Untuk Menjawab Salam."
})
| true
| true
|
79047f4f73742dccd3820d22f7afa068a36e9b11
| 10,702
|
py
|
Python
|
luigi/contrib/scalding.py
|
soxofaan/luigi
|
865cc4e97540b5adc19898cbb5f31f39bff791de
|
[
"Apache-2.0"
] | 5
|
2015-02-26T18:52:56.000Z
|
2017-07-07T05:47:18.000Z
|
luigi/contrib/scalding.py
|
RileyEv/luigi
|
00a094ddf6438aca5b481ac94bf03c2b7d8d2f73
|
[
"Apache-2.0"
] | 9
|
2017-03-22T23:38:48.000Z
|
2019-01-28T21:13:06.000Z
|
luigi/contrib/scalding.py
|
RileyEv/luigi
|
00a094ddf6438aca5b481ac94bf03c2b7d8d2f73
|
[
"Apache-2.0"
] | 9
|
2015-01-26T14:47:57.000Z
|
2020-07-07T17:01:25.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
import subprocess
import warnings
from luigi import six
import luigi.configuration
import luigi.contrib.hadoop
import luigi.contrib.hadoop_jar
import luigi.contrib.hdfs
from luigi import LocalTarget
from luigi.task import flatten
logger = logging.getLogger('luigi-interface')
"""
Scalding support for Luigi.
Example configuration section in luigi.cfg::
[scalding]
# scala home directory, which should include a lib subdir with scala jars.
scala-home: /usr/share/scala
# scalding home directory, which should include a lib subdir with
# scalding-*-assembly-* jars as built from the official Twitter build script.
scalding-home: /usr/share/scalding
# provided dependencies, e.g. jars required for compiling but not executing
# scalding jobs. Currently required jars:
# org.apache.hadoop/hadoop-core/0.20.2
# org.slf4j/slf4j-log4j12/1.6.6
# log4j/log4j/1.2.15
# commons-httpclient/commons-httpclient/3.1
# commons-cli/commons-cli/1.2
# org.apache.zookeeper/zookeeper/3.3.4
scalding-provided: /usr/share/scalding/provided
# additional jars required.
scalding-libjars: /usr/share/scalding/libjars
"""
class ScaldingJobRunner(luigi.contrib.hadoop.JobRunner):
"""
JobRunner for `pyscald` commands. Used to run a ScaldingJobTask.
"""
def __init__(self):
conf = luigi.configuration.get_config()
default = os.environ.get('SCALA_HOME', '/usr/share/scala')
self.scala_home = conf.get('scalding', 'scala-home', default)
default = os.environ.get('SCALDING_HOME', '/usr/share/scalding')
self.scalding_home = conf.get('scalding', 'scalding-home', default)
self.provided_dir = conf.get(
'scalding', 'scalding-provided', os.path.join(default, 'provided'))
self.libjars_dir = conf.get(
'scalding', 'scalding-libjars', os.path.join(default, 'libjars'))
self.tmp_dir = LocalTarget(is_tmp=True)
def _get_jars(self, path):
return [os.path.join(path, j) for j in os.listdir(path)
if j.endswith('.jar')]
def get_scala_jars(self, include_compiler=False):
lib_dir = os.path.join(self.scala_home, 'lib')
jars = [os.path.join(lib_dir, 'scala-library.jar')]
# additional jar for scala 2.10 only
reflect = os.path.join(lib_dir, 'scala-reflect.jar')
if os.path.exists(reflect):
jars.append(reflect)
if include_compiler:
jars.append(os.path.join(lib_dir, 'scala-compiler.jar'))
return jars
def get_scalding_jars(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
return self._get_jars(lib_dir)
def get_scalding_core(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
for j in os.listdir(lib_dir):
if j.startswith('scalding-core-'):
p = os.path.join(lib_dir, j)
logger.debug('Found scalding-core: %s', p)
return p
raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding-core.')
def get_provided_jars(self):
return self._get_jars(self.provided_dir)
def get_libjars(self):
return self._get_jars(self.libjars_dir)
def get_tmp_job_jar(self, source):
job_name = os.path.basename(os.path.splitext(source)[0])
return os.path.join(self.tmp_dir.path, job_name + '.jar')
def get_build_dir(self, source):
build_dir = os.path.join(self.tmp_dir.path, 'build')
return build_dir
def get_job_class(self, source):
# find name of the job class
# usually the one that matches file name or last class that extends Job
job_name = os.path.splitext(os.path.basename(source))[0]
package = None
job_class = None
for l in open(source).readlines():
p = re.search(r'package\s+([^\s\(]+)', l)
if p:
package = p.groups()[0]
p = re.search(r'class\s+([^\s\(]+).*extends\s+.*Job', l)
if p:
job_class = p.groups()[0]
if job_class == job_name:
break
if job_class:
if package:
job_class = package + '.' + job_class
logger.debug('Found scalding job class: %s', job_class)
return job_class
else:
            raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding job class.')
def build_job_jar(self, job):
job_jar = job.jar()
if job_jar:
if not os.path.exists(job_jar):
logger.error("Can't find jar: %s, full path %s", job_jar, os.path.abspath(job_jar))
raise Exception("job jar does not exist")
if not job.job_class():
logger.error("Undefined job_class()")
raise Exception("Undefined job_class()")
return job_jar
job_src = job.source()
if not job_src:
logger.error("Both source() and jar() undefined")
raise Exception("Both source() and jar() undefined")
if not os.path.exists(job_src):
logger.error("Can't find source: %s, full path %s", job_src, os.path.abspath(job_src))
raise Exception("job source does not exist")
job_src = job.source()
job_jar = self.get_tmp_job_jar(job_src)
build_dir = self.get_build_dir(job_src)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
classpath = ':'.join(filter(None,
self.get_scalding_jars() +
self.get_provided_jars() +
self.get_libjars() +
job.extra_jars()))
scala_cp = ':'.join(self.get_scala_jars(include_compiler=True))
# compile scala source
arglist = ['java', '-cp', scala_cp, 'scala.tools.nsc.Main',
'-classpath', classpath,
'-d', build_dir, job_src]
logger.info('Compiling scala source: %s', subprocess.list2cmdline(arglist))
subprocess.check_call(arglist)
# build job jar file
arglist = ['jar', 'cf', job_jar, '-C', build_dir, '.']
logger.info('Building job jar: %s', subprocess.list2cmdline(arglist))
subprocess.check_call(arglist)
return job_jar
def run_job(self, job, tracking_url_callback=None):
if tracking_url_callback is not None:
warnings.warn("tracking_url_callback argument is deprecated, task.set_tracking_url is "
"used instead.", DeprecationWarning)
job_jar = self.build_job_jar(job)
jars = [job_jar] + self.get_libjars() + job.extra_jars()
scalding_core = self.get_scalding_core()
libjars = ','.join(filter(None, jars))
arglist = luigi.contrib.hdfs.load_hadoop_cmd() + ['jar', scalding_core, '-libjars', libjars]
arglist += ['-D%s' % c for c in job.jobconfs()]
job_class = job.job_class() or self.get_job_class(job.source())
arglist += [job_class, '--hdfs']
# scalding does not parse argument with '=' properly
arglist += ['--name', job.task_id.replace('=', ':')]
(tmp_files, job_args) = luigi.contrib.hadoop_jar.fix_paths(job)
arglist += job_args
env = os.environ.copy()
jars.append(scalding_core)
hadoop_cp = ':'.join(filter(None, jars))
env['HADOOP_CLASSPATH'] = hadoop_cp
logger.info("Submitting Hadoop job: HADOOP_CLASSPATH=%s %s",
hadoop_cp, subprocess.list2cmdline(arglist))
luigi.contrib.hadoop.run_and_track_hadoop_job(arglist, job.set_tracking_url, env=env)
for a, b in tmp_files:
a.move(b)
class ScaldingJobTask(luigi.contrib.hadoop.BaseHadoopJobTask):
"""
    A job task for Scalding that defines a Scala source and an (optional) main method.
    requires() should return a dictionary where the keys are Scalding argument
    names and the values are subtasks or lists of subtasks.
For example:
.. code-block:: python
{'input1': A, 'input2': C} => --input1 <Aoutput> --input2 <Coutput>
{'input1': [A, B], 'input2': [C]} => --input1 <Aoutput> <Boutput> --input2 <Coutput>
"""
def relpath(self, current_file, rel_path):
"""
Compute path given current file and relative path.
"""
script_dir = os.path.dirname(os.path.abspath(current_file))
rel_path = os.path.abspath(os.path.join(script_dir, rel_path))
return rel_path
def source(self):
"""
Path to the scala source for this Scalding Job
Either one of source() or jar() must be specified.
"""
return None
def jar(self):
"""
Path to the jar file for this Scalding Job
Either one of source() or jar() must be specified.
"""
return None
def extra_jars(self):
"""
Extra jars for building and running this Scalding Job.
"""
return []
def job_class(self):
"""
optional main job class for this Scalding Job.
"""
return None
def job_runner(self):
return ScaldingJobRunner()
def atomic_output(self):
"""
If True, then rewrite output arguments to be temp locations and
atomically move them into place after the job finishes.
"""
return True
def requires(self):
return {}
def job_args(self):
"""
Extra arguments to pass to the Scalding job.
"""
return []
def args(self):
"""
Returns an array of args to pass to the job.
"""
arglist = []
for k, v in six.iteritems(self.requires_hadoop()):
arglist.append('--' + k)
arglist.extend([t.output().path for t in flatten(v)])
arglist.extend(['--output', self.output()])
arglist.extend(self.job_args())
return arglist
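A hedged sketch of a concrete task; InputTask and the paths below are illustrative placeholders, not part of this module:

class WordCount(ScaldingJobTask):
    """Illustrative subclass; the job class is resolved from the source file."""

    def source(self):
        # compiled into a job jar by ScaldingJobRunner.build_job_jar()
        return self.relpath(__file__, 'scala/WordCount.scala')

    def requires(self):
        # rendered by args() as: --input <InputTask output path>
        return {'input': InputTask()}

    def output(self):
        return luigi.contrib.hdfs.HdfsTarget('/tmp/wordcount-out')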
| 34.411576
| 100
| 0.611942
|
import logging
import os
import re
import subprocess
import warnings
from luigi import six
import luigi.configuration
import luigi.contrib.hadoop
import luigi.contrib.hadoop_jar
import luigi.contrib.hdfs
from luigi import LocalTarget
from luigi.task import flatten
logger = logging.getLogger('luigi-interface')
class ScaldingJobRunner(luigi.contrib.hadoop.JobRunner):
def __init__(self):
conf = luigi.configuration.get_config()
default = os.environ.get('SCALA_HOME', '/usr/share/scala')
self.scala_home = conf.get('scalding', 'scala-home', default)
default = os.environ.get('SCALDING_HOME', '/usr/share/scalding')
self.scalding_home = conf.get('scalding', 'scalding-home', default)
self.provided_dir = conf.get(
'scalding', 'scalding-provided', os.path.join(default, 'provided'))
self.libjars_dir = conf.get(
'scalding', 'scalding-libjars', os.path.join(default, 'libjars'))
self.tmp_dir = LocalTarget(is_tmp=True)
def _get_jars(self, path):
return [os.path.join(path, j) for j in os.listdir(path)
if j.endswith('.jar')]
def get_scala_jars(self, include_compiler=False):
lib_dir = os.path.join(self.scala_home, 'lib')
jars = [os.path.join(lib_dir, 'scala-library.jar')]
reflect = os.path.join(lib_dir, 'scala-reflect.jar')
if os.path.exists(reflect):
jars.append(reflect)
if include_compiler:
jars.append(os.path.join(lib_dir, 'scala-compiler.jar'))
return jars
def get_scalding_jars(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
return self._get_jars(lib_dir)
def get_scalding_core(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
for j in os.listdir(lib_dir):
if j.startswith('scalding-core-'):
p = os.path.join(lib_dir, j)
logger.debug('Found scalding-core: %s', p)
return p
raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding-core.')
def get_provided_jars(self):
return self._get_jars(self.provided_dir)
def get_libjars(self):
return self._get_jars(self.libjars_dir)
def get_tmp_job_jar(self, source):
job_name = os.path.basename(os.path.splitext(source)[0])
return os.path.join(self.tmp_dir.path, job_name + '.jar')
def get_build_dir(self, source):
build_dir = os.path.join(self.tmp_dir.path, 'build')
return build_dir
def get_job_class(self, source):
job_name = os.path.splitext(os.path.basename(source))[0]
package = None
job_class = None
for l in open(source).readlines():
p = re.search(r'package\s+([^\s\(]+)', l)
if p:
package = p.groups()[0]
p = re.search(r'class\s+([^\s\(]+).*extends\s+.*Job', l)
if p:
job_class = p.groups()[0]
if job_class == job_name:
break
if job_class:
if package:
job_class = package + '.' + job_class
logger.debug('Found scalding job class: %s', job_class)
return job_class
else:
            raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding job class.')
def build_job_jar(self, job):
job_jar = job.jar()
if job_jar:
if not os.path.exists(job_jar):
logger.error("Can't find jar: %s, full path %s", job_jar, os.path.abspath(job_jar))
raise Exception("job jar does not exist")
if not job.job_class():
logger.error("Undefined job_class()")
raise Exception("Undefined job_class()")
return job_jar
job_src = job.source()
if not job_src:
logger.error("Both source() and jar() undefined")
raise Exception("Both source() and jar() undefined")
if not os.path.exists(job_src):
logger.error("Can't find source: %s, full path %s", job_src, os.path.abspath(job_src))
raise Exception("job source does not exist")
job_src = job.source()
job_jar = self.get_tmp_job_jar(job_src)
build_dir = self.get_build_dir(job_src)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
classpath = ':'.join(filter(None,
self.get_scalding_jars() +
self.get_provided_jars() +
self.get_libjars() +
job.extra_jars()))
scala_cp = ':'.join(self.get_scala_jars(include_compiler=True))
arglist = ['java', '-cp', scala_cp, 'scala.tools.nsc.Main',
'-classpath', classpath,
'-d', build_dir, job_src]
logger.info('Compiling scala source: %s', subprocess.list2cmdline(arglist))
subprocess.check_call(arglist)
arglist = ['jar', 'cf', job_jar, '-C', build_dir, '.']
logger.info('Building job jar: %s', subprocess.list2cmdline(arglist))
subprocess.check_call(arglist)
return job_jar
def run_job(self, job, tracking_url_callback=None):
if tracking_url_callback is not None:
warnings.warn("tracking_url_callback argument is deprecated, task.set_tracking_url is "
"used instead.", DeprecationWarning)
job_jar = self.build_job_jar(job)
jars = [job_jar] + self.get_libjars() + job.extra_jars()
scalding_core = self.get_scalding_core()
libjars = ','.join(filter(None, jars))
arglist = luigi.contrib.hdfs.load_hadoop_cmd() + ['jar', scalding_core, '-libjars', libjars]
arglist += ['-D%s' % c for c in job.jobconfs()]
job_class = job.job_class() or self.get_job_class(job.source())
arglist += [job_class, '--hdfs']
arglist += ['--name', job.task_id.replace('=', ':')]
(tmp_files, job_args) = luigi.contrib.hadoop_jar.fix_paths(job)
arglist += job_args
env = os.environ.copy()
jars.append(scalding_core)
hadoop_cp = ':'.join(filter(None, jars))
env['HADOOP_CLASSPATH'] = hadoop_cp
logger.info("Submitting Hadoop job: HADOOP_CLASSPATH=%s %s",
hadoop_cp, subprocess.list2cmdline(arglist))
luigi.contrib.hadoop.run_and_track_hadoop_job(arglist, job.set_tracking_url, env=env)
for a, b in tmp_files:
a.move(b)
class ScaldingJobTask(luigi.contrib.hadoop.BaseHadoopJobTask):
def relpath(self, current_file, rel_path):
script_dir = os.path.dirname(os.path.abspath(current_file))
rel_path = os.path.abspath(os.path.join(script_dir, rel_path))
return rel_path
def source(self):
return None
def jar(self):
return None
def extra_jars(self):
return []
def job_class(self):
return None
def job_runner(self):
return ScaldingJobRunner()
def atomic_output(self):
return True
def requires(self):
return {}
def job_args(self):
return []
def args(self):
arglist = []
for k, v in six.iteritems(self.requires_hadoop()):
arglist.append('--' + k)
arglist.extend([t.output().path for t in flatten(v)])
arglist.extend(['--output', self.output()])
arglist.extend(self.job_args())
return arglist
| true
| true
|
79047f716fd6e7825a04bd9da395cccf0a71fea9
| 1,690
|
py
|
Python
|
cmsplugin_blocks/migrations/0004_change_image_as_filefield_.py
|
emencia/cmsplugin-blocks
|
7ec99afd542948aef5d9069bd001729f5c14bded
|
[
"MIT"
] | 1
|
2019-04-14T01:30:37.000Z
|
2019-04-14T01:30:37.000Z
|
cmsplugin_blocks/migrations/0004_change_image_as_filefield_.py
|
emencia/cmsplugin-blocks
|
7ec99afd542948aef5d9069bd001729f5c14bded
|
[
"MIT"
] | 16
|
2018-02-19T11:13:15.000Z
|
2022-02-05T00:10:41.000Z
|
cmsplugin_blocks/migrations/0004_change_image_as_filefield_.py
|
emencia/cmsplugin-blocks
|
7ec99afd542948aef5d9069bd001729f5c14bded
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.9 on 2020-03-20 00:50
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cmsplugin_blocks', '0003_slideitem_title'),
]
operations = [
migrations.AlterField(
model_name='albumitem',
name='image',
field=models.FileField(default=None, max_length=255, null=True, upload_to='blocks/album/%y/%m', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['jpg', 'jpeg', 'svg', 'gif', 'png'])], verbose_name='Image'),
),
migrations.AlterField(
model_name='card',
name='image',
field=models.FileField(blank=True, default=None, max_length=255, null=True, upload_to='blocks/card/%y/%m', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['jpg', 'jpeg', 'svg', 'gif', 'png'])], verbose_name='Image'),
),
migrations.AlterField(
model_name='hero',
name='image',
field=models.FileField(blank=True, default=None, max_length=255, null=True, upload_to='blocks/hero/%y/%m', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['jpg', 'jpeg', 'svg', 'gif', 'png'])], verbose_name='Image'),
),
migrations.AlterField(
model_name='slideitem',
name='image',
field=models.FileField(default=None, max_length=255, null=True, upload_to='blocks/slider/%y/%m', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['jpg', 'jpeg', 'svg', 'gif', 'png'])], verbose_name='Image'),
),
]
| 48.285714
| 258
| 0.646154
|
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cmsplugin_blocks', '0003_slideitem_title'),
]
operations = [
migrations.AlterField(
model_name='albumitem',
name='image',
field=models.FileField(default=None, max_length=255, null=True, upload_to='blocks/album/%y/%m', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['jpg', 'jpeg', 'svg', 'gif', 'png'])], verbose_name='Image'),
),
migrations.AlterField(
model_name='card',
name='image',
field=models.FileField(blank=True, default=None, max_length=255, null=True, upload_to='blocks/card/%y/%m', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['jpg', 'jpeg', 'svg', 'gif', 'png'])], verbose_name='Image'),
),
migrations.AlterField(
model_name='hero',
name='image',
field=models.FileField(blank=True, default=None, max_length=255, null=True, upload_to='blocks/hero/%y/%m', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['jpg', 'jpeg', 'svg', 'gif', 'png'])], verbose_name='Image'),
),
migrations.AlterField(
model_name='slideitem',
name='image',
field=models.FileField(default=None, max_length=255, null=True, upload_to='blocks/slider/%y/%m', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['jpg', 'jpeg', 'svg', 'gif', 'png'])], verbose_name='Image'),
),
]
| true
| true
|
79047ff73d39d72ec6d8b62aeae198e9367c9603
| 4,076
|
py
|
Python
|
benchmark/startQiskit953.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit953.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit953.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=5
# total number=45
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = pi (180 degrees) ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(1):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[2]) # number=42
prog.cz(input_qubit[1],input_qubit[2]) # number=43
prog.h(input_qubit[2]) # number=44
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=39
prog.cz(input_qubit[1],input_qubit[0]) # number=40
prog.h(input_qubit[0]) # number=41
prog.h(input_qubit[0]) # number=31
prog.cz(input_qubit[1],input_qubit[0]) # number=32
prog.h(input_qubit[0]) # number=33
prog.x(input_qubit[0]) # number=29
prog.cx(input_qubit[1],input_qubit[0]) # number=30
prog.h(input_qubit[0]) # number=34
prog.cz(input_qubit[1],input_qubit[0]) # number=35
prog.h(input_qubit[0]) # number=36
prog.x(input_qubit[1]) # number=10
prog.cx(input_qubit[0],input_qubit[2]) # number=25
prog.x(input_qubit[2]) # number=26
prog.cx(input_qubit[0],input_qubit[2]) # number=27
prog.x(input_qubit[3]) # number=12
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.cx(input_qubit[2],input_qubit[4]) # number=37
prog.h(input_qubit[3]) # number=20
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit953.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
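A hedged sanity check (same legacy Qiskit API as above): the oracle built for key "000" with n=3 should flip only the phase of |000>, so in a uniform superposition its amplitude turns negative while the others stay at +1/sqrt(8).

oracle = build_oracle(3, lambda rep: str(int(rep == "000")))
qr = QuantumRegister(3, "q")
check = QuantumCircuit(qr)
check.h(qr)                            # uniform superposition
check.append(oracle.to_gate(), qr[:])  # phase flip on |000> only
backend = BasicAer.get_backend('statevector_simulator')
state = execute(check, backend).result().get_statevector()
print(np.round(state, 3))              # index 0 ~ -0.354, others ~ +0.354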
| 30.878788
| 82
| 0.603042
|
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.h(input_qubit[4])
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(1):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[2])
prog.cz(input_qubit[1],input_qubit[2])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.h(input_qubit[0])
prog.cz(input_qubit[1],input_qubit[0])
prog.h(input_qubit[0])
prog.h(input_qubit[0])
prog.cz(input_qubit[1],input_qubit[0])
prog.h(input_qubit[0])
prog.x(input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0])
prog.h(input_qubit[0])
prog.cz(input_qubit[1],input_qubit[0])
prog.h(input_qubit[0])
prog.x(input_qubit[1])
prog.cx(input_qubit[0],input_qubit[2])
prog.x(input_qubit[2])
prog.cx(input_qubit[0],input_qubit[2])
prog.x(input_qubit[3])
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0])
prog.x(input_qubit[1])
prog.x(input_qubit[2])
prog.x(input_qubit[3])
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.cx(input_qubit[2],input_qubit[4])
prog.h(input_qubit[3])
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit953.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true
| true
|
7904801ab0a3f99c8b161f5051596acd78dda87a
| 3,154
|
py
|
Python
|
src/_query.py
|
SlumberDemon/AioTube
|
c2423175d58619a0998a67c9e3c059bbfd5659b3
|
[
"MIT"
] | null | null | null |
src/_query.py
|
SlumberDemon/AioTube
|
c2423175d58619a0998a67c9e3c059bbfd5659b3
|
[
"MIT"
] | null | null | null |
src/_query.py
|
SlumberDemon/AioTube
|
c2423175d58619a0998a67c9e3c059bbfd5659b3
|
[
"MIT"
] | null | null | null |
import re
from ._video import Video
from ._channel import Channel
from ._playlist import Playlist
from ._videobulk import _VideoBulk
from ._channelbulk import _ChannelBulk
from ._playlistbulk import _PlaylistBulk
from ._auxiliary import _parser, _filter, _src
class Search:
def __init__(self):
pass
@staticmethod
def video(keywords: str):
"""
:return: < video object > regarding the query
"""
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAQ%253D%253D')
video_ids = re.findall(r"\"videoId\":\"(.*?)\"", raw)
return Video(video_ids[0]) if video_ids else None
@staticmethod
def channel(keywords: str):
"""
:return: < channel object > regarding the query
"""
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAg%253D%253D')
channel_ids = re.findall(r"{\"channelId\":\"(.*?)\"", raw)
return Channel(channel_ids[0]) if channel_ids else None
@staticmethod
def videos(keywords: str, limit: int):
"""
:param str keywords: query to be searched on YouTube
:param int limit: total number of videos to be searched
:return: list of < video object > of each video regarding the query (consider limit)
"""
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAQ%253D%253D')
raw_ids = re.findall(r"\"videoId\":\"(.*?)\"", raw)
pureList = _filter(limit=limit, iterable=raw_ids)
return _VideoBulk(pureList) if pureList else None
@staticmethod
def channels(keywords: str, limit: int):
"""
:param str keywords: query to be searched on YouTube
:param int limit: total number of channels to be searched
:return: list of < channel object > of each video regarding the query (consider limit)
"""
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAg%253D%253D')
raw_ids = re.findall(r"{\"channelId\":\"(.*?)\"", raw)
pureList = _filter(limit=limit, iterable=raw_ids)
return _ChannelBulk(pureList) if pureList else None
@staticmethod
def playlist(keywords: str):
"""
:return: < playlist object > regarding the query
"""
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAw%253D%253D')
found = re.findall(r"playlistId\":\"(.*?)\"", raw)
return Playlist(found[0]) if found else None
@staticmethod
def playlists(keywords: str, limit: int):
"""
:param str keywords: query to be searched on YouTube
:param int limit: total playlists be searched
:return: list of < playlist object > of each playlist regarding the query (consider limit)
"""
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAw%253D%253D')
found = re.findall(r"playlistId\":\"(.*?)\"", raw)
pure = _filter(limit=limit, iterable=found)
return _PlaylistBulk(pure) if pure else None
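A hypothetical usage sketch; the import path mirrors this file's location in the repo:

from src._query import Search

video = Search.video("python asyncio tutorial")           # Video or None
bulk = Search.videos("python asyncio tutorial", limit=5)  # _VideoBulk or None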
| 40.435897
| 107
| 0.640457
|
import re
from ._video import Video
from ._channel import Channel
from ._playlist import Playlist
from ._videobulk import _VideoBulk
from ._channelbulk import _ChannelBulk
from ._playlistbulk import _PlaylistBulk
from ._auxiliary import _parser, _filter, _src
class Search:
def __init__(self):
pass
@staticmethod
def video(keywords: str):
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAQ%253D%253D')
video_ids = re.findall(r"\"videoId\":\"(.*?)\"", raw)
return Video(video_ids[0]) if video_ids else None
@staticmethod
def channel(keywords: str):
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAg%253D%253D')
channel_ids = re.findall(r"{\"channelId\":\"(.*?)\"", raw)
return Channel(channel_ids[0]) if channel_ids else None
@staticmethod
def videos(keywords: str, limit: int):
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAQ%253D%253D')
raw_ids = re.findall(r"\"videoId\":\"(.*?)\"", raw)
pureList = _filter(limit=limit, iterable=raw_ids)
return _VideoBulk(pureList) if pureList else None
@staticmethod
def channels(keywords: str, limit: int):
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAg%253D%253D')
raw_ids = re.findall(r"{\"channelId\":\"(.*?)\"", raw)
pureList = _filter(limit=limit, iterable=raw_ids)
return _ChannelBulk(pureList) if pureList else None
@staticmethod
def playlist(keywords: str):
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAw%253D%253D')
found = re.findall(r"playlistId\":\"(.*?)\"", raw)
return Playlist(found[0]) if found else None
@staticmethod
def playlists(keywords: str, limit: int):
raw = _src(f'https://www.youtube.com/results?search_query={_parser(keywords)}&sp=EgIQAw%253D%253D')
found = re.findall(r"playlistId\":\"(.*?)\"", raw)
pure = _filter(limit=limit, iterable=found)
return _PlaylistBulk(pure) if pure else None
| true
| true
|
7904808e2822971be1e1963ca0e5bea097efa146
| 13,590
|
py
|
Python
|
texttospeech/google/cloud/texttospeech_v1beta1/gapic/text_to_speech_client.py
|
erikwebb/google-cloud-python
|
288a878e9a07239015c78a193eca1cc15e926127
|
[
"Apache-2.0"
] | 1
|
2019-04-16T08:13:06.000Z
|
2019-04-16T08:13:06.000Z
|
texttospeech/google/cloud/texttospeech_v1beta1/gapic/text_to_speech_client.py
|
erikwebb/google-cloud-python
|
288a878e9a07239015c78a193eca1cc15e926127
|
[
"Apache-2.0"
] | null | null | null |
texttospeech/google/cloud/texttospeech_v1beta1/gapic/text_to_speech_client.py
|
erikwebb/google-cloud-python
|
288a878e9a07239015c78a193eca1cc15e926127
|
[
"Apache-2.0"
] | 1
|
2020-11-15T11:44:36.000Z
|
2020-11-15T11:44:36.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.texttospeech.v1beta1 TextToSpeech API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import grpc
from google.cloud.texttospeech_v1beta1.gapic import enums
from google.cloud.texttospeech_v1beta1.gapic import text_to_speech_client_config
from google.cloud.texttospeech_v1beta1.gapic.transports import (
text_to_speech_grpc_transport,
)
from google.cloud.texttospeech_v1beta1.proto import cloud_tts_pb2
from google.cloud.texttospeech_v1beta1.proto import cloud_tts_pb2_grpc
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
"google-cloud-texttospeech"
).version
class TextToSpeechClient(object):
"""Service that implements Google Cloud Text-to-Speech API."""
SERVICE_ADDRESS = "texttospeech.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.texttospeech.v1beta1.TextToSpeech"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TextToSpeechClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
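A hypothetical usage sketch for the classmethod above (the key file path is a placeholder):

from google.cloud import texttospeech_v1beta1

client = texttospeech_v1beta1.TextToSpeechClient.from_service_account_file(
    "service-account.json")
voices = client.list_voices(language_code="en-US")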
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
):
"""Constructor.
Args:
transport (Union[~.TextToSpeechGrpcTransport,
Callable[[~.Credentials, type], ~.TextToSpeechGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = text_to_speech_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=text_to_speech_grpc_transport.TextToSpeechGrpcTransport,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = text_to_speech_grpc_transport.TextToSpeechGrpcTransport(
address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def list_voices(
self,
language_code=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Returns a list of ``Voice`` supported for synthesis.
Example:
>>> from google.cloud import texttospeech_v1beta1
>>>
>>> client = texttospeech_v1beta1.TextToSpeechClient()
>>>
>>> response = client.list_voices()
Args:
language_code (str): Optional (but recommended)
`BCP-47 <https://www.rfc-editor.org/rfc/bcp/bcp47.txt>`__ language tag.
If specified, the ListVoices call will only return voices that can be
used to synthesize this language\_code. E.g. when specifying "en-NZ",
you will get supported "en-*" voices; when specifying "no", you will get
supported "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices;
specifying "zh" will also get supported "cmn-*" voices; specifying
"zh-hk" will also get supported "yue-\*" voices.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.texttospeech_v1beta1.types.ListVoicesResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_voices" not in self._inner_api_calls:
self._inner_api_calls[
"list_voices"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_voices,
default_retry=self._method_configs["ListVoices"].retry,
default_timeout=self._method_configs["ListVoices"].timeout,
client_info=self._client_info,
)
request = cloud_tts_pb2.ListVoicesRequest(language_code=language_code)
return self._inner_api_calls["list_voices"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def synthesize_speech(
self,
input_,
voice,
audio_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Synthesizes speech synchronously: receive results after all text input
has been processed.
Example:
>>> from google.cloud import texttospeech_v1beta1
>>>
>>> client = texttospeech_v1beta1.TextToSpeechClient()
>>>
>>> # TODO: Initialize `input_`:
>>> input_ = {}
>>>
>>> # TODO: Initialize `voice`:
>>> voice = {}
>>>
>>> # TODO: Initialize `audio_config`:
>>> audio_config = {}
>>>
>>> response = client.synthesize_speech(input_, voice, audio_config)
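            >>>
            >>> # A filled-in sketch; these dict values are illustrative
            >>> # assumptions mirroring the corresponding proto message fields.
            >>> input_ = {"text": "Hello, world!"}
            >>> voice = {"language_code": "en-US"}
            >>> audio_config = {"audio_encoding": texttospeech_v1beta1.enums.AudioEncoding.MP3}
            >>> response = client.synthesize_speech(input_, voice, audio_config)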
Args:
input_ (Union[dict, ~google.cloud.texttospeech_v1beta1.types.SynthesisInput]): Required. The Synthesizer requires either plain text or SSML as input.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.texttospeech_v1beta1.types.SynthesisInput`
voice (Union[dict, ~google.cloud.texttospeech_v1beta1.types.VoiceSelectionParams]): Required. The desired voice of the synthesized audio.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.texttospeech_v1beta1.types.VoiceSelectionParams`
audio_config (Union[dict, ~google.cloud.texttospeech_v1beta1.types.AudioConfig]): Required. The configuration of the synthesized audio.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.texttospeech_v1beta1.types.AudioConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.texttospeech_v1beta1.types.SynthesizeSpeechResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "synthesize_speech" not in self._inner_api_calls:
self._inner_api_calls[
"synthesize_speech"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.synthesize_speech,
default_retry=self._method_configs["SynthesizeSpeech"].retry,
default_timeout=self._method_configs["SynthesizeSpeech"].timeout,
client_info=self._client_info,
)
request = cloud_tts_pb2.SynthesizeSpeechRequest(
input=input_, voice=voice, audio_config=audio_config
)
return self._inner_api_calls["synthesize_speech"](
request, retry=retry, timeout=timeout, metadata=metadata
)
| 43.980583
| 161
| 0.634216
|
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import grpc
from google.cloud.texttospeech_v1beta1.gapic import enums
from google.cloud.texttospeech_v1beta1.gapic import text_to_speech_client_config
from google.cloud.texttospeech_v1beta1.gapic.transports import (
text_to_speech_grpc_transport,
)
from google.cloud.texttospeech_v1beta1.proto import cloud_tts_pb2
from google.cloud.texttospeech_v1beta1.proto import cloud_tts_pb2_grpc
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
"google-cloud-texttospeech"
).version
class TextToSpeechClient(object):
SERVICE_ADDRESS = "texttospeech.googleapis.com:443"
_INTERFACE_NAME = "google.cloud.texttospeech.v1beta1.TextToSpeech"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
):
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = text_to_speech_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=text_to_speech_grpc_transport.TextToSpeechGrpcTransport,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = text_to_speech_grpc_transport.TextToSpeechGrpcTransport(
address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
self._inner_api_calls = {}
def list_voices(
self,
language_code=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "list_voices" not in self._inner_api_calls:
self._inner_api_calls[
"list_voices"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_voices,
default_retry=self._method_configs["ListVoices"].retry,
default_timeout=self._method_configs["ListVoices"].timeout,
client_info=self._client_info,
)
request = cloud_tts_pb2.ListVoicesRequest(language_code=language_code)
return self._inner_api_calls["list_voices"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def synthesize_speech(
self,
input_,
voice,
audio_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "synthesize_speech" not in self._inner_api_calls:
self._inner_api_calls[
"synthesize_speech"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.synthesize_speech,
default_retry=self._method_configs["SynthesizeSpeech"].retry,
default_timeout=self._method_configs["SynthesizeSpeech"].timeout,
client_info=self._client_info,
)
request = cloud_tts_pb2.SynthesizeSpeechRequest(
input=input_, voice=voice, audio_config=audio_config
)
return self._inner_api_calls["synthesize_speech"](
request, retry=retry, timeout=timeout, metadata=metadata
)
| true
| true
|