max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
codechef/solution.py | anandamideShakyan/HackJIIT | 0 | 6620451 | t=int(input())
for i in range(t):
a=list(int(num) for num in input().strip().split( ))[:7]
c=a.count(1)
d=a.count(0)
if c>d:
print("yes")
else:
print("no")
| t=int(input())
for i in range(t):
a=list(int(num) for num in input().strip().split( ))[:7]
c=a.count(1)
d=a.count(0)
if c>d:
print("yes")
else:
print("no")
| none | 1 | 3.304422 | 3 | |
listoperations.py | Glenn-Po/LearningPython | 0 | 6620452 | #dealing with lists
numbers=[]
print('PROGRAM TO CARRY OUT SOME OPERATIONS ON LISTS(analogous to arrays in C/C++)')
print('1. SORTING...')
num=int(input('Enter number of elements to sort: '))
print('Now, Enter the elements one by one :')
for _ in range(num):
x=int(input(''))
numbers.append(x)#add x to the empty list
#now to sort the elements
print('Elements in ascending order: ',end='')
for i in range(num):
for j in range(num-1):
if numbers[j]>numbers[j+1]:
numbers[j],numbers[j+1]=numbers[j+1],numbers[j]
print(f'List after sorting --> {numbers}')
print()#empty line
print('2. SEARCHING ...')
target=int(input('Enter element to search in th above list: '))
if target in numbers:
pos=numbers.index(target)
print(f'{target} found at position {pos}')
else:
print('Element is not present in the list !!')
print()#emty line
print('3. REMOVING an element from the list...')
target=int(input("Enter an element to remove from the list: "))
if target in numbers:
numbers.remove(target)
print(f'List after {target} was removed is {numbers}')
else:
print(f"invalid operation-->{target} is not found in the list")
| #dealing with lists
numbers=[]
print('PROGRAM TO CARRY OUT SOME OPERATIONS ON LISTS(analogous to arrays in C/C++)')
print('1. SORTING...')
num=int(input('Enter number of elements to sort: '))
print('Now, Enter the elements one by one :')
for _ in range(num):
x=int(input(''))
numbers.append(x)#add x to the empty list
#now to sort the elements
print('Elements in ascending order: ',end='')
for i in range(num):
for j in range(num-1):
if numbers[j]>numbers[j+1]:
numbers[j],numbers[j+1]=numbers[j+1],numbers[j]
print(f'List after sorting --> {numbers}')
print()#empty line
print('2. SEARCHING ...')
target=int(input('Enter element to search in th above list: '))
if target in numbers:
pos=numbers.index(target)
print(f'{target} found at position {pos}')
else:
print('Element is not present in the list !!')
print()#emty line
print('3. REMOVING an element from the list...')
target=int(input("Enter an element to remove from the list: "))
if target in numbers:
numbers.remove(target)
print(f'List after {target} was removed is {numbers}')
else:
print(f"invalid operation-->{target} is not found in the list")
| en | 0.65167 | #dealing with lists #add x to the empty list #now to sort the elements #empty line #emty line | 4.255035 | 4 |
pychord/chord.py | adamnemecek/pychord | 158 | 6620453 | from typing import List, Union
from .constants import NOTE_VAL_DICT, VAL_NOTE_DICT
from .constants.scales import RELATIVE_KEY_DICT
from .parser import parse
from .quality import QualityManager, Quality
from .utils import transpose_note, display_appended, display_on, note_to_val, val_to_note
class Chord:
""" Class to handle a chord.
Attributes:
_chord: Name of the chord. (e.g. C, Am7, F#m7-5/A)
_root: The root note of chord. (e.g. C, A, F#)
_quality: The quality of chord. (e.g. maj, m7, m7-5)
_appended: The appended notes on chord.
_on: The base note of slash chord.
"""
def __init__(self, chord: str):
""" Constructor of Chord instance
:param chord: Name of chord (e.g. C, Am7, F#m7-5/A).
"""
root, quality, appended, on = parse(chord)
self._chord: str = chord
self._root: str = root
self._quality: Quality = quality
self._appended: List[str] = appended
self._on: str = on
def __unicode__(self):
return self._chord
def __str__(self):
return self._chord
def __repr__(self):
return f"<Chord: {self._chord}>"
def __eq__(self, other):
if not isinstance(other, Chord):
raise TypeError(f"Cannot compare Chord object with {type(other)} object")
if note_to_val(self._root) != note_to_val(other.root):
return False
if self._quality != other.quality:
return False
if self._appended != other.appended:
return False
if self._on and other.on:
if note_to_val(self._on) != note_to_val(other.on):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def from_note_index(cls, note: int, quality: str, scale: str, diatonic: bool = False) -> 'Chord':
""" Create a Chord from note index in a scale
Chord.from_note_index(1, "", "Cmaj") returns I of C major => Chord("C")
Chord.from_note_index(3, "m7", "Fmaj") returns IIImin of F major => Chord("Am7")
Chord.from_note_index(5, "7", "Amin") returns Vmin of A minor => Chord("E7")
:param note: Note index in a Scale I, II, ..., VIII
:param quality: Quality of a chord (m7, sus4, ...)
:param scale: Base scale (Cmaj, Amin, F#maj, Ebmin, ...)
:param diatonic: Adjust certain chord qualities according to the scale
"""
if not 1 <= note <= 8:
raise ValueError(f"Invalid note {note}")
relative_key = RELATIVE_KEY_DICT[scale[-3:]][note - 1]
root_num = NOTE_VAL_DICT[scale[:-3]]
root = VAL_NOTE_DICT[(root_num + relative_key) % 12][0]
scale_degrees = RELATIVE_KEY_DICT[scale[-3:]]
if diatonic:
# construct the chord based on scale degrees, within 1 octave
third = scale_degrees[(note + 1) % 7]
fifth = scale_degrees[(note + 3) % 7]
seventh = scale_degrees[(note + 5) % 7]
# adjust the chord to its root position (as a stack of thirds),
# then set the root to 0
def get_diatonic_chord(chord):
uninverted = []
for note in chord:
if not uninverted:
uninverted.append(note)
elif note > uninverted[-1]:
uninverted.append(note)
else:
uninverted.append(note + 12)
uninverted = [x - uninverted[0] for x in uninverted]
return uninverted
if quality in ["", "-", "maj", "m", "min"]:
triad = (relative_key, third, fifth)
q = get_diatonic_chord(triad)
elif quality in ["7", "M7", "maj7", "m7"]:
seventh_chord = (relative_key, third, fifth, seventh)
q = get_diatonic_chord(seventh_chord)
else:
raise NotImplementedError("Only generic chords (triads, sevenths) are supported")
# look up QualityManager to determine chord quality
quality_manager = QualityManager()
quality = quality_manager.find_quality_from_components(q)
if not quality:
raise RuntimeError(f"Quality with components {q} not found")
return cls(f"{root}{quality}")
@property
def chord(self):
""" The name of chord """
return self._chord
@property
def root(self):
""" The root note of chord """
return self._root
@property
def quality(self):
""" The quality of chord """
return self._quality
@property
def appended(self):
""" The appended notes on chord """
return self._appended
@property
def on(self):
""" The base note of slash chord """
return self._on
def info(self):
""" Return information of chord to display """
return f"""{self._chord}
root={self._root}
quality={self._quality}
appended={self._appended}
on={self._on}"""
def transpose(self, trans: int, scale: str = "C") -> None:
""" Transpose the chord
:param trans: Transpose key
:param scale: key scale
"""
if not isinstance(trans, int):
raise TypeError(f"Expected integers, not {type(trans)}")
self._root = transpose_note(self._root, trans, scale)
if self._on:
self._on = transpose_note(self._on, trans, scale)
self._reconfigure_chord()
def components(self, visible: bool = True) -> Union[List[str], List[int]]:
""" Return the component notes of chord
:param visible: returns the name of notes if True else list of int
:return: component notes of chord
"""
if self._on:
self._quality.append_on_chord(self.on, self.root)
return self._quality.get_components(root=self._root, visible=visible)
def components_with_pitch(self, root_pitch: int) -> List[str]:
""" Return the component notes of chord formatted like ["C4", "E4", "G4"]
:param root_pitch: the pitch of the root note
:return: component notes of chord
"""
if self._on:
self._quality.append_on_chord(self.on, self.root)
components = self._quality.get_components(root=self._root)
if components[0] < 0:
components = [c + 12 for c in components]
return [f"{val_to_note(c, scale=self._root)}{root_pitch + c // 12}" for c in components]
def _reconfigure_chord(self):
# TODO: Use appended
self._chord = "{}{}{}{}".format(self._root,
self._quality.quality,
display_appended(self._appended),
display_on(self._on))
| from typing import List, Union
from .constants import NOTE_VAL_DICT, VAL_NOTE_DICT
from .constants.scales import RELATIVE_KEY_DICT
from .parser import parse
from .quality import QualityManager, Quality
from .utils import transpose_note, display_appended, display_on, note_to_val, val_to_note
class Chord:
""" Class to handle a chord.
Attributes:
_chord: Name of the chord. (e.g. C, Am7, F#m7-5/A)
_root: The root note of chord. (e.g. C, A, F#)
_quality: The quality of chord. (e.g. maj, m7, m7-5)
_appended: The appended notes on chord.
_on: The base note of slash chord.
"""
def __init__(self, chord: str):
""" Constructor of Chord instance
:param chord: Name of chord (e.g. C, Am7, F#m7-5/A).
"""
root, quality, appended, on = parse(chord)
self._chord: str = chord
self._root: str = root
self._quality: Quality = quality
self._appended: List[str] = appended
self._on: str = on
def __unicode__(self):
return self._chord
def __str__(self):
return self._chord
def __repr__(self):
return f"<Chord: {self._chord}>"
def __eq__(self, other):
if not isinstance(other, Chord):
raise TypeError(f"Cannot compare Chord object with {type(other)} object")
if note_to_val(self._root) != note_to_val(other.root):
return False
if self._quality != other.quality:
return False
if self._appended != other.appended:
return False
if self._on and other.on:
if note_to_val(self._on) != note_to_val(other.on):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def from_note_index(cls, note: int, quality: str, scale: str, diatonic: bool = False) -> 'Chord':
""" Create a Chord from note index in a scale
Chord.from_note_index(1, "", "Cmaj") returns I of C major => Chord("C")
Chord.from_note_index(3, "m7", "Fmaj") returns IIImin of F major => Chord("Am7")
Chord.from_note_index(5, "7", "Amin") returns Vmin of A minor => Chord("E7")
:param note: Note index in a Scale I, II, ..., VIII
:param quality: Quality of a chord (m7, sus4, ...)
:param scale: Base scale (Cmaj, Amin, F#maj, Ebmin, ...)
:param diatonic: Adjust certain chord qualities according to the scale
"""
if not 1 <= note <= 8:
raise ValueError(f"Invalid note {note}")
relative_key = RELATIVE_KEY_DICT[scale[-3:]][note - 1]
root_num = NOTE_VAL_DICT[scale[:-3]]
root = VAL_NOTE_DICT[(root_num + relative_key) % 12][0]
scale_degrees = RELATIVE_KEY_DICT[scale[-3:]]
if diatonic:
# construct the chord based on scale degrees, within 1 octave
third = scale_degrees[(note + 1) % 7]
fifth = scale_degrees[(note + 3) % 7]
seventh = scale_degrees[(note + 5) % 7]
# adjust the chord to its root position (as a stack of thirds),
# then set the root to 0
def get_diatonic_chord(chord):
uninverted = []
for note in chord:
if not uninverted:
uninverted.append(note)
elif note > uninverted[-1]:
uninverted.append(note)
else:
uninverted.append(note + 12)
uninverted = [x - uninverted[0] for x in uninverted]
return uninverted
if quality in ["", "-", "maj", "m", "min"]:
triad = (relative_key, third, fifth)
q = get_diatonic_chord(triad)
elif quality in ["7", "M7", "maj7", "m7"]:
seventh_chord = (relative_key, third, fifth, seventh)
q = get_diatonic_chord(seventh_chord)
else:
raise NotImplementedError("Only generic chords (triads, sevenths) are supported")
# look up QualityManager to determine chord quality
quality_manager = QualityManager()
quality = quality_manager.find_quality_from_components(q)
if not quality:
raise RuntimeError(f"Quality with components {q} not found")
return cls(f"{root}{quality}")
@property
def chord(self):
""" The name of chord """
return self._chord
@property
def root(self):
""" The root note of chord """
return self._root
@property
def quality(self):
""" The quality of chord """
return self._quality
@property
def appended(self):
""" The appended notes on chord """
return self._appended
@property
def on(self):
""" The base note of slash chord """
return self._on
def info(self):
""" Return information of chord to display """
return f"""{self._chord}
root={self._root}
quality={self._quality}
appended={self._appended}
on={self._on}"""
def transpose(self, trans: int, scale: str = "C") -> None:
""" Transpose the chord
:param trans: Transpose key
:param scale: key scale
"""
if not isinstance(trans, int):
raise TypeError(f"Expected integers, not {type(trans)}")
self._root = transpose_note(self._root, trans, scale)
if self._on:
self._on = transpose_note(self._on, trans, scale)
self._reconfigure_chord()
def components(self, visible: bool = True) -> Union[List[str], List[int]]:
""" Return the component notes of chord
:param visible: returns the name of notes if True else list of int
:return: component notes of chord
"""
if self._on:
self._quality.append_on_chord(self.on, self.root)
return self._quality.get_components(root=self._root, visible=visible)
def components_with_pitch(self, root_pitch: int) -> List[str]:
""" Return the component notes of chord formatted like ["C4", "E4", "G4"]
:param root_pitch: the pitch of the root note
:return: component notes of chord
"""
if self._on:
self._quality.append_on_chord(self.on, self.root)
components = self._quality.get_components(root=self._root)
if components[0] < 0:
components = [c + 12 for c in components]
return [f"{val_to_note(c, scale=self._root)}{root_pitch + c // 12}" for c in components]
def _reconfigure_chord(self):
# TODO: Use appended
self._chord = "{}{}{}{}".format(self._root,
self._quality.quality,
display_appended(self._appended),
display_on(self._on))
| en | 0.680972 | Class to handle a chord. Attributes: _chord: Name of the chord. (e.g. C, Am7, F#m7-5/A) _root: The root note of chord. (e.g. C, A, F#) _quality: The quality of chord. (e.g. maj, m7, m7-5) _appended: The appended notes on chord. _on: The base note of slash chord. Constructor of Chord instance :param chord: Name of chord (e.g. C, Am7, F#m7-5/A). Create a Chord from note index in a scale Chord.from_note_index(1, "", "Cmaj") returns I of C major => Chord("C") Chord.from_note_index(3, "m7", "Fmaj") returns IIImin of F major => Chord("Am7") Chord.from_note_index(5, "7", "Amin") returns Vmin of A minor => Chord("E7") :param note: Note index in a Scale I, II, ..., VIII :param quality: Quality of a chord (m7, sus4, ...) :param scale: Base scale (Cmaj, Amin, F#maj, Ebmin, ...) :param diatonic: Adjust certain chord qualities according to the scale # construct the chord based on scale degrees, within 1 octave # adjust the chord to its root position (as a stack of thirds), # then set the root to 0 # look up QualityManager to determine chord quality The name of chord The root note of chord The quality of chord The appended notes on chord The base note of slash chord Return information of chord to display {self._chord} root={self._root} quality={self._quality} appended={self._appended} on={self._on} Transpose the chord :param trans: Transpose key :param scale: key scale Return the component notes of chord :param visible: returns the name of notes if True else list of int :return: component notes of chord Return the component notes of chord formatted like ["C4", "E4", "G4"] :param root_pitch: the pitch of the root note :return: component notes of chord # TODO: Use appended | 2.55663 | 3 |
yt_dlp/WS_Extractor/gyao.py | evolution-ant/local-youtube-dl | 0 | 6620454 |
from ..extractor.common import InfoExtractor
class GyaoIE(InfoExtractor):
IE_NAME = 'gyao.yahoo.co.jp'
_VALID_URL = r'https?://gyao.yahoo.co.jp'
_TEST = {
'url': 'http://gyao.yahoo.co.jp/player/00597/v12448/v1000000000000003690/?list_id=1654725',
}
def extractFromBCPlayer(self, title, video_id, webpage):
player_url = 'https://s.yimg.jp/images/gyao/bc-player/hls/player.gyao.js?0004'
space_id = self._html_search_regex(r'data-spaceid=([^\']+)', webpage, 'space_id')
service_id = self._html_search_regex(r'data-serviceid="([^"]+)', webpage, 'data-serviceid')
video_uni_id = self._html_search_regex(r'data-vid=([^\']+)', webpage, 'data-vid')
webpage = self._download_webpage(player_url, player_url)
account = self._html_search_regex(r'videoElement\.setAttribute\("data-account","([^"]+)', webpage, 'account')
index_min_js_url = self._html_search_regex(r'BC_PLAYER_URL="([^"]+)', webpage, 'index_min_js_url')
webpage = self._download_webpage(index_min_js_url, index_min_js_url)
app_id = self._html_search_regex(r',m\s*=\s*"(.+)";b.checkVisible=', webpage,
'index_min_js_url') # m="dj0zaiZpPXFuMjk4YTJZcU4wUCZzPWNvbnN1bWVyc2VjcmV0Jng9YjQ-";
url2 = 'https://gyao.yahooapis.jp/rio/getVideo'
query = {
'appid': app_id,
'output': 'json',
'space_id': space_id,
'domain': 'gyao.yahoo.co.jp',
'start': '1',
'results': '1',
'service_id': service_id, # 'gy
'video_uni_id': video_uni_id,
'device_type': '1100',
'delivery_type': '2,6',
'premiumgyao_limited_contents_flag': '1',
'callback': 'jsonp_1499686946866_5949'
}
webpage = self._download_webpage(url2, url2, query=query)
deliveryId = self._search_regex(r'"deliveryId":([^\"]+),"deliveryType":6', webpage, 'deliveryId')
url3 = 'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s' % (account, deliveryId)
program_info = self._download_json(
url3, video_id,
headers={
'Accept': 'application/json;pk=BCpkADawqM3UI7LN8vy-xZ-f0EG6Xuch56dMQLuXX-VST0YZFntoAghnCk04EswbZ56BAX20HkAWwYw5M4YbCcSRWgDNcGlbKIUOlw2DNT15MyrRvG2n2y3WAoy1IWfTAlhMgZLc2pa3rZPbjCB23KBFaGZ1ezN5bgDFpOCQ4Rmb8MAx3BSPVrsprtQ'})
formats = [{
'ext': 'm3u8',
'url': program_info['sources'][0]['src'],
}]
return {
'id': video_id,
'title': title,
'formats': formats,
'duration': program_info['duration'],
'thumbnail': program_info['thumbnail']
}
def _real_extract(self, url):
webpage = self._download_webpage(url, url)
title = self._html_search_regex(r'<title>(.+)</title>', webpage, 'title')
video_id = self._search_regex(r'video_uni_id=(.+?)"', webpage, 'video_id')
try:
return self.extractFromGYAOPlayer(title, video_id, webpage)
except:
return self.extractFromBCPlayer(title, video_id, webpage)
def extractFromGYAOPlayer(self, title, video_id, webpage):
try:
player_url = self._html_search_regex(r'src="(.+)\/player.js.*?">', webpage,
'player_url') + '/player.js'
except:
player_url = 'http://i.yimg.jp/images/gyao/player/js/player.js'
webpage = self._download_webpage(player_url, player_url)
appID = self._html_search_regex(r'APPID\s*:\s*"(.+?)"', webpage, 'appID')
appKey = self._html_search_regex(r'GATE_WAY_APP_KEY\s*:\s*"(.+?)"', webpage, 'GATE_WAY_APP_KEY')
video_url = 'https://gw.gyao.yahoo.co.jp/v1/hls/%s/variant.m3u8?device_type=1100&' \
'delivery_type=2&min_bandwidth=246&appkey=%s&appid=%s' % (video_id, appKey, appID)
return {
'id': video_id,
'_type': 'video',
'title': title,
'url': video_url,
'ext': 'mp4',
}
'''
http://gyao.yahoo.co.jp/player/00597/v12416/v1000000000000003678/?list_id=1654725
http://i.yimg.jp/images/gyao/bc-player/player.gyao.js?0002
http://players.brightcove.net/4235717419001/H17bGYqS_default/index.min.js
https://vod01-gyao.c.yimg.jp/4235717419001/4235717419001_4971788934001_4971736491001.mpd
GET https://edge.api.brightcove.com/playback/v1/accounts/4235717419001/videos/4971736491001 HTTP/1.1
Accept: application/json;pk=BCpkADawqM2QSOsdGmTVDZ4_Y10f_FHAfpcCmG99ZZC4tNNQclHy44k7klaWnFhZLQvByouh2G0bkPY7xOC5sYPx-Ich7wVBIHCSLxsH-r0eps_GbXxXpMa96eHTJEb_G404XOUt-hpkg21S
Referer: http://gyao.yahoo.co.jp/player/00597/v12416/v1000000000000003678/?list_id=1654725
Accept-Language: zh-Hans-CN,zh-Hans;q=0.8,en-US;q=0.5,en;q=0.3
Origin: http://gyao.yahoo.co.jp
Accept-Encoding: gzip, deflate
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.10240
Host: edge.api.brightcove.com
Connection: Keep-Alive
Cache-Control: no-cache
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
Content-Length: 1736
Connection: keep-alive
access-control-allow-origin: *
access-control-expose-headers: x-cache,via,bcov-debug-cache-stats,bcov-instance,x-amz-cf-id
BCOV-instance: i-58dfacc4, ca69ff2, 2016-07-07 07:59:18.679Z
Cache-Control: max-age=0, no-cache, no-store
Date: Thu, 07 Jul 2016 07:59:18 GMT
Server: Jetty(9.2.z-SNAPSHOT)
Strict-Transport-Security: max-age=600
X-Originating-URL: https://edge-elb.api.brightcove.com/playback/v1/accounts/4235717419001/videos/4971736491001
X-Cache: Miss from cloudfront
Via: 1.1 2bb00e225b1b6c3d82913e7c9db706c5.cloudfront.net (CloudFront)
X-Amz-Cf-Id: DYQa8nLJhIDMmKfN6ZyJkp9iz11XD2B1ygSkELoh1g6EYbKLxWQf8Q==
''' |
from ..extractor.common import InfoExtractor
class GyaoIE(InfoExtractor):
IE_NAME = 'gyao.yahoo.co.jp'
_VALID_URL = r'https?://gyao.yahoo.co.jp'
_TEST = {
'url': 'http://gyao.yahoo.co.jp/player/00597/v12448/v1000000000000003690/?list_id=1654725',
}
def extractFromBCPlayer(self, title, video_id, webpage):
player_url = 'https://s.yimg.jp/images/gyao/bc-player/hls/player.gyao.js?0004'
space_id = self._html_search_regex(r'data-spaceid=([^\']+)', webpage, 'space_id')
service_id = self._html_search_regex(r'data-serviceid="([^"]+)', webpage, 'data-serviceid')
video_uni_id = self._html_search_regex(r'data-vid=([^\']+)', webpage, 'data-vid')
webpage = self._download_webpage(player_url, player_url)
account = self._html_search_regex(r'videoElement\.setAttribute\("data-account","([^"]+)', webpage, 'account')
index_min_js_url = self._html_search_regex(r'BC_PLAYER_URL="([^"]+)', webpage, 'index_min_js_url')
webpage = self._download_webpage(index_min_js_url, index_min_js_url)
app_id = self._html_search_regex(r',m\s*=\s*"(.+)";b.checkVisible=', webpage,
'index_min_js_url') # m="dj0zaiZpPXFuMjk4YTJZcU4wUCZzPWNvbnN1bWVyc2VjcmV0Jng9YjQ-";
url2 = 'https://gyao.yahooapis.jp/rio/getVideo'
query = {
'appid': app_id,
'output': 'json',
'space_id': space_id,
'domain': 'gyao.yahoo.co.jp',
'start': '1',
'results': '1',
'service_id': service_id, # 'gy
'video_uni_id': video_uni_id,
'device_type': '1100',
'delivery_type': '2,6',
'premiumgyao_limited_contents_flag': '1',
'callback': 'jsonp_1499686946866_5949'
}
webpage = self._download_webpage(url2, url2, query=query)
deliveryId = self._search_regex(r'"deliveryId":([^\"]+),"deliveryType":6', webpage, 'deliveryId')
url3 = 'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s' % (account, deliveryId)
program_info = self._download_json(
url3, video_id,
headers={
'Accept': 'application/json;pk=BCpkADawqM3UI7LN8vy-xZ-f0EG6Xuch56dMQLuXX-VST0YZFntoAghnCk04EswbZ56BAX20HkAWwYw5M4YbCcSRWgDNcGlbKIUOlw2DNT15MyrRvG2n2y3WAoy1IWfTAlhMgZLc2pa3rZPbjCB23KBFaGZ1ezN5bgDFpOCQ4Rmb8MAx3BSPVrsprtQ'})
formats = [{
'ext': 'm3u8',
'url': program_info['sources'][0]['src'],
}]
return {
'id': video_id,
'title': title,
'formats': formats,
'duration': program_info['duration'],
'thumbnail': program_info['thumbnail']
}
def _real_extract(self, url):
webpage = self._download_webpage(url, url)
title = self._html_search_regex(r'<title>(.+)</title>', webpage, 'title')
video_id = self._search_regex(r'video_uni_id=(.+?)"', webpage, 'video_id')
try:
return self.extractFromGYAOPlayer(title, video_id, webpage)
except:
return self.extractFromBCPlayer(title, video_id, webpage)
def extractFromGYAOPlayer(self, title, video_id, webpage):
try:
player_url = self._html_search_regex(r'src="(.+)\/player.js.*?">', webpage,
'player_url') + '/player.js'
except:
player_url = 'http://i.yimg.jp/images/gyao/player/js/player.js'
webpage = self._download_webpage(player_url, player_url)
appID = self._html_search_regex(r'APPID\s*:\s*"(.+?)"', webpage, 'appID')
appKey = self._html_search_regex(r'GATE_WAY_APP_KEY\s*:\s*"(.+?)"', webpage, 'GATE_WAY_APP_KEY')
video_url = 'https://gw.gyao.yahoo.co.jp/v1/hls/%s/variant.m3u8?device_type=1100&' \
'delivery_type=2&min_bandwidth=246&appkey=%s&appid=%s' % (video_id, appKey, appID)
return {
'id': video_id,
'_type': 'video',
'title': title,
'url': video_url,
'ext': 'mp4',
}
'''
http://gyao.yahoo.co.jp/player/00597/v12416/v1000000000000003678/?list_id=1654725
http://i.yimg.jp/images/gyao/bc-player/player.gyao.js?0002
http://players.brightcove.net/4235717419001/H17bGYqS_default/index.min.js
https://vod01-gyao.c.yimg.jp/4235717419001/4235717419001_4971788934001_4971736491001.mpd
GET https://edge.api.brightcove.com/playback/v1/accounts/4235717419001/videos/4971736491001 HTTP/1.1
Accept: application/json;pk=BCpkADawqM2QSOsdGmTVDZ4_Y10f_FHAfpcCmG99ZZC4tNNQclHy44k7klaWnFhZLQvByouh2G0bkPY7xOC5sYPx-Ich7wVBIHCSLxsH-r0eps_GbXxXpMa96eHTJEb_G404XOUt-hpkg21S
Referer: http://gyao.yahoo.co.jp/player/00597/v12416/v1000000000000003678/?list_id=1654725
Accept-Language: zh-Hans-CN,zh-Hans;q=0.8,en-US;q=0.5,en;q=0.3
Origin: http://gyao.yahoo.co.jp
Accept-Encoding: gzip, deflate
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.10240
Host: edge.api.brightcove.com
Connection: Keep-Alive
Cache-Control: no-cache
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
Content-Length: 1736
Connection: keep-alive
access-control-allow-origin: *
access-control-expose-headers: x-cache,via,bcov-debug-cache-stats,bcov-instance,x-amz-cf-id
BCOV-instance: i-58dfacc4, ca69ff2, 2016-07-07 07:59:18.679Z
Cache-Control: max-age=0, no-cache, no-store
Date: Thu, 07 Jul 2016 07:59:18 GMT
Server: Jetty(9.2.z-SNAPSHOT)
Strict-Transport-Security: max-age=600
X-Originating-URL: https://edge-elb.api.brightcove.com/playback/v1/accounts/4235717419001/videos/4971736491001
X-Cache: Miss from cloudfront
Via: 1.1 2bb00e225b1b6c3d82913e7c9db706c5.cloudfront.net (CloudFront)
X-Amz-Cf-Id: DYQa8nLJhIDMmKfN6ZyJkp9iz11XD2B1ygSkELoh1g6EYbKLxWQf8Q==
''' | en | 0.409566 | # m="dj0zaiZpPXFuMjk4YTJZcU4wUCZzPWNvbnN1bWVyc2VjcmV0Jng9YjQ-"; # 'gy http://gyao.yahoo.co.jp/player/00597/v12416/v1000000000000003678/?list_id=1654725 http://i.yimg.jp/images/gyao/bc-player/player.gyao.js?0002 http://players.brightcove.net/4235717419001/H17bGYqS_default/index.min.js https://vod01-gyao.c.yimg.jp/4235717419001/4235717419001_4971788934001_4971736491001.mpd GET https://edge.api.brightcove.com/playback/v1/accounts/4235717419001/videos/4971736491001 HTTP/1.1 Accept: application/json;pk=BCpkADawqM2QSOsdGmTVDZ4_Y10f_FHAfpcCmG99ZZC4tNNQclHy44k7klaWnFhZLQvByouh2G0bkPY7xOC5sYPx-Ich7wVBIHCSLxsH-r0eps_GbXxXpMa96eHTJEb_G404XOUt-hpkg21S Referer: http://gyao.yahoo.co.jp/player/00597/v12416/v1000000000000003678/?list_id=1654725 Accept-Language: zh-Hans-CN,zh-Hans;q=0.8,en-US;q=0.5,en;q=0.3 Origin: http://gyao.yahoo.co.jp Accept-Encoding: gzip, deflate User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.10240 Host: edge.api.brightcove.com Connection: Keep-Alive Cache-Control: no-cache HTTP/1.1 200 OK Content-Type: application/json; charset=UTF-8 Content-Length: 1736 Connection: keep-alive access-control-allow-origin: * access-control-expose-headers: x-cache,via,bcov-debug-cache-stats,bcov-instance,x-amz-cf-id BCOV-instance: i-58dfacc4, ca69ff2, 2016-07-07 07:59:18.679Z Cache-Control: max-age=0, no-cache, no-store Date: Thu, 07 Jul 2016 07:59:18 GMT Server: Jetty(9.2.z-SNAPSHOT) Strict-Transport-Security: max-age=600 X-Originating-URL: https://edge-elb.api.brightcove.com/playback/v1/accounts/4235717419001/videos/4971736491001 X-Cache: Miss from cloudfront Via: 1.1 2bb00e225b1b6c3d82913e7c9db706c5.cloudfront.net (CloudFront) X-Amz-Cf-Id: DYQa8nLJhIDMmKfN6ZyJkp9iz11XD2B1ygSkELoh1g6EYbKLxWQf8Q== | 2.510561 | 3 |
mercado_btc_monitor/api/exceptions/__init__.py | Leothi/mercado_btc_monitor | 3 | 6620455 | <reponame>Leothi/mercado_btc_monitor<gh_stars>1-10
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
from fastapi.exceptions import RequestValidationError
from starlette.exceptions import HTTPException
class APIException(Exception):
"""Classe base para criação de Exceções personalizadas da API.
:param Exception: Classe Python de Exceções.
"""
def __init__(self, status: int, mensagem: str):
self.status_code = status
self.mensagem = mensagem
# Substituição/criação das exceptions
class ExceptionHandler:
def __init__(self, app: FastAPI):
app.exception_handler(Exception)(self.exception_handler)
app.exception_handler(HTTPException)(self.http_excep)
app.exception_handler(APIException)(self.camara_exception_handler)
app.exception_handler(RequestValidationError)(
self.validation_exception_handler)
@staticmethod
async def exception_handler(request: Request, excecao: Exception):
return JSONResponse(
status_code=500, content={
"status": 500,
"mensagem": 'Internal Server Error'
}
)
@staticmethod
async def http_excep(requisicao: Request, excecao: HTTPException):
mensagem = {404: "Não encontrado",
500: "Erro interno", 400: "Bad Request"}
return JSONResponse(
status_code=excecao.status_code,
content={
"status": excecao.status_code,
"mensagem": mensagem[excecao.status_code]
}
)
@staticmethod
async def camara_exception_handler(requisicao: Request, excecao: APIException):
return JSONResponse(
status_code=excecao.status_code,
content={
"status": excecao.status_code,
"mensagem": excecao.mensagem
}
)
@staticmethod
async def validation_exception_handler(requisicao: Request, excecao: RequestValidationError):
return JSONResponse(
status_code=422,
content={
"status": 422,
"mensagem": "Parâmetros da requisição inválidos!",
"details": str(excecao)
}
)
| from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
from fastapi.exceptions import RequestValidationError
from starlette.exceptions import HTTPException
class APIException(Exception):
"""Classe base para criação de Exceções personalizadas da API.
:param Exception: Classe Python de Exceções.
"""
def __init__(self, status: int, mensagem: str):
self.status_code = status
self.mensagem = mensagem
# Substituição/criação das exceptions
class ExceptionHandler:
def __init__(self, app: FastAPI):
app.exception_handler(Exception)(self.exception_handler)
app.exception_handler(HTTPException)(self.http_excep)
app.exception_handler(APIException)(self.camara_exception_handler)
app.exception_handler(RequestValidationError)(
self.validation_exception_handler)
@staticmethod
async def exception_handler(request: Request, excecao: Exception):
return JSONResponse(
status_code=500, content={
"status": 500,
"mensagem": 'Internal Server Error'
}
)
@staticmethod
async def http_excep(requisicao: Request, excecao: HTTPException):
mensagem = {404: "Não encontrado",
500: "Erro interno", 400: "Bad Request"}
return JSONResponse(
status_code=excecao.status_code,
content={
"status": excecao.status_code,
"mensagem": mensagem[excecao.status_code]
}
)
@staticmethod
async def camara_exception_handler(requisicao: Request, excecao: APIException):
return JSONResponse(
status_code=excecao.status_code,
content={
"status": excecao.status_code,
"mensagem": excecao.mensagem
}
)
@staticmethod
async def validation_exception_handler(requisicao: Request, excecao: RequestValidationError):
return JSONResponse(
status_code=422,
content={
"status": 422,
"mensagem": "Parâmetros da requisição inválidos!",
"details": str(excecao)
}
) | pt | 0.996208 | Classe base para criação de Exceções personalizadas da API. :param Exception: Classe Python de Exceções. # Substituição/criação das exceptions | 2.868663 | 3 |
main_analysis_of_EDLine_urban_street_dataset.py | cristi161/eecvf | 0 | 6620456 | # noinspection PyUnresolvedReferences
import Application
# noinspection PyUnresolvedReferences
import Benchmarking
# noinspection PyUnresolvedReferences
import MachineLearning
# noinspection PyUnresolvedReferences
import config_main as CONFIG
# noinspection PyUnresolvedReferences
import Utils
def main_sobel_parsing_natural():
    """
    Threshold sweep of the edge-drawing detector (Sobel 3x3) on the BSDS500
    natural-image test set.

    Registers one detection job per combination of Gaussian-blur kernel,
    gradient threshold, anchor threshold and scan interval, runs the
    pipeline, benchmarks the results against the BSDS500 ground truth and
    saves a CPM/F1 plot.
    Please look in example_main for all functions you can use.
    """
    Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/test')
    Application.delete_folder_appl_out()
    Benchmarking.delete_folder_benchmark_out()
    Application.do_get_image_job(port_output_name='RAW')
    Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')
    # Ports to benchmark/plot later.  Renamed from `list`, which shadowed
    # the built-in of the same name.
    eval_ports = []
    first_order_edge = [
        CONFIG.FILTERS.SOBEL_3x3
    ]
    for edge in first_order_edge:
        for kernel_gaus in [3, 5, 7, 9]:
            # The blur depends only on the kernel size, so register it once
            # per kernel instead of once per threshold combination.
            blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', kernel_size=kernel_gaus, sigma=0)
            for grad_thr in [10, 30, 40, 50, 60, 70, 90, 110, 130, 150]:
                for anc_thr in [10, 20, 30, 40, 60]:
                    for sc_int in [1, 3, 5]:
                        e3, e4 = Application.do_edge_drawing_mod_job(port_input_name=blur, operator=edge,
                                                                     gradient_thr=grad_thr, anchor_thr=anc_thr, scan_interval=sc_int,
                                                                     max_edges=100, max_points_edge=100)
                        eval_ports.append(e3 + '_L0')
    Application.create_config_file()
    Application.configure_save_pictures(ports_to_save=eval_ports)
    # Application.configure_show_pictures(ports_to_show=eval_ports, time_to_show=0)
    Application.run_application()
    eval_ports.reverse()
    # Do bsds benchmarking
    # Be ware not to activate job_name_in_port in Application.configure_save_pictures
    Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
                                                gt_location='TestData/BSR/BSDS500/data/groundTruth/test',
                                                raw_image='TestData/BSR/BSDS500/data/images/test',
                                                jobs_set=eval_ports, do_thinning=False)
    Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='edge_sobel_thr_finding_natural',
                                 list_of_data=eval_ports, number_of_series=50,
                                 inputs=[''], self_contained_list=True, set_legend_left=False,
                                 suffix_to_cut_legend='_S_0_GRAY_RAW_L0',
                                 replace_list=[('EDGE_DRAWING_MOD_THR_', 'TG='), ('_ANC_THR_', ' TA='), ('_SCAN_', ' SI='), ('_SOBEL_3x3_GAUSS_BLUR_K_', ' GK=')],
                                 save_plot=True, show_plot=False, set_all_to_legend=True)
    Utils.close_files()
Utils.close_files()
def main_sobel_parsing_urban():
    """
    Threshold sweep of the edge-drawing detector (Sobel 3x3) on the TMBuD
    urban validation set.

    Same parameter grid as the natural-image sweep; results are benchmarked
    against the TMBuD edge ground truth and a CPM/F1 plot is saved.
    Please look in example_main for all functions you can use.
    """
    Application.set_input_image_folder('TestData/TMBuD/img/VAL/png')
    Application.delete_folder_appl_out()
    Benchmarking.delete_folder_benchmark_out()
    Application.do_get_image_job(port_output_name='RAW')
    Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')
    # Ports to benchmark/plot later.  Renamed from `list`, which shadowed
    # the built-in of the same name.
    eval_ports = []
    first_order_edge = [
        CONFIG.FILTERS.SOBEL_3x3
    ]
    for edge in first_order_edge:
        for kernel_gaus in [3, 5, 7, 9]:
            # The blur depends only on the kernel size, so register it once
            # per kernel instead of once per threshold combination.
            blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', kernel_size=kernel_gaus, sigma=0)
            for grad_thr in [10, 30, 40, 50, 60, 70, 90, 110, 130, 150]:
                for anc_thr in [10, 20, 30, 40, 60]:
                    for sc_int in [1, 3, 5]:
                        e3, e4 = Application.do_edge_drawing_mod_job(port_input_name=blur, operator=edge,
                                                                     gradient_thr=grad_thr, anchor_thr=anc_thr, scan_interval=sc_int,
                                                                     max_edges=100, max_points_edge=100)
                        eval_ports.append(e3 + '_L0')
    Application.create_config_file()
    Application.configure_save_pictures(ports_to_save=eval_ports)
    # Application.configure_show_pictures(ports_to_show=eval_ports, time_to_show=0)
    Application.run_application()
    eval_ports.reverse()
    # Do bsds benchmarking
    # Be ware not to activate job_name_in_port in Application.configure_save_pictures
    Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
                                                gt_location='TestData/TMBuD/edge/VAL/mat',
                                                raw_image='TestData/TMBuD/img/VAL/png',
                                                jobs_set=eval_ports, do_thinning=False)
    Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='edge_sobel_thr_finding_urban',
                                 list_of_data=eval_ports, number_of_series=50,
                                 inputs=[''], self_contained_list=True, set_legend_left=False,
                                 suffix_to_cut_legend='_S_0_GRAY_RAW_L0',
                                 replace_list=[('EDGE_DRAWING_MOD_THR_', 'TG='), ('_ANC_THR_', ' TA='), ('_SCAN_', ' SI='), ('_SOBEL_3x3_GAUSS_BLUR_K_', ' GK=')],
                                 save_plot=True, show_plot=False, set_all_to_legend=True)
    Utils.close_files()
def main_urban():
    """
    Run the EDLine line/edge extraction with every first-order operator on
    the TMBuD urban validation images, benchmark both the edge maps and the
    detected line segments against the ground truth, and save CPM plots.
    Please look in example_main for all functions you can use.
    """
    # Paths use forward slashes: the previous backslash literals
    # ('TestData\\TMBuD\\...') only worked on Windows and were inconsistent
    # with every other function in this file.
    Application.set_input_image_folder('TestData/TMBuD/img/VAL/png')
    # Application.delete_folder_appl_out()
    # Benchmarking.delete_folder_benchmark_out()
    Application.do_get_image_job(port_output_name='RAW')
    Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')
    blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', sigma=0, kernel_size=9)
    list_to_eval_edge = []
    list_to_eval_line = []
    first_order_edge = [
        CONFIG.FILTERS.SOBEL_3x3
        , CONFIG.FILTERS.PREWITT_3x3
        , CONFIG.FILTERS.KIRSCH_3x3
        , CONFIG.FILTERS.KITCHEN_MALIN_3x3
        , CONFIG.FILTERS.KAYYALI_3x3
        , CONFIG.FILTERS.SCHARR_3x3
        , CONFIG.FILTERS.KROON_3x3
        , CONFIG.FILTERS.ORHEI_3x3
    ]
    for edge in first_order_edge:
        for gr_thr in [30, 40, 50]:
            for anc_thr in [10, 20]:
                e1, e2, e3, e4 = Application.do_ed_lines_mod_job(port_input_name=blur, operator=edge, min_line_length=20,
                                                                 gradient_thr=gr_thr, anchor_thr=anc_thr, scan_interval=1, line_fit_err_thr=1,
                                                                 max_edges=21000, max_points_edge=8000, max_lines=8000, max_points_line=8000
                                                                 )
                list_to_eval_edge.append(e1 + '_L0')
                list_to_eval_line.append(e4 + '_L0')
    Application.create_config_file(verbose=False)
    Application.configure_save_pictures(job_name_in_port=True, ports_to_save='ALL')
    # Application.configure_show_pictures(ports_to_show=list_to_save, time_to_show=200)
    Application.run_application()
    # Do bsds benchmarking
    # Be ware not to activate job_name_in_port in Application.configure_save_pictures
    Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
                                                gt_location='TestData/TMBuD/edge/VAL/mat',
                                                raw_image='TestData/TMBuD/img/VAL/png',
                                                jobs_set=list_to_eval_edge, do_thinning=False)
    # NOTE(review): replace_list maps '_KITCHEN_3x3_' but the operator above
    # is KITCHEN_MALIN_3x3 — confirm the generated port names really match.
    # Plot name fixed from the 'edge_urbanl_list' typo.
    Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='edge_urban_list',
                                 list_of_data=list_to_eval_edge, number_of_series=50,
                                 inputs=[''], self_contained_list=True, set_legend_left=False,
                                 suffix_to_cut_legend='_S_0_GRAY_RAW_L0',
                                 replace_list=[('EDGE_DRAWING_MOD_THR_', 'TG='), ('_ANC_THR_', ' TA='), ('_SCAN_', ' SI='),
                                               ('_SOBEL_3x3_', ' Sobel'), ('_PREWITT_3x3_', ' Prewitt'), ('_SCHARR_3x3_', ' Scharr'),
                                               ('_KROON_3x3_', ' Kroon'), ('_KITCHEN_3x3_', ' Kitchen'), ('_ORHEI_3x3_', ' Orhei'),
                                               ('_KAYYALI_3x3_', ' Kayyali'), ('_KIRSCH_3x3_', ' Kirsch'), ('GAUSS_BLUR_K_9', ''), ],
                                 save_plot=True, show_plot=False, set_all_to_legend=True)
    Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
                                                gt_location='TestData/TMBuD/edge/VAL/mat',
                                                raw_image='TestData/TMBuD/img/VAL/png',
                                                jobs_set=list_to_eval_line, do_thinning=False)
    Utils.plot_first_cpm_results(prefix='ED_LINES_IMG_MIN_LEN_20_LINE_FIT_ERR_1_EDGE_DRAWING_MOD_SEGMENTS_', level='L0', order_by='f1', name='line_urban_list',
                                 list_of_data=list_to_eval_line, number_of_series=50,
                                 inputs=[''], self_contained_list=True, set_legend_left=False,
                                 suffix_to_cut_legend='_S_0_GRAY_RAW_L0',
                                 replace_list=[('ED_LINES_IMG_MIN_LEN_20_LINE_FIT_ERR_1_EDGE_DRAWING_MOD_SEGMENTS_', 'TG='), ('_ANC_THR_', ' TA='), ('_SCAN_', ' SI='),
                                               ('_SOBEL_3x3_', ' Sobel'), ('_PREWITT_3x3_', ' Prewitt'), ('_SCHARR_3x3_', ' Scharr'),
                                               ('_KROON_3x3_', ' Kroon'), ('_KITCHEN_3x3_', ' Kitchen'), ('_ORHEI_3x3_', ' Orhei'),
                                               ('_KAYYALI_3x3_', ' Kayyali'), ('_KIRSCH_3x3_', ' Kirsch'), ('GAUSS_BLUR_K_9', ''), ],
                                 save_plot=True, show_plot=False, set_all_to_legend=True)
    # Utils.create_latex_cpm_table_list()
    Utils.close_files()
def main_natural():
    """
    Compare every first-order gradient operator for EDLine extraction on
    the BSDS500 natural test images: run the pipeline once per
    operator/threshold combination, then benchmark and plot both the edge
    maps and the extracted line segments.
    Please look in example_main for all functions you can use.
    """
    Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/test')
    Application.delete_folder_appl_out()
    Benchmarking.delete_folder_benchmark_out()
    Application.do_get_image_job(port_output_name='RAW')
    Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')
    blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', sigma=0, kernel_size=9)
    edge_eval_ports = []
    line_eval_ports = []
    # First-order gradient operators under comparison.
    operators = [
        CONFIG.FILTERS.SOBEL_3x3,
        CONFIG.FILTERS.PREWITT_3x3,
        CONFIG.FILTERS.KIRSCH_3x3,
        CONFIG.FILTERS.KITCHEN_MALIN_3x3,
        CONFIG.FILTERS.KAYYALI_3x3,
        CONFIG.FILTERS.SCHARR_3x3,
        CONFIG.FILTERS.KROON_3x3,
        CONFIG.FILTERS.ORHEI_3x3,
    ]
    for edge_operator in operators:
        for gradient_threshold in [30, 40, 50]:
            for anchor_threshold in [10, 20]:
                edge_port, _seg, _lines_raw, line_port = Application.do_ed_lines_mod_job(
                    port_input_name=blur, operator=edge_operator, min_line_length=20,
                    gradient_thr=gradient_threshold, anchor_thr=anchor_threshold, scan_interval=1, line_fit_err_thr=1,
                    max_edges=21000, max_points_edge=8000, max_lines=8000, max_points_line=8000)
                edge_eval_ports.append(edge_port + '_L0')
                line_eval_ports.append(line_port + '_L0')
    Application.create_config_file(verbose=False)
    Application.configure_save_pictures(job_name_in_port=True, ports_to_save='ALL')
    # Application.configure_show_pictures(ports_to_show=list_to_save, time_to_show=200)
    Application.run_application()
    # Do bsds benchmarking
    # Be ware not to activate job_name_in_port in Application.configure_save_pictures
    Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
                                                gt_location='TestData/BSR/BSDS500/data/groundTruth/test',
                                                raw_image='TestData/BSR/BSDS500/data/images/test',
                                                jobs_set=edge_eval_ports, do_thinning=False)
    Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='edge_natural_list',
                                 list_of_data=edge_eval_ports, number_of_series=50,
                                 inputs=[''], self_contained_list=True, set_legend_left=False,
                                 suffix_to_cut_legend='_S_0_GRAY_RAW_L0',
                                 replace_list=[('EDGE_DRAWING_MOD_THR_', 'TG='), ('_ANC_THR_', ' TA='), ('_SCAN_', ' SI='),
                                               ('_SOBEL_3x3_', ' Sobel'), ('_PREWITT_3x3_', ' Prewitt'), ('_SCHARR_3x3_', ' Scharr'),
                                               ('_KROON_3x3_', ' Kroon'), ('_KITCHEN_3x3_', ' Kitchen'), ('_ORHEI_3x3_', ' Orhei'),
                                               ('_KAYYALI_3x3_', ' Kayyali'), ('_KIRSCH_3x3_', ' Kirsch'), ('GAUSS_BLUR_K_9', ''), ],
                                 save_plot=True, show_plot=False, set_all_to_legend=True)
    Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
                                                gt_location='TestData/BSR/BSDS500/data/groundTruth/test',
                                                raw_image='TestData/BSR/BSDS500/data/images/test',
                                                jobs_set=line_eval_ports, do_thinning=False)
    Utils.plot_first_cpm_results(prefix='ED_LINES_IMG_MIN_LEN_20_LINE_FIT_ERR_1_EDGE_DRAWING_MOD_SEGMENTS_', level='L0',
                                 order_by='f1', name='line_natural_list',
                                 list_of_data=line_eval_ports, number_of_series=50,
                                 inputs=[''], self_contained_list=True, set_legend_left=False,
                                 suffix_to_cut_legend='_S_0_GRAY_RAW_L0',
                                 replace_list=[('ED_LINES_IMG_MIN_LEN_20_LINE_FIT_ERR_1_EDGE_DRAWING_MOD_SEGMENTS_', 'TG='), ('_ANC_THR_', ' TA='), ('_SCAN_', ' SI='),
                                               ('_SOBEL_3x3_', ' Sobel'), ('_PREWITT_3x3_', ' Prewitt'), ('_SCHARR_3x3_', ' Scharr'),
                                               ('_KROON_3x3_', ' Kroon'), ('_KITCHEN_3x3_', ' Kitchen'), ('_ORHEI_3x3_', ' Orhei'),
                                               ('_KAYYALI_3x3_', ' Kayyali'), ('_KIRSCH_3x3_', ' Kirsch'), ('GAUSS_BLUR_K_9', ''), ],
                                 save_plot=True, show_plot=False, set_all_to_legend=True)
    Utils.close_files()
if __name__ == "__main__":
    # Run the Sobel-only threshold sweeps first, then the full first-order
    # operator comparison on the natural (BSDS500) and urban (TMBuD) sets.
    main_sobel_parsing_natural()
    main_sobel_parsing_urban()
    main_natural()
    main_urban()
| # noinspection PyUnresolvedReferences
import Application
# noinspection PyUnresolvedReferences
import Benchmarking
# noinspection PyUnresolvedReferences
import MachineLearning
# noinspection PyUnresolvedReferences
import config_main as CONFIG
# noinspection PyUnresolvedReferences
import Utils
def main_sobel_parsing_natural():
"""
Main function of framework Please look in example_main for all functions
you can use
"""
Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/test')
Application.delete_folder_appl_out()
Benchmarking.delete_folder_benchmark_out()
Application.do_get_image_job(port_output_name='RAW')
Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')
list = []
first_order_edge = [
CONFIG.FILTERS.SOBEL_3x3
]
for edge in first_order_edge:
for kernel_gaus in [3, 5, 7, 9]:
for grad_thr in [10, 30, 40, 50, 60, 70, 90, 110, 130, 150]:
for anc_thr in [10, 20, 30, 40, 60]:
for sc_int in [1, 3, 5]:
blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', kernel_size=kernel_gaus, sigma=0)
e3, e4 = Application.do_edge_drawing_mod_job(port_input_name=blur, operator=edge,
gradient_thr=grad_thr, anchor_thr=anc_thr, scan_interval=sc_int,
max_edges=100, max_points_edge=100)
list.append(e3 + '_L0')
Application.create_config_file()
Application.configure_save_pictures(ports_to_save=list)
# Application.configure_show_pictures(ports_to_show=list, time_to_show=0)
Application.run_application()
list.reverse()
# Do bsds benchmarking
# Be ware not to activate job_name_in_port in Application.configure_save_pictures
Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
gt_location='TestData/BSR/BSDS500/data/groundTruth/test',
raw_image='TestData/BSR/BSDS500/data/images/test',
jobs_set=list, do_thinning=False)
Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='edge_sobel_thr_finding_natural',
list_of_data=list, number_of_series=50,
inputs=[''], self_contained_list=True, set_legend_left=False,
suffix_to_cut_legend='_S_0_GRAY_RAW_L0',
replace_list=[('EDGE_DRAWING_MOD_THR_', 'TG='), ('_ANC_THR_', ' TA='), ('_SCAN_', ' SI='), ('_SOBEL_3x3_GAUSS_BLUR_K_', ' GK=')],
save_plot=True, show_plot=False, set_all_to_legend=True)
Utils.close_files()
def main_sobel_parsing_urban():
"""
Main function of framework Please look in example_main for all functions
you can use
"""
Application.set_input_image_folder('TestData/TMBuD/img/VAL/png')
Application.delete_folder_appl_out()
Benchmarking.delete_folder_benchmark_out()
Application.do_get_image_job(port_output_name='RAW')
Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')
list = []
first_order_edge = [
CONFIG.FILTERS.SOBEL_3x3
]
for edge in first_order_edge:
for kernel_gaus in [3, 5, 7, 9]:
for grad_thr in [10, 30, 40, 50, 60, 70, 90, 110, 130, 150]:
for anc_thr in [10, 20, 30, 40, 60]:
for sc_int in [1, 3, 5]:
blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', kernel_size=kernel_gaus, sigma=0)
e3, e4 = Application.do_edge_drawing_mod_job(port_input_name=blur, operator=edge,
gradient_thr=grad_thr, anchor_thr=anc_thr, scan_interval=sc_int,
max_edges=100, max_points_edge=100)
list.append(e3 + '_L0')
Application.create_config_file()
Application.configure_save_pictures(ports_to_save=list)
# Application.configure_show_pictures(ports_to_show=list, time_to_show=0)
Application.run_application()
list.reverse()
# Do bsds benchmarking
# Be ware not to activate job_name_in_port in Application.configure_save_pictures
Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
gt_location='TestData/TMBuD/edge/VAL/mat',
raw_image='TestData/TMBuD/img/VAL/png',
jobs_set=list, do_thinning=False)
Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='edge_sobel_thr_finding_urban',
list_of_data=list, number_of_series=50,
inputs=[''], self_contained_list=True, set_legend_left=False,
suffix_to_cut_legend='_S_0_GRAY_RAW_L0',
replace_list=[('EDGE_DRAWING_MOD_THR_', 'TG='), ('_ANC_THR_', ' TA='), ('_SCAN_', ' SI='), ('_SOBEL_3x3_GAUSS_BLUR_K_', ' GK=')],
save_plot=True, show_plot=False, set_all_to_legend=True)
Utils.close_files()
def main_urban():
"""
Main function of framework Please look in example_main for all functions you can use
"""
Application.set_input_image_folder('TestData\TMBuD\img\VAL\png')
# Application.delete_folder_appl_out()
# Benchmarking.delete_folder_benchmark_out()
Application.do_get_image_job(port_output_name='RAW')
Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')
blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', sigma=0, kernel_size=9)
list_to_eval_edge = []
list_to_eval_line = []
first_order_edge = [
CONFIG.FILTERS.SOBEL_3x3
, CONFIG.FILTERS.PREWITT_3x3
, CONFIG.FILTERS.KIRSCH_3x3
, CONFIG.FILTERS.KITCHEN_MALIN_3x3
, CONFIG.FILTERS.KAYYALI_3x3
, CONFIG.FILTERS.SCHARR_3x3
, CONFIG.FILTERS.KROON_3x3
, CONFIG.FILTERS.ORHEI_3x3
]
for edge in first_order_edge:
for gr_thr in [30, 40, 50]:
for anc_thr in [10,20]:
e1, e2, e3, e4 = Application.do_ed_lines_mod_job(port_input_name=blur, operator=edge, min_line_length=20,
gradient_thr=gr_thr, anchor_thr=anc_thr, scan_interval=1, line_fit_err_thr = 1,
max_edges=21000, max_points_edge=8000, max_lines=8000, max_points_line=8000
)
list_to_eval_edge.append(e1 + '_L0')
list_to_eval_line.append(e4 + '_L0')
Application.create_config_file(verbose=False)
Application.configure_save_pictures(job_name_in_port=True, ports_to_save='ALL')
# Application.configure_show_pictures(ports_to_show=list_to_save, time_to_show=200)
Application.run_application()
# Do bsds benchmarking
# Be ware not to activate job_name_in_port in Application.configure_save_pictures
Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
gt_location='TestData\TMBuD\edge\VAL\mat',
raw_image='TestData\TMBuD\img\VAL\png',
jobs_set=list_to_eval_edge, do_thinning=False)
Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='edge_urbanl_list',
list_of_data=list_to_eval_edge, number_of_series=50,
inputs=[''], self_contained_list=True, set_legend_left=False,
suffix_to_cut_legend='_S_0_GRAY_RAW_L0',
replace_list=[('EDGE_DRAWING_MOD_THR_', 'TG='), ('_ANC_THR_', ' TA='), ('_SCAN_', ' SI='),
('_SOBEL_3x3_', ' Sobel'), ('_PREWITT_3x3_', ' Prewitt'), ('_SCHARR_3x3_', ' Scharr'),
('_KROON_3x3_', ' Kroon'), ('_KITCHEN_3x3_', ' Kitchen'), ('_ORHEI_3x3_', ' Orhei'),
('_KAYYALI_3x3_', ' Kayyali'),('_KIRSCH_3x3_', ' Kirsch'), ('GAUSS_BLUR_K_9', ''),],
save_plot=True, show_plot=False, set_all_to_legend=True)
Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
gt_location='TestData\TMBuD\edge\VAL\mat',
raw_image='TestData\TMBuD\img\VAL\png',
jobs_set=list_to_eval_line, do_thinning=False)
Utils.plot_first_cpm_results(prefix='ED_LINES_IMG_MIN_LEN_20_LINE_FIT_ERR_1_EDGE_DRAWING_MOD_SEGMENTS_', level='L0', order_by='f1', name='line_urban_list',
list_of_data=list_to_eval_line, number_of_series=50,
inputs=[''], self_contained_list=True, set_legend_left=False,
suffix_to_cut_legend='_S_0_GRAY_RAW_L0',
replace_list=[('ED_LINES_IMG_MIN_LEN_20_LINE_FIT_ERR_1_EDGE_DRAWING_MOD_SEGMENTS_', 'TG='), ('_ANC_THR_', ' TA='), ('_SCAN_', ' SI='),
('_SOBEL_3x3_', ' Sobel'), ('_PREWITT_3x3_', ' Prewitt'), ('_SCHARR_3x3_', ' Scharr'),
('_KROON_3x3_', ' Kroon'), ('_KITCHEN_3x3_', ' Kitchen'), ('_ORHEI_3x3_', ' Orhei'),
('_KAYYALI_3x3_', ' Kayyali'),('_KIRSCH_3x3_', ' Kirsch'), ('GAUSS_BLUR_K_9', ''),],
save_plot=True, show_plot=False, set_all_to_legend=True)
# Utils.create_latex_cpm_table_list()
Utils.close_files()
def main_natural():
"""
Main function of framework Please look in example_main for all functions you can use
"""
Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/test')
Application.delete_folder_appl_out()
Benchmarking.delete_folder_benchmark_out()
Application.do_get_image_job(port_output_name='RAW')
Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')
blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', sigma=0, kernel_size=9)
list_to_eval_edge = []
list_to_eval_line = []
first_order_edge = [
CONFIG.FILTERS.SOBEL_3x3
, CONFIG.FILTERS.PREWITT_3x3
, CONFIG.FILTERS.KIRSCH_3x3
, CONFIG.FILTERS.KITCHEN_MALIN_3x3
, CONFIG.FILTERS.KAYYALI_3x3
, CONFIG.FILTERS.SCHARR_3x3
, CONFIG.FILTERS.KROON_3x3
, CONFIG.FILTERS.ORHEI_3x3
]
for edge in first_order_edge:
for gr_thr in [30, 40, 50]:
for anc_thr in [10,20]:
e1, e2, e3, e4 = Application.do_ed_lines_mod_job(port_input_name=blur, operator=edge, min_line_length=20,
gradient_thr=gr_thr, anchor_thr=anc_thr, scan_interval=1, line_fit_err_thr = 1,
max_edges=21000, max_points_edge=8000, max_lines=8000, max_points_line=8000
)
list_to_eval_edge.append(e1 + '_L0')
list_to_eval_line.append(e4 + '_L0')
Application.create_config_file(verbose=False)
Application.configure_save_pictures(job_name_in_port=True, ports_to_save='ALL')
# Application.configure_show_pictures(ports_to_show=list_to_save, time_to_show=200)
Application.run_application()
# Do bsds benchmarking
# Be ware not to activate job_name_in_port in Application.configure_save_pictures
Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
gt_location='TestData/BSR/BSDS500/data/groundTruth/test',
raw_image='TestData/BSR/BSDS500/data/images/test',
jobs_set=list_to_eval_edge, do_thinning=False)
Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='edge_natural_list',
list_of_data=list_to_eval_edge, number_of_series=50,
inputs=[''], self_contained_list=True, set_legend_left=False,
suffix_to_cut_legend='_S_0_GRAY_RAW_L0',
replace_list=[('EDGE_DRAWING_MOD_THR_', 'TG='), ('_ANC_THR_', ' TA='), ('_SCAN_', ' SI='),
('_SOBEL_3x3_', ' Sobel'), ('_PREWITT_3x3_', ' Prewitt'), ('_SCHARR_3x3_', ' Scharr'),
('_KROON_3x3_', ' Kroon'), ('_KITCHEN_3x3_', ' Kitchen'), ('_ORHEI_3x3_', ' Orhei'),
('_KAYYALI_3x3_', ' Kayyali'),('_KIRSCH_3x3_', ' Kirsch'), ('GAUSS_BLUR_K_9', ''),],
save_plot=True, show_plot=False, set_all_to_legend=True)
Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
gt_location='TestData/BSR/BSDS500/data/groundTruth/test',
raw_image='TestData/BSR/BSDS500/data/images/test',
jobs_set=list_to_eval_line, do_thinning=False)
Utils.plot_first_cpm_results(prefix='ED_LINES_IMG_MIN_LEN_20_LINE_FIT_ERR_1_EDGE_DRAWING_MOD_SEGMENTS_', level='L0',
order_by='f1', name='line_natural_list',
list_of_data=list_to_eval_line, number_of_series=50,
inputs=[''], self_contained_list=True, set_legend_left=False,
suffix_to_cut_legend='_S_0_GRAY_RAW_L0',
replace_list=[('ED_LINES_IMG_MIN_LEN_20_LINE_FIT_ERR_1_EDGE_DRAWING_MOD_SEGMENTS_', 'TG='), ('_ANC_THR_', ' TA='), ('_SCAN_', ' SI='),
('_SOBEL_3x3_', ' Sobel'), ('_PREWITT_3x3_', ' Prewitt'), ('_SCHARR_3x3_', ' Scharr'),
('_KROON_3x3_', ' Kroon'), ('_KITCHEN_3x3_', ' Kitchen'), ('_ORHEI_3x3_', ' Orhei'),
('_KAYYALI_3x3_', ' Kayyali'),('_KIRSCH_3x3_', ' Kirsch'), ('GAUSS_BLUR_K_9', ''),],
save_plot=True, show_plot=False, set_all_to_legend=True)
Utils.close_files()
if __name__ == "__main__":
main_sobel_parsing_natural()
main_sobel_parsing_urban()
main_natural()
main_urban()
| en | 0.558367 | # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences Main function of framework Please look in example_main for all functions you can use # Application.configure_show_pictures(ports_to_show=list, time_to_show=0) # Do bsds benchmarking # Be ware not to activate job_name_in_port in Application.configure_save_pictures Main function of framework Please look in example_main for all functions you can use # Application.configure_show_pictures(ports_to_show=list, time_to_show=0) # Do bsds benchmarking # Be ware not to activate job_name_in_port in Application.configure_save_pictures Main function of framework Please look in example_main for all functions you can use # Application.delete_folder_appl_out() # Benchmarking.delete_folder_benchmark_out() # Application.configure_show_pictures(ports_to_show=list_to_save, time_to_show=200) # Do bsds benchmarking # Be ware not to activate job_name_in_port in Application.configure_save_pictures # Utils.create_latex_cpm_table_list() Main function of framework Please look in example_main for all functions you can use # Application.configure_show_pictures(ports_to_show=list_to_save, time_to_show=200) # Do bsds benchmarking # Be ware not to activate job_name_in_port in Application.configure_save_pictures | 2.178035 | 2 |
rot_encoder_test.py | ifurusato/ros | 9 | 6620457 | <filename>rot_encoder_test.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by <NAME>. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: <NAME>
# created: 2020-11-13
# modified: 2020-11-13
#
import pytest
import sys, traceback
from colorama import init, Fore, Style
init()
from lib.i2c_scanner import I2CScanner
from lib.config_loader import ConfigLoader
from lib.rate import Rate
from lib.logger import Logger, Level
from lib.rotary_encoder import RotaryEncoder
# ..............................................................................
@pytest.mark.unit
def test_rot_encoder():
    """
    Hardware-in-the-loop test: wait for the rotary encoder to report ten
    value changes, logging each one, then reset the encoder. Returns early
    (test skipped) when no encoder is found on the I2C bus.
    """
    _log = Logger("rot-test", Level.INFO)
    _i2c_scanner = I2CScanner(Level.WARN)
    # NOTE(review): the scanner probes address 0x16 but the encoder below is
    # constructed at 0x0F — confirm which address the hardware actually uses.
    if not _i2c_scanner.has_address([0x16]):
        _log.warning('test ignored: no rotary encoder found.')
        return
    _rot = None
    try:
        # read YAML configuration
        _loader = ConfigLoader(Level.INFO)
        filename = 'config.yaml'
        _config = _loader.configure(filename)
        _rot = RotaryEncoder(_config, 0x0F, Level.INFO)
        _count = 0            # loop iterations (used for the periodic "waiting" log)
        _updates = 0          # number of observed value changes
        _update_led = True    # ask the encoder to update its LED on read
        _last_value = 0
        _rate = Rate(20)      # poll at 20 Hz
        _log.info(Fore.WHITE + Style.BRIGHT + 'waiting for rotary encoder to make 10 ticks...')
        while _updates < 10:
            # _value = _rot.update() # original method
            _value = _rot.read(_update_led) # improved method
            if _value != _last_value:
                _log.info(Style.BRIGHT + 'returned value: {:d}'.format(_value))
                _updates += 1
            _last_value = _value
            _count += 1
            if _count % 33 == 0:
                _log.info(Fore.BLACK + Style.BRIGHT + 'waiting…')
            _rate.wait()
    finally:
        # Always reset the encoder, even on interrupt/failure.
        if _rot:
            _log.info('resetting rotary encoder...')
            _rot.reset()
# main .........................................................................
_rot = None
def main(argv):
    """Entry point: run the encoder test, reporting Ctrl-C and errors in colour."""
    try:
        test_rot_encoder()
    except KeyboardInterrupt:
        print(Fore.CYAN + 'caught Ctrl-C; exiting...' + Style.RESET_ALL)
    except Exception:
        message = 'error starting ros: {}'.format(traceback.format_exc())
        print(Fore.RED + message + Style.RESET_ALL)
# call main ....................................................................
if __name__== "__main__":
    # Forward CLI arguments (without the program name) to main().
    main(sys.argv[1:])
#EOF
| <filename>rot_encoder_test.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by <NAME>. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: <NAME>
# created: 2020-11-13
# modified: 2020-11-13
#
import pytest
import sys, traceback
from colorama import init, Fore, Style
init()
from lib.i2c_scanner import I2CScanner
from lib.config_loader import ConfigLoader
from lib.rate import Rate
from lib.logger import Logger, Level
from lib.rotary_encoder import RotaryEncoder
# ..............................................................................
@pytest.mark.unit
def test_rot_encoder():
_log = Logger("rot-test", Level.INFO)
_i2c_scanner = I2CScanner(Level.WARN)
if not _i2c_scanner.has_address([0x16]):
_log.warning('test ignored: no rotary encoder found.')
return
_rot = None
try:
# read YAML configuration
_loader = ConfigLoader(Level.INFO)
filename = 'config.yaml'
_config = _loader.configure(filename)
_rot = RotaryEncoder(_config, 0x0F, Level.INFO)
_count = 0
_updates = 0
_update_led = True
_last_value = 0
_rate = Rate(20)
_log.info(Fore.WHITE + Style.BRIGHT + 'waiting for rotary encoder to make 10 ticks...')
while _updates < 10:
# _value = _rot.update() # original method
_value = _rot.read(_update_led) # improved method
if _value != _last_value:
_log.info(Style.BRIGHT + 'returned value: {:d}'.format(_value))
_updates += 1
_last_value = _value
_count += 1
if _count % 33 == 0:
_log.info(Fore.BLACK + Style.BRIGHT + 'waiting…')
_rate.wait()
finally:
if _rot:
_log.info('resetting rotary encoder...')
_rot.reset()
# main .........................................................................
_rot = None
def main(argv):
try:
test_rot_encoder()
except KeyboardInterrupt:
print(Fore.CYAN + 'caught Ctrl-C; exiting...' + Style.RESET_ALL)
except Exception:
print(Fore.RED + 'error starting ros: {}'.format(traceback.format_exc()) + Style.RESET_ALL)
# call main ....................................................................
if __name__== "__main__":
main(sys.argv[1:])
#EOF
| en | 0.668479 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright 2020-2021 by <NAME>. All rights reserved. This file is part # of the Robot Operating System project, released under the MIT License. Please # see the LICENSE file included as part of this package. # # author: <NAME> # created: 2020-11-13 # modified: 2020-11-13 # # .............................................................................. # read YAML configuration # _value = _rot.update() # original method # improved method # main ......................................................................... # call main .................................................................... #EOF | 2.233719 | 2 |
teslacam/unit.py | SillyGoat/teslacam | 1 | 6620458 | <gh_stars>1-10
' Unit classes for simple serialization and validation '
class Units:
    """A composite quantity made of several unit parts (e.g. hours + minutes).

    ``str`` shows the comma-joined non-zero parts, falling back to the last
    part when everything is zero; ``bool`` is True when any part is non-zero.
    """
    def __init__(self, *units):
        self.units = units
    def __bool__(self):
        # True when at least one component is non-zero.  The previous
        # expression ``all((True for unit in self.units if unit))`` was
        # vacuously True for every input — an empty generator makes all()
        # return True — so Units could never be falsy.
        return any(bool(unit) for unit in self.units)
    def __str__(self):
        if not self.units:
            # Previously self.units[-1] raised IndexError for an empty Units.
            return ''
        valid_units = [str(unit) for unit in self.units if unit]
        if valid_units:
            return ', '.join(valid_units)
        # Nothing non-zero to show: fall back to the last (smallest) unit.
        return str(self.units[-1])
class Unit:
    """A single quantity with a unit label, e.g. ``3 seconds``."""
    def __init__(self, value, value_type):
        self.value = value            # numeric magnitude
        self.value_type = value_type  # singular unit name, e.g. 'second'
    def __bool__(self):
        # Truthy exactly when the magnitude is non-zero.
        return bool(self.value)
    def __str__(self):
        # Values below one keep their fractional form; anything else is
        # truncated to an integer for display.
        if self.value < 1:
            shown = self.value
        else:
            shown = int(self.value)
        label = self.value_type if shown == 1 else self.value_type + 's'
        return f'{shown} {label}'
| ' Unit classes for simple serialization and validation '
class Units:
' Multiple units '
def __init__(self, *units):
self.units = units
def __bool__(self):
return all((True for unit in self.units if unit))
def __str__(self):
valid_units = [str(unit) for unit in self.units if unit]
if valid_units:
return ', '.join(valid_units)
return str(self.units[-1])
class Unit:
' Single unit '
def __init__(self, value, value_type):
self.value = value
self.value_type = value_type
def __bool__(self):
return bool(self.value)
def __str__(self):
formatted_value = self.value if self.value < 1 else int(self.value)
suffix = 's' if formatted_value != 1 else ''
return f'{formatted_value} {self.value_type}{suffix}' | none | 1 | 3.670174 | 4 | |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/lib/xmodule/xmodule/video_module/video_module.py | osoco/better-ways-of-thinking-about-software | 3 | 6620459 | """Video is ungraded Xmodule for support video content.
It's new improved video module, which support additional feature:
- Can play non-YouTube video sources via in-browser HTML5 video player.
- YouTube defaults to HTML5 mode from the start.
- Speed changes in both YouTube and non-YouTube videos happen via
in-browser HTML5 video method (when in HTML5 mode).
- Navigational subtitles can be disabled altogether via an attribute
in XML.
Examples of html5 videos for manual testing:
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.mp4
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.webm
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.ogv
"""
import copy
import json
import logging
from collections import OrderedDict, defaultdict
from operator import itemgetter
from django.conf import settings
from edx_django_utils.cache import RequestCache
from lxml import etree
from opaque_keys.edx.locator import AssetLocator
from web_fragments.fragment import Fragment
from xblock.completable import XBlockCompletionMode
from xblock.core import XBlock
from xblock.fields import ScopeIds
from xblock.runtime import KvsFieldData
from openedx.core.djangoapps.video_config.models import HLSPlaybackEnabledFlag, CourseYoutubeBlockedFlag
from openedx.core.djangoapps.video_pipeline.config.waffle import DEPRECATE_YOUTUBE, waffle_flags
from openedx.core.lib.cache_utils import request_cached
from openedx.core.lib.license import LicenseMixin
from xmodule.contentstore.content import StaticContent
from xmodule.editing_module import EditingMixin, TabsEditingMixin
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.inheritance import InheritanceKeyValueStore, own_metadata
from xmodule.raw_module import EmptyDataRawMixin
from xmodule.validation import StudioValidation, StudioValidationMessage
from xmodule.util.xmodule_django import add_webpack_to_fragment
from xmodule.video_module import manage_video_subtitles_save
from xmodule.x_module import (
PUBLIC_VIEW, STUDENT_VIEW,
HTMLSnippet, ResourceTemplates, shim_xmodule_js,
XModuleMixin, XModuleToXBlockMixin, XModuleDescriptorToXBlockMixin,
)
from xmodule.xml_module import XmlMixin, deserialize_field, is_pointer_tag, name_to_pathname
from .bumper_utils import bumperize
from .transcripts_utils import (
Transcript,
VideoTranscriptsMixin,
clean_video_id,
get_html5_ids,
get_transcript,
subs_filename
)
from .video_handlers import VideoStudentViewHandlers, VideoStudioViewHandlers
from .video_utils import create_youtube_string, format_xml_exception_message, get_poster, rewrite_video_url
from .video_xfields import VideoFields
# The following import/except block for edxval is temporary measure until
# edxval is a proper XBlock Runtime Service.
#
# Here's the deal: the VideoBlock should be able to take advantage of edx-val
# (https://github.com/edx/edx-val) to figure out what URL to give for video
# resources that have an edx_video_id specified. edx-val is a Django app, and
# including it causes tests to fail because we run common/lib tests standalone
# without Django dependencies. The alternatives seem to be:
#
# 1. Move VideoBlock out of edx-platform.
# 2. Accept the Django dependency in common/lib.
# 3. Try to import, catch the exception on failure, and check for the existence
# of edxval_api before invoking it in the code.
# 4. Make edxval an XBlock Runtime Service
#
# (1) is a longer term goal. VideoBlock should be made into an XBlock and
# extracted from edx-platform entirely. But that's expensive to do because of
# the various dependencies (like templates). Need to sort this out.
# (2) is explicitly discouraged.
# (3) is what we're doing today. The code is still functional when called within
# the context of the LMS, but does not cause failure on import when running
# standalone tests. Most VideoBlock tests tend to be in the LMS anyway,
# probably for historical reasons, so we're not making things notably worse.
# (4) is one of the next items on the backlog for edxval, and should get rid
# of this particular import silliness. It's just that I haven't made one before,
# and I was worried about trying it with my deadline constraints.
try:
import edxval.api as edxval_api
except ImportError:
edxval_api = None
try:
from lms.djangoapps.branding.models import BrandingInfoConfig
except ImportError:
BrandingInfoConfig = None
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
EXPORT_IMPORT_COURSE_DIR = 'course'
EXPORT_IMPORT_STATIC_DIR = 'static'
@XBlock.wants('settings', 'completion', 'i18n', 'request_cache')
class VideoBlock(
        VideoFields, VideoTranscriptsMixin, VideoStudioViewHandlers, VideoStudentViewHandlers,
        TabsEditingMixin, EmptyDataRawMixin, XmlMixin, EditingMixin,
        XModuleDescriptorToXBlockMixin, XModuleToXBlockMixin, HTMLSnippet, ResourceTemplates, XModuleMixin,
        LicenseMixin):
    """
    Video player XBlock, supporting YouTube and HTML5/VAL-hosted sources.

    XML source example:
        <video show_captions="true"
            youtube="0.75:jNCf2gIqpeE,1.0:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg"
            url_name="lecture_21_3" display_name="S19V3: Vacancies"
        >
            <source src=".../mit-3091x/M-3091X-FA12-L21-3_100.mp4"/>
            <source src=".../mit-3091x/M-3091X-FA12-L21-3_100.webm"/>
            <source src=".../mit-3091x/M-3091X-FA12-L21-3_100.ogv"/>
        </video>
    """
    # Completion is reported by the player (see the completion service usage
    # in get_html) rather than implicitly on render.
    has_custom_completion = True
    completion_mode = XBlockCompletionMode.COMPLETABLE
    video_time = 0
    icon_class = 'video'
    # Allow the player to render when the course is in read-only mode.
    show_in_read_only_mode = True
    # Studio editor tabs: the "Basic" transcripts tab plus the generic
    # advanced metadata editor tab.
    tabs = [
        {
            'name': _("Basic"),
            'template': "video/transcripts.html",
            'current': True
        },
        {
            'name': _("Advanced"),
            'template': "tabs/metadata-edit-tab.html"
        }
    ]
    # NOTE(review): presumably opts into legacy XModule CSS setup and
    # per-student anonymous IDs for state handlers — confirm against runtime.
    uses_xmodule_styles_setup = True
    requires_per_student_anonymous_id = True
def get_transcripts_for_student(self, transcripts):
"""Return transcript information necessary for rendering the XModule student view.
This is more or less a direct extraction from `get_html`.
Args:
transcripts (dict): A dict with all transcripts and a sub.
Returns:
Tuple of (track_url, transcript_language, sorted_languages)
track_url -> subtitle download url
transcript_language -> default transcript language
sorted_languages -> dictionary of available transcript languages
"""
track_url = None
sub, other_lang = transcripts["sub"], transcripts["transcripts"]
if self.download_track:
if self.track:
track_url = self.track
elif sub or other_lang:
track_url = self.runtime.handler_url(self, 'transcript', 'download').rstrip('/?')
transcript_language = self.get_default_transcript_language(transcripts)
native_languages = {lang: label for lang, label in settings.LANGUAGES if len(lang) == 2}
languages = {
lang: native_languages.get(lang, display)
for lang, display in settings.ALL_LANGUAGES
if lang in other_lang
}
if not other_lang or (other_lang and sub):
languages['en'] = 'English'
# OrderedDict for easy testing of rendered context in tests
sorted_languages = sorted(list(languages.items()), key=itemgetter(1))
sorted_languages = OrderedDict(sorted_languages)
return track_url, transcript_language, sorted_languages
@property
def youtube_deprecated(self):
"""
Return True if youtube is deprecated and hls as primary playback is enabled else False
"""
# Return False if `hls` playback feature is disabled.
if not HLSPlaybackEnabledFlag.feature_enabled(self.location.course_key):
return False
# check if youtube has been deprecated and hls as primary playback
# is enabled for this course
return waffle_flags()[DEPRECATE_YOUTUBE].is_enabled(self.location.course_key)
def youtube_disabled_for_course(self): # lint-amnesty, pylint: disable=missing-function-docstring
if not self.location.context_key.is_course:
return False # Only courses have this flag
request_cache = RequestCache('youtube_disabled_for_course')
cache_response = request_cache.get_cached_response(self.location.context_key)
if cache_response.is_found:
return cache_response.value
youtube_is_disabled = CourseYoutubeBlockedFlag.feature_enabled(self.location.course_key)
request_cache.set(self.location.context_key, youtube_is_disabled)
return youtube_is_disabled
def prioritize_hls(self, youtube_streams, html5_sources):
"""
Decide whether hls can be prioritized as primary playback or not.
If both the youtube and hls sources are present then make decision on flag
If only either youtube or hls is present then play whichever is present
"""
yt_present = bool(youtube_streams.strip()) if youtube_streams else False
hls_present = any(source for source in html5_sources)
if yt_present and hls_present:
return self.youtube_deprecated
return False
def student_view(self, _context):
"""
Return the student view.
"""
fragment = Fragment(self.get_html())
add_webpack_to_fragment(fragment, 'VideoBlockPreview')
shim_xmodule_js(fragment, 'Video')
return fragment
def author_view(self, context):
"""
Renders the Studio preview view.
"""
return self.student_view(context)
def studio_view(self, _context):
"""
Return the studio view.
"""
fragment = Fragment(
self.system.render_template(self.mako_template, self.get_context())
)
add_webpack_to_fragment(fragment, 'VideoBlockStudio')
shim_xmodule_js(fragment, 'TabsEditingDescriptor')
return fragment
def public_view(self, context):
"""
Returns a fragment that contains the html for the public view
"""
if getattr(self.runtime, 'suppports_state_for_anonymous_users', False):
# The new runtime can support anonymous users as fully as regular users:
return self.student_view(context)
fragment = Fragment(self.get_html(view=PUBLIC_VIEW))
add_webpack_to_fragment(fragment, 'VideoBlockPreview')
shim_xmodule_js(fragment, 'Video')
return fragment
    def get_html(self, view=STUDENT_VIEW):  # lint-amnesty, pylint: disable=arguments-differ, too-many-statements
        """
        Render the video player template for this block.

        Resolves playable sources (VAL profiles via edx-val, HTML5 files,
        YouTube streams, locale-specific CDN rewrites), transcript data,
        poster image and completion settings, then renders 'video.html'.

        view: STUDENT_VIEW (default) or PUBLIC_VIEW; PUBLIC_VIEW disables
        server-side state saving.
        """
        track_status = (self.download_track and self.track)
        transcript_download_format = self.transcript_download_format if not track_status else None
        sources = [source for source in self.html5_sources if source]
        download_video_link = None
        branding_info = None
        youtube_streams = ""
        video_duration = None
        video_status = None
        # Determine if there is an alternative source for this video
        # based on user locale. This exists to support cases where
        # we leverage a geography specific CDN, like China.
        default_cdn_url = getattr(settings, 'VIDEO_CDN_URL', {}).get('default')
        cdn_url = getattr(settings, 'VIDEO_CDN_URL', {}).get(self.system.user_location, default_cdn_url)
        # If we have an edx_video_id, we prefer its values over what we store
        # internally for download links (source, html5_sources) and the youtube
        # stream.
        if self.edx_video_id and edxval_api:  # lint-amnesty, pylint: disable=too-many-nested-blocks
            try:
                val_profiles = ["youtube", "desktop_webm", "desktop_mp4"]
                if HLSPlaybackEnabledFlag.feature_enabled(self.course_id):
                    val_profiles.append('hls')
                # strip edx_video_id to prevent ValVideoNotFoundError error if unwanted spaces are there. TNL-5769
                val_video_urls = edxval_api.get_urls_for_profiles(self.edx_video_id.strip(), val_profiles)
                # VAL will always give us the keys for the profiles we asked for, but
                # if it doesn't have an encoded video entry for that Video + Profile, the
                # value will map to `None`
                # add the non-youtube urls to the list of alternative sources
                # use the last non-None non-youtube non-hls url as the link to download the video
                for url in [val_video_urls[p] for p in val_profiles if p != "youtube"]:
                    if url:
                        if url not in sources:
                            sources.append(url)
                        # don't include hls urls for download
                        if self.download_video and not url.endswith('.m3u8'):
                            # function returns None when the url cannot be re-written
                            rewritten_link = rewrite_video_url(cdn_url, url)
                            if rewritten_link:
                                download_video_link = rewritten_link
                            else:
                                download_video_link = url
                # set the youtube url
                if val_video_urls["youtube"]:
                    youtube_streams = "1.00:{}".format(val_video_urls["youtube"])
                # get video duration
                video_data = edxval_api.get_video_info(self.edx_video_id.strip())
                video_duration = video_data.get('duration')
                video_status = video_data.get('status')
            except (edxval_api.ValInternalError, edxval_api.ValVideoNotFoundError):
                # VAL raises this exception if it can't find data for the edx video ID. This can happen if the
                # course data is ported to a machine that does not have the VAL data. So for now, pass on this
                # exception and fallback to whatever we find in the VideoBlock.
                log.warning("Could not retrieve information from VAL for edx Video ID: %s.", self.edx_video_id)
        # If the user comes from China use China CDN for html5 videos.
        # 'CN' is China ISO 3166-1 country code.
        # Video caching is disabled for Studio. User_location is always None in Studio.
        # CountryMiddleware disabled for Studio.
        if getattr(self, 'video_speed_optimizations', True) and cdn_url:
            branding_info = BrandingInfoConfig.get_config().get(self.system.user_location)
            if self.edx_video_id and edxval_api and video_status != 'external':
                for index, source_url in enumerate(sources):
                    new_url = rewrite_video_url(cdn_url, source_url)
                    if new_url:
                        sources[index] = new_url
        # If there was no edx_video_id, or if there was no download specified
        # for it, we fall back on whatever we find in the VideoBlock.
        if not download_video_link and self.download_video:
            if self.html5_sources:
                download_video_link = self.html5_sources[0]
            # don't give the option to download HLS video urls
            if download_video_link and download_video_link.endswith('.m3u8'):
                download_video_link = None
        transcripts = self.get_transcripts_info()
        track_url, transcript_language, sorted_languages = self.get_transcripts_for_student(transcripts=transcripts)
        cdn_eval = False
        cdn_exp_group = None
        if self.youtube_disabled_for_course():
            self.youtube_streams = ''  # lint-amnesty, pylint: disable=attribute-defined-outside-init
        else:
            self.youtube_streams = youtube_streams or create_youtube_string(self)  # pylint: disable=W0201
        settings_service = self.runtime.service(self, 'settings')  # lint-amnesty, pylint: disable=unused-variable
        poster = None
        if edxval_api and self.edx_video_id:
            poster = edxval_api.get_course_video_image_url(
                course_id=self.runtime.course_id.for_branch(None),
                edx_video_id=self.edx_video_id.strip()
            )
        completion_service = self.runtime.service(self, 'completion')
        if completion_service:
            completion_enabled = completion_service.completion_tracking_enabled()
        else:
            completion_enabled = False
        # This is the setting that controls whether the autoadvance button will be visible, not whether the
        # video will autoadvance or not.
        # For autoadvance controls to be shown, both the feature flag and the course setting must be true.
        # This allows to enable the feature for certain courses only.
        autoadvance_enabled = settings.FEATURES.get('ENABLE_AUTOADVANCE_VIDEOS', False) and \
            getattr(self, 'video_auto_advance', False)
        # This is the current status of auto-advance (not the control visibility).
        # But when controls aren't visible we force it to off. The student might have once set the preference to
        # true, but now staff or admin have hidden the autoadvance button and the student won't be able to disable
        # it anymore; therefore we force-disable it in this case (when controls aren't visible).
        autoadvance_this_video = self.auto_advance and autoadvance_enabled
        # JS player configuration, serialized into the template as JSON.
        metadata = {
            'autoAdvance': autoadvance_this_video,
            # For now, the option "data-autohide-html5" is hard coded. This option
            # either enables or disables autohiding of controls and captions on mouse
            # inactivity. If set to true, controls and captions will autohide for
            # HTML5 sources (non-YouTube) after a period of mouse inactivity over the
            # whole video. When the mouse moves (or a key is pressed while any part of
            # the video player is focused), the captions and controls will be shown
            # once again.
            #
            # There is no option in the "Advanced Editor" to set this option. However,
            # this option will have an effect if changed to "True". The code on
            # front-end exists.
            'autohideHtml5': False,
            'autoplay': settings.FEATURES.get('AUTOPLAY_VIDEOS', False),
            # This won't work when we move to data that
            # isn't on the filesystem
            'captionDataDir': getattr(self, 'data_dir', None),
            'completionEnabled': completion_enabled,
            'completionPercentage': settings.COMPLETION_VIDEO_COMPLETE_PERCENTAGE,
            'duration': video_duration,
            'end': self.end_time.total_seconds(),  # pylint: disable=no-member
            'generalSpeed': self.global_speed,
            'lmsRootURL': settings.LMS_ROOT_URL,
            'poster': poster,
            'prioritizeHls': self.prioritize_hls(self.youtube_streams, sources),
            'publishCompletionUrl': self.runtime.handler_url(self, 'publish_completion', '').rstrip('?'),
            # This is the server's guess at whether youtube is available for
            # this user, based on what was recorded the last time we saw the
            # user, and defaulting to True.
            'recordedYoutubeIsAvailable': self.youtube_is_available,
            'savedVideoPosition': self.saved_video_position.total_seconds(),  # pylint: disable=no-member
            'saveStateEnabled': view != PUBLIC_VIEW,
            'saveStateUrl': self.ajax_url + '/save_user_state',
            'showCaptions': json.dumps(self.show_captions),
            'sources': sources,
            'speed': self.speed,
            'start': self.start_time.total_seconds(),  # pylint: disable=no-member
            'streams': self.youtube_streams,
            'transcriptAvailableTranslationsUrl': self.runtime.handler_url(
                self, 'transcript', 'available_translations'
            ).rstrip('/?'),
            'transcriptLanguage': transcript_language,
            'transcriptLanguages': sorted_languages,
            'transcriptTranslationUrl': self.runtime.handler_url(
                self, 'transcript', 'translation/__lang__'
            ).rstrip('/?'),
            'ytApiUrl': settings.YOUTUBE['API'],
            'ytMetadataEndpoint': (
                # In the new runtime, get YouTube metadata via a handler. The handler supports anonymous users and
                # can work in sandboxed iframes. In the old runtime, the JS will call the LMS's yt_video_metadata
                # API endpoint directly (not an XBlock handler).
                self.runtime.handler_url(self, 'yt_video_metadata')
                if getattr(self.runtime, 'suppports_state_for_anonymous_users', False) else ''
            ),
            'ytTestTimeout': settings.YOUTUBE['TEST_TIMEOUT'],
        }
        bumperize(self)
        # Template context for 'video.html'.
        context = {
            'autoadvance_enabled': autoadvance_enabled,
            'bumper_metadata': json.dumps(self.bumper['metadata']),  # pylint: disable=E1101
            'metadata': json.dumps(OrderedDict(metadata)),
            'poster': json.dumps(get_poster(self)),
            'branding_info': branding_info,
            'cdn_eval': cdn_eval,
            'cdn_exp_group': cdn_exp_group,
            'id': self.location.html_id(),
            'display_name': self.display_name_with_default,
            'handout': self.handout,
            'download_video_link': download_video_link,
            'track': track_url,
            'transcript_download_format': transcript_download_format,
            'transcript_download_formats_list': self.fields['transcript_download_format'].values,  # lint-amnesty, pylint: disable=unsubscriptable-object
            'license': getattr(self, "license", None),
        }
        return self.system.render_template('video.html', context)
def validate(self):
"""
Validates the state of this Video XBlock instance. This
is the override of the general XBlock method, and it will also ask
its superclass to validate.
"""
validation = super().validate()
if not isinstance(validation, StudioValidation):
validation = StudioValidation.copy(validation)
no_transcript_lang = []
for lang_code, transcript in self.transcripts.items():
if not transcript:
no_transcript_lang.append([label for code, label in settings.ALL_LANGUAGES if code == lang_code][0])
if no_transcript_lang:
ungettext = self.runtime.service(self, "i18n").ungettext
validation.set_summary(
StudioValidationMessage(
StudioValidationMessage.WARNING,
ungettext(
'There is no transcript file associated with the {lang} language.',
'There are no transcript files associated with the {lang} languages.',
len(no_transcript_lang)
).format(lang=', '.join(sorted(no_transcript_lang)))
)
)
return validation
    def editor_saved(self, user, old_metadata, old_content):  # lint-amnesty, pylint: disable=unused-argument
        """
        Used to update video values during `self`:save method from CMS.
        old_metadata: dict, values of fields of `self` with scope=settings which were explicitly set by user.
        old_content, same as `old_metadata` but for scope=content.
        Due to nature of code flow in item.py::_save_item, before current function is called,
        fields of `self` instance have been already updated, but not yet saved.
        To obtain values, which were changed by user input,
        one should compare own_metadata(self) and old_metadata.
        Video player has two tabs, and due to nature of sync between tabs,
        metadata from Basic tab is always sent when video player is edited and saved first time, for example:
        {'youtube_id_1_0': u'3_yD_cEKoCk', 'display_name': u'Video', 'sub': u'3_yD_cEKoCk', 'html5_sources': []},
        that's why these fields will always present in old_metadata after first save. This should be fixed.
        At consequent save requests html5_sources are always sent too, disregard of their change by user.
        That means that html5_sources are always in list of fields that were changed (`metadata` param in save_item).
        This should be fixed too.
        """
        metadata_was_changed_by_user = old_metadata != own_metadata(self)
        # There is an edge case when old_metadata and own_metadata are same and we are importing transcript from youtube
        # then there is a syncing issue where html5_subs are not syncing with youtube sub, We can make sync better by
        # checking if transcript is present for the video and if any html5_ids transcript is not present then trigger
        # the manage_video_subtitles_save to create the missing transcript with particular html5_id.
        if not metadata_was_changed_by_user and self.sub and hasattr(self, 'html5_sources'):
            html5_ids = get_html5_ids(self.html5_sources)
            for subs_id in html5_ids:
                try:
                    Transcript.asset(self.location, subs_id)
                except NotFoundError:
                    # If a transcript does not exist with a particular html5_id then there is no need to check other
                    # html5_ids because we have to create a new transcript with this missing html5_id by turning on
                    # the metadata_was_changed_by_user flag.
                    metadata_was_changed_by_user = True
                    break
        if metadata_was_changed_by_user:
            # Normalize the video ID (strip whitespace) before any VAL lookup.
            self.edx_video_id = self.edx_video_id and self.edx_video_id.strip()
            # We want to override `youtube_id_1_0` with val youtube profile in the first place when someone adds/edits
            # an `edx_video_id` or its underlying YT val profile. Without this, override will only happen when a user
            # saves the video second time. This is because of the syncing of basic and advanced video settings which
            # also syncs val youtube id from basic tab's `Video Url` to advanced tab's `Youtube ID`.
            if self.edx_video_id and edxval_api:
                val_youtube_id = edxval_api.get_url_for_profile(self.edx_video_id, 'youtube')
                if val_youtube_id and self.youtube_id_1_0 != val_youtube_id:
                    self.youtube_id_1_0 = val_youtube_id
            # (Re)generate and sync transcripts for the changed metadata.
            manage_video_subtitles_save(
                self,
                user,
                old_metadata if old_metadata else None,
                generate_translation=True
            )
def save_with_metadata(self, user):
"""
Save module with updated metadata to database."
"""
self.save()
self.runtime.modulestore.update_item(self, user.id)
    @property
    def editable_metadata_fields(self):
        """
        Studio metadata-editor field descriptors, with video-specific
        adjustments applied on top of the defaults: license removal when
        licensing is disabled, deprecated `sub` removal, a custom
        transcripts widget, the VideoID editor type, and a handout uploader.
        """
        editable_fields = super().editable_metadata_fields
        settings_service = self.runtime.service(self, 'settings')
        if settings_service:
            xb_settings = settings_service.get_settings_bucket(self)
            if not xb_settings.get("licensing_enabled", False) and "license" in editable_fields:
                del editable_fields["license"]
        # Default Timed Transcript a.k.a `sub` has been deprecated and end users shall
        # not be able to modify it.
        editable_fields.pop('sub')
        # Language choices for the transcripts widget, sorted by display label.
        languages = [{'label': label, 'code': lang} for lang, label in settings.ALL_LANGUAGES]
        languages.sort(key=lambda l: l['label'])
        editable_fields['transcripts']['custom'] = True
        editable_fields['transcripts']['languages'] = languages
        editable_fields['transcripts']['type'] = 'VideoTranslations'
        # We need to send ajax requests to show transcript status
        # whenever edx_video_id changes on frontend. Thats why we
        # are changing type to `VideoID` so that a specific
        # Backbonjs view can handle it.
        editable_fields['edx_video_id']['type'] = 'VideoID'
        # construct transcripts info and also find if `en` subs exist
        transcripts_info = self.get_transcripts_info()
        possible_sub_ids = [self.sub, self.youtube_id_1_0] + get_html5_ids(self.html5_sources)
        for sub_id in possible_sub_ids:
            try:
                _, sub_id, _ = get_transcript(self, lang='en', output_format=Transcript.TXT)
                transcripts_info['transcripts'] = dict(transcripts_info['transcripts'], en=sub_id)
                break
            except NotFoundError:
                continue
        editable_fields['transcripts']['value'] = transcripts_info['transcripts']
        editable_fields['transcripts']['urlRoot'] = self.runtime.handler_url(
            self,
            'studio_transcript',
            'translation'
        ).rstrip('/?')
        editable_fields['handout']['type'] = 'FileUploader'
        return editable_fields
@classmethod
def parse_xml_new_runtime(cls, node, runtime, keys):
"""
Implement the video block's special XML parsing requirements for the
new runtime only. For all other runtimes, use the existing XModule-style
methods like .from_xml().
"""
video_block = runtime.construct_xblock_from_class(cls, keys)
field_data = cls.parse_video_xml(node)
for key, val in field_data.items():
if key not in cls.fields: # lint-amnesty, pylint: disable=unsupported-membership-test
continue # parse_video_xml returns some old non-fields like 'source'
setattr(video_block, key, cls.fields[key].from_json(val)) # lint-amnesty, pylint: disable=unsubscriptable-object
# Don't use VAL in the new runtime:
video_block.edx_video_id = None
return video_block
    @classmethod
    def from_xml(cls, xml_data, system, id_generator):
        """
        Creates an instance of this descriptor from the supplied xml_data.
        This may be overridden by subclasses.

        Args:
            xml_data: A string of xml that will be translated into data and
                children for this module.
            system: A DescriptorSystem for interacting with external resources.
            id_generator: Used to generate course-specific urls and identifiers.

        Returns:
            The constructed video block, with VAL updated from the XML.
        """
        xml_object = etree.fromstring(xml_data)
        url_name = xml_object.get('url_name', xml_object.get('slug'))
        block_type = 'video'
        definition_id = id_generator.create_definition(block_type, url_name)
        usage_id = id_generator.create_usage(definition_id)
        # A "pointer tag" refers to a separate XML file holding the real
        # definition; load and parse that file instead.
        if is_pointer_tag(xml_object):
            filepath = cls._format_filepath(xml_object.tag, name_to_pathname(url_name))
            xml_object = cls.load_file(filepath, system.resources_fs, usage_id)
            system.parse_asides(xml_object, definition_id, usage_id, id_generator)
        field_data = cls.parse_video_xml(xml_object, id_generator)
        kvs = InheritanceKeyValueStore(initial_values=field_data)
        field_data = KvsFieldData(kvs)
        video = system.construct_xblock_from_class(
            cls,
            # We're loading a descriptor, so student_id is meaningless
            # We also don't have separate notions of definition and usage ids yet,
            # so we use the location for both
            ScopeIds(None, block_type, definition_id, usage_id),
            field_data,
        )
        # Update VAL with info extracted from `xml_object`
        video.edx_video_id = video.import_video_info_into_val(
            xml_object,
            system.resources_fs,
            getattr(id_generator, 'target_course_id', None)
        )
        return video
    def definition_to_xml(self, resource_fs):  # lint-amnesty, pylint: disable=too-many-statements
        """
        Return an etree `<video>` Element representing this module.

        Serializes scalar attributes, HTML5 sources, track/handout links,
        transcripts, VAL (edx-val) export data and license information.
        """
        xml = etree.Element('video')
        youtube_string = create_youtube_string(self)
        # Mild workaround to ensure that tests pass -- if a field
        # is set to its default value, we don't need to write it out.
        if youtube_string and youtube_string != '1.00:3_yD_cEKoCk':
            xml.set('youtube', str(youtube_string))
        xml.set('url_name', self.url_name)
        attrs = [
            ('display_name', self.display_name),
            ('show_captions', json.dumps(self.show_captions)),
            ('start_time', self.start_time),
            ('end_time', self.end_time),
            ('sub', self.sub),
            ('download_track', json.dumps(self.download_track)),
            ('download_video', json.dumps(self.download_video))
        ]
        for key, value in attrs:
            # Mild workaround to ensure that tests pass -- if a field
            # is set to its default value, we don't write it out.
            if value:
                if key in self.fields and self.fields[key].is_set_on(self):  # lint-amnesty, pylint: disable=unsubscriptable-object, unsupported-membership-test
                    try:
                        xml.set(key, str(value))
                    except UnicodeDecodeError:
                        exception_message = format_xml_exception_message(self.location, key, value)
                        log.exception(exception_message)
                        # If exception is UnicodeDecodeError set value using unicode 'utf-8' scheme.
                        log.info("Setting xml value using 'utf-8' scheme.")
                        xml.set(key, str(value, 'utf-8'))
                    except ValueError:
                        exception_message = format_xml_exception_message(self.location, key, value)
                        log.exception(exception_message)
                        raise
        for source in self.html5_sources:
            ele = etree.Element('source')
            ele.set('src', source)
            xml.append(ele)
        if self.track:
            ele = etree.Element('track')
            ele.set('src', self.track)
            xml.append(ele)
        if self.handout:
            ele = etree.Element('handout')
            ele.set('src', self.handout)
            xml.append(ele)
        transcripts = {}
        if self.transcripts is not None:
            transcripts.update(self.transcripts)
        edx_video_id = clean_video_id(self.edx_video_id)
        if edxval_api and edx_video_id:
            try:
                # Create static dir if not created earlier.
                resource_fs.makedirs(EXPORT_IMPORT_STATIC_DIR, recreate=True)
                # Backward compatible exports
                # edxval exports new transcripts into the course OLX and returns a transcript
                # files map so that it can also be rewritten in old transcript metadata fields
                # (i.e. `self.transcripts`) on import and older open-releases (<= ginkgo),
                # who do not have deprecated contentstore yet, can also import and use new-style
                # transcripts into their openedX instances.
                exported_metadata = edxval_api.export_to_xml(
                    video_id=edx_video_id,
                    resource_fs=resource_fs,
                    static_dir=EXPORT_IMPORT_STATIC_DIR,
                    course_id=str(self.runtime.course_id.for_branch(None))
                )
                # Update xml with edxval metadata
                xml.append(exported_metadata['xml'])
                # we don't need sub if english transcript
                # is also in new transcripts.
                new_transcripts = exported_metadata['transcripts']
                transcripts.update(new_transcripts)
                if new_transcripts.get('en'):
                    xml.set('sub', '')
                # Update `transcripts` attribute in the xml
                xml.set('transcripts', json.dumps(transcripts, sort_keys=True))
            except edxval_api.ValVideoNotFoundError:
                # No VAL entry for this video ID; export whatever we have locally.
                pass
        # Sorting transcripts for easy testing of resulting xml
        for transcript_language in sorted(transcripts.keys()):
            ele = etree.Element('transcript')
            ele.set('language', transcript_language)
            ele.set('src', transcripts[transcript_language])
            xml.append(ele)
        # handle license specifically
        self.add_license_to_xml(xml)
        return xml
def create_youtube_url(self, youtube_id):
"""
Args:
youtube_id: The ID of the video to create a link for
Returns:
A full youtube url to the video whose ID is passed in
"""
if youtube_id:
return f'https://www.youtube.com/watch?v={youtube_id}'
else:
return ''
    def get_context(self):
        """
        Extend the Studio editing context with data for the transcripts
        "Basic" tab (display name, combined video URL list, edx_video_id).
        """
        _context = super().get_context()
        metadata_fields = copy.deepcopy(self.editable_metadata_fields)
        display_name = metadata_fields['display_name']
        video_url = metadata_fields['html5_sources']
        video_id = metadata_fields['edx_video_id']
        youtube_id_1_0 = metadata_fields['youtube_id_1_0']
        def get_youtube_link(video_id):
            """
            Returns the fully-qualified YouTube URL for the given video identifier.

            Note: the parameter deliberately shadows the outer `video_id`
            metadata dict; only the local name is rebound here.
            """
            # First try a lookup in VAL. If we have a YouTube entry there, it overrides the
            # one passed in.
            if self.edx_video_id and edxval_api:
                val_youtube_id = edxval_api.get_url_for_profile(self.edx_video_id, "youtube")
                if val_youtube_id:
                    video_id = val_youtube_id
            return self.create_youtube_url(video_id)
        _ = self.runtime.service(self, "i18n").ugettext
        video_url.update({
            'help': _('The URL for your video. This can be a YouTube URL or a link to an .mp4, .ogg, or '
                      '.webm video file hosted elsewhere on the Internet.'),
            'display_name': _('Default Video URL'),
            'field_name': 'video_url',
            'type': 'VideoList',
            'default_value': [get_youtube_link(youtube_id_1_0['default_value'])]
        })
        source_url = self.create_youtube_url(youtube_id_1_0['value'])
        # First try a lookup in VAL. If any video encoding is found given the video id then
        # override the source_url with it.
        if self.edx_video_id and edxval_api:
            val_profiles = ['youtube', 'desktop_webm', 'desktop_mp4']
            if HLSPlaybackEnabledFlag.feature_enabled(self.runtime.course_id.for_branch(None)):
                val_profiles.append('hls')
            # Get video encodings for val profiles.
            val_video_encodings = edxval_api.get_urls_for_profiles(self.edx_video_id, val_profiles)
            # VAL's youtube source has greater priority over external youtube source.
            if val_video_encodings.get('youtube'):
                source_url = self.create_youtube_url(val_video_encodings['youtube'])
            # If no youtube source is provided externally or in VAL, update source_url in order: hls > mp4 and webm
            if not source_url:
                if val_video_encodings.get('hls'):
                    source_url = val_video_encodings['hls']
                elif val_video_encodings.get('desktop_mp4'):
                    source_url = val_video_encodings['desktop_mp4']
                elif val_video_encodings.get('desktop_webm'):
                    source_url = val_video_encodings['desktop_webm']
        # Only add if html5 sources do not already contain source_url.
        if source_url and source_url not in video_url['value']:
            video_url['value'].insert(0, source_url)
        metadata = {
            'display_name': display_name,
            'video_url': video_url,
            'edx_video_id': video_id
        }
        _context.update({'transcripts_basic_tab_metadata': metadata})
        return _context
@classmethod
def _parse_youtube(cls, data):
    """
    Parse a comma-separated string of "<speed>:<youtube id>" pairs, such as
    "1.0:AXdE34_U,1.5:VO3SxfeD", into a speed -> youtube-id mapping.
    Necessary for backwards compatibility with XML-based courses.
    """
    parsed = {'0.75': '', '1.00': '', '1.25': '', '1.50': ''}
    for entry in data.split(','):
        parts = entry.split(':')
        try:
            # Normalize the speed to two decimal places ("1.0" -> "1.00").
            normalized_speed = '%.2f' % float(parts[0])
            # Handle the fact that youtube IDs got double-quoted for a period of time.
            # Note: we pass in "VideoFields.youtube_id_1_0" so we deserialize as a String--
            # it doesn't matter what the actual speed is for the purposes of deserializing.
            parsed[normalized_speed] = deserialize_field(cls.youtube_id_1_0, parts[1])
        except (ValueError, IndexError):
            log.warning('Invalid YouTube ID: %s', entry)
    return parsed
@classmethod
def parse_video_xml(cls, xml, id_generator=None):
    """
    Parse video fields out of xml_data. The fields are set if they are
    present in the XML.

    Arguments:
        xml: the video XML, either as a string or an already-parsed lxml element.
        id_generator: used to generate course-specific urls and identifiers;
            only its ``target_course_id`` attribute is read here (if present).

    Returns:
        dict mapping field names to their deserialized values.
    """
    if isinstance(xml, str):
        xml = etree.fromstring(xml)
    field_data = {}
    # Convert between key types for certain attributes --
    # necessary for backwards compatibility.
    conversions = {
        # example: 'start_time': cls._example_convert_start_time
    }
    # Convert between key names for certain attributes --
    # necessary for backwards compatibility.
    compat_keys = {
        'from': 'start_time',
        'to': 'end_time'
    }
    # <source src="..."/> children become the html5_sources list.
    sources = xml.findall('source')
    if sources:
        field_data['html5_sources'] = [ele.get('src') for ele in sources]
    track = xml.find('track')
    if track is not None:
        field_data['track'] = track.get('src')
    handout = xml.find('handout')
    if handout is not None:
        field_data['handout'] = handout.get('src')
    # <transcript language="xx" src="..."/> children map language code -> filename.
    transcripts = xml.findall('transcript')
    if transcripts:
        field_data['transcripts'] = {tr.get('language'): tr.get('src') for tr in transcripts}
    for attr, value in xml.items():
        if attr in compat_keys:  # lint-amnesty, pylint: disable=consider-using-get
            attr = compat_keys[attr]
        if attr in cls.metadata_to_strip + ('url_name', 'name'):
            continue
        if attr == 'youtube':
            speeds = cls._parse_youtube(value)
            for speed, youtube_id in speeds.items():
                # should have made these youtube_id_1_00 for
                # cleanliness, but hindsight doesn't need glasses
                normalized_speed = speed[:-1] if speed.endswith('0') else speed
                # If the user has specified html5 sources, make sure we don't use the default video
                if youtube_id != '' or 'html5_sources' in field_data:
                    field_data['youtube_id_{}'.format(normalized_speed.replace('.', '_'))] = youtube_id
        elif attr in conversions:
            field_data[attr] = conversions[attr](value)
        elif attr not in cls.fields:  # lint-amnesty, pylint: disable=unsupported-membership-test
            # Unknown attributes are preserved for round-tripping in xml_attributes.
            field_data.setdefault('xml_attributes', {})[attr] = value
        else:
            # We export values with json.dumps (well, except for Strings, but
            # for about a month we did it for Strings also).
            field_data[attr] = deserialize_field(cls.fields[attr], value)  # lint-amnesty, pylint: disable=unsubscriptable-object
    course_id = getattr(id_generator, 'target_course_id', None)
    # Update the handout location with current course_id
    if 'handout' in list(field_data.keys()) and course_id:
        handout_location = StaticContent.get_location_from_path(field_data['handout'])
        if isinstance(handout_location, AssetLocator):
            handout_new_location = StaticContent.compute_location(course_id, handout_location.path)
            field_data['handout'] = StaticContent.serialize_asset_key_with_slash(handout_new_location)
    # For backwards compatibility: Add `source` if XML doesn't have `download_video`
    # attribute.
    if 'download_video' not in field_data and sources:
        field_data['source'] = field_data['html5_sources'][0]
    # For backwards compatibility: if XML doesn't have `download_track` attribute,
    # it means that it is an old format. So, if `track` has some value,
    # `download_track` needs to have value `True`.
    if 'download_track' not in field_data and track is not None:
        field_data['download_track'] = True
    # load license if it exists
    field_data = LicenseMixin.parse_license_from_xml(field_data, xml)
    return field_data
def import_video_info_into_val(self, xml, resource_fs, course_id):
    """
    Import parsed video info from `xml` into edxval.

    Arguments:
        xml (lxml object): xml representation of video to be imported.
        resource_fs (OSFS): Import file system.
        course_id (str): course id

    Returns:
        The cleaned edx_video_id, possibly replaced by the id that edxval
        assigned during the import.
    """
    video_id = clean_video_id(self.edx_video_id)
    # Ensure a <video_asset> element exists; the exported XML may not include one.
    asset_elem = xml.find('video_asset')
    if asset_elem is None:
        asset_elem = etree.Element('video_asset')
    # Names of external transcript files keyed by language code, e.g.
    # {
    #     'en': ['The_Flash.srt', 'Harry_Potter.srt'],
    #     'es': ['Green_Arrow.srt']
    # }
    external_transcripts = defaultdict(list)
    # English transcripts may come from the `sub` and `youtube_id_1_0` fields.
    external_transcripts['en'] = [
        subs_filename(candidate, 'en')
        for candidate in (self.sub, self.youtube_id_1_0)
        if candidate
    ]
    for lang, transcript_name in self.transcripts.items():
        external_transcripts[lang].append(transcript_name)
    if edxval_api:
        video_id = edxval_api.import_from_xml(
            asset_elem,
            video_id,
            resource_fs,
            EXPORT_IMPORT_STATIC_DIR,
            external_transcripts,
            course_id=course_id
        )
    return video_id
def index_dictionary(self):
    """
    Build the search-index representation of this video: the display name
    plus any transcript text (default and per-language) that can be found.
    """
    index_body = super().index_dictionary()
    video_body = {
        "display_name": self.display_name,
    }

    def _add_transcript(language=None):
        """Fold the transcript text for `language` into video_body; skip silently if missing."""
        try:
            text = get_transcript(self, lang=language, output_format=Transcript.TXT)[0].replace("\n", " ")
        except NotFoundError:
            return
        key = f"transcript_{language if language else self.transcript_language}"
        video_body[key] = text

    if self.sub:
        _add_transcript()
    # Check to see if there are transcripts in other languages besides default transcript
    if self.transcripts:
        for lang in self.transcripts:
            _add_transcript(lang)
    if "content" in index_body:
        index_body["content"].update(video_body)
    else:
        index_body["content"] = video_body
    index_body["content_type"] = "Video"
    return index_body
@property
def request_cache(self):
    """
    Returns the request_cache from the runtime.

    Used by `get_cached_val_data_for_course` as the per-request memoization
    backend for bulk VAL lookups.
    """
    return self.runtime.service(self, "request_cache")
@classmethod
@request_cached(
    # args[1] is the `request_cache` positional argument below, so results are
    # memoized on the per-request cache rather than a process-global one.
    request_cache_getter=lambda args, kwargs: args[1],
)
def get_cached_val_data_for_course(cls, request_cache, video_profile_names, course_id):  # lint-amnesty, pylint: disable=unused-argument
    """
    Returns the VAL data for the requested video profiles for the given course.

    `request_cache` is unused in the body; it exists so the decorator above can
    key the memoization (see `request_cache_getter`).
    """
    return edxval_api.get_video_info_for_course_and_profiles(str(course_id), video_profile_names)
def student_view_data(self, context=None):
    """
    Returns a JSON representation of the student_view of this XModule.

    The contract of the JSON content is between the caller and the particular XModule.

    URL resolution order: bulk VAL course data first, then an individual VAL
    lookup (unless context['allow_cache_miss'] is "false"), then this block's
    own html5_sources / youtube fields as a fallback.
    """
    context = context or {}
    # If the "only_on_web" field is set on this video, do not return the rest of the video's data
    # in this json view, since this video is to be accessed only through its web view."
    if self.only_on_web:
        return {"only_on_web": True}
    encoded_videos = {}
    val_video_data = {}
    all_sources = self.html5_sources or []
    # Check in VAL data first if edx_video_id exists
    if self.edx_video_id:
        video_profile_names = context.get("profiles", ["mobile_low", 'desktop_mp4', 'desktop_webm', 'mobile_high'])
        if HLSPlaybackEnabledFlag.feature_enabled(self.location.course_key) and 'hls' not in video_profile_names:
            video_profile_names.append('hls')
        # get and cache bulk VAL data for course
        val_course_data = self.get_cached_val_data_for_course(
            self.request_cache,
            video_profile_names,
            self.location.course_key,
        )
        val_video_data = val_course_data.get(self.edx_video_id, {})
        # Get the encoded videos if data from VAL is found
        if val_video_data:
            encoded_videos = val_video_data.get('profiles', {})
        # If information for this edx_video_id is not found in the bulk course data, make a
        # separate request for this individual edx_video_id, unless cache misses are disabled.
        # This is useful/required for videos that don't have a course designated, such as the introductory video
        # that is shared across many courses. However, this results in a separate database request so watch
        # out for any performance hit if many such videos exist in a course. Set the 'allow_cache_miss' parameter
        # to False to disable this fall back.
        elif context.get("allow_cache_miss", "True").lower() == "true":
            try:
                val_video_data = edxval_api.get_video_info(self.edx_video_id)
                # Unfortunately, the VAL API is inconsistent in how it returns the encodings, so remap here.
                for enc_vid in val_video_data.pop('encoded_videos'):
                    if enc_vid['profile'] in video_profile_names:
                        encoded_videos[enc_vid['profile']] = {key: enc_vid[key] for key in ["url", "file_size"]}
            except edxval_api.ValVideoNotFoundError:
                pass
    # Fall back to other video URLs in the video module if not found in VAL
    if not encoded_videos:
        if all_sources:
            encoded_videos["fallback"] = {
                "url": all_sources[0],
                "file_size": 0,  # File size is unknown for fallback URLs
            }
        # Include youtube link if there is no encoding for mobile- ie only a fallback URL or no encodings at all
        # We are including a fallback URL for older versions of the mobile app that don't handle Youtube urls
        if self.youtube_id_1_0:
            encoded_videos["youtube"] = {
                "url": self.create_youtube_url(self.youtube_id_1_0),
                "file_size": 0,  # File size is not relevant for external link
            }
    available_translations = self.available_translations(self.get_transcripts_info())
    # Per-language transcript download URLs, served through the transcript handler.
    transcripts = {
        lang: self.runtime.handler_url(self, 'transcript', 'download', query="lang=" + lang, thirdparty=True)
        for lang in available_translations
    }
    return {
        "only_on_web": self.only_on_web,
        "duration": val_video_data.get('duration', None),
        "transcripts": transcripts,
        "encoded_videos": encoded_videos,
        "all_sources": all_sources,
    }
| """Video is ungraded Xmodule for support video content.
It's the new, improved video module, which supports additional features:
- Can play non-YouTube video sources via in-browser HTML5 video player.
- YouTube defaults to HTML5 mode from the start.
- Speed changes in both YouTube and non-YouTube videos happen via
in-browser HTML5 video method (when in HTML5 mode).
- Navigational subtitles can be disabled altogether via an attribute
in XML.
Examples of html5 videos for manual testing:
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.mp4
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.webm
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.ogv
"""
import copy
import json
import logging
from collections import OrderedDict, defaultdict
from operator import itemgetter
from django.conf import settings
from edx_django_utils.cache import RequestCache
from lxml import etree
from opaque_keys.edx.locator import AssetLocator
from web_fragments.fragment import Fragment
from xblock.completable import XBlockCompletionMode
from xblock.core import XBlock
from xblock.fields import ScopeIds
from xblock.runtime import KvsFieldData
from openedx.core.djangoapps.video_config.models import HLSPlaybackEnabledFlag, CourseYoutubeBlockedFlag
from openedx.core.djangoapps.video_pipeline.config.waffle import DEPRECATE_YOUTUBE, waffle_flags
from openedx.core.lib.cache_utils import request_cached
from openedx.core.lib.license import LicenseMixin
from xmodule.contentstore.content import StaticContent
from xmodule.editing_module import EditingMixin, TabsEditingMixin
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.inheritance import InheritanceKeyValueStore, own_metadata
from xmodule.raw_module import EmptyDataRawMixin
from xmodule.validation import StudioValidation, StudioValidationMessage
from xmodule.util.xmodule_django import add_webpack_to_fragment
from xmodule.video_module import manage_video_subtitles_save
from xmodule.x_module import (
PUBLIC_VIEW, STUDENT_VIEW,
HTMLSnippet, ResourceTemplates, shim_xmodule_js,
XModuleMixin, XModuleToXBlockMixin, XModuleDescriptorToXBlockMixin,
)
from xmodule.xml_module import XmlMixin, deserialize_field, is_pointer_tag, name_to_pathname
from .bumper_utils import bumperize
from .transcripts_utils import (
Transcript,
VideoTranscriptsMixin,
clean_video_id,
get_html5_ids,
get_transcript,
subs_filename
)
from .video_handlers import VideoStudentViewHandlers, VideoStudioViewHandlers
from .video_utils import create_youtube_string, format_xml_exception_message, get_poster, rewrite_video_url
from .video_xfields import VideoFields
# The following import/except block for edxval is temporary measure until
# edxval is a proper XBlock Runtime Service.
#
# Here's the deal: the VideoBlock should be able to take advantage of edx-val
# (https://github.com/edx/edx-val) to figure out what URL to give for video
# resources that have an edx_video_id specified. edx-val is a Django app, and
# including it causes tests to fail because we run common/lib tests standalone
# without Django dependencies. The alternatives seem to be:
#
# 1. Move VideoBlock out of edx-platform.
# 2. Accept the Django dependency in common/lib.
# 3. Try to import, catch the exception on failure, and check for the existence
# of edxval_api before invoking it in the code.
# 4. Make edxval an XBlock Runtime Service
#
# (1) is a longer term goal. VideoBlock should be made into an XBlock and
# extracted from edx-platform entirely. But that's expensive to do because of
# the various dependencies (like templates). Need to sort this out.
# (2) is explicitly discouraged.
# (3) is what we're doing today. The code is still functional when called within
# the context of the LMS, but does not cause failure on import when running
# standalone tests. Most VideoBlock tests tend to be in the LMS anyway,
# probably for historical reasons, so we're not making things notably worse.
# (4) is one of the next items on the backlog for edxval, and should get rid
# of this particular import silliness. It's just that I haven't made one before,
# and I was worried about trying it with my deadline constraints.
try:
import edxval.api as edxval_api
except ImportError:
edxval_api = None
try:
from lms.djangoapps.branding.models import BrandingInfoConfig
except ImportError:
BrandingInfoConfig = None
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
EXPORT_IMPORT_COURSE_DIR = 'course'
EXPORT_IMPORT_STATIC_DIR = 'static'
@XBlock.wants('settings', 'completion', 'i18n', 'request_cache')
class VideoBlock(
        VideoFields, VideoTranscriptsMixin, VideoStudioViewHandlers, VideoStudentViewHandlers,
        TabsEditingMixin, EmptyDataRawMixin, XmlMixin, EditingMixin,
        XModuleDescriptorToXBlockMixin, XModuleToXBlockMixin, HTMLSnippet, ResourceTemplates, XModuleMixin,
        LicenseMixin):
    """
    Video XBlock: plays YouTube and HTML5 (mp4/webm/ogg/HLS) video sources,
    with transcripts, downloads, and Studio editing support.

    XML source example:
        <video show_captions="true"
            youtube="0.75:jNCf2gIqpeE,1.0:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg"
            url_name="lecture_21_3" display_name="S19V3: Vacancies"
        >
            <source src=".../mit-3091x/M-3091X-FA12-L21-3_100.mp4"/>
            <source src=".../mit-3091x/M-3091X-FA12-L21-3_100.webm"/>
            <source src=".../mit-3091x/M-3091X-FA12-L21-3_100.ogv"/>
        </video>
    """
    # Completion is reported explicitly (publish_completion handler) rather than on view.
    has_custom_completion = True
    completion_mode = XBlockCompletionMode.COMPLETABLE
    video_time = 0
    icon_class = 'video'
    show_in_read_only_mode = True
    # Studio editor tabs: the Basic tab (transcripts/video url) and the Advanced
    # metadata tab. `_` is a no-op marker so these names can be scraped for i18n.
    tabs = [
        {
            'name': _("Basic"),
            'template': "video/transcripts.html",
            'current': True
        },
        {
            'name': _("Advanced"),
            'template': "tabs/metadata-edit-tab.html"
        }
    ]
    uses_xmodule_styles_setup = True
    requires_per_student_anonymous_id = True
def get_transcripts_for_student(self, transcripts):
    """Return transcript information necessary for rendering the XModule student view.

    This is more or less a direct extraction from `get_html`.

    Args:
        transcripts (dict): A dict with all transcripts and a sub.

    Returns:
        Tuple of (track_url, transcript_language, sorted_languages)

            track_url -> subtitle download url
            transcript_language -> default transcript language
            sorted_languages -> dictionary of available transcript languages
    """
    sub = transcripts["sub"]
    other_lang = transcripts["transcripts"]

    # Work out the subtitle download link, if downloads are enabled.
    track_url = None
    if self.download_track:
        if self.track:
            track_url = self.track
        elif sub or other_lang:
            track_url = self.runtime.handler_url(self, 'transcript', 'download').rstrip('/?')

    transcript_language = self.get_default_transcript_language(transcripts)

    # Prefer a language's native label whenever it has a two-letter code.
    native_languages = {code: label for code, label in settings.LANGUAGES if len(code) == 2}
    languages = {}
    for code, display in settings.ALL_LANGUAGES:
        if code in other_lang:
            languages[code] = native_languages.get(code, display)
    if not other_lang or (other_lang and sub):
        languages['en'] = 'English'

    # OrderedDict for easy testing of rendered context in tests
    sorted_languages = OrderedDict(sorted(languages.items(), key=itemgetter(1)))
    return track_url, transcript_language, sorted_languages
@property
def youtube_deprecated(self):
    """
    Return True if youtube is deprecated and hls as primary playback is
    enabled for this course; otherwise False.
    """
    # Return False if `hls` playback feature is disabled.
    if not HLSPlaybackEnabledFlag.feature_enabled(self.location.course_key):
        return False
    # check if youtube has been deprecated and hls as primary playback
    # is enabled for this course
    return waffle_flags()[DEPRECATE_YOUTUBE].is_enabled(self.location.course_key)
def youtube_disabled_for_course(self):
    """
    Return True if YouTube playback is explicitly blocked for this course.

    The flag lookup is memoized per request in a RequestCache keyed by the
    course (context) key.
    """
    if not self.location.context_key.is_course:
        return False  # Only courses have this flag
    request_cache = RequestCache('youtube_disabled_for_course')
    cache_response = request_cache.get_cached_response(self.location.context_key)
    if cache_response.is_found:
        return cache_response.value
    youtube_is_disabled = CourseYoutubeBlockedFlag.feature_enabled(self.location.course_key)
    request_cache.set(self.location.context_key, youtube_is_disabled)
    return youtube_is_disabled
def prioritize_hls(self, youtube_streams, html5_sources):
    """
    Decide whether hls can be prioritized as primary playback or not.

    If both youtube and html5 sources are present, defer to the course's
    youtube-deprecation flag; when only one kind of source exists there is
    nothing to prioritize, so return False.
    """
    has_youtube = bool(youtube_streams.strip()) if youtube_streams else False
    has_html5 = any(html5_sources)
    return self.youtube_deprecated if (has_youtube and has_html5) else False
def student_view(self, _context):
    """
    Return the student view: the rendered player HTML wrapped in a Fragment,
    with the preview webpack bundle and the XModule JS shim attached.
    """
    fragment = Fragment(self.get_html())
    add_webpack_to_fragment(fragment, 'VideoBlockPreview')
    shim_xmodule_js(fragment, 'Video')
    return fragment
def author_view(self, context):
    """
    Renders the Studio preview view.

    The Studio preview is identical to what a learner sees.
    """
    return self.student_view(context)
def studio_view(self, _context):
    """
    Return the studio (editing) view: the tabbed editor template rendered
    into a Fragment with the Studio webpack bundle and editor JS shim.
    """
    fragment = Fragment(
        self.system.render_template(self.mako_template, self.get_context())
    )
    add_webpack_to_fragment(fragment, 'VideoBlockStudio')
    shim_xmodule_js(fragment, 'TabsEditingDescriptor')
    return fragment
def public_view(self, context):
    """
    Returns a fragment that contains the html for the public (anonymous) view.
    """
    # NOTE(review): 'suppports' (triple p) matches the spelling used elsewhere in
    # this file (ytMetadataEndpoint); presumably the runtime attribute carries the
    # same misspelling -- confirm against the runtime before "fixing" it.
    if getattr(self.runtime, 'suppports_state_for_anonymous_users', False):
        # The new runtime can support anonymous users as fully as regular users:
        return self.student_view(context)
    fragment = Fragment(self.get_html(view=PUBLIC_VIEW))
    add_webpack_to_fragment(fragment, 'VideoBlockPreview')
    shim_xmodule_js(fragment, 'Video')
    return fragment
def get_html(self, view=STUDENT_VIEW):  # lint-amnesty, pylint: disable=arguments-differ, too-many-statements
    """
    Render the video player ('video.html') for this block.

    Resolves video sources and the download link (preferring VAL data keyed by
    edx_video_id, falling back to this block's own fields), applies locale-based
    CDN rewriting, gathers transcripts, poster and player metadata, then renders
    the template.
    """
    track_status = (self.download_track and self.track)
    transcript_download_format = self.transcript_download_format if not track_status else None
    sources = [source for source in self.html5_sources if source]
    download_video_link = None
    branding_info = None
    youtube_streams = ""
    video_duration = None
    video_status = None
    # Determine if there is an alternative source for this video
    # based on user locale. This exists to support cases where
    # we leverage a geography specific CDN, like China.
    default_cdn_url = getattr(settings, 'VIDEO_CDN_URL', {}).get('default')
    cdn_url = getattr(settings, 'VIDEO_CDN_URL', {}).get(self.system.user_location, default_cdn_url)
    # If we have an edx_video_id, we prefer its values over what we store
    # internally for download links (source, html5_sources) and the youtube
    # stream.
    if self.edx_video_id and edxval_api:  # lint-amnesty, pylint: disable=too-many-nested-blocks
        try:
            val_profiles = ["youtube", "desktop_webm", "desktop_mp4"]
            if HLSPlaybackEnabledFlag.feature_enabled(self.course_id):
                val_profiles.append('hls')
            # strip edx_video_id to prevent ValVideoNotFoundError error if unwanted spaces are there. TNL-5769
            val_video_urls = edxval_api.get_urls_for_profiles(self.edx_video_id.strip(), val_profiles)
            # VAL will always give us the keys for the profiles we asked for, but
            # if it doesn't have an encoded video entry for that Video + Profile, the
            # value will map to `None`
            # add the non-youtube urls to the list of alternative sources
            # use the last non-None non-youtube non-hls url as the link to download the video
            for url in [val_video_urls[p] for p in val_profiles if p != "youtube"]:
                if url:
                    if url not in sources:
                        sources.append(url)
                    # don't include hls urls for download
                    if self.download_video and not url.endswith('.m3u8'):
                        # function returns None when the url cannot be re-written
                        rewritten_link = rewrite_video_url(cdn_url, url)
                        if rewritten_link:
                            download_video_link = rewritten_link
                        else:
                            download_video_link = url
            # set the youtube url
            if val_video_urls["youtube"]:
                youtube_streams = "1.00:{}".format(val_video_urls["youtube"])
            # get video duration
            video_data = edxval_api.get_video_info(self.edx_video_id.strip())
            video_duration = video_data.get('duration')
            video_status = video_data.get('status')
        except (edxval_api.ValInternalError, edxval_api.ValVideoNotFoundError):
            # VAL raises this exception if it can't find data for the edx video ID. This can happen if the
            # course data is ported to a machine that does not have the VAL data. So for now, pass on this
            # exception and fallback to whatever we find in the VideoBlock.
            log.warning("Could not retrieve information from VAL for edx Video ID: %s.", self.edx_video_id)
    # If the user comes from China use China CDN for html5 videos.
    # 'CN' is China ISO 3166-1 country code.
    # Video caching is disabled for Studio. User_location is always None in Studio.
    # CountryMiddleware disabled for Studio.
    if getattr(self, 'video_speed_optimizations', True) and cdn_url:
        # NOTE(review): BrandingInfoConfig may be None when the branding app could
        # not be imported (see the try/except at module scope) -- presumably this
        # branch only runs inside the LMS where it exists; confirm.
        branding_info = BrandingInfoConfig.get_config().get(self.system.user_location)
        if self.edx_video_id and edxval_api and video_status != 'external':
            for index, source_url in enumerate(sources):
                new_url = rewrite_video_url(cdn_url, source_url)
                if new_url:
                    sources[index] = new_url
    # If there was no edx_video_id, or if there was no download specified
    # for it, we fall back on whatever we find in the VideoBlock.
    if not download_video_link and self.download_video:
        if self.html5_sources:
            download_video_link = self.html5_sources[0]
        # don't give the option to download HLS video urls
        if download_video_link and download_video_link.endswith('.m3u8'):
            download_video_link = None
    transcripts = self.get_transcripts_info()
    track_url, transcript_language, sorted_languages = self.get_transcripts_for_student(transcripts=transcripts)
    cdn_eval = False
    cdn_exp_group = None
    if self.youtube_disabled_for_course():
        self.youtube_streams = ''  # lint-amnesty, pylint: disable=attribute-defined-outside-init
    else:
        self.youtube_streams = youtube_streams or create_youtube_string(self)  # pylint: disable=W0201
    settings_service = self.runtime.service(self, 'settings')  # lint-amnesty, pylint: disable=unused-variable
    poster = None
    if edxval_api and self.edx_video_id:
        poster = edxval_api.get_course_video_image_url(
            course_id=self.runtime.course_id.for_branch(None),
            edx_video_id=self.edx_video_id.strip()
        )
    completion_service = self.runtime.service(self, 'completion')
    if completion_service:
        completion_enabled = completion_service.completion_tracking_enabled()
    else:
        completion_enabled = False
    # This is the setting that controls whether the autoadvance button will be visible, not whether the
    # video will autoadvance or not.
    # For autoadvance controls to be shown, both the feature flag and the course setting must be true.
    # This allows to enable the feature for certain courses only.
    autoadvance_enabled = settings.FEATURES.get('ENABLE_AUTOADVANCE_VIDEOS', False) and \
        getattr(self, 'video_auto_advance', False)
    # This is the current status of auto-advance (not the control visibility).
    # But when controls aren't visible we force it to off. The student might have once set the preference to
    # true, but now staff or admin have hidden the autoadvance button and the student won't be able to disable
    # it anymore; therefore we force-disable it in this case (when controls aren't visible).
    autoadvance_this_video = self.auto_advance and autoadvance_enabled
    # Player configuration passed to the front-end as JSON.
    metadata = {
        'autoAdvance': autoadvance_this_video,
        # For now, the option "data-autohide-html5" is hard coded. This option
        # either enables or disables autohiding of controls and captions on mouse
        # inactivity. If set to true, controls and captions will autohide for
        # HTML5 sources (non-YouTube) after a period of mouse inactivity over the
        # whole video. When the mouse moves (or a key is pressed while any part of
        # the video player is focused), the captions and controls will be shown
        # once again.
        #
        # There is no option in the "Advanced Editor" to set this option. However,
        # this option will have an effect if changed to "True". The code on
        # front-end exists.
        'autohideHtml5': False,
        'autoplay': settings.FEATURES.get('AUTOPLAY_VIDEOS', False),
        # This won't work when we move to data that
        # isn't on the filesystem
        'captionDataDir': getattr(self, 'data_dir', None),
        'completionEnabled': completion_enabled,
        'completionPercentage': settings.COMPLETION_VIDEO_COMPLETE_PERCENTAGE,
        'duration': video_duration,
        'end': self.end_time.total_seconds(),  # pylint: disable=no-member
        'generalSpeed': self.global_speed,
        'lmsRootURL': settings.LMS_ROOT_URL,
        'poster': poster,
        'prioritizeHls': self.prioritize_hls(self.youtube_streams, sources),
        'publishCompletionUrl': self.runtime.handler_url(self, 'publish_completion', '').rstrip('?'),
        # This is the server's guess at whether youtube is available for
        # this user, based on what was recorded the last time we saw the
        # user, and defaulting to True.
        'recordedYoutubeIsAvailable': self.youtube_is_available,
        'savedVideoPosition': self.saved_video_position.total_seconds(),  # pylint: disable=no-member
        'saveStateEnabled': view != PUBLIC_VIEW,
        'saveStateUrl': self.ajax_url + '/save_user_state',
        'showCaptions': json.dumps(self.show_captions),
        'sources': sources,
        'speed': self.speed,
        'start': self.start_time.total_seconds(),  # pylint: disable=no-member
        'streams': self.youtube_streams,
        'transcriptAvailableTranslationsUrl': self.runtime.handler_url(
            self, 'transcript', 'available_translations'
        ).rstrip('/?'),
        'transcriptLanguage': transcript_language,
        'transcriptLanguages': sorted_languages,
        'transcriptTranslationUrl': self.runtime.handler_url(
            self, 'transcript', 'translation/__lang__'
        ).rstrip('/?'),
        'ytApiUrl': settings.YOUTUBE['API'],
        'ytMetadataEndpoint': (
            # In the new runtime, get YouTube metadata via a handler. The handler supports anonymous users and
            # can work in sandboxed iframes. In the old runtime, the JS will call the LMS's yt_video_metadata
            # API endpoint directly (not an XBlock handler).
            self.runtime.handler_url(self, 'yt_video_metadata')
            if getattr(self.runtime, 'suppports_state_for_anonymous_users', False) else ''
        ),
        'ytTestTimeout': settings.YOUTUBE['TEST_TIMEOUT'],
    }
    # Attach pre-roll ("bumper") video data, if configured.
    bumperize(self)
    context = {
        'autoadvance_enabled': autoadvance_enabled,
        'bumper_metadata': json.dumps(self.bumper['metadata']),  # pylint: disable=E1101
        'metadata': json.dumps(OrderedDict(metadata)),
        'poster': json.dumps(get_poster(self)),
        'branding_info': branding_info,
        'cdn_eval': cdn_eval,
        'cdn_exp_group': cdn_exp_group,
        'id': self.location.html_id(),
        'display_name': self.display_name_with_default,
        'handout': self.handout,
        'download_video_link': download_video_link,
        'track': track_url,
        'transcript_download_format': transcript_download_format,
        'transcript_download_formats_list': self.fields['transcript_download_format'].values,  # lint-amnesty, pylint: disable=unsubscriptable-object
        'license': getattr(self, "license", None),
    }
    return self.system.render_template('video.html', context)
def validate(self):
    """
    Validates the state of this Video XBlock instance. This
    is the override of the general XBlock method, and it will also ask
    its superclass to validate.
    """
    validation = super().validate()
    if not isinstance(validation, StudioValidation):
        validation = StudioValidation.copy(validation)

    # Collect the display labels of every language whose transcript file is missing.
    missing_labels = []
    for lang_code, transcript in self.transcripts.items():
        if not transcript:
            label = [name for code, name in settings.ALL_LANGUAGES if code == lang_code][0]
            missing_labels.append(label)

    if missing_labels:
        ungettext = self.runtime.service(self, "i18n").ungettext
        validation.set_summary(
            StudioValidationMessage(
                StudioValidationMessage.WARNING,
                ungettext(
                    'There is no transcript file associated with the {lang} language.',
                    'There are no transcript files associated with the {lang} languages.',
                    len(missing_labels)
                ).format(lang=', '.join(sorted(missing_labels)))
            )
        )
    return validation
def editor_saved(self, user, old_metadata, old_content):  # lint-amnesty, pylint: disable=unused-argument
    """
    Used to update video values during `self`:save method from CMS.

    old_metadata: dict, values of fields of `self` with scope=settings which were explicitly set by user.
    old_content, same as `old_metadata` but for scope=content.

    Due to nature of code flow in item.py::_save_item, before current function is called,
    fields of `self` instance have been already updated, but not yet saved.

    To obtain values, which were changed by user input,
    one should compare own_metadata(self) and old_medatada.

    Video player has two tabs, and due to nature of sync between tabs,
    metadata from Basic tab is always sent when video player is edited and saved first time, for example:
    {'youtube_id_1_0': u'3_yD_cEKoCk', 'display_name': u'Video', 'sub': u'3_yD_cEKoCk', 'html5_sources': []},
    that's why these fields will always present in old_metadata after first save. This should be fixed.

    At consequent save requests html5_sources are always sent too, disregard of their change by user.
    That means that html5_sources are always in list of fields that were changed (`metadata` param in save_item).
    This should be fixed too.
    """
    metadata_was_changed_by_user = old_metadata != own_metadata(self)
    # There is an edge case when old_metadata and own_metadata are same and we are importing transcript from youtube
    # then there is a syncing issue where html5_subs are not syncing with youtube sub, We can make sync better by
    # checking if transcript is present for the video and if any html5_ids transcript is not present then trigger
    # the manage_video_subtitles_save to create the missing transcript with particular html5_id.
    if not metadata_was_changed_by_user and self.sub and hasattr(self, 'html5_sources'):
        html5_ids = get_html5_ids(self.html5_sources)
        for subs_id in html5_ids:
            try:
                Transcript.asset(self.location, subs_id)
            except NotFoundError:
                # If a transcript does not exist with particular html5_id then there is no need to check other
                # html5_ids because we have to create a new transcript with this missing html5_id by turning on
                # metadata_was_changed_by_user flag.
                metadata_was_changed_by_user = True
                break
    if metadata_was_changed_by_user:
        self.edx_video_id = self.edx_video_id and self.edx_video_id.strip()
        # We want to override `youtube_id_1_0` with val youtube profile in the first place when someone adds/edits
        # an `edx_video_id` or its underlying YT val profile. Without this, override will only happen when a user
        # saves the video second time. This is because of the syncing of basic and advanced video settings which
        # also syncs val youtube id from basic tab's `Video Url` to advanced tab's `Youtube ID`.
        if self.edx_video_id and edxval_api:
            val_youtube_id = edxval_api.get_url_for_profile(self.edx_video_id, 'youtube')
            if val_youtube_id and self.youtube_id_1_0 != val_youtube_id:
                self.youtube_id_1_0 = val_youtube_id
        manage_video_subtitles_save(
            self,
            user,
            old_metadata if old_metadata else None,
            generate_translation=True
        )
    def save_with_metadata(self, user):
        """
        Save this block and persist its updated metadata to the modulestore
        on behalf of `user` (whose id is recorded as the editor).
        """
        self.save()
        self.runtime.modulestore.update_item(self, user.id)
    @property
    def editable_metadata_fields(self):
        """
        The Studio-editable fields for this video: the inherited set with
        video-specific adjustments (license gating, transcript widget
        configuration, handout uploader, deprecated `sub` removal).
        """
        editable_fields = super().editable_metadata_fields
        settings_service = self.runtime.service(self, 'settings')
        if settings_service:
            xb_settings = settings_service.get_settings_bucket(self)
            # Hide the license field entirely unless licensing is enabled
            # for this block's settings bucket.
            if not xb_settings.get("licensing_enabled", False) and "license" in editable_fields:
                del editable_fields["license"]

        # Default Timed Transcript a.k.a `sub` has been deprecated and end users shall
        # not be able to modify it.
        editable_fields.pop('sub')

        languages = [{'label': label, 'code': lang} for lang, label in settings.ALL_LANGUAGES]
        languages.sort(key=lambda l: l['label'])
        editable_fields['transcripts']['custom'] = True
        editable_fields['transcripts']['languages'] = languages
        editable_fields['transcripts']['type'] = 'VideoTranslations'
        # We need to send ajax requests to show transcript status
        # whenever edx_video_id changes on frontend. Thats why we
        # are changing type to `VideoID` so that a specific
        # Backbonjs view can handle it.
        editable_fields['edx_video_id']['type'] = 'VideoID'

        # construct transcripts info and also find if `en` subs exist
        transcripts_info = self.get_transcripts_info()
        possible_sub_ids = [self.sub, self.youtube_id_1_0] + get_html5_ids(self.html5_sources)
        # NOTE(review): the loop variable is immediately shadowed by the tuple
        # unpacked from get_transcript(); the loop appears to serve only as a
        # bounded retry of the same 'en' lookup — confirm against get_transcript's
        # caching/side effects before simplifying.
        for sub_id in possible_sub_ids:
            try:
                _, sub_id, _ = get_transcript(self, lang='en', output_format=Transcript.TXT)
                transcripts_info['transcripts'] = dict(transcripts_info['transcripts'], en=sub_id)
                break
            except NotFoundError:
                continue

        editable_fields['transcripts']['value'] = transcripts_info['transcripts']
        editable_fields['transcripts']['urlRoot'] = self.runtime.handler_url(
            self,
            'studio_transcript',
            'translation'
        ).rstrip('/?')
        editable_fields['handout']['type'] = 'FileUploader'

        return editable_fields
@classmethod
def parse_xml_new_runtime(cls, node, runtime, keys):
"""
Implement the video block's special XML parsing requirements for the
new runtime only. For all other runtimes, use the existing XModule-style
methods like .from_xml().
"""
video_block = runtime.construct_xblock_from_class(cls, keys)
field_data = cls.parse_video_xml(node)
for key, val in field_data.items():
if key not in cls.fields: # lint-amnesty, pylint: disable=unsupported-membership-test
continue # parse_video_xml returns some old non-fields like 'source'
setattr(video_block, key, cls.fields[key].from_json(val)) # lint-amnesty, pylint: disable=unsubscriptable-object
# Don't use VAL in the new runtime:
video_block.edx_video_id = None
return video_block
    @classmethod
    def from_xml(cls, xml_data, system, id_generator):
        """
        Creates an instance of this descriptor from the supplied xml_data.
        This may be overridden by subclasses.

        Arguments:
            xml_data: A string of xml that will be translated into data and
                children for this module.
            system: A DescriptorSystem for interacting with external resources.
            id_generator: used to generate course-specific urls and identifiers.

        Returns:
            The constructed video block, with `edx_video_id` refreshed from
            any VAL info embedded in the XML.
        """
        xml_object = etree.fromstring(xml_data)
        url_name = xml_object.get('url_name', xml_object.get('slug'))
        block_type = 'video'
        definition_id = id_generator.create_definition(block_type, url_name)
        usage_id = id_generator.create_usage(definition_id)
        # A pointer tag is a stub element that refers to a separate file
        # holding the real definition; load that file and its asides instead.
        if is_pointer_tag(xml_object):
            filepath = cls._format_filepath(xml_object.tag, name_to_pathname(url_name))
            xml_object = cls.load_file(filepath, system.resources_fs, usage_id)
            system.parse_asides(xml_object, definition_id, usage_id, id_generator)
        field_data = cls.parse_video_xml(xml_object, id_generator)
        kvs = InheritanceKeyValueStore(initial_values=field_data)
        field_data = KvsFieldData(kvs)
        video = system.construct_xblock_from_class(
            cls,
            # We're loading a descriptor, so student_id is meaningless
            # We also don't have separate notions of definition and usage ids yet,
            # so we use the location for both
            ScopeIds(None, block_type, definition_id, usage_id),
            field_data,
        )

        # Update VAL with info extracted from `xml_object`
        video.edx_video_id = video.import_video_info_into_val(
            xml_object,
            system.resources_fs,
            getattr(id_generator, 'target_course_id', None)
        )

        return video
    def definition_to_xml(self, resource_fs):  # lint-amnesty, pylint: disable=too-many-statements
        """
        Return an lxml ``<video>`` Element representing this module, including
        its sources, track, handout, transcript references, exported edxval
        metadata, and license.

        (Despite the name suggesting a string, this returns an etree Element.)
        """
        xml = etree.Element('video')
        youtube_string = create_youtube_string(self)
        # Mild workaround to ensure that tests pass -- if a field
        # is set to its default value, we don't need to write it out.
        if youtube_string and youtube_string != '1.00:3_yD_cEKoCk':
            xml.set('youtube', str(youtube_string))
        xml.set('url_name', self.url_name)
        attrs = [
            ('display_name', self.display_name),
            ('show_captions', json.dumps(self.show_captions)),
            ('start_time', self.start_time),
            ('end_time', self.end_time),
            ('sub', self.sub),
            ('download_track', json.dumps(self.download_track)),
            ('download_video', json.dumps(self.download_video))
        ]
        for key, value in attrs:
            # Mild workaround to ensure that tests pass -- if a field
            # is set to its default value, we don't write it out.
            if value:
                if key in self.fields and self.fields[key].is_set_on(self):  # lint-amnesty, pylint: disable=unsubscriptable-object, unsupported-membership-test
                    try:
                        xml.set(key, str(value))
                    except UnicodeDecodeError:
                        exception_message = format_xml_exception_message(self.location, key, value)
                        log.exception(exception_message)
                        # If exception is UnicodeDecodeError set value using unicode 'utf-8' scheme.
                        log.info("Setting xml value using 'utf-8' scheme.")
                        xml.set(key, str(value, 'utf-8'))
                    except ValueError:
                        exception_message = format_xml_exception_message(self.location, key, value)
                        log.exception(exception_message)
                        raise

        for source in self.html5_sources:
            ele = etree.Element('source')
            ele.set('src', source)
            xml.append(ele)

        if self.track:
            ele = etree.Element('track')
            ele.set('src', self.track)
            xml.append(ele)

        if self.handout:
            ele = etree.Element('handout')
            ele.set('src', self.handout)
            xml.append(ele)

        transcripts = {}
        if self.transcripts is not None:
            transcripts.update(self.transcripts)

        edx_video_id = clean_video_id(self.edx_video_id)

        if edxval_api and edx_video_id:
            try:
                # Create static dir if not created earlier.
                resource_fs.makedirs(EXPORT_IMPORT_STATIC_DIR, recreate=True)

                # Backward compatible exports
                # edxval exports new transcripts into the course OLX and returns a transcript
                # files map so that it can also be rewritten in old transcript metadata fields
                # (i.e. `self.transcripts`) on import and older open-releases (<= ginkgo),
                # who do not have deprecated contentstore yet, can also import and use new-style
                # transcripts into their openedX instances.
                exported_metadata = edxval_api.export_to_xml(
                    video_id=edx_video_id,
                    resource_fs=resource_fs,
                    static_dir=EXPORT_IMPORT_STATIC_DIR,
                    course_id=str(self.runtime.course_id.for_branch(None))
                )

                # Update xml with edxval metadata
                xml.append(exported_metadata['xml'])

                # we don't need sub if english transcript
                # is also in new transcripts.
                new_transcripts = exported_metadata['transcripts']
                transcripts.update(new_transcripts)
                if new_transcripts.get('en'):
                    xml.set('sub', '')

                # Update `transcripts` attribute in the xml
                xml.set('transcripts', json.dumps(transcripts, sort_keys=True))
            except edxval_api.ValVideoNotFoundError:
                pass

        # Sorting transcripts for easy testing of resulting xml
        for transcript_language in sorted(transcripts.keys()):
            ele = etree.Element('transcript')
            ele.set('language', transcript_language)
            ele.set('src', transcripts[transcript_language])
            xml.append(ele)

        # handle license specifically
        self.add_license_to_xml(xml)

        return xml
def create_youtube_url(self, youtube_id):
"""
Args:
youtube_id: The ID of the video to create a link for
Returns:
A full youtube url to the video whose ID is passed in
"""
if youtube_id:
return f'https://www.youtube.com/watch?v={youtube_id}'
else:
return ''
    def get_context(self):
        """
        Extend the editing context with data for the transcripts "basic" tab:
        display name, the video URL list (with VAL-preferred source first),
        and the edx_video_id field metadata.
        """
        _context = super().get_context()

        # Deep-copied so the tab-specific mutations below don't leak back
        # into the shared editable_metadata_fields structures.
        metadata_fields = copy.deepcopy(self.editable_metadata_fields)

        display_name = metadata_fields['display_name']
        video_url = metadata_fields['html5_sources']
        video_id = metadata_fields['edx_video_id']
        youtube_id_1_0 = metadata_fields['youtube_id_1_0']

        def get_youtube_link(video_id):
            """
            Returns the fully-qualified YouTube URL for the given video identifier
            """
            # First try a lookup in VAL. If we have a YouTube entry there, it overrides the
            # one passed in.
            if self.edx_video_id and edxval_api:
                val_youtube_id = edxval_api.get_url_for_profile(self.edx_video_id, "youtube")
                if val_youtube_id:
                    video_id = val_youtube_id

            return self.create_youtube_url(video_id)

        _ = self.runtime.service(self, "i18n").ugettext
        video_url.update({
            'help': _('The URL for your video. This can be a YouTube URL or a link to an .mp4, .ogg, or '
                      '.webm video file hosted elsewhere on the Internet.'),
            'display_name': _('Default Video URL'),
            'field_name': 'video_url',
            'type': 'VideoList',
            'default_value': [get_youtube_link(youtube_id_1_0['default_value'])]
        })

        source_url = self.create_youtube_url(youtube_id_1_0['value'])
        # First try a lookup in VAL. If any video encoding is found given the video id then
        # override the source_url with it.
        if self.edx_video_id and edxval_api:
            val_profiles = ['youtube', 'desktop_webm', 'desktop_mp4']
            if HLSPlaybackEnabledFlag.feature_enabled(self.runtime.course_id.for_branch(None)):
                val_profiles.append('hls')

            # Get video encodings for val profiles.
            val_video_encodings = edxval_api.get_urls_for_profiles(self.edx_video_id, val_profiles)

            # VAL's youtube source has greater priority over external youtube source.
            if val_video_encodings.get('youtube'):
                source_url = self.create_youtube_url(val_video_encodings['youtube'])

            # If no youtube source is provided externally or in VAl, update source_url in order: hls > mp4 and webm
            if not source_url:
                if val_video_encodings.get('hls'):
                    source_url = val_video_encodings['hls']
                elif val_video_encodings.get('desktop_mp4'):
                    source_url = val_video_encodings['desktop_mp4']
                elif val_video_encodings.get('desktop_webm'):
                    source_url = val_video_encodings['desktop_webm']

        # Only add if html5 sources do not already contain source_url.
        if source_url and source_url not in video_url['value']:
            video_url['value'].insert(0, source_url)

        metadata = {
            'display_name': display_name,
            'video_url': video_url,
            'edx_video_id': video_id
        }

        _context.update({'transcripts_basic_tab_metadata': metadata})
        return _context
@classmethod
def _parse_youtube(cls, data):
"""
Parses a string of Youtube IDs such as "1.0:AXdE34_U,1.5:VO3SxfeD"
into a dictionary. Necessary for backwards compatibility with
XML-based courses.
"""
ret = {'0.75': '', '1.00': '', '1.25': '', '1.50': ''}
videos = data.split(',')
for video in videos:
pieces = video.split(':')
try:
speed = '%.2f' % float(pieces[0]) # normalize speed
# Handle the fact that youtube IDs got double-quoted for a period of time.
# Note: we pass in "VideoFields.youtube_id_1_0" so we deserialize as a String--
# it doesn't matter what the actual speed is for the purposes of deserializing.
youtube_id = deserialize_field(cls.youtube_id_1_0, pieces[1])
ret[speed] = youtube_id
except (ValueError, IndexError):
log.warning('Invalid YouTube ID: %s', video)
return ret
@classmethod
def parse_video_xml(cls, xml, id_generator=None):
"""
Parse video fields out of xml_data. The fields are set if they are
present in the XML.
Arguments:
id_generator is used to generate course-specific urls and identifiers
"""
if isinstance(xml, str):
xml = etree.fromstring(xml)
field_data = {}
# Convert between key types for certain attributes --
# necessary for backwards compatibility.
conversions = {
# example: 'start_time': cls._example_convert_start_time
}
# Convert between key names for certain attributes --
# necessary for backwards compatibility.
compat_keys = {
'from': 'start_time',
'to': 'end_time'
}
sources = xml.findall('source')
if sources:
field_data['html5_sources'] = [ele.get('src') for ele in sources]
track = xml.find('track')
if track is not None:
field_data['track'] = track.get('src')
handout = xml.find('handout')
if handout is not None:
field_data['handout'] = handout.get('src')
transcripts = xml.findall('transcript')
if transcripts:
field_data['transcripts'] = {tr.get('language'): tr.get('src') for tr in transcripts}
for attr, value in xml.items():
if attr in compat_keys: # lint-amnesty, pylint: disable=consider-using-get
attr = compat_keys[attr]
if attr in cls.metadata_to_strip + ('url_name', 'name'):
continue
if attr == 'youtube':
speeds = cls._parse_youtube(value)
for speed, youtube_id in speeds.items():
# should have made these youtube_id_1_00 for
# cleanliness, but hindsight doesn't need glasses
normalized_speed = speed[:-1] if speed.endswith('0') else speed
# If the user has specified html5 sources, make sure we don't use the default video
if youtube_id != '' or 'html5_sources' in field_data:
field_data['youtube_id_{}'.format(normalized_speed.replace('.', '_'))] = youtube_id
elif attr in conversions:
field_data[attr] = conversions[attr](value)
elif attr not in cls.fields: # lint-amnesty, pylint: disable=unsupported-membership-test
field_data.setdefault('xml_attributes', {})[attr] = value
else:
# We export values with json.dumps (well, except for Strings, but
# for about a month we did it for Strings also).
field_data[attr] = deserialize_field(cls.fields[attr], value) # lint-amnesty, pylint: disable=unsubscriptable-object
course_id = getattr(id_generator, 'target_course_id', None)
# Update the handout location with current course_id
if 'handout' in list(field_data.keys()) and course_id:
handout_location = StaticContent.get_location_from_path(field_data['handout'])
if isinstance(handout_location, AssetLocator):
handout_new_location = StaticContent.compute_location(course_id, handout_location.path)
field_data['handout'] = StaticContent.serialize_asset_key_with_slash(handout_new_location)
# For backwards compatibility: Add `source` if XML doesn't have `download_video`
# attribute.
if 'download_video' not in field_data and sources:
field_data['source'] = field_data['html5_sources'][0]
# For backwards compatibility: if XML doesn't have `download_track` attribute,
# it means that it is an old format. So, if `track` has some value,
# `download_track` needs to have value `True`.
if 'download_track' not in field_data and track is not None:
field_data['download_track'] = True
# load license if it exists
field_data = LicenseMixin.parse_license_from_xml(field_data, xml)
return field_data
def import_video_info_into_val(self, xml, resource_fs, course_id):
"""
Import parsed video info from `xml` into edxval.
Arguments:
xml (lxml object): xml representation of video to be imported.
resource_fs (OSFS): Import file system.
course_id (str): course id
"""
edx_video_id = clean_video_id(self.edx_video_id)
# Create video_asset is not already present.
video_asset_elem = xml.find('video_asset')
if video_asset_elem is None:
video_asset_elem = etree.Element('video_asset')
# This will be a dict containing the list of names of the external transcripts.
# Example:
# {
# 'en': ['The_Flash.srt', 'Harry_Potter.srt'],
# 'es': ['Green_Arrow.srt']
# }
external_transcripts = defaultdict(list)
# Add trancript from self.sub and self.youtube_id_1_0 fields.
external_transcripts['en'] = [
subs_filename(transcript, 'en')
for transcript in [self.sub, self.youtube_id_1_0] if transcript
]
for language_code, transcript in self.transcripts.items():
external_transcripts[language_code].append(transcript)
if edxval_api:
edx_video_id = edxval_api.import_from_xml(
video_asset_elem,
edx_video_id,
resource_fs,
EXPORT_IMPORT_STATIC_DIR,
external_transcripts,
course_id=course_id
)
return edx_video_id
def index_dictionary(self):
xblock_body = super().index_dictionary()
video_body = {
"display_name": self.display_name,
}
def _update_transcript_for_index(language=None):
""" Find video transcript - if not found, don't update index """
try:
transcript = get_transcript(self, lang=language, output_format=Transcript.TXT)[0].replace("\n", " ")
transcript_index_name = f"transcript_{language if language else self.transcript_language}"
video_body.update({transcript_index_name: transcript})
except NotFoundError:
pass
if self.sub:
_update_transcript_for_index()
# Check to see if there are transcripts in other languages besides default transcript
if self.transcripts:
for language in self.transcripts.keys():
_update_transcript_for_index(language)
if "content" in xblock_body:
xblock_body["content"].update(video_body)
else:
xblock_body["content"] = video_body
xblock_body["content_type"] = "Video"
return xblock_body
    @property
    def request_cache(self):
        """
        Return the "request_cache" runtime service for this block.
        """
        return self.runtime.service(self, "request_cache")
    @classmethod
    @request_cached(
        # args[0] is `cls`; args[1] is the `request_cache` service instance,
        # which scopes the memoized result to the current request.
        request_cache_getter=lambda args, kwargs: args[1],
    )
    def get_cached_val_data_for_course(cls, request_cache, video_profile_names, course_id):  # lint-amnesty, pylint: disable=unused-argument
        """
        Returns the VAL data for the requested video profiles for the given course.

        `request_cache` is unused in the body; it exists so the decorator
        above can key the cache on the per-request cache instance.
        """
        return edxval_api.get_video_info_for_course_and_profiles(str(course_id), video_profile_names)
    def student_view_data(self, context=None):
        """
        Returns a JSON representation of the student_view of this XModule.
        The contract of the JSON content is between the caller and the particular XModule.

        The returned dict contains: only_on_web, duration, transcripts
        (lang -> download handler url), encoded_videos (profile -> url/size),
        and all_sources -- unless only_on_web is set, in which case only that
        flag is returned.
        """
        context = context or {}

        # If the "only_on_web" field is set on this video, do not return the rest of the video's data
        # in this json view, since this video is to be accessed only through its web view."
        if self.only_on_web:
            return {"only_on_web": True}

        encoded_videos = {}
        val_video_data = {}
        all_sources = self.html5_sources or []

        # Check in VAL data first if edx_video_id exists
        if self.edx_video_id:
            video_profile_names = context.get("profiles", ["mobile_low", 'desktop_mp4', 'desktop_webm', 'mobile_high'])
            if HLSPlaybackEnabledFlag.feature_enabled(self.location.course_key) and 'hls' not in video_profile_names:
                video_profile_names.append('hls')

            # get and cache bulk VAL data for course
            val_course_data = self.get_cached_val_data_for_course(
                self.request_cache,
                video_profile_names,
                self.location.course_key,
            )
            val_video_data = val_course_data.get(self.edx_video_id, {})

            # Get the encoded videos if data from VAL is found
            if val_video_data:
                encoded_videos = val_video_data.get('profiles', {})

            # If information for this edx_video_id is not found in the bulk course data, make a
            # separate request for this individual edx_video_id, unless cache misses are disabled.
            # This is useful/required for videos that don't have a course designated, such as the introductory video
            # that is shared across many courses. However, this results in a separate database request so watch
            # out for any performance hit if many such videos exist in a course. Set the 'allow_cache_miss' parameter
            # to False to disable this fall back.
            elif context.get("allow_cache_miss", "True").lower() == "true":
                try:
                    val_video_data = edxval_api.get_video_info(self.edx_video_id)
                    # Unfortunately, the VAL API is inconsistent in how it returns the encodings, so remap here.
                    for enc_vid in val_video_data.pop('encoded_videos'):
                        if enc_vid['profile'] in video_profile_names:
                            encoded_videos[enc_vid['profile']] = {key: enc_vid[key] for key in ["url", "file_size"]}
                except edxval_api.ValVideoNotFoundError:
                    pass

        # Fall back to other video URLs in the video module if not found in VAL
        if not encoded_videos:
            if all_sources:
                encoded_videos["fallback"] = {
                    "url": all_sources[0],
                    "file_size": 0,  # File size is unknown for fallback URLs
                }

            # Include youtube link if there is no encoding for mobile- ie only a fallback URL or no encodings at all
            # We are including a fallback URL for older versions of the mobile app that don't handle Youtube urls
            if self.youtube_id_1_0:
                encoded_videos["youtube"] = {
                    "url": self.create_youtube_url(self.youtube_id_1_0),
                    "file_size": 0,  # File size is not relevant for external link
                }

        available_translations = self.available_translations(self.get_transcripts_info())
        transcripts = {
            lang: self.runtime.handler_url(self, 'transcript', 'download', query="lang=" + lang, thirdparty=True)
            for lang in available_translations
        }

        return {
            "only_on_web": self.only_on_web,
            "duration": val_video_data.get('duration', None),
            "transcripts": transcripts,
            "encoded_videos": encoded_videos,
            "all_sources": all_sources,
        }
| en | 0.825469 | Video is ungraded Xmodule for support video content. It's new improved video module, which support additional feature: - Can play non-YouTube video sources via in-browser HTML5 video player. - YouTube defaults to HTML5 mode from the start. - Speed changes in both YouTube and non-YouTube videos happen via in-browser HTML5 video method (when in HTML5 mode). - Navigational subtitles can be disabled altogether via an attribute in XML. Examples of html5 videos for manual testing: https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.mp4 https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.webm https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.ogv # The following import/except block for edxval is temporary measure until # edxval is a proper XBlock Runtime Service. # # Here's the deal: the VideoBlock should be able to take advantage of edx-val # (https://github.com/edx/edx-val) to figure out what URL to give for video # resources that have an edx_video_id specified. edx-val is a Django app, and # including it causes tests to fail because we run common/lib tests standalone # without Django dependencies. The alternatives seem to be: # # 1. Move VideoBlock out of edx-platform. # 2. Accept the Django dependency in common/lib. # 3. Try to import, catch the exception on failure, and check for the existence # of edxval_api before invoking it in the code. # 4. Make edxval an XBlock Runtime Service # # (1) is a longer term goal. VideoBlock should be made into an XBlock and # extracted from edx-platform entirely. But that's expensive to do because of # the various dependencies (like templates). Need to sort this out. # (2) is explicitly discouraged. # (3) is what we're doing today. The code is still functional when called within # the context of the LMS, but does not cause failure on import when running # standalone tests. 
Most VideoBlock tests tend to be in the LMS anyway, # probably for historical reasons, so we're not making things notably worse. # (4) is one of the next items on the backlog for edxval, and should get rid # of this particular import silliness. It's just that I haven't made one before, # and I was worried about trying it with my deadline constraints. # Make '_' a no-op so we can scrape strings. Using lambda instead of # `django.utils.translation.ugettext_noop` because Django cannot be imported in this file XML source example: <video show_captions="true" youtube="0.75:jNCf2gIqpeE,1.0:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg" url_name="lecture_21_3" display_name="S19V3: Vacancies" > <source src=".../mit-3091x/M-3091X-FA12-L21-3_100.mp4"/> <source src=".../mit-3091x/M-3091X-FA12-L21-3_100.webm"/> <source src=".../mit-3091x/M-3091X-FA12-L21-3_100.ogv"/> </video> Return transcript information necessary for rendering the XModule student view. This is more or less a direct extraction from `get_html`. Args: transcripts (dict): A dict with all transcripts and a sub. Returns: Tuple of (track_url, transcript_language, sorted_languages) track_url -> subtitle download url transcript_language -> default transcript language sorted_languages -> dictionary of available transcript languages # OrderedDict for easy testing of rendered context in tests Return True if youtube is deprecated and hls as primary playback is enabled else False # Return False if `hls` playback feature is disabled. # check if youtube has been deprecated and hls as primary playback # is enabled for this course # lint-amnesty, pylint: disable=missing-function-docstring # Only courses have this flag Decide whether hls can be prioritized as primary playback or not. If both the youtube and hls sources are present then make decision on flag If only either youtube or hls is present then play whichever is present Return the student view. Renders the Studio preview view. Return the studio view. 
Returns a fragment that contains the html for the public view # The new runtime can support anonymous users as fully as regular users: # lint-amnesty, pylint: disable=arguments-differ, too-many-statements # Determine if there is an alternative source for this video # based on user locale. This exists to support cases where # we leverage a geography specific CDN, like China. # If we have an edx_video_id, we prefer its values over what we store # internally for download links (source, html5_sources) and the youtube # stream. # lint-amnesty, pylint: disable=too-many-nested-blocks # strip edx_video_id to prevent ValVideoNotFoundError error if unwanted spaces are there. TNL-5769 # VAL will always give us the keys for the profiles we asked for, but # if it doesn't have an encoded video entry for that Video + Profile, the # value will map to `None` # add the non-youtube urls to the list of alternative sources # use the last non-None non-youtube non-hls url as the link to download the video # don't include hls urls for download # function returns None when the url cannot be re-written # set the youtube url # get video duration # VAL raises this exception if it can't find data for the edx video ID. This can happen if the # course data is ported to a machine that does not have the VAL data. So for now, pass on this # exception and fallback to whatever we find in the VideoBlock. # If the user comes from China use China CDN for html5 videos. # 'CN' is China ISO 3166-1 country code. # Video caching is disabled for Studio. User_location is always None in Studio. # CountryMiddleware disabled for Studio. # If there was no edx_video_id, or if there was no download specified # for it, we fall back on whatever we find in the VideoBlock. 
# don't give the option to download HLS video urls # lint-amnesty, pylint: disable=attribute-defined-outside-init # pylint: disable=W0201 # lint-amnesty, pylint: disable=unused-variable # This is the setting that controls whether the autoadvance button will be visible, not whether the # video will autoadvance or not. # For autoadvance controls to be shown, both the feature flag and the course setting must be true. # This allows to enable the feature for certain courses only. # This is the current status of auto-advance (not the control visibility). # But when controls aren't visible we force it to off. The student might have once set the preference to # true, but now staff or admin have hidden the autoadvance button and the student won't be able to disable # it anymore; therefore we force-disable it in this case (when controls aren't visible). # For now, the option "data-autohide-html5" is hard coded. This option # either enables or disables autohiding of controls and captions on mouse # inactivity. If set to true, controls and captions will autohide for # HTML5 sources (non-YouTube) after a period of mouse inactivity over the # whole video. When the mouse moves (or a key is pressed while any part of # the video player is focused), the captions and controls will be shown # once again. # # There is no option in the "Advanced Editor" to set this option. However, # this option will have an effect if changed to "True". The code on # front-end exists. # This won't work when we move to data that # isn't on the filesystem # pylint: disable=no-member # This is the server's guess at whether youtube is available for # this user, based on what was recorded the last time we saw the # user, and defaulting to True. # pylint: disable=no-member # pylint: disable=no-member # In the new runtime, get YouTube metadata via a handler. The handler supports anonymous users and # can work in sandboxed iframes. 
In the old runtime, the JS will call the LMS's yt_video_metadata # API endpoint directly (not an XBlock handler). # pylint: disable=E1101 # lint-amnesty, pylint: disable=unsubscriptable-object Validates the state of this Video XBlock instance. This is the override of the general XBlock method, and it will also ask its superclass to validate. # lint-amnesty, pylint: disable=unused-argument Used to update video values during `self`:save method from CMS. old_metadata: dict, values of fields of `self` with scope=settings which were explicitly set by user. old_content, same as `old_metadata` but for scope=content. Due to nature of code flow in item.py::_save_item, before current function is called, fields of `self` instance have been already updated, but not yet saved. To obtain values, which were changed by user input, one should compare own_metadata(self) and old_medatada. Video player has two tabs, and due to nature of sync between tabs, metadata from Basic tab is always sent when video player is edited and saved first time, for example: {'youtube_id_1_0': u'3_yD_cEKoCk', 'display_name': u'Video', 'sub': u'3_yD_cEKoCk', 'html5_sources': []}, that's why these fields will always present in old_metadata after first save. This should be fixed. At consequent save requests html5_sources are always sent too, disregard of their change by user. That means that html5_sources are always in list of fields that were changed (`metadata` param in save_item). This should be fixed too. # There is an edge case when old_metadata and own_metadata are same and we are importing transcript from youtube # then there is a syncing issue where html5_subs are not syncing with youtube sub, We can make sync better by # checking if transcript is present for the video and if any html5_ids transcript is not present then trigger # the manage_video_subtitles_save to create the missing transcript with particular html5_id. 
# If a transcript does not not exist with particular html5_id then there is no need to check other # html5_ids because we have to create a new transcript with this missing html5_id by turning on # metadata_was_changed_by_user flag. # We want to override `youtube_id_1_0` with val youtube profile in the first place when someone adds/edits # an `edx_video_id` or its underlying YT val profile. Without this, override will only happen when a user # saves the video second time. This is because of the syncing of basic and advanced video settings which # also syncs val youtube id from basic tab's `Video Url` to advanced tab's `Youtube ID`. Save module with updated metadata to database." # Default Timed Transcript a.k.a `sub` has been deprecated and end users shall # not be able to modify it. # We need to send ajax requests to show transcript status # whenever edx_video_id changes on frontend. Thats why we # are changing type to `VideoID` so that a specific # Backbonjs view can handle it. # construct transcripts info and also find if `en` subs exist Implement the video block's special XML parsing requirements for the new runtime only. For all other runtimes, use the existing XModule-style methods like .from_xml(). # lint-amnesty, pylint: disable=unsupported-membership-test # parse_video_xml returns some old non-fields like 'source' # lint-amnesty, pylint: disable=unsubscriptable-object # Don't use VAL in the new runtime: Creates an instance of this descriptor from the supplied xml_data. 
This may be overridden by subclasses xml_data: A string of xml that will be translated into data and children for this module system: A DescriptorSystem for interacting with external resources id_generator is used to generate course-specific urls and identifiers # We're loading a descriptor, so student_id is meaningless # We also don't have separate notions of definition and usage ids yet, # so we use the location for both # Update VAL with info extracted from `xml_object` # lint-amnesty, pylint: disable=too-many-statements Returns an xml string representing this module. # Mild workaround to ensure that tests pass -- if a field # is set to its default value, we don't need to write it out. # Mild workaround to ensure that tests pass -- if a field # is set to its default value, we don't write it out. # lint-amnesty, pylint: disable=unsubscriptable-object, unsupported-membership-test # If exception is UnicodeDecodeError set value using unicode 'utf-8' scheme. # Create static dir if not created earlier. # Backward compatible exports # edxval exports new transcripts into the course OLX and returns a transcript # files map so that it can also be rewritten in old transcript metadata fields # (i.e. `self.transcripts`) on import and older open-releases (<= ginkgo), # who do not have deprecated contentstore yet, can also import and use new-style # transcripts into their openedX instances. # Update xml with edxval metadata # we don't need sub if english transcript # is also in new transcripts. # Update `transcripts` attribute in the xml # Sorting transcripts for easy testing of resulting xml # handle license specifically Args: youtube_id: The ID of the video to create a link for Returns: A full youtube url to the video whose ID is passed in Extend context by data for transcript basic tab. Returns the fully-qualified YouTube URL for the given video identifier # First try a lookup in VAL. If we have a YouTube entry there, it overrides the # one passed in. 
# First try a lookup in VAL. If any video encoding is found given the video id then # override the source_url with it. # Get video encodings for val profiles. # VAL's youtube source has greater priority over external youtube source. # If no youtube source is provided externally or in VAl, update source_url in order: hls > mp4 and webm # Only add if html5 sources do not already contain source_url. Parses a string of Youtube IDs such as "1.0:AXdE34_U,1.5:VO3SxfeD" into a dictionary. Necessary for backwards compatibility with XML-based courses. # normalize speed # Handle the fact that youtube IDs got double-quoted for a period of time. # Note: we pass in "VideoFields.youtube_id_1_0" so we deserialize as a String-- # it doesn't matter what the actual speed is for the purposes of deserializing. Parse video fields out of xml_data. The fields are set if they are present in the XML. Arguments: id_generator is used to generate course-specific urls and identifiers # Convert between key types for certain attributes -- # necessary for backwards compatibility. # example: 'start_time': cls._example_convert_start_time # Convert between key names for certain attributes -- # necessary for backwards compatibility. # lint-amnesty, pylint: disable=consider-using-get # should have made these youtube_id_1_00 for # cleanliness, but hindsight doesn't need glasses # If the user has specified html5 sources, make sure we don't use the default video # lint-amnesty, pylint: disable=unsupported-membership-test # We export values with json.dumps (well, except for Strings, but # for about a month we did it for Strings also). # lint-amnesty, pylint: disable=unsubscriptable-object # Update the handout location with current course_id # For backwards compatibility: Add `source` if XML doesn't have `download_video` # attribute. # For backwards compatibility: if XML doesn't have `download_track` attribute, # it means that it is an old format. 
So, if `track` has some value, # `download_track` needs to have value `True`. # load license if it exists Import parsed video info from `xml` into edxval. Arguments: xml (lxml object): xml representation of video to be imported. resource_fs (OSFS): Import file system. course_id (str): course id # Create video_asset is not already present. # This will be a dict containing the list of names of the external transcripts. # Example: # { # 'en': ['The_Flash.srt', 'Harry_Potter.srt'], # 'es': ['Green_Arrow.srt'] # } # Add trancript from self.sub and self.youtube_id_1_0 fields. Find video transcript - if not found, don't update index # Check to see if there are transcripts in other languages besides default transcript Returns the request_cache from the runtime. # lint-amnesty, pylint: disable=unused-argument Returns the VAL data for the requested video profiles for the given course. Returns a JSON representation of the student_view of this XModule. The contract of the JSON content is between the caller and the particular XModule. # If the "only_on_web" field is set on this video, do not return the rest of the video's data # in this json view, since this video is to be accessed only through its web view." # Check in VAL data first if edx_video_id exists # get and cache bulk VAL data for course # Get the encoded videos if data from VAL is found # If information for this edx_video_id is not found in the bulk course data, make a # separate request for this individual edx_video_id, unless cache misses are disabled. # This is useful/required for videos that don't have a course designated, such as the introductory video # that is shared across many courses. However, this results in a separate database request so watch # out for any performance hit if many such videos exist in a course. Set the 'allow_cache_miss' parameter # to False to disable this fall back. # Unfortunately, the VAL API is inconsistent in how it returns the encodings, so remap here. 
# Fall back to other video URLs in the video module if not found in VAL # File size is unknown for fallback URLs # Include youtube link if there is no encoding for mobile- ie only a fallback URL or no encodings at all # We are including a fallback URL for older versions of the mobile app that don't handle Youtube urls # File size is not relevant for external link | 1.878158 | 2 |
python/py3study/async-mix-stream-websocket/udp-websocket-server.py | sillyemperor/langstudy | 0 | 6620460 | """
演示如何结合UDP服务和websocket
"""
import asyncio
import aiohttp_jinja2
from aiohttp import web
import jinja2
import sys
# Shared latch between the UDP listener and the websocket writers:
# `event` fires whenever a fresh `message` payload has arrived.
event = asyncio.Event()
message = None
class MyServerUdpEchoProtocol:
    """Datagram protocol that publishes every received UDP payload
    through the module-level ``message``/``event`` pair."""
    def connection_made(self, transport):
        # Keep the transport so replies could be sent later if needed.
        self.transport = transport
        print('start', transport)
    def datagram_received(self, data, addr):
        global message
        print('Data received:', data, addr)
        decoded = data.decode()
        message = decoded
        event.set()
    def error_received(self, exc):
        print('Error received:', exc)
    def connection_lost(self, exc):
        print('stop', exc)
async def index(request):
    """Serve the landing page, or stream UDP messages over a websocket.

    A plain HTTP GET renders ``index.html``; a websocket upgrade enters an
    endless loop that forwards each UDP payload to the client.
    """
    global event
    ws_current = web.WebSocketResponse()
    ws_ready = ws_current.can_prepare(request)
    if not ws_ready.ok:
        # Not a websocket handshake: answer with the regular HTML page.
        return aiohttp_jinja2.render_template('index.html', request, {})
    await ws_current.prepare(request)
    while True:
        # Block until the UDP protocol signals a new payload, forward it,
        # then re-arm by rebinding a fresh Event (resets the latch for the
        # next datagram; the protocol sets whatever `event` currently is).
        await event.wait()
        await ws_current.send_str(message)
        event = asyncio.Event()
async def init_app():
    """Build the aiohttp application and start the UDP listener.

    Registers the index route, the jinja2 template loader, the shutdown
    hook, and schedules a datagram endpoint on 0.0.0.0:6066 that feeds
    the websocket handlers.
    """
    app = web.Application()
    app['websockets'] = {}
    app.on_shutdown.append(shutdown)
    aiohttp_jinja2.setup(
        app, loader=jinja2.PackageLoader(__loader__.name, 'templates'))
    app.router.add_get('/', index)
    # NOTE(review): get_event_loop()/bare asyncio.Task are deprecated
    # patterns on modern Python; loop.create_datagram_endpoint is merely
    # scheduled here, its result is never awaited or kept.
    loop = asyncio.get_event_loop()
    asyncio.Task(loop.create_datagram_endpoint(
        MyServerUdpEchoProtocol, local_addr=('0.0.0.0', 6066)))
    return app
async def shutdown(app):
    """Application shutdown hook: close every tracked websocket, then
    drop the bookkeeping dict so no stale connection is kept around."""
    sockets = app['websockets']
    for socket in list(sockets.values()):
        await socket.close()
    sockets.clear()
def main():
    """Start the web server.

    ``init_app`` is a coroutine function, so ``app`` here is a coroutine
    object; aiohttp's ``web.run_app`` accepts and awaits it.
    """
    app = init_app()
    web.run_app(app)
if __name__ == '__main__':
main() | """
演示如何结合UDP服务和websocket
"""
import asyncio
import aiohttp_jinja2
from aiohttp import web
import jinja2
import sys
# Module-level rendezvous between the UDP protocol and the websocket
# handlers: `event` signals that a fresh `message` payload is available.
event = asyncio.Event()
message = None
class MyServerUdpEchoProtocol:
    """Datagram protocol that publishes each UDP payload via the
    module-level ``message``/``event`` pair."""
    def connection_made(self, transport):
        print('start', transport)
        self.transport = transport
    def datagram_received(self, data, addr):
        global message
        print('Data received:', data, addr)
        message = data.decode()
        event.set()
    def error_received(self, exc):
        print('Error received:', exc)
    def connection_lost(self, exc):
        print('stop', exc)
async def index(request):
    """Serve the page, or stream UDP messages over a websocket upgrade."""
    global event
    ws_current = web.WebSocketResponse()
    ws_ready = ws_current.can_prepare(request)
    if not ws_ready.ok:
        # Plain HTTP request: render the landing page instead.
        return aiohttp_jinja2.render_template('index.html', request, {})
    await ws_current.prepare(request)
    while True:
        # Wait for the next datagram, forward it, then re-arm the latch
        # by rebinding a fresh Event.
        await event.wait()
        await ws_current.send_str(message)
        event = asyncio.Event()
async def init_app():
    """Build the aiohttp app and start the UDP listener on port 6066."""
    app = web.Application()
    app['websockets'] = {}
    app.on_shutdown.append(shutdown)
    aiohttp_jinja2.setup(
        app, loader=jinja2.PackageLoader(__loader__.name, 'templates'))
    app.router.add_get('/', index)
    loop = asyncio.get_event_loop()
    asyncio.Task(loop.create_datagram_endpoint(
        MyServerUdpEchoProtocol, local_addr=('0.0.0.0', 6066)))
    return app
async def shutdown(app):
    """App shutdown hook: close and forget every tracked websocket."""
    for ws in app['websockets'].values():
        await ws.close()
    app['websockets'].clear()
def main():
    """Run the app; init_app returns a coroutine, which run_app accepts."""
    app = init_app()
    web.run_app(app)
if __name__ == '__main__':
main() | ja | 0.357761 | 演示如何结合UDP服务和websocket | 2.844052 | 3 |
src/__init__.py | nastjamakh/home-credit-risk | 0 | 6620461 | <filename>src/__init__.py
# Load environment variables from a `.env` file in the current working
# directory as soon as the package is imported.
try:
    import dotenv
    from pathlib import Path
    # NOTE(review): find_dotenv() expects a filename to search upward
    # for; passing the absolute Path may work only incidentally — confirm.
    dotenv.load_dotenv(dotenv.find_dotenv(Path.cwd() / ".env"))  # type: ignore
except ModuleNotFoundError:
    # python-dotenv is optional: fall back to the plain process
    # environment and just record that loading was skipped.
    import logging
    logging.info("python-dotenv not found")
| <filename>src/__init__.py
# On import, pull environment variables from a `.env` file located in
# the current working directory.
try:
    import dotenv
    from pathlib import Path
    # NOTE(review): find_dotenv() normally takes a bare filename; the
    # absolute Path argument here should be double-checked.
    dotenv.load_dotenv(dotenv.find_dotenv(Path.cwd() / ".env"))  # type: ignore
except ModuleNotFoundError:
    # Optional dependency missing: log and continue with the plain
    # process environment.
    import logging
    logging.info("python-dotenv not found")
| it | 0.190853 | # type: ignore | 2.01798 | 2 |
twittoff/predict.py | nastyalolpro/twittoff-2 | 0 | 6620462 | <reponame>nastyalolpro/twittoff-2<filename>twittoff/predict.py<gh_stars>0
"""Prediction of users based on the twitter embeddings"""
import numpy as np
from sklearn.linear_model import LogisticRegression
from .models import User
from .twitter import vectorize_tweet
def predict_user(user0_name, user1_name, tweet_text):
    """Predict which of two users would author the hypothetical tweet.

    Trains a logistic regression on the stored tweet embeddings of both
    users and returns the classifier's prediction for *tweet_text* (an
    array holding the matched user's name label).
    """
    # Look both users up in the database, queried in argument order.
    queried_users = [
        User.query.filter(User.name == name).one()
        for name in (user0_name, user1_name)
    ]
    # Pair every stored tweet embedding with its author's name.
    embeddings = [tweet.vect for user in queried_users for tweet in user.tweets]
    labels = [user.name for user in queried_users for tweet in user.tweets]
    # Embed the hypothetical tweet the same way the stored ones were.
    tweet_vector = vectorize_tweet(tweet_text)
    # TODO: let user choose a model to train
    classifier = LogisticRegression()
    classifier.fit(embeddings, labels)
    return classifier.predict([tweet_vector])
| """Prediction of users based on the twitter embeddings"""
import numpy as np
from sklearn.linear_model import LogisticRegression
from .models import User
from .twitter import vectorize_tweet
def predict_user(user0_name, user1_name, tweet_text):
    """Predicts user based on the hypothetical tweet text
    return: 0 or 1 label, where 0 is user0_name, 1 - user1_name
    """
    # find usernames in the database
    user0 = User.query.filter(User.name == user0_name).one()
    user1 = User.query.filter(User.name == user1_name).one()
    # find their tweets
    embeddings = []
    labels = []
    for tweet in user0.tweets:
        embeddings.append(tweet.vect)
        labels.append(user0.name)
    for tweet in user1.tweets:
        embeddings.append(tweet.vect)
        labels.append(user1.name)
    # make a vector out of tweet_text
    tweet_text_vect = vectorize_tweet(tweet_text)
    # train model
    # TODO: let user choose a model to train
    classifier = LogisticRegression()
    classifier.fit(embeddings, labels)
    # NOTE(review): the labels are user *names*, so predict() yields a
    # name, not the 0/1 described in the docstring.
return classifier.predict([tweet_text_vect]) | en | 0.831283 | Prediction of users based on the twitter embeddings "Predicts user based on the hypothetical tweet text return: 0 or 1 label, where 0 is user0_name, 1 - user1_name # find usernames in the database # find their tweets # make a vector out of tweet_text # train model # TODO: let user choose a model to train | 3.685786 | 4 |
day02.py | guowengbo/python | 0 | 6620463 | import requests
# Request Baidu. Note: the URL must include the http/https scheme.
# Fix: the request below was commented out in the original, which left
# `response` undefined and made the headers print raise NameError.
response = requests.get('http://www.baidu.com')
print(response)
# print(response.text)  # NOTE: the original said `response.test`, a typo
print(response.headers)

from selenium import webdriver

driver = webdriver.Firefox()
driver.get('http://www.baidu.com') | import requests
# Request Baidu; note that the URL must include the http/https scheme.
# response = requests.get('http://www.baidu.com')
# print(response)
# print(response.test)
# NOTE(review): the request above is commented out, so `response` is
# never defined and the next line raises NameError when this runs.
print(response.headers)
from selenium import webdriver
driver = webdriver.Firefox()  # launches a real Firefox via geckodriver
driver.get('http://www.baidu.com') | en | 0.46468 | # 请求百度,需要注意:一定要带上http/https # response = requests.get('http://www.baidu.com') # print(response) # print(response.test) | 3.081038 | 3 |
puzzle/cli.py | DKorytkin/DataRobot | 0 | 6620464 | import sys
from pathlib import Path
from argparse import ArgumentParser, ArgumentTypeError
from puzzle import Finder, GridMaker
def positive_int(number: str) -> int:
    """Argparse ``type=`` hook: accept only strictly positive integers."""
    if number.isdigit() and int(number) > 0:
        return int(number)
    raise ArgumentTypeError("Argument must be integer and more 0")
def parser(args):
    """Build the CLI argument parser and parse *args* (sys.argv style)."""
    menu = ArgumentParser(
        "Puzzle",
        description=(
            "Programme generate crossword puzzle and "
            "try to find exist words from file"
        ),
    )
    menu.add_argument(
        "-f", "--file",
        dest="file",
        type=Path,
        # Defaults to the word list shipped next to this module.
        default=Path(__file__).resolve().parent / "words.txt",
        help="you can to change file with words",
    )
    menu.add_argument(
        "-l", "--length",
        dest="length",
        type=positive_int,
        default=15,
        help="you can to choice board length",
    )
    menu.add_argument(
        "-d", "--depth",
        dest="depth",
        type=positive_int,
        default=15,
        help="you can to choice board depth",
    )
    menu.add_argument(
        "-q", "--quiet",
        dest="quiet",
        action="store_true",
        help="you can to disable verbose mode",
    )
    return menu.parse_args(args)
def main():
    """Entry point: generate a random grid and report the words found.

    Board size and word file come from the command line; verbose output
    (the grid itself and the word list) is suppressed with --quiet.
    """
    params = parser(sys.argv[1:])
    grid = GridMaker(length=params.length, depth=params.depth).generate()
    if not params.quiet:
        print(f"Was generated puzzle size of {grid.length}x{grid.depth}:")
        print(grid)
    finder = Finder(grid)
    words = finder.find_words(params.file)
    print(f"Found {len(words)} words:")
    if not params.quiet:
        print(words)
| import sys
from pathlib import Path
from argparse import ArgumentParser, ArgumentTypeError
from puzzle import Finder, GridMaker
def positive_int(number: str) -> int:
if not number.isdigit() or int(number) <= 0:
raise ArgumentTypeError("Argument must be integer and more 0")
return int(number)
def parser(args):
menu = ArgumentParser(
"Puzzle",
description="Programme generate crossword puzzle and "
"try to find exist words from file",
)
menu.add_argument(
"-f",
"--file",
dest="file",
type=Path,
default=Path(__file__).resolve().parent / "words.txt",
help="you can to change file with words",
)
menu.add_argument(
"-l",
"--length",
dest="length",
type=positive_int,
default=15,
help="you can to choice board length",
)
menu.add_argument(
"-d",
"--depth",
dest="depth",
type=positive_int,
default=15,
help="you can to choice board depth",
)
menu.add_argument(
"-q",
"--quiet",
dest="quiet",
action="store_true",
help="you can to disable verbose mode",
)
return menu.parse_args(args)
def main():
params = parser(sys.argv[1:])
grid = GridMaker(length=params.length, depth=params.depth).generate()
if not params.quiet:
print(f"Was generated puzzle size of {grid.length}x{grid.depth}:")
print(grid)
finder = Finder(grid)
words = finder.find_words(params.file)
print(f"Found {len(words)} words:")
if not params.quiet:
print(words)
| none | 1 | 3.479669 | 3 | |
app.py | bkhanale/SlackContestWatcherBot | 12 | 6620465 | <gh_stars>10-100
import datetime
from dateutil import tz
import json
import requests
from os import environ
base_url = "https://clist.by/api/v1/contest/"
header = {"Authorization": environ["CLIST_API_TOKEN"]}
def convert_time(utc):
    """Convert a naive UTC datetime to an 'Asia/Calcutta' wall-clock string.

    The tzinfo is stripped again before formatting, so the result is a
    plain 'YYYY-MM-DD HH:MM:SS' string in IST, microseconds discarded.
    """
    return str(
        utc.replace(tzinfo=tz.gettz('UTC'))
        .astimezone(tz.gettz('Asia/Calcutta'))
        .replace(microsecond=0)
        .replace(tzinfo=None))
def convert_dt(string):
    """Parse a clist.by timestamp ('YYYY-MM-DDTHH:MM:SS') into a naive datetime."""
    timestamp_format = '%Y-%m-%dT%H:%M:%S'
    return datetime.datetime.strptime(string, timestamp_format)
def watchcontest(now):
    """Report contests starting exactly one hour from *now*.

    Returns a ``(flag, text)`` pair: ``flag`` is True when at least one
    contest was found; ``text`` is a human-readable announcement block.
    """
    # Query clist.by for contests whose start time equals now + 1 hour.
    cur_time = now.replace(microsecond=0) + datetime.timedelta(hours=1)
    para = {"start": cur_time.isoformat()}
    resp = requests.get(base_url, params=para, headers=header)
    flag = False
    res = ""
    if(resp.status_code == 200):
        contests = json.loads(resp.content.decode("utf-8"))
        if(len(contests["objects"]) >= 1):
            flag = True
        for con in contests["objects"]:
            # Convert start/end to local (IST) wall-clock strings.
            lcls = convert_time(convert_dt(con["start"]))
            lcle = convert_time(convert_dt(con["end"]))
            res += con["event"] + " will start in 1 hour!\n"
            res += con["href"] + "\n"
            res += "Start: " + lcls + "\n"
            res += "End: " + lcle + "\n"
            res += "Duration: " + str(
                datetime.timedelta(seconds=con["duration"])) + "\n\n"
    return flag, res
def upcoming(site, now):
    """List contests on *site* starting within the next seven days.

    Returns formatted text on success, or "Error <status>" when the
    clist.by API responds with a non-200 status.
    """
    now = now.replace(microsecond=0)
    then = now + datetime.timedelta(days=7)
    para = {
        "start__gte": now.isoformat(),
        "start__lte": then.isoformat(),
        "resource__name__contains": site,
        "order_by": "start"}
    resp = requests.get(base_url, params=para, headers=header)
    if(resp.status_code == 200):
        return (
            "Following are the upcoming contests within a week:\n\n" +
            build_string(json.loads(resp.content.decode("utf-8"))))
    else:
        return "Error " + str(resp.status_code)
def ongoing(site, now):
    """List contests on *site* that are currently running at *now*.

    Returns formatted text on success, or "Error <status>" when the
    clist.by API responds with a non-200 status.
    """
    now = now.replace(microsecond=0)
    para = {
        "start__lte": now.isoformat(),
        "end__gt": now.isoformat(),
        "resource__name__contains": site,
        "order_by": "start"}
    resp = requests.get(base_url, params=para, headers=header)
    if(resp.status_code == 200):
        return (
            "Following are the ongoing contests:\n\n" +
            build_string(json.loads(resp.content.decode("utf-8"))))
    else:
        return "Error " + str(resp.status_code)
def build_string(contests):
    """Render the API payload's contest list as numbered, readable text."""
    lines = []
    for index, con in enumerate(contests["objects"], start=1):
        start = convert_time(convert_dt(con["start"]))
        end = convert_time(convert_dt(con["end"]))
        duration = str(datetime.timedelta(seconds=con["duration"]))
        lines.append(str(index) + ". " + con["event"] + "\n")
        lines.append(con["href"] + "\n")
        lines.append("Start: " + start + "\n")
        lines.append("End: " + end + "\n")
        lines.append("Duration: " + duration + "\n\n")
    return "".join(lines)
| import datetime
from dateutil import tz
import json
import requests
from os import environ
# clist.by REST endpoint and auth header (token read from the environment).
base_url = "https://clist.by/api/v1/contest/"
header = {"Authorization": environ["CLIST_API_TOKEN"]}
def convert_time(utc):
    """Convert a naive UTC datetime to an IST ('Asia/Calcutta') string."""
    return str(
        utc.replace(tzinfo=tz.gettz('UTC'))
        .astimezone(tz.gettz('Asia/Calcutta'))
        .replace(microsecond=0)
        .replace(tzinfo=None))
def convert_dt(string):
    """Parse a clist.by 'YYYY-MM-DDTHH:MM:SS' timestamp."""
    return datetime.datetime.strptime(string, '%Y-%m-%dT%H:%M:%S')
def watchcontest(now):
    """Return (found, text) for contests starting one hour from *now*."""
    cur_time = now.replace(microsecond=0) + datetime.timedelta(hours=1)
    para = {"start": cur_time.isoformat()}
    resp = requests.get(base_url, params=para, headers=header)
    flag = False
    res = ""
    if(resp.status_code == 200):
        contests = json.loads(resp.content.decode("utf-8"))
        if(len(contests["objects"]) >= 1):
            flag = True
        for con in contests["objects"]:
            lcls = convert_time(convert_dt(con["start"]))
            lcle = convert_time(convert_dt(con["end"]))
            res += con["event"] + " will start in 1 hour!\n"
            res += con["href"] + "\n"
            res += "Start: " + lcls + "\n"
            res += "End: " + lcle + "\n"
            res += "Duration: " + str(
                datetime.timedelta(seconds=con["duration"])) + "\n\n"
    return flag, res
def upcoming(site, now):
    """List contests on *site* starting within the next seven days."""
    now = now.replace(microsecond=0)
    then = now + datetime.timedelta(days=7)
    para = {
        "start__gte": now.isoformat(),
        "start__lte": then.isoformat(),
        "resource__name__contains": site,
        "order_by": "start"}
    resp = requests.get(base_url, params=para, headers=header)
    if(resp.status_code == 200):
        return (
            "Following are the upcoming contests within a week:\n\n" +
            build_string(json.loads(resp.content.decode("utf-8"))))
    else:
        return "Error " + str(resp.status_code)
def ongoing(site, now):
    """List contests on *site* that are currently running."""
    now = now.replace(microsecond=0)
    para = {
        "start__lte": now.isoformat(),
        "end__gt": now.isoformat(),
        "resource__name__contains": site,
        "order_by": "start"}
    resp = requests.get(base_url, params=para, headers=header)
    if(resp.status_code == 200):
        return (
            "Following are the ongoing contests:\n\n" +
            build_string(json.loads(resp.content.decode("utf-8"))))
    else:
        return "Error " + str(resp.status_code)
def build_string(contests):
    """Render the contest list as numbered human-readable text."""
    res = ""
    con_ind = 1
    for con in contests["objects"]:
        res += str(con_ind) + ". " + con["event"] + "\n"
        res += con["href"] + "\n"
        res += "Start: " + convert_time(convert_dt(con["start"])) + "\n"
        res += "End: " + convert_time(convert_dt(con["end"])) + "\n"
        res += "Duration: " + str(datetime.timedelta(seconds=con["duration"]))
        res += "\n\n"
        con_ind += 1
return res | none | 1 | 2.775193 | 3 | |
src/pretalx/agenda/views/htmlexport.py | td00/pretalx | 0 | 6620466 | <gh_stars>0
import os
from bakery.views import BuildableDetailView
from django.conf import settings
from pretalx.agenda.views.schedule import (
FrabJsonView, FrabXCalView, FrabXmlView, ICalView, ScheduleView,
)
from pretalx.agenda.views.speaker import SpeakerView
from pretalx.agenda.views.talk import SingleICalView, TalkView
from pretalx.person.models import SpeakerProfile
from pretalx.schedule.models import Schedule
from pretalx.submission.models import Submission
class PretalxExportContextMixin():
    """Mixin for django-bakery export views: pins the event being exported.

    The ``export_schedule_html`` management command passes the event via
    the ``_exporting_event`` kwarg; every request, queryset and URL the
    view produces is then scoped to that event.
    """
    def __init__(self, *args, **kwargs):
        self._exporting_event = kwargs.pop('_exporting_event', None)
        if not self._exporting_event:
            raise Exception('Use the provided "export_schedule_html" management command to export the HTML schedule.')
        super().__init__(*args, **kwargs)
    def create_request(self, *args, **kwargs):
        # Bakery builds synthetic requests; attach the exported event so
        # downstream view code can rely on ``request.event`` as usual.
        request = super().create_request(*args, **kwargs)
        request.event = self._exporting_event
        return request
    def get_context_data(self, *args, **kwargs):
        self.object = self.get_object()  # ScheduleView crashes without this
        ctx = super().get_context_data(*args, **kwargs)
        ctx['is_html_export'] = True
        return ctx
    def get_url(self, obj):
        return obj.urls.public
    def get_queryset(self):
        qs = super().get_queryset()
        return qs.filter(event=self._exporting_event)
    def get_file_build_path(self, obj):
        """Map the object's public URL to a file path under BUILD_DIR."""
        dir_path, file_name = os.path.split(self.get_url(obj))
        path = os.path.join(settings.BUILD_DIR, dir_path[1:])
        # exist_ok avoids the check-then-create race of the original
        # `os.path.exists(path) or os.makedirs(path)` idiom.
        os.makedirs(path, exist_ok=True)
        return os.path.join(path, file_name)
# current schedule
class ExportScheduleView(PretalxExportContextMixin, BuildableDetailView, ScheduleView):
    # Only published schedules are exported, oldest first.
    queryset = Schedule.objects.filter(published__isnull=False).order_by('published')
    def get_url(self, obj):
        return obj.event.urls.schedule
class ExportFrabXmlView(PretalxExportContextMixin, BuildableDetailView, FrabXmlView):
    queryset = Schedule.objects.filter(published__isnull=False).order_by('published')
    def get_url(self, obj):
        return obj.event.urls.frab_xml
    def get_build_path(self, obj):
        # Machine-readable exports are written as plain files.
        return self.get_file_build_path(obj)
class ExportFrabXCalView(PretalxExportContextMixin, BuildableDetailView, FrabXCalView):
    queryset = Schedule.objects.filter(published__isnull=False).order_by('published')
    def get_url(self, obj):
        return obj.event.urls.frab_xcal
    def get_build_path(self, obj):
        return self.get_file_build_path(obj)
class ExportFrabJsonView(PretalxExportContextMixin, BuildableDetailView, FrabJsonView):
    queryset = Schedule.objects.filter(published__isnull=False).order_by('published')
    def get_url(self, obj):
        return obj.event.urls.frab_json
    def get_content(self):
        # Render via the regular view's GET for the exported event.
        return self.get(self.request, self._exporting_event).content
    def get_build_path(self, obj):
        return self.get_file_build_path(obj)
class ExportICalView(PretalxExportContextMixin, BuildableDetailView, ICalView):
    queryset = Schedule.objects.filter(published__isnull=False).order_by('published')
    def get_url(self, obj):
        return obj.event.urls.ical
    def get_content(self):
        return self.get(self.request, self._exporting_event).content
    def get_build_path(self, obj):
        return self.get_file_build_path(obj)
# all schedule versions
class ExportScheduleVersionsView(PretalxExportContextMixin, BuildableDetailView, ScheduleView):
    queryset = Schedule.objects.filter(version__isnull=False)
class ExportTalkView(PretalxExportContextMixin, BuildableDetailView, TalkView):
    queryset = Submission.objects.filter(slots__schedule__published__isnull=False).distinct()
class ExportTalkICalView(PretalxExportContextMixin, BuildableDetailView, SingleICalView):
    queryset = Submission.objects.filter(slots__schedule__published__isnull=False).distinct()
    def get_url(self, obj):
        return obj.urls.ical
    def get_content(self):
        return self.get(self.request, self._exporting_event).content
    def get_build_path(self, obj):
        return self.get_file_build_path(obj)
class ExportSpeakerView(PretalxExportContextMixin, BuildableDetailView, SpeakerView):
    queryset = SpeakerProfile.objects.filter(user__submissions__slots__schedule__published__isnull=False).distinct()
| import os
from bakery.views import BuildableDetailView
from django.conf import settings
from pretalx.agenda.views.schedule import (
FrabJsonView, FrabXCalView, FrabXmlView, ICalView, ScheduleView,
)
from pretalx.agenda.views.speaker import SpeakerView
from pretalx.agenda.views.talk import SingleICalView, TalkView
from pretalx.person.models import SpeakerProfile
from pretalx.schedule.models import Schedule
from pretalx.submission.models import Submission
class PretalxExportContextMixin():
    """Mixin for django-bakery export views: scopes everything to the
    event passed by the ``export_schedule_html`` management command."""
    def __init__(self, *args, **kwargs):
        self._exporting_event = kwargs.pop('_exporting_event', None)
        if not self._exporting_event:
            raise Exception('Use the provided "export_schedule_html" management command to export the HTML schedule.')
        super().__init__(*args, **kwargs)
    def create_request(self, *args, **kwargs):
        # Attach the exported event so views can rely on request.event.
        request = super().create_request(*args, **kwargs)
        request.event = self._exporting_event
        return request
    def get_context_data(self, *args, **kwargs):
        self.object = self.get_object()  # ScheduleView crashes without this
        ctx = super().get_context_data(*args, **kwargs)
        ctx['is_html_export'] = True
        return ctx
    def get_url(self, obj):
        return obj.urls.public
    def get_queryset(self):
        qs = super().get_queryset()
        return qs.filter(event=self._exporting_event)
    def get_file_build_path(self, obj):
        # Map the object's public URL to a file path below BUILD_DIR.
        dir_path, file_name = os.path.split(self.get_url(obj))
        path = os.path.join(settings.BUILD_DIR, dir_path[1:])
        # NOTE(review): check-then-create is racy; os.makedirs(path,
        # exist_ok=True) is the atomic equivalent.
        os.path.exists(path) or os.makedirs(path)
        return os.path.join(path, file_name)
# current schedule
class ExportScheduleView(PretalxExportContextMixin, BuildableDetailView, ScheduleView):
    queryset = Schedule.objects.filter(published__isnull=False).order_by('published')
    def get_url(self, obj):
        return obj.event.urls.schedule
class ExportFrabXmlView(PretalxExportContextMixin, BuildableDetailView, FrabXmlView):
    queryset = Schedule.objects.filter(published__isnull=False).order_by('published')
    def get_url(self, obj):
        return obj.event.urls.frab_xml
    def get_build_path(self, obj):
        return self.get_file_build_path(obj)
class ExportFrabXCalView(PretalxExportContextMixin, BuildableDetailView, FrabXCalView):
    queryset = Schedule.objects.filter(published__isnull=False).order_by('published')
    def get_url(self, obj):
        return obj.event.urls.frab_xcal
    def get_build_path(self, obj):
        return self.get_file_build_path(obj)
class ExportFrabJsonView(PretalxExportContextMixin, BuildableDetailView, FrabJsonView):
    queryset = Schedule.objects.filter(published__isnull=False).order_by('published')
    def get_url(self, obj):
        return obj.event.urls.frab_json
    def get_content(self):
        return self.get(self.request, self._exporting_event).content
    def get_build_path(self, obj):
        return self.get_file_build_path(obj)
class ExportICalView(PretalxExportContextMixin, BuildableDetailView, ICalView):
    queryset = Schedule.objects.filter(published__isnull=False).order_by('published')
    def get_url(self, obj):
        return obj.event.urls.ical
    def get_content(self):
        return self.get(self.request, self._exporting_event).content
    def get_build_path(self, obj):
        return self.get_file_build_path(obj)
# all schedule versions
class ExportScheduleVersionsView(PretalxExportContextMixin, BuildableDetailView, ScheduleView):
    queryset = Schedule.objects.filter(version__isnull=False)
class ExportTalkView(PretalxExportContextMixin, BuildableDetailView, TalkView):
    queryset = Submission.objects.filter(slots__schedule__published__isnull=False).distinct()
class ExportTalkICalView(PretalxExportContextMixin, BuildableDetailView, SingleICalView):
    queryset = Submission.objects.filter(slots__schedule__published__isnull=False).distinct()
    def get_url(self, obj):
        return obj.urls.ical
    def get_content(self):
        return self.get(self.request, self._exporting_event).content
    def get_build_path(self, obj):
        return self.get_file_build_path(obj)
class ExportSpeakerView(PretalxExportContextMixin, BuildableDetailView, SpeakerView):
queryset = SpeakerProfile.objects.filter(user__submissions__slots__schedule__published__isnull=False).distinct() | en | 0.693204 | # ScheduleView crashes without this # current schedule # all schedule versions | 1.843536 | 2 |
static/Pygeostat/plotting-2.py | MHadavand/MHadavand.github.io | 0 | 6620467 | <gh_stars>0
# Demo snippet: load pygeostat's bundled multivariate 3-D point example
# data set and plot the point locations.
import pygeostat as gs
data_file = gs.ExampleData('point3d_ind_mv')
gs.location_plot(data_file) | import pygeostat as gs
# Load pygeostat's bundled multivariate 3-D point example data set.
data_file = gs.ExampleData('point3d_ind_mv')
gs.location_plot(data_file) | none | 1 | 1.684711 | 2 | |
portal/migrations/0016_auto_20181105_0110.py | eugenechia95/sherpalearn | 0 | 6620468 | # Generated by Django 2.1.2 on 2018-11-04 17:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds the ``Subscribe`` model and
    a nullable ``title`` field on ``note``. Do not hand-edit; generate a
    follow-up migration for further changes."""
    dependencies = [
        ('portal', '0015_auto_20181104_0145'),
    ]
    operations = [
        migrations.CreateModel(
            name='Subscribe',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(blank=True, max_length=254, null=True)),
            ],
        ),
        migrations.AddField(
            model_name='note',
            name='title',
            field=models.CharField(blank=True, max_length=75, null=True),
        ),
    ]
| # Generated by Django 2.1.2 on 2018-11-04 17:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portal', '0015_auto_20181104_0145'),
]
operations = [
migrations.CreateModel(
name='Subscribe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(blank=True, max_length=254, null=True)),
],
),
migrations.AddField(
model_name='note',
name='title',
field=models.CharField(blank=True, max_length=75, null=True),
),
]
| en | 0.710134 | # Generated by Django 2.1.2 on 2018-11-04 17:10 | 1.633935 | 2 |
problem_solving/python/algorithms/implementation/taum_and_bday.py | kcc3/hackerrank-solutions | 0 | 6620469 | <reponame>kcc3/hackerrank-solutions
def taum_bday(b, w, bc, wc, z):
    """Hackerrank 'Taum and B'day': minimal cost of buying b black and w
    white gifts when a black gift costs bc, a white gift costs wc, and a
    gift can be converted to the other colour for z.

    Args:
        b (int): number of black gifts to buy
        w (int): number of white gifts to buy
        bc (int): price of one black gift
        wc (int): price of one white gift
        z (int): price of converting a gift to the other colour

    Returns:
        int: the optimized total spend
    """
    # Each colour is acquired either directly or by converting the other
    # colour, whichever route is cheaper.
    cheapest_black = min(bc, wc + z)
    cheapest_white = min(wc, bc + z)
    return b * cheapest_black + w * cheapest_white
if __name__ == "__main__":
    # NOTE(review): Python-2-only `print` statements — this file is a
    # SyntaxError under Python 3; use print(...) to run it there.
    print taum_bday(3, 5, 3, 4, 1)
    print taum_bday(10, 10, 1, 1 , 1)
print taum_bday(5, 9, 2, 3, 4) | def taum_bday(b, w, bc, wc, z):
"""Hackerrank Problem: https://www.hackerrank.com/challenges/taum-and-bday/problem
Taum is planning to celebrate the birthday of his friend, Diksha. There are two types of gifts that Diksha wants
from Taum: one is black and the other is white. To make her happy, Taum has to buy b black gifts and w white gifts.
- The cost of each black gift is bc units.
- The cost of every white gift is wc units.
- The cost of converting each black gift into white gift or vice versa is z units.
Help Taum by deducing the minimum amount he needs to spend on Diksha's gifts.
For example, if Taum wants to buy b = 3 black gifts and w = 5 white gifts at a cost of bc = 3, wc = 4 and conversion
cost z = 1, we see that he can buy a black gift for 3 and convert it to a white gift for 1, making the total cost of
each white gift 4. That matches the cost of a white gift, so he can do that or just buy black gifts and white gifts.
Either way, the overall cost is 3 * 3 + 5 * 4 = 29.
Args:
b (int): The number of black presents to purchase
w: (int): The number of white presents to purchase
bc (int): The cost of each black present
wc (int): The cost of each white present
z (int): The cost to switch between the present types
Returns:
int: the optimized cost to buy the specified presents
"""
return b * min(bc, wc + z) + w * min(wc, bc + z)
if __name__ == "__main__":
    # NOTE(review): Python-2-only `print` statement syntax; a
    # SyntaxError under Python 3.
    print taum_bday(3, 5, 3, 4, 1)
    print taum_bday(10, 10, 1, 1 , 1)
print taum_bday(5, 9, 2, 3, 4) | en | 0.919095 | Hackerrank Problem: https://www.hackerrank.com/challenges/taum-and-bday/problem Taum is planning to celebrate the birthday of his friend, Diksha. There are two types of gifts that Diksha wants from Taum: one is black and the other is white. To make her happy, Taum has to buy b black gifts and w white gifts. - The cost of each black gift is bc units. - The cost of every white gift is wc units. - The cost of converting each black gift into white gift or vice versa is z units. Help Taum by deducing the minimum amount he needs to spend on Diksha's gifts. For example, if Taum wants to buy b = 3 black gifts and w = 5 white gifts at a cost of bc = 3, wc = 4 and conversion cost z = 1, we see that he can buy a black gift for 3 and convert it to a white gift for 1, making the total cost of each white gift 4. That matches the cost of a white gift, so he can do that or just buy black gifts and white gifts. Either way, the overall cost is 3 * 3 + 5 * 4 = 29. Args: b (int): The number of black presents to purchase w: (int): The number of white presents to purchase bc (int): The cost of each black present wc (int): The cost of each white present z (int): The cost to switch between the present types Returns: int: the optimized cost to buy the specified presents | 4.109199 | 4 |
tests/tests_get_post.py | LandRegistry/historian-alpha | 0 | 6620470 | import unittest
from application import server, app
import json
class ApplicationTestCase(unittest.TestCase):
def setUp(self):
server.app.config['TESTING'] = True
self.app = server.app.test_client()
headers = {'content-type': 'application/json; charset=utf-8'}
history_data = '{"title_number": "TEST1412258807231" }'
self.app.post('/TEST1412853022495', data=history_data, headers=headers)
def test_that_get_root_fails(self):
self.assertEqual((self.app.get('/')).status, '400 BAD REQUEST')
def test_that_post_root_fails(self):
self.assertEqual((self.app.post('/')).status, '405 METHOD NOT ALLOWED')
def test_that_post_successful(self):
headers = {'content-type': 'application/json; charset=utf-8'}
history_data = '{"title_number": "TEST1412258807231" }'
app.logger.info(history_data)
app.logger.info(json.dumps(history_data, encoding='utf-8'))
res = self.app.post('/TEST1412853022495', data=history_data, headers=headers)
self.assertEqual(res.status, '200 OK')
def test_that_get_list_successful(self):
self.assertEqual((self.app.get('/TEST1412853022495?version=list')).status, '200 OK')
data = json.loads(self.app.get('/TEST1412853022495?version=list').data)
self.assertEqual(json.dumps(data, encoding='utf-8'), '{"versions": []}')
def test_that_get_version_successful(self):
self.assertEqual((self.app.get('/TEST1412853022495?version=0')).status, '404 NOT FOUND')
| import unittest
from application import server, app
import json
class ApplicationTestCase(unittest.TestCase):
def setUp(self):
server.app.config['TESTING'] = True
self.app = server.app.test_client()
headers = {'content-type': 'application/json; charset=utf-8'}
history_data = '{"title_number": "TEST1412258807231" }'
self.app.post('/TEST1412853022495', data=history_data, headers=headers)
def test_that_get_root_fails(self):
self.assertEqual((self.app.get('/')).status, '400 BAD REQUEST')
def test_that_post_root_fails(self):
self.assertEqual((self.app.post('/')).status, '405 METHOD NOT ALLOWED')
def test_that_post_successful(self):
headers = {'content-type': 'application/json; charset=utf-8'}
history_data = '{"title_number": "TEST1412258807231" }'
app.logger.info(history_data)
app.logger.info(json.dumps(history_data, encoding='utf-8'))
res = self.app.post('/TEST1412853022495', data=history_data, headers=headers)
self.assertEqual(res.status, '200 OK')
def test_that_get_list_successful(self):
self.assertEqual((self.app.get('/TEST1412853022495?version=list')).status, '200 OK')
data = json.loads(self.app.get('/TEST1412853022495?version=list').data)
self.assertEqual(json.dumps(data, encoding='utf-8'), '{"versions": []}')
def test_that_get_version_successful(self):
self.assertEqual((self.app.get('/TEST1412853022495?version=0')).status, '404 NOT FOUND')
| none | 1 | 2.913319 | 3 | |
tests/test_nonblocking.py | thomaswhiteway/channel | 0 | 6620471 | from channel import Channel, Empty, Closed, Full
import pytest
def test_send_receive():
channel = Channel()
tx = channel.tx()
rx = channel.rx()
tx.put(1)
assert rx.get() == 1
def test_double_receive():
channel = Channel()
tx = channel.tx()
rx = [channel.rx() for _ in range(2)]
tx.put(1)
tx.put(2)
assert rx[0].get() == 1
assert rx[1].get() == 2
def test_always_empty():
channel = Channel()
tx = channel.tx()
rx = channel.rx()
with pytest.raises(Empty):
rx.get(block=False)
def test_becomes_empty():
channel = Channel()
tx = channel.tx()
rx = channel.rx()
tx.put(1)
assert rx.get() == 1
with pytest.raises(Empty):
rx.get(block=False)
def test_closed():
channel = Channel()
tx = channel.tx()
rx = channel.rx()
with pytest.raises(Empty):
rx.get(block=False)
tx.close()
with pytest.raises(Closed):
rx.get(block=False)
def test_full():
channel = Channel(1)
tx = channel.tx()
tx.put(10)
with pytest.raises(Full):
tx.put(11, block=False)
| from channel import Channel, Empty, Closed, Full
import pytest
def test_send_receive():
channel = Channel()
tx = channel.tx()
rx = channel.rx()
tx.put(1)
assert rx.get() == 1
def test_double_receive():
channel = Channel()
tx = channel.tx()
rx = [channel.rx() for _ in range(2)]
tx.put(1)
tx.put(2)
assert rx[0].get() == 1
assert rx[1].get() == 2
def test_always_empty():
channel = Channel()
tx = channel.tx()
rx = channel.rx()
with pytest.raises(Empty):
rx.get(block=False)
def test_becomes_empty():
channel = Channel()
tx = channel.tx()
rx = channel.rx()
tx.put(1)
assert rx.get() == 1
with pytest.raises(Empty):
rx.get(block=False)
def test_closed():
channel = Channel()
tx = channel.tx()
rx = channel.rx()
with pytest.raises(Empty):
rx.get(block=False)
tx.close()
with pytest.raises(Closed):
rx.get(block=False)
def test_full():
channel = Channel(1)
tx = channel.tx()
tx.put(10)
with pytest.raises(Full):
tx.put(11, block=False)
| none | 1 | 2.520758 | 3 | |
firmware/rpi/test.py | DanNduati/Industrial-IoT-application-FinalYR-project | 1 | 6620472 | <reponame>DanNduati/Industrial-IoT-application-FinalYR-project
import RPi.GPIO as GPIO
import time
import datetime
from firebase import firebase
import Adafruit_DHT
import urllib2, urllib, httplib
import json
import os
from functools import partial
GPIO.setmode(GPIO.BCM)
GPIO.cleanup()
GPIO.setwarnings(False)
#set GPIO Pins
GPIO_TRIGGER = 18
GPIO_ECHO = 24
GPIO_LM1 = 20
GPIO_LM2 = 19
GPIO_DHT11 = 4
GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
GPIO.setup(GPIO_LM1, GPIO.IN)
GPIO.setup(GPIO_LM2, GPIO.IN)
#dht11 setup
sensor = Adafruit_DHT.DHT11
#firebase setup
firebase =firebase.FirebaseApplication('https://elevator-3a561-default-rtdb.firebaseio.com/', None)
def get_distance():
GPIO.output(GPIO_TRIGGER, True)
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
StartTime = time.time()
StopTime = time.time()
while GPIO.input(GPIO_ECHO) == 0:
StartTime = time.time()
while GPIO.input(GPIO_ECHO) == 1:
StopTime = time.time()
TimeElapsed = StopTime - StartTime
distance = (TimeElapsed * 34300) / 2
return distance
def update_firebase():
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
lm1_state = GPIO.input(GPIO_LM1)
lm2_state = GPIO.input(GPIO_LM2)
distance = get_distance()
firebase.post('/lmsw1/value',lm1_state)
firebase.post('/lmsw2/value',lm2_state)
firebase.post('/Distance/value',distance)
firebase.post('/Humidity/value',humidity)
firebase.post('/Temperature/value',temperature)
while True:
update_firebase();
time.sleep(5)
| import RPi.GPIO as GPIO
import time
import datetime
from firebase import firebase
import Adafruit_DHT
import urllib2, urllib, httplib
import json
import os
from functools import partial
GPIO.setmode(GPIO.BCM)
GPIO.cleanup()
GPIO.setwarnings(False)
#set GPIO Pins
GPIO_TRIGGER = 18
GPIO_ECHO = 24
GPIO_LM1 = 20
GPIO_LM2 = 19
GPIO_DHT11 = 4
GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
GPIO.setup(GPIO_LM1, GPIO.IN)
GPIO.setup(GPIO_LM2, GPIO.IN)
#dht11 setup
sensor = Adafruit_DHT.DHT11
#firebase setup
firebase =firebase.FirebaseApplication('https://elevator-3a561-default-rtdb.firebaseio.com/', None)
def get_distance():
GPIO.output(GPIO_TRIGGER, True)
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
StartTime = time.time()
StopTime = time.time()
while GPIO.input(GPIO_ECHO) == 0:
StartTime = time.time()
while GPIO.input(GPIO_ECHO) == 1:
StopTime = time.time()
TimeElapsed = StopTime - StartTime
distance = (TimeElapsed * 34300) / 2
return distance
def update_firebase():
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
lm1_state = GPIO.input(GPIO_LM1)
lm2_state = GPIO.input(GPIO_LM2)
distance = get_distance()
firebase.post('/lmsw1/value',lm1_state)
firebase.post('/lmsw2/value',lm2_state)
firebase.post('/Distance/value',distance)
firebase.post('/Humidity/value',humidity)
firebase.post('/Temperature/value',temperature)
while True:
update_firebase();
time.sleep(5) | en | 0.416354 | #set GPIO Pins #dht11 setup #firebase setup | 3.092729 | 3 |
app/celery_app/models.py | KimKiHyuk/BenefitObserver | 0 | 6620473 | <reponame>KimKiHyuk/BenefitObserver<gh_stars>0
from django.db import models
# Create your models here.
class CrawlerTask(models.Model):
objects = models.Manager()
log = models.TextField()
done_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.done_at)
class Meta:
ordering =['done_at']
verbose_name_plural = 'CrawlerTask'
| from django.db import models
# Create your models here.
class CrawlerTask(models.Model):
objects = models.Manager()
log = models.TextField()
done_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.done_at)
class Meta:
ordering =['done_at']
verbose_name_plural = 'CrawlerTask' | en | 0.963489 | # Create your models here. | 2.174795 | 2 |
capture/provenance/DefinitionProvenance.py | dew-uff/prospective-prov | 0 | 6620474 | <gh_stars>0
from collections import defaultdict
from capture.build.ConditionNodes import Nodes
from capture.build.GraphDrawer import GraphDrawer
from capture.build.Graphviz import Graphviz
from capture.build.HashingMapper import HashingMapper
from capture.build.SyntaxWrite import SyntaxWrite
from .ExecutionProvenanceCollector import ExecutionProvenance
from capture.collector.ExperimentDataCollector import ExperimentDataCollector
class DefinitionProvenanceAnalyzer:
def __init__(self, trial):
self.provenance = Graphviz(trial).initialize()
self.def_list = []
self.call = defaultdict(list)
self.defs = defaultdict(list)
self.visited_x = []
self.visited_y = []
self.def_method = defaultdict(list)
self.class_method = []
self.class_def_name = []
self.class_def_start = []
self.class_def_final = []
self.hash_index = []
self.trial = trial
''' Array List - Try components '''
self.try_except = defaultdict(list)
self.def_function = []
self.singleIF = []
self.generic_hash = []
self.def_function_final = []
self.def_function_after = []
''' Array List - Lines and code components'''
self.class_list = []
self.start = []
self.column = []
self.last = []
self.block = []
self.type = []
''' Array List - All nodes '''
self.node_hash = []
self.node_else = []
self.node_if = []
self.node_for = []
self.node_for_sup = []
self.node_end_for = []
self.arrayHashing = defaultdict(list)
def syntaxRulesIF(self):
"""
Step 1: format_Condition_Node
Step 2: create_Condition_Node()
Step 3: organize_Sequences_IF_ELSE()
Step 4: update_Sequences_IF_ELSE()
"""
""" Detect conditional structures within others """
self.format_Condition_Node()
""" Link conditional structures that belong to them """
self.create_Condition_Node()
""" Arrange and link IF and ELSE sequences """
self.organize_Sequences_IF_ELSE()
""" Report conditional items already covered and rearranged """
self.update_Sequences_IF_ELSE()
def syntaxRulesFOR(self):
"""
Step 1: get_Call_Back()
Step 2: get_Call_Ends()
Step 3: get_Last_Loop()
"""
""" Create a back edge within a loop [WHILE, FOR] """
self.get_Call_Back()
""" Create an ending edge within a loop [WHILE, FOR] """
self.get_Call_Ends()
""" Create and check for a next loop [WHILE, FOR] in sequence """
self.get_Last_Loop()
def syntaxRulesTRY(self):
"""
Step 1: check_Try()
Step 2: getCallTryException()
"""
self.check_Try()
self.getCallTryException()
def show(self):
# print('i|INDEX|START|LAST|NODE-HASH|NODE-ELSE|NODE-IF|NODE-END-FOR|NODE-FOR|COLUMN| DEF-LIST|\n')
for i in range(0, len(self.node_hash)):
string = '{}|{}* {} {} {} {} {} {} {} {} {} '.format(i, self.hash_index[i], self.start[i], self.last[i],
self.node_hash[i], self.node_else[i], self.node_if[i],
self.node_end_for[i], self.node_for[i], self.column[i],
self.def_list[i])
# print(string)
def createBoxesInFunctions(self):
"""
Create a boundary for the representation of functions.
Here, all content within functions is bounded by boxes, just as in classes.
"""
for index, node in enumerate(self.node_hash):
if 'function_def' in node:
border = SyntaxWrite().getIndexArray(self.last[index], self.start)
for index_2 in range(index, border + 1):
nameBoxes = 'cluster{}'.format(index)
with self.provenance.subgraph(name=nameBoxes) as subgroupA:
nodes = self.node_hash[index_2]
subgroupA.attr(style='dashed')
subgroupA.node(nodes)
condition = ['for', 'while', 'if', 'elif']
if any(x in nodes for x in condition):
subgroupA.node(nodes + 'c')
def createBoxInClass(self):
for cName, cStart, cLast in zip(self.class_def_name,
self.class_def_start,
self.class_def_final):
for index, nodes in enumerate(self.node_hash):
show_name = True
startNode = self.start[index]
getLimitX = startNode >= cStart
getLimitY = startNode <= cLast
if getLimitX and getLimitY:
nameCluster = 'cluster{}'.format(cName)
with self.provenance.subgraph(name=nameCluster) as subgroupA:
if show_name:
nameClass = 'Class {}'.format(cName)
subgroupA.attr(label=nameClass, style='dashed')
show_name = False
if 'function_def' in nodes:
functionStart = self.start[index]
functionFinal = self.last[index]
indexStart = index
functionName = 'cluster{}'.format(nodes)
with subgroupA.subgraph(name=functionName) as subgroupB:
subgroupA.attr(label='', style='dashed')
while True:
getLimitX = self.start[indexStart] >= functionStart
getLimitY = self.start[indexStart] <= functionFinal
if getLimitX and getLimitY:
subgroupB.node(self.node_hash[indexStart])
else:
break
indexStart = indexStart + 1
else:
subgroupA.node(self.node_hash[index])
def verify_function_check(self):
for index, node in enumerate(self.node_hash):
node_else = self.node_else[index]
node_function = self.def_list[index]
null_check = node_else is not None and node_function is not None
if node_else != node_function and null_check:
#self.node_else[index] = 'end{}'.format(node_function)
self.node_else[index] = node_function
def indented_label(self):
for index, node in enumerate(self.node_hash):
string_column = '{' + str(self.column[index]) + '}'
self.provenance.node(node, xlabel=string_column)
def format_column(self):
keys = defaultdict(list)
for index, item in enumerate(self.start):
keys[item].append(index)
for values in keys:
if len(keys[values]) > 1:
min_column = self.column[min(keys[values])]
for element in keys[values]:
self.column[element] = min_column
def linking_nodes_graph(self):
hash_loop = []
for i in range(1, len(self.node_hash) - 1):
current = self.node_hash[i]
next_node = self.node_hash[i + 1]
''' Visited node X (Any node)'''
visitX = (current not in self.visited_x)
''' Visited node Y (Any node)'''
visitY = (next_node not in self.visited_y)
''' Visited node Z (only loop node)'''
visitZ = (current not in hash_loop)
checking = visitX and visitY and visitZ
'''
Check limit in def function
if self.def_list[i] != None:
limite_1 = self.node_hash.index(self.def_list[i])
limite = self.last[limite_1]
check_return = self.start[i + 1] <= limite
'''
if checking:
if (('if' not in current) and ('else' not in current)) and (
(self.node_else[i] != None) and (self.node_for[i] != None)):
self.provenance.edge(current, self.node_for[i], style="dashed")
elif current in self.def_function_after:
continue
elif 'function_def' in next_node:
if next_node in self.def_function:
index_def_node = self.def_function.index(next_node)
self.provenance.edge(current, self.def_function_final[index_def_node])
elif 'if' in current:
self.provenance.edge(current, next_node, label=' True')
if '*' in self.node_else[i]:
hash_string = self.node_else[i]
if self.node_for[i] is not None:
hash_for = self.node_for[i]
item_false = self.node_hash.index(hash_string[0:len(hash_string) - 1])
if self.node_for[i] != self.node_for[item_false]:
self.provenance.edge(current, self.node_for[i], label=' False')
else:
self.provenance.edge(current, hash_string[0: len(hash_string) - 1], label=' False')
'''
elif '-' in self.node_else[i]:
lastNode = SyntaxWrite().getIndexArray(self.last[i], self.start)
self.provenance.edge(current, self.node_hash[lastNode], label=' False')
'''
elif 'try' in current:
self.provenance.edge(current, next_node)
elif 'exception' in current:
self.provenance.edge(self.node_if[i], self.node_hash[i])
self.provenance.edge(current, next_node)
elif 'else' in current:
self.provenance.edge(self.node_if[i], self.node_hash[i], label=' False')
self.provenance.edge(current, next_node)
elif 'for' in current or 'while' in current:
self.provenance.edge(current, next_node)
else:
if self.node_else[i] is None:
self.provenance.edge(current, next_node)
else:
self.provenance.edge(current, self.node_else[i])
self.createBoxesInFunctions()
def create_Global_End_Node(self):
self.node_hash.append('end')
self.node_else.append(None)
self.node_if.append(None)
self.last.append(self.last[-1] + 1)
self.last.append(self.last[-1] + 1)
self.start.append(self.last[-1] + 1)
self.column.append(0)
self.block.append('End')
self.type.append('end-code')
self.provenance.node('end', label='End')
element = '{}{}'.format(self.last[-1] + 1, 0)
self.hash_index.append(int(element))
self.generic_hash.append('{}name{}'.format(self.last[-1] + 1, 0))
return True
def start_node(self):
self.node_hash.append('start')
self.node_else.append(None)
self.node_if.append(None)
self.last.append(0)
self.start.append(0)
self.column.append(0)
self.block.append('Start')
self.type.append('start-code')
self.hash_index.append(0)
self.provenance.node('start', label='Start')
self.generic_hash.append('{}name{}'.format(0, 0))
return True
def arguments_selection(self, conn, start):
sqlite = ExperimentDataCollector(self.trial, conn)
return sqlite.selection_args(start)
def getCallTryException(self):
try:
for i in range(0, len(self.node_hash) - 1):
current = self.node_hash[i]
if 'try' in current:
intervalo_final = self.last[i]
intervalo_start = i
line_exception = -1
while True:
if intervalo_final <= self.start[intervalo_start]:
self.node_else[i] = self.node_hash[intervalo_start + 1]
break
if 'exception' in self.node_hash[intervalo_start]:
self.node_if[intervalo_start] = current
line_exception = intervalo_start
intervalo_start = intervalo_start + 1
if line_exception != -1:
self.node_else[line_exception - 1] = self.node_else[i]
except:
print('Error in update 4!')
def create_Condition_Node(self):
"""
insert tag in Condition Elements [node_if, node_elif, node_else]
dependencies: all, DefinitionProvenance Class
return: None
"""
for index, node in enumerate(self.node_hash):
if 'if' in node:
lastLoop = SyntaxWrite().getIndexArray(self.last[index], self.start)
for key in range(index + 1, lastLoop):
checkColumn = self.column[index] == self.column[key]
checkPosition = self.start[key] < self.last[index]
if SyntaxWrite().compareCondition(self.node_hash[key]):
if checkColumn and checkPosition:
self.node_if[key] = node
self.node_else[index] = self.node_hash[key]
self.last[key] = self.last[index]
def organize_Sequences_IF_ELSE(self):
"""
This method rearranges all conditions by binding them to their
respective blocks of code.
return: None
"""
for index, node in enumerate(self.node_hash):
there_Element = self.node_else[index] is not None
if there_Element and 'if' in node and 'else' in self.node_else[index]:
this_item = self.node_hash.index(self.node_else[index]) - 1
next_item = self.node_hash.index(self.node_else[index])
while True:
if 'else' in self.node_hash[next_item]:
node_else = self.node_else[next_item]
next_item = self.node_hash.index(node_else)
else:
self.node_else[this_item] = self.node_hash[next_item]
break
def format_Condition_Node(self):
"""
return: None
"""
""" This item checks whether a given IF has any ELSE related to it """
for key, node in enumerate(self.node_hash):
if SyntaxWrite().compareCondition(node):
idx = key + 1
while True:
if self.last[key] <= self.start[idx]:
self.node_else[key - 1] = self.node_hash[idx + 1]
self.node_else[key] = self.node_hash[idx + 1]
break
idx = idx + 1
""" This item checks already connected nodes and formats incorrectly linked nodes """
for key, node in enumerate(self.node_hash):
if SyntaxWrite().compareCondition(node):
lastElse = SyntaxWrite().getIndexArray(self.last[key], self.start)
lastNode = self.node_hash[lastElse]
value = lastElse
if SyntaxWrite().compareCondition(lastNode):
while True:
if SyntaxWrite().compareCondition(lastNode):
lastElse = SyntaxWrite().getIndexArray(self.last[lastElse], self.start)
value = lastElse - 1
lastNode = self.node_hash[lastElse]
break
else:
else_ = SyntaxWrite().getIndexArray(self.last[value], self.start)
self.provenance.edge(self.node_hash[else_], lastNode)
self.visited_x.append(self.node_hash[else_])
self.visited_y.append(lastNode)
self.node_else[else_] = lastNode
break
else:
self.node_else[value] = lastNode
else_ = SyntaxWrite().getIndexArray(self.last[value], self.start)
self.provenance.edge(self.node_hash[else_], lastNode)
def get_Call_Ends(self):
"""
Get the loop boundaries, ie the end nodes and return nodes.
dependencies: all, DefinitionProvenance class
return: None
"""
visitedArray = []
for index, currentNode in enumerate(self.node_hash):
if SyntaxWrite().compareLoop(currentNode):
nodeLoop = SyntaxWrite().getObjectArray(self.last[index], self.start)
nodeBack = self.node_hash[nodeLoop]
if nodeBack not in visitedArray:
visitedArray.append(nodeBack)
there_Element = self.node_for[nodeLoop] is not None
check_Element = self.node_for[nodeLoop] != currentNode
if there_Element and check_Element:
nodeNext = self.node_for[self.getReturnLoop(nodeLoop)]
self.provenance.edge(nodeBack, nodeNext, style='dashed')
def get_Call_Back(self):
"""
get the link back between the loop nodes
dependencies: all, DefinitionProvenance class
return: None
"""
visitedArray = []
for index in reversed(range(len(self.start))):
currentNode = self.node_hash[index]
if SyntaxWrite().compareLoop(currentNode):
linkedBack = SyntaxWrite().getObjectArray(self.last[index], self.start)
if linkedBack is not None and self.node_hash[linkedBack] not in visitedArray:
self.provenance.edge(self.node_hash[linkedBack], currentNode, style='dashed')
visitedArray.append(self.node_hash[linkedBack])
else:
continue
def edge_Back_in_Loops(self):
def return_object(element):
object: int = -1
for index in range(len(self.start) - 1, -1, -1):
if element == self.start[index]:
object = index
break
return object
for index, item in enumerate(self.node_hash):
if SyntaxWrite().compareLoop(item):
if self.last[index] in self.start:
index_loop = return_object(self.last[index])
for k in range(index, index_loop + 1):
self.node_for[k] = item
def get_Last_Loop(self):
for index, item in enumerate(self.node_hash):
if SyntaxWrite().compareLoop(item):
columnNode = self.column[index]
check = False
if self.last[index] in self.start:
lastNode = SyntaxWrite().getIndexArray(self.last[index], self.start)
if lastNode + 1 == len(self.node_hash):
lastNode -= 1
if columnNode == self.column[lastNode + 1]:
self.node_end_for[index] = self.node_hash[lastNode + 1]
self.provenance.edge(item, self.node_hash[lastNode + 1],
label=" End Loop")
check = True
self.visited_x.append(self.node_hash[lastNode])
self.visited_y.append(self.node_hash[lastNode + 1])
else:
indexNode = lastNode
while True:
if indexNode == 0:
break
else:
if SyntaxWrite.compareLoop(self.node_hash[indexNode]):
check_column = self.column[indexNode] < columnNode
if check_column:
self.provenance.edge(item, self.node_hash[indexNode],
label=" End Loop")
check = True
self.node_end_for[index] = self.node_hash[indexNode]
break
indexNode = indexNode - 1
# print(check)
def update_Sequences_IF_ELSE(self):
"""
This method appends a string to nodes that will be
ignored in the binding method.
return: None
"""
for key, node in enumerate(self.node_hash):
if 'if' in node and self.node_else[key] is None:
node_if = SyntaxWrite().getIndexArray(self.last[key], self.start)
if 'else' in self.node_hash[node_if + 1]:
self.node_else[key] = '{}*'.format(self.node_else[node_if + 1])
else:
self.node_else[key] = '{}*'.format(self.node_hash[node_if + 1])
def edge_Definition_and_Calls(self):
"""
This function is only enabled when there is a function in the script schema.
Link function definition with their respective calls.
return: None
"""
for keyDef in self.defs:
# self.provenance.node('start' + keyDef, 'Start', shape='Msquare')
# self.provenance.edge('start' + keyDef, keyDef)
for keyCall in self.call:
nameDef = self.defs[keyDef][0]
nameCall = self.call[keyCall][0]
if nameCall.find(nameDef) != -1:
#self.provenance.edge(keyCall, 'start' + keyDef, style='dashed')
self.provenance.edge(keyCall, keyDef, style='dashed')
def create_Elif_List(self):
for index, node in enumerate(self.node_hash):
if 'if' in node or 'elif' in node:
id_node = index
column = self.column[index]
for k in range(index + 1, len(self.node_hash)):
if 'elif' in self.node_hash[k] and column == self.column[k]:
self.node_else[id_node] = self.node_hash[k]
if self.def_list[id_node] == None:
self.provenance.edge(node, self.node_hash[k], label=' False')
else:
if self.def_list[id_node] == self.def_list[k]:
self.provenance.edge(node, self.node_hash[k], label=' False')
break
def create_Function_End_List(self):
for i in range(0, len(self.node_hash)):
if 'function_def' in self.node_hash[i]:
end_index = SyntaxWrite().getIndexArray(self.last[i], self.start)
self.def_function.append(self.node_hash[i])
self.def_function_final.append(self.node_hash[end_index + 1])
self.def_function_after.append(self.node_hash[end_index])
def getPointCode(self):
for index, node in enumerate(self.node_hash):
if self.def_list[index] is None and 'start' not in node:
self.provenance.edge('start', node)
break
def check_Try(self):
for index, node in enumerate(self.node_hash):
if 'try' in node:
try_node = node
try_column = self.column[index]
try_final = self.last[index]
for index2 in range(index + 1, len(self.node_hash)):
if try_final == self.start[index2]:
break
if 'exception' in self.node_hash[index2]:
if try_column == self.column[index2]:
self.try_except[try_node].append(self.node_hash[index2])
elif 'finally' in self.node_hash[index2]:
if try_column == self.column[index2]:
self.try_except[try_node].append(self.node_hash[index2])
for key in self.try_except:
count = len(self.try_except[key])
check = False
if count == 1:
check = 'finally' in self.try_except[key][0]
if count == 2:
check = 'finally' in self.try_except[key][1]
check_structure = False
if check:
element = self.node_hash.index(key)
last = SyntaxWrite().getIndexArray(self.last[element], self.start)
self.provenance.edge(key, self.try_except[key][1])
if 'exception' in self.try_except[key][count - 2]:
check_structure = True
exception_node = self.try_except[key][count - 2]
element = self.node_hash.index(exception_node) - 1
self.provenance.edge(self.node_hash[element], self.try_except[key][1])
self.visited_x.append(self.node_hash[element])
element = self.node_hash.index(key)
last = SyntaxWrite().getIndexArray(self.last[element], self.start)
self.provenance.edge(key, self.node_hash[last + 1], style='dashed')
if check_structure:
self.visited_y.append(self.node_hash[last + 1])
def limited_Class(self):
for index, node_class in enumerate(self.class_def_name):
indexLast = SyntaxWrite().getIndexArray(self.last[index], self.start)
def limited(self):
for index, node in enumerate(self.node_hash):
if 'function_def' in node:
index_start = index
index_final = SyntaxWrite().getIndexArray(self.last[index],
self.start)
for j in range(index_start, index_final + 1):
self.def_list[j] = node
def create_Hash_Code(self, x, y, z):
return '{}{}{}'.format(x, y, z)
def create_All_Nodes(self, rows, connecting):
syntax = Nodes()
map = HashingMapper()
nodes = GraphDrawer(self.provenance)
self.start_node()
count_class = 0
count_def = 0
count_loop = 0
count_if = 0
count_try = 0
for codes in rows:
check = False
startLine = codes[0]
finalLine = codes[1]
typesLine = codes[2]
blockLine = codes[3]
columLine = codes[4]
nodesHash = self.create_Hash_Code(startLine,
typesLine,
finalLine)
label = map.getElement('label',
startLine,
blockLine)
if nodesHash not in self.node_hash:
if typesLine in SyntaxWrite().getOthers():
check, self.provenance = nodes.assign(nodesHash, label)
elif typesLine == 'class_def':
count_class = count_class + 1
self.class_def_name.append(blockLine)
self.class_def_start.append(startLine)
self.class_def_final.append(finalLine)
elif SyntaxWrite().getCall(typesLine, blockLine):
self.call[nodesHash].append(blockLine)
check, self.provenance = nodes.calls(nodesHash, label)
elif typesLine == 'import':
check, self.provenance = nodes.imports(nodesHash, label)
elif typesLine == 'return':
check, self.provenance = nodes.calls(nodesHash, label)
elif typesLine == 'function_def':
count_def = count_def + 1
self.defs[nodesHash].append(blockLine)
args = self.arguments_selection(connecting, startLine)
text = map.getElement('function', startLine, blockLine, args)
check, self.provenance = nodes.calls(nodesHash, text)
elif typesLine in SyntaxWrite().getLoop():
count_loop = count_loop + 1
array = blockLine.split('\n')
condition = syntax.loops(typesLine, array[0])
text = map.getElement('label', startLine, typesLine)
check, self.provenance = nodes.loops(nodesHash, text, condition)
elif typesLine == 'if':
count_if = count_if + 1
array = blockLine.split('\n')
if 'elif' in array[0]:
nodesHash = self.create_Hash_Code(startLine, 'elif', columLine)
text = map.getElement('label', startLine, 'elif')
check, self.provenance = nodes.condition(nodesHash, text,
syntax.statementIf(array[0]))
else:
text = map.getElement('label', startLine, 'if')
check, self.provenance = nodes.condition(nodesHash, text,
syntax.statementIf(array[0]))
elif typesLine in SyntaxWrite().getTry() or blockLine == 'finally:':
count_try = count_try + 1
if typesLine == 'try':
text = map.getElement('label', startLine, 'try')
check, self.provenance = nodes.exceptions(nodesHash, text)
elif blockLine == 'finally:':
nodesHash = self.create_Hash_Code(startLine, 'finally', columLine)
text = map.getElement('label', startLine, 'finally')
check, self.provenance = nodes.calls(nodesHash, text)
elif typesLine == 'exception':
text = map.getElement('label', startLine, 'except')
check, self.provenance = nodes.exceptions(nodesHash, text)
elif 'else:' == blockLine:
nodesHash = self.create_Hash_Code(startLine, 'else', columLine)
text = map.getElement('label', startLine, 'else')
check, self.provenance = nodes.calls(nodesHash, text)
if check:
Dict = {str(columLine): nodesHash}
self.arrayHashing[startLine].append(Dict)
generic = '{}name{}'.format(startLine,
columLine)
element = '{}{}'.format(startLine, columLine)
self.hash_index.append(int(element))
self.generic_hash.append(generic)
self.block.append(blockLine)
self.last.append(finalLine)
self.start.append(startLine)
self.column.append(columLine)
self.node_else.append(None)
self.node_if.append(None)
self.node_hash.append(nodesHash)
self.type.append(typesLine)
def create_Function_List(self):
self.def_list = [None for i in self.node_hash]
self.limited()
def create_Boxes_List(self):
self.createBoxInClass()
self.createBoxesInFunctions()
def create_Array_List(self):
self.class_list = ['Main' for i in self.node_hash]
self.node_end_for = [None for i in self.node_hash]
self.node_for = [None for i in self.node_hash]
self.node_for_sup = [None for i in self.node_hash]
self.singleIF = [None for i in self.node_hash]
def create_Rules_List(self):
self.syntaxRulesIF()
self.syntaxRulesFOR()
self.syntaxRulesTRY()
def componentAnalyzer(self, sqlite, connecting, rows, data_set):
self.create_All_Nodes(rows, connecting)
self.create_Global_End_Node()
self.format_column()
self.create_Function_List()
self.create_Boxes_List()
self.create_Array_List()
self.create_Rules_List()
self.create_Function_End_List()
self.edge_Definition_and_Calls()
self.create_Elif_List()
self.edge_Back_in_Loops()
if data_set['activations_v'][0]:
execution = ExecutionProvenance(self.trial,
self.provenance,
self.node_hash,
self.start,
data_set,
sqlite,
None,
None,
self.arrayHashing)
execution.activations_provenance()
if data_set['contents_v'][0]:
execution = ExecutionProvenance(self.trial,
self.provenance,
self.node_hash,
self.start,
data_set,
sqlite,
self.column,
self.generic_hash,
self.arrayHashing)
execution.contents_provenance()
if data_set['checkpoints_v'][0]:
execution = ExecutionProvenance(self.trial,
self.provenance,
self.node_hash,
self.start,
data_set,
sqlite,
self.column,
self.generic_hash,
self.arrayHashing)
execution.runtime_provenance()
self.getPointCode()
if data_set['indented'][0]:
self.indented_label()
self.verify_function_check()
self.show()
self.linking_nodes_graph()
self.provenance.view()
| from collections import defaultdict
from capture.build.ConditionNodes import Nodes
from capture.build.GraphDrawer import GraphDrawer
from capture.build.Graphviz import Graphviz
from capture.build.HashingMapper import HashingMapper
from capture.build.SyntaxWrite import SyntaxWrite
from .ExecutionProvenanceCollector import ExecutionProvenance
from capture.collector.ExperimentDataCollector import ExperimentDataCollector
class DefinitionProvenanceAnalyzer:
def __init__(self, trial):
self.provenance = Graphviz(trial).initialize()
self.def_list = []
self.call = defaultdict(list)
self.defs = defaultdict(list)
self.visited_x = []
self.visited_y = []
self.def_method = defaultdict(list)
self.class_method = []
self.class_def_name = []
self.class_def_start = []
self.class_def_final = []
self.hash_index = []
self.trial = trial
''' Array List - Try components '''
self.try_except = defaultdict(list)
self.def_function = []
self.singleIF = []
self.generic_hash = []
self.def_function_final = []
self.def_function_after = []
''' Array List - Lines and code components'''
self.class_list = []
self.start = []
self.column = []
self.last = []
self.block = []
self.type = []
''' Array List - All nodes '''
self.node_hash = []
self.node_else = []
self.node_if = []
self.node_for = []
self.node_for_sup = []
self.node_end_for = []
self.arrayHashing = defaultdict(list)
def syntaxRulesIF(self):
"""
Step 1: format_Condition_Node
Step 2: create_Condition_Node()
Step 3: organize_Sequences_IF_ELSE()
Step 4: update_Sequences_IF_ELSE()
"""
""" Detect conditional structures within others """
self.format_Condition_Node()
""" Link conditional structures that belong to them """
self.create_Condition_Node()
""" Arrange and link IF and ELSE sequences """
self.organize_Sequences_IF_ELSE()
""" Report conditional items already covered and rearranged """
self.update_Sequences_IF_ELSE()
def syntaxRulesFOR(self):
"""
Step 1: get_Call_Back()
Step 2: get_Call_Ends()
Step 3: get_Last_Loop()
"""
""" Create a back edge within a loop [WHILE, FOR] """
self.get_Call_Back()
""" Create an ending edge within a loop [WHILE, FOR] """
self.get_Call_Ends()
""" Create and check for a next loop [WHILE, FOR] in sequence """
self.get_Last_Loop()
def syntaxRulesTRY(self):
"""
Step 1: check_Try()
Step 2: getCallTryException()
"""
self.check_Try()
self.getCallTryException()
def show(self):
# print('i|INDEX|START|LAST|NODE-HASH|NODE-ELSE|NODE-IF|NODE-END-FOR|NODE-FOR|COLUMN| DEF-LIST|\n')
for i in range(0, len(self.node_hash)):
string = '{}|{}* {} {} {} {} {} {} {} {} {} '.format(i, self.hash_index[i], self.start[i], self.last[i],
self.node_hash[i], self.node_else[i], self.node_if[i],
self.node_end_for[i], self.node_for[i], self.column[i],
self.def_list[i])
# print(string)
def createBoxesInFunctions(self):
"""
Create a boundary for the representation of functions.
Here, all content within functions is bounded by boxes, just as in classes.
"""
for index, node in enumerate(self.node_hash):
if 'function_def' in node:
border = SyntaxWrite().getIndexArray(self.last[index], self.start)
for index_2 in range(index, border + 1):
nameBoxes = 'cluster{}'.format(index)
with self.provenance.subgraph(name=nameBoxes) as subgroupA:
nodes = self.node_hash[index_2]
subgroupA.attr(style='dashed')
subgroupA.node(nodes)
condition = ['for', 'while', 'if', 'elif']
if any(x in nodes for x in condition):
subgroupA.node(nodes + 'c')
def createBoxInClass(self):
for cName, cStart, cLast in zip(self.class_def_name,
self.class_def_start,
self.class_def_final):
for index, nodes in enumerate(self.node_hash):
show_name = True
startNode = self.start[index]
getLimitX = startNode >= cStart
getLimitY = startNode <= cLast
if getLimitX and getLimitY:
nameCluster = 'cluster{}'.format(cName)
with self.provenance.subgraph(name=nameCluster) as subgroupA:
if show_name:
nameClass = 'Class {}'.format(cName)
subgroupA.attr(label=nameClass, style='dashed')
show_name = False
if 'function_def' in nodes:
functionStart = self.start[index]
functionFinal = self.last[index]
indexStart = index
functionName = 'cluster{}'.format(nodes)
with subgroupA.subgraph(name=functionName) as subgroupB:
subgroupA.attr(label='', style='dashed')
while True:
getLimitX = self.start[indexStart] >= functionStart
getLimitY = self.start[indexStart] <= functionFinal
if getLimitX and getLimitY:
subgroupB.node(self.node_hash[indexStart])
else:
break
indexStart = indexStart + 1
else:
subgroupA.node(self.node_hash[index])
def verify_function_check(self):
for index, node in enumerate(self.node_hash):
node_else = self.node_else[index]
node_function = self.def_list[index]
null_check = node_else is not None and node_function is not None
if node_else != node_function and null_check:
#self.node_else[index] = 'end{}'.format(node_function)
self.node_else[index] = node_function
def indented_label(self):
for index, node in enumerate(self.node_hash):
string_column = '{' + str(self.column[index]) + '}'
self.provenance.node(node, xlabel=string_column)
def format_column(self):
keys = defaultdict(list)
for index, item in enumerate(self.start):
keys[item].append(index)
for values in keys:
if len(keys[values]) > 1:
min_column = self.column[min(keys[values])]
for element in keys[values]:
self.column[element] = min_column
def linking_nodes_graph(self):
hash_loop = []
for i in range(1, len(self.node_hash) - 1):
current = self.node_hash[i]
next_node = self.node_hash[i + 1]
''' Visited node X (Any node)'''
visitX = (current not in self.visited_x)
''' Visited node Y (Any node)'''
visitY = (next_node not in self.visited_y)
''' Visited node Z (only loop node)'''
visitZ = (current not in hash_loop)
checking = visitX and visitY and visitZ
'''
Check limit in def function
if self.def_list[i] != None:
limite_1 = self.node_hash.index(self.def_list[i])
limite = self.last[limite_1]
check_return = self.start[i + 1] <= limite
'''
if checking:
if (('if' not in current) and ('else' not in current)) and (
(self.node_else[i] != None) and (self.node_for[i] != None)):
self.provenance.edge(current, self.node_for[i], style="dashed")
elif current in self.def_function_after:
continue
elif 'function_def' in next_node:
if next_node in self.def_function:
index_def_node = self.def_function.index(next_node)
self.provenance.edge(current, self.def_function_final[index_def_node])
elif 'if' in current:
self.provenance.edge(current, next_node, label=' True')
if '*' in self.node_else[i]:
hash_string = self.node_else[i]
if self.node_for[i] is not None:
hash_for = self.node_for[i]
item_false = self.node_hash.index(hash_string[0:len(hash_string) - 1])
if self.node_for[i] != self.node_for[item_false]:
self.provenance.edge(current, self.node_for[i], label=' False')
else:
self.provenance.edge(current, hash_string[0: len(hash_string) - 1], label=' False')
'''
elif '-' in self.node_else[i]:
lastNode = SyntaxWrite().getIndexArray(self.last[i], self.start)
self.provenance.edge(current, self.node_hash[lastNode], label=' False')
'''
elif 'try' in current:
self.provenance.edge(current, next_node)
elif 'exception' in current:
self.provenance.edge(self.node_if[i], self.node_hash[i])
self.provenance.edge(current, next_node)
elif 'else' in current:
self.provenance.edge(self.node_if[i], self.node_hash[i], label=' False')
self.provenance.edge(current, next_node)
elif 'for' in current or 'while' in current:
self.provenance.edge(current, next_node)
else:
if self.node_else[i] is None:
self.provenance.edge(current, next_node)
else:
self.provenance.edge(current, self.node_else[i])
self.createBoxesInFunctions()
def create_Global_End_Node(self):
self.node_hash.append('end')
self.node_else.append(None)
self.node_if.append(None)
self.last.append(self.last[-1] + 1)
self.last.append(self.last[-1] + 1)
self.start.append(self.last[-1] + 1)
self.column.append(0)
self.block.append('End')
self.type.append('end-code')
self.provenance.node('end', label='End')
element = '{}{}'.format(self.last[-1] + 1, 0)
self.hash_index.append(int(element))
self.generic_hash.append('{}name{}'.format(self.last[-1] + 1, 0))
return True
def start_node(self):
self.node_hash.append('start')
self.node_else.append(None)
self.node_if.append(None)
self.last.append(0)
self.start.append(0)
self.column.append(0)
self.block.append('Start')
self.type.append('start-code')
self.hash_index.append(0)
self.provenance.node('start', label='Start')
self.generic_hash.append('{}name{}'.format(0, 0))
return True
def arguments_selection(self, conn, start):
sqlite = ExperimentDataCollector(self.trial, conn)
return sqlite.selection_args(start)
def getCallTryException(self):
try:
for i in range(0, len(self.node_hash) - 1):
current = self.node_hash[i]
if 'try' in current:
intervalo_final = self.last[i]
intervalo_start = i
line_exception = -1
while True:
if intervalo_final <= self.start[intervalo_start]:
self.node_else[i] = self.node_hash[intervalo_start + 1]
break
if 'exception' in self.node_hash[intervalo_start]:
self.node_if[intervalo_start] = current
line_exception = intervalo_start
intervalo_start = intervalo_start + 1
if line_exception != -1:
self.node_else[line_exception - 1] = self.node_else[i]
except:
print('Error in update 4!')
def create_Condition_Node(self):
"""
insert tag in Condition Elements [node_if, node_elif, node_else]
dependencies: all, DefinitionProvenance Class
return: None
"""
for index, node in enumerate(self.node_hash):
if 'if' in node:
lastLoop = SyntaxWrite().getIndexArray(self.last[index], self.start)
for key in range(index + 1, lastLoop):
checkColumn = self.column[index] == self.column[key]
checkPosition = self.start[key] < self.last[index]
if SyntaxWrite().compareCondition(self.node_hash[key]):
if checkColumn and checkPosition:
self.node_if[key] = node
self.node_else[index] = self.node_hash[key]
self.last[key] = self.last[index]
def organize_Sequences_IF_ELSE(self):
"""
This method rearranges all conditions by binding them to their
respective blocks of code.
return: None
"""
for index, node in enumerate(self.node_hash):
there_Element = self.node_else[index] is not None
if there_Element and 'if' in node and 'else' in self.node_else[index]:
this_item = self.node_hash.index(self.node_else[index]) - 1
next_item = self.node_hash.index(self.node_else[index])
while True:
if 'else' in self.node_hash[next_item]:
node_else = self.node_else[next_item]
next_item = self.node_hash.index(node_else)
else:
self.node_else[this_item] = self.node_hash[next_item]
break
def format_Condition_Node(self):
"""
return: None
"""
""" This item checks whether a given IF has any ELSE related to it """
for key, node in enumerate(self.node_hash):
if SyntaxWrite().compareCondition(node):
idx = key + 1
while True:
if self.last[key] <= self.start[idx]:
self.node_else[key - 1] = self.node_hash[idx + 1]
self.node_else[key] = self.node_hash[idx + 1]
break
idx = idx + 1
""" This item checks already connected nodes and formats incorrectly linked nodes """
for key, node in enumerate(self.node_hash):
if SyntaxWrite().compareCondition(node):
lastElse = SyntaxWrite().getIndexArray(self.last[key], self.start)
lastNode = self.node_hash[lastElse]
value = lastElse
if SyntaxWrite().compareCondition(lastNode):
while True:
if SyntaxWrite().compareCondition(lastNode):
lastElse = SyntaxWrite().getIndexArray(self.last[lastElse], self.start)
value = lastElse - 1
lastNode = self.node_hash[lastElse]
break
else:
else_ = SyntaxWrite().getIndexArray(self.last[value], self.start)
self.provenance.edge(self.node_hash[else_], lastNode)
self.visited_x.append(self.node_hash[else_])
self.visited_y.append(lastNode)
self.node_else[else_] = lastNode
break
else:
self.node_else[value] = lastNode
else_ = SyntaxWrite().getIndexArray(self.last[value], self.start)
self.provenance.edge(self.node_hash[else_], lastNode)
def get_Call_Ends(self):
"""
Get the loop boundaries, ie the end nodes and return nodes.
dependencies: all, DefinitionProvenance class
return: None
"""
visitedArray = []
for index, currentNode in enumerate(self.node_hash):
if SyntaxWrite().compareLoop(currentNode):
nodeLoop = SyntaxWrite().getObjectArray(self.last[index], self.start)
nodeBack = self.node_hash[nodeLoop]
if nodeBack not in visitedArray:
visitedArray.append(nodeBack)
there_Element = self.node_for[nodeLoop] is not None
check_Element = self.node_for[nodeLoop] != currentNode
if there_Element and check_Element:
nodeNext = self.node_for[self.getReturnLoop(nodeLoop)]
self.provenance.edge(nodeBack, nodeNext, style='dashed')
def get_Call_Back(self):
"""
get the link back between the loop nodes
dependencies: all, DefinitionProvenance class
return: None
"""
visitedArray = []
for index in reversed(range(len(self.start))):
currentNode = self.node_hash[index]
if SyntaxWrite().compareLoop(currentNode):
linkedBack = SyntaxWrite().getObjectArray(self.last[index], self.start)
if linkedBack is not None and self.node_hash[linkedBack] not in visitedArray:
self.provenance.edge(self.node_hash[linkedBack], currentNode, style='dashed')
visitedArray.append(self.node_hash[linkedBack])
else:
continue
def edge_Back_in_Loops(self):
def return_object(element):
object: int = -1
for index in range(len(self.start) - 1, -1, -1):
if element == self.start[index]:
object = index
break
return object
for index, item in enumerate(self.node_hash):
if SyntaxWrite().compareLoop(item):
if self.last[index] in self.start:
index_loop = return_object(self.last[index])
for k in range(index, index_loop + 1):
self.node_for[k] = item
def get_Last_Loop(self):
for index, item in enumerate(self.node_hash):
if SyntaxWrite().compareLoop(item):
columnNode = self.column[index]
check = False
if self.last[index] in self.start:
lastNode = SyntaxWrite().getIndexArray(self.last[index], self.start)
if lastNode + 1 == len(self.node_hash):
lastNode -= 1
if columnNode == self.column[lastNode + 1]:
self.node_end_for[index] = self.node_hash[lastNode + 1]
self.provenance.edge(item, self.node_hash[lastNode + 1],
label=" End Loop")
check = True
self.visited_x.append(self.node_hash[lastNode])
self.visited_y.append(self.node_hash[lastNode + 1])
else:
indexNode = lastNode
while True:
if indexNode == 0:
break
else:
if SyntaxWrite.compareLoop(self.node_hash[indexNode]):
check_column = self.column[indexNode] < columnNode
if check_column:
self.provenance.edge(item, self.node_hash[indexNode],
label=" End Loop")
check = True
self.node_end_for[index] = self.node_hash[indexNode]
break
indexNode = indexNode - 1
# print(check)
def update_Sequences_IF_ELSE(self):
"""
This method appends a string to nodes that will be
ignored in the binding method.
return: None
"""
for key, node in enumerate(self.node_hash):
if 'if' in node and self.node_else[key] is None:
node_if = SyntaxWrite().getIndexArray(self.last[key], self.start)
if 'else' in self.node_hash[node_if + 1]:
self.node_else[key] = '{}*'.format(self.node_else[node_if + 1])
else:
self.node_else[key] = '{}*'.format(self.node_hash[node_if + 1])
def edge_Definition_and_Calls(self):
"""
This function is only enabled when there is a function in the script schema.
Link function definition with their respective calls.
return: None
"""
for keyDef in self.defs:
# self.provenance.node('start' + keyDef, 'Start', shape='Msquare')
# self.provenance.edge('start' + keyDef, keyDef)
for keyCall in self.call:
nameDef = self.defs[keyDef][0]
nameCall = self.call[keyCall][0]
if nameCall.find(nameDef) != -1:
#self.provenance.edge(keyCall, 'start' + keyDef, style='dashed')
self.provenance.edge(keyCall, keyDef, style='dashed')
def create_Elif_List(self):
for index, node in enumerate(self.node_hash):
if 'if' in node or 'elif' in node:
id_node = index
column = self.column[index]
for k in range(index + 1, len(self.node_hash)):
if 'elif' in self.node_hash[k] and column == self.column[k]:
self.node_else[id_node] = self.node_hash[k]
if self.def_list[id_node] == None:
self.provenance.edge(node, self.node_hash[k], label=' False')
else:
if self.def_list[id_node] == self.def_list[k]:
self.provenance.edge(node, self.node_hash[k], label=' False')
break
def create_Function_End_List(self):
for i in range(0, len(self.node_hash)):
if 'function_def' in self.node_hash[i]:
end_index = SyntaxWrite().getIndexArray(self.last[i], self.start)
self.def_function.append(self.node_hash[i])
self.def_function_final.append(self.node_hash[end_index + 1])
self.def_function_after.append(self.node_hash[end_index])
def getPointCode(self):
for index, node in enumerate(self.node_hash):
if self.def_list[index] is None and 'start' not in node:
self.provenance.edge('start', node)
break
def check_Try(self):
for index, node in enumerate(self.node_hash):
if 'try' in node:
try_node = node
try_column = self.column[index]
try_final = self.last[index]
for index2 in range(index + 1, len(self.node_hash)):
if try_final == self.start[index2]:
break
if 'exception' in self.node_hash[index2]:
if try_column == self.column[index2]:
self.try_except[try_node].append(self.node_hash[index2])
elif 'finally' in self.node_hash[index2]:
if try_column == self.column[index2]:
self.try_except[try_node].append(self.node_hash[index2])
for key in self.try_except:
count = len(self.try_except[key])
check = False
if count == 1:
check = 'finally' in self.try_except[key][0]
if count == 2:
check = 'finally' in self.try_except[key][1]
check_structure = False
if check:
element = self.node_hash.index(key)
last = SyntaxWrite().getIndexArray(self.last[element], self.start)
self.provenance.edge(key, self.try_except[key][1])
if 'exception' in self.try_except[key][count - 2]:
check_structure = True
exception_node = self.try_except[key][count - 2]
element = self.node_hash.index(exception_node) - 1
self.provenance.edge(self.node_hash[element], self.try_except[key][1])
self.visited_x.append(self.node_hash[element])
element = self.node_hash.index(key)
last = SyntaxWrite().getIndexArray(self.last[element], self.start)
self.provenance.edge(key, self.node_hash[last + 1], style='dashed')
if check_structure:
self.visited_y.append(self.node_hash[last + 1])
def limited_Class(self):
for index, node_class in enumerate(self.class_def_name):
indexLast = SyntaxWrite().getIndexArray(self.last[index], self.start)
def limited(self):
for index, node in enumerate(self.node_hash):
if 'function_def' in node:
index_start = index
index_final = SyntaxWrite().getIndexArray(self.last[index],
self.start)
for j in range(index_start, index_final + 1):
self.def_list[j] = node
def create_Hash_Code(self, x, y, z):
return '{}{}{}'.format(x, y, z)
def create_All_Nodes(self, rows, connecting):
syntax = Nodes()
map = HashingMapper()
nodes = GraphDrawer(self.provenance)
self.start_node()
count_class = 0
count_def = 0
count_loop = 0
count_if = 0
count_try = 0
for codes in rows:
check = False
startLine = codes[0]
finalLine = codes[1]
typesLine = codes[2]
blockLine = codes[3]
columLine = codes[4]
nodesHash = self.create_Hash_Code(startLine,
typesLine,
finalLine)
label = map.getElement('label',
startLine,
blockLine)
if nodesHash not in self.node_hash:
if typesLine in SyntaxWrite().getOthers():
check, self.provenance = nodes.assign(nodesHash, label)
elif typesLine == 'class_def':
count_class = count_class + 1
self.class_def_name.append(blockLine)
self.class_def_start.append(startLine)
self.class_def_final.append(finalLine)
elif SyntaxWrite().getCall(typesLine, blockLine):
self.call[nodesHash].append(blockLine)
check, self.provenance = nodes.calls(nodesHash, label)
elif typesLine == 'import':
check, self.provenance = nodes.imports(nodesHash, label)
elif typesLine == 'return':
check, self.provenance = nodes.calls(nodesHash, label)
elif typesLine == 'function_def':
count_def = count_def + 1
self.defs[nodesHash].append(blockLine)
args = self.arguments_selection(connecting, startLine)
text = map.getElement('function', startLine, blockLine, args)
check, self.provenance = nodes.calls(nodesHash, text)
elif typesLine in SyntaxWrite().getLoop():
count_loop = count_loop + 1
array = blockLine.split('\n')
condition = syntax.loops(typesLine, array[0])
text = map.getElement('label', startLine, typesLine)
check, self.provenance = nodes.loops(nodesHash, text, condition)
elif typesLine == 'if':
count_if = count_if + 1
array = blockLine.split('\n')
if 'elif' in array[0]:
nodesHash = self.create_Hash_Code(startLine, 'elif', columLine)
text = map.getElement('label', startLine, 'elif')
check, self.provenance = nodes.condition(nodesHash, text,
syntax.statementIf(array[0]))
else:
text = map.getElement('label', startLine, 'if')
check, self.provenance = nodes.condition(nodesHash, text,
syntax.statementIf(array[0]))
elif typesLine in SyntaxWrite().getTry() or blockLine == 'finally:':
count_try = count_try + 1
if typesLine == 'try':
text = map.getElement('label', startLine, 'try')
check, self.provenance = nodes.exceptions(nodesHash, text)
elif blockLine == 'finally:':
nodesHash = self.create_Hash_Code(startLine, 'finally', columLine)
text = map.getElement('label', startLine, 'finally')
check, self.provenance = nodes.calls(nodesHash, text)
elif typesLine == 'exception':
text = map.getElement('label', startLine, 'except')
check, self.provenance = nodes.exceptions(nodesHash, text)
elif 'else:' == blockLine:
nodesHash = self.create_Hash_Code(startLine, 'else', columLine)
text = map.getElement('label', startLine, 'else')
check, self.provenance = nodes.calls(nodesHash, text)
if check:
Dict = {str(columLine): nodesHash}
self.arrayHashing[startLine].append(Dict)
generic = '{}name{}'.format(startLine,
columLine)
element = '{}{}'.format(startLine, columLine)
self.hash_index.append(int(element))
self.generic_hash.append(generic)
self.block.append(blockLine)
self.last.append(finalLine)
self.start.append(startLine)
self.column.append(columLine)
self.node_else.append(None)
self.node_if.append(None)
self.node_hash.append(nodesHash)
self.type.append(typesLine)
def create_Function_List(self):
self.def_list = [None for i in self.node_hash]
self.limited()
def create_Boxes_List(self):
self.createBoxInClass()
self.createBoxesInFunctions()
def create_Array_List(self):
self.class_list = ['Main' for i in self.node_hash]
self.node_end_for = [None for i in self.node_hash]
self.node_for = [None for i in self.node_hash]
self.node_for_sup = [None for i in self.node_hash]
self.singleIF = [None for i in self.node_hash]
def create_Rules_List(self):
self.syntaxRulesIF()
self.syntaxRulesFOR()
self.syntaxRulesTRY()
def componentAnalyzer(self, sqlite, connecting, rows, data_set):
self.create_All_Nodes(rows, connecting)
self.create_Global_End_Node()
self.format_column()
self.create_Function_List()
self.create_Boxes_List()
self.create_Array_List()
self.create_Rules_List()
self.create_Function_End_List()
self.edge_Definition_and_Calls()
self.create_Elif_List()
self.edge_Back_in_Loops()
if data_set['activations_v'][0]:
execution = ExecutionProvenance(self.trial,
self.provenance,
self.node_hash,
self.start,
data_set,
sqlite,
None,
None,
self.arrayHashing)
execution.activations_provenance()
if data_set['contents_v'][0]:
execution = ExecutionProvenance(self.trial,
self.provenance,
self.node_hash,
self.start,
data_set,
sqlite,
self.column,
self.generic_hash,
self.arrayHashing)
execution.contents_provenance()
if data_set['checkpoints_v'][0]:
execution = ExecutionProvenance(self.trial,
self.provenance,
self.node_hash,
self.start,
data_set,
sqlite,
self.column,
self.generic_hash,
self.arrayHashing)
execution.runtime_provenance()
self.getPointCode()
if data_set['indented'][0]:
self.indented_label()
self.verify_function_check()
self.show()
self.linking_nodes_graph()
self.provenance.view() | en | 0.639825 | Array List - Try components Array List - Lines and code components Array List - All nodes Step 1: format_Condition_Node
Step 2: create_Condition_Node()
Step 3: organize_Sequences_IF_ELSE()
Step 4: update_Sequences_IF_ELSE() Detect conditional structures within others Link conditional structures that belong to them Arrange and link IF and ELSE sequences Report conditional items already covered and rearranged Step 1: get_Call_Back()
Step 2: get_Call_Ends()
Step 3: get_Last_Loop() Create a back edge within a loop [WHILE, FOR] Create an ending edge within a loop [WHILE, FOR] Create and check for a next loop [WHILE, FOR] in sequence Step 1: check_Try()
Step 2: getCallTryException() # print('i|INDEX|START|LAST|NODE-HASH|NODE-ELSE|NODE-IF|NODE-END-FOR|NODE-FOR|COLUMN| DEF-LIST|\n') # print(string) Create a boundary for the representation of functions.
Here, all content within functions is bounded by boxes, just as in classes. #self.node_else[index] = 'end{}'.format(node_function) Visited node X (Any node) Visited node Y (Any node) Visited node Z (only loop node) Check limit in def function
if self.def_list[i] != None:
limite_1 = self.node_hash.index(self.def_list[i])
limite = self.last[limite_1]
check_return = self.start[i + 1] <= limite elif '-' in self.node_else[i]:
lastNode = SyntaxWrite().getIndexArray(self.last[i], self.start)
self.provenance.edge(current, self.node_hash[lastNode], label=' False') insert tag in Condition Elements [node_if, node_elif, node_else]
dependencies: all, DefinitionProvenance Class
return: None This method rearranges all conditions by binding them to their
respective blocks of code.
return: None return: None This item checks whether a given IF has any ELSE related to it This item checks already connected nodes and formats incorrectly linked nodes Get the loop boundaries, ie the end nodes and return nodes.
dependencies: all, DefinitionProvenance class
return: None get the link back between the loop nodes
dependencies: all, DefinitionProvenance class
return: None # print(check) This method appends a string to nodes that will be
ignored in the binding method.
return: None This function is only enabled when there is a function in the script schema.
Link function definition with their respective calls.
return: None # self.provenance.node('start' + keyDef, 'Start', shape='Msquare') # self.provenance.edge('start' + keyDef, keyDef) #self.provenance.edge(keyCall, 'start' + keyDef, style='dashed') | 2.148962 | 2 |
test/output/071.py | EliRibble/pyfmt | 0 | 6620475 | <filename>test/output/071.py
import typing
def hello() -> typing.Optional[int]:
pass
| <filename>test/output/071.py
import typing
def hello() -> typing.Optional[int]:
pass
| none | 1 | 1.704903 | 2 | |
dHydra/Vendor/Xueqiu/const.py | yuzhucu/dHydra | 535 | 6620476 | # -*- coding: utf-8 -*-
"""
Const
Created on 02/22/2016
@description: Used for
@author: <NAME>
@contact: <EMAIL>
"""
# 股票指数与代码的转换表(无需修改)
INDEX_LABELS = ['sh', 'sz', 'hs300', 'sz50', 'cyb', 'zxb', 'zx300', 'zh500']
INDEX_LIST = {'sh': 'sh000001', 'sz': 'sz399001', 'hs300': 'sz399300',
'sz50': 'sh000016', 'zxb': 'sz399005', 'cyb': 'sz399006', 'zx300': 'sz399008', 'zh500':'sh000905'}
# 雪球常数
EX_NAME = {
'sha' : '沪A'
, 'shb' : '沪B'
, 'sza' : '深A'
, 'szb' : '深B'
, 'zxb' : '中小板'
, 'cyb' : '创业板'
} | # -*- coding: utf-8 -*-
"""
Const
Created on 02/22/2016
@description: Used for
@author: <NAME>
@contact: <EMAIL>
"""
# 股票指数与代码的转换表(无需修改)
INDEX_LABELS = ['sh', 'sz', 'hs300', 'sz50', 'cyb', 'zxb', 'zx300', 'zh500']
INDEX_LIST = {'sh': 'sh000001', 'sz': 'sz399001', 'hs300': 'sz399300',
'sz50': 'sh000016', 'zxb': 'sz399005', 'cyb': 'sz399006', 'zx300': 'sz399008', 'zh500':'sh000905'}
# 雪球常数
EX_NAME = {
'sha' : '沪A'
, 'shb' : '沪B'
, 'sza' : '深A'
, 'szb' : '深B'
, 'zxb' : '中小板'
, 'cyb' : '创业板'
} | zh | 0.38395 | # -*- coding: utf-8 -*- Const Created on 02/22/2016 @description: Used for @author: <NAME> @contact: <EMAIL> # 股票指数与代码的转换表(无需修改) # 雪球常数 | 1.920534 | 2 |
main.py | JTaeger/pomodoro_clock | 0 | 6620477 | <filename>main.py
from tkinter import *
import math
# ---------------------------- CONSTANTS ------------------------------- #
PINK = "#e2979c"
RED = "#e7305b"
GREEN = "#9bdeac"
YELLOW = "#f7f5dd"
FONT_NAME = "Courier"
WORK_MIN = 25
SHORT_BREAK_MIN = 5
LONG_BREAK_MIN = 20
reps = 8
ticks = 0
timer = NONE
# ---------------------------- TIMER RESET ------------------------------- #
def reset():
global timer
window.after_cancel(timer)
check_label.config(text="")
name_label.config(text="Timer", fg=GREEN)
canvas.itemconfig(count_text, text="00:00")
global reps
global ticks
reps = 8
ticks = 0
# ---------------------------- TIMER MECHANISM ------------------------------- #
def start_timer():
global reps
global ticks
if reps % 2 == 1 and reps != 1:
window.lift()
window.attributes("-topmost", True)
window.attributes("-topmost", False)
ticks += 1
name_label.config(text="Short Break", fg=PINK)
count_down(SHORT_BREAK_MIN * 60)
check_label.config(text=ticks * "✔")
elif reps % 2 == 0:
name_label.config(text="Work", fg=GREEN)
count_down(WORK_MIN * 60)
elif reps == 1:
window.lift()
window.attributes("-topmost", True)
window.attributes("-topmost", False)
ticks += 1
check_label.config(text=ticks * "✔")
name_label.config(text="Long Break", fg=RED)
count_down(LONG_BREAK_MIN * 60)
if reps == 0:
reset()
reps -= 1
# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #
def count_down(count):
minutes = math.floor(count / 60)
seconds = count % 60
if len(str(seconds)) < 2:
seconds = f"0{seconds}"
if len(str(minutes)) < 2:
minutes = f"0{minutes}"
canvas.itemconfig(count_text, text=f"{minutes}:{seconds}")
if count > 0:
global timer
timer = window.after(1000, count_down, count - 1)
minutes = count / 60
if count == 0:
start_timer()
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title("Pomodoro Clock")
window.config(padx=100, pady=50, bg=YELLOW)
canvas = Canvas(width=220, height=230, bg=YELLOW, highlightthickness=0)
tomato_img = PhotoImage(file="tomato.png")
canvas.create_image(100, 133, image=tomato_img)
count_text = canvas.create_text(
100, 130, text="00:00", fill="white", font=(FONT_NAME, 35, "bold")
)
canvas.grid(column=1, row=1)
name_label = Label(text="Timer", fg=GREEN, font=(FONT_NAME, 35), bg=YELLOW)
name_label.grid(column=1, row=0)
check_label = Label(text="", fg=GREEN, font=(FONT_NAME, 30), bg=YELLOW)
check_label.grid(column=1, row=3)
start_button = Button(text="Start", command=start_timer, highlightbackground=YELLOW)
start_button.grid(column=0, row=2)
reset_button = Button(text="Reset", command=reset, highlightbackground=YELLOW)
reset_button.grid(column=2, row=2)
window.mainloop() | <filename>main.py
from tkinter import *
import math
# ---------------------------- CONSTANTS ------------------------------- #
PINK = "#e2979c"
RED = "#e7305b"
GREEN = "#9bdeac"
YELLOW = "#f7f5dd"
FONT_NAME = "Courier"
WORK_MIN = 25
SHORT_BREAK_MIN = 5
LONG_BREAK_MIN = 20
reps = 8
ticks = 0
timer = NONE
# ---------------------------- TIMER RESET ------------------------------- #
def reset():
global timer
window.after_cancel(timer)
check_label.config(text="")
name_label.config(text="Timer", fg=GREEN)
canvas.itemconfig(count_text, text="00:00")
global reps
global ticks
reps = 8
ticks = 0
# ---------------------------- TIMER MECHANISM ------------------------------- #
def start_timer():
global reps
global ticks
if reps % 2 == 1 and reps != 1:
window.lift()
window.attributes("-topmost", True)
window.attributes("-topmost", False)
ticks += 1
name_label.config(text="Short Break", fg=PINK)
count_down(SHORT_BREAK_MIN * 60)
check_label.config(text=ticks * "✔")
elif reps % 2 == 0:
name_label.config(text="Work", fg=GREEN)
count_down(WORK_MIN * 60)
elif reps == 1:
window.lift()
window.attributes("-topmost", True)
window.attributes("-topmost", False)
ticks += 1
check_label.config(text=ticks * "✔")
name_label.config(text="Long Break", fg=RED)
count_down(LONG_BREAK_MIN * 60)
if reps == 0:
reset()
reps -= 1
# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #
def count_down(count):
    """Render `count` seconds as MM:SS on the canvas and re-arm a 1 s tick.

    When the countdown reaches zero, the next pomodoro phase is started via
    start_timer().  Uses the module-level `canvas`, `count_text`, `window`,
    and stores the pending callback in the global `timer` so reset() can
    cancel it.
    """
    # divmod + :02d zero-pads both fields, replacing the old manual
    # str-length checks; a dead `minutes = count / 60` reassignment that
    # followed the scheduling call was removed.
    minutes, seconds = divmod(count, 60)
    canvas.itemconfig(count_text, text=f"{minutes:02d}:{seconds:02d}")
    if count > 0:
        global timer
        timer = window.after(1000, count_down, count - 1)
    if count == 0:
        start_timer()
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title("Pomodoro Clock")
window.config(padx=100, pady=50, bg=YELLOW)
# Canvas hosts the tomato image with the countdown text drawn on top of it.
canvas = Canvas(width=220, height=230, bg=YELLOW, highlightthickness=0)
tomato_img = PhotoImage(file="tomato.png")  # kept at module level so it is not garbage-collected
canvas.create_image(100, 133, image=tomato_img)
count_text = canvas.create_text(
    100, 130, text="00:00", fill="white", font=(FONT_NAME, 35, "bold")
)
canvas.grid(column=1, row=1)
# Phase label ("Timer" / "Work" / "Short Break" / "Long Break").
name_label = Label(text="Timer", fg=GREEN, font=(FONT_NAME, 35), bg=YELLOW)
name_label.grid(column=1, row=0)
# One check mark per completed work session (updated in start_timer).
check_label = Label(text="", fg=GREEN, font=(FONT_NAME, 30), bg=YELLOW)
check_label.grid(column=1, row=3)
start_button = Button(text="Start", command=start_timer, highlightbackground=YELLOW)
start_button.grid(column=0, row=2)
reset_button = Button(text="Reset", command=reset, highlightbackground=YELLOW)
reset_button.grid(column=2, row=2)
window.mainloop()
pathplan/evaluation.py | ThisChessPlayer/LaserVision | 2 | 6620478 | <gh_stars>1-10
'''
Contains all methods for evaluating the performance of a path
'''
import sys, time, os, struct, json, fnmatch
from pathplan.geo import load_shapefile, load_altfile
from shapely.geometry import LineString, Polygon
from shapely.strtree import STRtree
from scipy.interpolate import interp1d
from scipy.integrate import quad
import numpy as np
import json
"""
Utility functions to allow for computing MSE of expected and actual waypoints
of the path when running through simulation or in real time. This file also
includes functions to add noise to waypoints to test. For example, the
default noise function over a mean of 0 and a std of 1 will give a MSE of
around 1 usually.
NOTE: This file uses Generators, Lists, Numpy Arrays interchangely, but
will do conversions from generators to lists to numpy arrays if necessary.
NOTE: This code was written using Python 3 so Python 2 will probably cause
some errors with generators in this file.
"""
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import json
import pyproj
import sys
import math
import types
'''
Returns a list of LineStrings indicating the sections of the
path that intersect with the digital surface map
'''
def calculate_intersections(path, rtree, alts, buf=0):
    """Return the path segments that dip below the digital surface map.

    A candidate intersection is kept when any of its vertices lies at or
    below the tile altitude (plus the safety buffer `buf`).
    """
    line = LineString(path)
    hits = []
    for candidate in rtree.query(line):
        overlap = candidate.intersection(line)
        if overlap.is_empty:
            continue
        threshold = alts[overlap.wkt] + buf
        if any(z <= threshold for _x, _y, z in overlap.coords):
            hits.append(overlap)
    return hits
def generator_to_list(array):
    """Materialize *array* into a list when it is a generator; otherwise return it unchanged."""
    return list(array) if isinstance(array, types.GeneratorType) else array
def to_np_array(array):
    """Coerce *array* to a numpy array, passing existing ndarrays through untouched."""
    return array if isinstance(array, np.ndarray) else np.array(array)
def read_path_from_json(filepath):
    """
    Parse a json file containing data points for a path. Expects the file
    to have mappings to `longitude`, `latitude`, and `altitude`.

    NOTE(review): ``utm_proj`` and ``wgs84`` are neither defined nor
    imported in this module, so calling this function raises NameError —
    confirm where they are meant to come from.

    Returns:
        A generator containing all parsed data points (x=lon, y=lat, z=alt)
    """
    X = "longitude"
    Y = "latitude"
    Z = "altitude"
    # pyproj.transform re-projects each WGS84 record into the UTM zone
    # selected per point by utm_proj.
    proj = lambda pt: utm_proj(pt[X], pt[Y])
    cartesian = lambda pt: pyproj.transform(wgs84, proj(pt), pt[X], pt[Y], pt[Z])
    xyz = lambda pt: np.array(*[cartesian(pt)])
    points = json.load(open(filepath))
    return map(xyz, points)
def default_noise(val=0):
    """Return *val* jittered by one draw from a Gaussian with mean 0 and std 1.5."""
    jitter = np.random.normal(0, 1.5)
    return val + jitter
def gen_noise_points_static(waypoints, noise=lambda x: x + np.random.normal(0, 0.00005)):
    """Yield each waypoint shifted by a freshly sampled offset.

    The offset is ``noise(0)``, evaluated once per point, so every point
    gets an independent perturbation.  This is the preferred way to build a
    noisy copy of a planned path.

    Args:
        waypoints: iterable of np-array points.
        noise: callable producing the additive offset when called with 0.
    """
    yield from (waypoint + noise(0) for waypoint in waypoints)
def gen_noise_points(waypoints, noise=default_noise):
    """ [Deprecated]
    Perturb each waypoint sideways: for every consecutive pair p[i], p[i+1],
    offset p[i] along the horizontal perpendicular of the segment by a
    random factor drawn from *noise*.  The last point is yielded unchanged.
    """
    up = np.array([0, 0, 1])  # altitude is stored in the z-coordinate
    points = map(np.array, waypoints)
    previous = next(points)
    for current in points:
        segment = current - previous
        sideways = np.cross(segment, up)
        yield previous + sideways * noise()
        previous = current
    yield previous
def norm(vec):
    """Euclidean (L2) norm of *vec*."""
    length = np.linalg.norm(vec)
    return length
def get_dist_between_points(points, scale=1):
    """Yield the (optionally scaled) distance between each consecutive pair of points."""
    iterator = iter(points)
    try:
        previous = next(iterator)
    except StopIteration:
        return
    for current in iterator:
        yield norm(current - previous) * scale
        previous = current
def total_dist(path):
    """Total length of *path*: the sum of consecutive point-to-point distances."""
    segment_lengths = get_dist_between_points(path)
    return sum(segment_lengths)
def get_nearest_point_from(pt, list_of_points, excluded):
    """Return the point of *list_of_points* closest to *pt*, skipping used ones.

    Args:
        pt: query point (np array).
        list_of_points: candidate points (np arrays).
        excluded: set of tuple-ized points to skip.  (Renamed from ``set``,
            which shadowed the builtin; the in-file caller passes it
            positionally.)
    Returns:
        The nearest non-excluded point, or None if every candidate is excluded.
    """
    # NOTE: Replace with octree/kd-tree for better performance in future:
    best_dist = sys.float_info.max
    best = None
    for candidate in list_of_points:
        if tuple(candidate) in excluded:
            continue
        dist = norm(pt - candidate)
        if dist < best_dist:
            best_dist = dist
            best = candidate
    return best
def gen_path_via_nearest_points(planned, flown):
    """For each planned point, yield the closest not-yet-used flown point."""
    consumed = set()
    for waypoint in planned:
        match = get_nearest_point_from(waypoint, flown, consumed)
        consumed.add(tuple(match))
        yield match
from pathplan.viz import build_distance_lists
def area_between_curves(first, second, max_dist=None):
    """Absolute difference of the areas under two distance/value curves.

    Each path is flattened to (distance, value) lists via
    build_distance_lists, linearly interpolated, and integrated on
    [0, max_dist].

    Args:
        first, second: paths accepted by build_distance_lists.
        max_dist: upper integration bound; defaults to the shared domain
            (the smaller of the two curves' final distances).
    Returns:
        abs(area(first) - area(second)) as a float.
    """
    fx, fy = build_distance_lists(first)
    sx, sy = build_distance_lists(second)
    if max_dist is None:  # fixed: was `== None` (compare to None with `is`)
        max_dist = min(fx[-1], sx[-1])
    f1 = interp1d(fx, fy)
    f2 = interp1d(sx, sy)
    farea, ferror = quad(f1, 0, max_dist)
    sarea, serror = quad(f2, 0, max_dist)
    return abs(farea - sarea)
def linear_interpolation(xs, ys):
    """Resample (xs, ys) onto ~1000 evenly spaced x positions by linear interpolation.

    Returns:
        (grid, values) where grid is an np.arange over [xs[0], xs[-1]) and
        values are the interpolated samples at those positions.
    """
    resample = interp1d(xs, ys)
    step = abs(xs[0] - xs[-1]) / 1000
    grid = np.arange(xs[0], xs[-1], step)
    values = [resample(x) for x in grid]
    return grid, values
def mse(expected, actual):
    """
    Mean squared error of expected and actual waypoints.

    Both paths are flattened to distance curves and resampled onto ~1000
    points via linear_interpolation before comparison.

    NOTE(review): the final ``.sum(axis=1)`` returns per-row *sums* of
    squared differences (one entry for the x row, one for the y row), not
    a scalar mean — confirm whether ``.mean()`` was intended.

    Args:
        expected - A list/generator/np-array of planned waypoints.
        actual - The list/generator/np-array of points that we flew to.
    Returns:
        The mean squared error
    """
    fx, fy = build_distance_lists(expected)
    sx, sy = build_distance_lists(actual)
    exp_interp = np.array(linear_interpolation(fx,fy))
    act_interp = np.array(linear_interpolation(sx,sy))
    return ((exp_interp - act_interp)**2).sum(axis=1)  # see NOTE in docstring
def calc_errors_with_gen_noise(filepath, metric=mse):
    """Score the stored path against a noise-perturbed copy of itself using *metric*."""
    planned = list(read_path_from_json(filepath))
    noisy = list(gen_noise_points(planned))
    return metric(expected=planned, actual=noisy)
def get_individual_stats(name, path):
    """One text blurb with the point count and total distance of *path*."""
    as_array = np.array(path)
    return "len({0}) = {1}\n{0} total distance: {2}".format(name, len(path), total_dist(as_array))
def get_comparison_stats(p1, p2, name1, name2, metrics=[("Area", area_between_curves), ("SSE", mse)]):
    """Format each (label, metric) comparison of p1 and p2 as one line of text."""
    lines = []
    for label, fn in metrics:
        score = fn(p1, p2)
        lines.append('{0} between {1} and {2} = {3}'.format(label, name1, name2, score))
    return '\n'.join(lines)
def print_comparison_info(planned, flown, name1="planned", name2="flown", metrics=[("Area", area_between_curves)]):
    """Print point counts, total distances, and each configured metric for two paths."""
    planned = list(map(to_np_array, planned))
    flown = list(map(to_np_array, flown))
    print("Path Debug")
    print(" len({0}) = {1}".format(name1, len(planned)))
    print(" len({0}) = {1}".format(name2, len(flown)))
    print(" {0} Path Total distance: {1}".format(name1, total_dist(planned)))
    print(" {0} Path Total distance: {1}".format(name2, total_dist(flown)))
    for name, metric in metrics:
        print(" Error based on {0} = {1}".format(name, metric(planned, flown)))
#def display_two_paths(one, two):
# """
# Args:
# path_one - List of waypoints in format [(x, y, z), (x, y, z), ...]
# path_two - List of waypoints in format [(x, y, z), (x, y, z), ...]
# """
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.plot(*np.array(one).T, 'k-', color='b', linewidth=1.0)
# ax.plot(*np.array(two).T, 'k-', color='r', linewidth=1.0)
# plt.show()
def display_gen_noise_path_with_file(filepath):
    """Plot the stored path together with a noise-perturbed copy of it.

    NOTE(review): ``display_two_paths`` is commented out above in this
    module, so this call raises NameError at runtime — restore that helper
    before using this function.
    """
    waypoints = list(read_path_from_json(filepath))
    noise_pts = list(gen_noise_points(waypoints))
    display_two_paths(waypoints, noise_pts)
def display_surface_with_file(filepath):
    """
    Displays a graph of the error surface between input path and a path
    generated by adding some noise to the input path.

    NOTE(review): ``display_surface`` is not defined in this module, so the
    final call raises NameError — confirm where it should come from.

    Args:
        filepath - JSON file containing the path itself
    """
    waypoints = list(read_path_from_json(filepath))
    noise_pts = list(gen_noise_points_static(waypoints))
    display_surface(waypoints, noise_pts)
def main():
    """Load a planned and a flown path, align them point-by-point, and open a 3-D figure."""
    planned = list(read_path_from_json("output/path.json"))
    flown = read_path_from_json("output/min_alt_2.flight.json")
    # NOTE: altitude in output/min_alt_2.flight.json adds 584
    flown = list(map(lambda xyz: np.array([xyz[0], xyz[1], xyz[2] - 584.0]), flown))
    flown = list(gen_path_via_nearest_points(planned, flown))
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    #ax.plot(*np.array(planned).T, 'o', color='b')
    plt.show()  # NOTE(review): nothing is drawn on `ax` — the plot call above is commented out
    # print_planned_and_flown_path_debug_info(planned, flown)
    # display_surface(planned, flown)
    # p = list(read_path_from_json("output/path.json"))
    # flown = list(read_path_from_json("output/min_alt_2.flight.json"))
    # display_two_paths(p, flown)
# Uncomment to test
# if __name__ == "__main__":
# main()
| '''
Contains all methods for evaluating the performance of a path
'''
import sys, time, os, struct, json, fnmatch
from pathplan.geo import load_shapefile, load_altfile
from shapely.geometry import LineString, Polygon
from shapely.strtree import STRtree
from scipy.interpolate import interp1d
from scipy.integrate import quad
import numpy as np
import json
"""
Utility functions to allow for computing MSE of expected and actual waypoints
of the path when running through simulation or in real time. This file also
includes functions to add noise to waypoints to test. For example, the
default noise function over a mean of 0 and a std of 1 will give a MSE of
around 1 usually.
NOTE: This file uses Generators, Lists, Numpy Arrays interchangely, but
will do conversions from generators to lists to numpy arrays if necessary.
NOTE: This code was written using Python 3 so Python 2 will probably cause
some errors with generators in this file.
"""
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import json
import pyproj
import sys
import math
import types
'''
Returns a list of LineStrings indicating the sections of the
path that intersect with the digital surface map
'''
def calculate_intersections(path, rtree, alts, buf=0):
intersected = []
ls = LineString(path)
tile = rtree.query(ls)
for pot in tile:
inter = pot.intersection(ls)
if not inter.is_empty:
alt = alts[inter.wkt] + buf
for x,y,z in inter.coords:
if z <= alt:
intersected.append(inter)
break
return intersected
def generator_to_list(array):
if isinstance(array, types.GeneratorType):
return list(array)
return array
def to_np_array(array):
if not isinstance(array, np.ndarray):
return np.array(array)
return array
def read_path_from_json(filepath):
"""
Parse a json file containing data points for a path. Expects the file
to have mappings to `longitude`, `latitude`, and `altitude`
Returns:
A generator containing all parsed data points (x=lon, y=lat, z=alt)
"""
X = "longitude"
Y = "latitude"
Z = "altitude"
proj = lambda pt: utm_proj(pt[X], pt[Y])
cartesian = lambda pt: pyproj.transform(wgs84, proj(pt), pt[X], pt[Y], pt[Z])
xyz = lambda pt: np.array(*[cartesian(pt)])
points = json.load(open(filepath))
return map(xyz, points)
def default_noise(val=0):
return val + np.random.normal(0, 1.5)
def gen_noise_points_static(waypoints, noise=lambda x: x + np.random.normal(0, 0.00005)):
"""
Generates a new path by adding a static noise to all points in the
original path; which is done via generator. This is the current
preferred way to generate noisy points from our planned path.
Args:
waypoints - a list of waypoints with each point a np-array
"""
for pt in waypoints:
yield pt + noise(0)
def gen_noise_points(waypoints, noise=default_noise):
""" [Deprecated]
For each point in waypoints, generate a new line perpendicular to it
using point[i] and point[i+1] as the line. Having this line, select
randomly one of the nonzero values on this line and add it to the
original point[i] to generate a new point in space.
"""
UP = np.array([0, 0, 1]) # altitude is stored in z-coordinate
waypoints = map(np.array, waypoints)
past_point = next(waypoints)
for pt in waypoints:
line = pt - past_point
perpendicular = np.cross(line, UP)
noise_line = perpendicular * noise()
yield noise_line + past_point
past_point = pt
yield past_point
def norm(vec):
return np.linalg.norm(vec)
def get_dist_between_points(points, scale=1):
prev = None
for pt in points:
if prev is not None:
yield norm(pt - prev) * scale
prev = pt
def total_dist(path):
return sum(get_dist_between_points(path))
def get_nearest_point_from(pt, list_of_points, set):
# NOTE: Replace with octree/kd-tree for better performance in future:
minlen = sys.float_info.max
minval = None
for other in list_of_points:
if tuple(other) in set:
continue
length = norm(pt - other)
if length < minlen:
minlen = length
minval = other
return minval
def gen_path_via_nearest_points(planned, flown):
used_pts = set()
for pt in planned:
found_pt = get_nearest_point_from(pt, flown, used_pts)
used_pts.add(tuple(found_pt))
yield found_pt
from pathplan.viz import build_distance_lists
def area_between_curves(first, second, max_dist=None):
fx, fy = build_distance_lists(first)
sx, sy = build_distance_lists(second)
if max_dist == None:
max_dist = min(fx[-1], sx[-1])
f1 = interp1d(fx, fy)
f2 = interp1d(sx, sy)
farea, ferror = quad(f1, 0, max_dist)
sarea, serror = quad(f2, 0, max_dist)
return abs(farea - sarea)
def linear_interpolation(xs, ys):
y_interp = interp1d(xs, ys)
new_xs = np.arange(xs[0], xs[-1], abs(xs[0]-xs[-1]) / 1000)
fake_ys = [y_interp(x) for x in new_xs]
return new_xs, fake_ys
def mse(expected, actual):
"""
Mean squared error of expected and actual waypoints.
Args:
expected - A list/generator/np-array of planned waypoints.
actual - The list/generator/np-array of points that we flew to.
Returns:
The mean squared error
"""
fx, fy = build_distance_lists(expected)
sx, sy = build_distance_lists(actual)
exp_interp = np.array(linear_interpolation(fx,fy))
act_interp = np.array(linear_interpolation(sx,sy))
return ((exp_interp - act_interp)**2).sum(axis=1) # avg along columns
def calc_errors_with_gen_noise(filepath, metric=mse):
waypoints = list(read_path_from_json(filepath))
noise_pts = list(gen_noise_points(waypoints))
return metric(expected=waypoints, actual=noise_pts)
def get_individual_stats(name, path):
return "len({0}) = {1}\n{0} total distance: {2}".format(name, len(path), total_dist(np.array(path)))
def get_comparison_stats(p1, p2, name1, name2, metrics=[("Area", area_between_curves), ("SSE", mse)]):
vals = []
for name, metric in metrics:
val = metric(p1, p2)
vals.append('{0} between {1} and {2} = {3}'.format(name, name1, name2,val))
return '\n'.join(vals)
def print_comparison_info(planned, flown, name1="planned", name2="flown", metrics=[("Area", area_between_curves)]):
planned = list(map(to_np_array, planned))
flown = list(map(to_np_array, flown))
print("Path Debug")
print(" len({0}) = {1}".format(name1, len(planned)))
print(" len({0}) = {1}".format(name2, len(flown)))
print(" {0} Path Total distance: {1}".format(name1, total_dist(planned)))
print(" {0} Path Total distance: {1}".format(name2, total_dist(flown)))
for name, metric in metrics:
print(" Error based on {0} = {1}".format(name, metric(planned, flown)))
#def display_two_paths(one, two):
# """
# Args:
# path_one - List of waypoints in format [(x, y, z), (x, y, z), ...]
# path_two - List of waypoints in format [(x, y, z), (x, y, z), ...]
# """
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.plot(*np.array(one).T, 'k-', color='b', linewidth=1.0)
# ax.plot(*np.array(two).T, 'k-', color='r', linewidth=1.0)
# plt.show()
def display_gen_noise_path_with_file(filepath):
waypoints = list(read_path_from_json(filepath))
noise_pts = list(gen_noise_points(waypoints))
display_two_paths(waypoints, noise_pts)
def display_surface_with_file(filepath):
"""
Displays a graph of the error surface between input path and a path
generated by adding some noise to the input path.
Args:
filepath - JSON file containing the path itself
"""
waypoints = list(read_path_from_json(filepath))
noise_pts = list(gen_noise_points_static(waypoints))
display_surface(waypoints, noise_pts)
def main():
planned = list(read_path_from_json("output/path.json"))
flown = read_path_from_json("output/min_alt_2.flight.json")
# NOTE: altitude in output/min_alt_2.flight.json adds 584
flown = list(map(lambda xyz: np.array([xyz[0], xyz[1], xyz[2] - 584.0]), flown))
flown = list(gen_path_via_nearest_points(planned, flown))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#ax.plot(*np.array(planned).T, 'o', color='b')
plt.show()
# print_planned_and_flown_path_debug_info(planned, flown)
# display_surface(planned, flown)
# p = list(read_path_from_json("output/path.json"))
# flown = list(read_path_from_json("output/min_alt_2.flight.json"))
# display_two_paths(p, flown)
# Uncomment to test
# if __name__ == "__main__":
# main() | en | 0.752792 | Contains all methods for evaluating the performance of a path Utility functions to allow for computing MSE of expected and actual waypoints of the path when running through simulation or in real time. This file also includes functions to add noise to waypoints to test. For example, the default noise function over a mean of 0 and a std of 1 will give a MSE of around 1 usually. NOTE: This file uses Generators, Lists, Numpy Arrays interchangely, but will do conversions from generators to lists to numpy arrays if necessary. NOTE: This code was written using Python 3 so Python 2 will probably cause some errors with generators in this file. Returns a list of LineStrings indicating the sections of the path that intersect with the digital surface map Parse a json file containing data points for a path. Expects the file to have mappings to `longitude`, `latitude`, and `altitude` Returns: A generator containing all parsed data points (x=lon, y=lat, z=alt) Generates a new path by adding a static noise to all points in the original path; which is done via generator. This is the current preferred way to generate noisy points from our planned path. Args: waypoints - a list of waypoints with each point a np-array [Deprecated] For each point in waypoints, generate a new line perpendicular to it using point[i] and point[i+1] as the line. Having this line, select randomly one of the nonzero values on this line and add it to the original point[i] to generate a new point in space. # altitude is stored in z-coordinate # NOTE: Replace with octree/kd-tree for better performance in future: Mean squared error of expected and actual waypoints. Args: expected - A list/generator/np-array of planned waypoints. actual - The list/generator/np-array of points that we flew to. Returns: The mean squared error # avg along columns #def display_two_paths(one, two): # """ # Args: # path_one - List of waypoints in format [(x, y, z), (x, y, z), ...] 
# path_two - List of waypoints in format [(x, y, z), (x, y, z), ...] # """ # fig = plt.figure() # ax = fig.add_subplot(111, projection='3d') # ax.plot(*np.array(one).T, 'k-', color='b', linewidth=1.0) # ax.plot(*np.array(two).T, 'k-', color='r', linewidth=1.0) # plt.show() Displays a graph of the error surface between input path and a path generated by adding some noise to the input path. Args: filepath - JSON file containing the path itself # NOTE: altitude in output/min_alt_2.flight.json adds 584 #ax.plot(*np.array(planned).T, 'o', color='b') # print_planned_and_flown_path_debug_info(planned, flown) # display_surface(planned, flown) # p = list(read_path_from_json("output/path.json")) # flown = list(read_path_from_json("output/min_alt_2.flight.json")) # display_two_paths(p, flown) # Uncomment to test # if __name__ == "__main__": # main() | 2.676155 | 3 |
tests/test_texteditor.py | jpsca/texteditor | 6 | 6620479 | <reponame>jpsca/texteditor<gh_stars>1-10
import os
from unittest.mock import MagicMock
import pytest
import texteditor
from texteditor import EDITOR
def test_EDITOR_used():
    """texteditor.open() must invoke the command named by $EDITOR on a temp .txt file."""
    os.environ[EDITOR] = "/path/to/myeditor"
    real_run = texteditor.run
    texteditor.run = MagicMock()
    texteditor.open()
    (cmd,), _kwargs = texteditor.run.call_args
    assert cmd[0] == "/path/to/myeditor"
    assert cmd[-1].endswith(".txt")  # the filename
    texteditor.run = real_run
def test_EDITOR_with_args():
    """$EDITOR may contain extra arguments; they are split into the command list."""
    os.environ[EDITOR] = "/path/to/myeditor --wait"
    _run = texteditor.run  # keep the real runner so it can be restored below
    texteditor.run = MagicMock()
    texteditor.open()
    args, kw = texteditor.run.call_args
    cmd = args[0]
    assert cmd[0] == "/path/to/myeditor"
    assert cmd[1] == "--wait"
    assert cmd[-1].endswith(".txt")  # the filename
    texteditor.run = _run
def test_EDITOR_with_args_and_spaces():
    """A backslash-escaped space inside the editor path must survive the argument split."""
    os.environ[EDITOR] = "/path\\ to/myeditor --wait -n"
    _run = texteditor.run  # keep the real runner so it can be restored below
    texteditor.run = MagicMock()
    texteditor.open()
    args, kw = texteditor.run.call_args
    cmd = args[0]
    assert cmd[0] == "/path to/myeditor"
    assert cmd[1] == "--wait"
    assert cmd[2] == "-n"
    assert cmd[-1].endswith(".txt")  # the filename
    texteditor.run = _run
def test_EDITOR_with_quoted_cmd():
    """A double-quoted editor path containing spaces is kept as a single argv entry."""
    os.environ[EDITOR] = '"/path to/myeditor" --wait'
    _run = texteditor.run  # keep the real runner so it can be restored below
    texteditor.run = MagicMock()
    texteditor.open()
    args, _ = texteditor.run.call_args
    cmd = args[0]
    assert cmd[0] == "/path to/myeditor"
    texteditor.run = _run
def test_set_extension():
    """The `extension` argument controls the suffix of the temp file handed to the editor."""
    os.environ[EDITOR] = "myeditor"
    _run = texteditor.run  # keep the real runner so it can be restored below
    texteditor.run = MagicMock()
    texteditor.open(extension="md")
    args, kw = texteditor.run.call_args
    cmd = args[0]
    assert cmd[0] == "myeditor"
    assert cmd[-1].endswith(".md")  # the filename
    texteditor.run = _run
def test_use_filename():
    """An explicit `filename` argument is passed through to the editor command.

    BUG FIX: the original saved ``texteditor.run`` *after* installing the
    MagicMock, so the "restore" at the end reinstalled the mock and leaked
    it into subsequent tests.  The real callable is now saved first.
    """
    os.environ[EDITOR] = "myeditor"
    _run = texteditor.run
    texteditor.run = MagicMock()
    texteditor.open(filename="README.md")
    args, kw = texteditor.run.call_args
    cmd = args[0]
    assert cmd[0] == "myeditor"
    assert cmd[-1].endswith("README.md")  # the filename
    texteditor.run = _run
def test_get_editor():
    """With $EDITOR empty, some fallback editor command is still discovered.

    BUG FIX: the original never restored ``texteditor.run``, so the
    MagicMock leaked into every test that ran afterwards.  The real
    callable is now saved before mocking and reinstalled at the end.
    """
    os.environ[EDITOR] = ""
    _run = texteditor.run
    texteditor.run = MagicMock()
    texteditor.open()
    args, kw = texteditor.run.call_args
    cmd = args[0]
    assert cmd[0]
    texteditor.run = _run
def test_no_editor_available():
    """open() raises RuntimeError when no editor can be located at all."""
    os.environ[EDITOR] = ""
    saved_which = texteditor.which
    texteditor.which = lambda _name: None  # pretend nothing is on PATH
    saved_run = texteditor.run
    texteditor.run = MagicMock()
    # inconceivable!
    with pytest.raises(RuntimeError):
        texteditor.open()
    texteditor.which = saved_which
    texteditor.run = saved_run
| import os
from unittest.mock import MagicMock
import pytest
import texteditor
from texteditor import EDITOR
def test_EDITOR_used():
os.environ[EDITOR] = "/path/to/myeditor"
_run = texteditor.run
texteditor.run = MagicMock()
texteditor.open()
args, kw = texteditor.run.call_args
cmd = args[0]
assert cmd[0] == "/path/to/myeditor"
assert cmd[-1].endswith(".txt") # the filename
texteditor.run = _run
def test_EDITOR_with_args():
os.environ[EDITOR] = "/path/to/myeditor --wait"
_run = texteditor.run
texteditor.run = MagicMock()
texteditor.open()
args, kw = texteditor.run.call_args
cmd = args[0]
assert cmd[0] == "/path/to/myeditor"
assert cmd[1] == "--wait"
assert cmd[-1].endswith(".txt") # the filename
texteditor.run = _run
def test_EDITOR_with_args_and_spaces():
os.environ[EDITOR] = "/path\\ to/myeditor --wait -n"
_run = texteditor.run
texteditor.run = MagicMock()
texteditor.open()
args, kw = texteditor.run.call_args
cmd = args[0]
assert cmd[0] == "/path to/myeditor"
assert cmd[1] == "--wait"
assert cmd[2] == "-n"
assert cmd[-1].endswith(".txt") # the filename
texteditor.run = _run
def test_EDITOR_with_quoted_cmd():
os.environ[EDITOR] = '"/path to/myeditor" --wait'
_run = texteditor.run
texteditor.run = MagicMock()
texteditor.open()
args, _ = texteditor.run.call_args
cmd = args[0]
assert cmd[0] == "/path to/myeditor"
texteditor.run = _run
def test_set_extension():
os.environ[EDITOR] = "myeditor"
_run = texteditor.run
texteditor.run = MagicMock()
texteditor.open(extension="md")
args, kw = texteditor.run.call_args
cmd = args[0]
assert cmd[0] == "myeditor"
assert cmd[-1].endswith(".md") # the filename
texteditor.run = _run
def test_use_filename():
os.environ[EDITOR] = "myeditor"
texteditor.run = MagicMock()
_run = texteditor.run
texteditor.open(filename="README.md")
args, kw = texteditor.run.call_args
cmd = args[0]
assert cmd[0] == "myeditor"
assert cmd[-1].endswith("README.md") # the filename
texteditor.run = _run
def test_get_editor():
os.environ[EDITOR] = ""
texteditor.run = MagicMock()
texteditor.open()
args, kw = texteditor.run.call_args
cmd = args[0]
assert cmd[0]
def test_no_editor_available():
os.environ[EDITOR] = ""
def find_nothing(_):
return None
_which = texteditor.which
texteditor.which = find_nothing
_run = texteditor.run
texteditor.run = MagicMock()
# inconceivable!
with pytest.raises(RuntimeError):
texteditor.open()
texteditor.which = _which
texteditor.run = _run | en | 0.621956 | # the filename # the filename # the filename # the filename # the filename # inconceivable! | 2.635535 | 3 |
test.py | holmdk/IrradianceNet | 2 | 6620480 | """
Script for running inference of IrradianceNet model
"""
# Author: <NAME> <<EMAIL>>
import torch
import numpy as np
import pandas as pd
import json
import argparse
from src.models.optical_flow_functions import optflow_predict
from src.config.config import config_parser
from src.config.str2bool import str2bool
from src.visualization.create_video import create_video
from src.data.IrradianceConverter import IrradianceConverter
from src.data.utils.helper_functions import convert_to_full_res, interpolate_borders
def run_evaluation(data_loader, irradiance_converter, CONFIG):
    """
    Run evaluation of IrradianceNet model given arguments from command line.

    Iterates over ``data_loader``, produces two-step-ahead cloud-albedo
    predictions (patch-based ConvLSTM, full-frame model, or optical flow,
    depending on CONFIG), converts them to surface irradiance (SIS) via
    ``irradiance_converter``, and returns MAE/RMSE for both quantities.
    """
    # Load pretrained model
    if CONFIG['pretrained_path'] is not None:
        if 'ckpt' in CONFIG['pretrained_path']:
            # checkpoint files nest the weights under 'state_dict'
            model_weights = torch.load(CONFIG['pretrained_path'])['state_dict']
        else:
            model_weights = torch.load(CONFIG['pretrained_path'])
        # strip the 'model.' key prefix added by the training wrapper
        model_weights = {k.replace('model.', ''): v for k, v in model_weights.items()}
        CONFIG['model_arch'].load_state_dict(state_dict=model_weights)
    # Instantiate lists for containing performance results
    mae = []
    rmse = []
    mae_sis = []
    rmse_sis = []
    total_batches = len(data_loader)
    with torch.no_grad():
        for i, batch in enumerate(data_loader):
            print('\nProcessing batch {} out of {}'.format(i, total_batches))
            x, y, times = batch
            x = x.squeeze(2)
            y = y.squeeze()
            # rebuild per-sample timestamps from the raw tensor
            ts = times.numpy().squeeze()
            ts = pd.DataFrame(([pd.to_datetime(ts[x]).values for x in range(ts.shape[0])]))
            target_times = ts.iloc[:, - CONFIG['n_steps_ahead']:]
            # we only predict two steps ahead
            full_output_img = torch.zeros_like(x[:, -2:, :, 0])
            full_pred_img = torch.zeros_like(x[:, -2:, :, 0])
            target_times = target_times.iloc[:, -2:]
            if CONFIG['patch_based']:
                # assumes 128x128 patches tiled over the full frame — TODO confirm
                img_size = (x.shape[2] * x.shape[4]) // 4
                patch_dim = img_size // 128
                for patch in range(x.shape[2]):
                    x_patch = x[:, :, patch]
                    x_patch = x_patch.permute(0, 1, 3, 4, 2)
                    y_hat = CONFIG['model_arch'].forward(x_patch.cuda()).squeeze()
                    y_hat = y_hat[:, -2:]
                    y = y[:, -2:]
                    full_pred_img[:, :, patch] = y_hat
                    full_output_img[:, :, patch] = y[:, :, patch]
                pred_Y = convert_to_full_res(full_pred_img, img_size, patch_dim, y.shape)
                gt_Y = convert_to_full_res(full_output_img, img_size, patch_dim, y.shape)
                if CONFIG['interpolate_borders']:
                    # smooth the seams between the stitched patches
                    for b in range(pred_Y.shape[0]):
                        pred_Y[b] = interpolate_borders(pred_Y[b].squeeze(), patch_dim, 128, double=True).squeeze().unsqueeze(1)
            else:
                # NOTE(review): this compares CONFIG['model_arch'] to the string
                # 'opt_flow', while elsewhere the same key holds a model object
                # with .forward() — confirm how config_parser populates it.
                if CONFIG['model_arch'] == 'opt_flow':
                    y_hat = optflow_predict(X=x[:, -2:].unsqueeze(2),
                                            flow_model=CONFIG['flow_model'],
                                            future=CONFIG['n_steps_ahead'],
                                            params=CONFIG['params'])  # tvl1
                else:
                    y_hat = CONFIG['model_arch'].forward(x.unsqueeze(4).cuda().float()).squeeze().detach().cpu().unsqueeze(2)
                y_hat = y_hat[:, -2:]
                y = y[:, -2:]
                pred_Y = y_hat
                gt_Y = y.detach().cpu().unsqueeze(2)
            # CONVERT TO SIS
            pred_SIS = irradiance_converter.convert_k_to_SSI(pred_Y, target_times).squeeze()
            gt_SIS = irradiance_converter.return_sis(target_times)
            # Performance
            ## Albedo-related
            mae.append(torch.mean(abs(pred_Y - gt_Y)).item())
            rmse.append(torch.sqrt(torch.mean(torch.pow(pred_Y - gt_Y, 2))).item())
            ## Irradiance-related
            for batch in range(target_times.shape[1]):
                mae_sis.append(np.nanmean(abs(pred_SIS[:, batch].numpy() - gt_SIS[batch].values)))
                rmse_sis.append(np.sqrt(np.nanmean(np.power(pred_SIS[:, batch].numpy() - gt_SIS[batch].values, 2))))
            # Save images
            if CONFIG['save_images']:
                create_video(pred_Y, gt_Y, i, CONFIG['model_name'])
    # Remove Infs (e.g. night-time frames where clear-sky normalization divides by zero — confirm)
    mae = np.array(mae)
    mae = mae[~np.isinf(mae)]
    rmse = np.array(rmse)
    rmse = rmse[~np.isinf(rmse)]
    mae_sis = np.array(mae_sis)
    mae_sis = mae_sis[~np.isinf(mae_sis)]
    rmse_sis = np.array(rmse_sis)
    rmse_sis = rmse_sis[~np.isinf(rmse_sis)]
    return {'k_mae': np.nanmean(mae),
            'k_rmse': np.nanmean(rmse),
            'sis_mae': np.nanmean(mae_sis),
            'sis_rmse': np.nanmean(rmse_sis)}
if __name__ == '__main__':
    # Command-line interface: model selection, window sizes, and data-file names.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name',
                        default='convlstm',
                        type=str,
                        help='Which model to use for inference')
    parser.add_argument('--in_channel',
                        default=1,
                        type=int,
                        help='Number of input channels')
    parser.add_argument('--n_past_frames',
                        default=4,
                        type=int,
                        help='Number of past time steps')
    parser.add_argument('--n_future_frames',
                        default=2,
                        type=int,
                        help='Number of future time steps to predict')
    parser.add_argument('--batch_size',
                        default=8,
                        type=int,
                        help='Batch size to use')
    parser.add_argument('--data_path',
                        default='./data/',
                        type=str,
                        help='Relative path to data folder')
    parser.add_argument('--cal_filename',
                        default='CAL_2016_05',
                        type=str,
                        help='Effective Cloud Albedo filename')
    # NOTE(review): the two help strings below look copy-pasted from
    # --cal_filename; they describe SIS files, not cloud albedo — confirm.
    parser.add_argument('--sis_filename',
                        default='SIS_2016_05',
                        type=str,
                        help='Effective Cloud Albedo filename')
    parser.add_argument('--sis_clearsky_filename',
                        default='irradiance_2016_05',
                        type=str,
                        help='Effective Cloud Albedo filename')
    parser.add_argument("--save_images",
                        default=True,
                        type=str2bool,
                        help="Save predictions as png in result folder")
    parser.add_argument("--interpolate_patch_borders",
                        default=True,
                        type=str2bool,
                        help="Save borders when using patch-based ConvLSTM")
    parser.add_argument('-epochs', default=500, type=int, help='sum of epochs')
    args = parser.parse_args()
    # Save args into json file for logging purposes
    with open('./config.json', 'wt') as f:
        json.dump(vars(args), f, indent=4)
    # Parse args into config (attaches model object, dataset, flow params, ...)
    CONFIG = config_parser(args)
    print(CONFIG)
    test_set = CONFIG['dataset']
    # instantiate data loader
    test_loader = torch.utils.data.DataLoader(
        dataset=test_set,
        batch_size=args.batch_size, # needs to be one for external dataloader to work
        num_workers=0,
        shuffle=False,
        pin_memory=False
    )
    # instantiate irradiance converter (maps predicted clear-sky index k to SIS)
    irradiance_converter = IrradianceConverter(args.data_path,
                                               sis_name=args.sis_filename + '.nc',
                                               sis_clearsky_name=args.sis_clearsky_filename + '.nc',
                                               resolution='high_res')
    # Run evaluation and print results
    results = run_evaluation(test_loader, irradiance_converter, CONFIG)
    print(results)
| """
Script for running inference of IrradianceNet model
"""
# Author: <NAME> <<EMAIL>>
import torch
import numpy as np
import pandas as pd
import json
import argparse
from src.models.optical_flow_functions import optflow_predict
from src.config.config import config_parser
from src.config.str2bool import str2bool
from src.visualization.create_video import create_video
from src.data.IrradianceConverter import IrradianceConverter
from src.data.utils.helper_functions import convert_to_full_res, interpolate_borders
def run_evaluation(data_loader, irradiance_converter, CONFIG):
    """
    Run evaluation of IrradianceNet model given arguments from command line.

    Iterates over ``data_loader`` batches of ``(x, y, times)``, produces
    two-step-ahead predictions (patch-based ConvLSTM, optical flow, or
    full-image model depending on ``CONFIG``), converts the predicted
    effective cloud albedo (k) to surface irradiance (SIS) via
    ``irradiance_converter``, and accumulates error metrics.

    Parameters
    ----------
    data_loader : iterable
        Yields ``(x, y, times)`` tuples per batch.
    irradiance_converter : IrradianceConverter
        Converts albedo predictions to SIS and returns ground-truth SIS.
    CONFIG : dict
        Parsed configuration. Keys read here: 'pretrained_path',
        'model_arch', 'n_steps_ahead', 'patch_based',
        'interpolate_borders', 'flow_model', 'params', 'save_images',
        'model_name'.

    Returns
    -------
    dict
        Mean metrics over all batches with keys 'k_mae', 'k_rmse',
        'sis_mae', 'sis_rmse' (inf values removed, NaNs ignored).
    """
    # Load pretrained model weights, if a checkpoint path is configured.
    if CONFIG['pretrained_path'] is not None:
        if 'ckpt' in CONFIG['pretrained_path']:
            # Lightning-style checkpoint: weights live under 'state_dict'.
            model_weights = torch.load(CONFIG['pretrained_path'])['state_dict']
        else:
            model_weights = torch.load(CONFIG['pretrained_path'])
            # Strip the 'model.' prefix so keys match the bare architecture.
            model_weights = {k.replace('model.', ''): v for k, v in model_weights.items()}
        CONFIG['model_arch'].load_state_dict(state_dict=model_weights)
    # Instantiate lists for containing performance results.
    mae = []       # per-batch MAE on effective cloud albedo (k)
    rmse = []      # per-batch RMSE on effective cloud albedo (k)
    mae_sis = []   # per-timestep MAE on irradiance (SIS)
    rmse_sis = []  # per-timestep RMSE on irradiance (SIS)
    total_batches = len(data_loader)
    with torch.no_grad():  # inference only -- no gradients needed
        for i, batch in enumerate(data_loader):
            print('\nProcessing batch {} out of {}'.format(i, total_batches))
            x, y, times = batch
            x = x.squeeze(2)
            y = y.squeeze()
            # Build per-sample timestamp rows. NOTE(review): the comprehension
            # variable 'x' shadows the input tensor 'x' inside this expression only.
            ts = times.numpy().squeeze()
            ts = pd.DataFrame(([pd.to_datetime(ts[x]).values for x in range(ts.shape[0])]))
            target_times = ts.iloc[:, - CONFIG['n_steps_ahead']:]
            # we only predict two steps ahead
            full_output_img = torch.zeros_like(x[:, -2:, :, 0])
            full_pred_img = torch.zeros_like(x[:, -2:, :, 0])
            target_times = target_times.iloc[:, -2:]
            if CONFIG['patch_based']:
                # assumes x is laid out as (batch, time, patch, H, W, ...) with
                # 128x128 patches -- TODO confirm against the dataset class
                img_size = (x.shape[2] * x.shape[4]) // 4
                patch_dim = img_size // 128
                # Predict each patch independently, then stitch back together.
                for patch in range(x.shape[2]):
                    x_patch = x[:, :, patch]
                    x_patch = x_patch.permute(0, 1, 3, 4, 2)
                    y_hat = CONFIG['model_arch'].forward(x_patch.cuda()).squeeze()
                    y_hat = y_hat[:, -2:]
                    # Re-slicing y each iteration is idempotent after the first pass.
                    y = y[:, -2:]
                    full_pred_img[:, :, patch] = y_hat
                    full_output_img[:, :, patch] = y[:, :, patch]
                # Stitch per-patch predictions/targets back to full resolution.
                pred_Y = convert_to_full_res(full_pred_img, img_size, patch_dim, y.shape)
                gt_Y = convert_to_full_res(full_output_img, img_size, patch_dim, y.shape)
                if CONFIG['interpolate_borders']:
                    # Smooth the seams between stitched patches, sample by sample.
                    for b in range(pred_Y.shape[0]):
                        pred_Y[b] = interpolate_borders(pred_Y[b].squeeze(), patch_dim, 128, double=True).squeeze().unsqueeze(1)
            else:
                if CONFIG['model_arch'] == 'opt_flow':
                    # Classical optical-flow extrapolation baseline.
                    y_hat = optflow_predict(X=x[:, -2:].unsqueeze(2),
                                            flow_model=CONFIG['flow_model'],
                                            future=CONFIG['n_steps_ahead'],
                                            params=CONFIG['params'])  # tvl1 flow parameters
                else:
                    # Full-image forward pass on GPU; result moved back to CPU.
                    y_hat = CONFIG['model_arch'].forward(x.unsqueeze(4).cuda().float()).squeeze().detach().cpu().unsqueeze(2)
                y_hat = y_hat[:, -2:]
                y = y[:, -2:]
                pred_Y = y_hat
                gt_Y = y.detach().cpu().unsqueeze(2)
            # CONVERT TO SIS
            pred_SIS = irradiance_converter.convert_k_to_SSI(pred_Y, target_times).squeeze()
            gt_SIS = irradiance_converter.return_sis(target_times)
            # Performance
            ## Albedo-related
            mae.append(torch.mean(abs(pred_Y - gt_Y)).item())
            rmse.append(torch.sqrt(torch.mean(torch.pow(pred_Y - gt_Y, 2))).item())
            ## Irradiance-related
            # NOTE(review): this loop variable 'batch' shadows the outer batch
            # tuple; here it actually indexes the predicted time steps.
            for batch in range(target_times.shape[1]):
                mae_sis.append(np.nanmean(abs(pred_SIS[:, batch].numpy() - gt_SIS[batch].values)))
                rmse_sis.append(np.sqrt(np.nanmean(np.power(pred_SIS[:, batch].numpy() - gt_SIS[batch].values, 2))))
            # Save images
            if CONFIG['save_images']:
                create_video(pred_Y, gt_Y, i, CONFIG['model_name'])
    # Remove Infs before averaging so a single degenerate batch cannot
    # dominate the reported means; NaNs are ignored by np.nanmean below.
    mae = np.array(mae)
    mae = mae[~np.isinf(mae)]
    rmse = np.array(rmse)
    rmse = rmse[~np.isinf(rmse)]
    mae_sis = np.array(mae_sis)
    mae_sis = mae_sis[~np.isinf(mae_sis)]
    rmse_sis = np.array(rmse_sis)
    rmse_sis = rmse_sis[~np.isinf(rmse_sis)]
    return {'k_mae': np.nanmean(mae),
            'k_rmse': np.nanmean(rmse),
            'sis_mae': np.nanmean(mae_sis),
            'sis_rmse': np.nanmean(rmse_sis)}
if __name__ == '__main__':
    # Command-line interface for running IrradianceNet inference.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name',
                        default='convlstm',
                        type=str,
                        help='Which model to use for inference')
    parser.add_argument('--in_channel',
                        default=1,
                        type=int,
                        help='Number of input channels')
    parser.add_argument('--n_past_frames',
                        default=4,
                        type=int,
                        help='Number of past time steps')
    parser.add_argument('--n_future_frames',
                        default=2,
                        type=int,
                        help='Number of future time steps to predict')
    parser.add_argument('--batch_size',
                        default=8,
                        type=int,
                        help='Batch size to use')
    parser.add_argument('--data_path',
                        default='./data/',
                        type=str,
                        help='Relative path to data folder')
    parser.add_argument('--cal_filename',
                        default='CAL_2016_05',
                        type=str,
                        help='Effective Cloud Albedo filename')
    # Help texts below were copy-pasted from --cal_filename; these two
    # arguments name irradiance files, not cloud-albedo files.
    parser.add_argument('--sis_filename',
                        default='SIS_2016_05',
                        type=str,
                        help='Surface Incoming Shortwave irradiance (SIS) filename')
    parser.add_argument('--sis_clearsky_filename',
                        default='irradiance_2016_05',
                        type=str,
                        help='Clear-sky irradiance filename')
    parser.add_argument("--save_images",
                        default=True,
                        type=str2bool,
                        help="Save predictions as png in result folder")
    parser.add_argument("--interpolate_patch_borders",
                        default=True,
                        type=str2bool,
                        help="Interpolate patch borders when using patch-based ConvLSTM")
    parser.add_argument('-epochs', default=500, type=int, help='sum of epochs')
    args = parser.parse_args()
    # Save args into json file for logging purposes
    with open('./config.json', 'wt') as f:
        json.dump(vars(args), f, indent=4)
    # Parse args into config (builds dataset, model architecture, etc.)
    CONFIG = config_parser(args)
    print(CONFIG)
    test_set = CONFIG['dataset']
    # Instantiate data loader; evaluation order must be deterministic,
    # hence shuffle=False.
    test_loader = torch.utils.data.DataLoader(
        dataset=test_set,
        batch_size=args.batch_size,
        num_workers=0,
        shuffle=False,
        pin_memory=False
    )
    # Instantiate irradiance converter used to map albedo predictions to SIS.
    irradiance_converter = IrradianceConverter(args.data_path,
                                               sis_name=args.sis_filename + '.nc',
                                               sis_clearsky_name=args.sis_clearsky_filename + '.nc',
                                               resolution='high_res')
    # Run evaluation and print results
    results = run_evaluation(test_loader, irradiance_converter, CONFIG)
    print(results)
| en | 0.731934 | Script for running inference of IrradianceNet model # Author: <NAME> <<EMAIL>> Run evaluation of IrradianceNet model given arguments from command line # Load pretrained model # Instantiate lists for containing performance results # we only predict two steps ahead # tvl1 # CONVERT TO SIS # Performance ## Albedo-related ## Irradiance-related # Save images # Remove Infs # Save args into json file for logging purposes # Parse args into config # instantiate data loader # needs to be one for external dataloader to work # instantiate irradiance converter # Run evaluation and print results | 2.143926 | 2 |
meiduo_mall/apps/meiduo_admin/views/permission.py | Vent-Any/meiduo_mall_cangku | 0 | 6620481 | <reponame>Vent-Any/meiduo_mall_cangku<filename>meiduo_mall/apps/meiduo_admin/views/permission.py
from rest_framework.viewsets import ModelViewSet
from django.contrib.auth.models import Permission
from apps.meiduo_admin.utils import PageNumber
from apps.meiduo_admin.serializer.permission import PermissionModelSerializer
class PermissionModelView(ModelViewSet):
queryset = Permission.objects.all()
serializer_class = PermissionModelSerializer
pagination_class = PageNumber
#########################权限的内容类型#######################
from django.contrib.auth.models import ContentType
from rest_framework.generics import ListAPIView
from apps.meiduo_admin.serializer.permission import ContentTypeModelSerializer
class ContenTypeListAPIView(ListAPIView):
queryset = ContentType.objects.all()
serializer_class = ContentTypeModelSerializer
| from rest_framework.viewsets import ModelViewSet
from django.contrib.auth.models import Permission
from apps.meiduo_admin.utils import PageNumber
from apps.meiduo_admin.serializer.permission import PermissionModelSerializer
class PermissionModelView(ModelViewSet):
queryset = Permission.objects.all()
serializer_class = PermissionModelSerializer
pagination_class = PageNumber
#########################权限的内容类型#######################
from django.contrib.auth.models import ContentType
from rest_framework.generics import ListAPIView
from apps.meiduo_admin.serializer.permission import ContentTypeModelSerializer
class ContenTypeListAPIView(ListAPIView):
queryset = ContentType.objects.all()
serializer_class = ContentTypeModelSerializer | de | 0.797513 | #########################权限的内容类型####################### | 1.91414 | 2 |
niftivis/__main__.py | jstutters/niftivis | 0 | 6620482 | from niftivis.niftivis import make_thumbnails
if __name__ == "__main__":
make_thumbnails()
| from niftivis.niftivis import make_thumbnails
if __name__ == "__main__":
make_thumbnails()
| none | 1 | 1.116753 | 1 | |
logtf_analyser_cli/commands.py | cob16/logtf_analyzer | 0 | 6620483 | import logging
import begin
from clint.textui import prompt, colored, progress, puts, indent
from peewee import SqliteDatabase
from logtf_analyser.chatbuilder import ChatBuilder
from logtf_analyser.log_search import LogSearch
from logtf_analyser.model import db, Chat, bulk_add_chat, Log
from logtf_analyser.rest_actions import search_logs, get_log
URL_FILENAME = 'url'
AUTHTOKEN_FILENAME = 'token'
MAX_LIMIT = 10000
@begin.subcommand
@begin.convert(limit=int, userid=int)
def download(userid: 'Steam User Id64' = None,
limit: 'Number or logs to get' = 5,
offset: 'Offset search results' = None,
ignore_console: 'ignore chat made by the console' = False):
"""
Get chat logs of the user
"""
if limit > MAX_LIMIT:
logging.critical(colored.red("Limit is set over MAX_LIMIT of {}".format(MAX_LIMIT), bold=True))
exit(2)
logging.info("Querying for latest {} logs from logs.tf...".format(limit))
if userid:
logging.info("- for user {} from logs.tf...".format(userid))
if ignore_console:
logging.info("- will not save console messages in chat".format(userid))
with db.atomic():
with db.savepoint() as save:
logs = search_logs(player=userid, offset=offset, limit=limit)
logging.info("Got {} results".format(len(logs)))
logs = LogSearch().db_load(logs)
logging.info("{} existing logs and {} new logs".format(len(logs.existing_logs), len(logs.newLogs)))
if logs.newLogs:
if download_prompt(len(logs.newLogs)):
download_chat_logs(logs.newLogs, ignore_console)
logging.info(colored.green("Successfully downloaded all logs!"))
else:
save.rollback()
def download_prompt(num_new_logs: int):
prompt_options = [
{'selector': 'y', 'prompt': 'Yes, to download all new logs', 'return': True},
{'selector': 'n', 'prompt': 'No, and exit program', 'return': False}
]
prompt_msg = "Download {} logs?".format(num_new_logs)
return prompt.options(colored.magenta(prompt_msg, bold=True), prompt_options)
def download_chat_logs(logs, ignore_console):
for log in progress.bar(logs):
logging.debug(colored.yellow("Downloading chat for {}".format(log.log_id)))
result = get_log(log.log_id)
assert result
chat_messages = ChatBuilder(log.log_id, result, ignore_console=ignore_console).build()
bulk_add_chat(chat_messages)
logging.debug(colored.green("Saved {} to DB".format(len(chat_messages))))
@begin.subcommand
def info():
"""
Get database statistics
"""
counts = {
'Logs': Log.select().count(),
'Users': Chat.select(Chat.steam_id).distinct().count(),
'Chats': Chat.select().count(),
}
puts(colored.blue('Current DB contains:'))
for name, value in counts.items():
with indent(6, quote=colored.blue(name)):
puts(str(value))
@begin.subcommand
def prune():
deleted_rows = _prune_query().execute()
logging.info(colored.red("Deleted {} logs".format(deleted_rows)))
def _prune_query():
query = Log.delete().where(
Log.log_id.not_in(
Chat.select(Chat.log)
)
)
return query
@begin.subcommand
@begin.convert(steam_id=int, search_str=str, count_only=bool)
def chat(steam_id=None, search_str=None, count_only: "get only count of results" = False):
query = Chat.select(Log.log_id, Log.date, Log.title, Chat.msg, Chat.username).join(Log)
if steam_id:
query = query.where(Chat.steam_id == steam_id)
if search_str:
query = query.where(Chat.msg.contains(search_str))
if count_only:
puts(colored.blue(str(query.count())))
else:
chat = query.order_by(Chat.log, Chat.order)
name_length = len(max(chat, key=lambda key: len(key.username)).username) + 1
log_id = 0
for index, c in enumerate(chat):
if log_id != c.log:
log_id = c.log
puts(colored.yellow("Log {} {}:".format(c.log_id, c.log.date)))
with indent(3):
with indent(name_length, quote=colored.blue(str(c.username))):
puts(c.msg)
@begin.start(auto_convert=True, short_args=True)
@begin.logging
def logtf_analyser(*subcommands, dbname: 'Name of sqlite db' = 'chat.db'):
"""
Downloads tf2 chat from logs.tf into a db and provides search.
Use [subcommand] -h to get information of a command
"""
db.initialize(SqliteDatabase(dbname))
db.connect()
db.create_tables([Chat, Log], safe=True)
| import logging
import begin
from clint.textui import prompt, colored, progress, puts, indent
from peewee import SqliteDatabase
from logtf_analyser.chatbuilder import ChatBuilder
from logtf_analyser.log_search import LogSearch
from logtf_analyser.model import db, Chat, bulk_add_chat, Log
from logtf_analyser.rest_actions import search_logs, get_log
URL_FILENAME = 'url'
AUTHTOKEN_FILENAME = 'token'
MAX_LIMIT = 10000
@begin.subcommand
@begin.convert(limit=int, userid=int)
def download(userid: 'Steam User Id64' = None,
limit: 'Number or logs to get' = 5,
offset: 'Offset search results' = None,
ignore_console: 'ignore chat made by the console' = False):
"""
Get chat logs of the user
"""
if limit > MAX_LIMIT:
logging.critical(colored.red("Limit is set over MAX_LIMIT of {}".format(MAX_LIMIT), bold=True))
exit(2)
logging.info("Querying for latest {} logs from logs.tf...".format(limit))
if userid:
logging.info("- for user {} from logs.tf...".format(userid))
if ignore_console:
logging.info("- will not save console messages in chat".format(userid))
with db.atomic():
with db.savepoint() as save:
logs = search_logs(player=userid, offset=offset, limit=limit)
logging.info("Got {} results".format(len(logs)))
logs = LogSearch().db_load(logs)
logging.info("{} existing logs and {} new logs".format(len(logs.existing_logs), len(logs.newLogs)))
if logs.newLogs:
if download_prompt(len(logs.newLogs)):
download_chat_logs(logs.newLogs, ignore_console)
logging.info(colored.green("Successfully downloaded all logs!"))
else:
save.rollback()
def download_prompt(num_new_logs: int):
prompt_options = [
{'selector': 'y', 'prompt': 'Yes, to download all new logs', 'return': True},
{'selector': 'n', 'prompt': 'No, and exit program', 'return': False}
]
prompt_msg = "Download {} logs?".format(num_new_logs)
return prompt.options(colored.magenta(prompt_msg, bold=True), prompt_options)
def download_chat_logs(logs, ignore_console):
for log in progress.bar(logs):
logging.debug(colored.yellow("Downloading chat for {}".format(log.log_id)))
result = get_log(log.log_id)
assert result
chat_messages = ChatBuilder(log.log_id, result, ignore_console=ignore_console).build()
bulk_add_chat(chat_messages)
logging.debug(colored.green("Saved {} to DB".format(len(chat_messages))))
@begin.subcommand
def info():
"""
Get database statistics
"""
counts = {
'Logs': Log.select().count(),
'Users': Chat.select(Chat.steam_id).distinct().count(),
'Chats': Chat.select().count(),
}
puts(colored.blue('Current DB contains:'))
for name, value in counts.items():
with indent(6, quote=colored.blue(name)):
puts(str(value))
@begin.subcommand
def prune():
deleted_rows = _prune_query().execute()
logging.info(colored.red("Deleted {} logs".format(deleted_rows)))
def _prune_query():
query = Log.delete().where(
Log.log_id.not_in(
Chat.select(Chat.log)
)
)
return query
@begin.subcommand
@begin.convert(steam_id=int, search_str=str, count_only=bool)
def chat(steam_id=None, search_str=None, count_only: "get only count of results" = False):
query = Chat.select(Log.log_id, Log.date, Log.title, Chat.msg, Chat.username).join(Log)
if steam_id:
query = query.where(Chat.steam_id == steam_id)
if search_str:
query = query.where(Chat.msg.contains(search_str))
if count_only:
puts(colored.blue(str(query.count())))
else:
chat = query.order_by(Chat.log, Chat.order)
name_length = len(max(chat, key=lambda key: len(key.username)).username) + 1
log_id = 0
for index, c in enumerate(chat):
if log_id != c.log:
log_id = c.log
puts(colored.yellow("Log {} {}:".format(c.log_id, c.log.date)))
with indent(3):
with indent(name_length, quote=colored.blue(str(c.username))):
puts(c.msg)
@begin.start(auto_convert=True, short_args=True)
@begin.logging
def logtf_analyser(*subcommands, dbname: 'Name of sqlite db' = 'chat.db'):
"""
Downloads tf2 chat from logs.tf into a db and provides search.
Use [subcommand] -h to get information of a command
"""
db.initialize(SqliteDatabase(dbname))
db.connect()
db.create_tables([Chat, Log], safe=True)
| en | 0.862543 | Get chat logs of the user Get database statistics Downloads tf2 chat from logs.tf into a db and provides search. Use [subcommand] -h to get information of a command | 2.269011 | 2 |
src/utoolbox/feature/__init__.py | liuyenting/utoolbox-legacy | 2 | 6620484 | <filename>src/utoolbox/feature/__init__.py
from .dft_register import * | <filename>src/utoolbox/feature/__init__.py
from .dft_register import * | none | 1 | 1.131154 | 1 | |
setup.py | xethorn/schema | 3 | 6620485 | from setuptools import find_packages
from setuptools import setup
setup(
name='Sukimu',
version='2.0.5',
url='https://github.com/xethorn/sukimu',
author='<NAME>',
author_email='<EMAIL>',
description=(
'Standardized way to perform CRUD operations with Field validation'),
license='MIT',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: Alpha',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],)
| from setuptools import find_packages
from setuptools import setup
setup(
name='Sukimu',
version='2.0.5',
url='https://github.com/xethorn/sukimu',
author='<NAME>',
author_email='<EMAIL>',
description=(
'Standardized way to perform CRUD operations with Field validation'),
license='MIT',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: Alpha',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],)
| none | 1 | 1.221209 | 1 | |
my_tt/UserApp/migrations/0004_auto_20201011_1529.py | tanproject/tantan | 0 | 6620486 | # Generated by Django 2.2.16 on 2020-10-11 15:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('UserApp', '0003_auto_20200926_1904'),
]
operations = [
migrations.AddField(
model_name='user',
name='vip_end',
field=models.DateTimeField(default='3000-01-01', verbose_name='会员到期时间'),
),
migrations.AddField(
model_name='user',
name='vip_id',
field=models.IntegerField(default=1, verbose_name='vip的id'),
),
]
| # Generated by Django 2.2.16 on 2020-10-11 15:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('UserApp', '0003_auto_20200926_1904'),
]
operations = [
migrations.AddField(
model_name='user',
name='vip_end',
field=models.DateTimeField(default='3000-01-01', verbose_name='会员到期时间'),
),
migrations.AddField(
model_name='user',
name='vip_id',
field=models.IntegerField(default=1, verbose_name='vip的id'),
),
]
| en | 0.754463 | # Generated by Django 2.2.16 on 2020-10-11 15:29 | 1.573455 | 2 |
model_monkey/tests/test_endpoint_tests.py | mbernico/model_monkey | 0 | 6620487 | <filename>model_monkey/tests/test_endpoint_tests.py
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import pytest
from model_monkey.endpoint_tests import ExpectedValueTest, TestFactory, ExpectedStatusTest
@pytest.fixture
def set_proxy():
os.environ['NO_PROXY'] = 'localhost' # only needed when running against testing_api.py
def test_good_ExpectedValueTest(set_proxy):
evt = ExpectedValueTest(url="http://localhost:5000/v666/predict/",headers=None, inputs={"a": 5, "b": 5},
predict_label="answer", expected_output=10)
test_result = evt.run_test()
assert test_result['success'] is True
def test_bad_ExpectedValueTest(set_proxy):
evt = ExpectedValueTest(url="http://localhost:5000/v666/predict/",headers=None, inputs={"a": 5, "b": 5},
predict_label="answer", expected_output=11)
test_result = evt.run_test()
assert test_result['success'] is False
def test_factory_success():
evt = TestFactory.create("ExpectedValueTest", url="localhost/blah/predict/", headers=None, inputs=[1, 2, 3],
predict_label='predict', expected_output=6)
assert isinstance(evt, ExpectedValueTest)
def test_factory_failure():
with pytest.raises(AssertionError) as context:
TestFactory.create("BorkBorkTest")
assert "BorkBorkTest" in str(context.value)
def test_good_ExpectedStatusTest(set_proxy):
evt = ExpectedStatusTest(url="http://localhost:5000/v666/predict/", headers=None, inputs={"a": 5, "b": 5},
expected_http_status=200)
test_result = evt.run_test()
assert test_result['success'] is True
def test_bad_ExpectedStatusTest(set_proxy):
evt = ExpectedStatusTest(url="http://localhost:5000/v666/predict/",headers=None, inputs={"a": 5},
expected_http_status=200)
test_result = evt.run_test()
assert test_result['success'] is False | <filename>model_monkey/tests/test_endpoint_tests.py
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import pytest
from model_monkey.endpoint_tests import ExpectedValueTest, TestFactory, ExpectedStatusTest
@pytest.fixture
def set_proxy():
os.environ['NO_PROXY'] = 'localhost' # only needed when running against testing_api.py
def test_good_ExpectedValueTest(set_proxy):
evt = ExpectedValueTest(url="http://localhost:5000/v666/predict/",headers=None, inputs={"a": 5, "b": 5},
predict_label="answer", expected_output=10)
test_result = evt.run_test()
assert test_result['success'] is True
def test_bad_ExpectedValueTest(set_proxy):
evt = ExpectedValueTest(url="http://localhost:5000/v666/predict/",headers=None, inputs={"a": 5, "b": 5},
predict_label="answer", expected_output=11)
test_result = evt.run_test()
assert test_result['success'] is False
def test_factory_success():
evt = TestFactory.create("ExpectedValueTest", url="localhost/blah/predict/", headers=None, inputs=[1, 2, 3],
predict_label='predict', expected_output=6)
assert isinstance(evt, ExpectedValueTest)
def test_factory_failure():
with pytest.raises(AssertionError) as context:
TestFactory.create("BorkBorkTest")
assert "BorkBorkTest" in str(context.value)
def test_good_ExpectedStatusTest(set_proxy):
evt = ExpectedStatusTest(url="http://localhost:5000/v666/predict/", headers=None, inputs={"a": 5, "b": 5},
expected_http_status=200)
test_result = evt.run_test()
assert test_result['success'] is True
def test_bad_ExpectedStatusTest(set_proxy):
evt = ExpectedStatusTest(url="http://localhost:5000/v666/predict/",headers=None, inputs={"a": 5},
expected_http_status=200)
test_result = evt.run_test()
assert test_result['success'] is False | en | 0.898399 | # only needed when running against testing_api.py | 2.269376 | 2 |
gen_vermin_rules.py | gousaiyang/python-change-parser | 3 | 6620488 | <gh_stars>1-10
import ast
import collections
import re
def read_text_file(filename, encoding='utf-8'):
with open(filename, 'r', encoding=encoding) as file:
return file.read()
def parse_version_requirement(text, expect_major):
version = text.split('.')
if not all(re.fullmatch(r'[0-9]+', x) for x in version):
raise ValueError(f'invalid version {text!r}')
if len(version) not in (2, 3):
raise ValueError(f'invalid version {text!r}, should be major.minor or major.minor.micro')
major = int(version[0])
minor = int(version[1])
micro = int(version[2]) if len(version) > 2 else None
if major not in (1, 2, 3):
raise ValueError(f'invalid major version {major}')
if major == 2 and (minor > 7 or minor < 0):
raise ValueError(f'minor version {minor} out of range for major version 2')
if micro is not None and (micro >= 20 or micro < 0):
raise ValueError(f'micro version {micro} out of range')
if major == 1:
major = 2
minor = 0
if major != expect_major:
raise ValueError(f'major version {major} does not match expected major version {expect_major}')
return (major, minor)
def is_removed_in_py3(name):
name_parts = name.split('.')
return any(name_parts[:len(x)] == x for x in removed_in_py3_list)
def validate_identifier_name(name):
parts = name.split('.')
if not all(re.fullmatch('[_A-Za-z][_0-9A-Za-z]*', x) for x in parts):
raise ValueError('invalid identifier name')
py3_rules = read_text_file('py3_rules.txt').rstrip('\n').split('\n\n')[1:]
py2_rules = read_text_file('py2_rules.txt').rstrip('\n').split('\n\n')[1:]
removed_in_py3_list = [x.split('.') for x in read_text_file('removed_in_py3.txt').splitlines() if not x.startswith('#')]
modules_rules = collections.defaultdict(lambda: [None, None])
classes_rules = collections.defaultdict(lambda: [None, None])
exceptions_rules = collections.defaultdict(lambda: [None, None])
functions_rules = collections.defaultdict(lambda: [None, None])
variables_and_constants_rules = collections.defaultdict(lambda: [None, None])
decorators_rules = collections.defaultdict(lambda: [None, None])
kwargs_rules = collections.defaultdict(lambda: [None, None])
for ruleset, major_version in ((py2_rules, 2), (py3_rules, 3)):
for part in ruleset:
rules = part.split('\n')
rule_type = rules[0]
if not rule_type.endswith(':'):
raise ValueError('rule type line should end with ":"')
rule_type = rule_type[:-1]
if rule_type == 'misc': # skip misc part
continue
elif rule_type == 'module':
target = 'modules_rules'
elif rule_type in ('data', 'attribute'):
target = 'variables_and_constants_rules'
elif rule_type == 'class':
target = 'classes_rules'
elif rule_type == 'exception':
target = 'exceptions_rules'
elif rule_type in ('function', 'method'):
target = 'functions_rules'
elif rule_type == 'decorator':
target = 'decorators_rules'
elif rule_type == 'argument':
target = 'kwargs_rules'
else:
raise ValueError(f'unknown rule type {rule_type!r}')
rules = rules[1:]
for rule in rules:
rule_version, rule_content = rule.split(' ', 1)
rule_version = parse_version_requirement(rule_version, major_version)
if target == 'kwargs_rules':
func, kwargs = ast.literal_eval(rule_content)
validate_identifier_name(func)
for kwarg in kwargs:
validate_identifier_name(kwarg)
kwargs_rules[(func, kwarg)][major_version - 2] = rule_version
else:
validate_identifier_name(rule_content)
globals()[target][rule_content][major_version - 2] = rule_version
modules_rules = sorted(modules_rules.items())
classes_rules = sorted(classes_rules.items())
exceptions_rules = sorted(exceptions_rules.items())
functions_rules = sorted(functions_rules.items())
variables_and_constants_rules = sorted(variables_and_constants_rules.items())
decorators_rules = sorted(decorators_rules.items())
kwargs_rules = sorted(kwargs_rules.items())
with open('vermin_rules_generated.py', 'w', encoding='utf-8') as rulefile:
for rule_type in ('modules_rules', 'classes_rules', 'exceptions_rules', 'functions_rules', 'variables_and_constants_rules', 'decorators_rules'):
rulefile.write(f'{rule_type} = {{\n')
for name, versions in globals()[rule_type]:
if not any(versions):
raise ValueError('invalid versions tuple')
if versions[1] is None and not is_removed_in_py3(name):
versions[1] = (3, 0)
rulefile.write(f' "{name}": {tuple(versions)!r},\n')
rulefile.write('}\n\n')
rulefile.write('kwargs_rules = {\n')
for name, versions in kwargs_rules:
if not any(versions):
raise ValueError('invalid versions tuple')
if versions[1] is None and not is_removed_in_py3(name[0]):
versions[1] = (3, 0)
rulefile.write(f' ("{name[0]}", "{name[1]}"): {tuple(versions)!r},\n')
rulefile.write('}\n')
| import ast
import collections
import re
def read_text_file(filename, encoding='utf-8'):
with open(filename, 'r', encoding=encoding) as file:
return file.read()
def parse_version_requirement(text, expect_major):
version = text.split('.')
if not all(re.fullmatch(r'[0-9]+', x) for x in version):
raise ValueError(f'invalid version {text!r}')
if len(version) not in (2, 3):
raise ValueError(f'invalid version {text!r}, should be major.minor or major.minor.micro')
major = int(version[0])
minor = int(version[1])
micro = int(version[2]) if len(version) > 2 else None
if major not in (1, 2, 3):
raise ValueError(f'invalid major version {major}')
if major == 2 and (minor > 7 or minor < 0):
raise ValueError(f'minor version {minor} out of range for major version 2')
if micro is not None and (micro >= 20 or micro < 0):
raise ValueError(f'micro version {micro} out of range')
if major == 1:
major = 2
minor = 0
if major != expect_major:
raise ValueError(f'major version {major} does not match expected major version {expect_major}')
return (major, minor)
def is_removed_in_py3(name):
name_parts = name.split('.')
return any(name_parts[:len(x)] == x for x in removed_in_py3_list)
def validate_identifier_name(name):
parts = name.split('.')
if not all(re.fullmatch('[_A-Za-z][_0-9A-Za-z]*', x) for x in parts):
raise ValueError('invalid identifier name')
py3_rules = read_text_file('py3_rules.txt').rstrip('\n').split('\n\n')[1:]
py2_rules = read_text_file('py2_rules.txt').rstrip('\n').split('\n\n')[1:]
removed_in_py3_list = [x.split('.') for x in read_text_file('removed_in_py3.txt').splitlines() if not x.startswith('#')]
modules_rules = collections.defaultdict(lambda: [None, None])
classes_rules = collections.defaultdict(lambda: [None, None])
exceptions_rules = collections.defaultdict(lambda: [None, None])
functions_rules = collections.defaultdict(lambda: [None, None])
variables_and_constants_rules = collections.defaultdict(lambda: [None, None])
decorators_rules = collections.defaultdict(lambda: [None, None])
kwargs_rules = collections.defaultdict(lambda: [None, None])
for ruleset, major_version in ((py2_rules, 2), (py3_rules, 3)):
for part in ruleset:
rules = part.split('\n')
rule_type = rules[0]
if not rule_type.endswith(':'):
raise ValueError('rule type line should end with ":"')
rule_type = rule_type[:-1]
if rule_type == 'misc': # skip misc part
continue
elif rule_type == 'module':
target = 'modules_rules'
elif rule_type in ('data', 'attribute'):
target = 'variables_and_constants_rules'
elif rule_type == 'class':
target = 'classes_rules'
elif rule_type == 'exception':
target = 'exceptions_rules'
elif rule_type in ('function', 'method'):
target = 'functions_rules'
elif rule_type == 'decorator':
target = 'decorators_rules'
elif rule_type == 'argument':
target = 'kwargs_rules'
else:
raise ValueError(f'unknown rule type {rule_type!r}')
rules = rules[1:]
for rule in rules:
rule_version, rule_content = rule.split(' ', 1)
rule_version = parse_version_requirement(rule_version, major_version)
if target == 'kwargs_rules':
func, kwargs = ast.literal_eval(rule_content)
validate_identifier_name(func)
for kwarg in kwargs:
validate_identifier_name(kwarg)
kwargs_rules[(func, kwarg)][major_version - 2] = rule_version
else:
validate_identifier_name(rule_content)
globals()[target][rule_content][major_version - 2] = rule_version
modules_rules = sorted(modules_rules.items())
classes_rules = sorted(classes_rules.items())
exceptions_rules = sorted(exceptions_rules.items())
functions_rules = sorted(functions_rules.items())
variables_and_constants_rules = sorted(variables_and_constants_rules.items())
decorators_rules = sorted(decorators_rules.items())
kwargs_rules = sorted(kwargs_rules.items())
with open('vermin_rules_generated.py', 'w', encoding='utf-8') as rulefile:
for rule_type in ('modules_rules', 'classes_rules', 'exceptions_rules', 'functions_rules', 'variables_and_constants_rules', 'decorators_rules'):
rulefile.write(f'{rule_type} = {{\n')
for name, versions in globals()[rule_type]:
if not any(versions):
raise ValueError('invalid versions tuple')
if versions[1] is None and not is_removed_in_py3(name):
versions[1] = (3, 0)
rulefile.write(f' "{name}": {tuple(versions)!r},\n')
rulefile.write('}\n\n')
rulefile.write('kwargs_rules = {\n')
for name, versions in kwargs_rules:
if not any(versions):
raise ValueError('invalid versions tuple')
if versions[1] is None and not is_removed_in_py3(name[0]):
versions[1] = (3, 0)
rulefile.write(f' ("{name[0]}", "{name[1]}"): {tuple(versions)!r},\n')
rulefile.write('}\n') | en | 0.769359 | # skip misc part | 3.077299 | 3 |
src/urls.py | dkrukouski/django-rest-start-template | 2 | 6620489 | """Project URL Configuration
"""
from django.conf import settings
from django.contrib import admin
from django.urls import include, path
from drf_spectacular.views import SpectacularAPIView, SpectacularSwaggerView
api_v1_urlpatterns: list = [
    # API documentation
    path('schema/', SpectacularAPIView.as_view(), name='schema'),
    path(
        'docs/',
        SpectacularSwaggerView.as_view(url_name='api_v1:schema'),
        name='schema-swagger-ui',
    )
]
# NOTE(review): 'admin/' routes to the fake_admin app while the real admin
# site is mounted under the DJANGO_ADMIN_URL setting -- presumably a
# decoy/honeypot; confirm intent with the project owners.
urlpatterns = [
    path('admin/', include(('fake_admin.urls', 'fake_admin'), namespace='fake_admin')),
    path('api/v1/', include((api_v1_urlpatterns, 'api_v1'))),
    path(f'{settings.DJANGO_ADMIN_URL}/', admin.site.urls),
]
if settings.DEBUG:
    # django-debug-toolbar routes are only exposed in development builds.
    import debug_toolbar
    urlpatterns += [
        path('__debug__/', include(debug_toolbar.urls)),
    ]
| """Project URL Configuration
"""
from django.conf import settings
from django.contrib import admin
from django.urls import include, path
from drf_spectacular.views import SpectacularAPIView, SpectacularSwaggerView
api_v1_urlpatterns: list = [
# API documentation
path('schema/', SpectacularAPIView.as_view(), name='schema'),
path(
'docs/',
SpectacularSwaggerView.as_view(url_name='api_v1:schema'),
name='schema-swagger-ui',
)
]
urlpatterns = [
path('admin/', include(('fake_admin.urls', 'fake_admin'), namespace='fake_admin')),
path('api/v1/', include((api_v1_urlpatterns, 'api_v1'))),
path(f'{settings.DJANGO_ADMIN_URL}/', admin.site.urls),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
path('__debug__/', include(debug_toolbar.urls)),
]
| en | 0.129329 | Project URL Configuration # API documentation | 2.017224 | 2 |
model.py | pradyunkumar/Letoplay | 1 | 6620490 | <filename>model.py
import os
import sys
from tqdm import tqdm
import pandas as pd
import numpy as np
from random import choice, choices
import process_fp
import gensim as gn
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
def load_data(buckets, k):
    """Draw *k* sorted random fingerprint sequences (length 200) per bucket.

    Returns (X, y): X is the list of sampled sequences, y the matching
    bucket labels, in bucket insertion order.
    """
    X, y = [], []
    for label, fingerprints in buckets.items():
        for _ in range(k):
            sample = sorted(choices(fingerprints, k=200))
            X.append(sample)
            y.append(label)
    return X, y
def create_w2v(vocabulary):
    """Train a small Word2Vec embedding (25 dims, window 2) over *vocabulary*.

    NOTE(review): `size=` is the gensim<4 keyword (renamed to `vector_size=`
    in gensim>=4); confirm the pinned gensim version before upgrading.
    """
    w2v = gn.models.Word2Vec(min_count=1, size=25, window=2)
    w2v.build_vocab(vocabulary, progress_per=10000)
    w2v.train(vocabulary, total_examples=w2v.corpus_count, epochs=30)
    return w2v
def create_Sentences(buckets):
    """Return the bucket contents as a list of sentences, one per bucket,
    in bucket insertion order."""
    return list(buckets.values())
def preprocess(X, y, sentences):
    """Embed every fingerprint in X with Word2Vec and flatten for the SVM.

    X is mutated in place: each element of each 200-long sequence is
    replaced by its 25-dim embedding; the result is then reshaped to a 2-D
    array of shape (samples, 200 * 25).

    Returns (X, y, le) where le is the fitted LabelEncoder.
    """
    w2v = create_w2v(sentences)
    for x in X:
        for i in range(len(x)):
            fp = x[i]
            x[i] = w2v.wv[fp]
    le = LabelEncoder()
    # NOTE(review): the encoded labels are discarded -- y is returned
    # unchanged (original string labels). Verify this is intentional.
    le.fit_transform(y)
    X = np.array(X)
    nsamples, nx, ny = X.shape
    X = X.reshape((nsamples, nx*ny))
    return X, y, le
def clean_dir(direc):
    """Delete every entry directly inside *direc*; the directory itself is kept."""
    for entry in os.listdir(direc):
        target = os.path.join(direc, entry)
        os.remove(target)
def model(X, y):
    """Fit and return a sigmoid-kernel SVM classifier on (X, y)."""
    svc = SVC(kernel='sigmoid')
    svc.fit(X, y)
    return svc
def process_train(train_direc, data_direc):
    """Fingerprint the files in *train_direc* and build training features.

    *data_direc* is used as scratch space and emptied afterwards.
    Returns (X, y, sentences) -- note the third value is the raw bucket
    sentences (needed later to extend the test vocabulary), not the
    LabelEncoder produced by preprocess(), which is discarded here.
    """
    process_fp.preprocess(train_direc, data_direc)
    allfps, buckets = process_fp.process(train_direc, data_direc)
    X, y = load_data(buckets, 100)
    sentences = create_Sentences(buckets)
    X, y, labels = preprocess(X, y, sentences)
    clean_dir(data_direc)
    return X, y, sentences
def process_test(test_direc, data_direc, train_sentences, samples):
    """Fingerprint the files in *test_direc* and build test features.

    The Word2Vec vocabulary is built over *train_sentences* plus the test
    sentences so every test fingerprint has an embedding. *samples*
    sequences are drawn per test bucket; *data_direc* is scratch space
    and is emptied afterwards. Returns (X_test, y_test).
    """
    process_fp.preprocess(test_direc, data_direc)
    alltestsfps, test_buckets = process_fp.process(test_direc, data_direc)
    X_test, y_test = load_data(test_buckets, samples)
    test_sentences = create_Sentences(test_buckets)
    X_test, y_test, labels = preprocess(X_test, y_test, (train_sentences + test_sentences))
    clean_dir(data_direc)
    return X_test, y_test
def detect_errors(svc, X_test, y_test):
    """Print confusion matrix, classification report and accuracy score of
    classifier *svc* on the held-out test set."""
    y_pred = svc.predict(X_test)
    print(confusion_matrix(y_test, y_pred))
    print(classification_report(y_test, y_pred))
    print('Accuracy Score: ', accuracy_score(y_test, y_pred))
# Pipeline driver: fingerprint the training set, build features, fit the
# classifier, then evaluate it on the held-out test set.
X_train, y_train, train_buckets = process_train('train/', 'data/')
X_test, y_test = process_test('test/', 'data/', train_buckets, 30)
svc = model(X_train, y_train)
detect_errors(svc, X_test, y_test)
import os
import sys
from tqdm import tqdm
import pandas as pd
import numpy as np
from random import choice, choices
import process_fp
import gensim as gn
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
def load_data(buckets, k):
X = []
y = []
for bucket in buckets:
for i in range(k):
seq = choices(buckets[bucket], k=200)
seq = sorted(seq)
X.append(seq)
y.append(bucket)
return X, y
def create_w2v(vocabulary):
w2v = gn.models.Word2Vec(min_count=1, size=25, window=2)
w2v.build_vocab(vocabulary, progress_per=10000)
w2v.train(vocabulary, total_examples=w2v.corpus_count, epochs=30)
return w2v
def create_Sentences(buckets):
sentences = []
for bucket in buckets:
sentences.append(buckets[bucket])
return sentences
def preprocess(X, y, sentences):
w2v = create_w2v(sentences)
for x in X:
for i in range(len(x)):
fp = x[i]
x[i] = w2v.wv[fp]
le = LabelEncoder()
le.fit_transform(y)
X = np.array(X)
nsamples, nx, ny = X.shape
X = X.reshape((nsamples, nx*ny))
return X, y, le
def clean_dir(direc):
for f in os.listdir(direc):
os.remove(os.path.join(direc, f))
def model(X, y):
svc = SVC(kernel='sigmoid')
svc.fit(X, y)
return svc
def process_train(train_direc, data_direc):
process_fp.preprocess(train_direc, data_direc)
allfps, buckets = process_fp.process(train_direc, data_direc)
X, y = load_data(buckets, 100)
sentences = create_Sentences(buckets)
X, y, labels = preprocess(X, y, sentences)
clean_dir(data_direc)
return X, y, sentences
def process_test(test_direc, data_direc, train_sentences, samples):
process_fp.preprocess(test_direc, data_direc)
alltestsfps, test_buckets = process_fp.process(test_direc, data_direc)
X_test, y_test = load_data(test_buckets, samples)
test_sentences = create_Sentences(test_buckets)
X_test, y_test, labels = preprocess(X_test, y_test, (train_sentences + test_sentences))
clean_dir(data_direc)
return X_test, y_test
def detect_errors(svc, X_test, y_test):
y_pred = svc.predict(X_test)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
print('Accuracy Score: ', accuracy_score(y_test, y_pred))
X_train, y_train, train_buckets = process_train('train/', 'data/')
X_test, y_test = process_test('test/', 'data/', train_buckets, 30)
svc = model(X_train, y_train)
detect_errors(svc, X_test, y_test) | none | 1 | 2.532028 | 3 | |
scripts/commit-msg.py | garrisonblair/soen390-wikipedia | 0 | 6620491 | import re
import sys
LINK_REGEX = "#[0-9]+"
MERGE_REGEX = "Merge"


def validate_commit_message():
    """Exit-code style check: return 0 when the commit message references an
    issue (e.g. '#42') or is a merge commit, 1 otherwise.

    Expects the commit message as the single command-line argument.
    """
    try:
        _script, message = sys.argv
        is_linked = re.search(LINK_REGEX, message) is not None
        is_merge = re.search(MERGE_REGEX, message, re.IGNORECASE) is not None
        if is_linked or is_merge:
            return 0
    except Exception as err:
        print("ERROR")
        print(err)
    print("Commit message does not link to an issue and isn't a merge commit")
    return 1
return 1
if __name__ == '__main__':
sys.exit(validate_commit_message())
| import re
import sys
LINK_REGEX = "#[0-9]+"
MERGE_REGEX = "Merge"
def validate_commit_message():
""" Make sure commit message has is linked to an issue. """
try:
script, commit_msg = sys.argv
link_match = re.search(LINK_REGEX, commit_msg)
merge_match = re.search(MERGE_REGEX, commit_msg, re.IGNORECASE)
if link_match is not None or merge_match is not None:
return 0
except Exception as e:
print("ERROR")
print(e)
print("Commit message does not link to an issue and isn't a merge commit")
return 1
if __name__ == '__main__':
sys.exit(validate_commit_message())
| en | 0.98319 | Make sure commit message has is linked to an issue. | 2.969152 | 3 |
lib/gamepad_demo.py | ifurusato/ros | 9 | 6620492 | <filename>lib/gamepad_demo.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by <NAME>. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: <NAME>
# created: 2020-08-05
# modified: 2020-10-18
#
# This is a test class that interprets the signals arriving from the 8BitDo N30
# Pro Gamepad, a paired Bluetooth device. The result is passed on to a
# mocked MessageQueue, which passes a filtered subset of those messages on to a
# SimpleMotorController. The result is tele-robotics, i.e., a remote-controlled
# robot.
#
import sys, threading # TEMP
from colorama import init, Fore, Style
init()
from lib.config_loader import ConfigLoader
from lib.logger import Logger, Level
from lib.i2c_scanner import I2CScanner
from lib.queue import MessageQueue
from lib.message import Message
from lib.message_bus import MessageBus
from lib.message_factory import MessageFactory
from lib.gamepad import Gamepad
from lib.gamepad_ctrl import GamepadController
from lib.clock import Clock
from lib.lux import Lux
#from lib.message import Message
#from lib.compass import Compass
#rom lib.blob import BlobSensor
#from lib.video import Video
#from lib.matrix import Matrix
#from lib.indicator import Indicator
from lib.ifs import IntegratedFrontSensor
from lib.battery import BatteryCheck
from lib.motors import Motors
from lib.pid_motor_ctrl import PIDMotorController
# ..............................................................................
class GamepadDemo():
    '''
    Wires an 8BitDo N30 Pro Bluetooth gamepad to the robot: loads the
    configuration, constructs the message bus, clock, optional front
    sensor and PID motor control, and hands everything to a
    GamepadController for tele-operation.
    '''
    def __init__(self, level):
        '''
        Builds the full tele-operation stack from config.yaml and connects
        the gamepad; *level* is the log Level used for this demo's logger.
        '''
        super().__init__()
        _loader = ConfigLoader(Level.INFO)
        filename = 'config.yaml'
        _config = _loader.configure(filename)
        self._log = Logger("gamepad-demo", level)
        self._log.heading('gamepad-demo','Configuring Gamepad...',None)
        self._config = _config['ros'].get('gamepad_demo')
        self._enable_ifs = self._config.get('enable_ifs')
        self._enable_compass = self._config.get('enable_compass')
        self._enable_indicator = self._config.get('enable_indicator')
        self._message_factory = MessageFactory(level)
        self._motors = Motors(_config, None, Level.INFO)
        # self._motor_controller = SimpleMotorController(self._motors, Level.INFO)
        self._pid_motor_ctrl = PIDMotorController(_config, self._motors, Level.INFO)
        # i2c scanner, let's us know if certain devices are available
        _i2c_scanner = I2CScanner(Level.WARN)
        _addresses = _i2c_scanner.get_int_addresses()
        ltr559_available = ( 0x23 in _addresses )
        '''
        Availability of displays:
        The 5x5 RGB Matrix is at 0x74 for port, 0x77 for starboard.
        The 11x7 LED matrix is at 0x75 for starboard, 0x77 for port. The latter
        conflicts with the RGB LED matrix, so both cannot be used simultaneously.
        We check for either the 0x74 address to see if RGB Matrix displays are
        used, OR for 0x75 to assume a pair of 11x7 Matrix displays are being used.
        '''
        # rgbmatrix5x5_stbd_available = ( 0x74 in _addresses ) # not used yet
        # matrix11x7_stbd_available = ( 0x75 in _addresses ) # used as camera lighting
        matrix11x7_stbd_available = False
        # self._blob = BlobSensor(_config, self._motors, Level.INFO)
        self._blob = None
        self._lux = Lux(Level.INFO) if ltr559_available else None
        self._video = None
        # self._video = Video(_config, self._lux, matrix11x7_stbd_available, Level.INFO)
        self._message_bus = MessageBus(Level.INFO)
        # in this application the gamepad controller is the message queue
        # self._queue = MessageQueue(self._message_factory, Level.INFO)
        self._clock = Clock(_config, self._message_bus, self._message_factory, Level.INFO)
        # attempt to find the gamepad
        self._gamepad = Gamepad(_config, self._message_bus, self._message_factory, Level.INFO)
        # if self._enable_indicator:
        #     self._indicator = Indicator(Level.INFO)
        # if self._enable_compass:
        #     self._compass = Compass(_config, self._queue, self._indicator, Level.INFO)
        #     self._video.set_compass(self._compass)
        _enable_battery_check = False
        if _enable_battery_check:
            self._log.info('starting battery check thread...')
            # NOTE(review): self._queue is never assigned (its creation is
            # commented out above), so enabling this flag would raise
            # AttributeError -- confirm before re-enabling.
            self._battery_check = BatteryCheck(_config, self._queue, self._message_factory, Level.INFO)
        else:
            self._battery_check = None
        if self._enable_ifs:
            self._log.info('integrated front sensor enabled.')
            self._ifs = IntegratedFrontSensor(_config, self._clock, self._message_bus, self._message_factory, Level.INFO)
            # add indicator as message consumer
            if self._enable_indicator:
                # NOTE(review): self._queue and self._indicator are never
                # assigned (their creations are commented out above), so this
                # branch would raise AttributeError if both flags are set.
                self._queue.add_consumer(self._indicator)
        else:
            self._ifs = None
            self._log.info('integrated front sensor disabled.')
        # self._ctrl = GamepadController(_config, self._queue, self._pid_motor_ctrl, self._ifs, self._video, self._blob, matrix11x7_stbd_available, Level.INFO, self._close_demo_callback)
        self._ctrl = GamepadController(_config, self._message_bus, self._pid_motor_ctrl, self._ifs, self._video, self._blob, matrix11x7_stbd_available, Level.INFO, self._close_demo_callback)
        self._message_bus.add_handler(Message, self._ctrl.handle_message)
        self._enabled = False
        self._log.info('connecting gamepad...')
        self._gamepad.connect()
        self._log.info('ready.')
    # ..........................................................................
    def get_motors(self):
        '''
        Returns the Motors instance constructed for this demo.
        '''
        return self._motors
    # ..........................................................................
    @property
    def enabled(self):
        '''
        True when the demo has been enabled and not yet disabled.
        '''
        return self._enabled
    # ..........................................................................
    def enable(self):
        '''
        Enables the gamepad, clock, optional battery check, optional front
        sensor and the controller; a warning no-op if already enabled.
        '''
        if self._enabled:
            self._log.warning('already enabled.')
            return
        self._log.info('enabling...')
        self._gamepad.enable()
        self._clock.enable()
        # if self._enable_compass:
        #     self._compass.enable()
        if self._battery_check:
            self._battery_check.enable()
        if self._enable_ifs:
            self._ifs.enable()
        self._ctrl.enable()
        self._enabled = True
        self._log.info('enabled.')
    # ..........................................................................
    def get_thread_position(self, thread):
        '''
        Returns (filename, function name, first line number) of the frame
        *thread* is currently executing, or None if no frame is available.
        '''
        frame = sys._current_frames().get(thread.ident, None)
        if frame:
            return frame.f_code.co_filename, frame.f_code.co_name, frame.f_code.co_firstlineno
    # ..........................................................................
    def disable(self):
        '''
        Disables everything enabled by enable(), optionally dumping thread
        diagnostics; a warning no-op if already disabled.
        '''
        if not self._enabled:
            self._log.warning('already disabled.')
            return
        self._log.info('disabling...')
        self._enabled = False
        self._clock.disable()
        if self._battery_check:
            self._battery_check.disable()
        # if self._enable_compass:
        #     self._compass.disable()
        if self._enable_ifs:
            self._ifs.disable()
        self._pid_motor_ctrl.disable()
        self._gamepad.disable()
        _show_thread_info = False
        if _show_thread_info:
            for thread in threading.enumerate():
                self._log.info(Fore.GREEN + 'thread "{}" is alive? {}; is daemon? {}\t😡'.format(thread.name, thread.is_alive(), thread.isDaemon()))
                if thread is not None:
                    _position = self.get_thread_position(thread)
                    if _position:
                        self._log.info(Fore.GREEN + '    thread "{}" filename: {}; co_name: {}; first_lineno: {}'.format(thread.name, _position[0], _position[1], _position[2]))
                    else:
                        self._log.info(Fore.GREEN + '    thread "{}" position null.'.format(thread.name))
                else:
                    self._log.info(Fore.GREEN + '    null thread.')
        self._log.info('disabled.')
    # ..........................................................................
    def _close_demo_callback(self):
        '''
        Callback handed to the GamepadController: disables and closes the
        demo when the controller signals shutdown.
        '''
        self._log.info(Fore.MAGENTA + 'close demo callback...')
        # self._queue.disable()
        self.disable()
        self.close()
    # ..........................................................................
    def close(self):
        '''
        Disables the demo if necessary, then closes the front sensor, PID
        motor controller and gamepad.
        '''
        if self._enabled:
            self.disable()
        self._log.info('closing...')
        if self._enable_ifs:
            self._ifs.close()
        self._pid_motor_ctrl.close()
        self._gamepad.close()
        self._log.info('closed.')
# EOF
| <filename>lib/gamepad_demo.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by <NAME>. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: <NAME>
# created: 2020-08-05
# modified: 2020-10-18
#
# This is a test class that interprets the signals arriving from the 8BitDo N30
# Pro Gamepad, a paired Bluetooth device. The result is passed on to a
# mocked MessageQueue, which passes a filtered subset of those messages on to a
# SimpleMotorController. The result is tele-robotics, i.e., a remote-controlled
# robot.
#
import sys, threading # TEMP
from colorama import init, Fore, Style
init()
from lib.config_loader import ConfigLoader
from lib.logger import Logger, Level
from lib.i2c_scanner import I2CScanner
from lib.queue import MessageQueue
from lib.message import Message
from lib.message_bus import MessageBus
from lib.message_factory import MessageFactory
from lib.gamepad import Gamepad
from lib.gamepad_ctrl import GamepadController
from lib.clock import Clock
from lib.lux import Lux
#from lib.message import Message
#from lib.compass import Compass
#rom lib.blob import BlobSensor
#from lib.video import Video
#from lib.matrix import Matrix
#from lib.indicator import Indicator
from lib.ifs import IntegratedFrontSensor
from lib.battery import BatteryCheck
from lib.motors import Motors
from lib.pid_motor_ctrl import PIDMotorController
# ..............................................................................
class GamepadDemo():
def __init__(self, level):
super().__init__()
_loader = ConfigLoader(Level.INFO)
filename = 'config.yaml'
_config = _loader.configure(filename)
self._log = Logger("gamepad-demo", level)
self._log.heading('gamepad-demo','Configuring Gamepad...',None)
self._config = _config['ros'].get('gamepad_demo')
self._enable_ifs = self._config.get('enable_ifs')
self._enable_compass = self._config.get('enable_compass')
self._enable_indicator = self._config.get('enable_indicator')
self._message_factory = MessageFactory(level)
self._motors = Motors(_config, None, Level.INFO)
# self._motor_controller = SimpleMotorController(self._motors, Level.INFO)
self._pid_motor_ctrl = PIDMotorController(_config, self._motors, Level.INFO)
# i2c scanner, let's us know if certain devices are available
_i2c_scanner = I2CScanner(Level.WARN)
_addresses = _i2c_scanner.get_int_addresses()
ltr559_available = ( 0x23 in _addresses )
'''
Availability of displays:
The 5x5 RGB Matrix is at 0x74 for port, 0x77 for starboard.
The 11x7 LED matrix is at 0x75 for starboard, 0x77 for port. The latter
conflicts with the RGB LED matrix, so both cannot be used simultaneously.
We check for either the 0x74 address to see if RGB Matrix displays are
used, OR for 0x75 to assume a pair of 11x7 Matrix displays are being used.
'''
# rgbmatrix5x5_stbd_available = ( 0x74 in _addresses ) # not used yet
# matrix11x7_stbd_available = ( 0x75 in _addresses ) # used as camera lighting
matrix11x7_stbd_available = False
# self._blob = BlobSensor(_config, self._motors, Level.INFO)
self._blob = None
self._lux = Lux(Level.INFO) if ltr559_available else None
self._video = None
# self._video = Video(_config, self._lux, matrix11x7_stbd_available, Level.INFO)
self._message_bus = MessageBus(Level.INFO)
# in this application the gamepad controller is the message queue
# self._queue = MessageQueue(self._message_factory, Level.INFO)
self._clock = Clock(_config, self._message_bus, self._message_factory, Level.INFO)
# attempt to find the gamepad
self._gamepad = Gamepad(_config, self._message_bus, self._message_factory, Level.INFO)
# if self._enable_indicator:
# self._indicator = Indicator(Level.INFO)
# if self._enable_compass:
# self._compass = Compass(_config, self._queue, self._indicator, Level.INFO)
# self._video.set_compass(self._compass)
_enable_battery_check = False
if _enable_battery_check:
self._log.info('starting battery check thread...')
self._battery_check = BatteryCheck(_config, self._queue, self._message_factory, Level.INFO)
else:
self._battery_check = None
if self._enable_ifs:
self._log.info('integrated front sensor enabled.')
self._ifs = IntegratedFrontSensor(_config, self._clock, self._message_bus, self._message_factory, Level.INFO)
# add indicator as message consumer
if self._enable_indicator:
self._queue.add_consumer(self._indicator)
else:
self._ifs = None
self._log.info('integrated front sensor disabled.')
# self._ctrl = GamepadController(_config, self._queue, self._pid_motor_ctrl, self._ifs, self._video, self._blob, matrix11x7_stbd_available, Level.INFO, self._close_demo_callback)
self._ctrl = GamepadController(_config, self._message_bus, self._pid_motor_ctrl, self._ifs, self._video, self._blob, matrix11x7_stbd_available, Level.INFO, self._close_demo_callback)
self._message_bus.add_handler(Message, self._ctrl.handle_message)
self._enabled = False
self._log.info('connecting gamepad...')
self._gamepad.connect()
self._log.info('ready.')
# ..........................................................................
def get_motors(self):
return self._motors
# ..........................................................................
@property
def enabled(self):
return self._enabled
# ..........................................................................
def enable(self):
if self._enabled:
self._log.warning('already enabled.')
return
self._log.info('enabling...')
self._gamepad.enable()
self._clock.enable()
# if self._enable_compass:
# self._compass.enable()
if self._battery_check:
self._battery_check.enable()
if self._enable_ifs:
self._ifs.enable()
self._ctrl.enable()
self._enabled = True
self._log.info('enabled.')
# ..........................................................................
def get_thread_position(self, thread):
frame = sys._current_frames().get(thread.ident, None)
if frame:
return frame.f_code.co_filename, frame.f_code.co_name, frame.f_code.co_firstlineno
# ..........................................................................
def disable(self):
if not self._enabled:
self._log.warning('already disabled.')
return
self._log.info('disabling...')
self._enabled = False
self._clock.disable()
if self._battery_check:
self._battery_check.disable()
# if self._enable_compass:
# self._compass.disable()
if self._enable_ifs:
self._ifs.disable()
self._pid_motor_ctrl.disable()
self._gamepad.disable()
_show_thread_info = False
if _show_thread_info:
for thread in threading.enumerate():
self._log.info(Fore.GREEN + 'thread "{}" is alive? {}; is daemon? {}\t😡'.format(thread.name, thread.is_alive(), thread.isDaemon()))
if thread is not None:
_position = self.get_thread_position(thread)
if _position:
self._log.info(Fore.GREEN + ' thread "{}" filename: {}; co_name: {}; first_lineno: {}'.format(thread.name, _position[0], _position[1], _position[2]))
else:
self._log.info(Fore.GREEN + ' thread "{}" position null.'.format(thread.name))
else:
self._log.info(Fore.GREEN + ' null thread.')
self._log.info('disabled.')
# ..........................................................................
def _close_demo_callback(self):
self._log.info(Fore.MAGENTA + 'close demo callback...')
# self._queue.disable()
self.disable()
self.close()
# ..........................................................................
def close(self):
if self._enabled:
self.disable()
self._log.info('closing...')
if self._enable_ifs:
self._ifs.close()
self._pid_motor_ctrl.close()
self._gamepad.close()
self._log.info('closed.')
# EOF
| en | 0.568443 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright 2020-2021 by <NAME>. All rights reserved. This file is part # of the Robot Operating System project, released under the MIT License. Please # see the LICENSE file included as part of this package. # # author: <NAME> # created: 2020-08-05 # modified: 2020-10-18 # # This is a test class that interprets the signals arriving from the 8BitDo N30 # Pro Gamepad, a paired Bluetooth device. The result is passed on to a # mocked MessageQueue, which passes a filtered subset of those messages on to a # SimpleMotorController. The result is tele-robotics, i.e., a remote-controlled # robot. # # TEMP #from lib.message import Message #from lib.compass import Compass #rom lib.blob import BlobSensor #from lib.video import Video #from lib.matrix import Matrix #from lib.indicator import Indicator # .............................................................................. # self._motor_controller = SimpleMotorController(self._motors, Level.INFO) # i2c scanner, let's us know if certain devices are available Availability of displays: The 5x5 RGB Matrix is at 0x74 for port, 0x77 for starboard. The 11x7 LED matrix is at 0x75 for starboard, 0x77 for port. The latter conflicts with the RGB LED matrix, so both cannot be used simultaneously. We check for either the 0x74 address to see if RGB Matrix displays are used, OR for 0x75 to assume a pair of 11x7 Matrix displays are being used. 
# rgbmatrix5x5_stbd_available = ( 0x74 in _addresses ) # not used yet # matrix11x7_stbd_available = ( 0x75 in _addresses ) # used as camera lighting # self._blob = BlobSensor(_config, self._motors, Level.INFO) # self._video = Video(_config, self._lux, matrix11x7_stbd_available, Level.INFO) # in this application the gamepad controller is the message queue # self._queue = MessageQueue(self._message_factory, Level.INFO) # attempt to find the gamepad # if self._enable_indicator: # self._indicator = Indicator(Level.INFO) # if self._enable_compass: # self._compass = Compass(_config, self._queue, self._indicator, Level.INFO) # self._video.set_compass(self._compass) # add indicator as message consumer # self._ctrl = GamepadController(_config, self._queue, self._pid_motor_ctrl, self._ifs, self._video, self._blob, matrix11x7_stbd_available, Level.INFO, self._close_demo_callback) # .......................................................................... # .......................................................................... # .......................................................................... # if self._enable_compass: # self._compass.enable() # .......................................................................... # .......................................................................... # if self._enable_compass: # self._compass.disable() # .......................................................................... # self._queue.disable() # .......................................................................... # EOF | 2.507951 | 3 |
src/service/google_storage.py | A-Ortiz-L/hyperspectral-imaging-cnn-final-degree-work | 0 | 6620493 | <reponame>A-Ortiz-L/hyperspectral-imaging-cnn-final-degree-work
from google.cloud import storage
from google.cloud.exceptions import NotFound
from logging import getLogger
log = getLogger(__name__)
class GoogleStorage:
    """Thin wrapper around the google-cloud-storage client that logs and
    returns False on missing resources instead of raising."""
    def __init__(self):
        # Client() uses application-default credentials from the environment.
        self.client = storage.Client()
        # NOTE(review): never used anywhere in this class -- candidate for removal.
        self.storage_list = {}
    def get_bucket(self, bucket_name: str):
        """Return the bucket named *bucket_name*, or False if it does not exist."""
        try:
            bucket = self.client.get_bucket(bucket_name)
            return bucket
        except NotFound:
            log.warning(f'Could not find bucket={bucket_name}')
            return False
    def download_blob(self, bucket_name: str, source_blob_name: str, destination_file_name: str) -> bool:
        """Downloads a blob from the bucket.

        Returns True on success; logs and returns False when either the
        bucket or the blob is missing (soft failure, no exception).
        """
        bucket = self.get_bucket(bucket_name)
        if not bucket or not bucket.get_blob(source_blob_name):
            log.warning(f'Could not download blob={source_blob_name} on bucket={bucket_name}')
            return False
        blob = bucket.blob(source_blob_name)
        blob.download_to_filename(destination_file_name)
        log.info('Blob {} downloaded to {}.'.format(
            source_blob_name,
            destination_file_name))
        return True
| from google.cloud import storage
from google.cloud.exceptions import NotFound
from logging import getLogger
log = getLogger(__name__)
class GoogleStorage:
def __init__(self):
self.client = storage.Client()
self.storage_list = {}
def get_bucket(self, bucket_name: str):
try:
bucket = self.client.get_bucket(bucket_name)
return bucket
except NotFound:
log.warning(f'Could not find bucket={bucket_name}')
return False
def download_blob(self, bucket_name: str, source_blob_name: str, destination_file_name: str) -> bool:
"""Downloads a blob from the bucket."""
bucket = self.get_bucket(bucket_name)
if not bucket or not bucket.get_blob(source_blob_name):
log.warning(f'Could not download blob={source_blob_name} on bucket={bucket_name}')
return False
blob = bucket.blob(source_blob_name)
blob.download_to_filename(destination_file_name)
log.info('Blob {} downloaded to {}.'.format(
source_blob_name,
destination_file_name))
return True | en | 0.847882 | Downloads a blob from the bucket. | 2.852951 | 3 |
src/chapter 2/exercise3.py | group8BSE1/BSE-2021 | 0 | 6620494 | <filename>src/chapter 2/exercise3.py
# Read worked hours and the hourly rate from stdin, echo each value back,
# then compute and print the gross pay (hours * rate).
hours=int(input('Enter_hours: '))
print(hours)
rate=float(input('Enter_rate: '))
print(rate)
pay= hours * rate
print('PAY: ', pay)
hours=int(input('Enter_hours: '))
print(hours)
rate=float(input('Enter_rate: '))
print(rate)
pay= hours * rate
print('PAY: ', pay) | none | 1 | 3.963598 | 4 | |
src/medicineinventory/apps.py | vandana0608/Pharmacy-Managament | 0 | 6620495 | from django.apps import AppConfig
class MedicineinventoryConfig(AppConfig):
    """Django application configuration for the medicine-inventory app."""
    name = 'medicineinventory'
| from django.apps import AppConfig
class MedicineinventoryConfig(AppConfig):
name = 'medicineinventory'
| none | 1 | 1.150379 | 1 | |
Tools/ColorTest.py | steveknipmeyer/ModelRelief | 0 | 6620496 | <gh_stars>0
import sys
from tools import Colors
def main():
    """Entry point: instantiate the Colors helper and print the ANSI-16 palette."""
    palette = Colors()
    palette.print_ansi16_colors()
if __name__ == "__main__":
    # Report the interpreter version and a red ANSI sample string, then run.
    print (sys.version)
    print (u"\u001b[31mHelloWorld")
    main()
| import sys
from tools import Colors
def main():
"""
Main entry point.
"""
c = Colors()
c.print_ansi16_colors()
if __name__ == "__main__":
print (sys.version)
print (u"\u001b[31mHelloWorld")
main() | en | 0.864792 | Main entry point. | 2.154252 | 2 |
py_imgProcessing/bottle_server.py | JBollow/CoperniCloud | 1 | 6620497 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 16 22:03:38 2018
@author: timmimim
"""
from bottle import route, run, template, request, response
import time
import numpy as np
import image_operations as img_ops
import os
from subprocess import Popen
from subprocess import call
import gdal
import gdalconst
import osr
import json
import subprocess
from json import dumps
from PIL import Image
float32 = np.float32                       # shorthand for the numpy float32 dtype
geotiff = gdal.GetDriverByName('GTiff')    # GDAL driver used for all GeoTIFF output
# helper functions to handle Band- and Image requests
# convert incoming band IDs to band file names
# Data root per deployment / developer machine (exactly one uncommented):
# docker
# localPath = ""
# Anna
# localPath = "F:/Dokumente/Uni/WS_2017/Geosoft2/Testdaten"
# Jan-Patrick
localPath = "C:"
optPath = localPath + "/opt/"              # all Sentinel data and temp output live under <root>/opt/
def getFileNamesPerBandID(imgName):
    """Build the full .jp2 file path for each of the 13 Sentinel-2 bands.

    *imgName* is a SAFE product name. L1C products keep all bands in a flat
    IMG_DATA folder; L2A products split IMG_DATA into R10m/R20m/R60m
    resolution folders, in which case the highest resolution that actually
    contains the band is used.

    Returns a list of 13 paths ordered [B01..B08, B8A, B09..B12].
    """
    img = imgName
    imgPath = optPath + "sentinel2/" + \
        img + ".SAFE/GRANULE/"
    dirName = os.listdir(imgPath)
    print(dirName)
    # a SAFE archive contains exactly one granule directory
    imgPath = imgPath + dirName[0] + "/IMG_DATA/"
    filenameParts = imgName.split("_")

    # helper functions for building band file paths
    def concatenateFileName_2A(bandID):
        # BUGFIX: the original called os.path.is_file, which does not exist
        # (AttributeError at runtime); the correct predicate is os.path.isfile.
        if os.path.isfile(imgPath + "R10m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + bandID + "_10m.jp2"):
            return imgPath + "R10m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + bandID + "_10m.jp2"
        elif os.path.isfile(imgPath + "R20m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + bandID + "_20m.jp2"):
            return imgPath + "R20m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + bandID + "_20m.jp2"
        else:
            return imgPath + "R60m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + bandID + "_60m.jp2"

    def concatenateFileName_1C(bandID):
        filenamePrefix = filenameParts[5] + "_" + filenameParts[2] + "_"
        return imgPath + filenamePrefix + bandID + ".jp2"

    # band order is fixed: B01..B08, then B8A, then B09..B12
    _BAND_IDS = ["B01", "B02", "B03", "B04", "B05", "B06", "B07",
                 "B08", "B8A", "B09", "B10", "B11", "B12"]
    # choose the naming scheme from the processing level encoded in the name
    if "L1C" in filenameParts[1]:
        bandFileNames = [concatenateFileName_1C(band) for band in _BAND_IDS]
    else:
        bandFileNames = [concatenateFileName_2A(band) for band in _BAND_IDS]
    return bandFileNames
def makeColorInterpretationSettingNumeric(instructions):
instructionSet = instructions
if instructionSet['color'] == "grayscale":
instructionSet['color'] = 1
if instructionSet['color'] == "palette":
instructionSet['color'] = 2
if instructionSet['color'] == "red":
instructionSet['color'] = 3
if instructionSet['color'] == "green":
instructionSet['color'] = 4
if instructionSet['color'] == "blue":
instructionSet['color'] = 5
if instructionSet['color'] == "alpha":
instructionSet['color'] = 6
return instructionSet
#######################
###### Server #######
#######################
@route('/')
def meta(x='NA'):
return '<b>This is a simple python server, set up using the Bottle framework.</b>'
@route('/create_new_image')
def create_new_image():
req = request.json
imageName = req['image']
tilePath = optPath + "userrequest/" + \
req['image'] + ".SAFE/" + req['id']
os.makedirs(tilePath)
tmpPath = optPath + "tmp/copernicloud/userrequest/" + \
req['id'] + "/"
os.makedirs(tmpPath)
tmpFile = tmpPath + req['id'] + ".tif"
# get full file paths for each band of the requested image
bandFileNames = getFileNamesPerBandID(imageName)
# read one band to get metadata, i.e. GeoTransform and Projection
# incidentally, blue color band is always supplied in 10m resolution
metaBand = gdal.Open(bandFileNames[1])
#newImageObject = geotiff.CreateCopy(tmpFile, metaBand, 0)
newImageObject = geotiff.Create(
tmpFile,
metaBand.RasterXSize, metaBand.RasterYSize,
len(req['operations']),
gdal.GDT_UInt16)
newImageObject.SetGeoTransform(metaBand.GetGeoTransform())
newImageObject.SetProjection(metaBand.GetProjection())
bandBuildInstructions = [None]*len(req['operations'])
for i, instructionSet in enumerate(req['operations'], start=0):
bandBuildInstructions[i] = makeColorInterpretationSettingNumeric(
instructionSet)
summaryArray = []
for index, instructionSet in enumerate(bandBuildInstructions, start=1):
newBand = img_ops.edit_band(instructionSet, bandFileNames)
# rescale image to 10m resolution Raster Size, so images match in display
img = Image.fromarray(newBand)
img = img.resize((metaBand.RasterXSize, metaBand.RasterXSize))
newBand = np.array(img)
# summaryStatistics[str(index)] = img_ops.getSummaryStatistics(newBand)
summaryArray.append(img_ops.getSummaryStatistics(newBand))
newImageObject.GetRasterBand(index).WriteArray(newBand)
newImageObject.GetRasterBand(
index).SetRasterColorInterpretation(instructionSet['color'])
summaryStatistics = {"band": summaryArray}
newImageObject = None
cmdString = "--profile=mercator -z 3-13 --processes=8 \"" + tmpFile + "\" \"" + tilePath + "\""
subprocess.call(["powershell.exe", "gdal2tiles_multi.py", cmdString])
response.headers['Content-Type'] = 'application/json'
response.headers['Cache-Control'] = 'no-cache'
return json.dumps(summaryStatistics)
@route('/arithmetic_band_combination')
def arithmetic_band_combination():
req = request.json
# get full file paths for each band of the requested image
bands = getFileNamesPerBandID(req['image'])
equation = req['operations']
mask = req['mask']
newBand = img_ops.arithmeticCombination(bands, equation, mask)
tilePath = optPath + "userrequest/" + \
req['image'] + ".SAFE/" + req['id'] + "/"
os.makedirs(tilePath)
tmpPath = optPath + "tmp/copernicloud/userrequest/" + \
req['id'] + "/"
os.makedirs(tmpPath)
tmpFile = tmpPath + req['id'] + ".tif"
# read one band to get metadata, i.e. GeoTransform and Projection
metaBand = gdal.Open(bands[1])
# newImageObject = geotiff.CreateCopy(tmpFile, metaBand, 0)
newImageObject = geotiff.Create(
tmpFile,
metaBand.RasterXSize, metaBand.RasterYSize,
1,
gdal.GDT_UInt16)
newImageObject.SetGeoTransform(metaBand.GetGeoTransform())
newImageObject.SetProjection(metaBand.GetProjection())
summaryArray = []
summaryArray.append(img_ops.getSummaryStatistics(newBand))
print("summaryArray")
print(summaryArray)
summaryStatistics = {"band": summaryArray}
newImageObject.GetRasterBand(1).WriteArray(newBand)
newImageObject.GetRasterBand(1).SetRasterColorInterpretation(1)
newImageObject = None
cmdString = "--profile=mercator -z 3-13 --processes=8 \"" + tmpFile + "\" \"" + tilePath + "\""
subprocess.call(["powershell.exe", "gdal2tiles_multi.py", cmdString])
response.headers['Content-Type'] = 'application/json'
response.headers['Cache-Control'] = 'no-cache'
return json.dumps(summaryStatistics)
# =============================================================================
#
# @route('/mask_pixels')
# def mask_pixels():
# return "TODO"
#
# =============================================================================
@route('/get_point_info')
def get_point_info():
req = request.json
lat, lng = req['lat'], req['lng']
band = req['band']
imgName = req['image'] # may differ later
imgPath = optPath + "sentinel2/" + \
imgName + ".SAFE/GRANULE/"
dirName = os.listdir(imgPath)
imgPath = imgPath + dirName[0] + "/IMG_DATA/"
filenameParts = imgName.split("_")
if "L1C" in filenameParts[1]:
band = imgPath + filenameParts[5] + "_" + \
filenameParts[2] + "_" + band + ".jp2"
else:
if os.path.is_file(imgPath + "R10m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + band + "_10m.jp2"):
band = imgPath + "R10m/L2A_" + \
filenameParts[5] + "_" + filenameParts[2] + \
"_" + band + "_10m.jp2"
elif os.path.is_file(imgPath + "R20m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + band + "_20m.jp2"):
band = imgPath + "R20m/L2A_" + \
filenameParts[5] + "_" + filenameParts[2] + \
"_" + band + "_20m.jp2"
else:
band = imgPath + "R60m/L2A_" + \
filenameParts[5] + "_" + filenameParts[2] + \
"_" + band + "_60m.jp2"
pointInfo = img_ops.getPointInfo(band, lat, lng)
response.headers['Content-Type'] = 'application/json'
response.headers['Cache-Control'] = 'no-cache'
return json.dumps({"pointInfo": pointInfo})
@route('/get_summary_statistics')
def get_summary_statistics():
req = request.json
band = req["band"]
imgName = req['image'] # may differ later
imgPath = optPath + "sentinel2/" + \
imgName + ".SAFE/GRANULE/"
imgPath = imgPath + [x[0] for x in os.walk(imgPath)] + "/IMG_DATA/"
filenameParts = imgName.split("_")
if "L1C" in filenameParts[1]:
band = imgPath + filenameParts[5] + "_" + \
filenameParts[2] + "_" + band + ".jp2"
else:
if os.path.is_file(imgPath + "R10m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + band + "_10m.jp2"):
band = imgPath + "R10m/L2A_" + \
filenameParts[5] + "_" + filenameParts[2] + \
"_" + band + "_10m.jp2"
elif os.path.is_file(imgPath + "R20m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + band + "_20m.jp2"):
band = imgPath + "R20m/L2A_" + \
filenameParts[5] + "_" + filenameParts[2] + \
"_" + band + "_20m.jp2"
else:
band = imgPath + "R60m/L2A_" + \
filenameParts[5] + "_" + filenameParts[2] + \
"_" + band + "_60m.jp2"
response.headers['Content-Type'] = 'application/json'
response.headers['Cache-Control'] = 'no-cache'
return json.dumps(img_ops.getSummaryStatistics(band))
run(host='localhost', port=8088)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 16 22:03:38 2018
@author: timmimim
"""
from bottle import route, run, template, request, response
import time
import numpy as np
import image_operations as img_ops
import os
from subprocess import Popen
from subprocess import call
import gdal
import gdalconst
import osr
import json
import subprocess
from json import dumps
from PIL import Image
float32 = np.float32
geotiff = gdal.GetDriverByName('GTiff')
# helper functions to handle Band- and Image requests
# convert incoming band IDs to band file names
# docker
# localPath = ""
# Anna
# localPath = "F:/Dokumente/Uni/WS_2017/Geosoft2/Testdaten"
# Jan-Patrick
localPath = "C:"
optPath = localPath + "/opt/"
def getFileNamesPerBandID(imgName):
img = imgName
imgPath = optPath + "sentinel2/" + \
img + ".SAFE/GRANULE/"
dirName = os.listdir(imgPath)
print(dirName)
imgPath = imgPath + dirName[0] + "/IMG_DATA/"
filenameParts = imgName.split("_")
bandFileNames = [None]*13
# helper functions for building band file paths
def concatenateFileName_2A(bandID):
if os.path.is_file(imgPath + "R10m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + bandID + "_10m.jp2"):
return imgPath + "R10m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + bandID + "_10m.jp2"
elif os.path.is_file(imgPath + "R20m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + bandID + "_20m.jp2"):
return imgPath + "R20m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + bandID + "_20m.jp2"
else:
return imgPath + "R60m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + bandID + "_60m.jp2"
def concatenateFileName_1C(bandID):
filenamePrefix = filenameParts[5] + "_" + filenameParts[2] + "_"
return imgPath + filenamePrefix + bandID + ".jp2"
if "L1C" in filenameParts[1]:
bandFileNames = [
concatenateFileName_1C("B01"),
concatenateFileName_1C("B02"),
concatenateFileName_1C("B03"),
concatenateFileName_1C("B04"),
concatenateFileName_1C("B05"),
concatenateFileName_1C("B06"),
concatenateFileName_1C("B07"),
concatenateFileName_1C("B08"),
concatenateFileName_1C("B8A"),
concatenateFileName_1C("B09"),
concatenateFileName_1C("B10"),
concatenateFileName_1C("B11"),
concatenateFileName_1C("B12")
]
else:
bandFileNames = [
concatenateFileName_2A("B01"),
concatenateFileName_2A("B02"),
concatenateFileName_2A("B03"),
concatenateFileName_2A("B04"),
concatenateFileName_2A("B05"),
concatenateFileName_2A("B06"),
concatenateFileName_2A("B07"),
concatenateFileName_2A("B08"),
concatenateFileName_2A("B8A"),
concatenateFileName_2A("B09"),
concatenateFileName_2A("B10"),
concatenateFileName_2A("B11"),
concatenateFileName_2A("B12")
]
return bandFileNames
def makeColorInterpretationSettingNumeric(instructions):
instructionSet = instructions
if instructionSet['color'] == "grayscale":
instructionSet['color'] = 1
if instructionSet['color'] == "palette":
instructionSet['color'] = 2
if instructionSet['color'] == "red":
instructionSet['color'] = 3
if instructionSet['color'] == "green":
instructionSet['color'] = 4
if instructionSet['color'] == "blue":
instructionSet['color'] = 5
if instructionSet['color'] == "alpha":
instructionSet['color'] = 6
return instructionSet
#######################
###### Server #######
#######################
@route('/')
def meta(x='NA'):
return '<b>This is a simple python server, set up using the Bottle framework.</b>'
@route('/create_new_image')
def create_new_image():
req = request.json
imageName = req['image']
tilePath = optPath + "userrequest/" + \
req['image'] + ".SAFE/" + req['id']
os.makedirs(tilePath)
tmpPath = optPath + "tmp/copernicloud/userrequest/" + \
req['id'] + "/"
os.makedirs(tmpPath)
tmpFile = tmpPath + req['id'] + ".tif"
# get full file paths for each band of the requested image
bandFileNames = getFileNamesPerBandID(imageName)
# read one band to get metadata, i.e. GeoTransform and Projection
# incidentally, blue color band is always supplied in 10m resolution
metaBand = gdal.Open(bandFileNames[1])
#newImageObject = geotiff.CreateCopy(tmpFile, metaBand, 0)
newImageObject = geotiff.Create(
tmpFile,
metaBand.RasterXSize, metaBand.RasterYSize,
len(req['operations']),
gdal.GDT_UInt16)
newImageObject.SetGeoTransform(metaBand.GetGeoTransform())
newImageObject.SetProjection(metaBand.GetProjection())
bandBuildInstructions = [None]*len(req['operations'])
for i, instructionSet in enumerate(req['operations'], start=0):
bandBuildInstructions[i] = makeColorInterpretationSettingNumeric(
instructionSet)
summaryArray = []
for index, instructionSet in enumerate(bandBuildInstructions, start=1):
newBand = img_ops.edit_band(instructionSet, bandFileNames)
# rescale image to 10m resolution Raster Size, so images match in display
img = Image.fromarray(newBand)
img = img.resize((metaBand.RasterXSize, metaBand.RasterXSize))
newBand = np.array(img)
# summaryStatistics[str(index)] = img_ops.getSummaryStatistics(newBand)
summaryArray.append(img_ops.getSummaryStatistics(newBand))
newImageObject.GetRasterBand(index).WriteArray(newBand)
newImageObject.GetRasterBand(
index).SetRasterColorInterpretation(instructionSet['color'])
summaryStatistics = {"band": summaryArray}
newImageObject = None
cmdString = "--profile=mercator -z 3-13 --processes=8 \"" + tmpFile + "\" \"" + tilePath + "\""
subprocess.call(["powershell.exe", "gdal2tiles_multi.py", cmdString])
response.headers['Content-Type'] = 'application/json'
response.headers['Cache-Control'] = 'no-cache'
return json.dumps(summaryStatistics)
@route('/arithmetic_band_combination')
def arithmetic_band_combination():
req = request.json
# get full file paths for each band of the requested image
bands = getFileNamesPerBandID(req['image'])
equation = req['operations']
mask = req['mask']
newBand = img_ops.arithmeticCombination(bands, equation, mask)
tilePath = optPath + "userrequest/" + \
req['image'] + ".SAFE/" + req['id'] + "/"
os.makedirs(tilePath)
tmpPath = optPath + "tmp/copernicloud/userrequest/" + \
req['id'] + "/"
os.makedirs(tmpPath)
tmpFile = tmpPath + req['id'] + ".tif"
# read one band to get metadata, i.e. GeoTransform and Projection
metaBand = gdal.Open(bands[1])
# newImageObject = geotiff.CreateCopy(tmpFile, metaBand, 0)
newImageObject = geotiff.Create(
tmpFile,
metaBand.RasterXSize, metaBand.RasterYSize,
1,
gdal.GDT_UInt16)
newImageObject.SetGeoTransform(metaBand.GetGeoTransform())
newImageObject.SetProjection(metaBand.GetProjection())
summaryArray = []
summaryArray.append(img_ops.getSummaryStatistics(newBand))
print("summaryArray")
print(summaryArray)
summaryStatistics = {"band": summaryArray}
newImageObject.GetRasterBand(1).WriteArray(newBand)
newImageObject.GetRasterBand(1).SetRasterColorInterpretation(1)
newImageObject = None
cmdString = "--profile=mercator -z 3-13 --processes=8 \"" + tmpFile + "\" \"" + tilePath + "\""
subprocess.call(["powershell.exe", "gdal2tiles_multi.py", cmdString])
response.headers['Content-Type'] = 'application/json'
response.headers['Cache-Control'] = 'no-cache'
return json.dumps(summaryStatistics)
# =============================================================================
#
# @route('/mask_pixels')
# def mask_pixels():
# return "TODO"
#
# =============================================================================
@route('/get_point_info')
def get_point_info():
req = request.json
lat, lng = req['lat'], req['lng']
band = req['band']
imgName = req['image'] # may differ later
imgPath = optPath + "sentinel2/" + \
imgName + ".SAFE/GRANULE/"
dirName = os.listdir(imgPath)
imgPath = imgPath + dirName[0] + "/IMG_DATA/"
filenameParts = imgName.split("_")
if "L1C" in filenameParts[1]:
band = imgPath + filenameParts[5] + "_" + \
filenameParts[2] + "_" + band + ".jp2"
else:
if os.path.is_file(imgPath + "R10m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + band + "_10m.jp2"):
band = imgPath + "R10m/L2A_" + \
filenameParts[5] + "_" + filenameParts[2] + \
"_" + band + "_10m.jp2"
elif os.path.is_file(imgPath + "R20m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + band + "_20m.jp2"):
band = imgPath + "R20m/L2A_" + \
filenameParts[5] + "_" + filenameParts[2] + \
"_" + band + "_20m.jp2"
else:
band = imgPath + "R60m/L2A_" + \
filenameParts[5] + "_" + filenameParts[2] + \
"_" + band + "_60m.jp2"
pointInfo = img_ops.getPointInfo(band, lat, lng)
response.headers['Content-Type'] = 'application/json'
response.headers['Cache-Control'] = 'no-cache'
return json.dumps({"pointInfo": pointInfo})
@route('/get_summary_statistics')
def get_summary_statistics():
req = request.json
band = req["band"]
imgName = req['image'] # may differ later
imgPath = optPath + "sentinel2/" + \
imgName + ".SAFE/GRANULE/"
imgPath = imgPath + [x[0] for x in os.walk(imgPath)] + "/IMG_DATA/"
filenameParts = imgName.split("_")
if "L1C" in filenameParts[1]:
band = imgPath + filenameParts[5] + "_" + \
filenameParts[2] + "_" + band + ".jp2"
else:
if os.path.is_file(imgPath + "R10m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + band + "_10m.jp2"):
band = imgPath + "R10m/L2A_" + \
filenameParts[5] + "_" + filenameParts[2] + \
"_" + band + "_10m.jp2"
elif os.path.is_file(imgPath + "R20m/L2A_" + filenameParts[5] + "_" + filenameParts[2] + "_" + band + "_20m.jp2"):
band = imgPath + "R20m/L2A_" + \
filenameParts[5] + "_" + filenameParts[2] + \
"_" + band + "_20m.jp2"
else:
band = imgPath + "R60m/L2A_" + \
filenameParts[5] + "_" + filenameParts[2] + \
"_" + band + "_60m.jp2"
response.headers['Content-Type'] = 'application/json'
response.headers['Cache-Control'] = 'no-cache'
return json.dumps(img_ops.getSummaryStatistics(band))
run(host='localhost', port=8088)
| en | 0.651154 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Tue Jan 16 22:03:38 2018 @author: timmimim # helper functions to handle Band- and Image requests # convert incoming band IDs to band file names # docker # localPath = "" # Anna # localPath = "F:/Dokumente/Uni/WS_2017/Geosoft2/Testdaten" # Jan-Patrick # helper functions for building band file paths ####################### ###### Server ####### ####################### # get full file paths for each band of the requested image # read one band to get metadata, i.e. GeoTransform and Projection # incidentally, blue color band is always supplied in 10m resolution #newImageObject = geotiff.CreateCopy(tmpFile, metaBand, 0) # rescale image to 10m resolution Raster Size, so images match in display # summaryStatistics[str(index)] = img_ops.getSummaryStatistics(newBand) # get full file paths for each band of the requested image # read one band to get metadata, i.e. GeoTransform and Projection # newImageObject = geotiff.CreateCopy(tmpFile, metaBand, 0) # ============================================================================= # # @route('/mask_pixels') # def mask_pixels(): # return "TODO" # # ============================================================================= # may differ later # may differ later | 2.202647 | 2 |
models/graph_unet.py | microsoft/DualOctreeGNN | 2 | 6620498 | # --------------------------------------------------------
# Dual Octree Graph Networks
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import torch
import torch.nn
from . import dual_octree
from . import graph_ounet
class GraphUNet(graph_ounet.GraphOUNet):
def _setup_channels_and_resblks(self):
# self.resblk_num = [3] * 7 + [1] + [1] * 9
self.resblk_num = [2] * 16
self.channels = [4, 512, 512, 256, 128, 64, 32, 32, 32]
def recons_decoder(self, convs, doctree_out):
logits = dict()
reg_voxs = dict()
deconvs = dict()
deconvs[self.full_depth] = convs[self.full_depth]
for i, d in enumerate(range(self.full_depth, self.depth_out+1)):
if d > self.full_depth:
nnum = doctree_out.nnum[d-1]
leaf_mask = doctree_out.node_child(d-1) < 0
deconvd = self.upsample[i-1](deconvs[d-1], leaf_mask, nnum)
deconvd = deconvd + convs[d] # skip connections
edge_idx = doctree_out.graph[d]['edge_idx']
edge_type = doctree_out.graph[d]['edge_dir']
node_type = doctree_out.graph[d]['node_type']
deconvs[d] = self.decoder[i-1](deconvd, edge_idx, edge_type, node_type)
# predict the splitting label
logit = self.predict[i](deconvs[d])
nnum = doctree_out.nnum[d]
logits[d] = logit[-nnum:]
# predict the signal
reg_vox = self.regress[i](deconvs[d])
# TODO: improve it
# pad zeros to reg_vox to reuse the original code for ocnn
node_mask = doctree_out.graph[d]['node_mask']
shape = (node_mask.shape[0], reg_vox.shape[1])
reg_vox_pad = torch.zeros(shape, device=reg_vox.device)
reg_vox_pad[node_mask] = reg_vox
reg_voxs[d] = reg_vox_pad
return logits, reg_voxs
def forward(self, octree_in, octree_out=None, pos=None):
# octree_in and octree_out are the same for UNet
doctree_in = dual_octree.DualOctree(octree_in)
doctree_in.post_processing_for_docnn()
# run encoder and decoder
convs = self.octree_encoder(octree_in, doctree_in)
out = self.recons_decoder(convs, doctree_in)
output = {'reg_voxs': out[1], 'octree_out': octree_in}
# compute function value with mpu
if pos is not None:
output['mpus'] = self.neural_mpu(pos, out[1], octree_in)
# create the mpu wrapper
def _neural_mpu(pos):
pred = self.neural_mpu(pos, out[1], octree_in)
return pred[self.depth_out][0]
output['neural_mpu'] = _neural_mpu
return output
| # --------------------------------------------------------
# Dual Octree Graph Networks
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import torch
import torch.nn
from . import dual_octree
from . import graph_ounet
class GraphUNet(graph_ounet.GraphOUNet):
def _setup_channels_and_resblks(self):
# self.resblk_num = [3] * 7 + [1] + [1] * 9
self.resblk_num = [2] * 16
self.channels = [4, 512, 512, 256, 128, 64, 32, 32, 32]
def recons_decoder(self, convs, doctree_out):
logits = dict()
reg_voxs = dict()
deconvs = dict()
deconvs[self.full_depth] = convs[self.full_depth]
for i, d in enumerate(range(self.full_depth, self.depth_out+1)):
if d > self.full_depth:
nnum = doctree_out.nnum[d-1]
leaf_mask = doctree_out.node_child(d-1) < 0
deconvd = self.upsample[i-1](deconvs[d-1], leaf_mask, nnum)
deconvd = deconvd + convs[d] # skip connections
edge_idx = doctree_out.graph[d]['edge_idx']
edge_type = doctree_out.graph[d]['edge_dir']
node_type = doctree_out.graph[d]['node_type']
deconvs[d] = self.decoder[i-1](deconvd, edge_idx, edge_type, node_type)
# predict the splitting label
logit = self.predict[i](deconvs[d])
nnum = doctree_out.nnum[d]
logits[d] = logit[-nnum:]
# predict the signal
reg_vox = self.regress[i](deconvs[d])
# TODO: improve it
# pad zeros to reg_vox to reuse the original code for ocnn
node_mask = doctree_out.graph[d]['node_mask']
shape = (node_mask.shape[0], reg_vox.shape[1])
reg_vox_pad = torch.zeros(shape, device=reg_vox.device)
reg_vox_pad[node_mask] = reg_vox
reg_voxs[d] = reg_vox_pad
return logits, reg_voxs
def forward(self, octree_in, octree_out=None, pos=None):
# octree_in and octree_out are the same for UNet
doctree_in = dual_octree.DualOctree(octree_in)
doctree_in.post_processing_for_docnn()
# run encoder and decoder
convs = self.octree_encoder(octree_in, doctree_in)
out = self.recons_decoder(convs, doctree_in)
output = {'reg_voxs': out[1], 'octree_out': octree_in}
# compute function value with mpu
if pos is not None:
output['mpus'] = self.neural_mpu(pos, out[1], octree_in)
# create the mpu wrapper
def _neural_mpu(pos):
pred = self.neural_mpu(pos, out[1], octree_in)
return pred[self.depth_out][0]
output['neural_mpu'] = _neural_mpu
return output
| en | 0.651332 | # -------------------------------------------------------- # Dual Octree Graph Networks # Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by <NAME> # -------------------------------------------------------- # self.resblk_num = [3] * 7 + [1] + [1] * 9 # skip connections # predict the splitting label # predict the signal # TODO: improve it # pad zeros to reg_vox to reuse the original code for ocnn # octree_in and octree_out are the same for UNet # run encoder and decoder # compute function value with mpu # create the mpu wrapper | 2.252273 | 2 |
src/test/resources/unittest/class/advanced/custom_tests/testing_advanced_expected.py | AlexTereshenkov/pybutler | 0 | 6620499 | <filename>src/test/resources/unittest/class/advanced/custom_tests/testing_advanced_expected.py
import os
import sys
import re
import shutil
import unittest
class CustomTestSuite(unittest.TestCase):
def setUp(self):
return
def unit_function1(self):
""""""
assert 1 == 1
return
def unit_function2(self):
""""""
assert 1 == 1
return
def tearDown(self):
return
| <filename>src/test/resources/unittest/class/advanced/custom_tests/testing_advanced_expected.py
import os
import sys
import re
import shutil
import unittest
class CustomTestSuite(unittest.TestCase):
def setUp(self):
return
def unit_function1(self):
""""""
assert 1 == 1
return
def unit_function2(self):
""""""
assert 1 == 1
return
def tearDown(self):
return
| none | 1 | 2.579684 | 3 | |
tests/day4/test_main.py | martin1keogh/aoc2020 | 0 | 6620500 | from typing import List, Callable
from aoc2020.day4.main import SolverDay4, Passport
from tests.utils.puzzle_examples_checker import PuzzleExamplesChecker, Example
class TestSolverDay4(PuzzleExamplesChecker):
day: int = 4
solver: SolverDay4 = SolverDay4
examples: List[Example] = [
Example(
data="""\
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in""",
solution_part1=2,
),
Example(
data="""\
eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007""",
solution_part2=0
),
Example(
data="""\
pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719""",
solution_part2=4
)
]
# hack to get around the fact that python sucks ass
parser: Callable[[str], List[Passport]] = lambda x, y: SolverDay4.parser(y)
| from typing import List, Callable
from aoc2020.day4.main import SolverDay4, Passport
from tests.utils.puzzle_examples_checker import PuzzleExamplesChecker, Example
class TestSolverDay4(PuzzleExamplesChecker):
day: int = 4
solver: SolverDay4 = SolverDay4
examples: List[Example] = [
Example(
data="""\
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in""",
solution_part1=2,
),
Example(
data="""\
eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007""",
solution_part2=0
),
Example(
data="""\
pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719""",
solution_part2=4
)
]
# hack to get around the fact that python sucks ass
parser: Callable[[str], List[Passport]] = lambda x, y: SolverDay4.parser(y)
| no | 0.116009 | \ ecl:gry pid:860033327 eyr:2020 hcl:#fffffd byr:1937 iyr:2017 cid:147 hgt:183cm iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884 hcl:#cfa07d byr:1929 hcl:#ae17e1 iyr:2013 eyr:2024 ecl:brn pid:760753108 byr:1931 hgt:179cm hcl:#cfa07d eyr:2025 pid:166559648 iyr:2011 ecl:brn hgt:59in \ eyr:1972 cid:100 hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926 iyr:2019 hcl:#602927 eyr:1967 hgt:170cm ecl:grn pid:012533040 byr:1946 hcl:dab227 iyr:2012 ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277 hgt:59cm ecl:zzz eyr:2038 hcl:74454a iyr:2023 pid:3556412378 byr:2007 \ pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980 hcl:#623a2f eyr:2029 ecl:blu cid:129 byr:1989 iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm hcl:#888785 hgt:164cm byr:2001 iyr:2015 cid:88 pid:545766238 ecl:hzl eyr:2022 iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719 # hack to get around the fact that python sucks ass | 2.357308 | 2 |
knx_stack/definition/layer/application/a_group_value_response/__init__.py | majamassarini/knx-stack | 2 | 6620501 | from knx_stack.definition.layer.application.a_group_value_response import ind, con
| from knx_stack.definition.layer.application.a_group_value_response import ind, con
| none | 1 | 1.167385 | 1 | |
main.py | korhanyuzbas/python-articlecrawler | 1 | 6620502 | <gh_stars>1-10
import argparse
import json
import os
import uuid
import justext
import requests
from bs4 import BeautifulSoup
from goose3 import Goose
from goose3.configuration import Configuration
from pdfminer.converter import PDFPageAggregator
from pdfminer.high_level import extract_text_to_fp
from pdfminer.layout import LAParams, LTTextBoxHorizontal
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
from requests.adapters import HTTPAdapter
from sqlalchemy import exists
from urllib3 import Retry
from constants import BASE_DIR, REQUESTS_RETRY, REQUESTS_TIMEOUT
from db import Article
from exceptions import UnknownContentType
class ExportArticle(object):
    """
    Export a crawled article to SQL or JSON (yet).

    The export runs immediately on construction.

    Usage example:
        ExportArticle(article=<ArticleCrawler object>, to='sql')
    """

    def __init__(self, article, to):
        """
        :param article: ArticleCrawler object whose data will be exported
        :param to: Extraction type, only supports 'sql' and 'json'
        :return: None
        """
        # article must be an ArticleCrawler object
        assert isinstance(article, ArticleCrawler), "article must be an ArticleCrawler object"
        self.to = to
        self.article = article

        # Nothing useful was crawled; skip the export entirely.
        if not self.article.title and not self.article.content:
            return

        try:
            import sqlalchemy  # availability check only; the SQL path needs it
        except ImportError:
            print("SqlAlchemy is not installed. Process will be extracted to JSON file")
            self.to = 'json'

        self.__extract_to_json() if self.to == 'json' else self.__extract_to_sql()

    def __extract_to_sql(self):
        """
        Persist the article through SQLAlchemy.

        Creates the article table if it does not exist (side effect of the
        ``db`` import).  If the URL is already stored, the row is updated only
        when the raw HTML content has changed; otherwise a new row is created.
        Database sets for SQLite3.
        #TODO: hardcoded to SQLite3, get parameter from user
        """
        # Bad practice for importing
        # But it's creating tables on import
        # TODO: create table when __extract_to_sql() function called
        from db import sql_session as sql

        is_exists = sql.query(exists().where(Article.url == self.article.url)).scalar()
        if is_exists:
            # TODO: redundant query count. is_exists should be combined with
            # the query below. Affects database performance.
            article = sql.query(Article).filter_by(url=self.article.url).first()
            if article.raw_content != self.article.raw_content:
                # Page changed since last crawl -- refresh every stored field.
                article.raw_content = self.article.raw_content
                article.content = self.article.content
                article.title = self.article.title
                article.meta_keywords = self.article.meta_keywords
                article.meta_description = self.article.meta_description
                article.images = json.dumps(self.article.images)
                sql.commit()
        else:
            article = Article(title=self.article.title,
                              content=self.article.content,
                              url=self.article.url,
                              raw_content=self.article.raw_content,
                              meta_description=self.article.meta_description,
                              meta_keywords=self.article.meta_keywords,
                              images=json.dumps(self.article.images))
            sql.add(article)
            sql.commit()

    def __extract_to_json(self):
        """
        Append/overwrite this article's entry in ``article.json``.

        The whole file is read, the entry keyed by the article URL is replaced,
        and the file is rewritten.  A corrupt (non-JSON) file is discarded.
        """
        json_data = {}
        article_json_file = os.path.join(BASE_DIR, 'article.json')
        if os.path.exists(article_json_file):
            with open(article_json_file, 'r') as f:
                try:
                    json_data = json.load(f)
                except json.decoder.JSONDecodeError:
                    # Corrupt file: fall through with an empty dict;
                    # the file is deleted below in any case.
                    pass
            # delete json file so it can create it with edited version
            os.remove(article_json_file)

        # BUG FIX: this check previously read an undefined global name `url`
        # (NameError); it must key on the crawled article's own URL.  The fix
        # also makes the former `else:` initialisation branch redundant.
        # TODO: rewriting the whole file per article can cause performance
        # issues; must find another way to do it.
        if self.article.url not in json_data:
            json_data[self.article.url] = {}
        json_data[self.article.url] = {'title': self.article.title,
                                       'content': self.article.content,
                                       'raw_content': self.article.raw_content,
                                       'url': self.article.url,
                                       'meta_keywords': self.article.meta_keywords,
                                       'meta_description': self.article.meta_description,
                                       'images': self.article.images}

        # create json file
        with open(article_json_file, 'w') as f:
            json.dump(json_data, f, indent=4)
class ArticleCrawler(object):
"""
Getting article details
It doesn't require to call any function in this class
Usage example:
article = ArticleCrawler(url='some link')
Example attributes:
article.title = 'Example Title'
article.content = 'Example Content'
article.raw_content = returns full html body without parsing
article.url = returns url for export to SQL or JSON
article.images = returns list of images URLs
"""
def __init__(self, url):
"""
Constructor of ArticleCrawler
:param url: URL to fetch
:return: None
"""
self.url = url
self.title = ''
self.content = ''
self.raw_content = ''
self.images = []
self.meta_description = ''
self.meta_keywords = ''
self.response = self.__get_response()
if not self.response:
return
try:
self.__get_content_type()
except UnknownContentType:
print()
self.is_html = False
self.is_pdf = False
    def __get_content_type(self):
        """
        Dispatch on the response's Content-Type header.

        HTML responses are parsed immediately; PDFs are currently only
        acknowledged; any other type is appended to
        ``textures/unknown-content-types.json`` and ``UnknownContentType``
        is raised for the caller to handle.
        """
        response = self.response
        content_type = response.headers['Content-Type']
        # known content types
        # unknown content types will be added to json file
        if 'text/html' in content_type:
            self.is_html = True
            self.get_article_details()
        elif content_type == 'application/pdf':
            self.is_pdf = True
            print("PDF rendering is still in progress")
            # self.get_pdf_details()
        else:
            # if content type is not a expected type, it will write it to json file for
            # create textures folder if not exists
            path = os.path.join(BASE_DIR, 'textures')
            if not os.path.exists(path):
                os.makedirs(path)
            json_data = {}
            # read json file first if exists
            unknown_content_types_file = os.path.join(path, 'unknown-content-types.json')
            if os.path.exists(unknown_content_types_file):
                with open(unknown_content_types_file, 'r') as f:
                    try:
                        json_data = json.load(f)
                    except json.decoder.JSONDecodeError:
                        # Unparseable file: fall through and rewrite it below.
                        pass
                # delete json file so it can create it with edited version
                os.remove(unknown_content_types_file)
            else:
                json_data['content_types'] = []
            # for broken JSON files, there must be content_types key in dict
            if 'content_types' not in json_data:
                json_data['content_types'] = []
            json_data['content_types'].append(content_type)
            # create json file
            with open(unknown_content_types_file, 'w') as f:
                json.dump(json_data, f, indent=4)
            raise UnknownContentType
    def __get_response(self):
        """
        GET ``self.url`` with retries; return the Response object, or None
        on timeout, connection failure, or a schema-less URL.
        """
        with requests.Session() as s:
            # Retry transient server errors (500/503) with exponential backoff.
            retry = Retry(total=REQUESTS_RETRY, backoff_factor=0.3, status_forcelist=[500, 503])
            adapter = HTTPAdapter(max_retries=retry)
            s.mount('https://', adapter=adapter)
            s.mount('http://', adapter=adapter)
            try:
                return s.get(self.url, timeout=REQUESTS_TIMEOUT)
            except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError):
                # TODO: Do something in case of connectionerror and/or readtimeout
                return
            except (requests.exceptions.MissingSchema):
                # Falls through, implicitly returning None.
                print("Invalid URL, Please make sure to add http:// or https:// to URL")
    def __process_goose(self):
        """
        Run Goose extraction on ``self.url``.

        Side effect: when a top image is found, ``self.images`` is filled
        via get_all_images_from_example_src(). Returns the Goose article
        object, or None when Goose's own fetch fails.
        """
        goose_config = Configuration()
        goose_config.browser_user_agent = 'Mozilla 5.0'
        goose_config.enable_image_fetching = True
        g = Goose(config=goose_config)
        try:
            article = g.extract(self.url)
            if article.top_image.src:
                self.images = self.get_all_images_from_example_src(article.top_image.src)
        except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout):
            return None
        return article
    def get_all_images_from_example_src(self, src):
        """
        Goose library returns only one relevant image in article
        In case of having multiple relevant images in article, this will collect them all if have similar attributes
        :param src: Relevant image's URL
        :return: images list
        """
        soup = BeautifulSoup(self.response.text, 'html.parser')
        # NOTE(review): find_all(...)[0] raises IndexError if the top
        # image's src is not present verbatim in the fetched HTML -- confirm.
        image_element = soup.find_all('img', {'src': src})[0]
        image_attrs = image_element.attrs
        # class attribute may use in other irrelevant image
        image_attrs.pop('class', None)
        # alt and id attributes are unique in most cases.
        image_attrs.pop('alt', None)
        image_attrs.pop('id', None)
        all_images = []
        # Collect every <img> sharing any remaining attribute value with the
        # known-relevant image.
        for key, value in image_attrs.items():
            all_data = soup.find_all('img', {key: value})
            for i in all_data:
                all_images.append(i.get('src'))
        # article's top_image will appear in list twice, thus it will convert it to set and convert it back to list again
        all_images = list(set(all_images))
        return all_images
def get_article_details(self):
goose_object = self.__process_goose()
title = None
content = None
if goose_object:
title = goose_object.title
# Removing newlines and tabs in article content
content = goose_object.cleaned_text.replace('\n', '').replace('\t', '') if goose_object.cleaned_text else None
# If Goose can not found title or content, will try jusText to get article
if not title or not content:
content_language = None
for key, value in self.response.headers.items():
if "language" in key.lower():
content_language = value
# Goose would have found content language in meta
if not content_language:
content_language = goose_object.meta_lang
# If not content language has found, English will be default language
# TODO: take parameter from user for default language
if not content_language:
parapraphs = justext.justext(self.response.content, justext.get_stoplist(language='English'))
else:
path = os.path.join(BASE_DIR, 'textures')
if not os.path.exists(path):
os.makedirs(path)
# read json file first if exists
language_codes_json = os.path.join(path, 'language_codes.json')
stoplist_language = "English"
if os.path.exists(language_codes_json):
with open(language_codes_json, 'r') as f:
language_data = json.load(f)
for key, value in language_data.items():
if key == content_language:
stoplist_language = value
parapraphs = justext.justext(self.response.content, justext.get_stoplist(language=stoplist_language))
# Goose would have found title in article
if not title:
try:
title = [parapraph.text for parapraph in parapraphs if
not parapraph.is_boilerplate and parapraph.is_heading and parapraph.class_type == 'good'][0]
except IndexError:
pass
# Goose would have found content in article
if not content:
content = " ".join([parapraph.text for parapraph in parapraphs if
not parapraph.is_boilerplate and not parapraph.is_heading and parapraph.class_type == 'good'])
self.title = title
self.content = content
self.raw_content = self.response.text
self.meta_description = goose_object.meta_description
self.meta_keywords = goose_object.meta_keywords
    # not using currently.
    def get_pdf_details(self):
        """
        Experimental: extract title/content from a PDF response.

        Writes the body to a randomly named temp file, renders it twice
        (pdfminer layout API, then extract_text_to_fp as HTML -- the second
        pass currently overwrites the first result), runs jusText on the
        output, and removes the temp files.
        """
        # save pdf to local
        # it gives random name to pdf, it will delete it after processing
        random_string = str(uuid.uuid4())[0:10]
        file_path = os.path.join(BASE_DIR, 'pdf_files', "{}.pdf".format(random_string))
        html_file_path = os.path.join(BASE_DIR, 'pdf_files', "{}.html".format(random_string))
        with open(file_path, 'wb') as f:
            f.write(self.response.content)
        text = ""
        # Usage Type 1:
        # Rendering pdf as text. Best way to get PDF content, but got problems with jusText, not getting article as expected
        with open(file_path, 'rb') as f:
            parser = PDFParser(f)
            document = PDFDocument(parser)
            manager = PDFResourceManager()
            laparams = LAParams()
            device = PDFPageAggregator(manager, laparams=laparams)
            interpreter = PDFPageInterpreter(manager, device)
            for page in PDFPage.get_pages(f):
                interpreter.process_page(page)
                layout = device.get_result()
                for element in layout:
                    if isinstance(element, (LTTextBoxHorizontal)):
                        # wrap each text box as an html element, so the jusText library can find relative texts
                        text += "<p>{}</p>".format(element.get_text())
            # End of usage type 1
            # Usage Type 2:
            # Rendering pdf as html. Not a great way to get PDF content. Font sizes, html elements etc. not rendering as expected.
            # If fixed, would work with jusText as expected.
            with open(html_file_path, 'wb') as outf:
                extract_text_to_fp(f, outf, output_type='html')
        with open(html_file_path, 'rb') as f:
            # NOTE(review): this join overwrites the Usage Type 1 text built
            # above -- confirm which rendering is intended to win.
            text = " ".join([x.decode().replace('\n', '') for x in f.readlines()])
        # End of usage type 2
        if document.info:
            self.title = document.info[0].get('Title', None)
            if self.title:
                self.title = self.title.decode()
        # jusText raises exception if text variable is empty
        if text:
            parapraphs = justext.justext(text, justext.get_stoplist(language='English'))
            content = " ".join([parapraph.text for parapraph in parapraphs if
                                not parapraph.is_boilerplate and not parapraph.is_heading and parapraph.class_type == 'good'])
            self.content = content
            self.raw_content = content
        # Remove redundant temp files.
        os.unlink(file_path)
        os.unlink(html_file_path)
if __name__ == '__main__':
    argparser = argparse.ArgumentParser(description='Article crawler')
    # BUGFIX: a positional argument ignores ``default`` unless it is made
    # optional with nargs='?'; previously the URL was always required and
    # the default was dead configuration.
    argparser.add_argument('url', nargs='?', help='Enter URL to fetch',
                           default='https://www.theguardian.com/politics/2018/aug/19/brexit-tory-mps-warn-of-entryism-threat-from-leave-eu-supporters')
    argparser.add_argument('--export', help='Article export option. Choices are: sql, json. Default argument: sql', default='sql')
    args = argparser.parse_args()
    url = args.url
    export_option = args.export
    article = ArticleCrawler(url=url)
    ExportArticle(article=article, to=export_option)
import argparse
import json
import os
import uuid
import justext
import requests
from bs4 import BeautifulSoup
from goose3 import Goose
from goose3.configuration import Configuration
from pdfminer.converter import PDFPageAggregator
from pdfminer.high_level import extract_text_to_fp
from pdfminer.layout import LAParams, LTTextBoxHorizontal
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
from requests.adapters import HTTPAdapter
from sqlalchemy import exists
from urllib3 import Retry
from constants import BASE_DIR, REQUESTS_RETRY, REQUESTS_TIMEOUT
from db import Article
from exceptions import UnknownContentType
class ExportArticle(object):
"""
Export article to SQL or JSON (yet)
Usage example:
ExportArticle(article=<ArticleCrawler object>, to='sql')
"""
def __init__(self, article, to):
"""
:param to: Extration type, only supports SQL and JSON
:param article: ArticleCrawler object
:return: None
"""
# article must be an ArticleCrawler object
assert isinstance(article, ArticleCrawler), "article must be an ArticleCrawler object"
self.to = to
self.article = article
if not self.article.title and not self.article.content:
return
try:
import sqlalchemy
except ImportError:
print("SqlAlchemy is not installed. Process will be extracted to JSON file")
self.to = 'json'
self.__extract_to_json() if self.to == 'json' else self.__extract_to_sql()
    def __extract_to_sql(self):
        """
        Creates article table if not exists
        If url already exists in database, it will check if html content (raw_content) has changed
        Otherwise it will create new article
        Database sets for SQLite3.
        #TODO: hardcoded to SQLite3, get parameter from user
        """
        # Bad practice for importing
        # But it's creating tables on import
        # TODO: create table when __extract_to_sql() function called
        from db import sql_session as sql
        is_exists = sql.query(exists().where(Article.url == self.article.url)).scalar()
        if is_exists:
            # TODO: redundant query count. is_exists should be combined with article variable. affects database performance.
            article = sql.query(Article).filter_by(url=self.article.url).first()
            # Only rewrite the row when the fetched HTML actually changed.
            if article.raw_content != self.article.raw_content:
                article.raw_content = self.article.raw_content
                article.content = self.article.content
                article.title = self.article.title
                article.meta_keywords = self.article.meta_keywords
                article.meta_description = self.article.meta_description
                article.images = json.dumps(self.article.images)
                sql.commit()
        else:
            article = Article(title=self.article.title,
                              content=self.article.content,
                              url=self.article.url,
                              raw_content=self.article.raw_content,
                              meta_description=self.article.meta_description,
                              meta_keywords=self.article.meta_keywords,
                              images=json.dumps(self.article.images))
            sql.add(article)
            sql.commit()
def __extract_to_json(self):
"""
Extracting data to JSON
"""
json_data = {}
article_json_file = os.path.join(BASE_DIR, 'article.json')
if os.path.exists(article_json_file):
with open(article_json_file, 'r') as f:
try:
json_data = json.load(f)
except json.decoder.JSONDecodeError:
# It will delete JSON file in anyway
pass
# delete json file so it can create it with edited version
os.remove(article_json_file)
else:
json_data[self.article.url] = {}
# TODO: Can cause performance issues
# must find another way to do it
if url not in json_data:
json_data[self.article.url] = {}
json_data[self.article.url] = {'title': self.article.title,
'content': self.article.content,
'raw_content': self.article.raw_content,
'url': self.article.url,
'meta_keywords': self.article.meta_keywords,
'meta_description': self.article.meta_description,
'images': self.article.images}
# create json file
with open(article_json_file, 'w') as f:
json.dump(json_data, f, indent=4)
class ArticleCrawler(object):
"""
Getting article details
It doesn't require to call any function in this class
Usage example:
article = ArticleCrawler(url='some link')
Example attributes:
article.title = 'Example Title'
article.content = 'Example Content'
article.raw_content = returns full html body without parsing
article.url = returns url for export to SQL or JSON
article.images = returns list of images URLs
"""
def __init__(self, url):
"""
Constructor of ArticleCrawler
:param url: URL to fetch
:return: None
"""
self.url = url
self.title = ''
self.content = ''
self.raw_content = ''
self.images = []
self.meta_description = ''
self.meta_keywords = ''
self.response = self.__get_response()
if not self.response:
return
try:
self.__get_content_type()
except UnknownContentType:
print()
self.is_html = False
self.is_pdf = False
    def __get_content_type(self):
        """
        Dispatch on the response's Content-Type header.

        HTML responses are parsed immediately; PDFs are currently only
        acknowledged; any other type is appended to
        ``textures/unknown-content-types.json`` and ``UnknownContentType``
        is raised for the caller to handle.
        """
        response = self.response
        content_type = response.headers['Content-Type']
        # known content types
        # unknown content types will be added to json file
        if 'text/html' in content_type:
            self.is_html = True
            self.get_article_details()
        elif content_type == 'application/pdf':
            self.is_pdf = True
            print("PDF rendering is still in progress")
            # self.get_pdf_details()
        else:
            # if content type is not a expected type, it will write it to json file for
            # create textures folder if not exists
            path = os.path.join(BASE_DIR, 'textures')
            if not os.path.exists(path):
                os.makedirs(path)
            json_data = {}
            # read json file first if exists
            unknown_content_types_file = os.path.join(path, 'unknown-content-types.json')
            if os.path.exists(unknown_content_types_file):
                with open(unknown_content_types_file, 'r') as f:
                    try:
                        json_data = json.load(f)
                    except json.decoder.JSONDecodeError:
                        # Unparseable file: fall through and rewrite it below.
                        pass
                # delete json file so it can create it with edited version
                os.remove(unknown_content_types_file)
            else:
                json_data['content_types'] = []
            # for broken JSON files, there must be content_types key in dict
            if 'content_types' not in json_data:
                json_data['content_types'] = []
            json_data['content_types'].append(content_type)
            # create json file
            with open(unknown_content_types_file, 'w') as f:
                json.dump(json_data, f, indent=4)
            raise UnknownContentType
    def __get_response(self):
        """
        GET ``self.url`` with retries; return the Response object, or None
        on timeout, connection failure, or a schema-less URL.
        """
        with requests.Session() as s:
            # Retry transient server errors (500/503) with exponential backoff.
            retry = Retry(total=REQUESTS_RETRY, backoff_factor=0.3, status_forcelist=[500, 503])
            adapter = HTTPAdapter(max_retries=retry)
            s.mount('https://', adapter=adapter)
            s.mount('http://', adapter=adapter)
            try:
                return s.get(self.url, timeout=REQUESTS_TIMEOUT)
            except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError):
                # TODO: Do something in case of connectionerror and/or readtimeout
                return
            except (requests.exceptions.MissingSchema):
                # Falls through, implicitly returning None.
                print("Invalid URL, Please make sure to add http:// or https:// to URL")
    def __process_goose(self):
        """
        Run Goose extraction on ``self.url``.

        Side effect: when a top image is found, ``self.images`` is filled
        via get_all_images_from_example_src(). Returns the Goose article
        object, or None when Goose's own fetch fails.
        """
        goose_config = Configuration()
        goose_config.browser_user_agent = 'Mozilla 5.0'
        goose_config.enable_image_fetching = True
        g = Goose(config=goose_config)
        try:
            article = g.extract(self.url)
            if article.top_image.src:
                self.images = self.get_all_images_from_example_src(article.top_image.src)
        except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout):
            return None
        return article
def get_all_images_from_example_src(self, src):
"""
Goose library returns only one relevant image in article
In case of having multiple relevant images in article, this will collect them all if have similar attributes
:param src: Relevant image's URL
:return: images list
"""
soup = BeautifulSoup(self.response.text, 'html.parser')
image_element = soup.find_all('img', {'src': src})[0]
image_attrs = image_element.attrs
# class attribute may use in other irrelevant image
image_attrs.pop('class', None)
# alt and id attributes are unique in most cases.
image_attrs.pop('alt', None)
image_attrs.pop('id', None)
all_images = []
for key, value in image_attrs.items():
all_data = soup.find_all('img', {key: value})
for i in all_data:
all_images.append(i.get('src'))
# article's top_image will appear in list twice, thus it will convert it to set and convert it back to list again
all_images = list(set(all_images))
return all_images
def get_article_details(self):
goose_object = self.__process_goose()
title = None
content = None
if goose_object:
title = goose_object.title
# Removing newlines and tabs in article content
content = goose_object.cleaned_text.replace('\n', '').replace('\t', '') if goose_object.cleaned_text else None
# If Goose can not found title or content, will try jusText to get article
if not title or not content:
content_language = None
for key, value in self.response.headers.items():
if "language" in key.lower():
content_language = value
# Goose would have found content language in meta
if not content_language:
content_language = goose_object.meta_lang
# If not content language has found, English will be default language
# TODO: take parameter from user for default language
if not content_language:
parapraphs = justext.justext(self.response.content, justext.get_stoplist(language='English'))
else:
path = os.path.join(BASE_DIR, 'textures')
if not os.path.exists(path):
os.makedirs(path)
# read json file first if exists
language_codes_json = os.path.join(path, 'language_codes.json')
stoplist_language = "English"
if os.path.exists(language_codes_json):
with open(language_codes_json, 'r') as f:
language_data = json.load(f)
for key, value in language_data.items():
if key == content_language:
stoplist_language = value
parapraphs = justext.justext(self.response.content, justext.get_stoplist(language=stoplist_language))
# Goose would have found title in article
if not title:
try:
title = [parapraph.text for parapraph in parapraphs if
not parapraph.is_boilerplate and parapraph.is_heading and parapraph.class_type == 'good'][0]
except IndexError:
pass
# Goose would have found content in article
if not content:
content = " ".join([parapraph.text for parapraph in parapraphs if
not parapraph.is_boilerplate and not parapraph.is_heading and parapraph.class_type == 'good'])
self.title = title
self.content = content
self.raw_content = self.response.text
self.meta_description = goose_object.meta_description
self.meta_keywords = goose_object.meta_keywords
    # not using currently.
    def get_pdf_details(self):
        """
        Experimental: extract title/content from a PDF response.

        Writes the body to a randomly named temp file, renders it twice
        (pdfminer layout API, then extract_text_to_fp as HTML -- the second
        pass currently overwrites the first result), runs jusText on the
        output, and removes the temp files.
        """
        # save pdf to local
        # it gives random name to pdf, it will delete it after processing
        random_string = str(uuid.uuid4())[0:10]
        file_path = os.path.join(BASE_DIR, 'pdf_files', "{}.pdf".format(random_string))
        html_file_path = os.path.join(BASE_DIR, 'pdf_files', "{}.html".format(random_string))
        with open(file_path, 'wb') as f:
            f.write(self.response.content)
        text = ""
        # Usage Type 1:
        # Rendering pdf as text. Best way to get PDF content, but got problems with jusText, not getting article as expected
        with open(file_path, 'rb') as f:
            parser = PDFParser(f)
            document = PDFDocument(parser)
            manager = PDFResourceManager()
            laparams = LAParams()
            device = PDFPageAggregator(manager, laparams=laparams)
            interpreter = PDFPageInterpreter(manager, device)
            for page in PDFPage.get_pages(f):
                interpreter.process_page(page)
                layout = device.get_result()
                for element in layout:
                    if isinstance(element, (LTTextBoxHorizontal)):
                        # wrap each text box as an html element, so the jusText library can find relative texts
                        text += "<p>{}</p>".format(element.get_text())
            # End of usage type 1
            # Usage Type 2:
            # Rendering pdf as html. Not a great way to get PDF content. Font sizes, html elements etc. not rendering as expected.
            # If fixed, would work with jusText as expected.
            with open(html_file_path, 'wb') as outf:
                extract_text_to_fp(f, outf, output_type='html')
        with open(html_file_path, 'rb') as f:
            # NOTE(review): this join overwrites the Usage Type 1 text built
            # above -- confirm which rendering is intended to win.
            text = " ".join([x.decode().replace('\n', '') for x in f.readlines()])
        # End of usage type 2
        if document.info:
            self.title = document.info[0].get('Title', None)
            if self.title:
                self.title = self.title.decode()
        # jusText raises exception if text variable is empty
        if text:
            parapraphs = justext.justext(text, justext.get_stoplist(language='English'))
            content = " ".join([parapraph.text for parapraph in parapraphs if
                                not parapraph.is_boilerplate and not parapraph.is_heading and parapraph.class_type == 'good'])
            self.content = content
            self.raw_content = content
        # Remove redundant temp files.
        os.unlink(file_path)
        os.unlink(html_file_path)
if __name__ == '__main__':
    argparser = argparse.ArgumentParser(description='Article crawler')
    # BUGFIX: a positional argument ignores ``default`` unless it is made
    # optional with nargs='?'; previously the URL was always required and
    # the default was dead configuration.
    argparser.add_argument('url', nargs='?', help='Enter URL to fetch',
                           default='https://www.theguardian.com/politics/2018/aug/19/brexit-tory-mps-warn-of-entryism-threat-from-leave-eu-supporters')
    argparser.add_argument('--export', help='Article export option. Choices are: sql, json. Default argument: sql', default='sql')
    args = argparser.parse_args()
    url = args.url
    export_option = args.export
    article = ArticleCrawler(url=url)
    ExportArticle(article=article, to=export_option)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from future.builtins import str
from future.builtins import int
from calendar import month_name
from django.http import Http404
from django.shortcuts import get_object_or_404
from mezzanine.blog.models import BlogPost, BlogCategory,Estudio
from mezzanine.blog.feeds import PostsRSS, PostsAtom
from mezzanine.conf import settings
from mezzanine.generic.models import Keyword
from mezzanine.utils.views import render, paginate
from mezzanine.utils.models import get_user_model
User = get_user_model()
def blog_post_list(request, tag=None, year=None, month=None, username=None,
                   category=None, template="blog/blog_post_list.html"):
    """
    Display a list of blog posts that are filtered by tag, year, month,
    author or category. Custom templates are checked for using the name
    ``blog/blog_post_list_XXX.html`` where ``XXX`` is either the
    category slug or author's username if given.
    """
    settings.use_editable()
    templates = []
    # Start from the posts the current user is allowed to see.
    blog_posts = BlogPost.objects.published(for_user=request.user)
    if tag is not None:
        tag = get_object_or_404(Keyword, slug=tag)
        blog_posts = blog_posts.filter(keywords__keyword=tag)
    if year is not None:
        blog_posts = blog_posts.filter(publish_date__year=year)
        if month is not None:
            blog_posts = blog_posts.filter(publish_date__month=month)
            try:
                # Replace the numeric month with its name for the template.
                month = month_name[int(month)]
            except IndexError:
                raise Http404()
    if category is not None:
        category = get_object_or_404(BlogCategory, slug=category)
        blog_posts = blog_posts.filter(categories=category)
        # Category-specific template is tried before the generic one.
        templates.append(u"blog/blog_post_list_%s.html" %
                         str(category.slug))
    author = None
    if username is not None:
        author = get_object_or_404(User, username=username)
        blog_posts = blog_posts.filter(user=author)
        # Author-specific template is tried before the generic one.
        templates.append(u"blog/blog_post_list_%s.html" % username)
    prefetch = ("categories", "keywords__keyword")
    blog_posts = blog_posts.select_related("user").prefetch_related(*prefetch)
    blog_posts = paginate(blog_posts, request.GET.get("page", 1),
                          settings.BLOG_POST_PER_PAGE,
                          settings.MAX_PAGING_LINKS)
    context = {"blog_posts": blog_posts, "year": year, "month": month,
               "tag": tag, "category": category, "author": author}
    # The generic template is the last-resort fallback.
    templates.append(template)
    return render(request, templates, context)
## Start clinical-history view
def blog_post_historia(request, slug,template="blog/blog_post_historia.html"):
    """
    Display a patient's clinical history: the patient record (a BlogPost
    whose title holds the id in ``slug``) plus their Estudio entries,
    newest first.
    """
    templates = []
    # List all published patients and pick the one matching the slug.
    pacientes = BlogPost.objects.published(for_user=request.user)
    paciente = get_object_or_404(pacientes, title=slug)
    lista_estudios=Estudio.objects.all()
    lista_estudios=lista_estudios.filter(paciente__title=slug).order_by("-created")
    # A per-patient template is tried before the generic fallback.
    templates.append(u"blog/blog_post_historia_%s.html" % str(slug))
    # Each Estudio carries: date, study group, study item, value.
    context = {"estudios": lista_estudios,"paciente":paciente}
    templates.append(template)
    return render(request, templates, context)
## End clinical-history view
from reportlab.pdfgen import canvas
def blog_post_pdf(request,slug):
    """
    Minimal proof-of-concept PDF download view.

    Returns 404 when no published post matches ``slug``; otherwise streams
    a stub one-page PDF ("Hello world.") as an attachment.
    """
    # Validate the slug first so unknown posts 404 before any PDF work.
    # (Previously this query ran *after* the PDF was finished and its
    # result was stored in an unused ``context`` dict -- dead code.)
    blog_posts = BlogPost.objects.published(for_user=request.user).select_related()
    get_object_or_404(blog_posts, slug=slug)
    # Create the HttpResponse object with the appropriate PDF headers.
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename="somefilename.pdf"'
    # Create the PDF object, using the response object as its "file."
    p = canvas.Canvas(response)
    # Draw things on the PDF. Here's where the PDF generation happens.
    # See the ReportLab documentation for the full list of functionality.
    p.drawString(100, 100, "Hello world.")
    # Close the PDF object cleanly, and we're done.
    p.showPage()
    p.save()
    return response
from django.http import HttpResponse
from reportlab.pdfgen import canvas
from reportlab.lib.units import cm
def _draw_text_block(pdf, text, x, y):
    """Draw multi-line ``text`` on ``pdf`` starting at (x, y).

    Newlines start a new line; carriage returns are ignored. Extracted
    helper: the original function repeated this loop six times verbatim.
    """
    textobject = pdf.beginText(x, y)
    for ch in text:
        if ch == '\n':
            textobject.textLine()
        elif ch == '\r':
            pass  # ignore carriage returns
        else:
            textobject.textOut(ch)
    pdf.drawText(textobject)


def blog_post_detail(request, slug, year=None, month=None, day=None,
                     template="blog/blog_post_detail.html"):
    """
    Render a patient's clinical record (the BlogPost matching ``slug``) as
    a downloadable single-page A4 PDF attachment instead of an HTML page.
    """
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename="'+slug+'.pdf"'
    p = canvas.Canvas(response)
    # Letterhead logo near the top-left corner of the page.
    import os
    PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
    # BUGFIX: build the path portably; the old code concatenated a literal
    # backslash ("\logo.jpg"), which only resolved on Windows.
    logo_path = os.path.join(PROJECT_ROOT, "logo.jpg")
    try:
        p.drawImage(logo_path, 60, 760, width=87, height=30,
                    preserveAspectRatio=True)
    except Exception:
        # Missing/unreadable logo: render the PDF without letterhead.
        # (The old code crashed with a NameError in this situation.)
        print("logo.jpg not found; rendering PDF without letterhead")
    blog_posts = BlogPost.objects.published(for_user=request.user).select_related()
    blog_post = get_object_or_404(blog_posts, slug=slug)
    # Free-text clinical fields, each drawn at fixed A4 coordinates.
    _draw_text_block(p, str(blog_post.antecedentes_personales), 7*cm, 19.2*cm)
    _draw_text_block(p, str(blog_post.motivo_de_consulta), 6*cm, 29.7*cm - 8*cm)
    _draw_text_block(p, str(blog_post.diagnostico), 7*cm, 29.7*cm - 18.5*cm)
    _draw_text_block(p, str(blog_post.bmc.bio), 7*cm, 29.7*cm - 15.5*cm)
    _draw_text_block(p, str(blog_post.dyt_tratamiento), 7*cm, 29.7*cm - 22.5*cm)
    _draw_text_block(p, str(blog_post.obi_fondo), 7*cm, 29.7*cm - 17*cm)
    # Fixed labels; "y" is measured from the top of the page (A4 = 29.7 cm).
    # tag "l" also draws the value ``z`` right after the label; "ml" marks a
    # heading for one of the multi-line blocks drawn above.
    Max_y = 29.7*cm
    rpt = [{"x":2*cm,"y":4.5*cm,"texto":"Paciente:","tag":"l","z":blog_post.nombre},
           {"x":13.5*cm,"y":4.5*cm,"texto":"Fecha de Nac.:","tag":"l","z":str(blog_post.fecha_de_nacimento)},
           {"x":2*cm,"y":5.5*cm,"texto":"D.N.I.:","tag":"l","z":blog_post.title},
           {"x":2*cm,"y":6*cm,"texto":"Obra Social / N°:","tag":"l","z":str(blog_post.obra_social)+"/"+(blog_post.obra_social_numero)},
           {"x":2*cm,"y":6.5*cm,"texto":"Domicilio:","tag":"l","z":blog_post.domicilio},
           {"x":2*cm,"y":7*cm,"texto":"Teléfonos:","tag":"l","z":str(blog_post.telefono)},
           {"x":2*cm,"y":8*cm,"texto":"Motivo de Consulta:","tag":"ml","z":""},
           {"x":2*cm,"y":10.5*cm,"texto":"Antecedentes Personales:","tag":"ml","z":""},
           {"x":2*cm,"y":12.5*cm,"texto":"Ref. O.D.:","tag":"l","z":blog_post.r_od},
           {"x":12*cm,"y":12.5*cm,"texto":"O.I.:","tag":"l","z":blog_post.r_oi},
           {"x":2*cm,"y":13.5*cm,"texto":"A.V. S.C. O.D.:","tag":"l","z":blog_post.av_sc_od},
           {"x":12*cm,"y":13.5*cm,"texto":"S.C. O.I.:","tag":"l","z":blog_post.av_sc_oi},
           {"x":2*cm,"y":14*cm,"texto":"A.V. C.C. O.D.:","tag":"l","z":blog_post.av_cc_od},
           {"x":12*cm,"y":14*cm,"texto":"C.C. O.I.:","tag":"l","z":blog_post.av_cc_oi},
           {"x":2*cm,"y":14.5*cm,"texto":"P.O. O.D.:","tag":"l","z":blog_post.po_od},
           {"x":12*cm,"y":14.5*cm,"texto":"O.I.:","tag":"l","z":blog_post.po_oi},
           {"x":2*cm,"y":15.5*cm,"texto":"BIOMICROSCOPIA:","tag":"ml","z":""},
           {"x":2*cm,"y":17*cm,"texto":"FONDO DE OJOS OBI:","tag":"ml","z":blog_post.title},
           {"x":2*cm,"y":18.5*cm,"texto":"Diagnostico:","tag":"ml","z":blog_post.title},
           {"x":2*cm,"y":22.5*cm,"texto":"Tratamiento:","tag":"ml","z":blog_post.title},
           {"x":2*cm,"y":27.5*cm,"texto":"Observaciones:","tag":"ml","z":""},
           ]
    for e in rpt:
        p.drawString(e["x"], Max_y - e["y"], e["texto"])
        if e["tag"] == "l":
            # Offset the value past the label (~0.2 cm per character).
            p.drawString(e["x"] + (len(e["texto"].upper())*0.2*cm), Max_y - e["y"], e["z"])
    # Close the PDF object cleanly, and we're done. (The old code re-queried
    # the post here into an unused ``context`` dict -- dead code removed.)
    p.showPage()
    p.save()
    return response
def blog_post_feed(request, format, **kwargs):
"""
Blog posts feeds - maps format to the correct feed view.
"""
try:
return {"rss": PostsRSS, "atom": PostsAtom}[format](**kwargs)(request)
except KeyError:
raise Http404()
def blog_post_add(request):
raise Http404()
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from future.builtins import str
from future.builtins import int
from calendar import month_name
from django.http import Http404
from django.shortcuts import get_object_or_404
from mezzanine.blog.models import BlogPost, BlogCategory,Estudio
from mezzanine.blog.feeds import PostsRSS, PostsAtom
from mezzanine.conf import settings
from mezzanine.generic.models import Keyword
from mezzanine.utils.views import render, paginate
from mezzanine.utils.models import get_user_model
User = get_user_model()
def blog_post_list(request, tag=None, year=None, month=None, username=None,
category=None, template="blog/blog_post_list.html"):
"""
Display a list of blog posts that are filtered by tag, year, month,
author or category. Custom templates are checked for using the name
``blog/blog_post_list_XXX.html`` where ``XXX`` is either the
category slug or author's username if given.
"""
settings.use_editable()
templates = []
blog_posts = BlogPost.objects.published(for_user=request.user)
if tag is not None:
tag = get_object_or_404(Keyword, slug=tag)
blog_posts = blog_posts.filter(keywords__keyword=tag)
if year is not None:
blog_posts = blog_posts.filter(publish_date__year=year)
if month is not None:
blog_posts = blog_posts.filter(publish_date__month=month)
try:
month = month_name[int(month)]
except IndexError:
raise Http404()
if category is not None:
category = get_object_or_404(BlogCategory, slug=category)
blog_posts = blog_posts.filter(categories=category)
templates.append(u"blog/blog_post_list_%s.html" %
str(category.slug))
author = None
if username is not None:
author = get_object_or_404(User, username=username)
blog_posts = blog_posts.filter(user=author)
templates.append(u"blog/blog_post_list_%s.html" % username)
prefetch = ("categories", "keywords__keyword")
blog_posts = blog_posts.select_related("user").prefetch_related(*prefetch)
blog_posts = paginate(blog_posts, request.GET.get("page", 1),
settings.BLOG_POST_PER_PAGE,
settings.MAX_PAGING_LINKS)
context = {"blog_posts": blog_posts, "year": year, "month": month,
"tag": tag, "category": category, "author": author}
templates.append(template)
return render(request, templates, context)
##inicio historiaC
def blog_post_historia(request, slug,template="blog/blog_post_historia.html"):
"""Display a list of contenidos that are filtered by slug,
"""
templates = []
#listamos todos los pacientes..
pacientes = BlogPost.objects.published(for_user=request.user)
paciente = get_object_or_404(pacientes, title=slug)
lista_estudios=Estudio.objects.all()
lista_estudios=lista_estudios.filter(paciente__title=slug).order_by("-created")
templates.append(u"blog/blog_post_historia_%s.html" % str(slug))
#Fecha,estudio_grupo,estudio_item,valor
context = {"estudios": lista_estudios,"paciente":paciente}
templates.append(template)
return render(request, templates, context)
##EndHC
from reportlab.pdfgen import canvas
def blog_post_pdf(request,slug):
# Create the HttpResponse object with the appropriate PDF headers.
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="somefilename.pdf"'
# Create the PDF object, using the response object as its "file."
p = canvas.Canvas(response)
# Draw things on the PDF. Here's where the PDF generation happens.
# See the ReportLab documentation for the full list of functionality.
p.drawString(100, 100, "Hello world.")
# Close the PDF object cleanly, and we're done.
p.showPage()
p.save()
blog_posts = BlogPost.objects.published(for_user=request.user).select_related()
blog_post = get_object_or_404(blog_posts, slug=slug)
context = {"blog_post": blog_post, "editable_obj": blog_post}
#templates = [u"blog/blog_post_detail_%s.html" % str(slug), template]
#return render(request, templates, context)
return response
from django.http import HttpResponse
from reportlab.pdfgen import canvas
from reportlab.lib.units import cm
def blog_post_detail(request, slug, year=None, month=None, day=None,
template="blog/blog_post_detail.html"):
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="'+slug+'.pdf"'
p = canvas.Canvas(response)
#image DRAW
from PIL import Image
import os
page_offset = 7*cm
page_width, page_height = p._pagesize
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
path=PROJECT_ROOT+"\logo.jpg"
#print path
try:
image = Image.open(path)
except:
print "NO LO ENCUENTRA"
image_width, image_height = image.size
#p.setFillColorRGB(0,0,255) #choose your font colour
p.drawImage(path, 60, 760, width=87, height=30,
preserveAspectRatio=True)
#eimagedraw
# Create the PDF object, using the response object as its "file."
blog_posts = BlogPost.objects.published(for_user=request.user).select_related()
blog_post = get_object_or_404(blog_posts, slug=slug)
#antecedentes_personales
p_content = str(blog_post.antecedentes_personales)
#print p_content
textobject = p.beginText(7*cm, 19.2*cm)
for c in p_content:
if c == '\n':
textobject.textLine()
elif c == '\r':
pass # do nothing
else:
textobject.textOut(c)
p.drawText(textobject)
#motivo de consulta
p_content = str(blog_post.motivo_de_consulta)
textobject = p.beginText(6*cm, 29.7*cm - 8*cm)
for c in p_content:
if c == '\n':
textobject.textLine()
elif c == '\r':
pass # do nothing
else:
textobject.textOut(c)
p.drawText(textobject)
#diagnostico
p_content = str(blog_post.diagnostico)
textobject = p.beginText(7*cm, 29.7*cm - 18.5*cm)
for c in p_content:
if c == '\n':
textobject.textLine()
elif c == '\r':
pass # do nothing
else:
textobject.textOut(c)
p.drawText(textobject)
# biomicroscopia
p_content = str(blog_post.bmc.bio)
textobject = p.beginText(7*cm, 29.7*cm - 15.5*cm)
for c in p_content:
if c == '\n':
textobject.textLine()
elif c == '\r':
pass # do nothing
else:
textobject.textOut(c)
p.drawText(textobject)
#tratamiento
p_content = str(blog_post.dyt_tratamiento)
textobject = p.beginText(7*cm, 29.7*cm - 22.5*cm)
for c in p_content:
if c == '\n':
textobject.textLine()
elif c == '\r':
pass # do nothing
else:
textobject.textOut(c)
p.drawText(textobject)
#fondo_de_ojos_obi
p_content = str(blog_post.obi_fondo)
textobject = p.beginText(7*cm, 29.7*cm - 17*cm)
for c in p_content:
if c == '\n':
textobject.textLine()
elif c == '\r':
pass # do nothing
else:
textobject.textOut(c)
p.drawText(textobject)
Max_y=29.7*cm
rpt=[{"x":2*cm,"y":4.5*cm,"texto":"Paciente:","tag":"l","z":blog_post.nombre},
{"x":13.5*cm,"y":4.5*cm,"texto":"Fecha de Nac.:","tag":"l","z":str(blog_post.fecha_de_nacimento)},
{"x":2*cm,"y":5.5*cm,"texto":"D.N.I.:","tag":"l","z":blog_post.title},#2
{"x":2*cm,"y":6*cm,"texto":"Obra Social / N°:","tag":"l","z":str(blog_post.obra_social)+"/"+(blog_post.obra_social_numero)},
{"x":2*cm,"y":6.5*cm,"texto":"Domicilio:","tag":"l","z":blog_post.domicilio},#4
{"x":2*cm,"y":7*cm,"texto":"Teléfonos:","tag":"l","z":str(blog_post.telefono)},
{"x":2*cm,"y":8*cm,"texto":"Motivo de Consulta:","tag":"ml","z":""},
{"x":2*cm,"y":10.5*cm,"texto":"Antecedentes Personales:","tag":"ml","z":""},
{"x":2*cm,"y":12.5*cm,"texto":"Ref. O.D.:","tag":"l","z":blog_post.r_od},#6
{"x":12*cm,"y":12.5*cm,"texto":"O.I.:","tag":"l","z":blog_post.r_oi},
{"x":2*cm,"y":13.5*cm,"texto":"A.V. S.C. O.D.:","tag":"l","z":blog_post.av_sc_od},#8
{"x":12*cm,"y":13.5*cm,"texto":"S.C. O.I.:","tag":"l","z":blog_post.av_sc_oi},
{"x":2*cm,"y":14*cm,"texto":"A.V. C.C. O.D.:","tag":"l","z":blog_post.av_cc_od},
{"x":12*cm,"y":14*cm,"texto":"C.C. O.I.:","tag":"l","z":blog_post.av_cc_oi},
{"x":2*cm,"y":14.5*cm,"texto":"P.O. O.D.:","tag":"l","z":blog_post.po_od},#12
{"x":12*cm,"y":14.5*cm,"texto":"O.I.:","tag":"l","z":blog_post.po_oi},
{"x":2*cm,"y":15.5*cm,"texto":"BIOMICROSCOPIA:","tag":"ml","z":""},
{"x":2*cm,"y":17*cm,"texto":"FONDO DE OJOS OBI:","tag":"ml","z":blog_post.title},#16
{"x":2*cm,"y":18.5*cm,"texto":"Diagnostico:","tag":"ml","z":blog_post.title},
{"x":2*cm,"y":22.5*cm,"texto":"Tratamiento:","tag":"ml","z":blog_post.title},
{"x":2*cm,"y":27.5*cm,"texto":"Observaciones:","tag":"ml","z":""},
]
for e in rpt:
#print str(Max_y-e["y"])
#print e["texto"]
p.drawString(e["x"], Max_y-e["y"], e["texto"])
if e["tag"]=="l":
p.drawString(e["x"]+(len(e["texto"].upper())*0.2*cm), Max_y-e["y"], e["z"])
# Close the PDF object cleanly, and we're done.
blog_posts = BlogPost.objects.published(for_user=request.user).select_related()
blog_post = get_object_or_404(blog_posts, slug=slug)
context = {"blog_post": blog_post, "editable_obj": blog_post}
#templates = [u"blog/blog_post_detail_%s.html" % str(slug), template]
#return render(request, templates, context)
#p.drawString(100,200,blog_post.nombre)
p.showPage()
p.save()
return response
def blog_post_feed(request, format, **kwargs):
"""
Blog posts feeds - maps format to the correct feed view.
"""
try:
return {"rss": PostsRSS, "atom": PostsAtom}[format](**kwargs)(request)
except KeyError:
raise Http404()
def blog_post_add(request):
raise Http404()
| en | 0.520807 | # -*- coding: utf-8 -*- Display a list of blog posts that are filtered by tag, year, month,
author or category. Custom templates are checked for using the name
``blog/blog_post_list_XXX.html`` where ``XXX`` is either the
category slug or author's username if given. ##inicio historiaC Display a list of contenidos that are filtered by slug, #listamos todos los pacientes.. #Fecha,estudio_grupo,estudio_item,valor ##EndHC # Create the HttpResponse object with the appropriate PDF headers. # Create the PDF object, using the response object as its "file." # Draw things on the PDF. Here's where the PDF generation happens. # See the ReportLab documentation for the full list of functionality. # Close the PDF object cleanly, and we're done. #templates = [u"blog/blog_post_detail_%s.html" % str(slug), template] #return render(request, templates, context) #image DRAW #print path #p.setFillColorRGB(0,0,255) #choose your font colour #eimagedraw # Create the PDF object, using the response object as its "file." #antecedentes_personales #print p_content # do nothing #motivo de consulta # do nothing #diagnostico # do nothing # biomicroscopia # do nothing #tratamiento # do nothing #fondo_de_ojos_obi # do nothing #2 #4 #6 #8 #12 #16 #print str(Max_y-e["y"]) #print e["texto"] # Close the PDF object cleanly, and we're done. #templates = [u"blog/blog_post_detail_%s.html" % str(slug), template] #return render(request, templates, context) #p.drawString(100,200,blog_post.nombre) Blog posts feeds - maps format to the correct feed view. | 2.181699 | 2 |
ssl_fastai2/utils.py | Samjoel3101/Self-Supervised-Learning-fastai2 | 0 | 6620504 | # AUTOGENERATED! DO NOT EDIT! File to edit: 04_utils.ipynb (unless otherwise specified).
__all__ = ['Encoding', 'make_encoder', 'DropLastBatchCallback']
# Cell
from .imports import *
# Cell
class Encoding:
def __init__(self, encoder, size = (128, 128)):
self.sizes = model_sizes(encoder, size)
self.idxs = unet._get_sz_change_idxs(self.sizes)
@property
def final_channel(self):
return self.sizes[-1][1]
@property
def num_encodings(self):
return len(self.idxs)
def cut_idx_for_grid_size(self, grid_thresh = 12):
idxs = []; size = []
for idx, model_size in enumerate(self.sizes):
assert model_size[-2] == model_size[-1], "Rectangular image is feeded"
grid_size = model_size[-2]
if grid_size < grid_thresh:
break
idxs.append(idx); size.append(model_size[-1])
return idxs[-1] + 1, size[-1]
def get_hooks_for_encoding(self):
encodings = hook_outputs([model[i] for i in idxs], detach = False)
return encodings
# Cell
def make_encoder(arch, grid_size = 12, im_size = 224):
sample_model = create_body(arch)
enc = Encoding(sample_model, size = (im_size, im_size))
cut_idx, size = enc.cut_idx_for_grid_size(grid_size)
print(f'Cut index : {cut_idx}; Size of last Grid Size: {size}')
actual_model = create_body(arch, cut = cut_idx)
del sample_model; del enc
return actual_model, Encoding(actual_model, (im_size, im_size))
# Cell
class DropLastBatchCallback(Callback):
def before_batch(self):
if self.n_iter - 1 == self.iter:
raise CancelBatchException() | # AUTOGENERATED! DO NOT EDIT! File to edit: 04_utils.ipynb (unless otherwise specified).
__all__ = ['Encoding', 'make_encoder', 'DropLastBatchCallback']
# Cell
from .imports import *
# Cell
class Encoding:
def __init__(self, encoder, size = (128, 128)):
self.sizes = model_sizes(encoder, size)
self.idxs = unet._get_sz_change_idxs(self.sizes)
@property
def final_channel(self):
return self.sizes[-1][1]
@property
def num_encodings(self):
return len(self.idxs)
def cut_idx_for_grid_size(self, grid_thresh = 12):
idxs = []; size = []
for idx, model_size in enumerate(self.sizes):
assert model_size[-2] == model_size[-1], "Rectangular image is feeded"
grid_size = model_size[-2]
if grid_size < grid_thresh:
break
idxs.append(idx); size.append(model_size[-1])
return idxs[-1] + 1, size[-1]
def get_hooks_for_encoding(self):
encodings = hook_outputs([model[i] for i in idxs], detach = False)
return encodings
# Cell
def make_encoder(arch, grid_size = 12, im_size = 224):
sample_model = create_body(arch)
enc = Encoding(sample_model, size = (im_size, im_size))
cut_idx, size = enc.cut_idx_for_grid_size(grid_size)
print(f'Cut index : {cut_idx}; Size of last Grid Size: {size}')
actual_model = create_body(arch, cut = cut_idx)
del sample_model; del enc
return actual_model, Encoding(actual_model, (im_size, im_size))
# Cell
class DropLastBatchCallback(Callback):
def before_batch(self):
if self.n_iter - 1 == self.iter:
raise CancelBatchException() | en | 0.495839 | # AUTOGENERATED! DO NOT EDIT! File to edit: 04_utils.ipynb (unless otherwise specified). # Cell # Cell # Cell # Cell | 2.054612 | 2 |
s3_backup/__main__.py | mattberther/docker-s3-backup | 0 | 6620505 | import datetime
import fnmatch
import getopt
import logging
import os
import string
import sys
import tarfile
import boto3
from botocore.exceptions import ClientError
from glob import glob
from datetime import timedelta
from .s3_rotate import S3Rotator
# Configuration
aws_bucket = os.environ['AWS_S3_BUCKET']
exclude_files = []
if os.environ.get('BACKUP_EXCLUDE_FILES'):
exclude_files = os.environ.get('BACKUP_EXCLUDE_FILES').split(';')
dirs = glob('/data/*/')
logger = logging.getLogger(__name__)
# Script Configuration
today = datetime.date.today()
# Establish S3 Connection
s3_client = boto3.client('s3')
def should_include(tarinfo):
for x in exclude_files:
if fnmatch.fnmatch(tarinfo.name, x):
logger.debug(f'[FILE] Excluding {tarinfo.name} based on filter: {x}')
return None
return tarinfo
def make_tarfile(output_filename, source_dir):
with tarfile.open(output_filename, 'w:gz') as tar:
tar.add(source_dir, filter=should_include)
tar.close()
def upload_s3(tarfile):
filename = os.path.basename(tarfile)
logger.info(f'[S3] Uploading file archive {tarfile}...')
try:
resp = s3_client.upload_file(tarfile, aws_bucket, filename)
except ClientError as e:
logging.error(e)
def main():
dry_run = False
options, arguments = getopt.getopt(sys.argv[1:], 'n', ['dry-run'])
for option, value in options:
if option in ('-n', '--dry-run'):
logger.info(f'Performing a dry run (because of {option})')
dry_run = True
for x in exclude_files:
logger.info(f'[FILE] Excluding patten {x}')
for d in dirs:
logger.info(f'[FILE] Found directory {d}')
folder = d.rstrip(os.sep).split(os.sep)[::-1][0]
filename = f'{folder}-{str(today)}.files.tar.gz'
out_file = os.path.join('/tmp', filename)
logger.info(f'[FILE] Creating archive for {folder}')
make_tarfile(out_file, d)
upload_s3(out_file)
S3Rotator(include_list=[f'{folder}-*.files.tar.gz'],
dry_run=dry_run).rotate_backups(aws_bucket)
os.remove(out_file)
if __name__ == "__main__":
main()
| import datetime
import fnmatch
import getopt
import logging
import os
import string
import sys
import tarfile
import boto3
from botocore.exceptions import ClientError
from glob import glob
from datetime import timedelta
from .s3_rotate import S3Rotator
# Configuration
aws_bucket = os.environ['AWS_S3_BUCKET']
exclude_files = []
if os.environ.get('BACKUP_EXCLUDE_FILES'):
exclude_files = os.environ.get('BACKUP_EXCLUDE_FILES').split(';')
dirs = glob('/data/*/')
logger = logging.getLogger(__name__)
# Script Configuration
today = datetime.date.today()
# Establish S3 Connection
s3_client = boto3.client('s3')
def should_include(tarinfo):
for x in exclude_files:
if fnmatch.fnmatch(tarinfo.name, x):
logger.debug(f'[FILE] Excluding {tarinfo.name} based on filter: {x}')
return None
return tarinfo
def make_tarfile(output_filename, source_dir):
with tarfile.open(output_filename, 'w:gz') as tar:
tar.add(source_dir, filter=should_include)
tar.close()
def upload_s3(tarfile):
filename = os.path.basename(tarfile)
logger.info(f'[S3] Uploading file archive {tarfile}...')
try:
resp = s3_client.upload_file(tarfile, aws_bucket, filename)
except ClientError as e:
logging.error(e)
def main():
dry_run = False
options, arguments = getopt.getopt(sys.argv[1:], 'n', ['dry-run'])
for option, value in options:
if option in ('-n', '--dry-run'):
logger.info(f'Performing a dry run (because of {option})')
dry_run = True
for x in exclude_files:
logger.info(f'[FILE] Excluding patten {x}')
for d in dirs:
logger.info(f'[FILE] Found directory {d}')
folder = d.rstrip(os.sep).split(os.sep)[::-1][0]
filename = f'{folder}-{str(today)}.files.tar.gz'
out_file = os.path.join('/tmp', filename)
logger.info(f'[FILE] Creating archive for {folder}')
make_tarfile(out_file, d)
upload_s3(out_file)
S3Rotator(include_list=[f'{folder}-*.files.tar.gz'],
dry_run=dry_run).rotate_backups(aws_bucket)
os.remove(out_file)
if __name__ == "__main__":
main()
| en | 0.614436 | # Configuration # Script Configuration # Establish S3 Connection | 2.191326 | 2 |
benchmark/registration_pipeline.py | humanpose1/riedones3d | 2 | 6620506 | <filename>benchmark/registration_pipeline.py<gh_stars>1-10
try:
import handcrafted_descriptor
except ImportError:
pass
import open3d as o3d
import torch
import hydra
import pandas as pd
from omegaconf import OmegaConf
import os
import os.path as osp
import time
from point_cloud.visu import torch2o3d
from point_cloud import instantiate_registrator
from torch_points3d.datasets.dataset_factory import instantiate_dataset
from torch_points3d.models import model_interface
from torch_points3d.metrics.colored_tqdm import Coloredtqdm as Ctq
from torch_points3d.utils.registration import estimate_transfo
class MockModel(model_interface.DatasetInterface):
def __init__(self, conv_type="SPARSE"):
self._conv_type = conv_type
@property
def conv_type(self):
return self._conv_type
def main_pipeline(dataset, registrator, name="Test"):
# instantiate dataset
# for loop
# list of dict with name of scene and name of coin
loader = dataset.test_dataloaders[0]
list_res = []
with Ctq(loader) as tq_test_loader:
for i, data in enumerate(tq_test_loader):
s, t = data.to_data()
name_scene, name_pair_source, name_pair_target = dataset.test_dataset[0].get_name(i)
res = dict(name_scene=name_scene, name_pair_source=name_pair_source, name_pair_target=name_pair_target)
metric = registrator.evaluate_pair(data)
res = dict(**res, **metric)
list_res.append(res)
tq_test_loader.set_postfix(**res)
df = pd.DataFrame(list_res)
output_path = os.path.join(name, registrator.__class__.__name__)
if not os.path.exists(output_path):
os.makedirs(output_path, exist_ok=True)
df.to_csv(osp.join(output_path, "final_res_{}.csv".format(time.strftime("%Y%m%d-%H%M%S"))))
print(df.groupby("name_scene").mean())
@hydra.main(config_path="conf", config_name="config")
def run(cfg):
OmegaConf.set_struct(cfg, False)
# cfg_data = OmegaConf.load("/media/admincaor/DataHDD2To/mines/code/deeppointcloud-benchmarks/conf/data/registration/testliffre.yaml").data
# cfg_data.dataroot = "/media/admincaor/DataHDD2To/mines/code/deeppointcloud-benchmarks/data"
cfg_data = cfg.data
dataset = instantiate_dataset(cfg_data)
dataset.create_dataloaders(
MockModel(), 1, False, cfg.num_workers, False,
)
registrator = instantiate_registrator(cfg.registrator)
main_pipeline(dataset, registrator, cfg_data.name)
if __name__ == "__main__":
run()
| <filename>benchmark/registration_pipeline.py<gh_stars>1-10
try:
import handcrafted_descriptor
except ImportError:
pass
import open3d as o3d
import torch
import hydra
import pandas as pd
from omegaconf import OmegaConf
import os
import os.path as osp
import time
from point_cloud.visu import torch2o3d
from point_cloud import instantiate_registrator
from torch_points3d.datasets.dataset_factory import instantiate_dataset
from torch_points3d.models import model_interface
from torch_points3d.metrics.colored_tqdm import Coloredtqdm as Ctq
from torch_points3d.utils.registration import estimate_transfo
class MockModel(model_interface.DatasetInterface):
def __init__(self, conv_type="SPARSE"):
self._conv_type = conv_type
@property
def conv_type(self):
return self._conv_type
def main_pipeline(dataset, registrator, name="Test"):
# instantiate dataset
# for loop
# list of dict with name of scene and name of coin
loader = dataset.test_dataloaders[0]
list_res = []
with Ctq(loader) as tq_test_loader:
for i, data in enumerate(tq_test_loader):
s, t = data.to_data()
name_scene, name_pair_source, name_pair_target = dataset.test_dataset[0].get_name(i)
res = dict(name_scene=name_scene, name_pair_source=name_pair_source, name_pair_target=name_pair_target)
metric = registrator.evaluate_pair(data)
res = dict(**res, **metric)
list_res.append(res)
tq_test_loader.set_postfix(**res)
df = pd.DataFrame(list_res)
output_path = os.path.join(name, registrator.__class__.__name__)
if not os.path.exists(output_path):
os.makedirs(output_path, exist_ok=True)
df.to_csv(osp.join(output_path, "final_res_{}.csv".format(time.strftime("%Y%m%d-%H%M%S"))))
print(df.groupby("name_scene").mean())
@hydra.main(config_path="conf", config_name="config")
def run(cfg):
OmegaConf.set_struct(cfg, False)
# cfg_data = OmegaConf.load("/media/admincaor/DataHDD2To/mines/code/deeppointcloud-benchmarks/conf/data/registration/testliffre.yaml").data
# cfg_data.dataroot = "/media/admincaor/DataHDD2To/mines/code/deeppointcloud-benchmarks/data"
cfg_data = cfg.data
dataset = instantiate_dataset(cfg_data)
dataset.create_dataloaders(
MockModel(), 1, False, cfg.num_workers, False,
)
registrator = instantiate_registrator(cfg.registrator)
main_pipeline(dataset, registrator, cfg_data.name)
if __name__ == "__main__":
run()
| en | 0.429194 | # instantiate dataset # for loop # list of dict with name of scene and name of coin # cfg_data = OmegaConf.load("/media/admincaor/DataHDD2To/mines/code/deeppointcloud-benchmarks/conf/data/registration/testliffre.yaml").data # cfg_data.dataroot = "/media/admincaor/DataHDD2To/mines/code/deeppointcloud-benchmarks/data" | 1.998854 | 2 |
perfrunner/workloads/tpcdsfun/driver.py | bochun/perfrunner | 18 | 6620507 | import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from itertools import cycle
from typing import Iterator, List
import numpy
from logger import logger
from perfrunner.helpers.misc import pretty_dict
from perfrunner.helpers.rest import RestHelper
from perfrunner.workloads.tpcdsfun.query_gen import Query, new_queries
def store_metrics(statement: str, metrics: dict):
with open('tpcds.log', 'a') as fh:
fh.write(pretty_dict({
'statement': statement, 'metrics': metrics,
}))
fh.write('\n')
def run_query(rest: RestHelper, node: str, query: Query) -> float:
t0 = time.time()
response = rest.exec_analytics_statement(node, query.statement)
latency = time.time() - t0 # Latency in seconds
store_metrics(query.statement, response.json()['metrics'])
return latency
def run_concurrent_queries(rest: RestHelper,
nodes: List[str],
query: Query,
concurrency: int,
num_requests: int) -> List[float]:
with ThreadPoolExecutor(max_workers=concurrency) as executor:
nodes = cycle(nodes)
futures = [
executor.submit(run_query, rest, next(nodes), query)
for _ in range(num_requests)
]
timings = []
for future in as_completed(futures):
timings.append(future.result())
return timings
def tpcds(rest: RestHelper,
nodes: List[str],
concurrency: int,
num_requests: int,
query_set: str) -> Iterator:
for query in new_queries(query_set):
logger.info('Running: {}'.format(query.statement))
timings = run_concurrent_queries(rest,
nodes,
query,
concurrency,
num_requests)
avg_latency = int(1000 * numpy.mean(timings)) # Latency in ms
yield query, avg_latency
| import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from itertools import cycle
from typing import Iterator, List
import numpy
from logger import logger
from perfrunner.helpers.misc import pretty_dict
from perfrunner.helpers.rest import RestHelper
from perfrunner.workloads.tpcdsfun.query_gen import Query, new_queries
def store_metrics(statement: str, metrics: dict):
with open('tpcds.log', 'a') as fh:
fh.write(pretty_dict({
'statement': statement, 'metrics': metrics,
}))
fh.write('\n')
def run_query(rest: RestHelper, node: str, query: Query) -> float:
t0 = time.time()
response = rest.exec_analytics_statement(node, query.statement)
latency = time.time() - t0 # Latency in seconds
store_metrics(query.statement, response.json()['metrics'])
return latency
def run_concurrent_queries(rest: RestHelper,
nodes: List[str],
query: Query,
concurrency: int,
num_requests: int) -> List[float]:
with ThreadPoolExecutor(max_workers=concurrency) as executor:
nodes = cycle(nodes)
futures = [
executor.submit(run_query, rest, next(nodes), query)
for _ in range(num_requests)
]
timings = []
for future in as_completed(futures):
timings.append(future.result())
return timings
def tpcds(rest: RestHelper,
nodes: List[str],
concurrency: int,
num_requests: int,
query_set: str) -> Iterator:
for query in new_queries(query_set):
logger.info('Running: {}'.format(query.statement))
timings = run_concurrent_queries(rest,
nodes,
query,
concurrency,
num_requests)
avg_latency = int(1000 * numpy.mean(timings)) # Latency in ms
yield query, avg_latency
| en | 0.922121 | # Latency in seconds # Latency in ms | 2.366166 | 2 |
flask_app/api/base.py | nishworks/Flask-starter | 0 | 6620508 | from __future__ import absolute_import
import decimal
import json
import logging
import flask
log = logging.getLogger(__name__)
def json_handler(obj):
""" Handles non-serializable objects """
if isinstance(obj, decimal.Decimal):
return float(obj)
try:
return str(obj)
except TypeError:
return obj.__dict__
def json_response(response, status_code, message=None, errors=None, headers=None):
""" Return a http json response """
response = {
'uri': flask.request.path,
'message': message,
'status': status_code,
'request-params': flask.g.request_args,
'request-method': flask.request.method,
'response': response,
'errors': errors
}
resp = flask.make_response(
json.dumps(response, default=json_handler),
status_code,
{'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'})
resp.headers.extend(headers or {})
return resp
| from __future__ import absolute_import
import decimal
import json
import logging
import flask
log = logging.getLogger(__name__)
def json_handler(obj):
""" Handles non-serializable objects """
if isinstance(obj, decimal.Decimal):
return float(obj)
try:
return str(obj)
except TypeError:
return obj.__dict__
def json_response(response, status_code, message=None, errors=None, headers=None):
""" Return a http json response """
response = {
'uri': flask.request.path,
'message': message,
'status': status_code,
'request-params': flask.g.request_args,
'request-method': flask.request.method,
'response': response,
'errors': errors
}
resp = flask.make_response(
json.dumps(response, default=json_handler),
status_code,
{'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'})
resp.headers.extend(headers or {})
return resp
| en | 0.30391 | Handles non-serializable objects Return a http json response | 2.556158 | 3 |
skbio/io/format/binary_dm.py | theAeon/scikit-bio | 0 | 6620509 | <filename>skbio/io/format/binary_dm.py
"""
Simple binary dissimilarity matrix format (:mod:`skbio.io.format.binary_dm`)
============================================================================
.. currentmodule:: skbio.io.format.binary_dm
The Binary DisSimilarity Matrix format (``binary_dm``) encodes a binary
representation for dissimilarity and distance matrices. The format is
designed to facilitate rapid random access to individual rows or columns of
a hollow matrix.
Format Support
--------------
**Has Sniffer: Yes**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |Yes |:mod:`skbio.stats.distance.DissimilarityMatrix` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.stats.distance.DistanceMatrix` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
The binary dissimilarity matrix and object identifiers are stored within an
HDF5 [1]_ file. Both datatypes are represented by their own datasets. The
`ids` dataset is of a variable length unicode type, while the
`matrix` dataset are floating point. The shape of the `ids` is
`(N,)`, and the shape of the `dissimilarities` is `(N, N)`. The diagonal of
`matrix` are all zeros.
The dissimilarity between `ids[i]` and `ids[j]` is interpreted
to be the value at `matrix[i, j]`. `i` and `j` are integer indices.
Required attributes:
+-----------+---------------------+------------------------------+
|Attribute |Value |Description |
| |type | |
+===========+=====================+==============================+
|format |string |A string identifying the file |
| | |as Binary DM format |
+-----------+---------------------+------------------------------+
|version |string |The version of the current |
| | |Binary DM format |
+-----------+---------------------+------------------------------+
|matrix |float32 or float64 |A (N, N) dataset containing |
| | |the values of the |
| | |dissimilarity matrix |
+-----------+---------------------+------------------------------+
|order |string |A (N,) dataset of the sample |
| | |IDs, where N is the total |
| | |number of IDs |
+-----------+---------------------+------------------------------+
.. note:: This file format is most useful for storing large matrices that do
not need to be represented in a human-readable format. This format is
especially appropriate for facilitating random access to entries in the
distance matrix, such as when calculating within and between distances for a
subset of samples in a large matrix.
References
----------
.. [1] http://www.hdfgroup.org/
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import h5py
from skbio.io import create_format
from skbio.stats.distance import DissimilarityMatrix, DistanceMatrix
# Register the format with scikit-bio's I/O registry; HDF5 is a binary format.
binary_dm = create_format('binary_dm', encoding='binary')

# Variable-length unicode dtype used for the 'order' (object IDs) dataset.
_vlen_dtype = h5py.special_dtype(vlen=str)
@binary_dm.sniffer()
def _binary_dm_sniffer(fh):
try:
f = h5py.File(fh, 'r')
except OSError:
return False, {}
header = _get_header(f)
if header is None:
return False, {}
ids = f.get('order')
if ids is None:
return False, {}
mat = f.get('matrix')
if mat is None:
return False, {}
n = len(ids)
if mat.shape != (n, n):
return False, {}
return True, {}
@binary_dm.reader(DissimilarityMatrix)
def _binary_dm_to_dissimilarity(fh):
return _h5py_mat_to_skbio_mat(fh)
@binary_dm.reader(DistanceMatrix)
def _binary_dm_to_distance(fh):
return _h5py_mat_to_skbio_mat(fh)
@binary_dm.writer(DissimilarityMatrix)
def _dissimilarity_to_binary_dm(obj, fh):
return _skbio_mat_to_h5py_mat(fh)
@binary_dm.writer(DistanceMatrix)
def _distance_to_binary_dm(obj, fh):
return _skbio_mat_to_h5py_mat(fh)
def _h5py_mat_to_skbio_mat(cls, fh):
return cls(fh['matrix'], _parse_ids(fh['order']))
def _skbio_mat_to_h5py_mat(obj, fh):
_set_header(fh)
ids = fh.create_dataset('order', shape=(len(obj.ids), ), dtype=_vlen_dtype)
ids[:] = obj.ids
fh.create_dataset('matrix', data=obj.data)
def _get_header(fh):
format_ = fh.get('format')
version = fh.get('version')
if format is None or version is None:
return None
else:
return {'format': format_[0], 'version': version[0]}
def _parse_ids(ids):
if isinstance(ids[0], bytes):
return _bytes_decoder(ids)
else:
return _passthrough_decoder(ids)
def _verify_dimensions(fh):
if 'order' not in fh or 'matrix' not in fh:
return False
n = len(fh['order'])
return fh['matrix'].shape == (n, n)
def _bytes_decoder(x):
return [i.decode('utf8') for i in x]
def _passthrough_decoder(x):
return x
def _set_header(h5grp):
"""Set format spec header information"""
h5grp['format'] = [b'BDSM', ]
h5grp['version'] = [b'2020.06', ]
| <filename>skbio/io/format/binary_dm.py
"""
Simple binary dissimilarity matrix format (:mod:`skbio.io.format.binary_dm`)
============================================================================
.. currentmodule:: skbio.io.format.binary_dm
The Binary DisSimilarity Matrix format (``binary_dm``) encodes a binary
representation for dissimilarity and distance matrices. The format is
designed to facilitate rapid random access to individual rows or columns of
a hollow matrix.
Format Support
--------------
**Has Sniffer: Yes**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |Yes |:mod:`skbio.stats.distance.DissimilarityMatrix` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.stats.distance.DistanceMatrix` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
The binary dissimilarity matrix and object identifiers are stored within an
HDF5 [1]_ file. Both datatypes are represented by their own datasets. The
`ids` dataset is of a variable length unicode type, while the
`matrix` dataset are floating point. The shape of the `ids` is
`(N,)`, and the shape of the `dissimilarities` is `(N, N)`. The diagonal of
`matrix` are all zeros.
The dissimilarity between `ids[i]` and `ids[j]` is interpreted
to be the value at `matrix[i, j]`. `i` and `j` are integer indices.
Required attributes:
+-----------+---------------------+------------------------------+
|Attribute |Value |Description |
| |type | |
+===========+=====================+==============================+
|format |string |A string identifying the file |
| | |as Binary DM format |
+-----------+---------------------+------------------------------+
|version |string |The version of the current |
| | |Binary DM format |
+-----------+---------------------+------------------------------+
|matrix |float32 or float64 |A (N, N) dataset containing |
| | |the values of the |
| | |dissimilarity matrix |
+-----------+---------------------+------------------------------+
|order |string |A (N,) dataset of the sample |
| | |IDs, where N is the total |
| | |number of IDs |
+-----------+---------------------+------------------------------+
.. note:: This file format is most useful for storing large matrices that do
not need to be represented in a human-readable format. This format is
especially appropriate for facilitating random access to entries in the
distance matrix, such as when calculating within and between distances for a
subset of samples in a large matrix.
References
----------
.. [1] http://www.hdfgroup.org/
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import h5py
from skbio.io import create_format
from skbio.stats.distance import DissimilarityMatrix, DistanceMatrix
binary_dm = create_format('binary_dm', encoding='binary')
_vlen_dtype = h5py.special_dtype(vlen=str)
@binary_dm.sniffer()
def _binary_dm_sniffer(fh):
try:
f = h5py.File(fh, 'r')
except OSError:
return False, {}
header = _get_header(f)
if header is None:
return False, {}
ids = f.get('order')
if ids is None:
return False, {}
mat = f.get('matrix')
if mat is None:
return False, {}
n = len(ids)
if mat.shape != (n, n):
return False, {}
return True, {}
@binary_dm.reader(DissimilarityMatrix)
def _binary_dm_to_dissimilarity(fh):
return _h5py_mat_to_skbio_mat(fh)
@binary_dm.reader(DistanceMatrix)
def _binary_dm_to_distance(fh):
return _h5py_mat_to_skbio_mat(fh)
@binary_dm.writer(DissimilarityMatrix)
def _dissimilarity_to_binary_dm(obj, fh):
return _skbio_mat_to_h5py_mat(fh)
@binary_dm.writer(DistanceMatrix)
def _distance_to_binary_dm(obj, fh):
return _skbio_mat_to_h5py_mat(fh)
def _h5py_mat_to_skbio_mat(cls, fh):
return cls(fh['matrix'], _parse_ids(fh['order']))
def _skbio_mat_to_h5py_mat(obj, fh):
_set_header(fh)
ids = fh.create_dataset('order', shape=(len(obj.ids), ), dtype=_vlen_dtype)
ids[:] = obj.ids
fh.create_dataset('matrix', data=obj.data)
def _get_header(fh):
format_ = fh.get('format')
version = fh.get('version')
if format is None or version is None:
return None
else:
return {'format': format_[0], 'version': version[0]}
def _parse_ids(ids):
if isinstance(ids[0], bytes):
return _bytes_decoder(ids)
else:
return _passthrough_decoder(ids)
def _verify_dimensions(fh):
if 'order' not in fh or 'matrix' not in fh:
return False
n = len(fh['order'])
return fh['matrix'].shape == (n, n)
def _bytes_decoder(x):
return [i.decode('utf8') for i in x]
def _passthrough_decoder(x):
return x
def _set_header(h5grp):
"""Set format spec header information"""
h5grp['format'] = [b'BDSM', ]
h5grp['version'] = [b'2020.06', ]
| en | 0.58313 | Simple binary dissimilarity matrix format (:mod:`skbio.io.format.binary_dm`) ============================================================================ .. currentmodule:: skbio.io.format.binary_dm The Binary DisSimilarity Matrix format (``binary_dm``) encodes a binary representation for dissimilarity and distance matrices. The format is designed to facilitate rapid random access to individual rows or columns of a hollow matrix. Format Support -------------- **Has Sniffer: Yes** +------+------+---------------------------------------------------------------+ |Reader|Writer| Object Class | +======+======+===============================================================+ |Yes |Yes |:mod:`skbio.stats.distance.DissimilarityMatrix` | +------+------+---------------------------------------------------------------+ |Yes |Yes |:mod:`skbio.stats.distance.DistanceMatrix` | +------+------+---------------------------------------------------------------+ Format Specification -------------------- The binary dissimilarity matrix and object identifiers are stored within an HDF5 [1]_ file. Both datatypes are represented by their own datasets. The `ids` dataset is of a variable length unicode type, while the `matrix` dataset are floating point. The shape of the `ids` is `(N,)`, and the shape of the `dissimilarities` is `(N, N)`. The diagonal of `matrix` are all zeros. The dissimilarity between `ids[i]` and `ids[j]` is interpreted to be the value at `matrix[i, j]`. `i` and `j` are integer indices. 
Required attributes: +-----------+---------------------+------------------------------+ |Attribute |Value |Description | | |type | | +===========+=====================+==============================+ |format |string |A string identifying the file | | | |as Binary DM format | +-----------+---------------------+------------------------------+ |version |string |The version of the current | | | |Binary DM format | +-----------+---------------------+------------------------------+ |matrix |float32 or float64 |A (N, N) dataset containing | | | |the values of the | | | |dissimilarity matrix | +-----------+---------------------+------------------------------+ |order |string |A (N,) dataset of the sample | | | |IDs, where N is the total | | | |number of IDs | +-----------+---------------------+------------------------------+ .. note:: This file format is most useful for storing large matrices that do not need to be represented in a human-readable format. This format is especially appropriate for facilitating random access to entries in the distance matrix, such as when calculating within and between distances for a subset of samples in a large matrix. References ---------- .. [1] http://www.hdfgroup.org/ # ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- Set format spec header information | 1.950889 | 2 |
funcs.py | omprakash1989/LilUrl | 0 | 6620510 |
def new_method(p):
return p + 1
|
def new_method(p):
return p + 1
| none | 1 | 1.935151 | 2 | |
main.py | betagouv/api-domaines | 0 | 6620511 | from fastapi import FastAPI
DATA = [
{ 'domain': 'modernisation.gouv.fr', 'administration_type': 'ministere', 'SIRET': '1234567'},
{ 'domain': 'dinum.gouv.fr', 'administration_type': 'interministériel', 'SIRET': '09887765'}
]
app = FastAPI()
@app.get('/')
async def get_root():
return { 'content': 'coucou coucou' }
@app.get('/domain/{name}')
async def get_domain(name: str):
for entry in DATA:
if entry.get('domain') == name:
return entry
return { 'error': 'domain not found' }
| from fastapi import FastAPI
DATA = [
{ 'domain': 'modernisation.gouv.fr', 'administration_type': 'ministere', 'SIRET': '1234567'},
{ 'domain': 'dinum.gouv.fr', 'administration_type': 'interministériel', 'SIRET': '09887765'}
]
app = FastAPI()
@app.get('/')
async def get_root():
return { 'content': 'coucou coucou' }
@app.get('/domain/{name}')
async def get_domain(name: str):
for entry in DATA:
if entry.get('domain') == name:
return entry
return { 'error': 'domain not found' }
| none | 1 | 3.018671 | 3 | |
MLBook/7 Committee/party.py | AjayKrP/Machine-Learning | 0 | 6620512 | <filename>MLBook/7 Committee/party.py
# Code from Chapter 7 of Machine Learning: An Algorithmic Perspective
# by <NAME> (http://seat.massey.ac.nz/personal/s.r.marsland/MLBook.html)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# <NAME>, 2008
# Comparison of stumping and bagging on the Party dataset
from numpy import *
import dtw
import bagging
tree = dtw.dtree()
bagger = bagging.bagger()
party,classes,features = tree.read_data('../6 Trees/party.data')
w = ones((shape(party)[0]),dtype = float)/shape(party)[0]
t=tree.make_tree(party,w,classes,features,1)
#tree.printTree(t,' ')
print "Tree Stump Prediction"
print tree.classifyAll(t,party)
print "True Classes"
print classes
c=bagger.bag(party,classes,features,20)
print "Bagged Results"
print bagger.bagclass(c,party)
| <filename>MLBook/7 Committee/party.py
# Code from Chapter 7 of Machine Learning: An Algorithmic Perspective
# by <NAME> (http://seat.massey.ac.nz/personal/s.r.marsland/MLBook.html)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# <NAME>, 2008
# Comparison of stumping and bagging on the Party dataset
from numpy import *
import dtw
import bagging
tree = dtw.dtree()
bagger = bagging.bagger()
party,classes,features = tree.read_data('../6 Trees/party.data')
w = ones((shape(party)[0]),dtype = float)/shape(party)[0]
t=tree.make_tree(party,w,classes,features,1)
#tree.printTree(t,' ')
print "Tree Stump Prediction"
print tree.classifyAll(t,party)
print "True Classes"
print classes
c=bagger.bag(party,classes,features,20)
print "Bagged Results"
print bagger.bagclass(c,party)
| en | 0.7603 | # Code from Chapter 7 of Machine Learning: An Algorithmic Perspective # by <NAME> (http://seat.massey.ac.nz/personal/s.r.marsland/MLBook.html) # You are free to use, change, or redistribute the code in any way you wish for # non-commercial purposes, but please maintain the name of the original author. # This code comes with no warranty of any kind. # <NAME>, 2008 # Comparison of stumping and bagging on the Party dataset #tree.printTree(t,' ') | 2.893725 | 3 |
scpp_base/scpp_base/src/db/coin_value_handler.py | scorelab/social-currency | 4 | 6620513 | class coin_value_handler:
"""
later this class calculate coin value according to take data by
DB_Handler class.This iteration not use this class
""" | class coin_value_handler:
"""
later this class calculate coin value according to take data by
DB_Handler class.This iteration not use this class
""" | en | 0.84733 | later this class calculate coin value according to take data by DB_Handler class.This iteration not use this class | 2.18055 | 2 |
palettes.py | ggood/adsbTheremin | 1 | 6620514 | <gh_stars>1-10
#!/usr/bin/env python3
MIDI_NOTE_PALETTES = [
# C, D, F, G, Bflat "pentatonic"
(
24,
36,
48, 50, 53, 55, 58,
60, 62, 65, 67, 70,
72, 74, 77, 79, 82,
84, 86, 89, 91, 94,
106, 108, 111, 113, 116,
118, 120, 123
),
# Open 5ths, based on D
(
26, 33,
38, 45,
50, 57,
62, 69,
74, 81,
86, 93,
98, 105,
110, 117
),
# C Minor 11th
(
24,
36,
48, 51, 55, 58, 62, 65,
60, 63, 67, 70, 74, 77,
72, 75, 79, 82, 86, 89,
84, 87, 91, 94, 98, 101,
96, 99, 103, 106, 110, 113,
108, 111, 115, 118, 122, 125
),
]
| #!/usr/bin/env python3
MIDI_NOTE_PALETTES = [
# C, D, F, G, Bflat "pentatonic"
(
24,
36,
48, 50, 53, 55, 58,
60, 62, 65, 67, 70,
72, 74, 77, 79, 82,
84, 86, 89, 91, 94,
106, 108, 111, 113, 116,
118, 120, 123
),
# Open 5ths, based on D
(
26, 33,
38, 45,
50, 57,
62, 69,
74, 81,
86, 93,
98, 105,
110, 117
),
# C Minor 11th
(
24,
36,
48, 51, 55, 58, 62, 65,
60, 63, 67, 70, 74, 77,
72, 75, 79, 82, 86, 89,
84, 87, 91, 94, 98, 101,
96, 99, 103, 106, 110, 113,
108, 111, 115, 118, 122, 125
),
] | en | 0.803206 | #!/usr/bin/env python3 # C, D, F, G, Bflat "pentatonic" # Open 5ths, based on D # C Minor 11th | 1.73863 | 2 |
dataloader.py | AlanShaw-GitHub/toyRNNText | 6 | 6620515 | <filename>dataloader.py
# from gensim.models import Word2Vec
import csv
# import jieba
import pickle
import re
import random
import numpy as np
_stopwords = ['[','?',':','“','”',',','(',')','、','」','「','《','》',']']
stopwords = "".join(_stopwords)
train_len = 100000
test_len = 1000
class Loader():
# def __init__(self):
# self.max_tag_num = 10000
# self.max_ques_len = 20
# self.max_detail_len = 50
# self.batch_size = 64
# word2vec_path = '../model'
# file_path = '../train_data.csv'
# label_path = '../topic_2_id.csv'
# model = Word2Vec.load(word2vec_path)
# label = list(csv.reader(open(label_path, encoding='utf8').readlines()))[1:]
# self.label_dict = dict([(int(i[1]),i[0]) for i in label])
# self.freq_dict = dict([(int(i[1]),int(i[2])) for i in label])
# data = list(csv.reader(open(file_path,encoding='utf8').readlines()))[1:]
# data = np.array(data[:train_len])
# questions = data[:,1]
# self.o_questions = questions
# questions = [list(jieba.cut(re.sub(stopwords,'',i))) for i in questions]
# questions_dict = []
# for i in questions:
# questions_dict += i
# questions_dict = set(questions_dict)
# print('questions_dict len', len(questions_dict))
# self.questions_dict = dict(zip(range(1,len(questions_dict)+1),questions_dict))
# self.questions_dict[0] = 'UNK'
# self.reversed_questions_dict = {v: k for k, v in self.questions_dict.items()}
# self.embeddings = np.zeros([len(self.questions_dict),128])
# for i in range(len(questions_dict)):
# try:
# self.embeddings[i] = model.wv[self.questions_dict[i]]
# except:
# self.embeddings[i] = np.zeros([128])
# self.questions_len = np.array([len(i) if len(i) <= 20 else 20 for i in questions])
# self.questions = np.zeros([train_len,self.max_ques_len],dtype=int)
# for i in range(len(questions)):
# for j in range(self.max_ques_len):
# try:
# tmp = self.reversed_questions_dict[questions[i][j]]
# except:
# tmp = 0
# self.questions[i][j] = tmp
# tags = data[:,4]
# self.tags = np.array([[int(j) for j in i.split('|')] for i in tags])
# self.max_tag_len = max([len(i) for i in tags])
# pickle.dump([self.tags,self.max_tag_len,self.o_questions,self.questions_dict,self.questions_len,self.questions,self.reversed_questions_dict,self.embeddings,self.label_dict,self.freq_dict],open('./data.pickle','wb'))
# self.reset()
def __init__(self):
self.max_tag_num = 10000
self.max_ques_len = 20
self.max_detail_len = 50
self.batch_size = 64
self.tags,self.max_tag_len,self.o_questions,self.questions_dict,self.questions_len,self.questions,self.reversed_questions_dict,self.embeddings,self.label_dict,self.freq_dict = pickle.load(open('./data.pickle','rb'))
self.reset()
def reset(self):
self.index = 0
self.random = list(range(train_len-test_len))
self.random_test = list(range(train_len-test_len,train_len))
random.shuffle(self.random)
random.shuffle(self.random_test)
def generate(self):
while True:
if self.index + self.batch_size >= train_len - test_len:
break
o_questions = self.o_questions[self.random[self.index:self.index + self.batch_size]]
questions = self.questions[self.random[self.index:self.index + self.batch_size]]
ques_len = self.questions_len[self.random[self.index:self.index + self.batch_size]]
_tags = self.tags[self.random[self.index:self.index + self.batch_size]]
tags = np.zeros([self.batch_size,self.max_tag_num],dtype=float)
for i in range(self.batch_size):
for j in _tags[i]:
if j < self.max_tag_num:
tags[i][j-1] = 1.0
for i in range(self.batch_size):
for j in _tags[i]:
if j >= self.max_tag_num:
_tags[i].remove(j)
self.index += self.batch_size
yield questions,ques_len,tags,_tags,o_questions
def generate_test(self):
while True:
if self.index + self.batch_size >= test_len:
break
o_questions = self.o_questions[self.random_test[self.index:self.index+self.batch_size]]
questions = self.questions[self.random_test[self.index:self.index+self.batch_size]]
ques_len = self.questions_len[self.random_test[self.index:self.index+self.batch_size]]
_tags = self.tags[self.random_test[self.index:self.index + self.batch_size]]
tags = np.zeros([self.batch_size,self.max_tag_num],dtype=float)
for i in range(self.batch_size):
for j in _tags[i]:
if j < self.max_tag_num:
tags[i][j-1] = 1.0
for i in range(self.batch_size):
for j in _tags[i]:
if j >= self.max_tag_num:
_tags[i].remove(j)
self.index += self.batch_size
yield questions,ques_len,tags,_tags,o_questions
if __name__ == '__main__':
loader = Loader()
x = loader.generate()
print(x.__next__()[0])
| <filename>dataloader.py
# from gensim.models import Word2Vec
import csv
# import jieba
import pickle
import re
import random
import numpy as np
_stopwords = ['[','?',':','“','”',',','(',')','、','」','「','《','》',']']
stopwords = "".join(_stopwords)
train_len = 100000
test_len = 1000
class Loader():
# def __init__(self):
# self.max_tag_num = 10000
# self.max_ques_len = 20
# self.max_detail_len = 50
# self.batch_size = 64
# word2vec_path = '../model'
# file_path = '../train_data.csv'
# label_path = '../topic_2_id.csv'
# model = Word2Vec.load(word2vec_path)
# label = list(csv.reader(open(label_path, encoding='utf8').readlines()))[1:]
# self.label_dict = dict([(int(i[1]),i[0]) for i in label])
# self.freq_dict = dict([(int(i[1]),int(i[2])) for i in label])
# data = list(csv.reader(open(file_path,encoding='utf8').readlines()))[1:]
# data = np.array(data[:train_len])
# questions = data[:,1]
# self.o_questions = questions
# questions = [list(jieba.cut(re.sub(stopwords,'',i))) for i in questions]
# questions_dict = []
# for i in questions:
# questions_dict += i
# questions_dict = set(questions_dict)
# print('questions_dict len', len(questions_dict))
# self.questions_dict = dict(zip(range(1,len(questions_dict)+1),questions_dict))
# self.questions_dict[0] = 'UNK'
# self.reversed_questions_dict = {v: k for k, v in self.questions_dict.items()}
# self.embeddings = np.zeros([len(self.questions_dict),128])
# for i in range(len(questions_dict)):
# try:
# self.embeddings[i] = model.wv[self.questions_dict[i]]
# except:
# self.embeddings[i] = np.zeros([128])
# self.questions_len = np.array([len(i) if len(i) <= 20 else 20 for i in questions])
# self.questions = np.zeros([train_len,self.max_ques_len],dtype=int)
# for i in range(len(questions)):
# for j in range(self.max_ques_len):
# try:
# tmp = self.reversed_questions_dict[questions[i][j]]
# except:
# tmp = 0
# self.questions[i][j] = tmp
# tags = data[:,4]
# self.tags = np.array([[int(j) for j in i.split('|')] for i in tags])
# self.max_tag_len = max([len(i) for i in tags])
# pickle.dump([self.tags,self.max_tag_len,self.o_questions,self.questions_dict,self.questions_len,self.questions,self.reversed_questions_dict,self.embeddings,self.label_dict,self.freq_dict],open('./data.pickle','wb'))
# self.reset()
def __init__(self):
self.max_tag_num = 10000
self.max_ques_len = 20
self.max_detail_len = 50
self.batch_size = 64
self.tags,self.max_tag_len,self.o_questions,self.questions_dict,self.questions_len,self.questions,self.reversed_questions_dict,self.embeddings,self.label_dict,self.freq_dict = pickle.load(open('./data.pickle','rb'))
self.reset()
def reset(self):
self.index = 0
self.random = list(range(train_len-test_len))
self.random_test = list(range(train_len-test_len,train_len))
random.shuffle(self.random)
random.shuffle(self.random_test)
def generate(self):
while True:
if self.index + self.batch_size >= train_len - test_len:
break
o_questions = self.o_questions[self.random[self.index:self.index + self.batch_size]]
questions = self.questions[self.random[self.index:self.index + self.batch_size]]
ques_len = self.questions_len[self.random[self.index:self.index + self.batch_size]]
_tags = self.tags[self.random[self.index:self.index + self.batch_size]]
tags = np.zeros([self.batch_size,self.max_tag_num],dtype=float)
for i in range(self.batch_size):
for j in _tags[i]:
if j < self.max_tag_num:
tags[i][j-1] = 1.0
for i in range(self.batch_size):
for j in _tags[i]:
if j >= self.max_tag_num:
_tags[i].remove(j)
self.index += self.batch_size
yield questions,ques_len,tags,_tags,o_questions
def generate_test(self):
while True:
if self.index + self.batch_size >= test_len:
break
o_questions = self.o_questions[self.random_test[self.index:self.index+self.batch_size]]
questions = self.questions[self.random_test[self.index:self.index+self.batch_size]]
ques_len = self.questions_len[self.random_test[self.index:self.index+self.batch_size]]
_tags = self.tags[self.random_test[self.index:self.index + self.batch_size]]
tags = np.zeros([self.batch_size,self.max_tag_num],dtype=float)
for i in range(self.batch_size):
for j in _tags[i]:
if j < self.max_tag_num:
tags[i][j-1] = 1.0
for i in range(self.batch_size):
for j in _tags[i]:
if j >= self.max_tag_num:
_tags[i].remove(j)
self.index += self.batch_size
yield questions,ques_len,tags,_tags,o_questions
if __name__ == '__main__':
loader = Loader()
x = loader.generate()
print(x.__next__()[0])
| en | 0.448745 | # from gensim.models import Word2Vec # import jieba # def __init__(self): # self.max_tag_num = 10000 # self.max_ques_len = 20 # self.max_detail_len = 50 # self.batch_size = 64 # word2vec_path = '../model' # file_path = '../train_data.csv' # label_path = '../topic_2_id.csv' # model = Word2Vec.load(word2vec_path) # label = list(csv.reader(open(label_path, encoding='utf8').readlines()))[1:] # self.label_dict = dict([(int(i[1]),i[0]) for i in label]) # self.freq_dict = dict([(int(i[1]),int(i[2])) for i in label]) # data = list(csv.reader(open(file_path,encoding='utf8').readlines()))[1:] # data = np.array(data[:train_len]) # questions = data[:,1] # self.o_questions = questions # questions = [list(jieba.cut(re.sub(stopwords,'',i))) for i in questions] # questions_dict = [] # for i in questions: # questions_dict += i # questions_dict = set(questions_dict) # print('questions_dict len', len(questions_dict)) # self.questions_dict = dict(zip(range(1,len(questions_dict)+1),questions_dict)) # self.questions_dict[0] = 'UNK' # self.reversed_questions_dict = {v: k for k, v in self.questions_dict.items()} # self.embeddings = np.zeros([len(self.questions_dict),128]) # for i in range(len(questions_dict)): # try: # self.embeddings[i] = model.wv[self.questions_dict[i]] # except: # self.embeddings[i] = np.zeros([128]) # self.questions_len = np.array([len(i) if len(i) <= 20 else 20 for i in questions]) # self.questions = np.zeros([train_len,self.max_ques_len],dtype=int) # for i in range(len(questions)): # for j in range(self.max_ques_len): # try: # tmp = self.reversed_questions_dict[questions[i][j]] # except: # tmp = 0 # self.questions[i][j] = tmp # tags = data[:,4] # self.tags = np.array([[int(j) for j in i.split('|')] for i in tags]) # self.max_tag_len = max([len(i) for i in tags]) # 
pickle.dump([self.tags,self.max_tag_len,self.o_questions,self.questions_dict,self.questions_len,self.questions,self.reversed_questions_dict,self.embeddings,self.label_dict,self.freq_dict],open('./data.pickle','wb')) # self.reset() | 2.658717 | 3 |
tests/distributions/test_multivariate.py | weiwang2330/BayesNeuralNet | 0 | 6620516 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
import numpy as np
from scipy import stats, misc, special
from tests.distributions import utils
from zhusuan.distributions.multivariate import *
class TestMultinomial(tf.test.TestCase):
def test_init_check_shape(self):
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, "should have rank"):
Multinomial(tf.zeros([]), 10)
def test_init_n(self):
dist = Multinomial(tf.ones([2]), 10)
self.assertTrue(isinstance(dist.n_categories, int))
self.assertEqual(dist.n_categories, 2)
self.assertTrue(isinstance(dist.n_experiments, int))
self.assertEqual(dist.n_experiments, 10)
with self.assertRaisesRegexp(ValueError, "must be positive"):
_ = Multinomial(tf.ones([2]), 0)
with self.test_session(use_gpu=True) as sess:
logits = tf.placeholder(tf.float32, None)
n_experiments = tf.placeholder(tf.int32, None)
dist2 = Multinomial(logits, n_experiments)
self.assertEqual(
sess.run([dist2.n_categories, dist2.n_experiments],
feed_dict={logits: np.ones([2]), n_experiments: 10}),
[2, 10])
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"should have rank"):
dist2.n_categories.eval(feed_dict={logits: 1.,
n_experiments: 10})
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"should be a scalar"):
dist2.n_experiments.eval(feed_dict={logits: [1.],
n_experiments: [10]})
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"must be positive"):
dist2.n_experiments.eval(feed_dict={logits: [1.],
n_experiments: 0})
def test_value_shape(self):
# static
dist = Multinomial(tf.placeholder(tf.float32, [None, 2]), 10)
self.assertEqual(dist.get_value_shape().as_list(), [2])
# dynamic
logits = tf.placeholder(tf.float32, None)
dist2 = Multinomial(logits, 10)
self.assertTrue(dist2._value_shape().dtype is tf.int32)
with self.test_session(use_gpu=True):
self.assertEqual(dist2._value_shape().eval(
feed_dict={logits: np.ones([2])}).tolist(), [2])
self.assertEqual(dist._value_shape().dtype, tf.int32)
def test_batch_shape(self):
def _distribution(param):
return Multinomial(param, 10)
utils.test_batch_shape_1parameter(
self, _distribution, np.zeros, is_univariate=False)
def test_sample_shape(self):
def _distribution(param):
return Multinomial(param, 10)
utils.test_1parameter_sample_shape_one_rank_less(
self, _distribution, np.zeros)
def test_log_prob_shape(self):
def _distribution(param):
return Multinomial(param, 10)
def _make_samples(shape):
samples = np.zeros(shape)
samples = samples.reshape((-1, shape[-1]))
samples[:, 0] = 1
return samples.reshape(shape)
utils.test_1parameter_log_prob_shape_one_rank_less(
self, _distribution, _make_samples, _make_samples)
def test_value(self):
with self.test_session(use_gpu=True):
def _test_value(logits, n_experiments, given):
logits = np.array(logits, np.float32)
normalized_logits = logits - misc.logsumexp(
logits, axis=-1, keepdims=True)
given = np.array(given)
dist = Multinomial(logits, n_experiments)
log_p = dist.log_prob(given)
target_log_p = np.log(misc.factorial(n_experiments)) - \
np.sum(np.log(misc.factorial(given)), -1) + \
np.sum(given * normalized_logits, -1)
self.assertAllClose(log_p.eval(), target_log_p)
p = dist.prob(given)
target_p = np.exp(target_log_p)
self.assertAllClose(p.eval(), target_p)
_test_value([-50., -20., 0.], 4, [1, 0, 3])
_test_value([1., 10., 1000.], 1, [1, 0, 0])
_test_value([[2., 3., 1.], [5., 7., 4.]], 3,
np.ones([3, 1, 3], dtype=np.int32))
_test_value([-10., 10., 20., 50.], 100, [[0, 1, 99, 100],
[100, 99, 1, 0]])
def test_dtype(self):
def _distribution(param, dtype=None):
return Multinomial(param, 10, dtype)
utils.test_dtype_1parameter_discrete(self, _distribution)
with self.assertRaisesRegexp(TypeError, "n_experiments must be"):
Multinomial([1., 1.], tf.placeholder(tf.float32, []))
class TestOnehotCategorical(tf.test.TestCase):
    """Unit tests for the OnehotCategorical distribution."""

    def test_init_check_shape(self):
        """Scalar logits are rejected: at least one event dim is required."""
        with self.test_session(use_gpu=True):
            with self.assertRaisesRegexp(ValueError, "should have rank"):
                OnehotCategorical(logits=tf.zeros([]))

    def test_init_n_categories(self):
        """n_categories is a static int when the logits shape is known,
        and a Tensor (checked at run time) otherwise."""
        cat = OnehotCategorical(tf.ones([10]))
        self.assertTrue(isinstance(cat.n_categories, int))
        self.assertEqual(cat.n_categories, 10)

        with self.test_session(use_gpu=True):
            logits = tf.placeholder(tf.float32, None)
            cat2 = OnehotCategorical(logits)
            self.assertEqual(
                cat2.n_categories.eval(feed_dict={logits: np.ones([10])}), 10)
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "should have rank"):
                cat2.n_categories.eval(feed_dict={logits: 1.})

    def test_value_shape(self):
        """value_shape is the trailing (category) dim, static or dynamic."""
        # static
        cat = OnehotCategorical(tf.placeholder(tf.float32, [None, 10]))
        self.assertEqual(cat.get_value_shape().as_list(), [10])

        # dynamic
        logits = tf.placeholder(tf.float32, None)
        cat2 = OnehotCategorical(logits)
        self.assertTrue(cat2._value_shape().dtype is tf.int32)
        with self.test_session(use_gpu=True):
            self.assertEqual(cat2._value_shape().eval(
                feed_dict={logits: np.ones([2, 1, 3])}).tolist(), [3])
        self.assertEqual(cat._value_shape().dtype, tf.int32)

    def test_batch_shape(self):
        utils.test_batch_shape_1parameter(
            self, OnehotCategorical, np.zeros, is_univariate=False)

    def test_sample_shape(self):
        utils.test_1parameter_sample_shape_one_rank_less(
            self, OnehotCategorical, np.zeros)

    def test_log_prob_shape(self):
        def _make_samples(shape):
            # Valid one-hot samples: put all mass on category 0.
            samples = np.zeros(shape)
            samples = samples.reshape((-1, shape[-1]))
            samples[:, 0] = 1
            return samples.reshape(shape)

        utils.test_1parameter_log_prob_shape_one_rank_less(
            self, OnehotCategorical, _make_samples, _make_samples)

    def test_value(self):
        """log_prob/prob agree with a NumPy log-softmax reference."""
        with self.test_session(use_gpu=True):
            def _test_value(logits, given):
                logits = np.array(logits, np.float32)
                # NOTE: scipy.misc.logsumexp was removed in SciPy 1.0;
                # scipy.special.logsumexp is the drop-in replacement.
                normalized_logits = logits - special.logsumexp(
                    logits, axis=-1, keepdims=True)
                given = np.array(given, np.int32)
                cat = OnehotCategorical(logits)
                log_p = cat.log_prob(tf.one_hot(given, logits.shape[-1],
                                                dtype=tf.int32))

                def _one_hot(x, depth):
                    # NumPy one-hot encoder for arbitrary-rank int arrays.
                    n_elements = x.size
                    ret = np.zeros((n_elements, depth))
                    ret[np.arange(n_elements), x.flat] = 1
                    return ret.reshape(list(x.shape) + [depth])

                target_log_p = np.sum(_one_hot(
                    given, logits.shape[-1]) * normalized_logits, -1)
                self.assertAllClose(log_p.eval(), target_log_p)
                p = cat.prob(tf.one_hot(given, logits.shape[-1],
                                        dtype=tf.int32))
                target_p = np.sum(_one_hot(
                    given, logits.shape[-1]) * np.exp(normalized_logits), -1)
                self.assertAllClose(p.eval(), target_p)

            _test_value([0.], [0, 0, 0])
            _test_value([-50., -10., -50.], [0, 1, 2, 1])
            _test_value([0., 4.], [[0, 1], [0, 1]])
            _test_value([[2., 3., 1.], [5., 7., 4.]],
                        np.ones([3, 1, 1], dtype=np.int32))

    def test_dtype(self):
        utils.test_dtype_1parameter_discrete(self, OnehotCategorical)
class TestDirichlet(tf.test.TestCase):
    """Unit tests for the Dirichlet distribution."""

    def test_init_check_shape(self):
        """Scalar alpha is rejected: at least one event dim is required."""
        with self.test_session(use_gpu=True):
            with self.assertRaisesRegexp(ValueError, "should have rank"):
                Dirichlet(alpha=tf.zeros([]))

    def test_init_n_categories(self):
        """n_categories is static when known, dynamic otherwise; must be >= 2."""
        dist = Dirichlet(tf.ones([10]))
        self.assertTrue(isinstance(dist.n_categories, int))
        self.assertEqual(dist.n_categories, 10)
        with self.assertRaisesRegexp(ValueError,
                                     "n_categories.*should be at least 2"):
            Dirichlet(tf.ones([3, 1]))
        dist2 = Dirichlet(tf.placeholder(tf.float32, [3, None]))
        self.assertTrue(dist2.n_categories is not None)

        with self.test_session(use_gpu=True):
            alpha = tf.placeholder(tf.float32, None)
            dist3 = Dirichlet(alpha)
            self.assertEqual(
                dist3.n_categories.eval(feed_dict={alpha: np.ones([10])}), 10)
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "should have rank"):
                dist3.n_categories.eval(feed_dict={alpha: 1.})

    def test_value_shape(self):
        """value_shape equals the trailing dimension of alpha."""
        # static
        dist = Dirichlet(tf.placeholder(tf.float32, [None, 10]))
        self.assertEqual(dist.get_value_shape().as_list(), [10])

        # dynamic
        alpha = tf.placeholder(tf.float32, None)
        dist2 = Dirichlet(alpha)
        self.assertEqual(dist2.get_value_shape().as_list(), [None])
        self.assertTrue(dist2._value_shape().dtype is tf.int32)
        with self.test_session(use_gpu=True):
            self.assertEqual(dist2._value_shape().eval(
                feed_dict={alpha: np.ones([2, 1, 3])}).tolist(), [3])
        self.assertEqual(dist._value_shape().dtype, tf.int32)

    def test_batch_shape(self):
        utils.test_batch_shape_1parameter(
            self, Dirichlet, np.zeros, is_univariate=False)

    def test_sample_shape(self):
        utils.test_1parameter_sample_shape_one_rank_less(
            self, Dirichlet, np.zeros)

    def test_log_prob_shape(self):
        def _make_samples(shape):
            # Valid simplex samples: uniform weights along the last axis.
            samples = np.ones(shape, dtype=np.float32)
            return samples / samples.sum(axis=-1, keepdims=True)

        # TODO: This failed with a bug in Tensorflow, waiting fix.
        # https://github.com/tensorflow/tensorflow/issues/8391
        # _test_static([3, None], [3, 2, 1, None], [3, 2, 3])
        utils.test_1parameter_log_prob_shape_one_rank_less(
            self, Dirichlet, np.ones, _make_samples)

    def test_value(self):
        """log_prob/prob agree with a batched NumPy Dirichlet density."""
        def dirichlet_logpdf(x, alpha):
            # scipy's implementation of dirichlet logpdf doesn't support
            # batch of x, we use this modified version.
            def _lnB(alpha):
                return np.sum(special.gammaln(alpha)) - \
                    special.gammaln(np.sum(alpha))

            lnB = _lnB(alpha)
            return - lnB + np.sum(np.log(x) * (alpha - 1), -1)

        def dirichlet_pdf(x, alpha):
            return np.exp(dirichlet_logpdf(x, alpha))

        with self.test_session(use_gpu=True):
            def _test_value_alpha_rank1(alpha, given):
                alpha = np.array(alpha, np.float32)
                given = np.array(given, np.float32)
                dist = Dirichlet(alpha)
                log_p = dist.log_prob(given)
                target_log_p = dirichlet_logpdf(given, alpha)
                self.assertAllClose(log_p.eval(), target_log_p)
                p = dist.prob(given)
                target_p = dirichlet_pdf(given, alpha)
                self.assertAllClose(p.eval(), target_p)

            _test_value_alpha_rank1([1., 1., 1.],
                                    [[0.2, 0.5, 0.3], [0.3, 0.4, 0.3]])
            _test_value_alpha_rank1([2., 3., 4.], [0.3, 0.7, 0.])
            # TODO: fix for case when alpha=1, given=0

            def _test_value_alpha_rank2_given_rank2(alpha, given):
                # Broadcast alpha and given against each other, then compare
                # per-row with the rank-1 reference density.
                alpha = np.array(alpha, np.float32)
                given = np.array(given, np.float32)
                alpha_b = alpha * np.ones_like(given)
                given_b = given * np.ones_like(alpha)
                dist = Dirichlet(alpha)
                log_p = dist.log_prob(given)
                target_log_p = np.array(
                    [dirichlet_logpdf(given_b[i], alpha_b[i])
                     for i in range(alpha_b.shape[0])])
                self.assertAllClose(log_p.eval(), target_log_p)
                p = dist.prob(given)
                target_p = np.array(
                    [dirichlet_pdf(given_b[i], alpha_b[i])
                     for i in range(alpha_b.shape[0])])
                self.assertAllClose(p.eval(), target_p)

            _test_value_alpha_rank2_given_rank2([[1., 2.], [3., 4.]],
                                                [0.5, 0.5])
            _test_value_alpha_rank2_given_rank2([[5., 6.], [7., 8.]],
                                                [[0.1, 0.9]])
            _test_value_alpha_rank2_given_rank2([[100., 1.], [0.01, 10.]],
                                                [[0., 1.], [1., 0.]])

    def test_check_numerics(self):
        """check_numerics surfaces Inf/NaN from log(given) and lbeta(alpha)."""
        alpha = tf.placeholder(tf.float32, None)
        given = tf.placeholder(tf.float32, None)
        dist = Dirichlet(alpha, check_numerics=True)
        log_p = dist.log_prob(given)
        with self.test_session(use_gpu=True):
            # Raw strings: "\(" is an invalid escape sequence in a normal
            # string literal (DeprecationWarning since Python 3.6).
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         r"log\(given\).*Tensor had Inf"):
                log_p.eval(feed_dict={alpha: np.ones([2]), given: [0., 1.]})
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         r"lbeta\(alpha\).*Tensor had NaN"):
                log_p.eval(feed_dict={alpha: [-1., 1.], given: [0.5, 0.5]})

    def test_dtype(self):
        utils.test_dtype_1parameter_continuous(self, Dirichlet)
class TestExpConcrete(tf.test.TestCase):
    """Unit tests for the ExpConcrete (log-space Concrete) distribution."""

    def test_init_check_shape(self):
        """Scalar logits are rejected: at least one event dim is required."""
        with self.test_session(use_gpu=True):
            with self.assertRaisesRegexp(ValueError, "should have rank"):
                ExpConcrete(1., logits=tf.zeros([]))

    def test_init_n_categories(self):
        """n_categories is static when known, dynamic otherwise."""
        con = ExpConcrete(1., tf.ones([10]))
        self.assertTrue(isinstance(con.n_categories, int))
        self.assertEqual(con.n_categories, 10)

        with self.test_session(use_gpu=True):
            logits = tf.placeholder(tf.float32, None)
            con2 = ExpConcrete(1., logits)
            self.assertEqual(
                con2.n_categories.eval(feed_dict={logits: np.ones([10])}), 10)
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "should have rank"):
                con2.n_categories.eval(feed_dict={logits: 1.})

    def test_init_temperature(self):
        """temperature must be a scalar (checked statically and at run time)."""
        with self.assertRaisesRegexp(ValueError,
                                     "should be a scalar"):
            ExpConcrete([1.], [1., 2.])

        with self.test_session(use_gpu=True):
            temperature = tf.placeholder(tf.float32, None)
            con = ExpConcrete(temperature, [1., 2.])
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "should be a scalar"):
                con.temperature.eval(feed_dict={temperature: [1.]})

    def test_value_shape(self):
        """value_shape is the trailing (category) dim, static or dynamic."""
        # static
        con = ExpConcrete(1., tf.placeholder(tf.float32, [None, 10]))
        self.assertEqual(con.get_value_shape().as_list(), [10])

        # dynamic
        logits = tf.placeholder(tf.float32, None)
        con2 = ExpConcrete(1., logits)
        self.assertTrue(con2._value_shape().dtype is tf.int32)
        with self.test_session(use_gpu=True):
            self.assertEqual(con2._value_shape().eval(
                feed_dict={logits: np.ones([2, 1, 3])}).tolist(), [3])
        self.assertEqual(con._value_shape().dtype, tf.int32)

    def test_batch_shape(self):
        def _proxy_distribution(logits):
            # Fix temperature so the shared 1-parameter helpers apply.
            return ExpConcrete(1., logits)

        utils.test_batch_shape_1parameter(
            self, _proxy_distribution, np.zeros, is_univariate=False)

    def test_sample_shape(self):
        def _proxy_distribution(logits):
            return ExpConcrete(1., logits)

        utils.test_1parameter_sample_shape_one_rank_less(
            self, _proxy_distribution, np.zeros)

    def test_log_prob_shape(self):
        def _proxy_distribution(logits):
            return ExpConcrete(1., logits)

        def _make_samples(shape):
            # Valid samples live in the log-simplex.
            samples = np.ones(shape, dtype=np.float32)
            return np.log(samples / samples.sum(axis=-1, keepdims=True))

        utils.test_1parameter_log_prob_shape_one_rank_less(
            self, _proxy_distribution, np.ones, _make_samples)

    def test_value(self):
        """log_prob/prob agree with the closed-form ExpConcrete density."""
        with self.test_session(use_gpu=True):
            def _test_value(given, temperature, logits):
                given = np.array(given, np.float32)
                logits = np.array(logits, np.float32)
                n = logits.shape[-1]
                t = temperature
                target_log_p = special.gammaln(n) + (n - 1) * np.log(t) + \
                    (logits - t * given).sum(axis=-1) - \
                    n * np.log(np.exp(logits - t * given).sum(axis=-1))

                con = ExpConcrete(temperature, logits=logits)
                log_p = con.log_prob(given)
                self.assertAllClose(log_p.eval(), target_log_p)
                p = con.prob(given)
                self.assertAllClose(p.eval(), np.exp(target_log_p))

            _test_value([np.log(0.25), np.log(0.25), np.log(0.5)],
                        0.1,
                        [1., 1., 1.2])
            _test_value([[np.log(0.25), np.log(0.25), np.log(0.5)],
                         [np.log(0.1), np.log(0.5), np.log(0.4)]],
                        0.5,
                        [[1., 1., 1.], [.5, .5, .4]])

    def test_dtype(self):
        utils.test_dtype_2parameter(self, ExpConcrete)

    def test_sample_reparameterized(self):
        """Gradients flow through samples iff is_reparameterized=True."""
        temperature = tf.constant(1.0)
        logits = tf.ones([2, 3])
        con_rep = ExpConcrete(temperature, logits)
        samples = con_rep.sample(tf.placeholder(tf.int32, shape=[]))
        t_grads, logits_grads = tf.gradients(samples, [temperature, logits])
        self.assertTrue(t_grads is not None)
        self.assertTrue(logits_grads is not None)

        con_no_rep = ExpConcrete(temperature, logits, is_reparameterized=False)
        samples = con_no_rep.sample(tf.placeholder(tf.int32, shape=[]))
        t_grads, logits_grads = tf.gradients(samples, [temperature, logits])
        self.assertEqual(t_grads, None)
        self.assertEqual(logits_grads, None)

    def test_path_derivative(self):
        """use_path_derivative gradients equal explicit chain-rule gradients,
        and are cut when is_reparameterized=False."""
        temperature = tf.constant(1.0)
        logits = tf.ones([2, 3])
        n_samples = tf.placeholder(tf.int32, shape=[])
        con_rep = ExpConcrete(temperature, logits, use_path_derivative=True)
        samples = con_rep.sample(n_samples)
        log_prob = con_rep.log_prob(samples)
        t_path_grads, logits_path_grads = tf.gradients(
            log_prob, [temperature, logits])
        # Reference: propagate d(log_prob)/d(samples) through the sampler.
        sample_grads = tf.gradients(log_prob, samples)
        t_true_grads = tf.gradients(samples, temperature, sample_grads)[0]
        logits_true_grads = tf.gradients(samples, logits, sample_grads)[0]
        with self.test_session(use_gpu=True) as sess:
            outs = sess.run([t_path_grads, t_true_grads,
                             logits_path_grads, logits_true_grads],
                            feed_dict={n_samples: 7})
            t_path, t_true, logits_path, logits_true = outs
            self.assertAllClose(t_path, t_true)
            self.assertAllClose(logits_path, logits_true)

        con_no_rep = ExpConcrete(temperature, logits,
                                 is_reparameterized=False,
                                 use_path_derivative=True)
        samples = con_no_rep.sample(n_samples)
        log_prob = con_no_rep.log_prob(samples)
        t_path_grads, logits_path_grads = tf.gradients(
            log_prob, [temperature, logits])
        self.assertTrue(t_path_grads is None)
        self.assertTrue(logits_path_grads is None)

    def test_check_numerics(self):
        """check_numerics surfaces Inf/NaN from log(temperature)."""
        tau = tf.placeholder(tf.float32, None)
        logits = tf.placeholder(tf.float32, None)
        given = tf.placeholder(tf.float32, None)
        dist = ExpConcrete(tau, logits, check_numerics=True)
        log_p = dist.log_prob(given)
        with self.test_session(use_gpu=True):
            # Raw strings: "\(" is an invalid escape sequence in a normal
            # string literal (DeprecationWarning since Python 3.6).
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         r"log\(temperature\).*Tensor had Inf"):
                log_p.eval(feed_dict={tau: 0., logits: np.ones([2]),
                                      given: [1., 1.]})
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         r"log\(temperature\).*Tensor had NaN"):
                log_p.eval(feed_dict={tau: -1., logits: np.ones([2]),
                                      given: [1., 1.]})
class TestConcrete(tf.test.TestCase):
    """Unit tests for the Concrete (Gumbel-Softmax) distribution."""

    def test_init_check_shape(self):
        """Scalar logits are rejected: at least one event dim is required."""
        with self.test_session(use_gpu=True):
            with self.assertRaisesRegexp(ValueError, "should have rank"):
                Concrete(1., logits=tf.zeros([]))

    def test_init_n_categories(self):
        """n_categories is static when known, dynamic otherwise."""
        con = Concrete(1., tf.ones([10]))
        self.assertTrue(isinstance(con.n_categories, int))
        self.assertEqual(con.n_categories, 10)

        with self.test_session(use_gpu=True):
            logits = tf.placeholder(tf.float32, None)
            con2 = Concrete(1., logits)
            self.assertEqual(
                con2.n_categories.eval(feed_dict={logits: np.ones([10])}), 10)
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "should have rank"):
                con2.n_categories.eval(feed_dict={logits: 1.})

    def test_init_temperature(self):
        """temperature must be a scalar (checked statically and at run time)."""
        with self.assertRaisesRegexp(ValueError,
                                     "should be a scalar"):
            Concrete([1.], [1., 2.])

        with self.test_session(use_gpu=True):
            temperature = tf.placeholder(tf.float32, None)
            con = Concrete(temperature, [1., 2.])
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "should be a scalar"):
                con.temperature.eval(feed_dict={temperature: [1.]})

    def test_value_shape(self):
        """value_shape is the trailing (category) dim, static or dynamic."""
        # static
        con = Concrete(1., tf.placeholder(tf.float32, [None, 10]))
        self.assertEqual(con.get_value_shape().as_list(), [10])

        # dynamic
        logits = tf.placeholder(tf.float32, None)
        con2 = Concrete(1., logits)
        self.assertTrue(con2._value_shape().dtype is tf.int32)
        with self.test_session(use_gpu=True):
            self.assertEqual(con2._value_shape().eval(
                feed_dict={logits: np.ones([2, 1, 3])}).tolist(), [3])
        self.assertEqual(con._value_shape().dtype, tf.int32)

    def test_batch_shape(self):
        def _proxy_distribution(logits):
            # Fix temperature so the shared 1-parameter helpers apply.
            return Concrete(1., logits)

        utils.test_batch_shape_1parameter(
            self, _proxy_distribution, np.zeros, is_univariate=False)

    def test_sample_shape(self):
        def _proxy_distribution(logits):
            return Concrete(1., logits)

        utils.test_1parameter_sample_shape_one_rank_less(
            self, _proxy_distribution, np.zeros)

    def test_log_prob_shape(self):
        def _proxy_distribution(logits):
            return Concrete(1., logits)

        def _make_samples(shape):
            samples = np.ones(shape, dtype=np.float32)
            return np.log(samples / samples.sum(axis=-1, keepdims=True))

        utils.test_1parameter_log_prob_shape_one_rank_less(
            self, _proxy_distribution, np.ones, _make_samples)

    def test_value(self):
        """log_prob/prob agree with the closed-form Concrete density."""
        with self.test_session(use_gpu=True):
            def _test_value(given, temperature, logits):
                given = np.array(given, np.float32)
                logits = np.array(logits, np.float32)
                n = logits.shape[-1]
                t = temperature
                target_log_p = special.gammaln(n) + (n - 1) * np.log(t) + \
                    (logits - (t + 1) * np.log(given)).sum(axis=-1) - \
                    n * np.log(np.exp(logits - t * np.log(given)).sum(axis=-1))

                con = Concrete(temperature, logits=logits)
                log_p = con.log_prob(given)
                self.assertAllClose(log_p.eval(), target_log_p)
                p = con.prob(given)
                self.assertAllClose(p.eval(), np.exp(target_log_p))

            _test_value([0.25, 0.25, 0.5],
                        0.1,
                        [1., 1., 1.2])
            _test_value([[0.25, 0.25, 0.5],
                         [0.1, 0.5, 0.4]],
                        0.5,
                        [[1., 1., 1.], [.5, .5, .4]])

    def test_dtype(self):
        utils.test_dtype_2parameter(self, Concrete)

    def test_sample_reparameterized(self):
        """Gradients flow through samples iff is_reparameterized=True."""
        temperature = tf.constant(1.0)
        logits = tf.ones([2, 3])
        con_rep = Concrete(temperature, logits)
        samples = con_rep.sample(tf.placeholder(tf.int32, shape=[]))
        t_grads, logits_grads = tf.gradients(samples, [temperature, logits])
        self.assertTrue(t_grads is not None)
        self.assertTrue(logits_grads is not None)

        con_no_rep = Concrete(temperature, logits, is_reparameterized=False)
        samples = con_no_rep.sample(tf.placeholder(tf.int32, shape=[]))
        t_grads, logits_grads = tf.gradients(samples, [temperature, logits])
        self.assertEqual(t_grads, None)
        self.assertEqual(logits_grads, None)

    def test_path_derivative(self):
        """use_path_derivative gradients equal explicit chain-rule gradients,
        and are cut when is_reparameterized=False."""
        temperature = tf.constant(1.0)
        logits = tf.ones([2, 3])
        n_samples = tf.placeholder(tf.int32, shape=[])
        con_rep = Concrete(temperature, logits, use_path_derivative=True)
        samples = con_rep.sample(n_samples)
        log_prob = con_rep.log_prob(samples)
        t_path_grads, logits_path_grads = tf.gradients(
            log_prob, [temperature, logits])
        # Reference: propagate d(log_prob)/d(samples) through the sampler.
        sample_grads = tf.gradients(log_prob, samples)
        t_true_grads = tf.gradients(samples, temperature, sample_grads)[0]
        logits_true_grads = tf.gradients(samples, logits, sample_grads)[0]
        with self.test_session(use_gpu=True) as sess:
            outs = sess.run([t_path_grads, t_true_grads,
                             logits_path_grads, logits_true_grads],
                            feed_dict={n_samples: 7})
            t_path, t_true, logits_path, logits_true = outs
            self.assertAllClose(t_path, t_true)
            self.assertAllClose(logits_path, logits_true)

        con_no_rep = Concrete(temperature, logits, is_reparameterized=False,
                              use_path_derivative=True)
        samples = con_no_rep.sample(n_samples)
        log_prob = con_no_rep.log_prob(samples)
        t_path_grads, logits_path_grads = tf.gradients(
            log_prob, [temperature, logits])
        self.assertTrue(t_path_grads is None)
        self.assertTrue(logits_path_grads is None)

    def test_check_numerics(self):
        """check_numerics surfaces Inf/NaN from log(given) and
        log(temperature)."""
        tau = tf.placeholder(tf.float32, None)
        logits = tf.placeholder(tf.float32, None)
        given = tf.placeholder(tf.float32, None)
        dist = Concrete(tau, logits, check_numerics=True)
        log_p = dist.log_prob(given)
        with self.test_session(use_gpu=True):
            # Raw strings: "\(" is an invalid escape sequence in a normal
            # string literal (DeprecationWarning since Python 3.6).
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         r"log\(given\).*Tensor had Inf"):
                log_p.eval(feed_dict={tau: 1., logits: np.ones([2]),
                                      given: [0., 1.]})
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         r"log\(given\).*Tensor had NaN"):
                log_p.eval(feed_dict={tau: 1., logits: np.ones([2]),
                                      given: [1., -1.]})
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         r"log\(temperature\).*Tensor had Inf"):
                log_p.eval(feed_dict={tau: 0., logits: np.ones([2]),
                                      given: [1., 1.]})
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         r"log\(temperature\).*Tensor had NaN"):
                log_p.eval(feed_dict={tau: -1., logits: np.ones([2]),
                                      given: [1., 1.]})
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
import numpy as np
from scipy import stats, misc, special
from tests.distributions import utils
from zhusuan.distributions.multivariate import *
class TestMultinomial(tf.test.TestCase):
    """Unit tests for the Multinomial distribution."""

    def test_init_check_shape(self):
        """Scalar logits are rejected: at least one event dim is required."""
        with self.test_session(use_gpu=True):
            with self.assertRaisesRegexp(ValueError, "should have rank"):
                Multinomial(tf.zeros([]), 10)

    def test_init_n(self):
        """n_categories/n_experiments are static ints when known, Tensors
        (validated at run time) otherwise; n_experiments must be positive."""
        dist = Multinomial(tf.ones([2]), 10)
        self.assertTrue(isinstance(dist.n_categories, int))
        self.assertEqual(dist.n_categories, 2)
        self.assertTrue(isinstance(dist.n_experiments, int))
        self.assertEqual(dist.n_experiments, 10)
        with self.assertRaisesRegexp(ValueError, "must be positive"):
            _ = Multinomial(tf.ones([2]), 0)

        with self.test_session(use_gpu=True) as sess:
            logits = tf.placeholder(tf.float32, None)
            n_experiments = tf.placeholder(tf.int32, None)
            dist2 = Multinomial(logits, n_experiments)
            self.assertEqual(
                sess.run([dist2.n_categories, dist2.n_experiments],
                         feed_dict={logits: np.ones([2]), n_experiments: 10}),
                [2, 10])
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "should have rank"):
                dist2.n_categories.eval(feed_dict={logits: 1.,
                                                   n_experiments: 10})
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "should be a scalar"):
                dist2.n_experiments.eval(feed_dict={logits: [1.],
                                                    n_experiments: [10]})
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "must be positive"):
                dist2.n_experiments.eval(feed_dict={logits: [1.],
                                                    n_experiments: 0})

    def test_value_shape(self):
        """value_shape is the trailing (category) dim, static or dynamic."""
        # static
        dist = Multinomial(tf.placeholder(tf.float32, [None, 2]), 10)
        self.assertEqual(dist.get_value_shape().as_list(), [2])

        # dynamic
        logits = tf.placeholder(tf.float32, None)
        dist2 = Multinomial(logits, 10)
        self.assertTrue(dist2._value_shape().dtype is tf.int32)
        with self.test_session(use_gpu=True):
            self.assertEqual(dist2._value_shape().eval(
                feed_dict={logits: np.ones([2])}).tolist(), [2])
        self.assertEqual(dist._value_shape().dtype, tf.int32)

    def test_batch_shape(self):
        def _distribution(param):
            # Fix n_experiments so the shared 1-parameter helpers apply.
            return Multinomial(param, 10)

        utils.test_batch_shape_1parameter(
            self, _distribution, np.zeros, is_univariate=False)

    def test_sample_shape(self):
        def _distribution(param):
            return Multinomial(param, 10)

        utils.test_1parameter_sample_shape_one_rank_less(
            self, _distribution, np.zeros)

    def test_log_prob_shape(self):
        def _distribution(param):
            return Multinomial(param, 10)

        def _make_samples(shape):
            # Valid count vectors: all trials land on category 0.
            samples = np.zeros(shape)
            samples = samples.reshape((-1, shape[-1]))
            samples[:, 0] = 1
            return samples.reshape(shape)

        utils.test_1parameter_log_prob_shape_one_rank_less(
            self, _distribution, _make_samples, _make_samples)

    def test_value(self):
        """log_prob/prob agree with the NumPy multinomial pmf."""
        with self.test_session(use_gpu=True):
            def _test_value(logits, n_experiments, given):
                logits = np.array(logits, np.float32)
                # NOTE: scipy.misc.logsumexp/factorial were removed from
                # scipy.misc; the scipy.special versions are drop-in
                # replacements.
                normalized_logits = logits - special.logsumexp(
                    logits, axis=-1, keepdims=True)
                given = np.array(given)
                dist = Multinomial(logits, n_experiments)
                log_p = dist.log_prob(given)
                target_log_p = np.log(special.factorial(n_experiments)) - \
                    np.sum(np.log(special.factorial(given)), -1) + \
                    np.sum(given * normalized_logits, -1)
                self.assertAllClose(log_p.eval(), target_log_p)
                p = dist.prob(given)
                target_p = np.exp(target_log_p)
                self.assertAllClose(p.eval(), target_p)

            _test_value([-50., -20., 0.], 4, [1, 0, 3])
            _test_value([1., 10., 1000.], 1, [1, 0, 0])
            _test_value([[2., 3., 1.], [5., 7., 4.]], 3,
                        np.ones([3, 1, 3], dtype=np.int32))
            _test_value([-10., 10., 20., 50.], 100, [[0, 1, 99, 100],
                                                     [100, 99, 1, 0]])

    def test_dtype(self):
        """dtype is configurable; n_experiments must be integer-typed."""
        def _distribution(param, dtype=None):
            return Multinomial(param, 10, dtype)

        utils.test_dtype_1parameter_discrete(self, _distribution)

        with self.assertRaisesRegexp(TypeError, "n_experiments must be"):
            Multinomial([1., 1.], tf.placeholder(tf.float32, []))
class TestOnehotCategorical(tf.test.TestCase):
    """Unit tests for the OnehotCategorical distribution."""

    def test_init_check_shape(self):
        """Scalar logits are rejected: at least one event dim is required."""
        with self.test_session(use_gpu=True):
            with self.assertRaisesRegexp(ValueError, "should have rank"):
                OnehotCategorical(logits=tf.zeros([]))

    def test_init_n_categories(self):
        """n_categories is a static int when the logits shape is known,
        and a Tensor (checked at run time) otherwise."""
        cat = OnehotCategorical(tf.ones([10]))
        self.assertTrue(isinstance(cat.n_categories, int))
        self.assertEqual(cat.n_categories, 10)

        with self.test_session(use_gpu=True):
            logits = tf.placeholder(tf.float32, None)
            cat2 = OnehotCategorical(logits)
            self.assertEqual(
                cat2.n_categories.eval(feed_dict={logits: np.ones([10])}), 10)
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "should have rank"):
                cat2.n_categories.eval(feed_dict={logits: 1.})

    def test_value_shape(self):
        """value_shape is the trailing (category) dim, static or dynamic."""
        # static
        cat = OnehotCategorical(tf.placeholder(tf.float32, [None, 10]))
        self.assertEqual(cat.get_value_shape().as_list(), [10])

        # dynamic
        logits = tf.placeholder(tf.float32, None)
        cat2 = OnehotCategorical(logits)
        self.assertTrue(cat2._value_shape().dtype is tf.int32)
        with self.test_session(use_gpu=True):
            self.assertEqual(cat2._value_shape().eval(
                feed_dict={logits: np.ones([2, 1, 3])}).tolist(), [3])
        self.assertEqual(cat._value_shape().dtype, tf.int32)

    def test_batch_shape(self):
        utils.test_batch_shape_1parameter(
            self, OnehotCategorical, np.zeros, is_univariate=False)

    def test_sample_shape(self):
        utils.test_1parameter_sample_shape_one_rank_less(
            self, OnehotCategorical, np.zeros)

    def test_log_prob_shape(self):
        def _make_samples(shape):
            # Valid one-hot samples: put all mass on category 0.
            samples = np.zeros(shape)
            samples = samples.reshape((-1, shape[-1]))
            samples[:, 0] = 1
            return samples.reshape(shape)

        utils.test_1parameter_log_prob_shape_one_rank_less(
            self, OnehotCategorical, _make_samples, _make_samples)

    def test_value(self):
        """log_prob/prob agree with a NumPy log-softmax reference."""
        with self.test_session(use_gpu=True):
            def _test_value(logits, given):
                logits = np.array(logits, np.float32)
                # NOTE: scipy.misc.logsumexp was removed in SciPy 1.0;
                # scipy.special.logsumexp is the drop-in replacement.
                normalized_logits = logits - special.logsumexp(
                    logits, axis=-1, keepdims=True)
                given = np.array(given, np.int32)
                cat = OnehotCategorical(logits)
                log_p = cat.log_prob(tf.one_hot(given, logits.shape[-1],
                                                dtype=tf.int32))

                def _one_hot(x, depth):
                    # NumPy one-hot encoder for arbitrary-rank int arrays.
                    n_elements = x.size
                    ret = np.zeros((n_elements, depth))
                    ret[np.arange(n_elements), x.flat] = 1
                    return ret.reshape(list(x.shape) + [depth])

                target_log_p = np.sum(_one_hot(
                    given, logits.shape[-1]) * normalized_logits, -1)
                self.assertAllClose(log_p.eval(), target_log_p)
                p = cat.prob(tf.one_hot(given, logits.shape[-1],
                                        dtype=tf.int32))
                target_p = np.sum(_one_hot(
                    given, logits.shape[-1]) * np.exp(normalized_logits), -1)
                self.assertAllClose(p.eval(), target_p)

            _test_value([0.], [0, 0, 0])
            _test_value([-50., -10., -50.], [0, 1, 2, 1])
            _test_value([0., 4.], [[0, 1], [0, 1]])
            _test_value([[2., 3., 1.], [5., 7., 4.]],
                        np.ones([3, 1, 1], dtype=np.int32))

    def test_dtype(self):
        utils.test_dtype_1parameter_discrete(self, OnehotCategorical)
class TestDirichlet(tf.test.TestCase):
    """Unit tests for the Dirichlet distribution."""

    def test_init_check_shape(self):
        """Scalar alpha is rejected: at least one event dim is required."""
        with self.test_session(use_gpu=True):
            with self.assertRaisesRegexp(ValueError, "should have rank"):
                Dirichlet(alpha=tf.zeros([]))

    def test_init_n_categories(self):
        """n_categories is static when known, dynamic otherwise; must be >= 2."""
        dist = Dirichlet(tf.ones([10]))
        self.assertTrue(isinstance(dist.n_categories, int))
        self.assertEqual(dist.n_categories, 10)
        with self.assertRaisesRegexp(ValueError,
                                     "n_categories.*should be at least 2"):
            Dirichlet(tf.ones([3, 1]))
        dist2 = Dirichlet(tf.placeholder(tf.float32, [3, None]))
        self.assertTrue(dist2.n_categories is not None)

        with self.test_session(use_gpu=True):
            alpha = tf.placeholder(tf.float32, None)
            dist3 = Dirichlet(alpha)
            self.assertEqual(
                dist3.n_categories.eval(feed_dict={alpha: np.ones([10])}), 10)
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "should have rank"):
                dist3.n_categories.eval(feed_dict={alpha: 1.})

    def test_value_shape(self):
        """value_shape equals the trailing dimension of alpha."""
        # static
        dist = Dirichlet(tf.placeholder(tf.float32, [None, 10]))
        self.assertEqual(dist.get_value_shape().as_list(), [10])

        # dynamic
        alpha = tf.placeholder(tf.float32, None)
        dist2 = Dirichlet(alpha)
        self.assertEqual(dist2.get_value_shape().as_list(), [None])
        self.assertTrue(dist2._value_shape().dtype is tf.int32)
        with self.test_session(use_gpu=True):
            self.assertEqual(dist2._value_shape().eval(
                feed_dict={alpha: np.ones([2, 1, 3])}).tolist(), [3])
        self.assertEqual(dist._value_shape().dtype, tf.int32)

    def test_batch_shape(self):
        utils.test_batch_shape_1parameter(
            self, Dirichlet, np.zeros, is_univariate=False)

    def test_sample_shape(self):
        utils.test_1parameter_sample_shape_one_rank_less(
            self, Dirichlet, np.zeros)

    def test_log_prob_shape(self):
        def _make_samples(shape):
            # Valid simplex samples: uniform weights along the last axis.
            samples = np.ones(shape, dtype=np.float32)
            return samples / samples.sum(axis=-1, keepdims=True)

        # TODO: This failed with a bug in Tensorflow, waiting fix.
        # https://github.com/tensorflow/tensorflow/issues/8391
        # _test_static([3, None], [3, 2, 1, None], [3, 2, 3])
        utils.test_1parameter_log_prob_shape_one_rank_less(
            self, Dirichlet, np.ones, _make_samples)

    def test_value(self):
        """log_prob/prob agree with a batched NumPy Dirichlet density."""
        def dirichlet_logpdf(x, alpha):
            # scipy's implementation of dirichlet logpdf doesn't support
            # batch of x, we use this modified version.
            def _lnB(alpha):
                return np.sum(special.gammaln(alpha)) - \
                    special.gammaln(np.sum(alpha))

            lnB = _lnB(alpha)
            return - lnB + np.sum(np.log(x) * (alpha - 1), -1)

        def dirichlet_pdf(x, alpha):
            return np.exp(dirichlet_logpdf(x, alpha))

        with self.test_session(use_gpu=True):
            def _test_value_alpha_rank1(alpha, given):
                alpha = np.array(alpha, np.float32)
                given = np.array(given, np.float32)
                dist = Dirichlet(alpha)
                log_p = dist.log_prob(given)
                target_log_p = dirichlet_logpdf(given, alpha)
                self.assertAllClose(log_p.eval(), target_log_p)
                p = dist.prob(given)
                target_p = dirichlet_pdf(given, alpha)
                self.assertAllClose(p.eval(), target_p)

            _test_value_alpha_rank1([1., 1., 1.],
                                    [[0.2, 0.5, 0.3], [0.3, 0.4, 0.3]])
            _test_value_alpha_rank1([2., 3., 4.], [0.3, 0.7, 0.])
            # TODO: fix for case when alpha=1, given=0

            def _test_value_alpha_rank2_given_rank2(alpha, given):
                # Broadcast alpha and given against each other, then compare
                # per-row with the rank-1 reference density.
                alpha = np.array(alpha, np.float32)
                given = np.array(given, np.float32)
                alpha_b = alpha * np.ones_like(given)
                given_b = given * np.ones_like(alpha)
                dist = Dirichlet(alpha)
                log_p = dist.log_prob(given)
                target_log_p = np.array(
                    [dirichlet_logpdf(given_b[i], alpha_b[i])
                     for i in range(alpha_b.shape[0])])
                self.assertAllClose(log_p.eval(), target_log_p)
                p = dist.prob(given)
                target_p = np.array(
                    [dirichlet_pdf(given_b[i], alpha_b[i])
                     for i in range(alpha_b.shape[0])])
                self.assertAllClose(p.eval(), target_p)

            _test_value_alpha_rank2_given_rank2([[1., 2.], [3., 4.]],
                                                [0.5, 0.5])
            _test_value_alpha_rank2_given_rank2([[5., 6.], [7., 8.]],
                                                [[0.1, 0.9]])
            _test_value_alpha_rank2_given_rank2([[100., 1.], [0.01, 10.]],
                                                [[0., 1.], [1., 0.]])

    def test_check_numerics(self):
        """check_numerics surfaces Inf/NaN from log(given) and lbeta(alpha)."""
        alpha = tf.placeholder(tf.float32, None)
        given = tf.placeholder(tf.float32, None)
        dist = Dirichlet(alpha, check_numerics=True)
        log_p = dist.log_prob(given)
        with self.test_session(use_gpu=True):
            # Raw strings: "\(" is an invalid escape sequence in a normal
            # string literal (DeprecationWarning since Python 3.6).
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         r"log\(given\).*Tensor had Inf"):
                log_p.eval(feed_dict={alpha: np.ones([2]), given: [0., 1.]})
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         r"lbeta\(alpha\).*Tensor had NaN"):
                log_p.eval(feed_dict={alpha: [-1., 1.], given: [0.5, 0.5]})

    def test_dtype(self):
        utils.test_dtype_1parameter_continuous(self, Dirichlet)
class TestExpConcrete(tf.test.TestCase):
def test_init_check_shape(self):
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, "should have rank"):
ExpConcrete(1., logits=tf.zeros([]))
def test_init_n_categories(self):
con = ExpConcrete(1., tf.ones([10]))
self.assertTrue(isinstance(con.n_categories, int))
self.assertEqual(con.n_categories, 10)
with self.test_session(use_gpu=True):
logits = tf.placeholder(tf.float32, None)
con2 = ExpConcrete(1., logits)
self.assertEqual(
con2.n_categories.eval(feed_dict={logits: np.ones([10])}), 10)
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"should have rank"):
con2.n_categories.eval(feed_dict={logits: 1.})
def test_init_temperature(self):
with self.assertRaisesRegexp(ValueError,
"should be a scalar"):
ExpConcrete([1.], [1., 2.])
with self.test_session(use_gpu=True):
temperature = tf.placeholder(tf.float32, None)
con = ExpConcrete(temperature, [1., 2.])
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"should be a scalar"):
con.temperature.eval(feed_dict={temperature: [1.]})
def test_value_shape(self):
# static
con = ExpConcrete(1., tf.placeholder(tf.float32, [None, 10]))
self.assertEqual(con.get_value_shape().as_list(), [10])
# dynamic
logits = tf.placeholder(tf.float32, None)
con2 = ExpConcrete(1., logits)
self.assertTrue(con2._value_shape().dtype is tf.int32)
with self.test_session(use_gpu=True):
self.assertEqual(con2._value_shape().eval(
feed_dict={logits: np.ones([2, 1, 3])}).tolist(), [3])
self.assertEqual(con._value_shape().dtype, tf.int32)
def test_batch_shape(self):
def _proxy_distribution(logits):
return ExpConcrete(1., logits)
utils.test_batch_shape_1parameter(
self, _proxy_distribution, np.zeros, is_univariate=False)
def test_sample_shape(self):
def _proxy_distribution(logits):
return ExpConcrete(1., logits)
utils.test_1parameter_sample_shape_one_rank_less(
self, _proxy_distribution, np.zeros)
def test_log_prob_shape(self):
def _proxy_distribution(logits):
return ExpConcrete(1., logits)
def _make_samples(shape):
samples = np.ones(shape, dtype=np.float32)
return np.log(samples / samples.sum(axis=-1, keepdims=True))
utils.test_1parameter_log_prob_shape_one_rank_less(
self, _proxy_distribution, np.ones, _make_samples)
def test_value(self):
with self.test_session(use_gpu=True):
def _test_value(given, temperature, logits):
given = np.array(given, np.float32)
logits = np.array(logits, np.float32)
n = logits.shape[-1]
t = temperature
target_log_p = special.gammaln(n) + (n - 1) * np.log(t) + \
(logits - t * given).sum(axis=-1) - \
n * np.log(np.exp(logits - t * given).sum(axis=-1))
con = ExpConcrete(temperature, logits=logits)
log_p = con.log_prob(given)
self.assertAllClose(log_p.eval(), target_log_p)
p = con.prob(given)
self.assertAllClose(p.eval(), np.exp(target_log_p))
_test_value([np.log(0.25), np.log(0.25), np.log(0.5)],
0.1,
[1., 1., 1.2])
_test_value([[np.log(0.25), np.log(0.25), np.log(0.5)],
[np.log(0.1), np.log(0.5), np.log(0.4)]],
0.5,
[[1., 1., 1.], [.5, .5, .4]])
def test_dtype(self):
utils.test_dtype_2parameter(self, ExpConcrete)
def test_sample_reparameterized(self):
temperature = tf.constant(1.0)
logits = tf.ones([2, 3])
con_rep = ExpConcrete(temperature, logits)
samples = con_rep.sample(tf.placeholder(tf.int32, shape=[]))
t_grads, logits_grads = tf.gradients(samples, [temperature, logits])
self.assertTrue(t_grads is not None)
self.assertTrue(logits_grads is not None)
con_no_rep = ExpConcrete(temperature, logits, is_reparameterized=False)
samples = con_no_rep.sample(tf.placeholder(tf.int32, shape=[]))
t_grads, logits_grads = tf.gradients(samples, [temperature, logits])
self.assertEqual(t_grads, None)
self.assertEqual(logits_grads, None)
    def test_path_derivative(self):
        """With use_path_derivative=True, d log_prob/d params equals the pathwise term."""
        temperature = tf.constant(1.0)
        logits = tf.ones([2, 3])
        n_samples = tf.placeholder(tf.int32, shape=[])
        con_rep = ExpConcrete(temperature, logits, use_path_derivative=True)
        samples = con_rep.sample(n_samples)
        log_prob = con_rep.log_prob(samples)
        t_path_grads, logits_path_grads = tf.gradients(log_prob, [temperature, logits])
        # Reference: chain rule through the samples only (score term dropped).
        sample_grads = tf.gradients(log_prob, samples)
        t_true_grads = tf.gradients(samples, temperature, sample_grads)[0]
        logits_true_grads = tf.gradients(samples, logits, sample_grads)[0]
        with self.test_session(use_gpu=True) as sess:
            outs = sess.run([t_path_grads, t_true_grads,
                             logits_path_grads, logits_true_grads],
                            feed_dict={n_samples: 7})
            t_path, t_true, logits_path, logits_true = outs
            self.assertAllClose(t_path, t_true)
            self.assertAllClose(logits_path, logits_true)
        # Non-reparameterized: no gradient path exists at all.
        con_no_rep = ExpConcrete(temperature, logits, is_reparameterized=False, use_path_derivative=True)
        samples = con_no_rep.sample(n_samples)
        log_prob = con_no_rep.log_prob(samples)
        t_path_grads, logits_path_grads = tf.gradients(log_prob, [temperature, logits])
        self.assertTrue(t_path_grads is None)
        self.assertTrue(logits_path_grads is None)
    def test_check_numerics(self):
        """check_numerics=True surfaces Inf/NaN from log(temperature)."""
        tau = tf.placeholder(tf.float32, None)
        logits = tf.placeholder(tf.float32, None)
        given = tf.placeholder(tf.float32, None)
        dist = ExpConcrete(tau, logits, check_numerics=True)
        log_p = dist.log_prob(given)
        with self.test_session(use_gpu=True):
            # tau == 0 -> log(0) = -Inf
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "log\(temperature\).*Tensor had Inf"):
                log_p.eval(feed_dict={tau: 0., logits: np.ones([2]),
                                      given: [1., 1.]})
            # tau < 0 -> log(negative) = NaN
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "log\(temperature\).*Tensor had NaN"):
                log_p.eval(feed_dict={tau: -1., logits: np.ones([2]),
                                      given: [1., 1.]})
class TestConcrete(tf.test.TestCase):
    """Tests for the Concrete distribution (samples on the probability simplex)."""

    def test_init_check_shape(self):
        # logits must be at least rank 1.
        with self.test_session(use_gpu=True):
            with self.assertRaisesRegexp(ValueError, "should have rank"):
                Concrete(1., logits=tf.zeros([]))

    def test_init_n_categories(self):
        # Static logits -> n_categories is a plain Python int.
        con = Concrete(1., tf.ones([10]))
        self.assertTrue(isinstance(con.n_categories, int))
        self.assertEqual(con.n_categories, 10)
        with self.test_session(use_gpu=True):
            # Dynamic logits -> n_categories is a tensor checked at run time.
            logits = tf.placeholder(tf.float32, None)
            con2 = Concrete(1., logits)
            self.assertEqual(
                con2.n_categories.eval(feed_dict={logits: np.ones([10])}), 10)
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "should have rank"):
                con2.n_categories.eval(feed_dict={logits: 1.})

    def test_init_temperature(self):
        # temperature must be a scalar (checked statically and dynamically).
        with self.assertRaisesRegexp(ValueError,
                                     "should be a scalar"):
            Concrete([1.], [1., 2.])
        with self.test_session(use_gpu=True):
            temperature = tf.placeholder(tf.float32, None)
            con = Concrete(temperature, [1., 2.])
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "should be a scalar"):
                con.temperature.eval(feed_dict={temperature: [1.]})

    def test_value_shape(self):
        # static
        con = Concrete(1., tf.placeholder(tf.float32, [None, 10]))
        self.assertEqual(con.get_value_shape().as_list(), [10])
        # dynamic
        logits = tf.placeholder(tf.float32, None)
        con2 = Concrete(1., logits)
        self.assertTrue(con2._value_shape().dtype is tf.int32)
        with self.test_session(use_gpu=True):
            self.assertEqual(con2._value_shape().eval(
                feed_dict={logits: np.ones([2, 1, 3])}).tolist(), [3])
        self.assertEqual(con._value_shape().dtype, tf.int32)

    def test_batch_shape(self):
        def _proxy_distribution(logits):
            return Concrete(1., logits)
        utils.test_batch_shape_1parameter(
            self, _proxy_distribution, np.zeros, is_univariate=False)

    def test_sample_shape(self):
        def _proxy_distribution(logits):
            return Concrete(1., logits)
        utils.test_1parameter_sample_shape_one_rank_less(
            self, _proxy_distribution, np.zeros)

    def test_log_prob_shape(self):
        def _proxy_distribution(logits):
            return Concrete(1., logits)
        def _make_samples(shape):
            # Uniform points on the probability simplex, in log space.
            samples = np.ones(shape, dtype=np.float32)
            return np.log(samples / samples.sum(axis=-1, keepdims=True))
        utils.test_1parameter_log_prob_shape_one_rank_less(
            self, _proxy_distribution, np.ones, _make_samples)

    def test_value(self):
        """Compare Concrete log_prob/prob against a NumPy reference density."""
        with self.test_session(use_gpu=True):
            def _test_value(given, temperature, logits):
                given = np.array(given, np.float32)
                logits = np.array(logits, np.float32)
                n = logits.shape[-1]
                t = temperature
                # Reference log-density; `given` lies on the simplex.
                target_log_p = special.gammaln(n) + (n - 1) * np.log(t) + \
                    (logits - (t + 1) * np.log(given)).sum(axis=-1) - \
                    n * np.log(np.exp(logits - t * np.log(given)).sum(axis=-1))
                con = Concrete(temperature, logits=logits)
                log_p = con.log_prob(given)
                self.assertAllClose(log_p.eval(), target_log_p)
                p = con.prob(given)
                self.assertAllClose(p.eval(), np.exp(target_log_p))
            _test_value([0.25, 0.25, 0.5],
                        0.1,
                        [1., 1., 1.2])
            _test_value([[0.25, 0.25, 0.5],
                         [0.1, 0.5, 0.4]],
                        0.5,
                        [[1., 1., 1.], [.5, .5, .4]])

    def test_dtype(self):
        # Shared dtype checks for two-parameter distributions.
        utils.test_dtype_2parameter(self, Concrete)

    def test_sample_reparameterized(self):
        """Gradients flow through samples iff the distribution is reparameterized."""
        temperature = tf.constant(1.0)
        logits = tf.ones([2, 3])
        con_rep = Concrete(temperature, logits)
        samples = con_rep.sample(tf.placeholder(tf.int32, shape=[]))
        t_grads, logits_grads = tf.gradients(samples, [temperature, logits])
        self.assertTrue(t_grads is not None)
        self.assertTrue(logits_grads is not None)
        # Without reparameterization the sample op must block gradients.
        con_no_rep = Concrete(temperature, logits, is_reparameterized=False)
        samples = con_no_rep.sample(tf.placeholder(tf.int32, shape=[]))
        t_grads, logits_grads = tf.gradients(samples, [temperature, logits])
        self.assertEqual(t_grads, None)
        self.assertEqual(logits_grads, None)

    def test_path_derivative(self):
        """With use_path_derivative=True, d log_prob/d params equals the pathwise term."""
        temperature = tf.constant(1.0)
        logits = tf.ones([2, 3])
        n_samples = tf.placeholder(tf.int32, shape=[])
        con_rep = Concrete(temperature, logits, use_path_derivative=True)
        samples = con_rep.sample(n_samples)
        log_prob = con_rep.log_prob(samples)
        t_path_grads, logits_path_grads = tf.gradients(log_prob, [temperature, logits])
        # Reference: chain rule through the samples only (score term dropped).
        sample_grads = tf.gradients(log_prob, samples)
        t_true_grads = tf.gradients(samples, temperature, sample_grads)[0]
        logits_true_grads = tf.gradients(samples, logits, sample_grads)[0]
        with self.test_session(use_gpu=True) as sess:
            outs = sess.run([t_path_grads, t_true_grads,
                             logits_path_grads, logits_true_grads],
                            feed_dict={n_samples: 7})
            t_path, t_true, logits_path, logits_true = outs
            self.assertAllClose(t_path, t_true)
            self.assertAllClose(logits_path, logits_true)
        # Non-reparameterized: no gradient path exists at all.
        con_no_rep = Concrete(temperature, logits, is_reparameterized=False,
                              use_path_derivative=True)
        samples = con_no_rep.sample(n_samples)
        log_prob = con_no_rep.log_prob(samples)
        t_path_grads, logits_path_grads = tf.gradients(log_prob, [temperature, logits])
        self.assertTrue(t_path_grads is None)
        self.assertTrue(logits_path_grads is None)

    def test_check_numerics(self):
        """check_numerics=True surfaces Inf/NaN from log(given) and log(temperature)."""
        tau = tf.placeholder(tf.float32, None)
        logits = tf.placeholder(tf.float32, None)
        given = tf.placeholder(tf.float32, None)
        dist = Concrete(tau, logits, check_numerics=True)
        log_p = dist.log_prob(given)
        with self.test_session(use_gpu=True):
            # given == 0 -> log(0) = -Inf
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "log\(given\).*Tensor had Inf"):
                log_p.eval(feed_dict={tau: 1., logits: np.ones([2]),
                                      given: [0., 1.]})
            # given < 0 -> log(negative) = NaN
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "log\(given\).*Tensor had NaN"):
                log_p.eval(feed_dict={tau: 1., logits: np.ones([2]),
                                      given: [1., -1.]})
            # tau == 0 -> log(0) = -Inf
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "log\(temperature\).*Tensor had Inf"):
                log_p.eval(feed_dict={tau: 0., logits: np.ones([2]),
                                      given: [1., 1.]})
            # tau < 0 -> log(negative) = NaN
            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         "log\(temperature\).*Tensor had NaN"):
                log_p.eval(feed_dict={tau: -1., logits: np.ones([2]),
                                      given: [1., 1.]})
| en | 0.647399 | #!/usr/bin/env python # -*- coding: utf-8 -*- # static # dynamic # static # dynamic # static # dynamic # TODO: This failed with a bug in Tensorflow, waiting fix. # https://github.com/tensorflow/tensorflow/issues/8391 # _test_static([3, None], [3, 2, 1, None], [3, 2, 3]) # scipy's implementation of dirichlet logpdf doesn't support # batch of x, we use this modified version. # TODO: fix for case when alpha=1, given=0 # static # dynamic # static # dynamic | 2.300104 | 2 |
tsktsk/commands/__init__.py | aymanizz/tsktsk | 3 | 6620517 | from tsktsk.commands.base import root
| from tsktsk.commands.base import root
| none | 1 | 1.06588 | 1 | |
services/incomes.py | cybersturmer/findragon-core-api | 1 | 6620518 | <filename>services/incomes.py<gh_stars>1-10
from typing import List
from fastapi import (
Depends,
HTTPException,
status
)
from database import (
Session,
get_session
)
from models import tables
from models.schemas.income import (
IncomeCreate,
IncomeUpdate
)
class Income:
    """CRUD service for portfolio income records, backed by a SQLAlchemy session.

    Intended for use as a FastAPI dependency: the session is injected via
    ``Depends(get_session)``.
    """

    def __init__(self, orm_session: Session = Depends(get_session)):
        self.orm_session = orm_session

    def _get(self, key: int) -> tables.PortfolioIncome:
        """Fetch a single income row by primary key; raise HTTP 404 if absent."""
        record = (
            self.orm_session
            .query(tables.PortfolioIncome)
            .filter_by(id=key)
            .first()
        )
        if not record:
            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
        return record

    def get(self, key: int) -> tables.PortfolioIncome:
        """Public accessor for one income record (404 on missing)."""
        return self._get(key=key)

    def get_list(self) -> List[tables.PortfolioIncome]:
        """Return every income record."""
        return self.orm_session.query(tables.PortfolioIncome).all()

    def create(self, data: IncomeCreate) -> tables.PortfolioIncome:
        """Create an income record, lazily creating its asset if needed.

        The (ticker, exchange, portfolio_id) triple identifies the asset;
        the remaining payload fields populate the income row itself.
        """
        payload = data.dict()
        asset_fields = {
            'ticker': payload.pop('ticker'),
            'exchange': payload.pop('exchange'),
            'portfolio_id': payload['portfolio_id'],
        }
        asset = (
            self.orm_session
            .query(tables.PortfolioAsset)
            .filter_by(**asset_fields)
            .first()
        )
        if not asset:
            # First income for this asset: create the asset row first so its
            # generated id is available for the foreign key below.
            asset = tables.PortfolioAsset(**asset_fields)
            self.orm_session.add(asset)
            self.orm_session.commit()
        income = tables.PortfolioIncome(**payload)
        income.asset_id = asset.id
        self.orm_session.add(income)
        self.orm_session.commit()
        return income

    def delete(self, key: int) -> None:
        """Delete one income record by primary key (404 on missing)."""
        record = self._get(key=key)
        self.orm_session.delete(record)
        self.orm_session.commit()

    def update(self,
               key: int,
               data: IncomeUpdate) -> tables.PortfolioIncome:
        """Apply the non-None fields of *data* to the record and persist."""
        income = self._get(key=key)
        for field, value in data:
            if value is not None:
                setattr(income, field, value)
        self.orm_session.commit()
        return income
from typing import List
from fastapi import (
Depends,
HTTPException,
status
)
from database import (
Session,
get_session
)
from models import tables
from models.schemas.income import (
IncomeCreate,
IncomeUpdate
)
class Income:
def __init__(self, orm_session: Session = Depends(get_session)):
self.orm_session = orm_session
def _get(self, key: int) -> tables.PortfolioIncome:
income = (
self.orm_session
.query(tables.PortfolioIncome)
.filter_by(id=key)
.first()
)
if not income:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
return income
def get(self, key: int) -> tables.PortfolioIncome:
return self._get(key)
def get_list(self) -> List[tables.PortfolioIncome]:
return (
self.orm_session
.query(tables.PortfolioIncome)
.all()
)
def create(self, data: IncomeCreate) -> tables.PortfolioIncome:
income_data = data.dict()
ticker = income_data.pop('ticker')
exchange = income_data.pop('exchange')
portfolio_id = income_data['portfolio_id']
asset = (
self.orm_session
.query(tables.PortfolioAsset)
.filter_by(
ticker=ticker,
exchange=exchange,
portfolio_id=portfolio_id
)
.first()
)
if not asset:
asset = tables.PortfolioAsset(
**dict(
ticker=ticker,
exchange=exchange,
portfolio_id=portfolio_id
)
)
self.orm_session.add(asset)
self.orm_session.commit()
income = tables.PortfolioIncome(**income_data)
income.asset_id = asset.id
self.orm_session.add(income)
self.orm_session.commit()
return income
def delete(self, key: int) -> None:
income = self._get(key=key)
self.orm_session.delete(income)
self.orm_session.commit()
def update(self,
key: int,
data: IncomeUpdate) -> tables.PortfolioIncome:
income = self._get(key=key)
for field, value in data:
if value is None:
continue
setattr(income, field, value)
self.orm_session.commit()
return income | none | 1 | 2.501718 | 3 | |
entertainment_center.py | clockworkaiesh/Nanodegree-Project1 | 1 | 6620519 | import fresh_tomatoes
import media
# Catalog of Movie objects. Each takes (title, synopsis, poster URL,
# YouTube trailer URL) — argument order per the media.Movie constructor.
lone_survivor = media.Movie(
    "<NAME>",
    "Lone Survivor is a 2013 American biographical war thriller film based on the 2007 eponymous non-fiction book", # NOQA
    "https://movieposters2.com/images/1301381-b.jpg",
    "https://youtu.be/yoLFk4JK_RM")
adrift = media.Movie(
    "Adrift",
    "Story of 2 avid sailors setting out on a journey across the ocean, meets a catastrophic hurricanes in recorded history.", # NOQA
    "https://goo.gl/EaCfqR",
    "https://youtu.be/n9ukI7khQpE")
ratatouille = media.Movie(
    "Ratatouille",
    "Remy dreams of becoming a great chef, despite being a rat in a definitely rodent-phobic profession.", # NOQA
    "https://goo.gl/35XcjG",
    "https://youtu.be/uVeNEbh3A4U")
big_hero_6 = media.Movie(
    "Big Hero 6",
    "Determined to uncover the mystery, Hiro transforms his friends into a band of high-tech heroes called Big Hero 6", # NOQA
    "https://goo.gl/RQQzAd",
    "https://youtu.be/z3biFxZIJOQ")
age_of_adaline = media.Movie(
    "Age Of Adaline",
    "<NAME> has miraculously remained a youthful 29 years of age for nearly eight decades, and hides her secret", # NOQA
    "https://goo.gl/gP3GLC",
    "https://youtu.be/7UzSekc0LoQ")
me_before_you = media.Movie(
    "Me Before You",
    "Young and quirky Louisa, is put to the test when she becomes a caregiver for Will Traynoraccident.", # NOQA
    "https://goo.gl/qk82bS",
    "https://youtu.be/Eh993__rOxA")
kingsman = media.Movie(
    "Kingsman: The Golden Circle",
    "It's James Bond On Laughing Gas.",
    "https://goo.gl/g3gh53",
    "https://youtu.be/6Nxc-3WpMbg")
mission_impossible = media.Movie(
    "Mission Impossible: Rogue Nation",
    "With the IMF now disbanded and Ethan Hunt out in the cold, a new threat - called the Syndicate - soon emerges", # NOQA
    "https://goo.gl/mYiuzQ",
    "https://youtu.be/pXwaKB7YOjw")
camp_xray = media.Movie(
    "Camp X Ray",
    "A female guard at Guantanamo Bay forms an unlikely friendship with one of the facility's longtime detainees.", # NOQA
    "https://goo.gl/1MEsqM",
    "https://youtu.be/V602GMSSo0Y")
# Display order on the generated page.
movies = [
    lone_survivor, adrift, ratatouille,
    big_hero_6, age_of_adaline,
    me_before_you, kingsman,
    mission_impossible, camp_xray]
# Render the static HTML page and open it in the default browser.
fresh_tomatoes.open_movies_page(movies)
| import fresh_tomatoes
import media
lone_survivor = media.Movie(
"<NAME>",
"Lone Survivor is a 2013 American biographical war thriller film based on the 2007 eponymous non-fiction book", # NOQA
"https://movieposters2.com/images/1301381-b.jpg",
"https://youtu.be/yoLFk4JK_RM")
adrift = media.Movie(
"Adrift",
"Story of 2 avid sailors setting out on a journey across the ocean, meets a catastrophic hurricanes in recorded history.", # NOQA
"https://goo.gl/EaCfqR",
"https://youtu.be/n9ukI7khQpE")
ratatouille = media.Movie(
"Ratatouille",
"Remy dreams of becoming a great chef, despite being a rat in a definitely rodent-phobic profession.", # NOQA
"https://goo.gl/35XcjG",
"https://youtu.be/uVeNEbh3A4U")
big_hero_6 = media.Movie(
"Big Hero 6",
"Determined to uncover the mystery, Hiro transforms his friends into a band of high-tech heroes called Big Hero 6", # NOQA
"https://goo.gl/RQQzAd",
"https://youtu.be/z3biFxZIJOQ")
age_of_adaline = media.Movie(
"Age Of Adaline",
"<NAME> has miraculously remained a youthful 29 years of age for nearly eight decades, and hides her secret", # NOQA
"https://goo.gl/gP3GLC",
"https://youtu.be/7UzSekc0LoQ")
me_before_you = media.Movie(
"Me Before You",
"Young and quirky Louisa, is put to the test when she becomes a caregiver for Will Traynoraccident.", # NOQA
"https://goo.gl/qk82bS",
"https://youtu.be/Eh993__rOxA")
kingsman = media.Movie(
"Kingsman: The Golden Circle",
"It's James Bond On Laughing Gas.",
"https://goo.gl/g3gh53",
"https://youtu.be/6Nxc-3WpMbg")
mission_impossible = media.Movie(
"Mission Impossible: Rogue Nation",
"With the IMF now disbanded and Ethan Hunt out in the cold, a new threat - called the Syndicate - soon emerges", # NOQA
"https://goo.gl/mYiuzQ",
"https://youtu.be/pXwaKB7YOjw")
camp_xray = media.Movie(
"Camp X Ray",
"A female guard at Guantanamo Bay forms an unlikely friendship with one of the facility's longtime detainees.", # NOQA
"https://goo.gl/1MEsqM",
"https://youtu.be/V602GMSSo0Y")
movies = [
lone_survivor, adrift, ratatouille,
big_hero_6, age_of_adaline,
me_before_you, kingsman,
mission_impossible, camp_xray]
fresh_tomatoes.open_movies_page(movies)
| ur | 0.235628 | # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA # NOQA | 2.004631 | 2 |
src/main.py | alexcarrega/prompt-able | 1 | 6620520 | # Copyright (c) 2020-2029 <NAME> <<EMAIL>>
# author: <NAME> <<EMAIL>>
import json
import threading
from datetime import datetime
from string import Template
from subprocess import PIPE, CompletedProcess, run
from typing import Dict, Iterable, List, Optional, Tuple, TypeVar
from bs4 import BeautifulSoup
from bunch import Bunch
from dynaconf import Dynaconf
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.document import Document
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.history import FileHistory
from prompt_toolkit.shortcuts.prompt import PromptSession
from prompt_toolkit.validation import ValidationError, Validator
from pygments import formatters, highlight, lexers
T_Data = TypeVar('T_Data', bound='Data')
NOT_AVAILABLE = 'N.A.'
def get_keys(data: Iterable[str]) -> List[str]:
    """Normalize each entry: strip surrounding whitespace and lowercase it."""
    return [entry.strip().lower() for entry in data]
class Data:
    """Mutable application state: hot-reloaded settings plus last-run stats."""

    def __init__(self: T_Data):
        self.update()
        # Stats of the most recent command execution; exec() overwrites these.
        self.last_exec_start = NOT_AVAILABLE
        self.last_exec_end = datetime.now()
        self.last_exec_ret_code = NOT_AVAILABLE

    def update(self: T_Data) -> None:
        """Reload settings from disk, then re-arm a 1-second daemon timer."""
        self.settings = Dynaconf(settings_files=["settings.yaml", ".secrets.yaml"])
        # 'q' (quit) is always accepted in addition to configured commands.
        self.available_commands = get_keys(self.settings.get('commands', {}).keys()) + ['q']
        # Daemon timer so the background reload loop never blocks interpreter exit.
        t = threading.Timer(1, self.update)
        t.daemon = True
        t.start()
T_CommandValidator = TypeVar('T_CommandValidator', bound='CommandValidator')
class CommandValidator(Validator):
def __init__(self: T_CommandValidator, data: Data):
self.data = data
def validate(self: T_CommandValidator, document: Document) -> None:
text = document.text.strip().lower()
if not text.startswith(tuple(self.data.available_commands)):
raise ValidationError(message=f'Command {text} not found')
def exec(command: str, args: List[str], data: "Data") -> CompletedProcess[str]:
    """Run a shell command template, recording timing and exit status on *data*.

    The template may reference $-placeholders resolved from the config's
    ``vars`` section and from the CLI arguments ($arg_1, $arg_2, ... and
    $args for all of them newline-joined).

    BUG FIX: the original annotated ``args`` as ``str`` although every
    caller passes a list of argument strings.

    Returns the CompletedProcess, or a CompletedProcess-like Bunch with
    returncode 1 when a template placeholder cannot be resolved.
    """
    data.last_exec_start = datetime.now()
    # Drop blank arguments before exposing them to the template.
    args = [arg for arg in args if arg.strip()]
    args_dict = {f'arg_{i}': v for i, v in enumerate(args, 1)}
    args_dict['args'] = '\n'.join(args)
    try:
        command = Template(command).substitute(**data.settings.get('vars', {}), **args_dict)
        # NOTE(security): shell=True executes user-provided text in a shell.
        # Acceptable for a local interactive tool; never expose to untrusted input.
        output = run(command, check=False, shell=True,
                     stdout=PIPE, stderr=PIPE, universal_newlines=True)
    except KeyError as key_error:
        # Unknown $placeholder: report a synthetic failure instead of crashing.
        return Bunch(args=command, stdout=NOT_AVAILABLE, stderr=f'Variable not found: {key_error}', returncode=1)
    data.last_exec_end = datetime.now()
    data.last_exec_ret_code = output.returncode
    return output
def default(text: str) -> str:
    """Return *text*, falling back to the N.A. placeholder when it is falsy."""
    return text or NOT_AVAILABLE
def format(data: CompletedProcess[str], type: str, lines: bool) -> str:
    """Render a finished command's output for display.

    type:  'html' — prettify stdout and syntax-highlight it;
           'json' — pretty-print stdout as JSON (highlighted), or an error
                    summary if stdout is not valid JSON;
           'std'  — pass *data* through unchanged.
    lines: when True, prefix each output line with a right-aligned number.

    BUG FIX: an unrecognized *type* previously left ``output`` unassigned and
    crashed with UnboundLocalError; it now raises a descriptive ValueError.
    NOTE: the name ``format`` shadows the builtin — kept for interface
    compatibility with existing callers.
    """
    if type == 'html':
        output = BeautifulSoup(data.stdout, features='html5lib').prettify()
        output = highlight(output, lexers.HtmlLexer(), formatters.TerminalFormatter())
    elif type == 'json':
        try:
            json_data = json.loads(data.stdout)
            output = json.dumps(json_data, indent=4, sort_keys=True)
            output = highlight(output, lexers.JsonLexer(), formatters.TerminalFormatter())
        except Exception as exception:
            # stdout was not valid JSON: show the failure with full context.
            output = f'\nError: {exception}\nInput: {data.args}\nOutput: {default(data.stdout)}\nMessage: {default(data.stderr)}\n'
    elif type == 'std':
        output = data
    else:
        raise ValueError(f'Unknown output type: {type}')
    if lines:
        return '\n'.join(map(lambda line: '{0:>5}'.format(line[0]) + '.\t' + line[1], enumerate(output.split('\n'))))
    return output
def bottom_toolbar(data):
    """Build the prompt_toolkit bottom-toolbar renderer for the last run's stats."""
    # Red when the last run failed (or nothing ran yet), green on success.
    ret_code_style = 'red' if data.last_exec_ret_code == NOT_AVAILABLE or data.last_exec_ret_code > 0 else 'green'
    if data.last_exec_start == NOT_AVAILABLE:
        duration = NOT_AVAILABLE
    else:
        duration = (data.last_exec_end - data.last_exec_start).total_seconds()
    # Stats are captured now; prompt_toolkit invokes the returned lambda to render.
    return lambda: HTML(f'<aaa fg="blue" bg="white"> - Time: <b>{data.last_exec_end}</b> </aaa><aaa fg="lightyellow"> - Duration: <b>{duration}</b> </aaa><aaa fg="dark{ret_code_style}" bg="white"> - Return code: <b>{data.last_exec_ret_code}</b> </aaa>')
def get_command(input: str, data: Data) -> Tuple[Optional[str], Optional[str]]:
    """Return (name, config) of the first configured command prefixing *input*.

    Falls back to (None, None) when nothing matches. Configuration order
    (dict insertion order) decides which command wins.
    """
    matches = (
        (name, config)
        for name, config in data.settings.commands.items()
        if input.startswith(name)
    )
    return next(matches, (None, None))
def main():
    """Interactive REPL: read a command line, dispatch it, print formatted output."""
    data = Data()
    session = PromptSession(history=FileHistory('.prompt-able'))
    while True:
        try:
            # Locals renamed from the original's `input`/`type` to avoid
            # shadowing builtins; behavior is unchanged.
            user_input = session.prompt(f'{data.settings.prompt} ',
                                        bottom_toolbar=bottom_toolbar(data),
                                        auto_suggest=AutoSuggestFromHistory(),
                                        completer=WordCompleter(data.available_commands),
                                        validator=CommandValidator(data),
                                        validate_while_typing=False).strip().lower()
            command_key, command_data = get_command(user_input, data)
            if command_data:
                output_type = command_data.output.strip().lower()
                numbered = command_data.lines
                cli_args = user_input.replace(command_key, '').split(' ')
                completed = exec(command_data.exec, cli_args, data)
                print(format(completed, output_type, numbered))
            elif user_input == 'q':
                exit(0)
        # BUG FIX: `except KeyboardInterrupt or EOFError` evaluates the `or`
        # first and only ever caught KeyboardInterrupt, so Ctrl-D (EOFError)
        # crashed the loop. A tuple catches both.
        except (KeyboardInterrupt, EOFError) as err:
            print(err)


if __name__ == '__main__':
    main()
| # Copyright (c) 2020-2029 <NAME> <<EMAIL>>
# author: <NAME> <<EMAIL>>
import json
import threading
from datetime import datetime
from string import Template
from subprocess import PIPE, CompletedProcess, run
from typing import Dict, Iterable, List, Optional, Tuple, TypeVar
from bs4 import BeautifulSoup
from bunch import Bunch
from dynaconf import Dynaconf
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.document import Document
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.history import FileHistory
from prompt_toolkit.shortcuts.prompt import PromptSession
from prompt_toolkit.validation import ValidationError, Validator
from pygments import formatters, highlight, lexers
T_Data = TypeVar('T_Data', bound='Data')
NOT_AVAILABLE = 'N.A.'
def get_keys(data: Iterable[str]) -> List[str]:
return list(map(lambda item: item.strip().lower(), data))
class Data:
def __init__(self: T_Data):
self.update()
self.last_exec_start = NOT_AVAILABLE
self.last_exec_end = datetime.now()
self.last_exec_ret_code = NOT_AVAILABLE
def update(self: T_Data) -> None:
self.settings = Dynaconf(settings_files=["settings.yaml", ".secrets.yaml"])
self.available_commands = get_keys(self.settings.get('commands', {}).keys()) + ['q']
t = threading.Timer(1, self.update)
t.daemon = True
t.start()
T_CommandValidator = TypeVar('T_CommandValidator', bound='CommandValidator')
class CommandValidator(Validator):
def __init__(self: T_CommandValidator, data: Data):
self.data = data
def validate(self: T_CommandValidator, document: Document) -> None:
text = document.text.strip().lower()
if not text.startswith(tuple(self.data.available_commands)):
raise ValidationError(message=f'Command {text} not found')
def exec(command: str, args: str, data: Data) -> CompletedProcess[str]:
data.last_exec_start = datetime.now()
args = list(filter(lambda arg: arg.strip(), args))
args_dict = {f'arg_{i}': v for i, v in enumerate(args, 1)}
args_dict['args'] = '\n'.join(args)
try:
command = Template(command).substitute(**data.settings.get('vars', {}), **args_dict)
output = run(command, check=False, shell=True,
stdout=PIPE, stderr=PIPE, universal_newlines=True)
except KeyError as key_error:
return Bunch(args=command, stdout=NOT_AVAILABLE, stderr=f'Variable not found: {key_error}', returncode=1)
data.last_exec_end = datetime.now()
data.last_exec_ret_code = output.returncode
return output
def default(text: str) -> str:
return text if text else NOT_AVAILABLE
def format(data: CompletedProcess[str], type: str, lines: bool) -> str:
if type == 'html':
output = BeautifulSoup(data.stdout, features='html5lib').prettify()
output = highlight(output, lexers.HtmlLexer(), formatters.TerminalFormatter())
if type == 'json':
try:
json_data = json.loads(data.stdout)
output = json.dumps(json_data, indent=4, sort_keys=True)
output = highlight(output, lexers.JsonLexer(), formatters.TerminalFormatter())
except Exception as exception:
output = f'\nError: {exception}\nInput: {data.args}\nOutput: {default(data.stdout)}\nMessage: {default(data.stderr)}\n'
if type == 'std':
output = data
if lines:
return '\n'.join(map(lambda line: '{0:>5}'.format(line[0]) + '.\t' + line[1], enumerate(output.split('\n'))))
else:
return output
def bottom_toolbar(data):
ret_code_style = 'red' if data.last_exec_ret_code == NOT_AVAILABLE or data.last_exec_ret_code > 0 else 'green'
if data.last_exec_start == NOT_AVAILABLE:
duration = NOT_AVAILABLE
else:
duration = (data.last_exec_end - data.last_exec_start).total_seconds()
return lambda: HTML(f'<aaa fg="blue" bg="white"> - Time: <b>{data.last_exec_end}</b> </aaa><aaa fg="lightyellow"> - Duration: <b>{duration}</b> </aaa><aaa fg="dark{ret_code_style}" bg="white"> - Return code: <b>{data.last_exec_ret_code}</b> </aaa>')
def get_command(input: str, data: Data) -> Tuple[Optional[str], Optional[str]]:
for cmd_key, cmd_data in data.settings.commands.items():
if input.startswith(cmd_key):
return (cmd_key, cmd_data)
return (None, None)
def main():
data = Data()
session = PromptSession(history=FileHistory('.prompt-able'))
while True:
try:
input = session.prompt(f'{data.settings.prompt} ',
bottom_toolbar=bottom_toolbar(data),
auto_suggest=AutoSuggestFromHistory(),
completer=WordCompleter(data.available_commands),
validator=CommandValidator(data),
validate_while_typing=False).strip().lower()
command_key, command_data = get_command(input, data)
if command_data:
type = command_data.output.strip().lower()
lines = command_data.lines
args = input.replace(command_key, '').split(' ')
output_process = exec(command_data.exec, args, data)
output = format(output_process, type, lines)
print(output)
elif input == 'q':
exit(0)
except KeyboardInterrupt or EOFError as err:
print(err)
if __name__ == '__main__':
main()
| en | 0.16525 | # Copyright (c) 2020-2029 <NAME> <<EMAIL>> # author: <NAME> <<EMAIL>> | 2.112754 | 2 |
pipeline/train_metric_effnet.py | NhuanTDBK/visual-image-search | 0 | 6620521 | import argparse
import glob
import logging
import os
import efficientnet.tfkeras as efn
from sklearn.model_selection import KFold
from models.losses import *
from models.pool import *
from models.preprocess_img import *
from pipeline.utils import get_linear_decay, get_disk_path, seed_everything, count_data_items, train
logger = logging.getLogger("main")

# Map model-size keys ("b0".."b7") to EfficientNet backbone constructors;
# selected at build time via params["model_name"].
image_extractor_mapper = {
    "b0": efn.EfficientNetB0,
    "b1": efn.EfficientNetB1,
    "b2": efn.EfficientNetB2,
    "b3": efn.EfficientNetB3,
    "b4": efn.EfficientNetB4,
    "b5": efn.EfficientNetB5,
    "b6": efn.EfficientNetB6,
    "b7": efn.EfficientNetB7
}
def create_model():
    """Build the metric-learning training model and the embedding-only model.

    Returns (model, emb_model): ``model`` maps (image, label) through an
    EfficientNet backbone, pooling head and metric head to class scores;
    ``emb_model`` maps image -> embedding only. Relies on the module-level
    globals IMAGE_SIZE, N_CLASSES and params.
    """
    inp = tf.keras.layers.Input(shape=(*IMAGE_SIZE, 3), name='inp1')
    label = tf.keras.layers.Input(shape=(), dtype=tf.int32, name='inp2')
    labels_onehot = tf.one_hot(label, depth=N_CLASSES, name="onehot")
    # NOTE(review): params["model_name"] must be one of image_extractor_mapper's
    # keys ("b0".."b7"); any other value raises KeyError here.
    effnet = image_extractor_mapper[params["model_name"]](include_top=False, weights="imagenet", )
    x = effnet(inp)
    emb = LocalGlobalExtractor(params["pool"], params["fc_dim"], params["dropout"])(x)
    x1 = MetricLearner(N_CLASSES, metric=params["metric"], l2_wd=params["l2_wd"])([emb, labels_onehot])
    model = tf.keras.Model(inputs=[inp, label], outputs=[x1])
    model.summary()
    emb_model = tf.keras.Model(inputs=[inp], outputs=[emb])
    return model, emb_model
def main():
    """Run K-fold training: one model per (train, valid) TFRecord shard pair."""
    seed_everything(SEED)
    logger.info("Loading data")
    input_paths = params['input_path']
    train_files = np.array([fpath for fpath in glob.glob(input_paths + "/train*.tfrec")])
    valid_files = np.array([fpath for fpath in glob.glob(input_paths + "/valid*.tfrec")])
    # BUG FIX: logging uses lazy %-style formatting; the original call passed
    # the array as a stray positional argument with no %s placeholder, which
    # makes the logging module raise an internal formatting error.
    logger.info("Found files: %s", train_files)
    n_folds = len(train_files)
    cv = KFold(n_folds, shuffle=True, random_state=SEED)
    for fold_idx, (_, _) in enumerate(cv.split(train_files, np.arange(n_folds))):
        # Optionally resume from a given fold. NOTE(review): fold 0 cannot be
        # selected this way because `0` is falsy — kept as-is for
        # backward compatibility.
        if params["resume_fold"] and params["resume_fold"] != fold_idx:
            continue
        ds_train = get_training_dataset(train_files[fold_idx], params["batch_size"], image_size=IMAGE_SIZE)
        # Fancy indexing ([[fold_idx]]) keeps an array, as count_data_items expects.
        num_training_images = count_data_items(train_files[[fold_idx]])
        logger.info("Get fold %s, ds training, %s images" % (fold_idx + 1, num_training_images))
        ds_val = get_validation_dataset(valid_files[fold_idx], params["batch_size"], image_size=IMAGE_SIZE)
        num_valid_images = count_data_items(valid_files[[fold_idx]])
        logger.info("Get fold %s, ds valid, %s images" % (fold_idx + 1, num_valid_images))
        optimizer = tf.keras.optimizers.Adam(learning_rate=params["lr"])
        if params["optim"] == "sgd":
            optimizer = tf.optimizers.SGD(learning_rate=params["lr"], momentum=0.9, decay=1e-5)
        loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
        metrics = tf.keras.metrics.SparseCategoricalAccuracy()
        callbacks = []
        if params["lr_schedule"]:
            # NOTE(review): only "linear" is wired up; the "cosine" branch is
            # commented out, so the default lr_schedule value is a no-op.
            # if params["lr_schedule"] == "cosine":
            #     callbacks.append(get_cosine_annealing(params, num_training_images))
            if params["lr_schedule"] == "linear":
                callbacks.append(get_linear_decay(params))
        logger.info(callbacks)
        model_id = "fold_" + str(fold_idx)
        train(params, create_model, optimizer, loss, metrics, callbacks, ds_train, ds_val,
              num_training_images, model_dir, model_id)
def parse_args():
    """Parse CLI hyper-parameters and return them as a plain dict."""
    parser = argparse.ArgumentParser()
    # BUG FIX: the previous default "effb7" is not a key of
    # image_extractor_mapper ("b0".."b7"), so running with defaults raised
    # KeyError in create_model. "b7" selects the same EfficientNet-B7 backbone.
    parser.add_argument("--model_name", type=str, default='b7')
    parser.add_argument("--epochs", type=int, default=25)
    parser.add_argument("--batch_size", type=int, default=32)
    parser.add_argument("--margin", type=float, default=0.3)
    parser.add_argument("--s", type=float, default=30)
    parser.add_argument("--pool", type=str, default="gem")
    parser.add_argument("--dropout", type=float, default=0.5)
    parser.add_argument("--last_hidden_states", type=int, default=3)
    parser.add_argument("--fc_dim", type=int, default=512)
    parser.add_argument("--lr", type=float, default=0.00001)
    parser.add_argument("--weight_decay", type=float, default=1e-5)
    parser.add_argument("--l2_wd", type=float, default=1e-5)
    parser.add_argument("--metric", type=str, default="adacos")
    parser.add_argument("--input_path", type=str)
    parser.add_argument("--warmup_epoch", type=int, default=10)
    parser.add_argument("--verbose", type=int, default=0)
    parser.add_argument("--resume_fold", type=int, default=None)
    parser.add_argument("--image_size", type=int, default=512)
    parser.add_argument("--freeze", type=bool, default=False)
    parser.add_argument("--saved_path", type=str, default=get_disk_path())
    parser.add_argument("--check_period", type=int, default=5)
    # NOTE(review): main() only honors "linear"; the "cosine" branch there is
    # commented out, so this default currently adds no callback.
    parser.add_argument("--lr_schedule", type=str, default="cosine")
    parser.add_argument("--is_checkpoint", type=bool, default=True)
    parser.add_argument("--optim", type=str, default="adam")
    parser.add_argument("--patience", type=int, default=5)
    args = parser.parse_args()
    params = vars(args)
    return params
if __name__ == "__main__":
    # These are module-level globals consumed by create_model() and main().
    params = parse_args()
    SEED = 4111        # global RNG seed for reproducible splits/training
    N_CLASSES = 11014  # number of label classes expected in the dataset
    IMAGE_SIZE = (params["image_size"], params["image_size"])
    saved_path = params["saved_path"]
    # Checkpoints/models land in <saved_path>/saved/<model_name>/<image_size>.
    model_dir = os.path.join(saved_path, "saved", params["model_name"], str(params["image_size"]))
    os.makedirs(model_dir, exist_ok=True)
    main()
| import argparse
import glob
import logging
import os
import efficientnet.tfkeras as efn
from sklearn.model_selection import KFold
from models.losses import *
from models.pool import *
from models.preprocess_img import *
from pipeline.utils import get_linear_decay, get_disk_path, seed_everything, count_data_items, train
logger = logging.getLogger("main")
image_extractor_mapper = {
"b0": efn.EfficientNetB0,
"b1": efn.EfficientNetB1,
"b2": efn.EfficientNetB2,
"b3": efn.EfficientNetB3,
"b4": efn.EfficientNetB4,
"b5": efn.EfficientNetB5,
"b6": efn.EfficientNetB6,
"b7": efn.EfficientNetB7
}
def create_model():
inp = tf.keras.layers.Input(shape=(*IMAGE_SIZE, 3), name='inp1')
label = tf.keras.layers.Input(shape=(), dtype=tf.int32, name='inp2')
labels_onehot = tf.one_hot(label, depth=N_CLASSES, name="onehot")
effnet = image_extractor_mapper[params["model_name"]](include_top=False, weights="imagenet", )
x = effnet(inp)
emb = LocalGlobalExtractor(params["pool"], params["fc_dim"], params["dropout"])(x)
x1 = MetricLearner(N_CLASSES, metric=params["metric"], l2_wd=params["l2_wd"])([emb, labels_onehot])
model = tf.keras.Model(inputs=[inp, label], outputs=[x1])
model.summary()
emb_model = tf.keras.Model(inputs=[inp], outputs=[emb])
return model, emb_model
def main():
    """Run k-fold training, one fold per training TFRecord shard.

    Relies on module-level globals set in the __main__ block: `params`,
    `SEED`, `IMAGE_SIZE` and `model_dir`.  Each fold trains on shard
    `train<i>.tfrec` and validates on the matching `valid<i>.tfrec`
    (assumes both globs yield the same number of files — TODO confirm).
    """
    seed_everything(SEED)
    logger.info("Loading data")
    input_paths = params['input_path']
    train_files = np.array([fpath for fpath in glob.glob(input_paths + "/train*.tfrec")])
    valid_files = np.array([fpath for fpath in glob.glob(input_paths + "/valid*.tfrec")])
    # Bug fix: logging uses lazy %-style formatting; the previous call
    # `logger.info("Found files: ", train_files)` passed an argument with no
    # placeholder, so the logging machinery raised internally and the
    # message was lost.
    logger.info("Found files: %s", train_files)
    n_folds = len(train_files)
    cv = KFold(n_folds, shuffle=True, random_state=SEED)
    for fold_idx, (_, _) in enumerate(cv.split(train_files, np.arange(n_folds))):
        # Optionally resume from a specific fold only.
        if params["resume_fold"] and params["resume_fold"] != fold_idx:
            continue
        ds_train = get_training_dataset(train_files[fold_idx], params["batch_size"], image_size=IMAGE_SIZE)
        num_training_images = count_data_items(train_files[[fold_idx]])
        logger.info("Get fold %s, ds training, %s images" % (fold_idx + 1, num_training_images))
        ds_val = get_validation_dataset(valid_files[fold_idx], params["batch_size"], image_size=IMAGE_SIZE)
        num_valid_images = count_data_items(valid_files[[fold_idx]])
        logger.info("Get fold %s, ds valid, %s images" % (fold_idx + 1, num_valid_images))
        # Optimizer: Adam by default, plain SGD with momentum on request.
        optimizer = tf.keras.optimizers.Adam(learning_rate=params["lr"])
        if params["optim"] == "sgd":
            optimizer = tf.optimizers.SGD(learning_rate=params["lr"], momentum=0.9, decay=1e-5)
        loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
        metrics = tf.keras.metrics.SparseCategoricalAccuracy()
        callbacks = []
        if params["lr_schedule"]:
            # if params["lr_schedule"] == "cosine":
            #     callbacks.append(get_cosine_annealing(params, num_training_images))
            if params["lr_schedule"] == "linear":
                callbacks.append(get_linear_decay(params))
        logger.info(callbacks)
        model_id = "fold_" + str(fold_idx)
        train(params, create_model, optimizer, loss, metrics, callbacks, ds_train, ds_val,
              num_training_images, model_dir, model_id)
def parse_args():
    """Parse command-line arguments and return them as a plain dict."""

    def _str2bool(v):
        # Bug fix: `type=bool` in argparse converts any non-empty string
        # (including "False") to True.  This converter accepts the usual
        # spellings explicitly and stays backward compatible with
        # `--freeze True` / `--is_checkpoint True`.
        if isinstance(v, bool):
            return v
        if v.lower() in ("yes", "true", "t", "y", "1"):
            return True
        if v.lower() in ("no", "false", "f", "n", "0"):
            return False
        raise argparse.ArgumentTypeError("Boolean value expected, got %r" % v)

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default='effb7')
    parser.add_argument("--epochs", type=int, default=25)
    parser.add_argument("--batch_size", type=int, default=32)
    parser.add_argument("--margin", type=float, default=0.3)
    parser.add_argument("--s", type=float, default=30)
    parser.add_argument("--pool", type=str, default="gem")
    parser.add_argument("--dropout", type=float, default=0.5)
    parser.add_argument("--last_hidden_states", type=int, default=3)
    parser.add_argument("--fc_dim", type=int, default=512)
    parser.add_argument("--lr", type=float, default=0.00001)
    parser.add_argument("--weight_decay", type=float, default=1e-5)
    parser.add_argument("--l2_wd", type=float, default=1e-5)
    parser.add_argument("--metric", type=str, default="adacos")
    parser.add_argument("--input_path", type=str)
    parser.add_argument("--warmup_epoch", type=int, default=10)
    parser.add_argument("--verbose", type=int, default=0)
    parser.add_argument("--resume_fold", type=int, default=None)
    parser.add_argument("--image_size", type=int, default=512)
    parser.add_argument("--freeze", type=_str2bool, default=False)
    parser.add_argument("--saved_path", type=str, default=get_disk_path())
    parser.add_argument("--check_period", type=int, default=5)
    parser.add_argument("--lr_schedule", type=str, default="cosine")
    parser.add_argument("--is_checkpoint", type=_str2bool, default=True)
    parser.add_argument("--optim", type=str, default="adam")
    parser.add_argument("--patience", type=int, default=5)
    args = parser.parse_args()
    params = vars(args)
    return params
if __name__ == "__main__":
    # Globals consumed by main()/create_model(): parsed CLI params, RNG
    # seed, number of target classes, square input size and checkpoint dir.
    params = parse_args()
    SEED = 4111
    N_CLASSES = 11014  # number of label classes — TODO confirm against the dataset
    IMAGE_SIZE = (params["image_size"], params["image_size"])
    saved_path = params["saved_path"]
    # Checkpoints go under <saved_path>/saved/<model_name>/<image_size>/.
    model_dir = os.path.join(saved_path, "saved", params["model_name"], str(params["image_size"]))
    os.makedirs(model_dir, exist_ok=True)
    main()
| en | 0.324549 | # if params["lr_schedule"] == "cosine": # callbacks.append(get_cosine_annealing(params, num_training_images)) | 2.21002 | 2 |
prob_mbrl/envs/pendulum/__init__.py | Praneethsv/prob_mbrl | 108 | 6620522 | from .env import Pendulum, PendulumReward
from .model import PendulumModel
__all__ = ["Pendulum", "PendulumReward", "PendulumModel"]
| from .env import Pendulum, PendulumReward
from .model import PendulumModel
__all__ = ["Pendulum", "PendulumReward", "PendulumModel"]
| none | 1 | 1.058917 | 1 | |
puskesmas_app/resources.py | kurniantoska/medicalwebapp_project | 1 | 6620523 | from import_export import resources
from .models import Person
class PersonResource(resources.ModelResource):
    """django-import-export resource for the Person model (all model fields
    are exposed by ModelResource's defaults)."""
    class Meta:
        model = Person
from .models import Person
class PersonResource(resources.ModelResource):
class Meta:
model = Person | none | 1 | 1.614178 | 2 | |
suds/__init__.py | slushie0/interactive-tutorials | 2,750 | 6620524 | <reponame>slushie0/interactive-tutorials
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: <NAME> ( <EMAIL> )
"""
Suds is a lightweight SOAP python client that provides a
service proxy for Web Services.
"""
from .compat import basestring, unicode
#
# Project properties
#
__version__ = '1.4.4.1'
__build__ = "IN 20210108"
#
# Exceptions
#
class MethodNotFound(Exception):
    """Raised when a web-service method cannot be resolved by name."""

    def __init__(self, name):
        super().__init__("Method not found: '%s'" % name)
class PortNotFound(Exception):
    """Raised when a WSDL port cannot be resolved by name."""

    def __init__(self, name):
        super().__init__("Port not found: '%s'" % name)
class ServiceNotFound(Exception):
    """Raised when a service cannot be resolved by name."""

    def __init__(self, name):
        super().__init__("Service not found: '%s'" % name)
class TypeNotFound(Exception):
    """Raised when a (possibly qualified) type name cannot be resolved."""

    def __init__(self, name):
        # tostr() renders qualified-name tuples in a readable form.
        super().__init__("Type not found: '%s'" % tostr(name))
class BuildError(Exception):
    """Raised when an object instance cannot be built from its schema type."""

    msg = \
        """
        An error occured while building a instance of (%s). As a result
        the object you requested could not be constructed. It is recommended
        that you construct the type manually using a Suds object.
        Please open a ticket with a description of this error.
        Reason: %s
        """

    def __init__(self, name, exception):
        detail = BuildError.msg % (name, exception)
        super().__init__(detail)
class SoapHeadersNotPermitted(Exception):
    """Raised when SOAP headers are passed to a method whose WSDL defines none."""

    msg = \
        """
        Method (%s) was invoked with SOAP headers. The WSDL does not
        define SOAP headers for this method. Retry without the soapheaders
        keyword argument.
        """

    def __init__(self, name):
        super().__init__(self.msg % name)
def smart_str(s, encoding='utf-8', errors='strict'):
    """
    Returns a bytestring version of 's', encoded as specified in 'encoding'.
    If strings_only is True, don't convert (some) non-string-like objects.
    from django

    NOTE(review): `basestring`/`unicode` are imported from .compat; under
    Python 3 they presumably alias `str` — verify against suds.compat.
    """
    if not isinstance(s, basestring):
        try:
            return str(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return ' '.join(smart_str(arg, encoding, errors) for arg in s)
            return unicode(s).encode(encoding, errors)
    elif isinstance(s, unicode):
        return s.encode(encoding, errors)
    elif s and encoding != 'utf-8':
        # Non-empty non-unicode string with a non-default target encoding:
        # transcode via utf-8.
        return s.decode('utf-8', errors).encode(encoding, errors)
    else:
        return s
class WebFault(Exception):
    """Raised when the remote server returns a SOAP fault.

    Exposes the parsed `fault` object and the raw reply `document`.
    """
    def __init__(self, fault, document):
        # Only faults carrying a faultstring get a formatted message;
        # otherwise Exception.__init__ is never called (args stays empty).
        if hasattr(fault, 'faultstring'):
            Exception.__init__(self, smart_str("Server raised fault: '%s'" % fault.faultstring))
        self.fault = fault
        self.document = document
#
# Logging
#
class Repr:
    """Defer repr() of an object until stringification (handy for logging)."""

    def __init__(self, x):
        self.x = x

    def __str__(self):
        return "%r" % (self.x,)
#
# Utility
#
def tostr(object, encoding=None):
    """Get a unicode safe string representation of an object.

    Strings are returned as-is (or encoded when *encoding* is given);
    tuples, lists and dicts are rendered recursively, preserving the
    historic trailing ', ' before the closing bracket; anything else falls
    back to unicode()/str().
    """
    if isinstance(object, basestring):
        if encoding is None:
            return object
        else:
            return object.encode(encoding)
    if isinstance(object, tuple):
        return _tostr_items(object, '(', ')')
    if isinstance(object, list):
        return _tostr_items(object, '[', ']')
    if isinstance(object, dict):
        s = ['{']
        for item in object.items():
            if isinstance(item[0], basestring):
                s.append(item[0])
            else:
                s.append(tostr(item[0]))
            s.append(' = ')
            if isinstance(item[1], basestring):
                s.append(item[1])
            else:
                s.append(tostr(item[1]))
            s.append(', ')
        s.append('}')
        return ''.join(s)
    try:
        return unicode(object)
    except Exception:
        # Bug fix: this was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.
        return str(object)


def _tostr_items(items, opening, closing):
    """Render a sequence as e.g. "(a, b, )" — shared by the tuple/list
    branches of tostr()."""
    s = [opening]
    for item in items:
        if isinstance(item, basestring):
            s.append(item)
        else:
            s.append(tostr(item))
        s.append(', ')
    s.append(closing)
    return ''.join(s)
class null:
    """
    The I{null} object.
    Used to pass NULL for optional XML nodes.

    Marker type: callers pass the class (or an instance) to mean an
    explicit NULL, as opposed to omitting the node entirely.
    """
    pass
class Object(object):
    """
    The python 3 base Object

    Empty base class kept for API compatibility with the Python 2
    lineage of this library.
    """
    pass
def objid(obj):
    """Return a short debug identifier: '<ClassName>:<hex object id>'."""
    cls_name = obj.__class__.__name__
    return "%s:%s" % (cls_name, hex(id(obj)))
from .client import Client
| # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: <NAME> ( <EMAIL> )
"""
Suds is a lightweight SOAP python client that provides a
service proxy for Web Services.
"""
from .compat import basestring, unicode
#
# Project properties
#
__version__ = '1.4.4.1'
__build__ = "IN 20210108"
#
# Exceptions
#
class MethodNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Method not found: '%s'" % name)
class PortNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Port not found: '%s'" % name)
class ServiceNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Service not found: '%s'" % name)
class TypeNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Type not found: '%s'" % tostr(name))
class BuildError(Exception):
msg = \
"""
An error occured while building a instance of (%s). As a result
the object you requested could not be constructed. It is recommended
that you construct the type manually using a Suds object.
Please open a ticket with a description of this error.
Reason: %s
"""
def __init__(self, name, exception):
Exception.__init__(self, BuildError.msg % (name, exception))
class SoapHeadersNotPermitted(Exception):
msg = \
"""
Method (%s) was invoked with SOAP headers. The WSDL does not
define SOAP headers for this method. Retry without the soapheaders
keyword argument.
"""
def __init__(self, name):
Exception.__init__(self, self.msg % name)
def smart_str(s, encoding='utf-8', errors='strict'):
"""
Returns a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
from django
"""
if not isinstance(s, basestring):
try:
return str(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return ' '.join(smart_str(arg, encoding, errors) for arg in s)
return unicode(s).encode(encoding, errors)
elif isinstance(s, unicode):
return s.encode(encoding, errors)
elif s and encoding != 'utf-8':
return s.decode('utf-8', errors).encode(encoding, errors)
else:
return s
class WebFault(Exception):
def __init__(self, fault, document):
if hasattr(fault, 'faultstring'):
Exception.__init__(self, smart_str("Server raised fault: '%s'" % fault.faultstring))
self.fault = fault
self.document = document
#
# Logging
#
class Repr:
def __init__(self, x):
self.x = x
def __str__(self):
return repr(self.x)
#
# Utility
#
def tostr(object, encoding=None):
""" get a unicode safe string representation of an object """
if isinstance(object, basestring):
if encoding is None:
return object
else:
return object.encode(encoding)
if isinstance(object, tuple):
s = ['(']
for item in object:
if isinstance(item, basestring):
s.append(item)
else:
s.append(tostr(item))
s.append(', ')
s.append(')')
return ''.join(s)
if isinstance(object, list):
s = ['[']
for item in object:
if isinstance(item, basestring):
s.append(item)
else:
s.append(tostr(item))
s.append(', ')
s.append(']')
return ''.join(s)
if isinstance(object, dict):
s = ['{']
for item in object.items():
if isinstance(item[0], basestring):
s.append(item[0])
else:
s.append(tostr(item[0]))
s.append(' = ')
if isinstance(item[1], basestring):
s.append(item[1])
else:
s.append(tostr(item[1]))
s.append(', ')
s.append('}')
return ''.join(s)
try:
return unicode(object)
except:
return str(object)
class null:
"""
The I{null} object.
Used to pass NULL for optional XML nodes.
"""
pass
class Object(object):
"""
The python 3 base Object
"""
pass
def objid(obj):
return obj.__class__.__name__ + ':' + hex(id(obj))
from .client import Client | en | 0.835661 | # This program is free software; you can redistribute it and/or modify # it under the terms of the (LGPL) GNU Lesser General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library Lesser General Public License for more details at # ( http://www.gnu.org/licenses/lgpl.html ). # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # written by: <NAME> ( <EMAIL> ) Suds is a lightweight SOAP python client that provides a service proxy for Web Services. # # Project properties # # # Exceptions # An error occured while building a instance of (%s). As a result the object you requested could not be constructed. It is recommended that you construct the type manually using a Suds object. Please open a ticket with a description of this error. Reason: %s Method (%s) was invoked with SOAP headers. The WSDL does not define SOAP headers for this method. Retry without the soapheaders keyword argument. Returns a bytestring version of 's', encoded as specified in 'encoding'. If strings_only is True, don't convert (some) non-string-like objects. from django # An Exception subclass containing non-ASCII data that doesn't # know how to print itself properly. We shouldn't raise a # further exception. # # Logging # # # Utility # get a unicode safe string representation of an object The I{null} object. Used to pass NULL for optional XML nodes. The python 3 base Object | 2.460179 | 2 |
stylee/cloth/migrations/0001_initial.py | jbaek7023/Stylee-API | 1 | 6620525 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-09-15 11:02
from __future__ import unicode_literals
import cloth.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Cloth and Wear tables."""
    initial = True
    dependencies = [
        # Wear/Cloth reference the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Cloth',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('publish', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('color', models.CharField(max_length=10)),
                ('cloth_type', models.CharField(choices=[('ts', 't-shirt'), ('ct', 'coat'), ('sh', 'shirt'), ('j', 'jean'), ('', 'py')], default='1', max_length=9)),
                # NOTE(review): height_field/width_field expect model field
                # *names*, not pixel sizes — likely a bug in the source model
                # this migration was generated from.
                ('cloth_image', models.ImageField(blank=True, height_field=1080, null=True, upload_to=cloth.models.upload_location, width_field=1080)),
                ('size', models.CharField(max_length=3)),
                ('link', models.CharField(max_length=20)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Wear',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now=True)),
                ('which', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cloth.Cloth')),
                ('who', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-09-15 11:02
from __future__ import unicode_literals
import cloth.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Cloth',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('publish', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('color', models.CharField(max_length=10)),
('cloth_type', models.CharField(choices=[('ts', 't-shirt'), ('ct', 'coat'), ('sh', 'shirt'), ('j', 'jean'), ('', 'py')], default='1', max_length=9)),
('cloth_image', models.ImageField(blank=True, height_field=1080, null=True, upload_to=cloth.models.upload_location, width_field=1080)),
('size', models.CharField(max_length=3)),
('link', models.CharField(max_length=20)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Wear',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_now=True)),
('which', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cloth.Cloth')),
('who', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
] | en | 0.774643 | # -*- coding: utf-8 -*- # Generated by Django 1.10.5 on 2017-09-15 11:02 | 1.769481 | 2 |
vidsitu_code/extended_config.py | TheShadow29/VidSitu | 37 | 6620526 | <gh_stars>10-100
import json
from pathlib import Path
from yacs.config import CfgNode as CN
from utils._init_stuff import yaml
from typing import Dict, Any
from slowfast.config.defaults import get_cfg
import argparse
from fairseq.models import ARCH_CONFIG_REGISTRY, ARCH_MODEL_REGISTRY
from fairseq.models.transformer import (
DEFAULT_MAX_SOURCE_POSITIONS,
DEFAULT_MAX_TARGET_POSITIONS,
)
# Maps the mdl.sf_mdl_name config value to the SlowFast yaml it merges in.
sf_mdl_to_cfg_fpath_dct = {
    "slow_fast_nl_r50_8x8": "./configs/vsitu_mdl_cfgs/Kinetics_c2_SLOWFAST_8x8_R50.yaml",
    "slow_nl_r50_8x8": "./configs/vsitu_mdl_cfgs/Kinetics_c2_SLOW_8x8_R50.yaml",
    "c2d_r50_8x8": "./configs/vsitu_mdl_cfgs/Kinetics_C2D_8x8_R50.yaml",
    "i3d_r50_8x8": "./configs/vsitu_mdl_cfgs/Kinetics_c2_I3D_8x8_R50.yaml",
    "i3d_r50_nl_8x8": "./configs/vsitu_mdl_cfgs/Kinetics_c2_I3D_NLN_8x8_R50.yaml",
}
# Maps the mdl.tx_dec_mdl_name config value to the transformer-decoder yaml.
tx_to_cfg_fpath_dct = {
    "transformer": "./configs/vsitu_tx_cfgs/transformer.yaml",
}
def get_default_tx_dec_cfg():
    """Return fairseq's default `transformer` architecture args as a CfgNode.

    Builds a throwaway argparse parser so the registered architecture can
    populate its defaults; max source/target positions are filled in
    explicitly because the arch config does not set them.
    """
    parser = argparse.ArgumentParser(
        argument_default=argparse.SUPPRESS, allow_abbrev=False
    )
    ARCH_MODEL_REGISTRY["transformer"].add_args(parser)
    # parse_known_args: ignore any unrelated CLI args on sys.argv.
    args1 = parser.parse_known_args()[0]
    ARCH_CONFIG_REGISTRY["transformer"](args1)
    args1_dct = vars(args1)
    args1_dct["max_source_positions"] = DEFAULT_MAX_SOURCE_POSITIONS
    args1_dct["max_target_positions"] = DEFAULT_MAX_TARGET_POSITIONS
    return CN(args1_dct)
class CfgProcessor:
    """Builds the experiment CfgNode: loads the base yaml, merges in the
    sub-model configs (SlowFast backbone, fairseq transformer decoder) and
    applies dict/CLI overrides with strict key/type checking.
    """
    def __init__(self, cfg_pth):
        # cfg_pth: path to the base experiment yaml; must already exist.
        assert Path(cfg_pth).exists()
        self.cfg_pth = cfg_pth
    def get_vsitu_default_cfg(self):
        # Load the base yaml and wrap it as a yacs CfgNode.
        with open(self.cfg_pth) as f:
            c4 = yaml.safe_load(f)
        cfg_dct = c4.copy()
        return CN(cfg_dct)
    def get_key_maps(self):
        # Key aliases (old-name -> new-name); currently none are defined.
        key_maps = {}
        return key_maps
    @staticmethod
    def get_val_from_cfg(cfg, key_str):
        # Resolve a dotted key like "mdl.sf_mdl_name" inside a nested cfg.
        key_split = key_str.split(".")
        d = cfg
        for k in key_split[:-1]:
            d = d[k]
        return d[key_split[-1]]
    def create_from_dict(self, dct: Dict[str, Any], prefix: str, cfg: CN):
        """
        Helper function to create yacs config from dictionary
        """
        dct_cfg = CN(dct, new_allowed=True)
        prefix_list = prefix.split(".")
        d = cfg
        # Walk (creating if needed) intermediate nodes down to the parent
        # of the final prefix component.
        for pref in prefix_list[:-1]:
            assert isinstance(d, CN)
            if pref not in d:
                setattr(d, pref, CN())
            d = d[pref]
        # Merge any pre-existing node at the target key before replacing it.
        if hasattr(d, prefix_list[-1]):
            old_dct_cfg = d[prefix_list[-1]]
            dct_cfg.merge_from_other_cfg(old_dct_cfg)
        setattr(d, prefix_list[-1], dct_cfg)
        return cfg
    @staticmethod
    def update_one_full_key(cfg: CN, dct, full_key, val=None):
        # Set cfg[full_key] (dotted path) to dct[full_key], or to `val` when
        # given, refusing unknown keys and type mismatches.
        # NOTE(review): val=None cannot be used to set an explicit None; it
        # falls back to reading dct[full_key].
        if cfg.key_is_deprecated(full_key):
            return
        if cfg.key_is_renamed(full_key):
            cfg.raise_key_rename_error(full_key)
        if val is None:
            assert full_key in dct
            v = dct[full_key]
        else:
            v = val
        key_list = full_key.split(".")
        d = cfg
        for subkey in key_list[:-1]:
            # Most important statement
            assert subkey in d, f"key {full_key} doesnot exist"
            d = d[subkey]
        subkey = key_list[-1]
        # Most important statement
        assert subkey in d, f"key {full_key} doesnot exist"
        value = cfg._decode_cfg_value(v)
        # The decoded value must keep the type of the existing default.
        assert isinstance(value, type(d[subkey]))
        d[subkey] = value
        return
    def update_from_dict(
        self, cfg: CN, dct: Dict[str, Any], key_maps: Dict[str, str] = None
    ) -> CN:
        """
        Given original CfgNode (cfg) and input dictionary allows changing
        the cfg with the updated dictionary values
        Optional key_maps argument which defines a mapping between
        same keys of the cfg node. Only used for convenience
        Adapted from:
        https://github.com/rbgirshick/yacs/blob/master/yacs/config.py#L219
        """
        # Original cfg
        # root = cfg
        if key_maps is None:
            key_maps = []
        # Change the input dictionary using keymaps
        # Now it is aligned with the cfg
        full_key_list = list(dct.keys())
        for full_key in full_key_list:
            if full_key in key_maps:
                # cfg[full_key] = dct[full_key]
                self.update_one_full_key(cfg, dct, full_key)
                new_key = key_maps[full_key]
                # dct[new_key] = dct.pop(full_key)
                self.update_one_full_key(cfg, dct, new_key, val=dct[full_key])
        # Convert the cfg using dictionary input
        # for full_key, v in dct.items():
        for full_key in dct.keys():
            self.update_one_full_key(cfg, dct, full_key)
        return cfg
    @staticmethod
    def pre_proc_config(cfg: CN, dct: Dict = None):
        """
        Add any pre processing based on cfg

        Merges the SlowFast backbone and transformer-decoder yaml configs
        (chosen by mdl.sf_mdl_name / mdl.tx_dec_mdl_name, possibly
        overridden by `dct`) into cfg.sf_mdl and cfg.tx_dec.
        """
        def upd_sub_mdl(
            cfg: CN,
            sub_mdl_default_cfg: CN,
            sub_mdl_name_key: str,
            sub_mdl_file_key: str,
            sub_mdl_mapper: Dict,
            new_dct: Dict,
        ):
            # The override dict (CLI) wins over the value already in cfg.
            if new_dct is not None and sub_mdl_name_key in new_dct:
                sub_mdl_name = new_dct[sub_mdl_name_key]
            else:
                sub_mdl_name = CfgProcessor.get_val_from_cfg(cfg, sub_mdl_name_key)
            assert sub_mdl_name in sub_mdl_mapper
            sub_mdl_file = sub_mdl_mapper[sub_mdl_name]
            assert Path(sub_mdl_file).exists()
            # Record which file was used back into cfg.
            CfgProcessor.update_one_full_key(
                cfg, {sub_mdl_file_key: sub_mdl_file}, full_key=sub_mdl_file_key
            )
            sub_mdl_default_cfg.merge_from_file(sub_mdl_file)
            # Round-trip through yaml to get a plain dict view of the merge.
            sub_mdl_cfg = yaml.safe_load(sub_mdl_default_cfg.dump())
            sub_mdl_cfg_dct_keep = {k: v for k, v in sub_mdl_cfg.items()}
            return CN(sub_mdl_cfg_dct_keep)
        sf_mdl_cfg_default = get_cfg()
        cfg.sf_mdl = upd_sub_mdl(
            cfg,
            sf_mdl_cfg_default,
            "mdl.sf_mdl_name",
            "mdl.sf_mdl_cfg_file",
            sf_mdl_to_cfg_fpath_dct,
            dct,
        )
        tx_dec_default = get_default_tx_dec_cfg()
        cfg.tx_dec = upd_sub_mdl(
            cfg,
            tx_dec_default,
            "mdl.tx_dec_mdl_name",
            "mdl.tx_dec_cfg_file",
            tx_to_cfg_fpath_dct,
            dct,
        )
        return cfg
    @staticmethod
    def post_proc_config(cfg: CN):
        """
        Add any post processing based on cfg
        """
        return cfg
    @staticmethod
    def cfg_to_flat_dct(cfg: CN):
        # Flatten the nested cfg into {"a.b.c": value} for logging/W&B.
        def to_flat_dct(dct, prefix_key: str):
            def get_new_key(prefix_key, curr_key):
                if prefix_key == "":
                    return curr_key
                return prefix_key + "." + curr_key
            out_dct = {}
            for k, v in dct.items():
                if isinstance(v, dict):
                    out_dct1 = to_flat_dct(v, prefix_key=get_new_key(prefix_key, k))
                else:
                    out_dct1 = {get_new_key(prefix_key, k): v}
                out_dct.update(out_dct1)
            return out_dct
        # json round-trip converts CfgNodes into plain dicts.
        cfg_dct = json.loads(json.dumps(cfg))
        return to_flat_dct(cfg_dct, prefix_key="")
    @staticmethod
    def to_str(cfg: CN):
        # Pretty-print the cfg with 2-space indentation per nesting level.
        def _indent(s_, num_spaces):
            s = s_.split("\n")
            if len(s) == 1:
                return s_
            first = s.pop(0)
            s = [(num_spaces * " ") + line for line in s]
            s = "\n".join(s)
            s = first + "\n" + s
            return s
        r = ""
        s = []
        for k, v in sorted(cfg.items()):
            # seperator = "\n" if isinstance(v, CN) else " "
            if isinstance(v, CN):
                seperator = "\n"
                str_v = CfgProcessor.to_str(v)
            else:
                seperator = " "
                str_v = str(v)
            # NOTE(review): the two branches of this `or` are identical —
            # one side was presumably meant to test something else.
            if str_v == "" or str_v == "":
                str_v = "''"
            attr_str = "{}:{}{}".format(str(k), seperator, str_v)
            attr_str = _indent(attr_str, 2)
            s.append(attr_str)
        r += "\n".join(s)
        return r
| import json
from pathlib import Path
from yacs.config import CfgNode as CN
from utils._init_stuff import yaml
from typing import Dict, Any
from slowfast.config.defaults import get_cfg
import argparse
from fairseq.models import ARCH_CONFIG_REGISTRY, ARCH_MODEL_REGISTRY
from fairseq.models.transformer import (
DEFAULT_MAX_SOURCE_POSITIONS,
DEFAULT_MAX_TARGET_POSITIONS,
)
sf_mdl_to_cfg_fpath_dct = {
"slow_fast_nl_r50_8x8": "./configs/vsitu_mdl_cfgs/Kinetics_c2_SLOWFAST_8x8_R50.yaml",
"slow_nl_r50_8x8": "./configs/vsitu_mdl_cfgs/Kinetics_c2_SLOW_8x8_R50.yaml",
"c2d_r50_8x8": "./configs/vsitu_mdl_cfgs/Kinetics_C2D_8x8_R50.yaml",
"i3d_r50_8x8": "./configs/vsitu_mdl_cfgs/Kinetics_c2_I3D_8x8_R50.yaml",
"i3d_r50_nl_8x8": "./configs/vsitu_mdl_cfgs/Kinetics_c2_I3D_NLN_8x8_R50.yaml",
}
tx_to_cfg_fpath_dct = {
"transformer": "./configs/vsitu_tx_cfgs/transformer.yaml",
}
def get_default_tx_dec_cfg():
parser = argparse.ArgumentParser(
argument_default=argparse.SUPPRESS, allow_abbrev=False
)
ARCH_MODEL_REGISTRY["transformer"].add_args(parser)
args1 = parser.parse_known_args()[0]
ARCH_CONFIG_REGISTRY["transformer"](args1)
args1_dct = vars(args1)
args1_dct["max_source_positions"] = DEFAULT_MAX_SOURCE_POSITIONS
args1_dct["max_target_positions"] = DEFAULT_MAX_TARGET_POSITIONS
return CN(args1_dct)
class CfgProcessor:
def __init__(self, cfg_pth):
assert Path(cfg_pth).exists()
self.cfg_pth = cfg_pth
def get_vsitu_default_cfg(self):
with open(self.cfg_pth) as f:
c4 = yaml.safe_load(f)
cfg_dct = c4.copy()
return CN(cfg_dct)
def get_key_maps(self):
key_maps = {}
return key_maps
@staticmethod
def get_val_from_cfg(cfg, key_str):
key_split = key_str.split(".")
d = cfg
for k in key_split[:-1]:
d = d[k]
return d[key_split[-1]]
def create_from_dict(self, dct: Dict[str, Any], prefix: str, cfg: CN):
"""
Helper function to create yacs config from dictionary
"""
dct_cfg = CN(dct, new_allowed=True)
prefix_list = prefix.split(".")
d = cfg
for pref in prefix_list[:-1]:
assert isinstance(d, CN)
if pref not in d:
setattr(d, pref, CN())
d = d[pref]
if hasattr(d, prefix_list[-1]):
old_dct_cfg = d[prefix_list[-1]]
dct_cfg.merge_from_other_cfg(old_dct_cfg)
setattr(d, prefix_list[-1], dct_cfg)
return cfg
@staticmethod
def update_one_full_key(cfg: CN, dct, full_key, val=None):
if cfg.key_is_deprecated(full_key):
return
if cfg.key_is_renamed(full_key):
cfg.raise_key_rename_error(full_key)
if val is None:
assert full_key in dct
v = dct[full_key]
else:
v = val
key_list = full_key.split(".")
d = cfg
for subkey in key_list[:-1]:
# Most important statement
assert subkey in d, f"key {full_key} doesnot exist"
d = d[subkey]
subkey = key_list[-1]
# Most important statement
assert subkey in d, f"key {full_key} doesnot exist"
value = cfg._decode_cfg_value(v)
assert isinstance(value, type(d[subkey]))
d[subkey] = value
return
def update_from_dict(
self, cfg: CN, dct: Dict[str, Any], key_maps: Dict[str, str] = None
) -> CN:
"""
Given original CfgNode (cfg) and input dictionary allows changing
the cfg with the updated dictionary values
Optional key_maps argument which defines a mapping between
same keys of the cfg node. Only used for convenience
Adapted from:
https://github.com/rbgirshick/yacs/blob/master/yacs/config.py#L219
"""
# Original cfg
# root = cfg
if key_maps is None:
key_maps = []
# Change the input dictionary using keymaps
# Now it is aligned with the cfg
full_key_list = list(dct.keys())
for full_key in full_key_list:
if full_key in key_maps:
# cfg[full_key] = dct[full_key]
self.update_one_full_key(cfg, dct, full_key)
new_key = key_maps[full_key]
# dct[new_key] = dct.pop(full_key)
self.update_one_full_key(cfg, dct, new_key, val=dct[full_key])
# Convert the cfg using dictionary input
# for full_key, v in dct.items():
for full_key in dct.keys():
self.update_one_full_key(cfg, dct, full_key)
return cfg
@staticmethod
def pre_proc_config(cfg: CN, dct: Dict = None):
"""
Add any pre processing based on cfg
"""
def upd_sub_mdl(
cfg: CN,
sub_mdl_default_cfg: CN,
sub_mdl_name_key: str,
sub_mdl_file_key: str,
sub_mdl_mapper: Dict,
new_dct: Dict,
):
if new_dct is not None and sub_mdl_name_key in new_dct:
sub_mdl_name = new_dct[sub_mdl_name_key]
else:
sub_mdl_name = CfgProcessor.get_val_from_cfg(cfg, sub_mdl_name_key)
assert sub_mdl_name in sub_mdl_mapper
sub_mdl_file = sub_mdl_mapper[sub_mdl_name]
assert Path(sub_mdl_file).exists()
CfgProcessor.update_one_full_key(
cfg, {sub_mdl_file_key: sub_mdl_file}, full_key=sub_mdl_file_key
)
sub_mdl_default_cfg.merge_from_file(sub_mdl_file)
sub_mdl_cfg = yaml.safe_load(sub_mdl_default_cfg.dump())
sub_mdl_cfg_dct_keep = {k: v for k, v in sub_mdl_cfg.items()}
return CN(sub_mdl_cfg_dct_keep)
sf_mdl_cfg_default = get_cfg()
cfg.sf_mdl = upd_sub_mdl(
cfg,
sf_mdl_cfg_default,
"mdl.sf_mdl_name",
"mdl.sf_mdl_cfg_file",
sf_mdl_to_cfg_fpath_dct,
dct,
)
tx_dec_default = get_default_tx_dec_cfg()
cfg.tx_dec = upd_sub_mdl(
cfg,
tx_dec_default,
"mdl.tx_dec_mdl_name",
"mdl.tx_dec_cfg_file",
tx_to_cfg_fpath_dct,
dct,
)
return cfg
@staticmethod
def post_proc_config(cfg: CN):
"""
Add any post processing based on cfg
"""
return cfg
@staticmethod
def cfg_to_flat_dct(cfg: CN):
def to_flat_dct(dct, prefix_key: str):
def get_new_key(prefix_key, curr_key):
if prefix_key == "":
return curr_key
return prefix_key + "." + curr_key
out_dct = {}
for k, v in dct.items():
if isinstance(v, dict):
out_dct1 = to_flat_dct(v, prefix_key=get_new_key(prefix_key, k))
else:
out_dct1 = {get_new_key(prefix_key, k): v}
out_dct.update(out_dct1)
return out_dct
cfg_dct = json.loads(json.dumps(cfg))
return to_flat_dct(cfg_dct, prefix_key="")
@staticmethod
def to_str(cfg: CN):
def _indent(s_, num_spaces):
s = s_.split("\n")
if len(s) == 1:
return s_
first = s.pop(0)
s = [(num_spaces * " ") + line for line in s]
s = "\n".join(s)
s = first + "\n" + s
return s
r = ""
s = []
for k, v in sorted(cfg.items()):
# seperator = "\n" if isinstance(v, CN) else " "
if isinstance(v, CN):
seperator = "\n"
str_v = CfgProcessor.to_str(v)
else:
seperator = " "
str_v = str(v)
if str_v == "" or str_v == "":
str_v = "''"
attr_str = "{}:{}{}".format(str(k), seperator, str_v)
attr_str = _indent(attr_str, 2)
s.append(attr_str)
r += "\n".join(s)
return r | en | 0.547582 | Helper function to create yacs config from dictionary # Most important statement # Most important statement Given original CfgNode (cfg) and input dictionary allows changing the cfg with the updated dictionary values Optional key_maps argument which defines a mapping between same keys of the cfg node. Only used for convenience Adapted from: https://github.com/rbgirshick/yacs/blob/master/yacs/config.py#L219 # Original cfg # root = cfg # Change the input dictionary using keymaps # Now it is aligned with the cfg # cfg[full_key] = dct[full_key] # dct[new_key] = dct.pop(full_key) # Convert the cfg using dictionary input # for full_key, v in dct.items(): Add any pre processing based on cfg Add any post processing based on cfg # seperator = "\n" if isinstance(v, CN) else " " | 1.840313 | 2 |
venv/lib/python3.9/site-packages/cv2/version.py | fernandoapem/CNT-Stream | 0 | 6620527 | <reponame>fernandoapem/CNT-Stream
opencv_version = "4.5.3.56"
contrib = True
headless = True
ci_build = True | opencv_version = "4.5.3.56"
contrib = True
headless = True
ci_build = True | none | 1 | 1.066112 | 1 | |
pycalc/net_sr.py | Hirico/supic | 4 | 6620528 | # --utf8--#
import tensorflow as tf
import numpy as np
import layer_sr as layer
import argument_sr as arg
def upsample_filt(size):
    """Build a (size, size) 2D bilinear interpolation kernel.

    Values peak at the kernel centre and fall off linearly toward the
    edges — the standard bilinear initialisation for upsampling filters.
    """
    factor = (size + 1) // 2
    center = factor - 1 if size % 2 == 1 else factor - 0.5
    rows, cols = np.ogrid[:size, :size]
    # Outer product of the two 1D triangular profiles via broadcasting.
    return (1 - abs(rows - center) / factor) * (1 - abs(cols - center) / factor)
def bilinear_upsample_weights(channel, number_of_classes):
    """Initial weights for a transposed convolution: the bilinear kernel on
    matching channel pairs, zeros everywhere else.

    NOTE(review): `channel` is used as the spatial kernel size and
    `number_of_classes` as the channel count — the names look swapped
    relative to their roles; verify against callers before renaming.
    """
    kernel = upsample_filt(channel)
    weights = np.zeros((channel,
                        channel,
                        number_of_classes,
                        number_of_classes), dtype=np.float32)
    for idx in range(number_of_classes):
        weights[:, :, idx, idx] = kernel
    return weights
def featureExtraction(low_res_input, level):
    """Feature-extraction branch for one LapSRN pyramid level.

    Entry conv + leaky ReLU, then `depth` bias-free conv blocks, then a 2x
    transposed convolution (bilinear-initialised). Returns the upsampled
    feature map for this level.
    """
    filters = arg.options.conv_f
    filters_tranpose = arg.options.conv_ft
    channel = arg.options.conv_n
    depth = arg.options.depth
    wd = arg.options.weight_decay
    with tf.name_scope("input" + str(level)):
        # Entry convolution followed by a leaky ReLU.
        conv_input = layer.conv2d(low_res_input, filters, filters, channel,
                                  level, weight_dacay=wd, name='conv_input')
        features = layer.leaky_relu(conv_input, leak=-0.2, name='lrelu_input')
    with tf.name_scope("deep_cnn" + str(level)):
        # Stack of `depth` conv + leaky-ReLU blocks without bias terms.
        for i in range(depth):
            conv_cnn = layer.conv2d(features, filters, filters, channel, level,
                                    weight_dacay=wd, isBias=False,
                                    name="block_conv_" + str(i + 1))
            features = layer.leaky_relu(conv_cnn, -0.2,
                                        name="block_lrelu_" + str(i + 1))
    with tf.name_scope("up_sampling" + str(level)):
        # 2x transposed convolution, bilinear-initialised weights.
        deconv_up = layer.deconv2d(features,
                                   bilinear_upsample_weights(filters_tranpose, channel),
                                   [2, 2], level, weight_dacay=wd,
                                   name="up_sampling")
        up_samping_output = layer.leaky_relu(deconv_up, -0.2,
                                             name="up_samping_output")
    return up_samping_output
def imageReconstruction(low_res_input, conv_up, level):
    """Image-reconstruction branch for one LapSRN pyramid level.

    Upsamples the incoming image 2x with a bilinear-initialised transposed
    convolution and adds a residual predicted from the feature branch.
    """
    filters_tranpose = arg.options.conv_ft
    channel = arg.options.output_channel
    filters = arg.options.conv_f
    wd = arg.options.weight_decay
    with tf.name_scope("Reconstruction" + str(level)):
        # 2x upsampling of the image itself.
        deconv_image = layer.deconv2d(low_res_input,
                                      bilinear_upsample_weights(filters_tranpose, channel),
                                      [2, 2], level, weight_dacay=wd,
                                      name="deconv_image")
        # Residual correction predicted from the extracted features.
        conv_res = layer.conv2d(conv_up, filters, filters, channel, level,
                                isBias=False, name="conv_res")
        HR = deconv_image + conv_res
    return HR
def get_LasSRN(low_res_input):
    """Build the three-level LapSRN graph.

    Returns the (2x, 4x, 8x) super-resolved predictions, each level feeding
    the next: features cascade through featureExtraction, images through
    imageReconstruction.
    """
    features = low_res_input
    image = low_res_input
    outputs = []
    for level in (1, 2, 3):
        features = featureExtraction(features, level)
        image = imageReconstruction(image, features, level)
        outputs.append(image)
    HR_2, HR_4, HR_8 = outputs
    return HR_2, HR_4, HR_8
def L1_Charbonnier_loss(predict, real):
    """Charbonnier penalty — a smooth, differentiable L1 surrogate.

    Computes mean(sqrt((predict - real)^2 + eps)) over all elements;
    eps keeps the gradient finite at zero error.
    """
    eps = 1e-6
    diff = tf.add(predict, -real)
    return tf.reduce_mean(tf.sqrt(diff * diff + eps))
def weight_decay_losses():
    """Sum all collected weight-decay terms and return their absolute value."""
    total = tf.add_n(tf.get_collection('weight_losses'), name='total_loss')
    return tf.abs(total)
def get_bicubic(low_res_input):
    """Bicubic baselines matching the three LapSRN output scales.

    Resizes the input to 1/4, 1/2 and full target resolution — assumes
    arg.options.height/width are the full-resolution dimensions (TODO
    confirm against callers).
    """
    full_h = arg.options.height
    full_w = arg.options.width
    resized = []
    for divisor in (4, 2, 1):
        target = [int(full_h / divisor), int(full_w / divisor)]
        resized.append(tf.image.resize_images(low_res_input, target,
                                              method=tf.image.ResizeMethod.BICUBIC))
    HR_2, HR_4, HR_8 = resized
    return HR_2, HR_4, HR_8
if __name__ == '__main__':
    # Manual sanity checks: print a sample weight tensor and verify
    # integer floor division behaviour.
    print(bilinear_upsample_weights(64,1))
    print(7//2)
| # --utf8--#
import tensorflow as tf
import numpy as np
import layer_sr as layer
import argument_sr as arg
def upsample_filt(size):
"""
Make a 2D bilinear kernel suitable for upsampling of the given (h, w) size.
"""
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
def bilinear_upsample_weights(channel, number_of_classes):
"""
Create weights matrix for transposed convolution with bilinear filter
initialization.
"""
weights = np.zeros((channel,
channel,
number_of_classes,
number_of_classes), dtype=np.float32)
upsample_kernel = upsample_filt(channel)
for i in range(number_of_classes):
weights[:, :, i, i] = upsample_kernel
return weights
def featureExtraction(low_res_input, level):
filters = arg.options.conv_f
filters_tranpose = arg.options.conv_ft
channel = arg.options.conv_n
depth = arg.options.depth
wd = arg.options.weight_decay
"""
特征提取层
"""
with tf.name_scope("input" + str(level)):
"""
input layer
"""
conv_input = layer.conv2d(low_res_input, filters, filters, channel, level, weight_dacay=wd, name='conv_input')
lrelu_input = layer.leaky_relu(conv_input, leak=-0.2, name='lrelu_input')
with tf.name_scope("deep_cnn" + str(level)):
"""
cnn s ,depth is given by options
"""
last_lrelu = lrelu_input
for i in range(depth):
conv_cnn = layer.conv2d(last_lrelu, filters, filters, channel, level, weight_dacay=wd, isBias=False,
name="block_conv_" + str(i + 1))
lrelu_cnn = layer.leaky_relu(conv_cnn, -0.2, name="block_lrelu_" + str(i + 1))
last_lrelu = lrelu_cnn
with tf.name_scope("up_sampling" + str(level)):
"""
up_sampling layer
"""
deconv_up = layer.deconv2d(last_lrelu, bilinear_upsample_weights(filters_tranpose, channel), [2, 2], level,
weight_dacay=wd, name="up_sampling")
up_samping_output = layer.leaky_relu(deconv_up, -0.2, name="up_samping_output")
return up_samping_output
def imageReconstruction(low_res_input, conv_up, level):
filters_tranpose = arg.options.conv_ft
channel = arg.options.output_channel
filters = arg.options.conv_f
wd = arg.options.weight_decay
"""
图像重构层
"""
with tf.name_scope("Reconstruction" + str(level)):
"""
image reconstruction
"""
deconv_image = layer.deconv2d(low_res_input, bilinear_upsample_weights(filters_tranpose, channel), [2, 2],
level,
weight_dacay=wd, name="deconv_image")
conv_res = layer.conv2d(conv_up, filters, filters, channel, level, isBias=False, name="conv_res")
HR = deconv_image + conv_res
return HR
def get_LasSRN(low_res_input):
"""
获得2x 4x 8x 三种预测结果
Args:
low_res_input:
options:
Returns:
"""
convt_F1 = featureExtraction(low_res_input, 1)
HR_2 = imageReconstruction(low_res_input, convt_F1, 1)
convt_F2 = featureExtraction(convt_F1, 2)
HR_4 = imageReconstruction(HR_2, convt_F2, 2)
convt_F3 = featureExtraction(convt_F2, 3)
HR_8 = imageReconstruction(HR_4, convt_F3, 3)
return HR_2, HR_4, HR_8
def L1_Charbonnier_loss(predict, real):
"""
损失函数
Args:
predict: 预测结果
real: 真实结果
Returns:
损失代价
"""
eps = 1e-6
diff = tf.add(predict, -real)
error = tf.sqrt(diff * diff + eps)
loss = tf.reduce_mean(error)
return loss
def weight_decay_losses():
"""
Returns: 返回损失权重的值
"""
ll = tf.abs(tf.add_n(tf.get_collection('weight_losses'), name='total_loss'))
return ll
def get_bicubic(low_res_input):
image_height = arg.options.height
image_width = arg.options.width
HR_2 = tf.image.resize_images(low_res_input, [int(image_height / 4), int(image_width / 4)],
method=tf.image.ResizeMethod.BICUBIC)
HR_4 = tf.image.resize_images(low_res_input, [int(image_height / 2), int(image_width / 2)],
method=tf.image.ResizeMethod.BICUBIC)
HR_8 = tf.image.resize_images(low_res_input, [int(image_height ), int(image_width)],
method=tf.image.ResizeMethod.BICUBIC)
return HR_2,HR_4,HR_8
if __name__ == '__main__':
print(bilinear_upsample_weights(64,1))
print(7//2)
| en | 0.457048 | # --utf8--# Make a 2D bilinear kernel suitable for upsampling of the given (h, w) size. Create weights matrix for transposed convolution with bilinear filter initialization. 特征提取层 input layer cnn s ,depth is given by options up_sampling layer 图像重构层 image reconstruction 获得2x 4x 8x 三种预测结果 Args: low_res_input: options: Returns: 损失函数 Args: predict: 预测结果 real: 真实结果 Returns: 损失代价 Returns: 返回损失权重的值 | 2.780053 | 3 |
driver/limits.py | nitinkaveriappa/downward | 4 | 6620529 | # -*- coding: utf-8 -*-
from __future__ import division, print_function
from . import util
import math
import re
try:
import resource
except ImportError:
resource = None
import sys
RESOURCE_MODULE_MISSING_MSG = (
"The 'resource' module is not available on your platform. "
"Therefore, setting time or memory limits, and running "
"portfolios is not possible.")
def can_set_limits():
    """Report whether the platform provides the `resource` module
    (i.e. whether time/memory rlimits can be applied at all)."""
    return bool(resource)
def _set_limit(kind, soft, hard=None):
if hard is None:
hard = soft
try:
resource.setrlimit(kind, (soft, hard))
except (OSError, ValueError) as err:
print(
"Limit for {} could not be set to ({},{}) ({}). "
"Previous limit: {}".format(
kind, soft, hard, err, resource.getrlimit(kind)),
file=sys.stderr)
def _get_soft_and_hard_time_limits(internal_limit, external_hard_limit):
soft_limit = min(int(math.ceil(internal_limit)), external_hard_limit)
hard_limit = min(soft_limit + 1, external_hard_limit)
print("time limit %.2f -> (%d, %d)" %
(internal_limit, soft_limit, hard_limit))
sys.stdout.flush()
assert soft_limit <= hard_limit
return soft_limit, hard_limit
def set_time_limit(time_limit):
    """Install a CPU-time rlimit of `time_limit` seconds; no-op for None.

    Exceeding the soft limit delivers SIGXCPU, the hard limit SIGKILL.
    """
    if time_limit is None:
        return
    assert can_set_limits()
    # Never attempt to raise the externally imposed hard limit.
    hard_cap = resource.getrlimit(resource.RLIMIT_CPU)[1]
    if hard_cap == resource.RLIM_INFINITY:
        hard_cap = float("inf")
    assert time_limit <= hard_cap, (time_limit, hard_cap)
    _set_limit(resource.RLIMIT_CPU,
               *_get_soft_and_hard_time_limits(time_limit, hard_cap))
def set_memory_limit(memory):
    """*memory* must be given in bytes or None."""
    if memory is not None:
        assert can_set_limits()
        _set_limit(resource.RLIMIT_AS, memory)
def convert_to_mb(num_bytes):
    """Convert a byte count to (fractional) mebibytes."""
    return num_bytes / (1 << 20)
def _get_external_limit(kind):
    """Return the externally imposed limit of the given kind, or None.

    Prefers the soft limit, falls back to the hard limit; both may be
    RLIM_INFINITY, which maps to None.
    """
    if not can_set_limits():
        return None
    for value in resource.getrlimit(kind):
        if value != resource.RLIM_INFINITY:
            return value
    return None
def _get_external_time_limit():
    """Return external soft CPU limit in seconds or None if not set."""
    return _get_external_limit(resource.RLIMIT_CPU) if can_set_limits() else None
def _get_external_memory_limit():
    """Return external soft memory limit in bytes or None if not set."""
    return _get_external_limit(resource.RLIMIT_AS) if can_set_limits() else None
def _get_time_limit_in_seconds(limit, parser):
match = re.match(r"^(\d+)(s|m|h)?$", limit, flags=re.I)
if not match:
parser.error("malformed time limit parameter: {}".format(limit))
time = int(match.group(1))
suffix = match.group(2)
if suffix is not None:
suffix = suffix.lower()
if suffix == "m":
time *= 60
elif suffix == "h":
time *= 3600
return time
def _get_memory_limit_in_bytes(limit, parser):
match = re.match(r"^(\d+)(k|m|g)?$", limit, flags=re.I)
if not match:
parser.error("malformed memory limit parameter: {}".format(limit))
memory = int(match.group(1))
suffix = match.group(2)
if suffix is not None:
suffix = suffix.lower()
if suffix == "k":
memory *= 1024
elif suffix is None or suffix == "m":
memory *= 1024 * 1024
elif suffix == "g":
memory *= 1024 * 1024 * 1024
return memory
def set_time_limit_in_seconds(parser, args, component):
    """Normalise args.<component>_time_limit from a string to seconds in place."""
    param = "{}_time_limit".format(component)
    raw = getattr(args, param)
    if raw is not None:
        setattr(args, param, _get_time_limit_in_seconds(raw, parser))
def set_memory_limit_in_bytes(parser, args, component):
    """Normalise args.<component>_memory_limit from a string to bytes in place."""
    param = "{}_memory_limit".format(component)
    raw = getattr(args, param)
    if raw is not None:
        setattr(args, param, _get_memory_limit_in_bytes(raw, parser))
def get_memory_limit(component_limit, overall_limit):
    """
    Return the lowest of the following memory limits:
    component, overall, external soft, external hard.
    """
    candidates = [limit
                  for limit in (component_limit, overall_limit,
                                _get_external_memory_limit())
                  if limit is not None]
    return min(candidates) if candidates else None
def get_time_limit(component_limit, overall_limit):
    """
    Return the minimum time limit imposed by any internal and external limit.

    Overall and external limits are absolute, so already-elapsed time is
    subtracted (clamped at zero); the component limit applies as-is.
    Without the `resource` module, any requested limit is a fatal error.
    """
    if not can_set_limits():
        if component_limit is None and overall_limit is None:
            return None
        sys.exit(RESOURCE_MODULE_MISSING_MSG)
    elapsed_time = util.get_elapsed_time()
    limits = []
    if component_limit is not None:
        limits.append(component_limit)
    for absolute in (overall_limit, _get_external_time_limit()):
        if absolute is not None:
            limits.append(max(0, absolute - elapsed_time))
    return min(limits) if limits else None
| # -*- coding: utf-8 -*-
from __future__ import division, print_function
from . import util
import math
import re
try:
import resource
except ImportError:
resource = None
import sys
RESOURCE_MODULE_MISSING_MSG = (
"The 'resource' module is not available on your platform. "
"Therefore, setting time or memory limits, and running "
"portfolios is not possible.")
def can_set_limits():
return resource is not None
def _set_limit(kind, soft, hard=None):
if hard is None:
hard = soft
try:
resource.setrlimit(kind, (soft, hard))
except (OSError, ValueError) as err:
print(
"Limit for {} could not be set to ({},{}) ({}). "
"Previous limit: {}".format(
kind, soft, hard, err, resource.getrlimit(kind)),
file=sys.stderr)
def _get_soft_and_hard_time_limits(internal_limit, external_hard_limit):
soft_limit = min(int(math.ceil(internal_limit)), external_hard_limit)
hard_limit = min(soft_limit + 1, external_hard_limit)
print("time limit %.2f -> (%d, %d)" %
(internal_limit, soft_limit, hard_limit))
sys.stdout.flush()
assert soft_limit <= hard_limit
return soft_limit, hard_limit
def set_time_limit(time_limit):
if time_limit is None:
return
assert can_set_limits()
# Don't try to raise the hard limit.
_, external_hard_limit = resource.getrlimit(resource.RLIMIT_CPU)
if external_hard_limit == resource.RLIM_INFINITY:
external_hard_limit = float("inf")
assert time_limit <= external_hard_limit, (time_limit, external_hard_limit)
# Soft limit reached --> SIGXCPU.
# Hard limit reached --> SIGKILL.
soft_limit, hard_limit = _get_soft_and_hard_time_limits(
time_limit, external_hard_limit)
_set_limit(resource.RLIMIT_CPU, soft_limit, hard_limit)
def set_memory_limit(memory):
"""*memory* must be given in bytes or None."""
if memory is None:
return
assert can_set_limits()
_set_limit(resource.RLIMIT_AS, memory)
def convert_to_mb(num_bytes):
return num_bytes / (1024 * 1024)
def _get_external_limit(kind):
if not can_set_limits():
return None
# Limits are either positive values or -1 (RLIM_INFINITY).
soft, hard = resource.getrlimit(kind)
if soft != resource.RLIM_INFINITY:
return soft
elif hard != resource.RLIM_INFINITY:
return hard
else:
return None
def _get_external_time_limit():
"""Return external soft CPU limit in seconds or None if not set."""
if not can_set_limits():
return None
return _get_external_limit(resource.RLIMIT_CPU)
def _get_external_memory_limit():
"""Return external soft memory limit in bytes or None if not set."""
if not can_set_limits():
return None
return _get_external_limit(resource.RLIMIT_AS)
def _get_time_limit_in_seconds(limit, parser):
match = re.match(r"^(\d+)(s|m|h)?$", limit, flags=re.I)
if not match:
parser.error("malformed time limit parameter: {}".format(limit))
time = int(match.group(1))
suffix = match.group(2)
if suffix is not None:
suffix = suffix.lower()
if suffix == "m":
time *= 60
elif suffix == "h":
time *= 3600
return time
def _get_memory_limit_in_bytes(limit, parser):
match = re.match(r"^(\d+)(k|m|g)?$", limit, flags=re.I)
if not match:
parser.error("malformed memory limit parameter: {}".format(limit))
memory = int(match.group(1))
suffix = match.group(2)
if suffix is not None:
suffix = suffix.lower()
if suffix == "k":
memory *= 1024
elif suffix is None or suffix == "m":
memory *= 1024 * 1024
elif suffix == "g":
memory *= 1024 * 1024 * 1024
return memory
def set_time_limit_in_seconds(parser, args, component):
param = component + "_time_limit"
limit = getattr(args, param)
if limit is not None:
setattr(args, param, _get_time_limit_in_seconds(limit, parser))
def set_memory_limit_in_bytes(parser, args, component):
param = component + "_memory_limit"
limit = getattr(args, param)
if limit is not None:
setattr(args, param, _get_memory_limit_in_bytes(limit, parser))
def get_memory_limit(component_limit, overall_limit):
"""
Return the lowest of the following memory limits:
component, overall, external soft, external hard.
"""
limits = [component_limit, overall_limit, _get_external_memory_limit()]
limits = [limit for limit in limits if limit is not None]
return min(limits) if limits else None
def get_time_limit(component_limit, overall_limit):
"""
Return the minimum time limit imposed by any internal and external limit.
"""
if can_set_limits():
elapsed_time = util.get_elapsed_time()
external_limit = _get_external_time_limit()
limits = []
if component_limit is not None:
limits.append(component_limit)
if overall_limit is not None:
limits.append(max(0, overall_limit - elapsed_time))
if external_limit is not None:
limits.append(max(0, external_limit - elapsed_time))
return min(limits) if limits else None
elif component_limit is None and overall_limit is None:
return None
else:
sys.exit(RESOURCE_MODULE_MISSING_MSG)
| en | 0.741049 | # -*- coding: utf-8 -*- # Don't try to raise the hard limit. # Soft limit reached --> SIGXCPU. # Hard limit reached --> SIGKILL. *memory* must be given in bytes or None. # Limits are either positive values or -1 (RLIM_INFINITY). Return external soft CPU limit in seconds or None if not set. Return external soft memory limit in bytes or None if not set. Return the lowest of the following memory limits: component, overall, external soft, external hard. Return the minimum time limit imposed by any internal and external limit. | 2.506229 | 3 |
src/Schedulers/BillsHistoryScheduler.py | andreisalvador/bills-management-telegram-bot | 0 | 6620530 | <reponame>andreisalvador/bills-management-telegram-bot
import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
from dateutil.relativedelta import relativedelta
from sqlalchemy import extract
from sqlalchemy.orm import load_only, Load
from src.Data.Database import session, Bill, BillHistory
from src.Enums.PeriodEnum import PeriodEnum
from src.Utils.BillsUtils import create_new_bill_history_adding_months
def create_bills_history_for_monthly_bills():
    # Create next month's BillHistory rows for every monthly bill that does
    # not already have one for that month/year.
    today = datetime.datetime.now()
    next_date = today + relativedelta(months=1)
    month = next_date.month
    year = next_date.year
    # Bills that already have a history entry expiring in the target month.
    subquery = session.query(BillHistory.bill_id).options(load_only("bill_id")) \
        .filter(extract('month', BillHistory.expiration_date) == month,
                extract('year', BillHistory.expiration_date) == year)
    # Monthly bills with some history but no entry for the target month yet.
    monthly_bills = session.query(Bill).join(BillHistory, Bill.id == BillHistory.bill_id) \
        .options(Load(Bill).load_only(Bill.id, Bill.expiration_day)) \
        .filter(Bill.expiration_period == PeriodEnum.Monthly, Bill.id.not_in(subquery)).all()
    # Build a history entry one month ahead for each qualifying bill.
    bills_histories = [create_new_bill_history_adding_months(bill.id, bill.expiration_day, 1) for
                       bill
                       in monthly_bills]
    session.bulk_save_objects(bills_histories)
    session.commit()
class BillsHistoryScheduler:
    """Blocking scheduler that generates monthly bill histories."""

    def __init__(self):
        # BlockingScheduler runs jobs in the foreground; start_jobs() blocks.
        self.__scheduler = BlockingScheduler()

    def start_jobs(self):
        # Run the generator on the last day of every month, then block.
        self.__scheduler.add_job(create_bills_history_for_monthly_bills, trigger='cron', year='*', month='*',
                                 day='last')
        self.__scheduler.start()
| import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
from dateutil.relativedelta import relativedelta
from sqlalchemy import extract
from sqlalchemy.orm import load_only, Load
from src.Data.Database import session, Bill, BillHistory
from src.Enums.PeriodEnum import PeriodEnum
from src.Utils.BillsUtils import create_new_bill_history_adding_months
def create_bills_history_for_monthly_bills():
today = datetime.datetime.now()
next_date = today + relativedelta(months=1)
month = next_date.month
year = next_date.year
subquery = session.query(BillHistory.bill_id).options(load_only("bill_id")) \
.filter(extract('month', BillHistory.expiration_date) == month,
extract('year', BillHistory.expiration_date) == year)
monthly_bills = session.query(Bill).join(BillHistory, Bill.id == BillHistory.bill_id) \
.options(Load(Bill).load_only(Bill.id, Bill.expiration_day)) \
.filter(Bill.expiration_period == PeriodEnum.Monthly, Bill.id.not_in(subquery)).all()
bills_histories = [create_new_bill_history_adding_months(bill.id, bill.expiration_day, 1) for
bill
in monthly_bills]
session.bulk_save_objects(bills_histories)
session.commit()
class BillsHistoryScheduler:
def __init__(self):
self.__scheduler = BlockingScheduler()
def start_jobs(self):
self.__scheduler.add_job(create_bills_history_for_monthly_bills, trigger='cron', year='*', month='*',
day='last')
self.__scheduler.start() | none | 1 | 2.347321 | 2 | |
finetune/errors.py | eric-erki/finetune | 0 | 6620531 | <filename>finetune/errors.py
"""
Placeholder for custom errors
""" | <filename>finetune/errors.py
"""
Placeholder for custom errors
""" | en | 0.537864 | Placeholder for custom errors | 1.182051 | 1 |
tests/test_backends.py | dropseed/django-oauth-login | 6 | 6620532 | import pytest
from django.contrib.auth.models import AnonymousUser
from oauthlogin.providers import OAuthProvider, OAuthToken, OAuthUser
class DummyProvider(OAuthProvider):
    """Test double: returns canned token/user data and skips state checks."""

    def get_oauth_token(self, *, code, request):
        # Ignores `code` entirely; always yields the same access token.
        return OAuthToken(
            access_token="dummy_token",
        )

    def get_oauth_user(self, *, oauth_token):
        # Fixed identity so test assertions are deterministic.
        return OAuthUser(
            id="dummy_user_id",
            username="dummy_username",
            email="<EMAIL>",
        )

    def check_request_state(self, *, request):
        """Don't check the state"""
        return
@pytest.mark.django_db
def test_single_backend(client, settings):
    # OAuth callback flow with exactly one authentication backend configured.
    settings.OAUTH_LOGIN_PROVIDERS = {
        "dummy": {
            "class": "test_backends.DummyProvider",
            "kwargs": {
                "client_id": "dummy_client_id",
                "client_secret": "dummy_client_secret",
                "scope": "dummy_scope",
            },
        }
    }
    settings.AUTHENTICATION_BACKENDS = [
        "django.contrib.auth.backends.ModelBackend",
    ]
    # Callback should create/log in the user and redirect home.
    response = client.get("/oauth/dummy/callback/?code=test_code&state=dummy_state")
    assert response.status_code == 302
    assert response.url == "/"
    # Now logged in
    response = client.get("/")
    assert response.context["user"].is_authenticated
@pytest.mark.django_db
def test_multiple_backends(client, settings):
    # Same flow, but with more than one entry in AUTHENTICATION_BACKENDS —
    # presumably exercising the code path where a login backend must be
    # chosen explicitly (the two identical entries look deliberate; confirm).
    settings.OAUTH_LOGIN_PROVIDERS = {
        "dummy": {
            "class": "test_backends.DummyProvider",
            "kwargs": {
                "client_id": "dummy_client_id",
                "client_secret": "dummy_client_secret",
                "scope": "dummy_scope",
            },
        }
    }
    settings.AUTHENTICATION_BACKENDS = [
        "django.contrib.auth.backends.ModelBackend",
        "django.contrib.auth.backends.ModelBackend",
    ]
    response = client.get("/oauth/dummy/callback/?code=test_code&state=dummy_state")
    assert response.status_code == 302
    assert response.url == "/"
    # Now logged in
    response = client.get("/")
    assert response.context["user"].is_authenticated
| import pytest
from django.contrib.auth.models import AnonymousUser
from oauthlogin.providers import OAuthProvider, OAuthToken, OAuthUser
class DummyProvider(OAuthProvider):
def get_oauth_token(self, *, code, request):
return OAuthToken(
access_token="dummy_token",
)
def get_oauth_user(self, *, oauth_token):
return OAuthUser(
id="dummy_user_id",
username="dummy_username",
email="<EMAIL>",
)
def check_request_state(self, *, request):
"""Don't check the state"""
return
@pytest.mark.django_db
def test_single_backend(client, settings):
settings.OAUTH_LOGIN_PROVIDERS = {
"dummy": {
"class": "test_backends.DummyProvider",
"kwargs": {
"client_id": "dummy_client_id",
"client_secret": "dummy_client_secret",
"scope": "dummy_scope",
},
}
}
settings.AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
]
response = client.get("/oauth/dummy/callback/?code=test_code&state=dummy_state")
assert response.status_code == 302
assert response.url == "/"
# Now logged in
response = client.get("/")
assert response.context["user"].is_authenticated
@pytest.mark.django_db
def test_multiple_backends(client, settings):
settings.OAUTH_LOGIN_PROVIDERS = {
"dummy": {
"class": "test_backends.DummyProvider",
"kwargs": {
"client_id": "dummy_client_id",
"client_secret": "dummy_client_secret",
"scope": "dummy_scope",
},
}
}
settings.AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"django.contrib.auth.backends.ModelBackend",
]
response = client.get("/oauth/dummy/callback/?code=test_code&state=dummy_state")
assert response.status_code == 302
assert response.url == "/"
# Now logged in
response = client.get("/")
assert response.context["user"].is_authenticated
| en | 0.828594 | Don't check the state # Now logged in # Now logged in | 2.238014 | 2 |
packages/structural_dhcp_mriqc/structural_dhcp_mriqc/data/getters.py | amakropoulos/structural-pipeline-measures | 2 | 6620533 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# @Author: oesteban
# @Date: 2016-01-05 11:29:40
# @Email: <EMAIL>
# @Last modified by: oesteban
# @Last Modified time: 2016-04-06 16:33:35
"""
Data grabbers
"""
from .utils import _get_dataset_dir, _fetch_file
def get_brainweb_1mm_normal(data_dir=None, url=None, resume=True, verbose=1):
    """Download and load the BIDS-fied brainweb 1mm normal

    :param str data_dir: path of the data directory. Used to force data storage
    in a non-standard location.
    :param str url: download URL of the dataset. Overwrite the default URL.
    """
    if url is None:
        url = "https://googledrive.com/host/0BxI12kyv2olZZkhrUzZLbExKRzQ"
    data_dir = _get_dataset_dir('brainweb', data_dir=data_dir, verbose=verbose)
    fetched = _fetch_file(url, data_dir, filetype='tar', resume=resume,
                          verbose=verbose,
                          md5sum='384263fbeadc8e2cca92ced98f224c4b')
    # _fetch_file reports success; return the dataset directory only then.
    return data_dir if fetched else None
def get_ds003_downsampled(data_dir=None, url=None, resume=True, verbose=1):
    """Download and load the BIDS-fied ds003_downsampled

    :param str data_dir: path of the data directory. Used to force data storage
    in a non-standard location.
    :param str url: download URL of the dataset. Overwrite the default URL.
    """
    if url is None:
        url = "https://googledrive.com/host/0B2JWN60ZLkgkMEw4bW5VUUpSdFU/ds003_downsampled.tar"
    data_dir = _get_dataset_dir('ds003_downsampled', data_dir=data_dir,
                                verbose=verbose)
    fetched = _fetch_file(url, data_dir, filetype='tar', resume=resume,
                          verbose=verbose)
    return data_dir if fetched else None
def get_mni_template(data_dir=None, url=None, resume=True, verbose=1):
    """Download and load the necessary files from the mni template

    :param str data_dir: path of the data directory. Used to force data storage
    in a non-standard location.
    :param str url: download URL of the dataset. Overwrite the default URL.
    """
    if url is None:
        url = "http://googledrive.com/host/0BxI12kyv2olZdzRDUnBPYWZGZk0"
    data_dir = _get_dataset_dir('mni_template', data_dir=data_dir,
                                verbose=verbose)
    fetched = _fetch_file(url, data_dir, filetype='tar', resume=resume,
                          verbose=verbose,
                          md5sum='debfa882b8c301cd6d75dd769e73f727')
    return data_dir if fetched else None
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# @Author: oesteban
# @Date: 2016-01-05 11:29:40
# @Email: <EMAIL>
# @Last modified by: oesteban
# @Last Modified time: 2016-04-06 16:33:35
"""
Data grabbers
"""
from .utils import _get_dataset_dir, _fetch_file
def get_brainweb_1mm_normal(data_dir=None, url=None, resume=True, verbose=1):
"""Download and load the BIDS-fied brainweb 1mm normal
:param str data_dir: path of the data directory. Used to force data storage
in a non-standard location.
:param str url: download URL of the dataset. Overwrite the default URL.
"""
if url is None:
url = "https://googledrive.com/host/0BxI12kyv2olZZkhrUzZLbExKRzQ"
dataset_name = 'brainweb'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
if _fetch_file(url, data_dir, filetype='tar', resume=resume, verbose=verbose,
md5sum='384263fbeadc8e2cca92ced98f224c4b'):
return data_dir
else:
return None
def get_ds003_downsampled(data_dir=None, url=None, resume=True, verbose=1):
"""Download and load the BIDS-fied ds003_downsampled
:param str data_dir: path of the data directory. Used to force data storage
in a non-standard location.
:param str url: download URL of the dataset. Overwrite the default URL.
"""
if url is None:
url = "https://googledrive.com/host/0B2JWN60ZLkgkMEw4bW5VUUpSdFU/ds003_downsampled.tar"
dataset_name = 'ds003_downsampled'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
if _fetch_file(url, data_dir, filetype='tar', resume=resume, verbose=verbose):
return data_dir
else:
return None
def get_mni_template(data_dir=None, url=None, resume=True, verbose=1):
"""Download and load the necessary files from the mni template
:param str data_dir: path of the data directory. Used to force data storage
in a non-standard location.
:param str url: download URL of the dataset. Overwrite the default URL.
"""
if url is None:
url = "http://googledrive.com/host/0BxI12kyv2olZdzRDUnBPYWZGZk0"
dataset_name = 'mni_template'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
if _fetch_file(url, data_dir, filetype='tar', resume=resume, verbose=verbose,
md5sum='debfa882b8c301cd6d75dd769e73f727'):
return data_dir
else:
return None
| en | 0.507753 | #!/usr/bin/env python # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # # @Author: oesteban # @Date: 2016-01-05 11:29:40 # @Email: <EMAIL> # @Last modified by: oesteban # @Last Modified time: 2016-04-06 16:33:35 Data grabbers Download and load the BIDS-fied brainweb 1mm normal :param str data_dir: path of the data directory. Used to force data storage in a non-standard location. :param str url: download URL of the dataset. Overwrite the default URL. Download and load the BIDS-fied ds003_downsampled :param str data_dir: path of the data directory. Used to force data storage in a non-standard location. :param str url: download URL of the dataset. Overwrite the default URL. Download and load the necessary files from the mni template :param str data_dir: path of the data directory. Used to force data storage in a non-standard location. :param str url: download URL of the dataset. Overwrite the default URL. | 2.254863 | 2 |
core/exceptions.py | nielsvm/toggle-desktop | 1 | 6620534 | <reponame>nielsvm/toggle-desktop<filename>core/exceptions.py
class FileResourceNotFound(Exception):
    """Raised when a file resource cannot be located.

    Fix: the message is now also forwarded to Exception.__init__ so the
    exception carries proper ``args`` (pickling, generic handlers) instead
    of an empty tuple; ``str()`` output is unchanged.
    """

    def __init__(self, needle, haystack=None):
        # `needle` is the missing resource; `haystack`, if given, lists the
        # locations that were searched.
        if haystack is None:
            self.msg = "Unable to find: %s" % needle
        else:
            self.msg = "Unable to find: %s\n\nIn any of these locations:\n - %s" % (needle, "\n - ".join(haystack))
        Exception.__init__(self, self.msg)

    def __str__(self):
        return self.msg
class MissingDependency(Exception):
    # Marker exception with no extra state; name suggests it is raised
    # elsewhere when a required dependency is absent (no usage visible here).
    pass
class RuleActionException(Exception):
    """Base class for exceptions related to rule action loading and execution.

    Fix: forwards the message to Exception.__init__ so ``args`` is populated
    (pickling, generic handlers); ``str()`` output is unchanged.
    """

    def __init__(self, id, msg):
        # `id` identifies the failing rule action; `msg` describes the failure.
        self.id = id
        self.msg = msg
        Exception.__init__(self, msg)

    def __str__(self):
        return "While loading or executing rule action '%s':\n\n%s." % (self.id, self.msg)
class RuleParseException(RuleActionException):
    """Base class for exceptions related to rule parsing and loading."""

    def __str__(self):
        # Parse errors use a shorter prefix than generic action errors.
        return "parse error: %s." % self.msg
class RuleActionsDoesNotExist(RuleParseException):
    # Raised when a rule references an action id that is not defined.
    def __init__(self, id):
        self.id = id
        self.msg = "Rule action '%s' does not exist!" % id
class RuleMissingArguments(RuleParseException):
    # Raised when a rule action receives the wrong number of arguments.
    def __init__(self, id, needs, given):
        self.id = id
        self.msg = "Rule action '%s' needs %d arguments, %d given!" % (id, needs, given)
class RuntimeException(Exception):
    """General runtime exceptions blocking further code execution.

    Fix: forwards the message to Exception.__init__ so ``args`` is populated
    (pickling, generic handlers); ``str()`` output is unchanged.
    """

    def __init__(self, msg):
        self.msg = msg
        Exception.__init__(self, msg)

    def __str__(self):
        return "RUNTIME ERROR: %s." % self.msg
| class FileResourceNotFound(Exception):
def __init__(self, needle, haystack=None):
if haystack is None:
self.msg = "Unable to find: %s" % needle
else:
self.msg = "Unable to find: %s\n\nIn any of these locations:\n - %s" % (needle, "\n - ".join(haystack))
def __str__(self):
return self.msg
class MissingDependency(Exception):
pass
class RuleActionException(Exception):
"""Base class for exceptions related to rule action loading and execution."""
def __init__(self, id, msg):
self.id = id
self.msg = msg
def __str__(self):
return "While loading or executing rule action '%s':\n\n%s." % (self.id, self.msg)
class RuleParseException(RuleActionException):
"""Base class for exceptions related to rule parsing and loading."""
def __str__(self):
return "parse error: %s." % self.msg
class RuleActionsDoesNotExist(RuleParseException):
def __init__(self, id):
self.id = id
self.msg = "Rule action '%s' does not exist!" % id
class RuleMissingArguments(RuleParseException):
def __init__(self, id, needs, given):
self.id = id
self.msg = "Rule action '%s' needs %d arguments, %d given!" % (id, needs, given)
class RuntimeException(Exception):
"""General runtime exceptions blocking further code execution."""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "RUNTIME ERROR: %s." % self.msg | en | 0.883762 | Base class for exceptions related to rule action loading and execution. Base class for exceptions related to rule parsing and loading. General runtime exceptions blocking further code execution. | 2.760184 | 3 |
app/settings.py | capralifecycle/office-games-client | 0 | 6620535 | <filename>app/settings.py
import os


def _env_bool(name, default):
    """Parse a boolean environment variable.

    Accepts 1/true/yes/on (case-insensitive) as True, anything else as False;
    returns *default* when the variable is unset.
    """
    raw = os.environ.get(name)
    if raw is None:
        return default
    return raw.strip().lower() in ('1', 'true', 'yes', 'on')


# Amount of seconds before a player can win, this functions as a buffer, so that nobody wins by "accident"
# Used by register_card() in office_game.py
GAME_START_TIME_BUFFER = int(os.environ.get('OG_GAME_START_TIME_BUFFER', 10))
# Amount of seconds before a new card registration times out
GAME_CARD_REGISTRATION_TIMEOUT = int(os.environ.get('OG_GAME_CARD_REGISTRATION_TIMEOUT', 60 * 60))
# Amount of seconds before another player has to register their card to start a new game
GAME_PLAYER_REGISTRATION_TIMEOUT = int(os.environ.get('OG_GAME_PLAYER_REGISTRATION_TIMEOUT', 30))
# Amount of seconds before a game session runs out (in the case when players forget to register a winner)
GAME_SESSION_TIME = int(os.environ.get('OG_GAME_SESSION_TIME', 15 * 60))
# Firebase details
FIREBASE_API_KEY = os.environ.get('OG_FIREBASE_API_KEY', None)
FIREBASE_DATABASE_URL = os.environ.get('OG_FIREBASE_DATABASE_URL', None)
FIREBASE_STORAGE_BUCKET = os.environ.get('OG_FIREBASE_STORAGE_BUCKET', None)
FIREBASE_AUTH_DOMAIN = os.environ.get('OG_FIREBASE_AUTH_DOMAIN', None)
FIREBASE_TYPE = os.environ.get('OG_FIREBASE_TYPE', 'service_account')
FIREBASE_PROJECT_ID = os.environ.get('OG_FIREBASE_PROJECT_ID', None)
FIREBASE_PRIVATE_KEY_ID = os.environ.get('OG_FIREBASE_PRIVATE_KEY_ID', None)
FIREBASE_PRIVATE_KEY = os.environ.get('OG_FIREBASE_PRIVATE_KEY', None)
FIREBASE_CLIENT_EMAIL = os.environ.get('OG_FIREBASE_CLIENT_EMAIL', None)
FIREBASE_CLIENT_ID = os.environ.get('OG_FIREBASE_CLIENT_ID', None)
FIREBASE_AUTH_URI = os.environ.get('OG_FIREBASE_AUTH_URI', 'https://accounts.google.com/o/oauth2/auth')
FIREBASE_TOKEN_URI = os.environ.get('OG_FIREBASE_TOKEN_URI', 'https://accounts.google.com/o/oauth2/token')
FIREBASE_AUTH_PROVIDER_X509_CERT_URL = os.environ.get(
    'OG_FIREBASE_AUTH_PROVIDER_X509_CERT_URL',
    'https://www.googleapis.com/oauth2/v1/certs'
)
FIREBASE_CLIENT_X509_CERT_URL = os.environ.get('OG_FIREBASE_CLIENT_X509_CERT_URL', None)
# Reader details
READER_VENDOR_ID = os.environ.get('OG_READER_VENDOR_ID', '0xffff')
READER_PRODUCT_ID = os.environ.get('OG_READER_PRODUCT_ID', '0x0035')
# Sentry details
SENTRY_DSN = os.environ.get('OG_SENTRY_DSN', None)
# Slack details
# Notify Slack regarding game events?
# BUG FIX: the raw env value is a string, so OG_SLACK_MESSAGES_ENABLED="false"
# was still truthy; parse it as a real boolean instead.
SLACK_MESSAGES_ENABLED = _env_bool('OG_SLACK_MESSAGES_ENABLED', True)
SLACK_TOKEN = os.environ.get('OG_SLACK_TOKEN', None)
SLACK_DEV_CHANNEL = os.environ.get('OG_SLACK_DEV_CHANNEL', '#kontorspill_dev')
SLACK_CHANNEL = os.environ.get('OG_SLACK_CHANNEL', '#kontorspill')
SLACK_USERNAME = os.environ.get('OG_SLACK_USERNAME', 'Kontor Spill')
SLACK_AVATAR_URL = os.environ.get('OG_SLACK_AVATAR_URL', None)
SLACK_DEFAULT_USER_AVATAR_URL = os.environ.get('OG_SLACK_DEFAULT_USER_AVATAR_URL', 'https://capralifecycle.github.io/office-games-viewer/capra.png')
# BUG FIX: cast to int for consistency with the other interval settings
# (env values arrive as strings).
SLACK_SYNC_INTERVAL = int(os.environ.get('OG_SLACK_SYNC_INTERVAL', 3600))
| <filename>app/settings.py
import os
# Amount of seconds before a player can win, this functions as a buffer, so that nobody wins by "accident"
# Used by register_card() in office_game.py
GAME_START_TIME_BUFFER = int(os.environ.get('OG_GAME_START_TIME_BUFFER', 10))
# Amount of seconds before a new card registration times out
GAME_CARD_REGISTRATION_TIMEOUT = int(os.environ.get('OG_GAME_CARD_REGISTRATION_TIMEOUT', 60 * 60))
# Amount of seconds before another player has to register their card to start a new game
GAME_PLAYER_REGISTRATION_TIMEOUT = int(os.environ.get('OG_GAME_PLAYER_REGISTRATION_TIMEOUT', 30))
# Amount of seconds before a game session runs out (in the case when players forget to register a winner)
GAME_SESSION_TIME = int(os.environ.get('OG_GAME_SESSION_TIME', 15 * 60))
# Firebase details
FIREBASE_API_KEY = os.environ.get('OG_FIREBASE_API_KEY', None)
FIREBASE_DATABASE_URL = os.environ.get('OG_FIREBASE_DATABASE_URL', None)
FIREBASE_STORAGE_BUCKET = os.environ.get('OG_FIREBASE_STORAGE_BUCKET', None)
FIREBASE_AUTH_DOMAIN = os.environ.get('OG_FIREBASE_AUTH_DOMAIN', None)
FIREBASE_TYPE = os.environ.get('OG_FIREBASE_TYPE', 'service_account')
FIREBASE_PROJECT_ID = os.environ.get('OG_FIREBASE_PROJECT_ID', None)
FIREBASE_PRIVATE_KEY_ID = os.environ.get('OG_FIREBASE_PRIVATE_KEY_ID', None)
FIREBASE_PRIVATE_KEY = os.environ.get('OG_FIREBASE_PRIVATE_KEY', None)
FIREBASE_CLIENT_EMAIL = os.environ.get('OG_FIREBASE_CLIENT_EMAIL', None)
FIREBASE_CLIENT_ID = os.environ.get('OG_FIREBASE_CLIENT_ID', None)
FIREBASE_AUTH_URI = os.environ.get('OG_FIREBASE_AUTH_URI', 'https://accounts.google.com/o/oauth2/auth')
FIREBASE_TOKEN_URI = os.environ.get('OG_FIREBASE_TOKEN_URI', 'https://accounts.google.com/o/oauth2/token')
FIREBASE_AUTH_PROVIDER_X509_CERT_URL = os.environ.get(
'OG_FIREBASE_AUTH_PROVIDER_X509_CERT_URL',
'https://www.googleapis.com/oauth2/v1/certs'
)
FIREBASE_CLIENT_X509_CERT_URL = os.environ.get('OG_FIREBASE_CLIENT_X509_CERT_URL', None)
# Reader details
READER_VENDOR_ID = os.environ.get('OG_READER_VENDOR_ID', '0xffff')
READER_PRODUCT_ID = os.environ.get('OG_READER_PRODUCT_ID', '0x0035')
# Sentry details
SENTRY_DSN = os.environ.get('OG_SENTRY_DSN', None)
# Slack details
# Notify Slack regarding game events?
SLACK_MESSAGES_ENABLED = os.environ.get('OG_SLACK_MESSAGES_ENABLED', True)
SLACK_TOKEN = os.environ.get('OG_SLACK_TOKEN', None)
SLACK_DEV_CHANNEL = os.environ.get('OG_SLACK_DEV_CHANNEL', '#kontorspill_dev')
SLACK_CHANNEL = os.environ.get('OG_SLACK_CHANNEL', '#kontorspill')
SLACK_USERNAME = os.environ.get('OG_SLACK_USERNAME', 'Kontor Spill')
SLACK_AVATAR_URL = os.environ.get('OG_SLACK_AVATAR_URL', None)
SLACK_DEFAULT_USER_AVATAR_URL = os.environ.get('OG_SLACK_DEFAULT_USER_AVATAR_URL', 'https://capralifecycle.github.io/office-games-viewer/capra.png')
SLACK_SYNC_INTERVAL = os.environ.get('OG_SLACK_SYNC_INTERVAL', 3600)
| en | 0.93812 | # Amount of seconds before a player can win, this functions as a buffer, so that nobody wins by "accident" # Used by register_card() in office_game.py # Amount of seconds before a new card registration times out # Amount of seconds before another player has to register their card to start a new game # Amount of seconds before a game session runs out (in the case when players forget to register a winner) # Firebase details # Reader details # Sentry details # Slack details # Notify Slack regarding game events? | 2.482172 | 2 |
Locomotion System/leg.py | aayush-ag21/Cronus | 0 | 6620536 | #This code is for one leg, i.e Two servos
import time
import pigpio
# Connect to the local pigpio daemon.
pi= pigpio.pi()
# GPIO pin numbers for the Right-Front Upper and Lower leg servos.
RFU=4
RFL=17
pi.set_mode(RFU, pigpio.OUTPUT)
pi.set_mode(RFL, pigpio.OUTPUT)
# Start with both servo lines low (no pulses).
pi.write(RFU,0)
pi.write(RFL,0)
def servo(pin1, pin2, degree):
    """Sweep the servo on *pin1* toward *degree* in 10 µs pulse-width steps.

    *pin2* is accepted for call compatibility but is not driven here.
    The angle is mapped to a pulse width (rounded down to a multiple of 10)
    and clamped to the safe 500-2500 µs range.
    """
    target = min(max(10 * (int((degree * 2000 / 180 + 500) / 10)), 500), 2500)
    # BUG FIX: the original read the current pulse width from an undefined
    # name `pin`, which raised NameError on the first step; it must be pin1.
    while pi.get_servo_pulsewidth(pin1) > target:
        pi.set_servo_pulsewidth(pin1, pi.get_servo_pulsewidth(pin1) - 10)
        time.sleep(0.01)
    while pi.get_servo_pulsewidth(pin1) < target:
        pi.set_servo_pulsewidth(pin1, pi.get_servo_pulsewidth(pin1) + 10)
        time.sleep(0.01)
def height(h, tx, ty) :
    """Set chassis height *h* with tilt offsets *tx*/*ty* via the upper servo.

    The commanded angle is 165 minus the combined offset — the 165 constant is
    presumably the mechanical zero calibration; TODO confirm against hardware.
    """
    servo(RFU, RFL,165- (h+tx+ty))
# Current [H, TX, TY] values, updated in place from console commands.
value = [0,0,0]
while 1:
    # A command looks like "H 40 TX 5": parameter names each followed by a value.
    para = input('Enter parameter\n').split(' ')
    parameters = ['H','TX','TY']
    for item in para:
        if item in parameters:
            try:
                # Take the token right after the parameter name as its value.
                # NOTE(review): list.index returns the FIRST occurrence, so a
                # repeated parameter name reuses the first value — confirm intended.
                value[parameters.index(item)]=int(para[para.index(item)+1])
            except:
                print("Error")
    print(*value)
    height(value[0], value[1], value[2])
#Y is front ,X is side
#X for X axis locomotion,Y for Y axis locomotion,R for rotate,H is chassis height,TX for tilt in X,TY for tilt in Y
| #This code is for one leg, i.e Two servos
import time
import pigpio
pi= pigpio.pi()
RFU=4
RFL=17
pi.set_mode(RFU, pigpio.OUTPUT)
pi.set_mode(RFL, pigpio.OUTPUT)
pi.write(RFU,0)
pi.write(RFL,0)
def servo(pin1,pin2,degree) :
degree=min(max(10*(int((degree*2000/180 + 500)/10)),500),2500)
while(pi.get_servo_pulsewidth(pin1)>degree):
pi.set_servo_pulsewidth(pin1,pi.get_servo_pulsewidth(pin)-10)
time.sleep(0.01)
while(pi.get_servo_pulsewidth(pin1)<degree):
pi.set_servo_pulsewidth(pin1,pi.get_servo_pulsewidth(pin)+10)
time.sleep(0.01)
def height(h, tx, ty) :
servo(RFU, RFL,165- (h+tx+ty))
value = [0,0,0]
while 1:
para = input('Enter parameter\n').split(' ')
parameters = ['H','TX','TY']
for item in para:
if item in parameters:
try:
value[parameters.index(item)]=int(para[para.index(item)+1])
except:
print("Error")
print(*value)
height(value[0], value[1], value[2])
#Y is front ,X is side
#X for X axis locomotion,Y for Y axis locomotion,R for rotate,H is chassis height,TX for tilt in X,TY for tilt in Y
| en | 0.915913 | #This code is for one leg, i.e Two servos #Y is front ,X is side #X for X axis locomotion,Y for Y axis locomotion,R for rotate,H is chassis height,TX for tilt in X,TY for tilt in Y | 3.123009 | 3 |
main.py | ptrcarlos/email-sender | 0 | 6620537 | <gh_stars>0
import sqlite3
import smtplib
import os.path
import random
from tkinter.ttk import Treeview
from tkinter import *
from tkinter import messagebox
from tkinter.filedialog import askopenfilenames
from PIL import ImageTk, Image
from email import encoders
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
class EmailSender():
    def __init__(self):
        """Open the SQLite store, ensure the schema, and cache known row ids."""
        self.conn = sqlite3.connect('database.db')
        self.cursor = self.conn.cursor()
        # Ids already present in the data table (kept in sync on insert/delete).
        self.id_emails_db = []
        # 'edit' while an existing row is being edited, '' otherwise.
        self.email_cod = ''
        self.uniq_id = 0
        # MIME parts queued as attachments for the next message.
        self.attach = []
        self.to_send_from_db = ''
        # 'from_db' when composing to addresses picked on the database page.
        self.cod_page = ''
        self.create_db()
        self.search_data_ids()
def create_db(self):
self.cursor.execute('CREATE TABLE IF NOT EXISTS data (id INTEGER, email TEXT NOT NULL, name TEXT)')
def search_data_ids(self):
self.cursor.execute('SELECT * FROM data')
rows = self.cursor.fetchall()
for row in rows:
self.id_emails_db.append(row[0])
    def nav_menu(self, cod, app):
        """Destroy *app* and open the page named by *cod* ('' just exits).

        Returning to the main page also resets compose state and closes any
        open SMTP session.
        """
        app.destroy()
        if cod == 'main':
            try:
                self.to_send_from_db = ''
                self.cod_page = ''
                # May fail when no SMTP session was ever opened; ignored.
                self.smtp.quit()
            except:
                pass
            self.main_page()
        elif cod == 'single':
            self.single_page()
        elif cod == 'login':
            self.login_page()
        elif cod == 'acess_db':
            self.access_db_page()
    def server_page_save(self, server_entry, app):
        """Remember the Exchange server host typed by the user; close the dialog."""
        self.server = server_entry.get()
        app.destroy()
def show_pass(self, pass_entry):
if pass_entry['show'] == '':
pass_entry['show'] = '•'
else:
pass_entry['show'] = ''
    def centering_page(self, app, app_width, app_height):
        """Size *app* to app_width x app_height and center it on the screen."""
        screen_width = app.winfo_screenwidth()
        screen_height = app.winfo_screenheight()
        x_cordinate = int((screen_width/2) - (app_width/2))
        y_cordinate = int((screen_height/2) - (app_height/2))
        app.geometry("{}x{}+{}+{}".format(app_width, app_height, x_cordinate, y_cordinate))
    def attach_files(self, attach_entry):
        """Let the user pick files and queue them as base64-encoded MIME parts.

        Each chosen file name is appended to *attach_entry* for display and its
        encoded part is stored in self.attach until the next send/clear.
        """
        files = askopenfilenames()
        for path in files:
            part = MIMEBase('application', "octet-stream")
            with open(path, 'rb') as file:
                part.set_payload(file.read())
            encoders.encode_base64(part)
            file_name = os.path.basename(path)
            attach_entry.insert(END, f'{os.path.basename(file_name)}'+'; ')
            part.add_header('Content-Disposition',
                            'attachment; filename="{}"'.format(file_name))
            self.attach.append(part)
def login_auth(self, app, email_entry, pass_entry, cod):
self.login = email_entry.get()
self.password = <PASSWORD>_entry.get()
try:
if cod == 'gmail':
messagebox.showinfo('Login', 'Connecting...')
self.smtp = smtplib.SMTP_SSL('smtp.gmail.com', 465)
self.smtp.login(self.login, self.password)
elif cod == 'exchange':
messagebox.showinfo('Login', 'Connecting...')
self.smtp = smtplib.SMTP(self.server)
self.smtp.starttls()
self.smtp.login(self.login, self.password)
except:
messagebox.showerror('Login', 'Login failed, try again!')
else:
self.nav_menu('single', app)
def send_email(self, app, to_entry, subject_text, content_text, attach_entry):
if to_entry.get() == '' or subject_text.get("1.0", END) == '':
messagebox.showerror('Error', 'No email address to send or empty subject!')
else:
to_send = to_entry.get()
to_send = to_send.split('; ')
for email in to_send:
if email != '':
self.msg = MIMEMultipart()
self.msg['From'] = self.login
self.msg['To'] = email
self.msg['Subject'] = subject_text.get("1.0", END)
self.msg.attach(MIMEText(content_text.get("1.0", END)))
for part in self.attach:
self.msg.attach(part)
try:
self.smtp.sendmail(self.login, self.msg['To'], self.msg.as_string())
except:
messagebox.showerror('Error', 'Cannot send the email!')
else:
if self.cod_page != 'from_db':
messagebox.showinfo('Info', 'Email sent to {} successfully!'.format(email))
self.attach.clear()
to_entry.delete(0, END)
attach_entry.delete(0, END)
subject_text.delete(1.0, END)
content_text.delete(1.0, END)
if self.cod_page == 'from_db':
messagebox.showinfo('Info', 'Emails sent successfully!')
app.destroy()
self.restart_nav_page()
    def restart_nav_page(self):
        """Post-send navigation window: back to main, compose again, or exit."""
        app = Tk()
        self.centering_page(app, 500, 300)
        app.title('Nav Page')
        app.resizable(False, False)
        app['background'] = '#075fab'
        app.iconbitmap('icons/001-restart.ico')
        nav_label = Label(app, text='Navigation Page', font=('Terminal', 23), bg='#075fab', fg='White')
        nav_label.place(relx=0.5, rely=0.15, anchor=CENTER)
        back_main_btn = Button(app, text='Back to Main Page', font=('Roboto', 13), bg='Gray25',
                               fg='White', command=lambda: self.nav_menu('main', app))
        back_main_btn.place(relx=0.5, rely=0.4, anchor=CENTER, width=160, height=50)
        send_again_btn = Button(app, text='Send email again', font=('Roboto', 13), bg='Green',
                                fg='White', command=lambda: self.nav_menu('single', app))
        send_again_btn.place(relx=0.5, rely=0.6, anchor=CENTER, width=160, height=50)
        # cod '' matches no page in nav_menu, so this just closes the window.
        exit_btn = Button(app, text='Exit', font=('Roboto', 13), bg='Red', fg='White',
                          command=lambda: self.nav_menu('', app))
        exit_btn.place(relx=0.5, rely=0.8, anchor=CENTER, width=160, height=50)
        app.mainloop()
    def server_page(self):
        """Small dialog asking for the Exchange server host name."""
        app = Toplevel()
        self.centering_page(app, 400, 150)
        app.title('Server')
        app.iconbitmap('icons/002-server.ico')
        app['background'] = '#075fab'
        app.resizable(False, False)
        server_label = Label(app, text='Exchange server:', font=('Roboto', 12), bg='#075fab', fg='White')
        server_label.place(relx=0.18, rely=0.4, anchor=CENTER)
        server_entry = Entry(app, font=('Roboto', 12), width=27)
        server_entry.place(relx=0.65, rely=0.4, anchor=CENTER)
        # Saving stores the host on self.server and closes this dialog.
        save_btn = Button(app, text='Save', font=('Roboto', 12), width=8, bg='Green', fg='White',
                          command=lambda: self.server_page_save(server_entry, app))
        save_btn.place(relx=0.5, rely=0.7, anchor=CENTER)
        app.mainloop()
def search_all_data_db(self, tree):
tree.delete(*tree.get_children())
self.cursor.execute('SELECT * FROM data')
rows = self.cursor.fetchall()
for row in rows:
tree.insert("" , "end", values=(f'{row[0]}', f'{row[1]}',f'{row[2]}'))
    def fill_tree(self, tree):
        """Replace the tree contents with the pending cursor result set.

        Assumes a SELECT was just executed on self.cursor; shows an error
        dialog (and empties the tree) when it returned no rows.
        """
        rows = self.cursor.fetchall()
        if not rows:
            tree.delete(*tree.get_children())
            messagebox.showerror('Error', "Didn't find data!")
        else:
            tree.delete(*tree.get_children())
            for row in rows:
                tree.insert("" , "end", values=(f'{row[0]}', f'{row[1]}',f'{row[2]}'))
def search_one_data_db(self, name_entry, email_entry, tree):
if name_entry.get() == '' and email_entry.get() == '':
tree.delete(*tree.get_children())
self.search_all_data_db(tree)
return
else:
if name_entry.get() != '': # name field not empty
self.cursor.execute('SELECT * FROM data WHERE name=?', [name_entry.get()])
self.fill_tree(tree)
else: # name field empty, so use email_entry
self.cursor.execute('SELECT * FROM data WHERE email=?', [email_entry.get()])
self.fill_tree(tree)
def search_data_db(self, cod, name_entry, email_entry, tree):
if cod == 'all':
self.search_all_data_db(tree)
elif cod == 'one':
self.search_one_data_db(name_entry, email_entry, tree)
def delete_data_db(self, email_entry, tree):
if not tree.focus():
messagebox.showinfo('Info', 'Not a data selected!')
else:
item = tree.focus()
try:
id_item = tree.item(item)['values'][0]
email_item = tree.item(item)['values'][1]
except:
pass
if messagebox.askquestion('Delete data', 'Are you sure you want to delete this data?') == 'yes':
self.cursor.execute('DELETE FROM data WHERE email=?', [email_item])
self.conn.commit()
self.id_emails_db.remove(id_item)
self.search_all_data_db(tree)
    def insert_data_db(self, name_entry, email_entry, tree):
        """Insert a new (id, email, name) row, or update one while in edit mode.

        Edit mode is signalled by self.email_cod == 'edit' (set by
        edit_data_db); otherwise a fresh random id unused by any cached row
        is generated.  The entries are cleared and the tree reloaded on success.
        """
        email = email_entry.get()
        name = name_entry.get()
        if name_entry.get() == '' or email_entry.get() == '':
            messagebox.showerror('Error', 'Email or Name field empty!')
            return
        if self.email_cod == '' and self.uniq_id == 0:
            # Draw random ids until one not already in the cached id list is found.
            self.uniq_id = random.randint(1, 1000001)
            while True:
                if self.uniq_id in self.id_emails_db:
                    self.uniq_id = random.randint(1, 1000001)
                else:
                    self.id_emails_db.append(self.uniq_id)
                    break
            try:
                self.cursor.execute("INSERT INTO data (id, email, name) VALUES (?, ?, ?)", (self.uniq_id, email, name))
                self.conn.commit()
            except:
                messagebox.showerror('Error', 'Impossible to insert on DB!')
                return
        elif self.email_cod == 'edit':
            try:
                self.cursor.execute("UPDATE data SET email=?, name=? WHERE id=?", (email, name, self.uniq_id))
                self.conn.commit()
            except:
                messagebox.showerror('Error', 'Impossible to update on DB!')
                return
        messagebox.showinfo('Info', 'Data saved successfully!')
        name_entry.delete(0, END)
        email_entry.delete(0, END)
        self.search_all_data_db(tree)
        # Leave edit mode and forget the working id.
        self.email_cod = ''
        self.uniq_id = 0
    def edit_data_db(self, name_entry, email_entry, tree):
        """Load the selected row into the entry fields and switch to edit mode."""
        if not tree.focus():
            messagebox.showinfo('Info', 'Not a data selected!')
        else:
            # insert_data_db will UPDATE (not INSERT) while email_cod == 'edit'.
            self.email_cod = 'edit'
            item = tree.focus()
            self.uniq_id= tree.item(item)['values'][0]
            email_item = tree.item(item)['values'][1]
            name_item = tree.item(item)['values'][2]
            email_entry.insert(END, email_item)
            name_entry.insert(END, name_item)
    def append_one_db(self, added_email_entry, tree):
        """Replace the 'Added' field with the email of the selected tree row."""
        if not tree.focus():
            messagebox.showinfo('Info', 'Not a data selected!')
        else:
            # Despite the button label, the field is cleared first, so this
            # replaces rather than appends.
            added_email_entry.delete(0, END)
            item = tree.focus()
            email_item = tree.item(item)['values'][1]
            added_email_entry.insert(END, email_item + '; ')
def append_all_db(self, added_email_entry, tree):
added_email_entry.delete(0, END)
self.cursor.execute('SELECT * FROM data')
rows = self.cursor.fetchall()
for row in rows:
added_email_entry.insert(END, row[1] + '; ')
def clear_attach_files(self, attach_entry):
self.attach.clear()
attach_entry.delete(0, END)
    def send_email_from_db(self, added_email_entry, app):
        """Start the login/compose flow pre-filled with the collected addresses."""
        self.cod_page = 'from_db'
        if added_email_entry.get() == '':
            messagebox.showerror('Error', 'No email address to send!')
            return
        self.to_send_from_db = added_email_entry.get()
        app.destroy()
        # NOTE(review): login_page() always returns None, so the branch below
        # never runs; single_page() is actually reached through login_auth's
        # nav_menu('single', ...) call — confirm whether this is dead code.
        if self.login_page():
            self.single_page()
    def access_db_page(self):
        """Database page: CRUD over stored contacts plus bulk-send selection."""
        app = Tk()
        self.centering_page(app, 700, 550)
        app.title('Database')
        app.iconbitmap('icons/003-database.ico')
        app['background'] = '#075fab'
        app.resizable(False, False)
        db_label = Label(app, text='Database', font=('Terminal', 25), bg='#075fab', fg='White')
        db_label.place(relx=0.5, rely=0.1, anchor=CENTER)
        # Email/Name entry fields used for search, insert and edit.
        email_label = Label(app, text='Email:', font=('Roboto', 12), bg='#075fab', fg='White')
        email_label.place(relx=0.1, rely=0.25, anchor=CENTER)
        email_entry = Entry(app, font=('Roboto', 13), width=45)
        email_entry.place(relx=0.14, rely=0.25, anchor='w')
        name_label = Label(app, text='Name:', font=('Roboto', 12), bg='#075fab', fg='White')
        name_label.place(relx=0.1, rely=0.32, anchor=CENTER)
        name_entry = Entry(app, font=('Roboto', 13), width=45)
        name_entry.place(relx=0.14, rely=0.32, anchor='w')
        # Toolbar buttons: search / edit / delete / save.
        search_image = Image.open('icons/004-search.ico').resize((20, 20), Image.ANTIALIAS)
        search_image = ImageTk.PhotoImage(search_image)
        search_btn = Button(app, image=search_image, compound=CENTER, width=27, height=22, bg='White',
                            command=lambda: self.search_data_db('one', name_entry, email_entry, tree))
        search_btn.place(relx=0.77, rely=0.25, anchor='w')
        edit_image = Image.open('icons/014-edit.ico').resize((30, 30), Image.ANTIALIAS)
        edit_image = ImageTk.PhotoImage(edit_image)
        edit_btn = Button(app, image=edit_image, compound=CENTER, width=27, height=22, bg='White',
                          command=lambda: self.edit_data_db(name_entry, email_entry, tree))
        edit_btn.place(relx=0.86, rely=0.25, anchor='w')
        delete_image = Image.open('icons/005-delete.ico').resize((22, 22), Image.ANTIALIAS)
        delete_image = ImageTk.PhotoImage(delete_image)
        delete_btn = Button(app, image=delete_image, compound=CENTER, width=27, height=22, bg='White',
                            command=lambda: self.delete_data_db(email_entry, tree))
        delete_btn.place(relx=0.86, rely=0.32, anchor='w')
        save_image = Image.open('icons/012-save.ico').resize((20, 20), Image.ANTIALIAS)
        save_image = ImageTk.PhotoImage(save_image)
        save_btn = Button(app, image=save_image, width=27, height=22, bg='White',
                          command=lambda: self.insert_data_db(name_entry, email_entry, tree))
        save_btn.place(relx=0.77, rely=0.32, anchor='w')
        # Collect one/all emails into the 'Added' field and send to them.
        append_one_btn = Button(app, text='Append', font=('Roboto', 10), width=8, height=1, bg='White', fg='Black',
                                command=lambda: self.append_one_db(added_email_entry, tree))
        append_one_btn.place(relx=0.8, rely=0.82, anchor='w')
        append_all_btn = Button(app, text='Append all', font=('Roboto', 10), width=8, height=1, bg='White', fg='Black',
                                command=lambda: self.append_all_db(added_email_entry, tree))
        append_all_btn.place(relx=0.8, rely=0.88, anchor='w')
        added_email_label = Label(app, text='Added:', font=('Roboto', 12), bg='#075fab', fg='White')
        added_email_label.place(relx=0.1, rely=0.95, anchor=CENTER)
        added_email_entry = Entry(app, font=('Roboto', 12), width=50)
        added_email_entry.place(relx=0.14, rely=0.95, anchor='w')
        send_email_btn = Button(app, text='Send email', font=('Roboto', 10), width=8, height=1, bg='Green', fg='White',
                                command=lambda: self.send_email_from_db(added_email_entry, app))
        send_email_btn.place(relx=0.8, rely=0.95, anchor='w')
        back_icon = Image.open('icons/006-return.ico').resize((25, 25), Image.ANTIALIAS)
        back_icon = ImageTk.PhotoImage(back_icon)
        back_btn = Button(app, image=back_icon, compound=CENTER, width=25, height=25, bg='White',
                          command=lambda: self.nav_menu('main', app))
        back_btn.place(relx=0.05, rely=0.1, anchor=CENTER)
        # Result grid showing (index, email, name) rows.
        frame = Frame(app, bg='White')
        frame.pack()
        frame.place(relx=0.49, rely=0.57, anchor=CENTER)
        tree = Treeview(frame, height=10)
        tree.pack(side=TOP, fill=NONE, expand=FALSE)
        tree['columns'] = ('index', 'email', 'name')
        tree.column('#0', width=0, stretch=NO)
        tree.column('index', width=80, stretch=NO)
        tree.column('email', width=250, stretch=NO)
        tree.column('name', width=255, stretch=NO)
        tree.heading('index', text='Index', anchor=CENTER)
        tree.heading('email', text='Email', anchor=CENTER)
        tree.heading('name', text='Name', anchor=CENTER)
        self.search_data_db('all', name_entry, email_entry, tree)
        app.mainloop()
def login_page(self):
app = Tk()
self.centering_page(app, 500, 250)
app.iconbitmap('icons/007-login.ico')
app.title('Login')
app['background'] = '#075fab'
app.resizable(False, False)
info_label = Label(app, text='Log in to send emails', font=('Terminal', 12), bg='#075fab', fg='White')
info_label.place(relx=0.5, rely=0.1, anchor=CENTER)
email_label = Label(app, text='Email:', font=('Roboto', 12), bg='#<PASSWORD>', fg='White')
email_label.place(relx=0.15, rely=0.3, anchor='w')
email_entry = Entry(app, font=('Roboto', 12), fg='black', width=32)
email_entry.place(relx=0.25, rely=0.3, anchor='w')
pass_label = Label(app, text='Password:', font=('Roboto', 12),bg='#075fab', fg='White')
pass_label.place(relx=0.09, rely=0.45, anchor='w')
pass_entry = Entry(app, font=('Roboto', 12), show='•', fg='black', width=32)
pass_entry.place(relx=0.25, rely=0.45, anchor='w')
show_pass_icon = Image.open('icons/013-show-pass.ico').resize((25, 25), Image.ANTIALIAS)
show_pass_icon = ImageTk.PhotoImage(show_pass_icon)
show_pass_btn = Button(app, image=show_pass_icon, compound=CENTER, height=18, width=22, bg='White',
command=lambda: self.show_pass(pass_entry))
show_pass_btn.place(relx=0.85, rely=0.45, anchor='w')
selected_radio = StringVar()
gmail_radio = Radiobutton(app, text='Gmail', font=('Roboto', 11), value='gmail', fg='White', bg='#075fab', activebackground='#075fab',
activeforeground='White', selectcolor='#075fab', variable=selected_radio)
gmail_radio.place(relx=0.3, rely=0.58, anchor='w')
gmail_radio.select()
exchange_radio = Radiobutton(app, text='Exchange', font=('Roboto', 11), value='exchange', fg='White', bg='#075fab', activebackground='#075fab',
activeforeground='White', selectcolor='#075fab', variable=selected_radio,command=lambda: self.server_page())
exchange_radio.place(relx=0.55, rely=0.58, anchor='w')
login_btn = Button(app, text='Login', font=('Roboto', 12), height=1, width=6, bg='White', fg='Black',
command=lambda: self.login_auth(app, email_entry, pass_entry, selected_radio.get()))
login_btn.place(relx=0.5, rely=0.75, anchor=CENTER)
back_icon = Image.open('icons/006-return.ico').resize((25, 25), Image.ANTIALIAS)
back_icon = ImageTk.PhotoImage(back_icon)
back_btn = Button(app, image=back_icon, compound=CENTER, width=25, height=25, bg='White',
command=lambda: self.nav_menu('main', app))
back_btn.place(relx=0.05, rely=0.1, anchor=CENTER)
app.mainloop()
    def single_page(self):
        """Compose window for a single message (To / Subject / Content / files)."""
        app = Tk()
        self.centering_page(app, 700, 450)
        app.iconbitmap('icons/008-send.ico')
        app.title('Send a single email')
        app['background'] = '#075fab'
        app.resizable(False, False)
        single_title_label = Label(app, text='Send a single email', font=('Terminal', 25), bg='#075fab', fg='White')
        single_title_label.place(relx=0.5, rely=0.1, anchor=CENTER)
        to_label = Label(app, text='To:', font=('Arial', 14), bg='#075fab', fg='White')
        to_label.place(relx=0.15, rely=0.25, anchor='e')
        to_entry = Entry(app, font=('Arial', 11), fg='black', width=50)
        to_entry.place(relx=0.16, rely=0.25, anchor='w')
        # Pre-fill recipients collected on the database page, if any.
        if self.cod_page == 'from_db':
            to_entry.delete(0, END)
            to_entry.insert(END, self.to_send_from_db)
        subject_label = Label(app, text='Subject:', font=('Arial', 14), bg='#075fab', fg='White')
        subject_label.place(relx=0.15, rely=0.35, anchor='e')
        subject_text = Text(app, font=('Arial', 11), height=1, width=50)
        subject_text.place(relx=0.16, rely=0.33, anchor='nw')
        content_label = Label(app, text='Content:', font=('Arial', 14), bg='#075fab', fg='White')
        content_label.place(relx=0.15, rely=0.45, anchor='e')
        content_text = Text(app, font=('Arial', 11), height=7, width=50)
        content_text.place(relx=0.16, rely=0.43, anchor='nw')
        attach_label = Label(app, text='Attach files:', font=('Arial', 14), bg='#075fab', fg='White')
        attach_label.place(relx=0.15, rely=0.75, anchor='e')
        attach_entry = Entry(app, font=('Arial', 11), width=50)
        attach_entry.place(relx=0.16, rely=0.73, anchor='nw')
        select_attach_btn = Button(app, text='Select', font=('Arial', 11), height=1, width=6, bg='White', fg='Black',
                                   command=lambda: self.attach_files(attach_entry))
        select_attach_btn.place(relx=0.75, rely=0.72, anchor='nw')
        clear_attach_btn = Button(app, text='Clear Attach', font=('Arial', 11), height=1, width=9, bg='White', fg='Black',
                                  command=lambda: self.clear_attach_files(attach_entry))
        clear_attach_btn.place(relx=0.85, rely=0.72, anchor='nw')
        send_btn = Button(app, text='Send email', font=('Roboto', 11), height=1, width=8, bg='green', fg='White',
                          command=lambda: self.send_email(app, to_entry, subject_text, content_text, attach_entry))
        send_btn.place(relx=0.5, rely=0.86, anchor=CENTER)
        back_icon = Image.open('icons/006-return.ico').resize((25, 25), Image.ANTIALIAS)
        back_icon = ImageTk.PhotoImage(back_icon)
        back_btn = Button(app, image=back_icon, compound=CENTER, width=25, height=25, bg='White',
                          command=lambda: self.nav_menu('main', app))
        back_btn.place(relx=0.05, rely=0.1, anchor=CENTER)
        app.mainloop()
    def main_page(self):
        """Home window: send a single email, open the contacts DB, or exit."""
        app = Tk()
        self.centering_page(app, 700, 450)
        app.iconbitmap('icons/009-home.ico')
        app.title('Email Sender')
        app['background'] = '#075fab'
        app.resizable(False, False)
        app_title_label = Label(app, text='Email', font=('Terminal', 50), bg='#075fab', fg='White')
        app_title_label.place(relx=0.4, rely=0.2, anchor=CENTER)
        app_title_label2 = Label(app, text='Sender', font=('Terminal', 50), bg='#075fab', fg='White')
        app_title_label2.place(relx=0.6, rely=0.37, anchor=CENTER)
        app_image = Image.open('icons/010-mail.ico').resize((100, 100), Image.ANTIALIAS)
        app_image = ImageTk.PhotoImage(app_image)
        app_image_label = Label(app, image=app_image, bg='#075fab')
        app_image_label.place(relx=0.65, rely=0.18, anchor=CENTER)
        # 'Send a single email' first routes through the login page.
        single_icon = Image.open('icons/008-send.ico').resize((40, 40), Image.ANTIALIAS)
        single_icon = ImageTk.PhotoImage(single_icon)
        single_btn = Button(app, text=' Send a single\n email', image=single_icon, compound=LEFT, font=('Roboto', 14), bg='White', fg='Black', height=75, width=180,
                            command=lambda: self.nav_menu('login', app))
        single_btn.place(relx=0.2, rely=0.7, anchor=CENTER)
        access_db_icon = Image.open('icons/003-database.ico').resize((30, 30), Image.ANTIALIAS)
        access_db_icon = ImageTk.PhotoImage(access_db_icon)
        access_db_btn = Button(app, text=' Access emails\n DB', image=access_db_icon, compound=LEFT, font=('Roboto', 14),
                               bg='White', fg='Black', height=75, width=180, command=lambda: self.nav_menu('acess_db', app))
        access_db_btn.place(relx=0.5, rely=0.7, anchor=CENTER)
        exit_icon = Image.open('icons/011-exit.ico').resize((30, 30), Image.ANTIALIAS)
        exit_icon = ImageTk.PhotoImage(exit_icon)
        # cod '' matches no page in nav_menu, so this just closes the window.
        exit_btn = Button(app, text=' Exit', image=exit_icon, compound=LEFT, font=('Roboto', 14), bg='White', fg='Black', height=75, width=180,
                          command=lambda: self.nav_menu('', app))
        exit_btn.place(relx=0.8, rely=0.7, anchor=CENTER)
        app.mainloop()
if __name__ == "__main__":
    # Script entry point: build the app and show the home window.
    email = EmailSender()
    email.main_page()
| import sqlite3
import smtplib
import os.path
import random
from tkinter.ttk import Treeview
from tkinter import *
from tkinter import messagebox
from tkinter.filedialog import askopenfilenames
from PIL import ImageTk, Image
from email import encoders
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
class EmailSender():
    """Tkinter desktop app for sending emails, singly or in bulk from a SQLite address book.

    State shared across pages:
    - a persistent SQLite connection to 'database.db' (table: data(id, email, name))
    - an SMTP session (self.smtp) opened by login_auth (Gmail SSL or Exchange STARTTLS)
    - self.attach: MIME attachment parts queued for the next send
    """

    def __init__(self):
        self.conn = sqlite3.connect('database.db')
        self.cursor = self.conn.cursor()
        self.id_emails_db = []       # ids already used in the DB, for unique-id generation
        self.email_cod = ''          # '' = insert mode, 'edit' = update mode on the DB page
        self.uniq_id = 0             # id of the row being inserted/edited
        self.attach = []             # MIMEBase parts queued for the next email
        self.to_send_from_db = ''    # pre-filled recipient list when coming from the DB page
        self.cod_page = ''           # 'from_db' marks the bulk-send flow
        self.create_db()
        self.search_data_ids()

    def create_db(self):
        """Create the address-book table if it does not exist yet."""
        self.cursor.execute('CREATE TABLE IF NOT EXISTS data (id INTEGER, email TEXT NOT NULL, name TEXT)')

    def search_data_ids(self):
        """Cache every id already stored in the DB into self.id_emails_db."""
        self.cursor.execute('SELECT * FROM data')
        rows = self.cursor.fetchall()
        for row in rows:
            self.id_emails_db.append(row[0])

    def nav_menu(self, cod, app):
        """Close the current window and open the page selected by `cod`.

        An unknown or empty cod simply closes the window (used as "exit").
        """
        app.destroy()
        if cod == 'main':
            try:
                # Returning to main resets the bulk-send state and closes any
                # open SMTP session; ignore errors when no session exists yet.
                self.to_send_from_db = ''
                self.cod_page = ''
                self.smtp.quit()
            except:
                pass
            self.main_page()
        elif cod == 'single':
            self.single_page()
        elif cod == 'login':
            self.login_page()
        elif cod == 'acess_db':
            self.access_db_page()

    def server_page_save(self, server_entry, app):
        """Store the Exchange server address typed in the dialog and close it."""
        self.server = server_entry.get()
        app.destroy()

    def show_pass(self, pass_entry):
        """Toggle the password entry between hidden ('•') and plain text."""
        if pass_entry['show'] == '':
            pass_entry['show'] = '•'
        else:
            pass_entry['show'] = ''

    def centering_page(self, app, app_width, app_height):
        """Size the window to app_width x app_height and center it on the screen."""
        screen_width = app.winfo_screenwidth()
        screen_height = app.winfo_screenheight()
        x_cordinate = int((screen_width/2) - (app_width/2))
        y_cordinate = int((screen_height/2) - (app_height/2))
        app.geometry("{}x{}+{}+{}".format(app_width, app_height, x_cordinate, y_cordinate))

    def attach_files(self, attach_entry):
        """Let the user pick files; queue them as base64 MIME parts and list their names."""
        files = askopenfilenames()
        for path in files:
            part = MIMEBase('application', "octet-stream")
            with open(path, 'rb') as file:
                part.set_payload(file.read())
            encoders.encode_base64(part)
            file_name = os.path.basename(path)
            attach_entry.insert(END, f'{os.path.basename(file_name)}'+'; ')
            part.add_header('Content-Disposition',
                            'attachment; filename="{}"'.format(file_name))
            self.attach.append(part)

    def login_auth(self, app, email_entry, pass_entry, cod):
        """Try to open an SMTP session with the entered credentials.

        cod selects the provider: 'gmail' uses SSL on port 465; 'exchange'
        uses STARTTLS against the server captured by server_page_save.
        On success, moves on to the single-email page.
        """
        self.login = email_entry.get()
        # Fixed: this line was corrupted by a redaction placeholder token.
        self.password = pass_entry.get()
        try:
            if cod == 'gmail':
                messagebox.showinfo('Login', 'Connecting...')
                self.smtp = smtplib.SMTP_SSL('smtp.gmail.com', 465)
                self.smtp.login(self.login, self.password)
            elif cod == 'exchange':
                messagebox.showinfo('Login', 'Connecting...')
                self.smtp = smtplib.SMTP(self.server)
                self.smtp.starttls()
                self.smtp.login(self.login, self.password)
        except:
            messagebox.showerror('Login', 'Login failed, try again!')
        else:
            self.nav_menu('single', app)

    def send_email(self, app, to_entry, subject_text, content_text, attach_entry):
        """Send the composed message to every '; '-separated address in the To field.

        Each recipient gets a fresh MIMEMultipart carrying the queued
        attachments; the form is cleared after a successful send. In the bulk
        flow ('from_db') a single summary popup is shown instead of one per
        recipient, and the window is replaced by the restart page.
        """
        if to_entry.get() == '' or subject_text.get("1.0", END) == '':
            messagebox.showerror('Error', 'No email address to send or empty subject!')
        else:
            to_send = to_entry.get()
            to_send = to_send.split('; ')
            for email in to_send:
                if email != '':
                    self.msg = MIMEMultipart()
                    self.msg['From'] = self.login
                    self.msg['To'] = email
                    self.msg['Subject'] = subject_text.get("1.0", END)
                    self.msg.attach(MIMEText(content_text.get("1.0", END)))
                    for part in self.attach:
                        self.msg.attach(part)
                    try:
                        self.smtp.sendmail(self.login, self.msg['To'], self.msg.as_string())
                    except:
                        messagebox.showerror('Error', 'Cannot send the email!')
                    else:
                        if self.cod_page != 'from_db':
                            messagebox.showinfo('Info', 'Email sent to {} successfully!'.format(email))
                        self.attach.clear()
                        to_entry.delete(0, END)
                        attach_entry.delete(0, END)
                        subject_text.delete(1.0, END)
                        content_text.delete(1.0, END)
            if self.cod_page == 'from_db':
                messagebox.showinfo('Info', 'Emails sent successfully!')
            app.destroy()
            self.restart_nav_page()

    def restart_nav_page(self):
        """Small window shown after a send: back to main, send again, or exit."""
        app = Tk()
        self.centering_page(app, 500, 300)
        app.title('Nav Page')
        app.resizable(False, False)
        app['background'] = '#075fab'
        app.iconbitmap('icons/001-restart.ico')
        nav_label = Label(app, text='Navigation Page', font=('Terminal', 23), bg='#075fab', fg='White')
        nav_label.place(relx=0.5, rely=0.15, anchor=CENTER)
        back_main_btn = Button(app, text='Back to Main Page', font=('Roboto', 13), bg='Gray25',
                               fg='White', command=lambda: self.nav_menu('main', app))
        back_main_btn.place(relx=0.5, rely=0.4, anchor=CENTER, width=160, height=50)
        send_again_btn = Button(app, text='Send email again', font=('Roboto', 13), bg='Green',
                               fg='White', command=lambda: self.nav_menu('single', app))
        send_again_btn.place(relx=0.5, rely=0.6, anchor=CENTER, width=160, height=50)
        exit_btn = Button(app, text='Exit', font=('Roboto', 13), bg='Red', fg='White',
                          command=lambda: self.nav_menu('', app))
        exit_btn.place(relx=0.5, rely=0.8, anchor=CENTER, width=160, height=50)
        app.mainloop()

    def server_page(self):
        """Modal dialog asking for the Exchange server address."""
        app = Toplevel()
        self.centering_page(app, 400, 150)
        app.title('Server')
        app.iconbitmap('icons/002-server.ico')
        app['background'] = '#075fab'
        app.resizable(False, False)
        server_label = Label(app, text='Exchange server:', font=('Roboto', 12), bg='#075fab', fg='White')
        server_label.place(relx=0.18, rely=0.4, anchor=CENTER)
        server_entry = Entry(app, font=('Roboto', 12), width=27)
        server_entry.place(relx=0.65, rely=0.4, anchor=CENTER)
        save_btn = Button(app, text='Save', font=('Roboto', 12), width=8, bg='Green', fg='White',
                          command=lambda: self.server_page_save(server_entry, app))
        save_btn.place(relx=0.5, rely=0.7, anchor=CENTER)
        app.mainloop()

    def search_all_data_db(self, tree):
        """Reload the tree view with every row of the data table."""
        tree.delete(*tree.get_children())
        self.cursor.execute('SELECT * FROM data')
        rows = self.cursor.fetchall()
        for row in rows:
            tree.insert("", "end", values=(f'{row[0]}', f'{row[1]}', f'{row[2]}'))

    def fill_tree(self, tree):
        """Replace the tree contents with the rows of the last executed query."""
        rows = self.cursor.fetchall()
        if not rows:
            tree.delete(*tree.get_children())
            messagebox.showerror('Error', "Didn't find data!")
        else:
            tree.delete(*tree.get_children())
            for row in rows:
                tree.insert("", "end", values=(f'{row[0]}', f'{row[1]}', f'{row[2]}'))

    def search_one_data_db(self, name_entry, email_entry, tree):
        """Search by name (preferred) or by email; both fields empty means show all."""
        if name_entry.get() == '' and email_entry.get() == '':
            tree.delete(*tree.get_children())
            self.search_all_data_db(tree)
            return
        else:
            if name_entry.get() != '':  # name field not empty
                self.cursor.execute('SELECT * FROM data WHERE name=?', [name_entry.get()])
                self.fill_tree(tree)
            else:  # name field empty, so use email_entry
                self.cursor.execute('SELECT * FROM data WHERE email=?', [email_entry.get()])
                self.fill_tree(tree)

    def search_data_db(self, cod, name_entry, email_entry, tree):
        """Dispatch to a full reload ('all') or a filtered search ('one')."""
        if cod == 'all':
            self.search_all_data_db(tree)
        elif cod == 'one':
            self.search_one_data_db(name_entry, email_entry, tree)

    def delete_data_db(self, email_entry, tree):
        """Delete the contact selected in the tree after user confirmation."""
        if not tree.focus():
            messagebox.showinfo('Info', 'Not a data selected!')
        else:
            item = tree.focus()
            try:
                id_item = tree.item(item)['values'][0]
                email_item = tree.item(item)['values'][1]
            except:
                pass
            if messagebox.askquestion('Delete data', 'Are you sure you want to delete this data?') == 'yes':
                self.cursor.execute('DELETE FROM data WHERE email=?', [email_item])
                self.conn.commit()
                self.id_emails_db.remove(id_item)
                self.search_all_data_db(tree)

    def insert_data_db(self, name_entry, email_entry, tree):
        """Insert a new contact, or update the selected one when in 'edit' mode.

        New rows get a random id, re-drawn until it is unique among the cached
        ids; the cache is kept in sync with the table.
        """
        email = email_entry.get()
        name = name_entry.get()
        if name_entry.get() == '' or email_entry.get() == '':
            messagebox.showerror('Error', 'Email or Name field empty!')
            return
        if self.email_cod == '' and self.uniq_id == 0:
            self.uniq_id = random.randint(1, 1000001)
            while True:
                if self.uniq_id in self.id_emails_db:
                    self.uniq_id = random.randint(1, 1000001)
                else:
                    self.id_emails_db.append(self.uniq_id)
                    break
            try:
                self.cursor.execute("INSERT INTO data (id, email, name) VALUES (?, ?, ?)", (self.uniq_id, email, name))
                self.conn.commit()
            except:
                messagebox.showerror('Error', 'Impossible to insert on DB!')
                return
        elif self.email_cod == 'edit':
            try:
                self.cursor.execute("UPDATE data SET email=?, name=? WHERE id=?", (email, name, self.uniq_id))
                self.conn.commit()
            except:
                messagebox.showerror('Error', 'Impossible to update on DB!')
                return
        messagebox.showinfo('Info', 'Data saved successfully!')
        name_entry.delete(0, END)
        email_entry.delete(0, END)
        self.search_all_data_db(tree)
        self.email_cod = ''
        self.uniq_id = 0

    def edit_data_db(self, name_entry, email_entry, tree):
        """Load the selected contact into the form and switch to 'edit' mode."""
        if not tree.focus():
            messagebox.showinfo('Info', 'Not a data selected!')
        else:
            self.email_cod = 'edit'
            item = tree.focus()
            self.uniq_id = tree.item(item)['values'][0]
            email_item = tree.item(item)['values'][1]
            name_item = tree.item(item)['values'][2]
            email_entry.insert(END, email_item)
            name_entry.insert(END, name_item)

    def append_one_db(self, added_email_entry, tree):
        """Replace the 'Added' field with the email of the selected contact."""
        if not tree.focus():
            messagebox.showinfo('Info', 'Not a data selected!')
        else:
            added_email_entry.delete(0, END)
            item = tree.focus()
            email_item = tree.item(item)['values'][1]
            added_email_entry.insert(END, email_item + '; ')

    def append_all_db(self, added_email_entry, tree):
        """Replace the 'Added' field with every email address in the DB."""
        added_email_entry.delete(0, END)
        self.cursor.execute('SELECT * FROM data')
        rows = self.cursor.fetchall()
        for row in rows:
            added_email_entry.insert(END, row[1] + '; ')

    def clear_attach_files(self, attach_entry):
        """Drop all queued attachments and clear their names from the form."""
        self.attach.clear()
        attach_entry.delete(0, END)

    def send_email_from_db(self, added_email_entry, app):
        """Start the bulk-send flow with the addresses collected on the DB page."""
        self.cod_page = 'from_db'
        if added_email_entry.get() == '':
            messagebox.showerror('Error', 'No email address to send!')
            return
        self.to_send_from_db = added_email_entry.get()
        app.destroy()
        # NOTE(review): login_page() runs a mainloop and returns None, so the
        # chained single_page() call below never fires here; navigation
        # actually happens inside login_auth. Confirm before refactoring.
        if self.login_page():
            self.single_page()

    def access_db_page(self):
        """Build and run the address-book page (CRUD on the data table)."""
        app = Tk()
        self.centering_page(app, 700, 550)
        app.title('Database')
        app.iconbitmap('icons/003-database.ico')
        app['background'] = '#075fab'
        app.resizable(False, False)
        db_label = Label(app, text='Database', font=('Terminal', 25), bg='#075fab', fg='White')
        db_label.place(relx=0.5, rely=0.1, anchor=CENTER)
        email_label = Label(app, text='Email:', font=('Roboto', 12), bg='#075fab', fg='White')
        email_label.place(relx=0.1, rely=0.25, anchor=CENTER)
        email_entry = Entry(app, font=('Roboto', 13), width=45)
        email_entry.place(relx=0.14, rely=0.25, anchor='w')
        name_label = Label(app, text='Name:', font=('Roboto', 12), bg='#075fab', fg='White')
        name_label.place(relx=0.1, rely=0.32, anchor=CENTER)
        name_entry = Entry(app, font=('Roboto', 13), width=45)
        name_entry.place(relx=0.14, rely=0.32, anchor='w')
        search_image = Image.open('icons/004-search.ico').resize((20, 20), Image.ANTIALIAS)
        search_image = ImageTk.PhotoImage(search_image)
        search_btn = Button(app, image=search_image, compound=CENTER, width=27, height=22, bg='White',
                            command=lambda: self.search_data_db('one', name_entry, email_entry, tree))
        search_btn.place(relx=0.77, rely=0.25, anchor='w')
        edit_image = Image.open('icons/014-edit.ico').resize((30, 30), Image.ANTIALIAS)
        edit_image = ImageTk.PhotoImage(edit_image)
        edit_btn = Button(app, image=edit_image, compound=CENTER, width=27, height=22, bg='White',
                          command=lambda: self.edit_data_db(name_entry, email_entry, tree))
        edit_btn.place(relx=0.86, rely=0.25, anchor='w')
        delete_image = Image.open('icons/005-delete.ico').resize((22, 22), Image.ANTIALIAS)
        delete_image = ImageTk.PhotoImage(delete_image)
        delete_btn = Button(app, image=delete_image, compound=CENTER, width=27, height=22, bg='White',
                            command=lambda: self.delete_data_db(email_entry, tree))
        delete_btn.place(relx=0.86, rely=0.32, anchor='w')
        save_image = Image.open('icons/012-save.ico').resize((20, 20), Image.ANTIALIAS)
        save_image = ImageTk.PhotoImage(save_image)
        save_btn = Button(app, image=save_image, width=27, height=22, bg='White',
                          command=lambda: self.insert_data_db(name_entry, email_entry, tree))
        save_btn.place(relx=0.77, rely=0.32, anchor='w')
        append_one_btn = Button(app, text='Append', font=('Roboto', 10), width=8, height=1, bg='White', fg='Black',
                                command=lambda: self.append_one_db(added_email_entry, tree))
        append_one_btn.place(relx=0.8, rely=0.82, anchor='w')
        append_all_btn = Button(app, text='Append all', font=('Roboto', 10), width=8, height=1, bg='White', fg='Black',
                                command=lambda: self.append_all_db(added_email_entry, tree))
        append_all_btn.place(relx=0.8, rely=0.88, anchor='w')
        added_email_label = Label(app, text='Added:', font=('Roboto', 12), bg='#075fab', fg='White')
        added_email_label.place(relx=0.1, rely=0.95, anchor=CENTER)
        added_email_entry = Entry(app, font=('Roboto', 12), width=50)
        added_email_entry.place(relx=0.14, rely=0.95, anchor='w')
        send_email_btn = Button(app, text='Send email', font=('Roboto', 10), width=8, height=1, bg='Green', fg='White',
                                command=lambda: self.send_email_from_db(added_email_entry, app))
        send_email_btn.place(relx=0.8, rely=0.95, anchor='w')
        back_icon = Image.open('icons/006-return.ico').resize((25, 25), Image.ANTIALIAS)
        back_icon = ImageTk.PhotoImage(back_icon)
        back_btn = Button(app, image=back_icon, compound=CENTER, width=25, height=25, bg='White',
                          command=lambda: self.nav_menu('main', app))
        back_btn.place(relx=0.05, rely=0.1, anchor=CENTER)
        frame = Frame(app, bg='White')
        frame.pack()
        frame.place(relx=0.49, rely=0.57, anchor=CENTER)
        tree = Treeview(frame, height=10)
        tree.pack(side=TOP, fill=NONE, expand=FALSE)
        tree['columns'] = ('index', 'email', 'name')
        tree.column('#0', width=0, stretch=NO)
        tree.column('index', width=80, stretch=NO)
        tree.column('email', width=250, stretch=NO)
        tree.column('name', width=255, stretch=NO)
        tree.heading('index', text='Index', anchor=CENTER)
        tree.heading('email', text='Email', anchor=CENTER)
        tree.heading('name', text='Name', anchor=CENTER)
        self.search_data_db('all', name_entry, email_entry, tree)
        app.mainloop()

    def login_page(self):
        """Build and run the login page (Gmail or Exchange)."""
        app = Tk()
        self.centering_page(app, 500, 250)
        app.iconbitmap('icons/007-login.ico')
        app.title('Login')
        app['background'] = '#075fab'
        app.resizable(False, False)
        info_label = Label(app, text='Log in to send emails', font=('Terminal', 12), bg='#075fab', fg='White')
        info_label.place(relx=0.5, rely=0.1, anchor=CENTER)
        # bg restored to the app colour; the source had a corrupted placeholder token here.
        email_label = Label(app, text='Email:', font=('Roboto', 12), bg='#075fab', fg='White')
        email_label.place(relx=0.15, rely=0.3, anchor='w')
        email_entry = Entry(app, font=('Roboto', 12), fg='black', width=32)
        email_entry.place(relx=0.25, rely=0.3, anchor='w')
        pass_label = Label(app, text='Password:', font=('Roboto', 12), bg='#075fab', fg='White')
        pass_label.place(relx=0.09, rely=0.45, anchor='w')
        pass_entry = Entry(app, font=('Roboto', 12), show='•', fg='black', width=32)
        pass_entry.place(relx=0.25, rely=0.45, anchor='w')
        show_pass_icon = Image.open('icons/013-show-pass.ico').resize((25, 25), Image.ANTIALIAS)
        show_pass_icon = ImageTk.PhotoImage(show_pass_icon)
        show_pass_btn = Button(app, image=show_pass_icon, compound=CENTER, height=18, width=22, bg='White',
                               command=lambda: self.show_pass(pass_entry))
        show_pass_btn.place(relx=0.85, rely=0.45, anchor='w')
        selected_radio = StringVar()
        gmail_radio = Radiobutton(app, text='Gmail', font=('Roboto', 11), value='gmail', fg='White', bg='#075fab', activebackground='#075fab',
                                  activeforeground='White', selectcolor='#075fab', variable=selected_radio)
        gmail_radio.place(relx=0.3, rely=0.58, anchor='w')
        gmail_radio.select()
        exchange_radio = Radiobutton(app, text='Exchange', font=('Roboto', 11), value='exchange', fg='White', bg='#075fab', activebackground='#075fab',
                                     activeforeground='White', selectcolor='#075fab', variable=selected_radio, command=lambda: self.server_page())
        exchange_radio.place(relx=0.55, rely=0.58, anchor='w')
        login_btn = Button(app, text='Login', font=('Roboto', 12), height=1, width=6, bg='White', fg='Black',
                           command=lambda: self.login_auth(app, email_entry, pass_entry, selected_radio.get()))
        login_btn.place(relx=0.5, rely=0.75, anchor=CENTER)
        back_icon = Image.open('icons/006-return.ico').resize((25, 25), Image.ANTIALIAS)
        back_icon = ImageTk.PhotoImage(back_icon)
        back_btn = Button(app, image=back_icon, compound=CENTER, width=25, height=25, bg='White',
                          command=lambda: self.nav_menu('main', app))
        back_btn.place(relx=0.05, rely=0.1, anchor=CENTER)
        app.mainloop()

    def single_page(self):
        """Build and run the compose page for a single email."""
        app = Tk()
        self.centering_page(app, 700, 450)
        app.iconbitmap('icons/008-send.ico')
        app.title('Send a single email')
        app['background'] = '#075fab'
        app.resizable(False, False)
        single_title_label = Label(app, text='Send a single email', font=('Terminal', 25), bg='#075fab', fg='White')
        single_title_label.place(relx=0.5, rely=0.1, anchor=CENTER)
        to_label = Label(app, text='To:', font=('Arial', 14), bg='#075fab', fg='White')
        to_label.place(relx=0.15, rely=0.25, anchor='e')
        to_entry = Entry(app, font=('Arial', 11), fg='black', width=50)
        to_entry.place(relx=0.16, rely=0.25, anchor='w')
        if self.cod_page == 'from_db':
            # Bulk flow: pre-fill the To field with the addresses chosen on the DB page.
            to_entry.delete(0, END)
            to_entry.insert(END, self.to_send_from_db)
        subject_label = Label(app, text='Subject:', font=('Arial', 14), bg='#075fab', fg='White')
        subject_label.place(relx=0.15, rely=0.35, anchor='e')
        subject_text = Text(app, font=('Arial', 11), height=1, width=50)
        subject_text.place(relx=0.16, rely=0.33, anchor='nw')
        content_label = Label(app, text='Content:', font=('Arial', 14), bg='#075fab', fg='White')
        content_label.place(relx=0.15, rely=0.45, anchor='e')
        content_text = Text(app, font=('Arial', 11), height=7, width=50)
        content_text.place(relx=0.16, rely=0.43, anchor='nw')
        attach_label = Label(app, text='Attach files:', font=('Arial', 14), bg='#075fab', fg='White')
        attach_label.place(relx=0.15, rely=0.75, anchor='e')
        attach_entry = Entry(app, font=('Arial', 11), width=50)
        attach_entry.place(relx=0.16, rely=0.73, anchor='nw')
        select_attach_btn = Button(app, text='Select', font=('Arial', 11), height=1, width=6, bg='White', fg='Black',
                                   command=lambda: self.attach_files(attach_entry))
        select_attach_btn.place(relx=0.75, rely=0.72, anchor='nw')
        clear_attach_btn = Button(app, text='Clear Attach', font=('Arial', 11), height=1, width=9, bg='White', fg='Black',
                                  command=lambda: self.clear_attach_files(attach_entry))
        clear_attach_btn.place(relx=0.85, rely=0.72, anchor='nw')
        send_btn = Button(app, text='Send email', font=('Roboto', 11), height=1, width=8, bg='green', fg='White',
                          command=lambda: self.send_email(app, to_entry, subject_text, content_text, attach_entry))
        send_btn.place(relx=0.5, rely=0.86, anchor=CENTER)
        back_icon = Image.open('icons/006-return.ico').resize((25, 25), Image.ANTIALIAS)
        back_icon = ImageTk.PhotoImage(back_icon)
        back_btn = Button(app, image=back_icon, compound=CENTER, width=25, height=25, bg='White',
                          command=lambda: self.nav_menu('main', app))
        back_btn.place(relx=0.05, rely=0.1, anchor=CENTER)
        app.mainloop()

    def main_page(self):
        """Build and run the main menu (single send, DB access, exit)."""
        app = Tk()
        self.centering_page(app, 700, 450)
        app.iconbitmap('icons/009-home.ico')
        app.title('Email Sender')
        app['background'] = '#075fab'
        app.resizable(False, False)
        app_title_label = Label(app, text='Email', font=('Terminal', 50), bg='#075fab', fg='White')
        app_title_label.place(relx=0.4, rely=0.2, anchor=CENTER)
        app_title_label2 = Label(app, text='Sender', font=('Terminal', 50), bg='#075fab', fg='White')
        app_title_label2.place(relx=0.6, rely=0.37, anchor=CENTER)
        app_image = Image.open('icons/010-mail.ico').resize((100, 100), Image.ANTIALIAS)
        app_image = ImageTk.PhotoImage(app_image)
        app_image_label = Label(app, image=app_image, bg='#075fab')
        app_image_label.place(relx=0.65, rely=0.18, anchor=CENTER)
        single_icon = Image.open('icons/008-send.ico').resize((40, 40), Image.ANTIALIAS)
        single_icon = ImageTk.PhotoImage(single_icon)
        single_btn = Button(app, text=' Send a single\n email', image=single_icon, compound=LEFT, font=('Roboto', 14), bg='White', fg='Black', height=75, width=180,
                            command=lambda: self.nav_menu('login', app))
        single_btn.place(relx=0.2, rely=0.7, anchor=CENTER)
        access_db_icon = Image.open('icons/003-database.ico').resize((30, 30), Image.ANTIALIAS)
        access_db_icon = ImageTk.PhotoImage(access_db_icon)
        access_db_btn = Button(app, text=' Access emails\n DB', image=access_db_icon, compound=LEFT, font=('Roboto', 14),
                               bg='White', fg='Black', height=75, width=180, command=lambda: self.nav_menu('acess_db', app))
        access_db_btn.place(relx=0.5, rely=0.7, anchor=CENTER)
        exit_icon = Image.open('icons/011-exit.ico').resize((30, 30), Image.ANTIALIAS)
        exit_icon = ImageTk.PhotoImage(exit_icon)
        exit_btn = Button(app, text=' Exit', image=exit_icon, compound=LEFT, font=('Roboto', 14), bg='White', fg='Black', height=75, width=180,
                          command=lambda: self.nav_menu('', app))
        exit_btn.place(relx=0.8, rely=0.7, anchor=CENTER)
        app.mainloop()
if __name__ == "__main__":
    # Entry point: build the app (opens the SQLite connection) and show the main menu.
    email = EmailSender()
    email.main_page()
functions.py | vektor8891/video-editor | 2 | 6620538 | import moviepy.editor as movie
import os
import re
import pandas as pd
import datetime
import time
# open current directory
# Run relative to this script's folder so the relative input/output paths resolve.
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Shared stock clips stitched around every exercise clip (loaded once at import).
clip_intro = movie.VideoFileClip('input/intro.mp4')
clip_action = movie.VideoFileClip('input/call-to-action-up.mp4')
clip_outro = movie.VideoFileClip('input/outro.mp4')
def get_videos():
    """Load the per-video metadata sheet ('videos.xlsx') as a DataFrame."""
    videos = pd.read_excel('videos.xlsx')
    return videos
def get_video_row(video_id: int):
    """Return the single row of videos.xlsx whose Id matches, squeezed to a Series."""
    videos = get_videos()
    return videos[videos.Id == video_id].squeeze()
def get_video_column(video_id: int, column: str):
    """Look up a single column value for the video with the given id."""
    return get_video_row(video_id=video_id)[column]
def get_clips():
    """Load the per-clip sheet ('clips_final.xlsx') as a DataFrame."""
    clips = pd.read_excel('clips_final.xlsx')
    return clips
def get_video_path(video_id: int):
    """Return the path of the raw recording in 'input/regi' whose filename
    starts with `video_id` (before the first space or dash).

    Raises ValueError when no matching file exists.
    """
    for file_name in os.listdir('input/regi'):
        leading_token = re.split(' |-', file_name)[0]
        try:
            file_index = int(leading_token)
        except ValueError:
            continue  # filename does not start with a number
        if file_index == video_id:
            return f'input/regi/{file_name}'
    raise ValueError(f'Video #{video_id} not found.')
def get_sec(time_str: str):
    """Convert an 'mm:ss:ff' or 'hh:mm:ss:ff' timestamp (30 fps frames) to seconds.

    Returns the total rounded to two decimals. Any other format silently
    yields 0.0, matching the original fallback behaviour.
    """
    parts = time_str.split(':')
    hours = minutes = seconds = frames = 0
    if len(parts) == 4:
        hours, minutes, seconds, frames = parts
    elif len(parts) == 3:
        minutes, seconds, frames = parts
    total = 3600 * int(hours) + 60 * int(minutes) + int(seconds) + int(frames) / 30
    return round(total, 2)
def trim_video_clip(video_id: int, clip_id: int, clip_path: str):
    """Cut clip number `clip_id` out of raw video `video_id` and write it to clip_path.

    The Start/End spreadsheet cells hold one newline-separated timestamp per
    clip ('mm:ss:ff' or 'hh:mm:ss:ff'); get_sec converts them to seconds.
    """
    videos = get_videos()
    # Excel may parse the timestamp cells as non-string types; force text first.
    videos.Start = videos.Start.astype(str)
    videos.End = videos.End.astype(str)
    video_path = get_video_path(video_id=video_id)
    video = videos[videos.Id == video_id]
    start_str = video.Start.values[0].split('\n')[clip_id]
    end_str = video.End.values[0].split('\n')[clip_id]
    start = get_sec(start_str)
    end = get_sec(end_str)
    trimmed_clip = movie.VideoFileClip(video_path).subclip(start, end)
    trimmed_clip.write_videofile(clip_path)
    print(f'Clip exported to {clip_path}')
def get_clip_data():
    """Build a flat clip-level table from the per-video spreadsheet.

    Each video row lists its clips (and per-clip links) as newline-separated
    cells; this expands them into one row per clip with a sequential Id.
    """
    videos = get_videos()
    columns = ['Id', 'VideoId', 'ClipId', 'VideoName', 'ClipName', 'ClipPath',
               'ExerciseLink', 'ShortLink', 'YoutubeLink', 'CardLinks', 'Tags']
    records = []
    clip_index = 0
    for index, video in videos.iterrows():
        for clip_id, clip_name in enumerate(video.Clips.split('\n')):
            clip_path = f'output/raw/video{video.Id:02}_clip{clip_id:02}.mp4'
            exercise_link = video.ExerciseLink.split('\n')[clip_id]
            short_link = video.ShortLink.split('\n')[clip_id]
            card_links = get_card_links(video_id=video.Id, clip_id=clip_id)
            records.append({
                'Id': clip_index,
                'VideoId': video.Id,
                'ClipId': clip_id,
                'VideoName': video.Name,
                'ClipName': clip_name,
                'ClipPath': clip_path,
                'ExerciseLink': exercise_link,
                'ShortLink': short_link,
                'YoutubeLink': video.YoutubeLink,
                'Tags': video.Tags,
                'CardLinks': '\n'.join(card_links) if len(card_links) > 0 else '',
            })
            clip_index = clip_index + 1
    # DataFrame.append was deprecated and removed in pandas 2.0; build the
    # frame once from the collected records instead of appending row by row.
    return pd.DataFrame(records, columns=columns)
def trim_video_clips(video_ids: list):
    """Export every clip of the given videos, skipping files already rendered."""
    videos = get_videos()
    for index, video in videos[videos.Id.isin(video_ids)].iterrows():
        for clip_id, clip in enumerate(video.Clips.split('\n')):
            clip_path = f'output/raw/video{video.Id:02}_clip{clip_id:02}.mp4'
            # Re-running is cheap: existing exports are left untouched.
            if not os.path.isfile(clip_path):
                trim_video_clip(video_id=video.Id, clip_id=clip_id,
                                clip_path=clip_path)
            else:
                print(f'Skipping {clip_path} (already done)')
def add_intro_outro(clip_ids: list):
    """Wrap each exported clip with the shared intro, call-to-action and outro.

    NOTE(review): rows are expected to provide a VideoTitle column — confirm
    against clips_final.xlsx (get_clip_data emits ClipName/VideoName instead).
    """
    clips = get_clips()
    for index, clip in clips[clips.Id.isin(clip_ids)].iterrows():
        final_path = f"output/final/{clip.VideoTitle}.mp4"
        # Skip clips already rendered so the job can be resumed.
        if not os.path.isfile(final_path):
            clip_exercise = movie.VideoFileClip(clip.ClipPath)
            final_clip = movie.concatenate_videoclips([clip_intro,
                                                       clip_exercise,
                                                       clip_action,
                                                       clip_outro])
            final_clip.write_videofile(final_path)
            print(f"Clip {clip.Id} exported to {final_path}")
        else:
            print(f'Skipping {final_path} (already done)')
def get_time_diff_seconds(t_start: str, t_end: str, t_middle: str):
    """Seconds from t_start to t_middle, or None when t_middle falls outside
    the [t_start, t_end] window.

    t_middle carries no frame field, so ':00' is appended before conversion.
    """
    start = get_sec(t_start)
    end = get_sec(t_end)
    middle = get_sec(f'{t_middle}:00')
    if start <= middle <= end:
        return round(middle - start)
    return None
def get_card_links(video_id: int, clip_id: int):
    """Build YouTube card entries ('H:MM:SS - id. name') for one clip.

    Each line of the Links cell looks like '<mm:ss> - <video id>'. The
    timestamps are relative to the raw recording; only links inside the
    clip's Start/End window are kept, shifted by the clip start plus the
    4-second intro that is prepended on export.
    """
    videos = get_videos()
    video = videos[videos.Id == video_id].squeeze()
    clip_start = str(video.Start).split('\n')[clip_id]
    clip_end = str(video.End).split('\n')[clip_id]
    card_links = []
    intro_length = 4  # seconds of intro prepended by add_intro_outro
    for link in str(video.Links).split('\n'):
        if len(link.split('-')) > 1:
            link_video_id = int(link.split('-')[1].strip())
            if link_video_id > 0:
                link_timestamp = link.split('-')[0].strip()
                link_diff = get_time_diff_seconds(t_start=clip_start,
                                                  t_end=clip_end,
                                                  t_middle=link_timestamp)
                # NOTE(review): a link exactly at the clip start (diff == 0)
                # is dropped by this truthiness test — confirm intended.
                if link_diff:
                    total_diff = link_diff + intro_length
                    str_diff = str(datetime.timedelta(seconds=total_diff))
                    link_name = get_video_column(video_id=link_video_id,
                                                 column='Name')
                    card_link = f'{str_diff} - {link_video_id}.' \
                                f' {link_name}'
                    card_links.append(card_link)
    return card_links
| import moviepy.editor as movie
import os
import re
import pandas as pd
import datetime
import time
# open current directory
# Run relative to this script's folder so the relative input/output paths resolve.
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Shared stock clips stitched around every exercise clip (loaded once at import).
clip_intro = movie.VideoFileClip('input/intro.mp4')
clip_action = movie.VideoFileClip('input/call-to-action-up.mp4')
clip_outro = movie.VideoFileClip('input/outro.mp4')
def get_videos():
    """Load the per-video metadata sheet ('videos.xlsx') as a DataFrame."""
    videos = pd.read_excel('videos.xlsx')
    return videos
def get_video_row(video_id: int):
    """Return the single row of videos.xlsx whose Id matches, squeezed to a Series."""
    videos = get_videos()
    return videos[videos.Id == video_id].squeeze()
def get_video_column(video_id: int, column: str):
    """Look up a single column value for the video with the given id."""
    return get_video_row(video_id=video_id)[column]
def get_clips():
    """Load the per-clip sheet ('clips_final.xlsx') as a DataFrame."""
    clips = pd.read_excel('clips_final.xlsx')
    return clips
def get_video_path(video_id: int):
    """Return the path of the raw recording in 'input/regi' whose filename
    starts with `video_id` (before the first space or dash).

    Raises ValueError when no matching file exists.
    """
    for file_name in os.listdir('input/regi'):
        leading_token = re.split(' |-', file_name)[0]
        try:
            file_index = int(leading_token)
        except ValueError:
            continue  # filename does not start with a number
        if file_index == video_id:
            return f'input/regi/{file_name}'
    raise ValueError(f'Video #{video_id} not found.')
def get_sec(time_str: str):
    """Convert an 'mm:ss:ff' or 'hh:mm:ss:ff' timestamp (30 fps frames) to seconds.

    Returns the total rounded to two decimals. Any other format silently
    yields 0.0, matching the original fallback behaviour.
    """
    parts = time_str.split(':')
    hours = minutes = seconds = frames = 0
    if len(parts) == 4:
        hours, minutes, seconds, frames = parts
    elif len(parts) == 3:
        minutes, seconds, frames = parts
    total = 3600 * int(hours) + 60 * int(minutes) + int(seconds) + int(frames) / 30
    return round(total, 2)
def trim_video_clip(video_id: int, clip_id: int, clip_path: str):
    """Cut clip number `clip_id` out of raw video `video_id` and write it to clip_path.

    The Start/End spreadsheet cells hold one newline-separated timestamp per
    clip ('mm:ss:ff' or 'hh:mm:ss:ff'); get_sec converts them to seconds.
    """
    videos = get_videos()
    # Excel may parse the timestamp cells as non-string types; force text first.
    videos.Start = videos.Start.astype(str)
    videos.End = videos.End.astype(str)
    video_path = get_video_path(video_id=video_id)
    video = videos[videos.Id == video_id]
    start_str = video.Start.values[0].split('\n')[clip_id]
    end_str = video.End.values[0].split('\n')[clip_id]
    start = get_sec(start_str)
    end = get_sec(end_str)
    trimmed_clip = movie.VideoFileClip(video_path).subclip(start, end)
    trimmed_clip.write_videofile(clip_path)
    print(f'Clip exported to {clip_path}')
def get_clip_data():
    """Build a flat clip-level table from the per-video spreadsheet.

    Each video row lists its clips (and per-clip links) as newline-separated
    cells; this expands them into one row per clip with a sequential Id.
    """
    videos = get_videos()
    columns = ['Id', 'VideoId', 'ClipId', 'VideoName', 'ClipName', 'ClipPath',
               'ExerciseLink', 'ShortLink', 'YoutubeLink', 'CardLinks', 'Tags']
    records = []
    clip_index = 0
    for index, video in videos.iterrows():
        for clip_id, clip_name in enumerate(video.Clips.split('\n')):
            clip_path = f'output/raw/video{video.Id:02}_clip{clip_id:02}.mp4'
            exercise_link = video.ExerciseLink.split('\n')[clip_id]
            short_link = video.ShortLink.split('\n')[clip_id]
            card_links = get_card_links(video_id=video.Id, clip_id=clip_id)
            records.append({
                'Id': clip_index,
                'VideoId': video.Id,
                'ClipId': clip_id,
                'VideoName': video.Name,
                'ClipName': clip_name,
                'ClipPath': clip_path,
                'ExerciseLink': exercise_link,
                'ShortLink': short_link,
                'YoutubeLink': video.YoutubeLink,
                'Tags': video.Tags,
                'CardLinks': '\n'.join(card_links) if len(card_links) > 0 else '',
            })
            clip_index = clip_index + 1
    # DataFrame.append was deprecated and removed in pandas 2.0; build the
    # frame once from the collected records instead of appending row by row.
    return pd.DataFrame(records, columns=columns)
def trim_video_clips(video_ids: list):
    """Export every clip of the given videos, skipping files already rendered."""
    videos = get_videos()
    for index, video in videos[videos.Id.isin(video_ids)].iterrows():
        for clip_id, clip in enumerate(video.Clips.split('\n')):
            clip_path = f'output/raw/video{video.Id:02}_clip{clip_id:02}.mp4'
            # Re-running is cheap: existing exports are left untouched.
            if not os.path.isfile(clip_path):
                trim_video_clip(video_id=video.Id, clip_id=clip_id,
                                clip_path=clip_path)
            else:
                print(f'Skipping {clip_path} (already done)')
def add_intro_outro(clip_ids: list):
    """Wrap each exported clip with the shared intro, call-to-action and outro.

    NOTE(review): rows are expected to provide a VideoTitle column — confirm
    against clips_final.xlsx (get_clip_data emits ClipName/VideoName instead).
    """
    clips = get_clips()
    for index, clip in clips[clips.Id.isin(clip_ids)].iterrows():
        final_path = f"output/final/{clip.VideoTitle}.mp4"
        # Skip clips already rendered so the job can be resumed.
        if not os.path.isfile(final_path):
            clip_exercise = movie.VideoFileClip(clip.ClipPath)
            final_clip = movie.concatenate_videoclips([clip_intro,
                                                       clip_exercise,
                                                       clip_action,
                                                       clip_outro])
            final_clip.write_videofile(final_path)
            print(f"Clip {clip.Id} exported to {final_path}")
        else:
            print(f'Skipping {final_path} (already done)')
def get_time_diff_seconds(t_start: str, t_end: str, t_middle: str):
    """Seconds from t_start to t_middle, or None when t_middle falls outside
    the [t_start, t_end] window.

    t_middle carries no frame field, so ':00' is appended before conversion.
    """
    start = get_sec(t_start)
    end = get_sec(t_end)
    middle = get_sec(f'{t_middle}:00')
    if start <= middle <= end:
        return round(middle - start)
    return None
def get_card_links(video_id: int, clip_id: int):
    """Build YouTube card entries ('H:MM:SS - id. name') for one clip.

    Each line of the Links cell looks like '<mm:ss> - <video id>'. The
    timestamps are relative to the raw recording; only links inside the
    clip's Start/End window are kept, shifted by the clip start plus the
    4-second intro that is prepended on export.
    """
    videos = get_videos()
    video = videos[videos.Id == video_id].squeeze()
    clip_start = str(video.Start).split('\n')[clip_id]
    clip_end = str(video.End).split('\n')[clip_id]
    card_links = []
    intro_length = 4  # seconds of intro prepended by add_intro_outro
    for link in str(video.Links).split('\n'):
        if len(link.split('-')) > 1:
            link_video_id = int(link.split('-')[1].strip())
            if link_video_id > 0:
                link_timestamp = link.split('-')[0].strip()
                link_diff = get_time_diff_seconds(t_start=clip_start,
                                                  t_end=clip_end,
                                                  t_middle=link_timestamp)
                # NOTE(review): a link exactly at the clip start (diff == 0)
                # is dropped by this truthiness test — confirm intended.
                if link_diff:
                    total_diff = link_diff + intro_length
                    str_diff = str(datetime.timedelta(seconds=total_diff))
                    link_name = get_video_column(video_id=link_video_id,
                                                 column='Name')
                    card_link = f'{str_diff} - {link_video_id}.' \
                                f' {link_name}'
                    card_links.append(card_link)
    return card_links
| en | 0.303964 | # open current directory #{video_id} not found.') | 2.720887 | 3 |
Chapter13/Docker/app_tests.py | shalevy1/Flask-Framework-Cookbook-Second-Edition | 42 | 6620539 | <filename>Chapter13/Docker/app_tests.py
import os
from my_app import app, db
import unittest
from unittest import mock
import tempfile
import geoip2.records
import coverage
cov = coverage.coverage(
    # Measure only application code: exclude installed site-packages and this
    # test file itself. NOTE(review): the omit path is machine-specific (an
    # absolute virtualenv path) — verify it matches the local environment.
    omit = [
        '/Users/shalabh.aggarwal/workspace/cookbook10/lib/python3.6/site-packages/*',
        'app_tests.py'
    ]
)
cov.start()
class CatalogTestCase(unittest.TestCase):
    """Integration tests for the catalog app against a throwaway SQLite DB.

    setUp patches geoip2 so product pages resolve a fixed timezone without
    touching a real GeoIP database.
    """

    def setUp(self):
        """Create a fresh temp DB, a test client, and the geoip mocks."""
        # mkstemp returns (fd, path); close the raw fd so it doesn't leak —
        # only the path is needed (SQLAlchemy opens the file itself).
        db_fd, self.test_db_file = tempfile.mkstemp()
        os.close(db_fd)
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + self.test_db_file
        app.config['TESTING'] = True
        app.config['WTF_CSRF_ENABLED'] = False
        self.app = app.test_client()
        # Fake geoip City result with a fixed timezone.
        self.geoip_city_patcher = mock.patch('geoip2.models.City',
            location=geoip2.records.Location(time_zone = 'America/Los_Angeles')
        )
        PatchedGeoipCity = self.geoip_city_patcher.start()
        self.geoip_reader_patcher = mock.patch('geoip2.database.Reader')
        PatchedGeoipReader = self.geoip_reader_patcher.start()
        PatchedGeoipReader().city.return_value = PatchedGeoipCity
        db.create_all()

    def tearDown(self):
        """Undo the geoip patches and remove the temp database file."""
        self.geoip_city_patcher.stop()
        self.geoip_reader_patcher.stop()
        os.remove(self.test_db_file)

    def test_home(self):
        """Home page responds with 200."""
        rv = self.app.get('/')
        self.assertEqual(rv.status_code, 200)

    def test_products(self):
        """Products list renders with no pagination links on an empty DB."""
        rv = self.app.get('/en/products')
        self.assertEqual(rv.status_code, 200)
        self.assertTrue('No Previous Page' in rv.data.decode("utf-8"))
        self.assertTrue('No Next Page' in rv.data.decode("utf-8"))

    def test_create_category(self):
        """Category form validates, creates, and lists the new category."""
        rv = self.app.get('/en/category-create')
        self.assertEqual(rv.status_code, 200)

        # Empty submission must fail validation.
        rv = self.app.post('/en/category-create')
        self.assertEqual(rv.status_code, 200)
        self.assertTrue('This field is required.' in rv.data.decode("utf-8"))

        rv = self.app.get('/en/categories')
        self.assertEqual(rv.status_code, 200)
        self.assertFalse('Phones' in rv.data.decode("utf-8"))

        rv = self.app.post('/en/category-create', data={
            'name': 'Phones',
        })
        self.assertEqual(rv.status_code, 302)

        rv = self.app.get('/en/categories')
        self.assertEqual(rv.status_code, 200)
        self.assertTrue('Phones' in rv.data.decode("utf-8"))

        rv = self.app.get('/en/category/1')
        self.assertEqual(rv.status_code, 200)
        self.assertTrue('Phones' in rv.data.decode("utf-8"))

    def test_create_product(self):
        """Product creation works and the detail page shows the mocked timezone."""
        rv = self.app.get('/en/product-create')
        self.assertEqual(rv.status_code, 200)

        rv = self.app.post('/en/product-create')
        self.assertEqual(rv.status_code, 200)
        self.assertTrue('This field is required.' in rv.data.decode("utf-8"))

        # Create a category to be used in product creation
        rv = self.app.post('/en/category-create', data={
            'name': 'Phones',
        })
        self.assertEqual(rv.status_code, 302)

        rv = self.app.post('/en/product-create', data={
            'name': 'iPhone 5',
            'price': 549.49,
            'company': 'Apple',
            'category': 1,
            'image': tempfile.NamedTemporaryFile()
        })
        self.assertEqual(rv.status_code, 302)

        rv = self.app.get('/en/product/1')
        self.assertEqual(rv.status_code, 200)
        self.assertTrue('iPhone 5' in rv.data.decode("utf-8"))
        self.assertTrue('America/Los_Angeles' in rv.data.decode("utf-8"))

    def test_search_product(self):
        """Search matches by name and does not return non-matching products."""
        # Create a category to be used in product creation
        rv = self.app.post('/en/category-create', data={
            'name': 'Phones',
        })
        self.assertEqual(rv.status_code, 302)

        # Create a product
        rv = self.app.post('/en/product-create', data={
            'name': 'iPhone 5',
            'price': 549.49,
            'company': 'Apple',
            'category': 1,
            'image': tempfile.NamedTemporaryFile()
        })
        self.assertEqual(rv.status_code, 302)

        # Create another product
        rv = self.app.post('/en/product-create', data={
            'name': 'Galaxy S5',
            'price': 549.49,
            'company': 'Samsung',
            'category': 1,
            'image': tempfile.NamedTemporaryFile()
        })
        self.assertEqual(rv.status_code, 302)

        self.app.get('/')
        rv = self.app.get('/en/product-search?name=iPhone')
        self.assertEqual(rv.status_code, 200)
        self.assertTrue('iPhone 5' in rv.data.decode("utf-8"))
        self.assertFalse('Galaxy S5' in rv.data.decode("utf-8"))

        rv = self.app.get('/en/product-search?name=iPhone 6')
        self.assertEqual(rv.status_code, 200)
        self.assertFalse('iPhone 6' in rv.data.decode("utf-8"))
if __name__ == '__main__':
    try:
        unittest.main()
    finally:
        # Always finalize coverage, even when the test run raises or exits:
        # stop measuring, persist the data, print a console summary, write
        # the HTML report into ./coverage, then clear the collected data.
        cov.stop()
        cov.save()
        cov.report()
        cov.html_report(directory = 'coverage')
        cov.erase()
| <filename>Chapter13/Docker/app_tests.py
import os
from my_app import app, db
import unittest
from unittest import mock
import tempfile
import geoip2.records
import coverage
# Measure coverage of the application code only.  Third-party packages and
# this test module itself are excluded.  Fix: the original omit entry was an
# absolute path to one developer's virtualenv
# ('/Users/shalabh.aggarwal/workspace/cookbook10/...'), which matches nothing
# on any other machine; a glob pattern works on every checkout.
cov = coverage.coverage(
    omit = [
        '*/site-packages/*',
        'app_tests.py'
    ]
)
cov.start()
class CatalogTestCase(unittest.TestCase):
    """Functional tests for the catalog Flask application.

    Every test runs against a fresh temporary SQLite database, and the
    geoip2 reader is mocked out so no MaxMind database or network access
    is required (lookups always report the America/Los_Angeles time zone).
    """
    def setUp(self):
        """Point the app at a temp DB, enable test mode, and mock geoip2."""
        self.test_db_file = tempfile.mkstemp()[1]
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + self.test_db_file
        app.config['TESTING'] = True
        # CSRF tokens would make the bare form posts below fail; disable them.
        app.config['WTF_CSRF_ENABLED'] = False
        self.app = app.test_client()
        # Any geoip2 city lookup resolves to a fixed Los Angeles location.
        self.geoip_city_patcher = mock.patch('geoip2.models.City',
            location=geoip2.records.Location(time_zone = 'America/Los_Angeles')
        )
        PatchedGeoipCity = self.geoip_city_patcher.start()
        self.geoip_reader_patcher = mock.patch('geoip2.database.Reader')
        PatchedGeoipReader = self.geoip_reader_patcher.start()
        PatchedGeoipReader().city.return_value = PatchedGeoipCity
        db.create_all()
    def tearDown(self):
        """Undo the geoip2 mocks and remove the temporary database file."""
        self.geoip_city_patcher.stop()
        self.geoip_reader_patcher.stop()
        os.remove(self.test_db_file)
    def test_home(self):
        "Home page responds successfully"
        rv = self.app.get('/')
        self.assertEqual(rv.status_code, 200)
    def test_products(self):
        "Test Products list page"
        rv = self.app.get('/en/products')
        self.assertEqual(rv.status_code, 200)
        # An empty catalog has no page to move to in either direction.
        self.assertTrue('No Previous Page' in rv.data.decode("utf-8"))
        self.assertTrue('No Next Page' in rv.data.decode("utf-8"))
    def test_create_category(self):
        "Test creation of new category"
        rv = self.app.get('/en/category-create')
        self.assertEqual(rv.status_code, 200)
        # An empty submission must re-render with a validation error.
        rv = self.app.post('/en/category-create')
        self.assertEqual(rv.status_code, 200)
        self.assertTrue('This field is required.' in rv.data.decode("utf-8"))
        # The category must not exist before creation.
        rv = self.app.get('/en/categories')
        self.assertEqual(rv.status_code, 200)
        self.assertFalse('Phones' in rv.data.decode("utf-8"))
        # A valid submission redirects (302) away from the form.
        rv = self.app.post('/en/category-create', data={
            'name': 'Phones',
        })
        self.assertEqual(rv.status_code, 302)
        rv = self.app.get('/en/categories')
        self.assertEqual(rv.status_code, 200)
        self.assertTrue('Phones' in rv.data.decode("utf-8"))
        rv = self.app.get('/en/category/1')
        self.assertEqual(rv.status_code, 200)
        self.assertTrue('Phones' in rv.data.decode("utf-8"))
    def test_create_product(self):
        "Test creation of new product"
        rv = self.app.get('/en/product-create')
        self.assertEqual(rv.status_code, 200)
        # An empty submission must fail form validation.
        rv = self.app.post('/en/product-create')
        self.assertEqual(rv.status_code, 200)
        self.assertTrue('This field is required.' in rv.data.decode("utf-8"))
        # Create a category to be used in product creation
        rv = self.app.post('/en/category-create', data={
            'name': 'Phones',
        })
        self.assertEqual(rv.status_code, 302)
        rv = self.app.post('/en/product-create', data={
            'name': 'iPhone 5',
            'price': 549.49,
            'company': 'Apple',
            'category': 1,
            'image': tempfile.NamedTemporaryFile()
        })
        self.assertEqual(rv.status_code, 302)
        # The detail page shows the product and the mocked visitor time zone.
        rv = self.app.get('/en/product/1')
        self.assertEqual(rv.status_code, 200)
        self.assertTrue('iPhone 5' in rv.data.decode("utf-8"))
        self.assertTrue('America/Los_Angeles' in rv.data.decode("utf-8"))
    def test_search_product(self):
        "Test searching product"
        # Create a category to be used in product creation
        rv = self.app.post('/en/category-create', data={
            'name': 'Phones',
        })
        self.assertEqual(rv.status_code, 302)
        # Create a product
        rv = self.app.post('/en/product-create', data={
            'name': 'iPhone 5',
            'price': 549.49,
            'company': 'Apple',
            'category': 1,
            'image': tempfile.NamedTemporaryFile()
        })
        self.assertEqual(rv.status_code, 302)
        # Create another product
        rv = self.app.post('/en/product-create', data={
            'name': 'Galaxy S5',
            'price': 549.49,
            'company': 'Samsung',
            'category': 1,
            'image': tempfile.NamedTemporaryFile()
        })
        self.assertEqual(rv.status_code, 302)
        self.app.get('/')
        # A partial name only matches products that contain it.
        rv = self.app.get('/en/product-search?name=iPhone')
        self.assertEqual(rv.status_code, 200)
        self.assertTrue('iPhone 5' in rv.data.decode("utf-8"))
        self.assertFalse('Galaxy S5' in rv.data.decode("utf-8"))
        # A name matching nothing yields an empty result page.
        rv = self.app.get('/en/product-search?name=iPhone 6')
        self.assertEqual(rv.status_code, 200)
        self.assertFalse('iPhone 6' in rv.data.decode("utf-8"))
if __name__ == '__main__':
    try:
        unittest.main()
    finally:
        # Always finalize coverage, even when the test run raises or exits:
        # stop measuring, persist the data, print a console summary, write
        # the HTML report into ./coverage, then clear the collected data.
        cov.stop()
        cov.save()
        cov.report()
        cov.html_report(directory = 'coverage')
        cov.erase()
| en | 0.858494 | # Create a category to be used in product creation # Create a category to be used in product creation # Create a product # Create another product | 2.416996 | 2 |
from selenium import webdriver
from selenium.webdriver.common.by import By

# Demo page exercising the different element-locator strategies.
baseUrl = "https://forum-testing.herokuapp.com/v1.0/demo"

driver = webdriver.Firefox()
try:
    driver.get(baseUrl)

    # NOTE: find_element() raises NoSuchElementException when nothing
    # matches, so each print is only reached after a successful lookup;
    # the `is not None` guards are kept for readability.
    elementById = driver.find_element(By.ID, "name")
    if elementById is not None:
        print("We found an element by Id")

    elementByXpath = driver.find_element(By.XPATH, "//input[@id='displayed-text']")
    if elementByXpath is not None:
        print("We found an element by XPATH")

    elementByLinkText = driver.find_element(By.LINK_TEXT, "Open Tab")
    if elementByLinkText is not None:
        print("We found an element by Link Text")
finally:
    # Fix: always release the browser; the original script leaked the
    # Firefox process whenever any locator raised.
    driver.quit()
from selenium import webdriver
from selenium.webdriver.common.by import By

# Demo page used to try out the different locator strategies.
baseUrl = "https://forum-testing.herokuapp.com/v1.0/demo"

driver = webdriver.Firefox()
driver.get(baseUrl)

# Look the elements up via three different strategies and report each
# successful lookup.
element_by_id = driver.find_element(By.ID, "name")
if element_by_id is not None:
    print("We found an element by Id")

element_by_xpath = driver.find_element(By.XPATH, "//input[@id='displayed-text']")
if element_by_xpath is not None:
    print("We found an element by XPATH")

element_by_link_text = driver.find_element(By.LINK_TEXT, "Open Tab")
if element_by_link_text is not None:
    print("We found an element by Link Text")
| none | 1 | 3.361406 | 3 | |
heap/kthlargestElementinarray_55.py | miiiingi/algorithmstudy | 0 | 6620541 | <filename>heap/kthlargestElementinarray_55.py
# Problem: insert a value into an already-sorted list, keeping it sorted.
# (The original comment described "extracting the kth largest element",
# which does not match what this code does.)
import bisect


def solution(L, x):
    """Insert x into the sorted list L in place and return L.

    Fixes two bugs in the original linear scan: an empty L silently
    dropped x, and a value smaller than L[0] was inserted twice (the
    insert at index 0 had no break, so the shifted element matched the
    middle condition again on the next iteration).
    """
    bisect.insort(L, x)
    return L


answer = solution([20, 37, 58, 72, 91], 65)
print(answer)
# Problem: insert a value into an already-sorted list, keeping it sorted.
# (The original comment described "extracting the kth largest element",
# which does not match what this code does.)
import bisect


def solution(L, x):
    """Insert x into the sorted list L in place and return L.

    Fixes two bugs in the original linear scan: an empty L silently
    dropped x, and a value smaller than L[0] was inserted twice (the
    insert at index 0 had no break, so the shifted element matched the
    middle condition again on the next iteration).
    """
    bisect.insort(L, x)
    return L


answer = solution([20, 37, 58, 72, 91], 65)
print(answer)
first-cifar/models/fitnet1.py | mihirparadkar/deeplearning-experiments | 0 | 6620542 | import chainer
import chainer.functions as F
import chainer.links as L
import chainer.initializers as I
import numpy as np
initW = I.Orthogonal(dtype=np.float32)
class FitNet1(chainer.Chain):
    """FitNet-1 style convolutional network for small-image classification.

    Three stages of three 3x3 convolutions (16 -> 32 -> 48/64 channels);
    the first two stages are followed by 2x2 max pooling, the last by
    global average pooling and a single linear classifier.  All weights
    use the module-level orthogonal initializer `initW`.
    The 8x8 average pool assumes 32x32 inputs (e.g. CIFAR) after the two
    2x2 poolings -- TODO confirm against the training pipeline.
    """
    def __init__(self, class_labels=10):
        # class_labels: number of output classes (default 10, e.g. CIFAR-10).
        super(FitNet1, self).__init__()
        with self.init_scope():
            # Stage 1: 3 -> 16 channels.
            self.conv1_1 = L.Convolution2D(3,16,ksize=(3,3),pad=1,initialW=initW)
            self.conv1_2 = L.Convolution2D(16,16,ksize=(3,3),pad=1,initialW=initW)
            self.conv1_3 = L.Convolution2D(16,16,ksize=(3,3),pad=1,initialW=initW)
            # Stage 2: 16 -> 32 channels.
            self.conv2_1 = L.Convolution2D(16,32,ksize=(3,3),pad=1,initialW=initW)
            self.conv2_2 = L.Convolution2D(32,32,ksize=(3,3),pad=1,initialW=initW)
            self.conv2_3 = L.Convolution2D(32,32,ksize=(3,3),pad=1,initialW=initW)
            # Stage 3: 32 -> 48 -> 64 channels.
            self.conv3_1 = L.Convolution2D(32,48,ksize=(3,3),pad=1,initialW=initW)
            self.conv3_2 = L.Convolution2D(48,48,ksize=(3,3),pad=1,initialW=initW)
            self.conv3_3 = L.Convolution2D(48,64,ksize=(3,3),pad=1,initialW=initW)
            # Linear classifier over the 64 globally pooled features.
            self.fc1 = L.Linear(64, class_labels)
    def __call__(self, x):
        """Forward pass: return raw class scores (logits) for batch x."""
        x = F.relu(self.conv1_1(x))
        x = F.relu(self.conv1_2(x))
        x = F.relu(self.conv1_3(x))
        x = F.max_pooling_2d(x, ksize=2)
        x = F.relu(self.conv2_1(x))
        x = F.relu(self.conv2_2(x))
        x = F.relu(self.conv2_3(x))
        x = F.max_pooling_2d(x, ksize=2)
        x = F.relu(self.conv3_1(x))
        x = F.relu(self.conv3_2(x))
        x = F.relu(self.conv3_3(x))
        x = F.average_pooling_2d(x, ksize=8)
        return self.fc1(x)
| import chainer
import chainer.functions as F
import chainer.links as L
import chainer.initializers as I
import numpy as np
initW = I.Orthogonal(dtype=np.float32)
class FitNet1(chainer.Chain):
    """FitNet-1 convolutional classifier: three conv stages, two max
    poolings, global average pooling and one linear output layer."""
    def __init__(self, class_labels=10):
        super(FitNet1, self).__init__()
        with self.init_scope():
            self.conv1_1 = L.Convolution2D(3, 16, ksize=(3, 3), pad=1, initialW=initW)
            self.conv1_2 = L.Convolution2D(16, 16, ksize=(3, 3), pad=1, initialW=initW)
            self.conv1_3 = L.Convolution2D(16, 16, ksize=(3, 3), pad=1, initialW=initW)
            self.conv2_1 = L.Convolution2D(16, 32, ksize=(3, 3), pad=1, initialW=initW)
            self.conv2_2 = L.Convolution2D(32, 32, ksize=(3, 3), pad=1, initialW=initW)
            self.conv2_3 = L.Convolution2D(32, 32, ksize=(3, 3), pad=1, initialW=initW)
            self.conv3_1 = L.Convolution2D(32, 48, ksize=(3, 3), pad=1, initialW=initW)
            self.conv3_2 = L.Convolution2D(48, 48, ksize=(3, 3), pad=1, initialW=initW)
            self.conv3_3 = L.Convolution2D(48, 64, ksize=(3, 3), pad=1, initialW=initW)
            self.fc1 = L.Linear(64, class_labels)
    def __call__(self, x):
        """Compute class scores for a batch of images."""
        # Stage 1, then spatial downsampling.
        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.relu(self.conv1_3(h))
        h = F.max_pooling_2d(h, ksize=2)
        # Stage 2, then spatial downsampling.
        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.relu(self.conv2_3(h))
        h = F.max_pooling_2d(h, ksize=2)
        # Stage 3, then collapse the spatial grid to one feature vector.
        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        h = F.average_pooling_2d(h, ksize=8)
        return self.fc1(h)
| none | 1 | 2.703713 | 3 | |
utils/write_model_to_json.py | MohMehKo/time_domain_speech_enhancement | 7 | 6620543 | from keras.models import load_model
import simplejson
import pdb
import cPickle as pickle
import os
import h5py
hdf_file = '../model/model_weights_11_21_16_SNR_time_domain_time_domain_mlp_6500_4500_3500_2500_1500_256_bk.h5'
def validate_arg(arg):
    """Validate that `arg` names an existing, readable file.

    Prints a status line for each check and raises ValueError on failure;
    main() already catches ValueError.  Fix: the original wrapped the
    checks in try/except, but os.path.isfile and os.access return booleans
    instead of raising, so a missing or unreadable file was always
    reported as present and readable.
    """
    if not os.path.isfile(arg):
        print('file not found: %s' %arg)
        raise ValueError('file not found: %s' % arg)
    print('file exists: %s' %arg)
    if not os.access(arg, os.R_OK):
        print('file is not readable: %s' %arg)
        raise ValueError('file is not readable: %s' % arg)
    print('file is readable: %s' %arg)
def main():
    """Export the Keras model at `hdf_file` to a pretty-printed .json
    architecture file and a pickled list of weight arrays, using the same
    basename as the .h5 file."""
    try:
        validate_arg(hdf_file)
    except ValueError as e:
        print("some arguments are wrong:")
        print(str(e))
        return
    ## read model
    try:
        model =load_model(hdf_file)
        model.summary()
    except:  # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
        print('cant read the model')
        return
    # Round-trip through simplejson to pretty-print with sorted keys.
    model_json = model.to_json()
    with open(hdf_file[:-3] + '.json', 'w') as json_file:
        json_file.write(simplejson.dumps(simplejson.loads(model_json), indent = 4, sort_keys=True))
    # NOTE(review): the pickle file handle is never closed explicitly;
    # relies on interpreter cleanup.
    weight = model.get_weights()
    pickle.dump(weight, open(hdf_file[:-3] + '.pkl', 'wb' ) )
if __name__ == '__main__':
main() | from keras.models import load_model
import simplejson
import pdb
import cPickle as pickle
import os
import h5py
hdf_file = '../model/model_weights_11_21_16_SNR_time_domain_time_domain_mlp_6500_4500_3500_2500_1500_256_bk.h5'
def validate_arg(arg):
    """Validate that `arg` names an existing, readable file.

    Prints a status line for each check and raises ValueError on failure;
    main() already catches ValueError.  Fix: the original wrapped the
    checks in try/except, but os.path.isfile and os.access return booleans
    instead of raising, so a missing or unreadable file was always
    reported as present and readable.
    """
    if not os.path.isfile(arg):
        print('file not found: %s' %arg)
        raise ValueError('file not found: %s' % arg)
    print('file exists: %s' %arg)
    if not os.access(arg, os.R_OK):
        print('file is not readable: %s' %arg)
        raise ValueError('file is not readable: %s' % arg)
    print('file is readable: %s' %arg)
def main():
    """Export the Keras model at `hdf_file` to a pretty-printed .json
    architecture file and a pickled list of weight arrays, using the same
    basename as the .h5 file."""
    try:
        validate_arg(hdf_file)
    except ValueError as e:
        print("some arguments are wrong:")
        print(str(e))
        return
    ## read model
    try:
        model =load_model(hdf_file)
        model.summary()
    except:  # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
        print('cant read the model')
        return
    # Round-trip through simplejson to pretty-print with sorted keys.
    model_json = model.to_json()
    with open(hdf_file[:-3] + '.json', 'w') as json_file:
        json_file.write(simplejson.dumps(simplejson.loads(model_json), indent = 4, sort_keys=True))
    # NOTE(review): the pickle file handle is never closed explicitly;
    # relies on interpreter cleanup.
    weight = model.get_weights()
    pickle.dump(weight, open(hdf_file[:-3] + '.pkl', 'wb' ) )
if __name__ == '__main__':
main() | en | 0.83932 | ## read model | 2.455212 | 2 |
FWCore/GuiBrowsers/python/Vispa/Gui/FindDialog.py | NTrevisani/cmssw | 3 | 6620544 | import logging
from PyQt4.QtCore import QCoreApplication,Qt,SIGNAL
from PyQt4.QtGui import QDialog,QLabel,QLineEdit,QCheckBox,QPushButton,QVBoxLayout,QHBoxLayout,QMessageBox,QToolButton,QWidget,QLayout
from Vispa.Main.Application import Application
from Vispa.Share.ThreadChain import ThreadChain
class FindDialog(QDialog):
    """Dialog to find and/or filter objects by label, properties or scripts.

    The actual search is delegated to a find-algorithm object registered
    via setFindAlgorithm(); results are published through the Qt signals
    "found" (a single object) and "filtered" (the list of all matches).
    """
    def __init__(self,parent=None):
        logging.debug(__name__ +': __init__')
        QDialog.__init__(self,parent)
        self.setWindowFlags(Qt.Window)
        self.setWindowTitle("Find...")
        self._findAlgorithm=None
        self._properties=[]  # widget tuples, one per property criterion row
        self._scripts=[]  # widget tuples, one per script criterion row
        self._find=True
        self._filter=False
        self.fill()
    def fill(self):
        """Create all widgets, build the layouts and connect the signals."""
        logging.debug(__name__ +': fill')
        self._findLabelLabel = QLabel("Label: ")
        self._findLabelLineEdit = QLineEdit()
        self._findLabelLineEdit.setToolTip("Example: Particle1")
        self._caseSensitiveCheckBox=QCheckBox("Case sensitive")
        self._exactMatchCheckBox=QCheckBox("Exact match")
        self._helpButton = QPushButton("&Help")
        self._findPreviousButton = QPushButton("&Previous")
        self._findPreviousButton.hide()
        self._findNumberLabel = QLabel("?/?")  # later shows "current/total"
        self._findNumberLabel.hide()
        self._findNextButton = QPushButton("&Find")
        self._filterButton = QPushButton("&Filter")
        self._resetButton = QPushButton("&Reset")
        self._closeButton = QPushButton("&Close")
        self.setLayout(QVBoxLayout())
        self.layout().setSizeConstraint(QLayout.SetFixedSize)
        # layout1: label criterion row; layout3: options/help row;
        # layout4: action buttons.  Criterion rows are inserted in between.
        self._layout1=QHBoxLayout()
        self._layout3=QHBoxLayout()
        self._layout4=QHBoxLayout()
        self._layout1.setSizeConstraint(QLayout.SetDefaultConstraint)
        self._layout3.setSizeConstraint(QLayout.SetDefaultConstraint)
        self._layout4.setSizeConstraint(QLayout.SetDefaultConstraint)
        self.layout().addLayout(self._layout1)
        self.layout().addLayout(self._layout3)
        self.layout().addStretch()
        self.layout().addLayout(self._layout4)
        self._layout1.addWidget(self._findLabelLabel)
        self._layout1.addWidget(self._findLabelLineEdit)
        self._layout3.addWidget(self._helpButton)
        self._layout3.addStretch()
        self._layout3.addWidget(self._caseSensitiveCheckBox)
        self._layout3.addWidget(self._exactMatchCheckBox)
        self._layout4.addWidget(self._findPreviousButton)
        self._layout4.addWidget(self._findNumberLabel)
        self._layout4.addWidget(self._findNextButton)
        self._layout4.addWidget(self._filterButton)
        self._layout4.addWidget(self._resetButton)
        self._layout4.addStretch()
        self._layout4.addWidget(self._closeButton)
        self.connect(self._findLabelLineEdit, SIGNAL('textChanged(QString)'), self.edited)
        self.connect(self._caseSensitiveCheckBox, SIGNAL('stateChanged(int)'), self.edited)
        self.connect(self._exactMatchCheckBox, SIGNAL('stateChanged(int)'), self.edited)
        self.connect(self._findPreviousButton, SIGNAL('clicked(bool)'), self.findPrevious)
        self.connect(self._findNextButton, SIGNAL('clicked(bool)'), self.findNext)
        self.connect(self._filterButton, SIGNAL('clicked(bool)'), self.filter)
        self.connect(self._resetButton, SIGNAL('clicked(bool)'), self.reset)
        self.connect(self._helpButton, SIGNAL('clicked(bool)'), self.help)
        self.connect(self._closeButton, SIGNAL('clicked(bool)'), self.reject)
        # Start with one fixed (non-deletable) property row and script row.
        self._addStringProperty(False,False)
        self._addScript(False,False)
    def _removeProperty(self):
        """Slot: delete the property row whose '-' button sent the signal."""
        for property in self._properties:
            if self.sender() in property:
                self._remove(property)
                return
    def _remove(self,object):
        """Close a criterion row's widgets and drop it from the bookkeeping."""
        for o in object:
            if isinstance(o,QWidget):
                o.close()
        self.layout().removeItem(object[0])
        if object in self._properties:
            self._properties.remove(object)
        elif object in self._scripts:
            self._scripts.remove(object)
    def _addStringProperty(self,bool,deletable=True):
        """Append a property name/value criterion row.

        The first parameter (regrettably named `bool`, shadowing the
        builtin) absorbs the checked state of the clicked(bool) signal
        and is ignored.
        """
        layout2=QHBoxLayout()
        findPropertyNameLabel = QLabel("Property: ")
        findPropertyNameLineEdit = QLineEdit()
        findPropertyNameLineEdit.setToolTip("Example: Label = Particle1 ")
        findPropertyValueLabel = QLabel(" = ")
        findPropertyValueLineEdit = QLineEdit()
        findPropertyValueLineEdit.setToolTip("Example: Label = Particle1 ")
        propertyAdd = QToolButton()
        propertyAdd.setText("+")
        propertyDelete = QToolButton()
        propertyDelete.setText("-")
        # The initial row only shows "+", later rows only show "-".
        if deletable:
            propertyAdd.hide()
        else:
            propertyDelete.hide()
        layout2.addWidget(propertyAdd)
        layout2.addWidget(propertyDelete)
        layout2.addWidget(findPropertyNameLabel)
        layout2.addWidget(findPropertyNameLineEdit)
        layout2.addWidget(findPropertyValueLabel)
        layout2.addWidget(findPropertyValueLineEdit)
        self.connect(findPropertyNameLineEdit, SIGNAL('textChanged(QString)'), self.edited)
        self.connect(findPropertyValueLineEdit, SIGNAL('textChanged(QString)'), self.edited)
        self.connect(propertyAdd, SIGNAL('clicked(bool)'), self._addStringProperty)
        self.connect(propertyDelete, SIGNAL('clicked(bool)'), self._removeProperty)
        self.layout().insertLayout(len(self._properties)+len(self._scripts)+1,layout2)
        self._properties+=[(layout2,findPropertyNameLineEdit,findPropertyValueLineEdit,findPropertyNameLabel,findPropertyValueLabel,propertyAdd,propertyDelete)]
    def _removeScript(self):
        """Slot: delete the script row whose '-' button sent the signal."""
        for script in self._scripts:
            if self.sender() in script:
                self._remove(script)
                return
    def _addScript(self,bool,deletable=True):
        """Append a Python filter-script criterion row.

        The first parameter (named `bool`) absorbs the checked state of
        the clicked(bool) signal and is ignored.
        """
        layout2=QHBoxLayout()
        findScriptLabel = QLabel("Filter = ")
        findScriptLineEdit = QLineEdit("")
        findScriptLineEdit.setToolTip("Example: object.Label == 'Particle1' ")
        scriptAdd = QToolButton()
        scriptAdd.setText("+")
        scriptDelete = QToolButton()
        scriptDelete.setText("-")
        if deletable:
            scriptAdd.hide()
        else:
            scriptDelete.hide()
        layout2.addWidget(scriptAdd)
        layout2.addWidget(scriptDelete)
        layout2.addWidget(findScriptLabel)
        layout2.addWidget(findScriptLineEdit)
        self.connect(findScriptLineEdit, SIGNAL('textChanged(QString)'), self.edited)
        self.connect(scriptAdd, SIGNAL('clicked(bool)'), self._addScript)
        self.connect(scriptDelete, SIGNAL('clicked(bool)'), self._removeScript)
        self.layout().insertLayout(len(self._properties)+len(self._scripts)+1,layout2)
        self._scripts+=[(layout2,findScriptLineEdit,findScriptLabel,scriptAdd,scriptDelete)]
    def onScreen(self, filter=False, find=True):
        """Show the dialog configured for finding, filtering, or both."""
        logging.debug(__name__ +': onScreen')
        self._find=find
        self._filter=filter
        if self._find and self._filter:
            self._findNextButton.setDefault(True)
            self.setWindowTitle("Find/Filter...")
        elif self._find:
            self._findNextButton.setDefault(True)
            self.setWindowTitle("Find...")
        elif self._filter:
            self._filterButton.setDefault(True)
            self.setWindowTitle("Filter...")
        self._findNextButton.setVisible(find)
        if not find:
            self._findPreviousButton.setVisible(find)
        self._filterButton.setVisible(filter)
        self.show()
        self.raise_()
        self.activateWindow()
        self._findLabelLineEdit.setFocus()
    def keyPressEvent(self, event):
        """Close on Ctrl+W, then fall through to default QDialog handling.
        """
        if event.modifiers() == Qt.ControlModifier and event.key() == Qt.Key_W:
            self.close()
        QDialog.keyPressEvent(self, event)
    def setFindAlgorithm(self,findAlgorithm):
        """Register the object that performs the actual search."""
        logging.debug(__name__ +': setFindAlgorithm')
        self._findAlgorithm=findAlgorithm
    def findAlgorithm(self):
        """Return the currently registered find algorithm (or None)."""
        return self._findAlgorithm
    def label(self):
        """Return the label criterion as a Python string."""
        return str(self._findLabelLineEdit.text().toAscii())
    def setLabel(self,label):
        """Preset the label criterion line edit."""
        logging.debug(__name__ +': setLabel '+label)
        self._findLabelLineEdit.setText(label)
    def properties(self):
        """Return all property criteria as (name, value) string pairs."""
        return [(str(property[1].text().toAscii()),str(property[2].text().toAscii())) for property in self._properties]
    def scripts(self):
        """Return all filter-script criteria as strings."""
        return [str(script[1].text().toAscii()) for script in self._scripts]
    def caseSensitive(self):
        """True if the 'Case sensitive' checkbox is checked."""
        return self._caseSensitiveCheckBox.checkState()==Qt.Checked
    def exactMatch(self):
        """True if the 'Exact match' checkbox is checked."""
        return self._exactMatchCheckBox.checkState()==Qt.Checked
    def edited(self):
        """Slot: a criterion changed, so the find cycle starts over."""
        self._findPreviousButton.hide()
        if self._findNextButton.isVisible():
            self._findNumberLabel.hide()
            self._findNextButton.setText("&Find")
    def _updateNumberLabel(self):
        """Refresh the "current/total" result counter next to the buttons."""
        current=self._findAlgorithm.currentNumber()
        total=self._findAlgorithm.numberOfResults()
        message=self._findAlgorithm.message()
        text=""
        if self._filter:
            text=str(total)+" found"
        else:
            if total>0:
                text=str(current)+"/"+str(total)
            else:
                text="not found"
        if message:
            text+=" ("+message+")"
        self._findNumberLabel.setText(text)
    def findPrevious(self):
        """Step back to the previous result and announce it via "found"."""
        logging.debug(__name__ +': findPrevious')
        object=self._findAlgorithm.previous()
        self._updateNumberLabel()
        self.emit(SIGNAL("found"),object)
    def findNext(self):
        """Run the search (first call) or step to the next result.

        The first invocation runs the find algorithm on a worker thread
        while pumping Qt events to keep the GUI responsive; subsequent
        invocations iterate over the already-computed results.
        """
        logging.debug(__name__ +': findNext')
        if not self._findPreviousButton.isVisible():
            self._findNextButton.setVisible(False)
            self._filterButton.setVisible(False)
            self._resetButton.setVisible(False)
            self._findNumberLabel.setText("Searching...")
            self._findNumberLabel.show()
            thread = ThreadChain(self._findAlgorithm.findUsingFindDialog, self)
            while thread.isRunning():
                if not Application.NO_PROCESS_EVENTS:
                    QCoreApplication.instance().processEvents()
            object=thread.returnValue()
            self._findNextButton.setVisible(True)
            if self._filter:
                self._filterButton.setVisible(True)
                self._resetButton.setVisible(True)
            self._findPreviousButton.show()
            self._findNextButton.setText("&Next")
        else:
            object=next(self._findAlgorithm)
        self._updateNumberLabel()
        self.emit(SIGNAL("found"),object)
    def filter(self):
        """Run the search and announce the full result list via "filtered"."""
        logging.debug(__name__ +': filter')
        self._findNextButton.setVisible(False)
        self._filterButton.setVisible(False)
        self._resetButton.setVisible(False)
        self._findNumberLabel.setText("Searching...")
        self._findNumberLabel.show()
        thread = ThreadChain(self._findAlgorithm.findUsingFindDialog, self)
        while thread.isRunning():
            if not Application.NO_PROCESS_EVENTS:
                QCoreApplication.instance().processEvents()
        if self._find:
            self._findNextButton.setVisible(True)
        self._filterButton.setVisible(True)
        self._resetButton.setVisible(True)
        self._updateNumberLabel()
        self.emit(SIGNAL("filtered"),self._findAlgorithm.results())
    def reset(self):
        """Clear all criteria and results; notify listeners if filtering."""
        self.setLabel("")
        for o in self._scripts+self._properties:
            self._remove(o)
        self._addStringProperty(False,False)
        self._addScript(False,False)
        self._findAlgorithm.clear()
        self._updateNumberLabel()
        if self._filter:
            self.emit(SIGNAL("filtered"),None)
        self.update()
    def help(self):
        """Show a message box explaining the three ways to find objects."""
        QMessageBox.about(self, 'Info', "You can find objects \n1. using their label shown in the center view, \n2. their properties shown in the property view, or \n3. using a Python script returning a boolean. Empty fields are ignored. Examples are shown as tool tips.")
| import logging
from PyQt4.QtCore import QCoreApplication,Qt,SIGNAL
from PyQt4.QtGui import QDialog,QLabel,QLineEdit,QCheckBox,QPushButton,QVBoxLayout,QHBoxLayout,QMessageBox,QToolButton,QWidget,QLayout
from Vispa.Main.Application import Application
from Vispa.Share.ThreadChain import ThreadChain
class FindDialog(QDialog):
def __init__(self,parent=None):
logging.debug(__name__ +': __init__')
QDialog.__init__(self,parent)
self.setWindowFlags(Qt.Window)
self.setWindowTitle("Find...")
self._findAlgorithm=None
self._properties=[]
self._scripts=[]
self._find=True
self._filter=False
self.fill()
def fill(self):
logging.debug(__name__ +': fill')
self._findLabelLabel = QLabel("Label: ")
self._findLabelLineEdit = QLineEdit()
self._findLabelLineEdit.setToolTip("Example: Particle1")
self._caseSensitiveCheckBox=QCheckBox("Case sensitive")
self._exactMatchCheckBox=QCheckBox("Exact match")
self._helpButton = QPushButton("&Help")
self._findPreviousButton = QPushButton("&Previous")
self._findPreviousButton.hide()
self._findNumberLabel = QLabel("?/?")
self._findNumberLabel.hide()
self._findNextButton = QPushButton("&Find")
self._filterButton = QPushButton("&Filter")
self._resetButton = QPushButton("&Reset")
self._closeButton = QPushButton("&Close")
self.setLayout(QVBoxLayout())
self.layout().setSizeConstraint(QLayout.SetFixedSize)
self._layout1=QHBoxLayout()
self._layout3=QHBoxLayout()
self._layout4=QHBoxLayout()
self._layout1.setSizeConstraint(QLayout.SetDefaultConstraint)
self._layout3.setSizeConstraint(QLayout.SetDefaultConstraint)
self._layout4.setSizeConstraint(QLayout.SetDefaultConstraint)
self.layout().addLayout(self._layout1)
self.layout().addLayout(self._layout3)
self.layout().addStretch()
self.layout().addLayout(self._layout4)
self._layout1.addWidget(self._findLabelLabel)
self._layout1.addWidget(self._findLabelLineEdit)
self._layout3.addWidget(self._helpButton)
self._layout3.addStretch()
self._layout3.addWidget(self._caseSensitiveCheckBox)
self._layout3.addWidget(self._exactMatchCheckBox)
self._layout4.addWidget(self._findPreviousButton)
self._layout4.addWidget(self._findNumberLabel)
self._layout4.addWidget(self._findNextButton)
self._layout4.addWidget(self._filterButton)
self._layout4.addWidget(self._resetButton)
self._layout4.addStretch()
self._layout4.addWidget(self._closeButton)
self.connect(self._findLabelLineEdit, SIGNAL('textChanged(QString)'), self.edited)
self.connect(self._caseSensitiveCheckBox, SIGNAL('stateChanged(int)'), self.edited)
self.connect(self._exactMatchCheckBox, SIGNAL('stateChanged(int)'), self.edited)
self.connect(self._findPreviousButton, SIGNAL('clicked(bool)'), self.findPrevious)
self.connect(self._findNextButton, SIGNAL('clicked(bool)'), self.findNext)
self.connect(self._filterButton, SIGNAL('clicked(bool)'), self.filter)
self.connect(self._resetButton, SIGNAL('clicked(bool)'), self.reset)
self.connect(self._helpButton, SIGNAL('clicked(bool)'), self.help)
self.connect(self._closeButton, SIGNAL('clicked(bool)'), self.reject)
self._addStringProperty(False,False)
self._addScript(False,False)
def _removeProperty(self):
for property in self._properties:
if self.sender() in property:
self._remove(property)
return
def _remove(self,object):
for o in object:
if isinstance(o,QWidget):
o.close()
self.layout().removeItem(object[0])
if object in self._properties:
self._properties.remove(object)
elif object in self._scripts:
self._scripts.remove(object)
def _addStringProperty(self,bool,deletable=True):
layout2=QHBoxLayout()
findPropertyNameLabel = QLabel("Property: ")
findPropertyNameLineEdit = QLineEdit()
findPropertyNameLineEdit.setToolTip("Example: Label = Particle1 ")
findPropertyValueLabel = QLabel(" = ")
findPropertyValueLineEdit = QLineEdit()
findPropertyValueLineEdit.setToolTip("Example: Label = Particle1 ")
propertyAdd = QToolButton()
propertyAdd.setText("+")
propertyDelete = QToolButton()
propertyDelete.setText("-")
if deletable:
propertyAdd.hide()
else:
propertyDelete.hide()
layout2.addWidget(propertyAdd)
layout2.addWidget(propertyDelete)
layout2.addWidget(findPropertyNameLabel)
layout2.addWidget(findPropertyNameLineEdit)
layout2.addWidget(findPropertyValueLabel)
layout2.addWidget(findPropertyValueLineEdit)
self.connect(findPropertyNameLineEdit, SIGNAL('textChanged(QString)'), self.edited)
self.connect(findPropertyValueLineEdit, SIGNAL('textChanged(QString)'), self.edited)
self.connect(propertyAdd, SIGNAL('clicked(bool)'), self._addStringProperty)
self.connect(propertyDelete, SIGNAL('clicked(bool)'), self._removeProperty)
self.layout().insertLayout(len(self._properties)+len(self._scripts)+1,layout2)
self._properties+=[(layout2,findPropertyNameLineEdit,findPropertyValueLineEdit,findPropertyNameLabel,findPropertyValueLabel,propertyAdd,propertyDelete)]
def _removeScript(self):
for script in self._scripts:
if self.sender() in script:
self._remove(script)
return
def _addScript(self,bool,deletable=True):
layout2=QHBoxLayout()
findScriptLabel = QLabel("Filter = ")
findScriptLineEdit = QLineEdit("")
findScriptLineEdit.setToolTip("Example: object.Label == 'Particle1' ")
scriptAdd = QToolButton()
scriptAdd.setText("+")
scriptDelete = QToolButton()
scriptDelete.setText("-")
if deletable:
scriptAdd.hide()
else:
scriptDelete.hide()
layout2.addWidget(scriptAdd)
layout2.addWidget(scriptDelete)
layout2.addWidget(findScriptLabel)
layout2.addWidget(findScriptLineEdit)
self.connect(findScriptLineEdit, SIGNAL('textChanged(QString)'), self.edited)
self.connect(scriptAdd, SIGNAL('clicked(bool)'), self._addScript)
self.connect(scriptDelete, SIGNAL('clicked(bool)'), self._removeScript)
self.layout().insertLayout(len(self._properties)+len(self._scripts)+1,layout2)
self._scripts+=[(layout2,findScriptLineEdit,findScriptLabel,scriptAdd,scriptDelete)]
def onScreen(self, filter=False, find=True):
logging.debug(__name__ +': onScreen')
self._find=find
self._filter=filter
if self._find and self._filter:
self._findNextButton.setDefault(True)
self.setWindowTitle("Find/Filter...")
elif self._find:
self._findNextButton.setDefault(True)
self.setWindowTitle("Find...")
elif self._filter:
self._filterButton.setDefault(True)
self.setWindowTitle("Filter...")
self._findNextButton.setVisible(find)
if not find:
self._findPreviousButton.setVisible(find)
self._filterButton.setVisible(filter)
self.show()
self.raise_()
self.activateWindow()
self._findLabelLineEdit.setFocus()
def keyPressEvent(self, event):
"""
"""
if event.modifiers() == Qt.ControlModifier and event.key() == Qt.Key_W:
self.close()
QDialog.keyPressEvent(self, event)
def setFindAlgorithm(self,findAlgorithm):
logging.debug(__name__ +': setFindAlgorithm')
self._findAlgorithm=findAlgorithm
def findAlgorithm(self):
return self._findAlgorithm
def label(self):
return str(self._findLabelLineEdit.text().toAscii())
def setLabel(self,label):
logging.debug(__name__ +': setLabel '+label)
self._findLabelLineEdit.setText(label)
def properties(self):
return [(str(property[1].text().toAscii()),str(property[2].text().toAscii())) for property in self._properties]
def scripts(self):
return [str(script[1].text().toAscii()) for script in self._scripts]
def caseSensitive(self):
return self._caseSensitiveCheckBox.checkState()==Qt.Checked
def exactMatch(self):
return self._exactMatchCheckBox.checkState()==Qt.Checked
def edited(self):
self._findPreviousButton.hide()
if self._findNextButton.isVisible():
self._findNumberLabel.hide()
self._findNextButton.setText("&Find")
def _updateNumberLabel(self):
current=self._findAlgorithm.currentNumber()
total=self._findAlgorithm.numberOfResults()
message=self._findAlgorithm.message()
text=""
if self._filter:
text=str(total)+" found"
else:
if total>0:
text=str(current)+"/"+str(total)
else:
text="not found"
if message:
text+=" ("+message+")"
self._findNumberLabel.setText(text)
def findPrevious(self):
logging.debug(__name__ +': findPrevious')
object=self._findAlgorithm.previous()
self._updateNumberLabel()
self.emit(SIGNAL("found"),object)
def findNext(self):
logging.debug(__name__ +': findNext')
if not self._findPreviousButton.isVisible():
self._findNextButton.setVisible(False)
self._filterButton.setVisible(False)
self._resetButton.setVisible(False)
self._findNumberLabel.setText("Searching...")
self._findNumberLabel.show()
thread = ThreadChain(self._findAlgorithm.findUsingFindDialog, self)
while thread.isRunning():
if not Application.NO_PROCESS_EVENTS:
QCoreApplication.instance().processEvents()
object=thread.returnValue()
self._findNextButton.setVisible(True)
if self._filter:
self._filterButton.setVisible(True)
self._resetButton.setVisible(True)
self._findPreviousButton.show()
self._findNextButton.setText("&Next")
else:
object=next(self._findAlgorithm)
self._updateNumberLabel()
self.emit(SIGNAL("found"),object)
def filter(self):
logging.debug(__name__ +': filter')
self._findNextButton.setVisible(False)
self._filterButton.setVisible(False)
self._resetButton.setVisible(False)
self._findNumberLabel.setText("Searching...")
self._findNumberLabel.show()
thread = ThreadChain(self._findAlgorithm.findUsingFindDialog, self)
while thread.isRunning():
if not Application.NO_PROCESS_EVENTS:
QCoreApplication.instance().processEvents()
if self._find:
self._findNextButton.setVisible(True)
self._filterButton.setVisible(True)
self._resetButton.setVisible(True)
self._updateNumberLabel()
self.emit(SIGNAL("filtered"),self._findAlgorithm.results())
def reset(self):
    """Clear the dialog back to one empty property row and one empty script row.

    Also clears the find algorithm's state and, in filter mode, tells
    listeners (via 'filtered' with None) that no filter is active anymore.
    """
    self.setLabel("")
    # Drop every existing script and property input row...
    for o in self._scripts+self._properties:
        self._remove(o)
    # ...and recreate one blank row of each kind.
    self._addStringProperty(False,False)
    self._addScript(False,False)
    self._findAlgorithm.clear()
    self._updateNumberLabel()
    if self._filter:
        # None signals "filter cleared" to listeners.
        self.emit(SIGNAL("filtered"),None)
    self.update()
def help(self):
    """Pop up a message box explaining the three supported ways to search."""
    QMessageBox.about(self, 'Info', "You can find objects \n1. using their label shown in the center view, \n2. their properties shown in the property view, or \n3. using a Python script returning a boolean. Empty fields are ignored. Examples are shown as tool tips.")
| none | 1 | 2.066666 | 2 | |
app/util/process_utils.py | rsennewald/ClusterRunner | 164 | 6620545 | from contextlib import suppress
import os
import subprocess
import time
SIGINFO = 29 # signal.SIGINFO is not present in all Python distributions
def kill_gracefully(process, timeout=2):
    """
    Ask the process to shut down politely (SIGTERM) and escalate to a hard
    kill (SIGKILL) if it has not exited within the given timeout.
    :param process: The process to terminate or kill
    :type process: subprocess.Popen
    :param timeout: Number of seconds to wait after terminate before killing
    :type timeout: int
    :return: The exit code, stdout, and stderr of the process
    :rtype: (int, str, str)
    """
    # A ProcessLookupError just means the process already exited.
    with suppress(ProcessLookupError):
        process.terminate()
    try:
        stdout, stderr = process.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        # Graceful shutdown took too long -- escalate.
        _, stdout, stderr = kill_hard(process)
    return process.returncode, stdout, stderr
def kill_hard(process):
    """
    Kill the process immediately with SIGKILL. On POSIX it is first sent
    SIGINFO so a registered debug handler can dump state before dying.
    :param process: The process to terminate or kill
    :type process: subprocess.Popen
    :return: The exit code, stdout, and stderr of the process
    :rtype: (int, str, str)
    """
    # A ProcessLookupError just means the process already exited.
    with suppress(ProcessLookupError):
        if not is_windows():
            # Assumes the target registered a debug handler for SIGINFO.
            process.send_signal(SIGINFO)
            # Give that handler's logging a moment to flush.
            time.sleep(1)
        process.kill()
    out, err = process.communicate()
    return process.returncode, out, err
def is_windows():
    """
    :return: True when ClusterRunner is running on Windows (``os.name`` is ``'nt'``)
    :rtype: bool
    """
    return 'nt' == os.name
def Popen_with_delayed_expansion(cmd, *args, **kwargs):
    """
    A thin wrapper around subprocess.Popen which ensures that all environment variables in the cmd are expanded at
    execution time. By default, Windows CMD *disables* delayed expansion which means it will expand the command first
    before execution. E.g. run 'set FOO=1 && echo %FOO%' won't actually echo 1 because %FOO% gets expanded before the
    execution.
    :param cmd: The command to execute
    :type cmd: str | iterable
    :return: Popen object, just like the Popen object returned by subprocess.Popen
    :rtype: :class:`Popen`
    """
    if os.name == 'nt':  # Windows ('cmd /V' re-enables delayed expansion)
        cmd_with_delayed_expansion = ['cmd', '/V', '/C']
        if isinstance(cmd, str):
            cmd_with_delayed_expansion.append(cmd)
        else:
            cmd_with_delayed_expansion.extend(cmd)
        cmd = cmd_with_delayed_expansion
    else:
        # Ordinarily, if you pipe commands in bash (eg "cmd1 | cmd2 | cmd3")
        # and the rightmost command succeeds, the whole command will succeed
        # even if one of the earlier commands fail. This is potentially
        # problematic. See https://github.com/box/ClusterRunner/issues/321 for
        # an example. To prevent this, set bash's pipefail option -- but only
        # when the caller did not ask for a different shell executable.
        should_set_pipefail = (
            kwargs.get('shell') is True and
            # bugfix: the key was misspelled 'executabe', so a caller-supplied
            # executable (e.g. /bin/sh) was silently overwritten with bash.
            kwargs.get('executable') in [None, '/bin/bash'] and
            os.path.exists('/bin/bash') and
            isinstance(cmd, str)
        )
        if should_set_pipefail:
            kwargs['executable'] = '/bin/bash'
            cmd = 'set -o pipefail; ' + cmd
    return subprocess.Popen(cmd, *args, **kwargs)
def get_environment_variable_setter_command(name, value):
    """
    Construct a platform specific command for setting an environment variable.
    Each command ends in a chaining token ('&&' on Windows, ';' elsewhere) so
    it can be prefixed to further commands.
    :param name: The name of the environment variable
    :type name: str
    :param value: The value of the environment variable
    :type value: str
    :return: Platform specific command for setting the environment variable
    :rtype: str
    """
    # is_windows() inlined: os.name is 'nt' exactly on Windows.
    if os.name == 'nt':
        return 'set {}={}&&'.format(name, value)
    return 'export {}="{}";'.format(name, value)
| from contextlib import suppress
import os
import subprocess
import time
SIGINFO = 29 # signal.SIGINFO is not present in all Python distributions
def kill_gracefully(process, timeout=2):
    """
    Try terminating the process first (uses SIGTERM; which allows it to potentially shutdown gracefully). If the process
    does not exit within the given timeout, the process is killed (SIGKILL).
    :param process: The process to terminate or kill
    :type process: subprocess.Popen
    :param timeout: Number of seconds to wait after terminate before killing
    :type timeout: int
    :return: The exit code, stdout, and stderr of the process
    :rtype: (int, str, str)
    """
    try:
        # ProcessLookupError means the process already exited -- nothing to do.
        with suppress(ProcessLookupError):
            process.terminate()
        stdout, stderr = process.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        # Graceful shutdown took too long; escalate to SIGKILL.
        _, stdout, stderr = kill_hard(process)
    return process.returncode, stdout, stderr
def kill_hard(process):
    """
    Kill the specified process immediately using SIGKILL.
    :param process: The process to terminate or kill
    :type process: subprocess.Popen
    :return: The exit code, stdout, and stderr of the process
    :rtype: (int, str, str)
    """
    # ProcessLookupError means the process already exited -- nothing to do.
    with suppress(ProcessLookupError):
        if not is_windows():
            process.send_signal(SIGINFO)  # this assumes a debug handler has been registered for SIGINFO
            time.sleep(1)  # give the logger a chance to write out debug info
        process.kill()
    # communicate() reaps the process and drains any remaining pipe output.
    stdout, stderr = process.communicate()
    return process.returncode, stdout, stderr
def is_windows():
    """
    :return: Whether ClusterRunner is running on Windows or not
    :rtype: bool
    """
    return os.name == 'nt'
def Popen_with_delayed_expansion(cmd, *args, **kwargs):
    """
    A thin wrapper around subprocess.Popen which ensures that all environment variables in the cmd are expanded at
    execution time. By default, Windows CMD *disables* delayed expansion which means it will expand the command first
    before execution. E.g. run 'set FOO=1 && echo %FOO%' won't actually echo 1 because %FOO% gets expanded before the
    execution.
    :param cmd: The command to execute
    :type cmd: str | iterable
    :return: Popen object, just like the Popen object returned by subprocess.Popen
    :rtype: :class:`Popen`
    """
    if os.name == 'nt':  # Windows ('cmd /V' re-enables delayed expansion)
        cmd_with_delayed_expansion = ['cmd', '/V', '/C']
        if isinstance(cmd, str):
            cmd_with_delayed_expansion.append(cmd)
        else:
            cmd_with_delayed_expansion.extend(cmd)
        cmd = cmd_with_delayed_expansion
    else:
        # Ordinarily, if you pipe commands in bash (eg "cmd1 | cmd2 | cmd3")
        # and the rightmost command succeeds, the whole command will succeed
        # even if one of the earlier commands fail. This is potentially
        # problematic. See https://github.com/box/ClusterRunner/issues/321 for
        # an example. To prevent this, set bash's pipefail option -- but only
        # when the caller did not ask for a different shell executable.
        should_set_pipefail = (
            kwargs.get('shell') is True and
            # bugfix: the key was misspelled 'executabe', so a caller-supplied
            # executable (e.g. /bin/sh) was silently overwritten with bash.
            kwargs.get('executable') in [None, '/bin/bash'] and
            os.path.exists('/bin/bash') and
            isinstance(cmd, str)
        )
        if should_set_pipefail:
            kwargs['executable'] = '/bin/bash'
            cmd = 'set -o pipefail; ' + cmd
    return subprocess.Popen(cmd, *args, **kwargs)
def get_environment_variable_setter_command(name, value):
    """
    Construct a platform specific command for setting an environment variable. Right now each command constructed
    is designed to be chained with other commands.
    :param name: The name of the environment variable
    :type name: str
    :param value: The value of the environment variable
    :type value: str
    :return: Platform specific command for setting the environment variable
    :rtype: str
    """
    if is_windows():
        # trailing '&&' chains with the next Windows command
        return 'set {}={}&&'.format(name, value)
    else:
        # trailing ';' chains with the next POSIX shell command
        return 'export {}="{}";'.format(name, value)
| en | 0.833863 | # signal.SIGINFO is not present in all Python distributions Try terminating the process first (uses SIGTERM; which allows it to potentially shutdown gracefully). If the process does not exit within the given timeout, the process is killed (SIGKILL). :param process: The process to terminate or kill :type process: subprocess.Popen :param timeout: Number of seconds to wait after terminate before killing :type timeout: int :return: The exit code, stdout, and stderr of the process :rtype: (int, str, str) Kill the specified process immediately using SIGKILL. :param process: The process to terminate or kill :type process: subprocess.Popen :return: The exit code, stdout, and stderr of the process :rtype: (int, str, str) # this assumes a debug handler has been registered for SIGINFO # give the logger a chance to write out debug info :return: Whether ClusterRunner is running on Windows or not> :rtype: bool A thin wrapper around subprocess.Popen which ensures that all environment variables in the cmd are expanded at execution time. By default, Windows CMD *disables* delayed expansion which means it will expand the command first before execution. E.g. run 'set FOO=1 && echo %FOO%' won't actually echo 1 because %FOO% gets expanded before the execution. :param cmd: The command to execute :type cmd: str | iterable :return: Popen object, just like the Popen object returned by subprocess.Popen :rtype: :class:`Popen` # Ordinarily, if you pipe commands in bash (eg "cmd1 | cmd2 | cmd3") # and the rightmost command succeeds, the whole command will succeed # even if one of the earlier commands fail. This is potentially # problematic. See https://github.com/box/ClusterRunner/issues/321 for # an example. To prevent this, set bash's pipefail option Construct a platform specific command for setting an environment variable. Right now each command constructed is designed to be chained with other commands. 
:param name: The name of the environment variable :type name: str :param value: The value of the environment variable :type value: str :return: Platform specific command for setting the environment variable :rtype: str | 2.905276 | 3 |
assessment/migrations/0001_initial.py | vandorjw/django-assessment | 10 | 6620546 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-31 05:30
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import parler.models
import uuid
class Migration(migrations.Migration):
    """Initial schema for the assessment app.

    Creates the Survey, Question, Choice, Answer and Result models together
    with their django-parler translation tables, wires the foreign keys
    between them, and adds the uniqueness constraints: one translation per
    (language_code, master), one result per (survey, user), and one answer
    per (result, question).
    """

    initial = True

    dependencies = [
        # The (possibly custom) user model must exist before Survey/Result link to it.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('_uid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('answer', models.TextField(verbose_name='answer')),
            ],
            options={
                'verbose_name_plural': 'answers',
                'verbose_name': 'answer',
            },
        ),
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('_uid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('is_correct', models.BooleanField(default=False, verbose_name='correct')),
            ],
            options={
                'verbose_name_plural': 'choices',
                'verbose_name': 'choice',
            },
            bases=(parler.models.TranslatableModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name='ChoiceTranslation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
                ('value', models.CharField(max_length=512, verbose_name='value')),
                ('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='assessment.Choice')),
            ],
            options={
                'verbose_name': 'choice Translation',
                'db_tablespace': '',
                'default_permissions': (),
                'db_table': 'assessment_choice_translation',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('_uid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('is_required', models.BooleanField(default=False, verbose_name='required')),
                ('of_type', models.IntegerField(choices=[(1, 'true or false'), (2, 'multiple choice'), (3, 'text')], default=1, verbose_name='type')),
            ],
            options={
                'verbose_name_plural': 'questions',
                'verbose_name': 'question',
            },
            bases=(parler.models.TranslatableModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name='QuestionTranslation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
                ('question', models.CharField(max_length=512, verbose_name='question')),
                ('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='assessment.Question')),
            ],
            options={
                'verbose_name': 'question Translation',
                'db_tablespace': '',
                'default_permissions': (),
                'db_table': 'assessment_question_translation',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='Result',
            fields=[
                ('_uid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('timestamp', models.DateTimeField(default=datetime.datetime.now, editable=False)),
            ],
            options={
                'verbose_name_plural': 'results',
                'verbose_name': 'result',
            },
        ),
        migrations.CreateModel(
            name='Survey',
            fields=[
                ('_uid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('is_active', models.BooleanField(default=True, verbose_name='active')),
                ('is_private', models.BooleanField(default=False, verbose_name='private')),
                ('start_date_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='start time')),
                ('end_date_time', models.DateTimeField(blank=True, null=True, verbose_name='end time')),
                ('admin', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assessment_admin_surveys', to=settings.AUTH_USER_MODEL, verbose_name='owner')),
                ('users', models.ManyToManyField(blank=True, related_name='assessment_user_surveys', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'surveys',
                'verbose_name': 'survey',
            },
            bases=(parler.models.TranslatableModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name='SurveyTranslation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
                ('name', models.CharField(max_length=160, verbose_name='name')),
                ('slug', models.SlugField(max_length=160, unique=True, verbose_name='slug')),
                ('description', models.TextField(verbose_name='description')),
                ('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='assessment.Survey')),
            ],
            options={
                'verbose_name': 'survey Translation',
                'db_tablespace': '',
                'default_permissions': (),
                'db_table': 'assessment_survey_translation',
                'managed': True,
            },
        ),
        # Deferred foreign keys between the models created above.
        migrations.AddField(
            model_name='result',
            name='survey',
            field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='results', to='assessment.Survey', verbose_name='survey'),
        ),
        migrations.AddField(
            model_name='result',
            name='user',
            field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='results', to=settings.AUTH_USER_MODEL, verbose_name='user'),
        ),
        migrations.AddField(
            model_name='question',
            name='survey',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assessment.Survey', verbose_name='survey'),
        ),
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='choices', to='assessment.Question', verbose_name='question'),
        ),
        migrations.AddField(
            model_name='answer',
            name='question',
            field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='assessment.Question', verbose_name='question'),
        ),
        migrations.AddField(
            model_name='answer',
            name='result',
            field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='assessment.Result', verbose_name='result'),
        ),
        # Uniqueness constraints.
        migrations.AlterUniqueTogether(
            name='surveytranslation',
            unique_together=set([('language_code', 'master')]),
        ),
        migrations.AlterUniqueTogether(
            name='result',
            unique_together=set([('survey', 'user')]),
        ),
        migrations.AlterUniqueTogether(
            name='questiontranslation',
            unique_together=set([('language_code', 'master')]),
        ),
        migrations.AlterUniqueTogether(
            name='choicetranslation',
            unique_together=set([('language_code', 'master')]),
        ),
        migrations.AlterUniqueTogether(
            name='answer',
            unique_together=set([('result', 'question')]),
        ),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-31 05:30
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import parler.models
import uuid
class Migration(migrations.Migration):
    """Initial schema for the assessment app.

    Creates the Survey, Question, Choice, Answer and Result models together
    with their django-parler translation tables, wires the foreign keys
    between them, and adds the uniqueness constraints: one translation per
    (language_code, master), one result per (survey, user), and one answer
    per (result, question).
    """

    initial = True

    dependencies = [
        # The (possibly custom) user model must exist before Survey/Result link to it.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('_uid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('answer', models.TextField(verbose_name='answer')),
            ],
            options={
                'verbose_name_plural': 'answers',
                'verbose_name': 'answer',
            },
        ),
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('_uid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('is_correct', models.BooleanField(default=False, verbose_name='correct')),
            ],
            options={
                'verbose_name_plural': 'choices',
                'verbose_name': 'choice',
            },
            bases=(parler.models.TranslatableModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name='ChoiceTranslation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
                ('value', models.CharField(max_length=512, verbose_name='value')),
                ('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='assessment.Choice')),
            ],
            options={
                'verbose_name': 'choice Translation',
                'db_tablespace': '',
                'default_permissions': (),
                'db_table': 'assessment_choice_translation',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('_uid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('is_required', models.BooleanField(default=False, verbose_name='required')),
                ('of_type', models.IntegerField(choices=[(1, 'true or false'), (2, 'multiple choice'), (3, 'text')], default=1, verbose_name='type')),
            ],
            options={
                'verbose_name_plural': 'questions',
                'verbose_name': 'question',
            },
            bases=(parler.models.TranslatableModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name='QuestionTranslation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
                ('question', models.CharField(max_length=512, verbose_name='question')),
                ('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='assessment.Question')),
            ],
            options={
                'verbose_name': 'question Translation',
                'db_tablespace': '',
                'default_permissions': (),
                'db_table': 'assessment_question_translation',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='Result',
            fields=[
                ('_uid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('timestamp', models.DateTimeField(default=datetime.datetime.now, editable=False)),
            ],
            options={
                'verbose_name_plural': 'results',
                'verbose_name': 'result',
            },
        ),
        migrations.CreateModel(
            name='Survey',
            fields=[
                ('_uid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('is_active', models.BooleanField(default=True, verbose_name='active')),
                ('is_private', models.BooleanField(default=False, verbose_name='private')),
                ('start_date_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='start time')),
                ('end_date_time', models.DateTimeField(blank=True, null=True, verbose_name='end time')),
                ('admin', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assessment_admin_surveys', to=settings.AUTH_USER_MODEL, verbose_name='owner')),
                ('users', models.ManyToManyField(blank=True, related_name='assessment_user_surveys', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'surveys',
                'verbose_name': 'survey',
            },
            bases=(parler.models.TranslatableModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name='SurveyTranslation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
                ('name', models.CharField(max_length=160, verbose_name='name')),
                ('slug', models.SlugField(max_length=160, unique=True, verbose_name='slug')),
                ('description', models.TextField(verbose_name='description')),
                ('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='assessment.Survey')),
            ],
            options={
                'verbose_name': 'survey Translation',
                'db_tablespace': '',
                'default_permissions': (),
                'db_table': 'assessment_survey_translation',
                'managed': True,
            },
        ),
        # Deferred foreign keys between the models created above.
        migrations.AddField(
            model_name='result',
            name='survey',
            field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='results', to='assessment.Survey', verbose_name='survey'),
        ),
        migrations.AddField(
            model_name='result',
            name='user',
            field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='results', to=settings.AUTH_USER_MODEL, verbose_name='user'),
        ),
        migrations.AddField(
            model_name='question',
            name='survey',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assessment.Survey', verbose_name='survey'),
        ),
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='choices', to='assessment.Question', verbose_name='question'),
        ),
        migrations.AddField(
            model_name='answer',
            name='question',
            field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='assessment.Question', verbose_name='question'),
        ),
        migrations.AddField(
            model_name='answer',
            name='result',
            field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='assessment.Result', verbose_name='result'),
        ),
        # Uniqueness constraints.
        migrations.AlterUniqueTogether(
            name='surveytranslation',
            unique_together=set([('language_code', 'master')]),
        ),
        migrations.AlterUniqueTogether(
            name='result',
            unique_together=set([('survey', 'user')]),
        ),
        migrations.AlterUniqueTogether(
            name='questiontranslation',
            unique_together=set([('language_code', 'master')]),
        ),
        migrations.AlterUniqueTogether(
            name='choicetranslation',
            unique_together=set([('language_code', 'master')]),
        ),
        migrations.AlterUniqueTogether(
            name='answer',
            unique_together=set([('result', 'question')]),
        ),
    ]
influxdb_client/_sync/__init__.py | bonitoo-io/influxdb-client-python | 1 | 6620547 | """Synchronous REST APIs."""
| """Synchronous REST APIs."""
| en | 0.644841 | Synchronous REST APIs. | 0.841786 | 1 |
mud/models/mixins/containing.py | erwanaubry/alamud_IUT_Escape | 0 | 6620548 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 <NAME>, IUT d'Orléans
#==============================================================================
from .propertied import Propertied
import mud.game
class Containing(Propertied):
    """A mixin that gives an entity the ability to contain other entities."""

    def __init__(self, **kargs):
        super().__init__(**kargs)
        self._contents = set()

    # ---- initialization from YAML data ------------------------------------

    def init_from_yaml(self, data, world):
        super().init_from_yaml(data, world)
        self._contents = set()
        # Move every object listed under "contains" into this container.
        for ident in data.get("contains", ()):
            mud.game.GAME.world[ident].move_to(self)

    def update_from_yaml(self, data, world):
        super().update_from_yaml(data, world)

    # ---- API for saving the dynamic part of objects to YAML (via JSON) ----

    def archive_into(self, obj):
        super().archive_into(obj)

    # ---- contents API ------------------------------------------------------

    def contents(self):
        """Return an iterator over the contents of the container."""
        return iter(self._contents)

    def __contains__(self, x):
        return x in self._contents

    def is_empty(self):
        """Return True when the container holds nothing."""
        return not self._contents

    def add(self, obj):
        """Add an object or player to the container."""
        self._contents.add(obj)

    def remove(self, obj):
        """Remove an object or player from the container."""
        self._contents.remove(obj)

    def reset(self):
        self._contents.clear()
        super().reset()
| # -*- coding: utf-8 -*-
# Copyright (C) 2014 <NAME>, IUT d'Orléans
#==============================================================================
from .propertied import Propertied
import mud.game
class Containing(Propertied):
    """a mixin that provides the ability to have content."""
    #--------------------------------------------------------------------------
    # initialization
    #--------------------------------------------------------------------------
    def __init__(self, **kargs):
        super().__init__(**kargs)
        # set of contained objects/players
        self._contents = set()
    #--------------------------------------------------------------------------
    # initialization from YAML data
    #--------------------------------------------------------------------------
    def init_from_yaml(self, data, world):
        """Rebuild the container and move every object listed under "contains" into it."""
        super().init_from_yaml(data, world)
        self._contents = set()
        for i in data.get("contains", ()):
            mud.game.GAME.world[i].move_to(self)
    def update_from_yaml(self, data, world):
        super().update_from_yaml(data, world)
    #--------------------------------------------------------------------------
    # API for saving the dynamic part of objects to YAML (via JSON)
    #--------------------------------------------------------------------------
    def archive_into(self, obj):
        super().archive_into(obj)
    #--------------------------------------------------------------------------
    # contents API
    #--------------------------------------------------------------------------
    def contents(self):
        """return an iterator over the contents of the container."""
        return iter(self._contents)
    def __contains__(self, x):
        return x in self._contents
    def is_empty(self):
        """Return True when the container holds nothing."""
        return not self._contents
    def add(self, obj):
        """add an object or player to the container."""
        self._contents.add(obj)
    def remove(self, obj):
        """remove an object or player from the container."""
        self._contents.remove(obj)
    def reset(self):
        # empty the container before delegating the rest of the reset
        self._contents.clear()
        super().reset()
| en | 0.263031 | # -*- coding: utf-8 -*- # Copyright (C) 2014 <NAME>, IUT d'Orléans #============================================================================== a mixin that provides the ability to have content. #-------------------------------------------------------------------------- # initialization #-------------------------------------------------------------------------- #-------------------------------------------------------------------------- # initialization from YAML data #-------------------------------------------------------------------------- #-------------------------------------------------------------------------- # API for saving the dynamic part of objects to YAML (via JSON) #-------------------------------------------------------------------------- #-------------------------------------------------------------------------- # contents API #-------------------------------------------------------------------------- return an iterator over the contents of the container. add an object or player to the container. remove an object or player from the container. | 2.538835 | 3 |
plantDB/search.py | Lukas-create/Riverengineering | 1 | 6620549 | """functions related to search for vegetation matching the users input"""
import geopandas as geopandas
import numpy as np
import pandas as pd
import os
import platform
import sqlite3
from gdal import ogr
from shapely.geometry import Point
from tabulate import tabulate
import gdal
from plantDB.plant import Plant
shp_driver = ogr.GetDriverByName("ESRI Shapefile")
def search_db_via_query(query):
    """Print all rows of the ``plants`` table matching a raw SQL condition.

    The user-supplied *query* is appended verbatim to the WHERE clause, so it
    must be a valid SQL boolean expression such as ``"habitat = 'Alpenvorland'"``.

    .. warning:: *query* is interpolated directly into the SQL statement, so it
       must only ever come from trusted console input (SQL injection risk). A
       parameterized query is not possible for a free-form WHERE clause.

    Args:
        query (str): SQL condition placed after ``WHERE``.

    Returns:
        None. Matching rows are printed as a table to the console.
    """
    connection = sqlite3.connect("Pflanzendaten.db")
    try:
        cursor = connection.cursor()
        cursor.execute("SELECT * FROM plants WHERE " + query)
        content = cursor.fetchall()
        print(tabulate(content, headers=['species', 'name', 'nativ', 'endangered', 'habitat', 'waterdepthmin', 'waterdepthmax', 'rootdepth', 'groundwatertablechange', 'floodheightmax', 'floodloss', 'floodduration']))
        print('Status 1 equals nativ')
    finally:
        # close even when execute() raises (e.g. malformed user query),
        # which previously leaked the connection
        connection.close()
def habitat_search(column, entry):
    """Search plantdata.csv for rows whose *column* equals *entry*.

    Every matching row is turned into a :class:`Plant` and printed via
    ``Plant.print_habitat()``; a blank line is printed for non-matching rows
    (kept from the original behaviour).

    Args:
        column (str): name of the CSV column to match against.
        entry (str): value to look for in that column.

    Returns:
        None. Output goes to the console.
    """
    if platform.system() == 'Linux':
        df = pd.read_csv('plantdata.csv')
    else:
        # non-UTF-8 exports (umlauts) need the lenient codec on Windows/macOS
        df = pd.read_csv('plantdata.csv', encoding='unicode_escape')
    # reset_index: after dropna() the original labels have gaps, so the
    # positional loop below would KeyError on label-based access otherwise
    df1 = df.dropna().reset_index(drop=True)

    def search(column, entry, df):
        rows = df.to_numpy()
        values = df[column]
        for i in range(len(values)):
            if values[i] == entry:
                # bugfix: the last constructor argument was df2[11] (an entire
                # row of the table); it must be the per-row cell rows[i, 11]
                plant = Plant(rows[i, 0], rows[i, 1], rows[i, 2], rows[i, 3], rows[i, 4], rows[i, 5], rows[i, 6], rows[i, 7], rows[i, 8], rows[i, 9], rows[i, 10], rows[i, 11])
                plant.print_habitat()
            else:
                print('')

    search(column, entry, df1)
def search_by_habitat():
    """Prompt for a habitat name on the console and list matching plants.

    Delegates the actual lookup to :func:`habitat_search` on the 'habitat'
    column of the CSV file, then explains the meaning of the status column.
    """
    name = input('Enter name of habitat\n')
    habitat_search('habitat', name)
    print('Status 1 equals nativ')
def point_in_bound(filename, x, y, area):
    """Check whether (x, y) lies inside the shapefile polygon for *area*.

    If the point is inside, the plant database is queried for that habitat and
    the user is interactively offered elevation data for the coordinates;
    otherwise a hint to check the shapefile is printed.

    Args:
        filename (str): path of the shapefile to test against.
        x (float): x - coordinate
        y (float): y - coordinate
        area (str): habitat name used both for the DB query and in messages.

    Returns:
        None. All output goes to the console.
    """
    file_shape = geopandas.read_file(filename)
    # first (and only expected) geometry of the shapefile
    polygon = list(file_shape.geometry)[0]
    point = Point(x, y)
    if polygon.contains(point):
        query = "habitat = '" + area + "'"
        search_db_via_query(query)
        print('Enter 1 if you want elevation data for the coordinates\nEnter 2 if you dont want elevation data')
        src = int(input('Enter here:'))
        if src == 1:
            elevation(x, y)
        elif src == 2:
            print('done')
    else:
        print('\ncoordinates out of \n' + area + '\nplease check provided shapefile for suitable coordinates\n')
def search_by_coordinates():
    """Read x/y coordinates from stdin and test them against all study areas.

    Asks for x and y (EPSG:3857), then calls point_in_bound(..) for each of
    the three provided shapefiles; matches are printed by the callee.

    Returns:
        None. Results are printed to the console.
    """
    print('CRS used is EPSG:3857 \n for reference check https://epsg.io/3857 ')
    # float() raises ValueError on non-numeric input (unhandled here)
    x = float(input('Enter x coordinate\n'))
    y = float(input('Enter y coordinate\n'))
    # NOTE(review): literal backslash paths are Windows-only; on Linux these
    # concatenated strings will not resolve to real files — consider os.path.join.
    point_in_bound(os.path.abspath("..")+"\Shape\prealpinebavaria.shp", x, y, 'Alpenvorland')
    point_in_bound(os.path.abspath("..")+"\Shape\oberrheinmaintiefland.shp", x, y, 'Oberrheinisches Tiefland')
    point_in_bound(os.path.abspath("..")+"\Shape\Tiefland.shp", x, y, 'Niederrheinisches Tiefland')
def elevation(x, y):
    """Print the terrain elevation at (x, y) read from the Shape.vrt raster.

    Args:
        x (float): x - coordinate
        y (float): y - coordinate

    Returns:
        None. The elevation in metres above sea level is printed.
    """
    # NOTE(review): literal backslash path is Windows-only — confirm on Linux
    file = os.path.abspath("..") + "\Shape\Shape.vrt"
    layer = gdal.Open(file)
    gt = layer.GetGeoTransform()
    # invert the affine geotransform (GDAL convention: gt[0]/gt[3] origin,
    # gt[1]/gt[5] pixel size) to map world coordinates to raster indices
    rasterx = int((x - gt[0]) / gt[1])
    rastery = int((y - gt[3]) / gt[5])
    print('elevation =', layer.GetRasterBand(1).ReadAsArray(rasterx, rastery, 1, 1)[0][0], 'm above sea level')
def question():
    """Top-level menu: choose database search, coordinate search or csv search.

    Prints the available options, reads the user's numeric choice from stdin
    and dispatches:
        1 -> ask for a habitat name and query the database
        2 -> search_by_coordinates()
        3 -> search_by_habitat() (csv quick overview)

    Returns:
        None. Prints 'no data' for any other numeric choice; a non-numeric
        choice raises ValueError (unhandled here).
    """
    print('Enter 1 to search database by habitat with detailed information\nEnter 2 to search database by coordinates \nEnter 3 to search by habitat in csv file for a quick overview without detail')
    print('habitat search options so far:\n Alpenvorland, Niederrheinisches Tiefland, Oberrheinisches Tiefland')
    src = int(input('Enter here:'))
    if src == 1:
        habitat = input('Enter name of habitat\n')
        # query fragment is concatenated from raw user input; it is passed
        # straight into SQL by search_db_via_query (injection-prone)
        query = "habitat = '" + habitat + "'"
        search_db_via_query(query)
    elif src == 2:
        search_by_coordinates()
    elif src == 3:
        search_by_habitat()
    else:
        print('no data')
| """functions related to search for vegetation matching the users input"""
import geopandas as geopandas
import numpy as np
import pandas as pd
import os
import platform
import sqlite3
from gdal import ogr
from shapely.geometry import Point
from tabulate import tabulate
import gdal
from plantDB.plant import Plant
shp_driver = ogr.GetDriverByName("ESRI Shapefile")
def search_db_via_query(query):
    """Print all plant records from the local database matching *query*.

    Opens Pflanzendaten.db, runs ``SELECT * FROM plants WHERE <query>`` and
    prints the result as a table, followed by a legend for the status column.

    Args:
        query (str): sql WHERE-clause fragment, e.g. "habitat = 'Alpenvorland'".
            NOTE(review): the fragment is concatenated into the statement, so
            callers must never pass untrusted input here (sql injection risk).

    Returns:
        None. Matching rows are printed to the console.
    """
    connection = sqlite3.connect("Pflanzendaten.db")
    try:
        cursor = connection.cursor()
        cursor.execute("SELECT * FROM plants WHERE " + query)
        content = cursor.fetchall()
        print(tabulate(content, headers=['species', 'name', 'nativ', 'endangered', 'habitat', 'waterdepthmin', 'waterdepthmax', 'rootdepth', 'groundwatertablechange', 'floodheightmax', 'floodloss', 'floodduration']))
        print('Status 1 equals nativ')
    finally:
        # close even when the SELECT raises, so the db handle is never leaked
        connection.close()
def habitat_search(column, entry):
    """Search the csv file for plants whose *column* equals *entry*.

    Loads plantdata.csv (with a fallback encoding on non-Linux systems),
    drops incomplete rows and prints the habitat information of every
    matching plant via Plant.print_habitat().

    Args:
        column (str): column name in the .csv file (e.g. 'habitat')
        entry (str): value to match in that column

    Returns:
        None. Matching plants are printed to the console; a blank line is
        printed for every non-matching row (kept for output compatibility).
    """
    if platform.system() == 'Linux':
        df = pd.read_csv('plantdata.csv')
    else:
        # non-utf8 umlauts occur in Windows exports of the csv
        df = pd.read_csv('plantdata.csv', encoding='unicode_escape')
    df1 = df.dropna()

    def search(column, entry, df):
        # Print every row of *df* whose *column* equals *entry*.
        # (Uses the df parameter consistently; the original mixed the
        # closure variable df1 with the parameter.)
        df2 = df.to_numpy()
        values = df[column]
        for i in range(len(values)):
            if values[i] == entry:
                # BUG FIX: the last constructor argument was df2[11] (an
                # entire row) instead of the cell df2[i, 11].
                plant = Plant(df2[i, 0], df2[i, 1], df2[i, 2], df2[i, 3],
                              df2[i, 4], df2[i, 5], df2[i, 6], df2[i, 7],
                              df2[i, 8], df2[i, 9], df2[i, 10], df2[i, 11])
                plant.print_habitat()
            else:
                print('')
    search(column, entry, df1)
def search_by_habitat():
    """Ask for a habitat name on stdin and run a csv lookup for it.

    The entered name is forwarded to habitat_search() against the
    'habitat' column; afterwards a short legend for the printed status
    values is shown.

    Returns:
        None. Matching entries (if any) are printed to the console.
    """
    name = input('Enter name of habitat\n')
    habitat_search('habitat', name)
    print('Status 1 equals nativ')
def point_in_bound(filename, x, y, area):
    """Check whether the point (x, y) lies inside the shapefile polygon.

    When the point is inside, the database is queried for vegetation of
    *area* and the user may optionally request elevation data for the
    point. Otherwise a hint is printed that the coordinates are out of
    bounds of this study area.

    Args:
        filename (str): path of the shapefile
        x (float): x - coordinate
        y (float): y - coordinate
        area (str): name of the study area (used as the db habitat name)

    Returns:
        None. All results are printed to the console.
    """
    shape = geopandas.read_file(filename)
    boundary = list(shape.geometry)[0]  # only the first geometry is tested
    if not boundary.contains(Point(x, y)):
        # guard clause: point not inside this study area
        print('\ncoordinates out of \n' + area + '\nplease check provided shapefile for suitable coordinates\n')
        return
    search_db_via_query("habitat = '" + area + "'")
    print('Enter 1 if you want elevation data for the coordinates\nEnter 2 if you dont want elevation data')
    choice = int(input('Enter here:'))
    if choice == 1:
        elevation(x, y)
    elif choice == 2:
        print('done')
def search_by_coordinates():
    """Read x/y coordinates from stdin and test them against all study areas.

    Asks for x and y (EPSG:3857), then calls point_in_bound(..) for each of
    the three provided shapefiles; point_in_bound prints the matching
    vegetation and may offer elevation data for the point.

    Returns:
        None. Results are printed to the console.
    """
    print('CRS used is EPSG:3857 \n for reference check https://epsg.io/3857 ')
    x = float(input('Enter x coordinate\n'))
    y = float(input('Enter y coordinate\n'))
    # FIX: build the paths with os.path.join instead of literal backslashes
    # so they also resolve on Linux (habitat_search explicitly supports
    # Linux, but the old "..\Shape\..." strings were Windows-only).
    base = os.path.join(os.path.abspath(".."), "Shape")
    point_in_bound(os.path.join(base, "prealpinebavaria.shp"), x, y, 'Alpenvorland')
    point_in_bound(os.path.join(base, "oberrheinmaintiefland.shp"), x, y, 'Oberrheinisches Tiefland')
    point_in_bound(os.path.join(base, "Tiefland.shp"), x, y, 'Niederrheinisches Tiefland')
def elevation(x, y):
    """Print the terrain elevation at (x, y) read from the Shape.vrt raster.

    Args:
        x (float): x - coordinate
        y (float): y - coordinate

    Returns:
        None. The elevation in metres above sea level is printed.
    """
    # FIX: portable path construction (the old literal "..\Shape\Shape.vrt"
    # only worked on Windows).
    file = os.path.join(os.path.abspath(".."), "Shape", "Shape.vrt")
    layer = gdal.Open(file)
    gt = layer.GetGeoTransform()
    # invert the affine geotransform (GDAL convention: gt[0]/gt[3] origin,
    # gt[1]/gt[5] pixel size) to map world coordinates to raster indices
    rasterx = int((x - gt[0]) / gt[1])
    rastery = int((y - gt[3]) / gt[5])
    print('elevation =', layer.GetRasterBand(1).ReadAsArray(rasterx, rastery, 1, 1)[0][0], 'm above sea level')
def question():
    """Top-level menu: choose database search, coordinate search or csv search.

    Prints the available options, reads the user's numeric choice from stdin
    and dispatches:
        1 -> ask for a habitat name and query the database
        2 -> search_by_coordinates()
        3 -> search_by_habitat() (csv quick overview)

    Returns:
        None. Prints 'no data' for any other choice, including non-numeric
        input (which previously crashed with an unhandled ValueError).
    """
    print('Enter 1 to search database by habitat with detailed information\nEnter 2 to search database by coordinates \nEnter 3 to search by habitat in csv file for a quick overview without detail')
    print('habitat search options so far:\n Alpenvorland, Niederrheinisches Tiefland, Oberrheinisches Tiefland')
    try:
        src = int(input('Enter here:'))
    except ValueError:
        # ROBUSTNESS FIX: treat non-numeric input like any unknown choice
        # instead of raising a traceback at the main menu
        src = None
    if src == 1:
        habitat = input('Enter name of habitat\n')
        # NOTE(review): raw user input concatenated into sql — injection-prone
        query = "habitat = '" + habitat + "'"
        search_db_via_query(query)
    elif src == 2:
        search_by_coordinates()
    elif src == 3:
        search_by_habitat()
    else:
        print('no data')
| en | 0.740535 | functions related to search for vegetation matching the users input Function that checks database for matching entries with user input. The function takes the user input and adds it to the used sql command to search for matching entries in the provided database if there are matching entries these will be printed in the python console Args: query (str): habitat name in sql, provided by the user Returns: table entries matching with user input Function searches in csv file for vegetation matching the user input. The function uses the console input to search for matching entries in the provided csv file, if there are matching entries the function print_habitat gets called to print the information in the python console. Args: column(str): column in the .csv file entry(str): entry in the .csv file Returns: String in console Function that enables the user to provide habitat input in console. The function asks the user to provide the habitat name he wants to search for, afterwards the input is given to the habitat_search() function and habitat_search() gets called. Returns: String in console to let the user know what the Status entries mean Function that checks if the coordinates provided by the user are in bound of the shapefile polygon. If the provided coordinates are out of bounds, a string will be printed in the console to let the user know, if they are matching one of the shapefiles, search_db_via_query() gets called. Args: filename (str): name of the shapefile x (float): x - coordinate y (float): y - coordinate area (str): name of the study area Returns: string to console Function that lets the user input coordinates. After asking the user to input x and y coordinates, point_in_bound(..) gets called for the 3 provided shapefiles. Afterwards the user gets asked if he wants to receive elevation data for the input coordinates. Returns: Function used to get information about elevation at the provided coordinates. 
Args: x (float): x - coordinate y (float): y - coordinate Returns: elevation data for coordinate input in console Function to let the user decide if he wants to search by habitat in csv file, search by habitat in database or search by coordinates. The function prints a string in the console to ask the user if he wants to search by putting in coordinates or the name of the habitat, furthermore it is asking the user if he wants to search by the name of the habitat in the provided csv file or database. If option 1 is chosen, user is asked for an habitat name before calling search_db_via_query() Args: 1 (int): calls search_db_via_query() 2 (int): calls search_by_coordinates() 3 (int): calls search_by_habitat() Returns: text string 'no data' if the input is anything else then 1, 2 or 3 | 3.858527 | 4 |
examples/large_rand.py | panosz/alpha_shapes | 5 | 6620550 | from time import time
from descartes import PolygonPatch
import numpy as np
import matplotlib.pyplot as plt
from alpha_shapes.alpha_shapes import Alpha_Shaper
# Demo script: reconstruct the alpha shape of a random 2-d point cloud
# and compare the optimal alpha with a 1.5x larger (coarser) value.

# Define a set of random points (1000 uniform samples in the unit square)
points = np.random.random((1000, 2))

# Prepare the shaper
alpha_shaper = Alpha_Shaper(points)

# Estimate the optimal alpha value and calculate the corresponding shape,
# timing how long the optimization takes
ts = time()
alpha_opt, alpha_shape = alpha_shaper.optimize()
te = time()
print(f'optimization took: {te-ts:.2} sec')

# Figure 1: raw data next to the shape obtained for the optimal alpha
fig, axs = plt.subplots(1,
                        2,
                        sharey=True,
                        sharex=True,
                        constrained_layout=True)
for ax in axs:
    # scatter the samples as small black dots on every axis
    ax.plot(*zip(*points),
            linestyle='',
            color='k',
            marker='.',
            markersize=1)
    ax.set_aspect('equal')
axs[0].set_title('data')
axs[1].add_patch(PolygonPatch(alpha_shape, alpha=0.2, color='r'))
axs[1].set_title(r'$\alpha_{\mathrm{opt}}$')

# Calculate the shape for greater than optimal alpha (coarser outline)
alpha_sub_opt = alpha_shaper.get_shape(alpha_opt*1.5)
print(alpha_opt)

# Compare the alpha shapes: data | optimal alpha | 1.5 * optimal alpha
fig, axs = plt.subplots(1, 3, sharey=True, sharex=True)
for ax in axs:
    ax.plot(*zip(*points),
            linestyle='',
            color='k',
            marker='.',
            markersize=1)
    ax.set_aspect('equal')
axs[0].set_title('data')
axs[1].add_patch(PolygonPatch(alpha_shape, alpha=0.2, color='r'))
axs[1].set_title(r'$\alpha_{\mathrm{opt}}$')
axs[2].add_patch(PolygonPatch(alpha_sub_opt, alpha=0.2, color='r'))
axs[2].set_title(r'$1.5\ \alpha_{\mathrm{opt}}$')

plt.show()
| from time import time
from descartes import PolygonPatch
import numpy as np
import matplotlib.pyplot as plt
from alpha_shapes.alpha_shapes import Alpha_Shaper
# Demo: alpha-shape reconstruction of a random 2-d point cloud, comparing
# the optimal alpha with a 1.5x larger (coarser) value.

# Define a set of random points
pts = np.random.random((1000, 2))

# Prepare the shaper
shaper = Alpha_Shaper(pts)

# Estimate the optimal alpha value and calculate the corresponding shape
t0 = time()
alpha_opt, alpha_shape = shaper.optimize()
t1 = time()
print(f'optimization took: {t1-t0:.2} sec')


def _scatter_points(ax):
    # draw the raw samples as small black dots with equal axis scaling
    ax.plot(*zip(*pts),
            linestyle='',
            color='k',
            marker='.',
            markersize=1)
    ax.set_aspect('equal')


# Figure 1: data next to the optimal-alpha shape
fig, axs = plt.subplots(1,
                        2,
                        sharey=True,
                        sharex=True,
                        constrained_layout=True)
for ax in axs:
    _scatter_points(ax)
axs[0].set_title('data')
axs[1].add_patch(PolygonPatch(alpha_shape, alpha=0.2, color='r'))
axs[1].set_title(r'$\alpha_{\mathrm{opt}}$')

# Calculate the shape for greater than optimal alpha
alpha_sub_opt = shaper.get_shape(alpha_opt*1.5)
print(alpha_opt)

# Compare the alpha shapes side by side
fig, axs = plt.subplots(1, 3, sharey=True, sharex=True)
for ax in axs:
    _scatter_points(ax)
axs[0].set_title('data')
axs[1].add_patch(PolygonPatch(alpha_shape, alpha=0.2, color='r'))
axs[1].set_title(r'$\alpha_{\mathrm{opt}}$')
axs[2].add_patch(PolygonPatch(alpha_sub_opt, alpha=0.2, color='r'))
axs[2].set_title(r'$1.5\ \alpha_{\mathrm{opt}}$')

plt.show()
| en | 0.679889 | # Define a set of random points # Prepare the shaper # Estimate the optimal alpha value and calculate the corresponding shape # Calculate the shape for greater than optimal alpha # Compare the alpha shapes | 2.969281 | 3 |