function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def calculate_all(request, course_slug, activity_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
activity = get_object_or_404(CalNumericActivity, slug=activity_slug, offering=course, deleted=False) | sfu-fas/coursys | [
61,
17,
61,
39,
1407368110
] |
def calculate_all_lettergrades(request, course_slug, activity_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
activity = get_object_or_404(CalLetterActivity, slug=activity_slug, offering=course, deleted=False) | sfu-fas/coursys | [
61,
17,
61,
39,
1407368110
] |
def calculate_individual_ajax(request, course_slug, activity_slug):
"""
Ajax way to calculate individual numeric grade.
This ajav view function is called in the activity_info page.
"""
if request.method == 'POST':
userid = request.POST.get('userid')
if userid == None:
return ForbiddenResponse(request) | sfu-fas/coursys | [
61,
17,
61,
39,
1407368110
] |
def _create_activity_formdatadict(activity):
    """Build a form-initialization dict from an Activity instance.

    Returns None when *activity* is not one of the known ACTIVITY_TYPES.
    """
    if not any(isinstance(activity, klass) for klass in ACTIVITY_TYPES):
        return
    data = {
        'name': activity.name,
        'short_name': activity.short_name,
        'status': activity.status,
        'due_date': activity.due_date,
        'percent': activity.percent,
        # config-backed values with their defaults
        'url': activity.config.get('url', ''),
        'showstats': activity.config.get('showstats', True),
        'showhisto': activity.config.get('showhisto', True),
    }
    if 'calculation_leak' in activity.config:
        data['calculation_leak'] = activity.config['calculation_leak']
    # map the stored group flag back to the form's key
    for key, flag in GROUP_STATUS_MAP.items():
        if activity.group == flag:
            data['group'] = key
    # subtype-specific fields
    if isinstance(activity, NumericActivity):
        data['max_grade'] = activity.max_grade
    if isinstance(activity, CalNumericActivity):
        data['formula'] = activity.formula
    if isinstance(activity, CalLetterActivity):
        data['numeric_activity'] = activity.numeric_activity_id
        data['exam_activity'] = activity.exam_activity_id
    return data
61,
17,
61,
39,
1407368110
] |
def _semester_date_warning(request, activity):
    """
    Generate warnings for this request if activity due date is outside semester boundaries.
    """
    due = activity.due_date
    if not due:
        return
    semester = activity.offering.semester
    # grace period: no warning for 24 hours after the last day of classes
    # (start of last day + 48 hours)
    end_cutoff = datetime.datetime.combine(
        semester.end, datetime.time(0, 0, 0)) + datetime.timedelta(hours=48)
    start_cutoff = datetime.datetime.combine(semester.start, datetime.time(0, 0, 0))
    if due > end_cutoff:
        messages.warning(request, "Activity is due after the end of the semester.")
    if due < start_cutoff:
        messages.warning(request, "Activity is due before the start of the semester.")
61,
17,
61,
39,
1407368110
] |
def edit_activity(request, course_slug, activity_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
activities = all_activities_filter(slug=activity_slug, offering=course)
numact_choices = [(na.pk, na.name) for na in NumericActivity.objects.filter(offering=course, deleted=False)]
examact_choices = [(0, '\u2014')] + [(na.pk, na.name) for na in Activity.objects.filter(offering=course, deleted=False)]
if (len(activities) == 1):
activity = activities[0]
# extend group options
activities_list = [(None, '\u2014'),]
activities = all_activities_filter(offering=course)
for a in activities:
if a.group == True and a.id != activity.id:
activities_list.append((a.slug, a.name))
from_page = request.GET.get('from_page') | sfu-fas/coursys | [
61,
17,
61,
39,
1407368110
] |
def delete_activity(request, course_slug, activity_slug):
"""
Flag activity as deleted
"""
course = get_object_or_404(CourseOffering, slug=course_slug)
activity = get_object_or_404(Activity, slug=activity_slug, offering=course)
if request.method == 'POST':
if not Member.objects.filter(offering=course, person__userid=request.user.username, role="INST"):
# only instructors can delete
return ForbiddenResponse(request, "Only instructors can delete activities") | sfu-fas/coursys | [
61,
17,
61,
39,
1407368110
] |
def release_activity(request, course_slug, activity_slug):
    """
    Bump activity status: INVI -> URLS, URLS -> RLS.

    POST advances the status one step and logs the change; always
    redirects back to the activity info page.
    """
    course = get_object_or_404(CourseOffering, slug=course_slug)
    activity = get_object_or_404(Activity, slug=activity_slug, offering=course, deleted=False)
    if request.method == 'POST':
        if activity.status == "INVI":
            # invisible -> visible (grades themselves still unreleased)
            activity.status = "URLS"
            activity.save(entered_by=request.user.username)
            messages.success(request, 'Activity made visible to students (but grades are still unreleased).')
            #LOG EVENT#
            l = LogEntry(userid=request.user.username,
                         description=("activity %s made visible") % (activity),
                         related_object=course)
            l.save()
        elif activity.status == "URLS":
            # visible -> grades released
            activity.status = "RLS"
            activity.save(entered_by=request.user.username)
            messages.success(request, 'Grades released to students.')
            #LOG EVENT#
            l = LogEntry(userid=request.user.username,
                         description=("activity %s grades released") % (activity),
                         related_object=course)
            l.save()
    return HttpResponseRedirect(reverse('offering:activity_info', kwargs={'course_slug': course.slug, 'activity_slug': activity.slug}))
61,
17,
61,
39,
1407368110
] |
def add_letter_activity(request, course_slug):
course = get_object_or_404(CourseOffering, slug=course_slug) | sfu-fas/coursys | [
61,
17,
61,
39,
1407368110
] |
def all_grades(request, course_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
activities = all_activities_filter(offering=course)
students = Member.objects.filter(offering=course, role="STUD").select_related('person', 'offering') | sfu-fas/coursys | [
61,
17,
61,
39,
1407368110
] |
def _all_grades_output(response, course):
activities = all_activities_filter(offering=course)
students = Member.objects.filter(offering=course, role="STUD").select_related('person')
# get grade data into a format we can work with
labtut = course.labtut
grades = {}
for a in activities:
grades[a.slug] = {}
if hasattr(a, 'numericgrade_set'):
gs = a.numericgrade_set.all().select_related('member', 'member__person')
else:
gs = a.lettergrade_set.all().select_related('member', 'member__person')
for g in gs:
grades[a.slug][g.member.person.userid] = g | sfu-fas/coursys | [
61,
17,
61,
39,
1407368110
] |
def all_grades_csv(request, course_slug):
course = get_object_or_404(CourseOffering, slug=course_slug) | sfu-fas/coursys | [
61,
17,
61,
39,
1407368110
] |
def grade_history(request, course_slug):
    """
    Dump all GradeHistory for the offering to a CSV
    """
    offering = get_object_or_404(CourseOffering, slug=course_slug)
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'inline; filename="%s-history.csv"' % (course_slug,)
    writer = csv.writer(response)
    header = ['Date/Time', 'Activity', 'Student', 'Entered By', 'Numeric Grade',
              'Letter Grade', 'Status', 'Group']
    writer.writerow(header)
    # status-only changes are excluded; prefetch related rows to avoid N+1 queries
    history = GradeHistory.objects.filter(activity__offering=offering, status_change=False) \
        .select_related('entered_by', 'activity', 'member__person', 'group')
    for entry in history:
        row = [
            entry.timestamp,
            entry.activity.short_name,
            entry.member.person.userid_or_emplid(),
            entry.entered_by.userid_or_emplid(),
            entry.numeric_grade,
            entry.letter_grade,
            FLAGS.get(entry.grade_flag, None),
            entry.group.slug if entry.group else None,
        ]
        writer.writerow(row)
    return response
61,
17,
61,
39,
1407368110
] |
def class_list(request, course_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
members = Member.objects.filter(offering=course, role="STUD").select_related('person', 'offering') | sfu-fas/coursys | [
61,
17,
61,
39,
1407368110
] |
def has_photo_agreement(user):
    """Return True iff *user* has recorded agreement to the photo-use policy."""
    agreement = UserConfig.objects.filter(user=user, key='photo-agreement')
    if not agreement:
        return False
    return bool(agreement[0].value['agree'])
61,
17,
61,
39,
1407368110
] |
def photo_list(request, course_slug, style='horiz'):
if style not in PHOTO_LIST_STYLES:
raise Http404
user = get_object_or_404(Person, userid=request.user.username)
if not has_photo_agreement(user):
url = reverse('config:photo_agreement') + '?return=' + urllib.parse.quote(request.path)
return ForbiddenResponse(request, mark_safe('You must <a href="%s">confirm the photo usage agreement</a> before seeing student photos.' % (url))) | sfu-fas/coursys | [
61,
17,
61,
39,
1407368110
] |
def student_photo(request, emplid):
    """Serve a student's photo as JPEG, subject to access checks.

    Advisors may view any photo; instructors must have confirmed the
    photo-use agreement and have taught the student within ~2 years.
    """
    # confirm user's photo agreement
    user = get_object_or_404(Person, userid=request.user.username)
    can_access = False
    if Role.objects_fresh.filter(person=user, role__in=['ADVS', 'ADVM']):
        # advisors bypass the per-user agreement check
        can_access = True
    else:
        if not has_photo_agreement(user):
            url = reverse('config:photo_agreement') + '?return=' + urllib.parse.quote(request.path)
            return ForbiddenResponse(request, mark_safe('You must <a href="%s">confirm the photo usage agreement</a> before seeing student photos.' % (url)))
        # confirm user is an instructor of this student (within the last two years)
        # TODO: cache past_semester to save the query?
        past_semester = Semester.get_semester(datetime.date.today() - datetime.timedelta(days=730))
        student_members = Member.objects.filter(offering__semester__name__gte=past_semester.name,
                                                person__emplid=emplid, role='STUD').select_related('offering')
        student_offerings = [m.offering for m in student_members]
        instructor_of = Member.objects.filter(person=user, role='INST', offering__in=student_offerings)
        can_access = (instructor_of.count() > 0)
    if not can_access:
        return ForbiddenResponse(request, 'You must be an instructor of this student.')
    # get the photo
    data, status = photo_for_view(emplid)
    # return the photo, propagating the backend's status code
    response = HttpResponse(data, content_type='image/jpeg')
    response.status_code = status
    response['Content-Disposition'] = 'inline; filename="%s.jpg"' % (emplid)
    response['Cache-Control'] = 'private, max-age=300'
    response.slow_okay = True
    return response
61,
17,
61,
39,
1407368110
] |
def new_message(request, course_slug):
    """Send a NewsItem message to every student in the offering.

    GET renders the message form; a valid POST fans the message out to
    all members, logs the action, and redirects to the course info page.
    An invalid POST re-renders the bound form with errors.
    """
    offering = get_object_or_404(CourseOffering, slug=course_slug)
    staff = get_object_or_404(Person, userid=request.user.username)
    default_message = NewsItem(user=staff, author=staff, course=offering, source_app="dashboard")
    if request.method == 'POST':
        form = MessageForm(data=request.POST, instance=default_message)
        # fix: was `form.is_valid() == True` — compare truthiness idiomatically
        if form.is_valid():
            NewsItem.for_members(member_kwargs={'offering': offering}, newsitem_kwargs={
                'author': staff, 'course': offering, 'source_app': 'dashboard',
                'title': form.cleaned_data['title'], 'content': form.cleaned_data['content'],
                'url': form.cleaned_data['url'], 'markup': form.cleaned_data['_markup']})
            #LOG EVENT#
            l = LogEntry(userid=request.user.username,
                         description=("created a message for every student in %s") % (offering),
                         related_object=offering)
            l.save()
            messages.add_message(request, messages.SUCCESS, 'News item created.')
            return HttpResponseRedirect(reverse('offering:course_info', kwargs={'course_slug': offering.slug}))
    else:
        form = MessageForm()
    return render(request, "grades/new_message.html", {"form": form, 'course': offering})
61,
17,
61,
39,
1407368110
] |
def student_search(request, course_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
if request.method == 'POST':
# find the student if we can and redirect to info page
form = StudentSearchForm(request.POST)
if not form.is_valid():
messages.add_message(request, messages.ERROR, 'Invalid search')
context = {'course': course, 'form': form}
return render(request, 'grades/student_search.html', context)
search = form.cleaned_data['search']
try:
int(search)
students = Member.objects.filter(offering=course, role="STUD").filter(Q(person__userid=search) | Q(person__emplid=search))
except ValueError:
students = Member.objects.filter(offering=course, role="STUD").filter(person__userid=search) | sfu-fas/coursys | [
61,
17,
61,
39,
1407368110
] |
def student_info(request, course_slug, userid):
course = get_object_or_404(CourseOffering, slug=course_slug)
member = get_object_or_404(Member, ~Q(role='DROP'), find_member(userid), offering__slug=course_slug)
requestor = get_object_or_404(Member, ~Q(role='DROP'), person__userid=request.user.username, offering__slug=course_slug)
activities = all_activities_filter(offering=course) | sfu-fas/coursys | [
61,
17,
61,
39,
1407368110
] |
def export_all(request, course_slug):
"""
Export everything we can about this offering
"""
import io, tempfile, zipfile, os, json
from django.http import StreamingHttpResponse
from wsgiref.util import FileWrapper
from marking.views import _mark_export_data, _DecimalEncoder
from discuss.models import DiscussionTopic
course = get_object_or_404(CourseOffering, slug=course_slug)
handle, filename = tempfile.mkstemp('.zip')
os.close(handle)
z = zipfile.ZipFile(filename, 'w')
# add all grades CSV
allgrades = io.StringIO()
_all_grades_output(allgrades, course)
z.writestr("grades.csv", allgrades.getvalue())
allgrades.close() | sfu-fas/coursys | [
61,
17,
61,
39,
1407368110
] |
def to_representation(self, instance):
    """Absolute thumbnail URL for *instance*, or None when unset."""
    if instance:
        return "{}{}".format(SITE_URL, thumbnail_url(instance, "thumbnail_240"))
    return None
9,
2,
9,
44,
1413831364
] |
def get_release_display(self, obj, **kwargs):
    """Name of the related release, or None when the media has no release."""
    release = obj.release
    if release:
        return release.name
    return None
9,
2,
9,
44,
1413831364
] |
def get_assets(self, obj, **kwargs):
    """Absolute stream/waveform asset URLs for a media object."""
    # TODO: properly serialize assets
    stream_path = reverse_lazy(
        "mediaasset-format",
        kwargs={"media_uuid": obj.uuid, "quality": "default", "encoding": "mp3"},
    )
    waveform_path = reverse_lazy(
        "mediaasset-waveform", kwargs={"media_uuid": obj.uuid, "type": "w"}
    )
    # TODO: check if requesting asset generation for media here is a good idea
    return {
        "stream": "{}{}".format(SITE_URL, stream_path),
        "waveform": "{}{}".format(SITE_URL, waveform_path),
    }
9,
2,
9,
44,
1413831364
] |
def get_items(self, obj, **kwargs):
    """Serialize each media object of *obj* into an item wrapper."""
    return [
        {"content": MediaSerializer(
            media, context={"request": self.context["request"]}
        ).data}
        for media in obj.get_media()
    ]
9,
2,
9,
44,
1413831364
] |
def to_representation(self, value):
    """
    Serialize tagged objects to a simple textual representation.
    """
    if isinstance(value, Media):
        serializer = MediaSerializer(
            value, context={"request": self.context["request"]}
        )
    elif isinstance(value, Media):
        # NOTE(review): duplicate isinstance(value, Media) check — this
        # branch is unreachable; presumably it was meant to test the
        # Jingle type. Confirm intended type before fixing.
        return "Jingle: {}".format(value.pk)
    else:
        raise Exception("Unexpected type of tagged object")
    return serializer.data
9,
2,
9,
44,
1413831364
] |
def get_content(self, obj, **kwargs):
    """Serialize the playlist item's content object.

    Only Media is currently supported; anything else raises.
    Fix: the original had two `isinstance(..., Media)` branches with
    byte-identical bodies — the second was unreachable dead code and has
    been merged away (behavior unchanged).
    """
    # TODO: implement for `Jingle` (the dead branch likely intended this)
    content = obj.item.content_object
    if isinstance(content, Media):
        serializer = MediaSerializer(
            instance=Media.objects.get(pk=content.pk),
            many=False,
            context={"request": self.context["request"]},
        )
    else:
        raise Exception("Unexpected type of tagged object")
    return serializer.data
9,
2,
9,
44,
1413831364
] |
def get_user(self, obj):
    """Serialized profile of the owning user, or None when absent.

    Fix: `getattr(obj.user, "profile")` had no default, so a user without
    a profile raised AttributeError instead of hitting the None guard.
    """
    user = obj.user
    if not (user and getattr(user, "profile", None)):
        return None
    return ProfileSerializer(user.profile, context=self.context).data
9,
2,
9,
44,
1413831364
] |
def get_dayparts(self, obj, **kwargs):
    """List the active dayparts as {day, start, end} dicts."""
    dayparts = []
    for daypart in obj.dayparts.active():
        dayparts.append({
            "day": daypart.day,
            "start": daypart.time_start,
            "end": daypart.time_end,
        })
    return dayparts
9,
2,
9,
44,
1413831364
] |
def __init__(self, developer_key, client_id, client_secret):
    """Store the API credentials used by later OAuth/request setup."""
    self.DEVELOPER_KEY = developer_key
    self.CLIENT_ID = client_id
    self.CLIENT_SECRET = client_secret
9,
44,
9,
83,
1393308162
] |
def setup_http_request_object(self):
    """Build GData request headers and an OAuth-authorized httplib2 client."""
    self.headers = {
        "GData-Version": "2",
        # developer key must be registered with the GData API
        "X-GData-Key": "key=%s" % self.DEVELOPER_KEY
    }
    # assumes self.credentials was set up beforehand — TODO confirm caller order
    self.http = self.credentials.authorize(httplib2.Http())
9,
44,
9,
83,
1393308162
] |
def set_caption_language_title(self, language='', title=''):
    """Record the language code and title used when uploading captions."""
    self.CAPTIONS_TITLE = title
    self.CAPTIONS_LANGUAGE_CODE = language
9,
44,
9,
83,
1393308162
] |
def readme():
    """Return the contents of README.md for long_description ('' if unreadable).

    Fix: read with an explicit UTF-8 encoding — the previous implicit
    locale-dependent encoding could mis-decode the file on some platforms.
    (IOError is an alias of OSError on Python 3; the catch is unchanged.)
    """
    try:
        with open('README.md', encoding='utf-8') as fle:
            return fle.read()
    except OSError:
        return ''
11,
4,
11,
9,
1471874052
] |
def _exception_detail(exc):
    """str(exc), guarding against exceptions whose __str__ itself raises."""
    # this is what stdlib module traceback does (bare except kept on purpose)
    try:
        detail = str(exc)
    except:
        detail = '<unprintable %s object>' % type(exc).__name__
    return detail
1,
3,
1,
1,
1417087373
] |
def __init__(self, stream, descriptions, verbosity, config=None,
             errorClasses=None):
    """Set up error-class bookkeeping, then delegate to _TextTestResult."""
    self.errorClasses = errorClasses if errorClasses is not None else {}
    self.config = config if config is not None else Config()
    _TextTestResult.__init__(self, stream, descriptions, verbosity)
1,
3,
1,
1,
1417087373
] |
def addError(self, test, err):
    """Overrides normal addError to add support for
    errorClasses. If the exception is a registered class, the
    error will be added to the list for that class, not errors.
    """
    # stream may be absent when patched into a streamless result
    stream = getattr(self, 'stream', None)
    ec, ev, tb = err
    try:
        exc_info = self._exc_info_to_string(err, test)
    except TypeError:
        # 2.3 compat
        exc_info = self._exc_info_to_string(err)
    # route the error to a registered error class, if any matches
    for cls, (storage, label, isfail) in self.errorClasses.items():
        if isclass(ec) and issubclass(ec, cls):
            if isfail:
                test.passed = False
            storage.append((test, exc_info))
            # Might get patched into a streamless result
            if stream is not None:
                if self.showAll:
                    message = [label]
                    detail = _exception_detail(err[1])
                    if detail:
                        message.append(detail)
                    stream.writeln(": ".join(message))
                elif self.dots:
                    stream.write(label[:1])
            return
    # not a registered class: record as an ordinary error
    self.errors.append((test, exc_info))
    test.passed = False
    if stream is not None:
        if self.showAll:
            # NOTE(review): uses self.stream here while the rest of the
            # method uses the local `stream` — looks inconsistent; confirm.
            self.stream.writeln('ERROR')
        elif self.dots:
            stream.write('E')
1,
3,
1,
1,
1417087373
] |
def printSummary(self, start, stop):
"""Called by the test runner to print the final summary of test
run results.
"""
write = self.stream.write
writeln = self.stream.writeln
taken = float(stop - start)
run = self.testsRun
plural = run != 1 and "s" or "" | mikel-egana-aranguren/SADI-Galaxy-Docker | [
1,
3,
1,
1,
1417087373
] |
def wasSuccessful(self):
    """Overrides to check that there are no errors in errorClasses
    lists that are marked as errors and should cause a run to
    fail.
    """
    if self.errors or self.failures:
        return False
    # any populated storage for a fail-marked error class fails the run
    for storage, _label, isfail in self.errorClasses.values():
        if isfail and storage:
            return False
    return True
1,
3,
1,
1,
1417087373
] |
def _exc_info_to_string(self, err, test=None):
    """Format exc_info, tolerating old unittest signatures."""
    # 2.3/2.4 -- 2.4 passes test, 2.3 does not
    try:
        return _TextTestResult._exc_info_to_string(self, err, test)
    except TypeError:
        # 2.3: does not take test arg
        return _TextTestResult._exc_info_to_string(self, err)
1,
3,
1,
1,
1417087373
] |
def __init__(self):
    """Initialise platform metadata and the set of supported modes."""
    self.platformName = "Streakgaming"
    self.tags = ["social", "news", "gaming"]
    # modes supported by this platform wrapper
    self.isValidMode = {
        "phonefy": False,
        "usufy": True,
        "searchfy": False,
    }
751,
229,
751,
66,
1419991203
] |
def lockfile() -> CoursierResolvedLockfile:
    """Build a synthetic resolved lockfile from the `direct` edge map."""
    # Calculate transitive deps: start from the direct edges and expand
    # until a fixed point (no new reachable pairs) is reached.
    transitive_ = {(i, k) for i, j in direct.items() for k in j}
    while True:
        old_len = len(transitive_)
        transitive_ |= {(i, k) for i, j in transitive_ for k in direct[j]}
        if old_len == len(transitive_):
            break
    # regroup the (from, to) pairs into a coord -> reachable-set map
    transitive = DefaultDict(set)
    for (i, j) in transitive_:
        transitive[i].add(j)
    entries = (
        CoursierLockfileEntry(
            coord=coord,
            file_name=f"{coord.artifact}.jar",
            direct_dependencies=Coordinates(direct[coord]),
            dependencies=Coordinates(transitive[coord]),
            # digest content is irrelevant for these tests
            file_digest=mock.Mock(),
        )
        for coord in direct
    )
    return CoursierResolvedLockfile(entries=tuple(entries))
2553,
518,
2553,
833,
1355765944
] |
def test_filter_non_transitive_includes_direct_deps(lockfile: CoursierResolvedLockfile) -> None:
    """Non-transitive filtering keeps the root coord plus its direct deps."""
    result = filter(coord2, lockfile, False)
    assert result == [coord2, coord3]
2553,
518,
2553,
833,
1355765944
] |
def vs_to_130(data):
    """Rewrite a Blender vertex shader for GLSL 1.30 (bl_-prefixed inputs)."""
    # declare the replacement vertex attributes
    attribute_specs = [
        ('bl_Vertex', gpu.CD_ORCO, gpu.GPU_DATA_4F),
        ('bl_Normal', -1, gpu.GPU_DATA_3F),
    ]
    for varname, type_, datatype in attribute_specs:
        data['attributes'].append({
            'varname': varname,
            'type': type_,
            'datatype': datatype,
        })
    # declare the replacement matrix uniforms
    uniform_specs = [
        ('bl_ModelViewMatrix', gpu.GPU_DATA_16F),
        ('bl_ProjectionMatrix', gpu.GPU_DATA_16F),
        ('bl_NormalMatrix', gpu.GPU_DATA_9F),
    ]
    for varname, datatype in uniform_specs:
        data['uniforms'].append({
            'varname': varname,
            'type': 0,
            'datatype': datatype,
        })
    header = '\n'.join([
        '#version 130',
        'in vec4 bl_Vertex;',
        'in vec3 bl_Normal;',
        'uniform mat4 bl_ModelViewMatrix;',
        'uniform mat4 bl_ProjectionMatrix;',
        'uniform mat3 bl_NormalMatrix;',
        '',
    ])
    src = header + data['vertex']
    # strip OpenSubdiv-only code paths, keeping the non-OSD branches
    src = re.sub(r'#ifdef USE_OPENSUBDIV([^#]*)#endif', '', src)
    src = re.sub(r'#ifndef USE_OPENSUBDIV([^#]*)#endif', r'\1', src)
    src = re.sub(r'#ifdef CLIP_WORKAROUND(.*?)#endif', '', src, 0, re.DOTALL)
    # 1.30 syntax + rename gl_* builtins (except gl_Position) to bl_*
    src = re.sub(r'\bvarying\b', 'out', src)
    src = re.sub(r'\bgl_(?!Position)(.*?)\b', r'bl_\1', src)
    data['vertex'] = src
314,
50,
314,
14,
1455166435
] |
def vs_to_web(data):
    """Downgrade a GLSL 1.30 vertex shader to WebGL-compatible GLSL 1.00."""
    # build the default-precision preamble required by GLSL ES
    precision_lines = ['']
    for data_type in ('float', 'int'):
        precision_lines.append('precision mediump {};'.format(data_type))
    precision_lines.append('')
    precision_block = '\n'.join(precision_lines)
    src = data['vertex'].replace('#version 130', '#version 100\n' + precision_block)
    # 1.00 uses attribute/varying instead of in/out
    src = re.sub(r'\bin\b', 'attribute', src)
    src = re.sub(r'\bout\b', 'varying', src)
    data['vertex'] = src
314,
50,
314,
14,
1455166435
] |
def to_130(data):
    """Convert both shader stages of *data* to GLSL 1.30 in place."""
    vs_to_130(data)
    fs_to_130(data)
314,
50,
314,
14,
1455166435
] |
def export_material(self, state, material):
    """Export a Blender material as glTF shaders, program and technique.

    Appends shader/program/technique entries to state['output'] and
    returns the material dict ({'technique', 'values', 'name'}).
    """
    shader_data = gpu.export_shader(bpy.context.scene, material)
    # pick the GLSL dialect for the target profile
    if state['settings']['asset_profile'] == 'DESKTOP':
        to_130(shader_data)
    else:
        to_web(shader_data)
    if self.settings.embed_shaders is True:
        # embed shader sources as base64 data URIs
        fs_bytes = shader_data['fragment'].encode()
        fs_uri = 'data:text/plain;base64,' + base64.b64encode(fs_bytes).decode('ascii')
        vs_bytes = shader_data['vertex'].encode()
        vs_uri = 'data:text/plain;base64,' + base64.b64encode(vs_bytes).decode('ascii')
    else:
        # write sources next to the exported glTF and reference by filename
        names = [
            bpy.path.clean_name(name) + '.glsl'
            for name in (material.name+'VS', material.name+'FS')
        ]
        data = (shader_data['vertex'], shader_data['fragment'])
        for name, data in zip(names, data):
            filename = os.path.join(state['settings']['gltf_output_dir'], name)
            with open(filename, 'w') as fout:
                fout.write(data)
        vs_uri, fs_uri = names
    state['output']['shaders'].append({
        'type': 35632,  # GL_FRAGMENT_SHADER
        'uri': fs_uri,
        'name': material.name + 'FS',
    })
    state['output']['shaders'].append({
        'type': 35633,  # GL_VERTEX_SHADER
        'uri': vs_uri,
        'name': material.name + 'VS',
    })
    # Handle programs
    state['output']['programs'].append({
        'attributes': [a['varname'] for a in shader_data['attributes']],
        'fragmentShader': 'shaders_{}FS'.format(material.name),
        'vertexShader': 'shaders_{}VS'.format(material.name),
        'name': material.name,
    })
    # Handle parameters/values
    values = {}
    parameters = {}
    for attribute in shader_data['attributes']:
        name = attribute['varname']
        semantic = TYPE_TO_SEMANTIC[attribute['type']]
        _type = DATATYPE_TO_GLTF_TYPE[attribute['datatype']]
        parameters[name] = {'semantic': semantic, 'type': _type}
    for uniform in shader_data['uniforms']:
        valname = TYPE_TO_NAME.get(uniform['type'], uniform['varname'])
        rnaname = valname
        semantic = None
        node = None
        value = None
        # matrix uniforms map directly to glTF semantics
        if uniform['varname'] == 'bl_ModelViewMatrix':
            semantic = 'MODELVIEW'
        elif uniform['varname'] == 'bl_ProjectionMatrix':
            semantic = 'PROJECTION'
        elif uniform['varname'] == 'bl_NormalMatrix':
            semantic = 'MODELVIEWINVERSETRANSPOSE'
        else:
            if uniform['type'] in LAMP_TYPES:
                # lamp-driven uniform: namespace by the lamp's node name
                node = uniform['lamp'].name
                valname = node + '_' + valname
                semantic = TYPE_TO_SEMANTIC.get(uniform['type'], None)
                if not semantic:
                    lamp_obj = bpy.data.objects[node]
                    value = getattr(lamp_obj.data, rnaname)
            elif uniform['type'] in MIST_TYPES:
                valname = 'mist_' + valname
                mist_settings = bpy.context.scene.world.mist_settings
                if valname == 'mist_color':
                    value = bpy.context.scene.world.horizon_color
                else:
                    value = getattr(mist_settings, rnaname)
                if valname == 'mist_falloff':
                    # map Blender's falloff enum to the shader's numeric codes
                    if value == 'QUADRATIC':
                        value = 0.0
                    elif value == 'LINEAR':
                        value = 1.0
                    else:
                        value = 2.0
            elif uniform['type'] in WORLD_TYPES:
                world = bpy.context.scene.world
                value = getattr(world, rnaname)
            elif uniform['type'] in MATERIAL_TYPES:
                converter = DATATYPE_TO_CONVERTER[uniform['datatype']]
                value = converter(getattr(material, rnaname))
                values[valname] = value
            elif uniform['type'] == gpu.GPU_DYNAMIC_SAMPLER_2DIMAGE:
                # find the texture slot whose image backs this sampler
                texture_slots = [
                    slot for slot in material.texture_slots
                    if slot and slot.texture.type == 'IMAGE'
                ]
                for slot in texture_slots:
                    if slot.texture.image.name == uniform['image'].name:
                        value = 'texture_' + slot.texture.name
                values[uniform['varname']] = value
            else:
                print('Unconverted uniform:', uniform)
        parameter = {}
        if semantic:
            parameter['semantic'] = semantic
        if node:
            parameter['node'] = 'node_' + node
        elif value:
            parameter['value'] = DATATYPE_TO_CONVERTER[uniform['datatype']](value)
        else:
            parameter['value'] = None
        if uniform['type'] == gpu.GPU_DYNAMIC_SAMPLER_2DIMAGE:
            parameter['type'] = 35678  # SAMPLER_2D
        else:
            parameter['type'] = DATATYPE_TO_GLTF_TYPE[uniform['datatype']]
        parameters[valname] = parameter
        uniform['valname'] = valname
    # Handle techniques
    tech_name = 'techniques_' + material.name
    state['output']['techniques'].append({
        'parameters': parameters,
        'program': 'programs_' + material.name,
        'attributes': {a['varname']: a['varname'] for a in shader_data['attributes']},
        'uniforms': {u['varname']: u['valname'] for u in shader_data['uniforms']},
        'name': material.name,
    })
    return {'technique': tech_name, 'values': values, 'name': material.name}
314,
50,
314,
14,
1455166435
] |
def fit(funct, data, err=None, **kwargs):
    """Generic function fitter.

    Fit *data* to the callable *funct*, whose first argument is the data
    space (e.g., x, t, f, Nr) and whose remaining arguments are the
    parameters to be fitted.

    Parameters
    ----------
    funct: callable
        Function with the first argument as data space, e.g., x, t, f, Nr.
        Any following arguments are the parameters to be fit.
    data: iterable (float)
        Data values.
    err: iterable (float) [None]
        Data error values in %/100. Default is 1% if None are given.

    Other Parameters
    ----------------
    *dataSpace*: iterable
        Keyword argument of the data space of len(data).
        The name needs to fit the first argument of funct.

    Returns
    -------
    model: array
        Fitted model parameter.
    response: array
        Model response.

    Example
    -------
    >>> import pygimli as pg
    >>>
    >>> func = lambda t, a, b: a*np.exp(b*t)
    >>> t = np.linspace(1, 2, 20)
    >>> data = func(t, 1.1, 2.2)
    >>> model, response = pg.frameworks.fit(func, data, t=t)
    >>> print(pg.core.round(model, 1e-5))
    2 [1.1, 2.2]
    """
    manager = ParameterInversionManager(funct, **kwargs)
    fitted_model = manager.invert(data, err, **kwargs)
    return fitted_model, manager.fw.response
257,
109,
257,
13,
1378329047
] |
def __init__(self, fop=None, fw=None, data=None, **kwargs):
    """Constructor."""
    self._fop = fop
    self._fw = fw
    self._verbose = kwargs.pop('verbose', False)
    self._debug = kwargs.pop('debug', False)
    # we hold our own copy of the data; a string argument means "load file"
    self.data = None
    if data is not None:
        if isinstance(data, str):
            self.load(data)
        else:
            self.data = data
    # The inversion framework (stored in self._fw)
    self._initInversionFramework(verbose=self._verbose,
                                 debug=self._debug)
    # The forward operator
    self._initForwardOperator(verbose=self._verbose, **kwargs)
    # maybe obsolete
    self.figs = {}
    self.errIsAbsolute = False
257,
109,
257,
13,
1378329047
] |
def verbose(self):
    """Whether verbose output is enabled (bool)."""
    return self._verbose
257,
109,
257,
13,
1378329047
] |
def verbose(self, v):
    """Set verbosity and propagate it to the inversion framework."""
    self._verbose = v
    self.fw.verbose = self._verbose
257,
109,
257,
13,
1378329047
] |
def debug(self):
    """Whether debug output is enabled (bool)."""
    return self._debug
257,
109,
257,
13,
1378329047
] |
def debug(self, v):
    """Set the debug flag and propagate it to the inversion framework."""
    self._debug = v
    self.fw.debug = self._debug
257,
109,
257,
13,
1378329047
] |
def fw(self):
    """The inversion framework instance (read access to self._fw)."""
    return self._fw
257,
109,
257,
13,
1378329047
] |
def fop(self):
    """Forward operator, as held by the inversion framework."""
    return self.fw.fop
257,
109,
257,
13,
1378329047
] |
def inv(self):
    """Alias for the inversion framework."""
    return self.fw
257,
109,
257,
13,
1378329047
] |
def model(self):
    """Model from the last inversion run (held by the framework)."""
    return self.fw.model
257,
109,
257,
13,
1378329047
] |
def _initForwardOperator(self, **kwargs):
    """Initialize or re-initialize the forward operator.
    Called once in the constructor to force the manager to create the
    necessary forward operator member. Can be recalled if you need to
    changed the mangers own forward operator object. If you want an own
    instance of a valid FOP call createForwardOperator.
    """
    # prefer an operator injected through the constructor
    if self._fop is not None:
        fop = self._fop
    else:
        fop = self.createForwardOperator(**kwargs)
    if fop is None:
        pg.critical("It seems that createForwardOperator method "
                    "does not return a valid forward operator.")
    if self.fw is not None:
        # drop state from any previous operator before attaching the new one
        self.fw.reset()
        self.fw.setForwardOperator(fop)
    else:
        pg.critical("No inversion framework defined.")
257,
109,
257,
13,
1378329047
] |
def _initInversionFramework(self, **kwargs):
    """Initialize or re-initialize the inversion framework.
    Called once in the constructor to force the manager to create the
    necessary Framework instance.
    """
    self._fw = self.createInversionFramework(**kwargs)
    if self.fw is None:
        pg.critical("createInversionFramework does not return "
                    "valid inversion framework.")
257,
109,
257,
13,
1378329047
] |
def load(self, fileName):
    """API, overwrite in derived classes."""
    # base implementation always aborts: loading is subclass-specific
    pg.critical('API, overwrite in derived classes', fileName)
257,
109,
257,
13,
1378329047
] |
def simulate(self, model, **kwargs):
    """Run a simulation aka the forward task.

    Returns the forward response; with noiseLevel > 0 the response is
    perturbed with Gaussian noise and (response, error) is returned.
    """
    ra = self.fop.response(par=model)
    noiseLevel = kwargs.pop('noiseLevel', 0.0)
    if noiseLevel > 0:
        err = self.estimateError(ra, errLevel=noiseLevel)
        # relative Gaussian noise scaled by the estimated error
        ra *= 1. + pg.randn(ra.size(), seed=kwargs.pop('seed', None)) * err
        return ra, err
    return ra
257,
109,
257,
13,
1378329047
] |
def applyData(self, data):
    """Forward *data* to the forward operator."""
    self.fop.data = data
257,
109,
257,
13,
1378329047
] |
def _ensureData(self, data):
    """Validate data values, falling back to the framework's stored data."""
    source = self.fw.dataVals if data is None else data
    vals = self.checkData(source)
    if vals is None:
        pg.critical("There are no data values.")
    smallest = min(vals)
    if abs(smallest) < 1e-12:
        print(smallest, max(vals))
        pg.critical("There are zero data values.")
    return vals
257,
109,
257,
13,
1378329047
] |
def _ensureError(self, err, dataVals=None):
    """Validate relative error values, falling back to 1% when none given."""
    candidate = self.fw.errorVals if err is None else err
    vals = self.checkError(candidate, dataVals)
    if vals is None:
        pg.warn('No data error given, set Fallback set to 1%')
        vals = np.ones(len(dataVals)) * 0.01
    try:
        if min(vals) <= 0:
            pg.critical("All error values need to be larger then 0. Either"
                        " give and err argument or fill dataContainer "
                        " with a valid 'err' ", min(vals), max(vals))
    except ValueError:
        # min()/max() failed, e.g. on an empty or non-numeric container
        pg.critical("Can't estimate data error")
    return vals
257,
109,
257,
13,
1378329047
] |
def postRun(self, *args, **kwargs):
    """Hook called just after the inversion run. Overwrite if needed."""
    pass
def showModel(self, model, ax=None, **kwargs):
    """Show a model.

    Draw model into a given axes or show inversion result from last run.
    Forwards on default to the self.fop.drawModel function of the
    modelling operator. If there is no function given, you have to
    override this method.

    Parameters
    ----------
    model : iterable
        Model data to be drawn.
    ax : mpl axes
        Axes object to draw into. Creates a new one if not given.

    Returns
    -------
    ax, cbar
    """
    if ax is None:
        fig, ax = pg.plt.subplots()

    ax, cBar = self.fop.drawModel(ax, model, **kwargs)
    return ax, cBar
def showResult(self, model=None, ax=None, **kwargs):
    """Show the last inversion result.

    TODO
    ----
    DRY: decide showModel or showResult

    Parameters
    ----------
    ax : mpl axes
        Axes object to draw into. Creates a new one if not given.
    model : iterable [None]
        Model values to be drawn. Default is self.model from the last run.

    Returns
    -------
    ax, cbar
    """
    if model is None:
        model = self.model

    return self.showModel(model, ax=ax, **kwargs)
def showResultAndFit(self, **kwargs):
    """Show the inversion result together with the data fit.

    Calls showResult into the left half of a figure and showFit into the
    stacked right half.

    Returns
    -------
    fig : matplotlib figure
    """
    fig = pg.plt.figure()

    ax = fig.add_subplot(1, 2, 1)
    self.showResult(ax=ax, model=self.model, **kwargs)

    ax1 = fig.add_subplot(2, 2, 2)
    ax2 = fig.add_subplot(2, 2, 4)
    self.showFit(axs=[ax1, ax2], **kwargs)

    fig.tight_layout()
    return fig
def createArgParser(dataSuffix='dat'):
    """Create default argument parser.

    TODO move this to some kind of app class

    Create default argument parser for the following options:

    -Q, --quiet
    -R, --robustData: options.robustData
    -B, --blockyModel: options.blockyModel
    -l, --lambda: options.lam
    -i, --maxIter: options.maxIter
    --depth: options.depth

    Parameters
    ----------
    dataSuffix : str ['dat']
        File suffix shown in the usage string.

    Returns
    -------
    parser : argparse.ArgumentParser
    """
    import argparse

    parser = argparse.ArgumentParser(
        description="usage: %prog [options] *." + dataSuffix)

    # Help text was wrongly "Be verbose." for the quiet flag.
    parser.add_argument("-Q", "--quiet", dest="quiet",
                        action="store_true", default=False,
                        help="Be quiet.")
    parser.add_argument("-R", "--robustData", dest="robustData",
                        action="store_true", default=False,
                        help="Robust (L1 norm) data misfit minimization.")
    parser.add_argument("-B", "--blockyModel", dest="blockyModel",
                        action="store_true", default=False,
                        help="Blocky (L1 norm) model regularization.")
    # Defaults below reconstructed from common usage — TODO confirm.
    parser.add_argument("-l", "--lambda", dest="lam", type=float,
                        default=100,
                        help="Regularization strength lambda.")
    parser.add_argument("-i", "--maxIter", dest="maxIter", type=int,
                        default=20,
                        help="Maximum iteration count.")
    parser.add_argument("--depth", dest="depth", type=float,
                        default=None,
                        help="Depth of the inversion domain.")
    return parser
def __init__(self, funct=None, fop=None, **kwargs):
    """Constructor.

    Accepts either a ready-made forward operator of type
    pg.frameworks.ParameterModelling, or a plain function from which the
    operator is created. Aborts when neither is given.
    """
    if fop is not None:
        if not isinstance(fop, pg.frameworks.ParameterModelling):
            # Message fixed: was "We need a fop if type".
            pg.critical("We need a fop of type ",
                        pg.frameworks.ParameterModelling)
    elif funct is not None:
        fop = pg.frameworks.ParameterModelling(funct)
    else:
        pg.critical("you should either give a valid fop or a function so "
                    "I can create the fop for you")

    super(ParameterInversionManager, self).__init__(fop, **kwargs)
def invert(self, data=None, err=None, **kwargs):
    """Run the inversion with per-parameter settings.

    Parameters
    ----------
    limits: {str: [min, max]}
        Set limits for parameter by parameter name.
    startModel: {str: startModel}
        Set the start value for parameter by parameter name.
    """
    dataSpace = kwargs.pop(self.fop.dataSpaceName, None)
    if dataSpace is not None:
        self.fop.dataSpace = dataSpace

    # Per-parameter limits go to the region manager by name.
    limits = kwargs.pop('limits', {})
    for k, v in limits.items():
        self.fop.setRegionProperties(k, limits=v)

    # A dict start model is distributed per parameter; anything else is
    # passed through unchanged to the base-class invert.
    startModel = kwargs.pop('startModel', {})
    if isinstance(startModel, dict):
        for k, v in startModel.items():
            self.fop.setRegionProperties(k, startModel=v)
    else:
        kwargs['startModel'] = startModel

    return super(ParameterInversionManager, self).invert(data=data,
                                                         err=err,
                                                         **kwargs)
def __init__(self, fop=None, **kwargs):
    """Constructor. Forwards everything to the base method manager."""
    super(MethodManager1d, self).__init__(fop, **kwargs)
def invert(self, data=None, err=None, **kwargs):
    """Run the inversion; thin forward to the base-class invert."""
    return super(MethodManager1d, self).invert(data=data, err=err,
                                               **kwargs)
def __init__(self, **kwargs):
    """Constructor.

    Attribute
    ---------
    mesh: pg.Mesh
        Copy of the main mesh to be distributed to inversion and the fop.
        You can overwrite it with invert(mesh=mesh).
    """
    super(MeshMethodManager, self).__init__(**kwargs)
    # No mesh yet; set later via applyMesh/invert.
    self.mesh = None
def paraDomain(self):
    """Return the parameter domain mesh of the forward operator."""
    return self.fop.paraDomain
def createMesh(self, data=None, **kwargs):
    """Create a default mesh. API stub, implement in derived classes."""
    pg.critical('no default mesh generation defined .. implement in '
                'derived class')
def applyMesh(self, mesh, ignoreRegionManager=False, **kwargs):
    """Pass a mesh to the forward operator.

    When the region manager is bypassed, the mesh is first refined into a
    forward-calculation mesh.
    """
    if ignoreRegionManager:
        mesh = self.fop.createRefinedFwdMesh(mesh, **kwargs)

    self.fop.setMesh(mesh, ignoreRegionManager=ignoreRegionManager)
def showFit(self, axs=None, **kwargs):
    """Show data and the inversion result model response.

    Draws the data into axs[0] and the model response into axs[1],
    annotated with relative rms and chi^2 of the last run (or of a model
    given via kwargs['model']).

    Parameters
    ----------
    axs : [mpl axes, mpl axes] [None]
        Two axes to draw into; a fresh 1x2 figure is created if not given.

    Returns
    -------
    axs
    """
    orientation = 'vertical'
    if axs is None:
        fig, axs = pg.plt.subplots(nrows=1, ncols=2)
        orientation = 'horizontal'

    self.showData(data=self.inv.dataVals,
                  orientation=orientation,
                  ax=axs[0], **kwargs)
    axs[0].text(0.0, 1.03, "Data",
                transform=axs[0].transAxes,
                horizontalalignment='left',
                verticalalignment='center')

    # Either evaluate a user-given model or reuse the last run's response.
    resp = None
    data = None
    if 'model' in kwargs:
        resp = self.fop.response(kwargs['model'])
        data = self._ensureData(self.fop.data)
    else:
        resp = self.inv.response
        data = self.fw.dataVals

    self.showData(data=resp,
                  orientation=orientation,
                  ax=axs[1], **kwargs)
    axs[1].text(0.0, 1.03, "Response",
                transform=axs[1].transAxes,
                horizontalalignment='left',
                verticalalignment='center')

    fittext = r"rrms: {0}%, $\chi^2$: {1}".format(
        pg.pf(pg.utils.rrms(data, resp)*100),
        pg.pf(self.fw.chi2History[-1]))
    axs[1].text(1.0, 1.03, fittext,
                transform=axs[1].transAxes,
                horizontalalignment='right',
                verticalalignment='center')

    return axs
def standardizedCoverage(self, threshhold=0.01):
    """Return standardized coverage vector (0|1) using thresholding.

    Parameters
    ----------
    threshhold : float [0.01]
        Absolute coverage values above this yield 1, others 0.
        (Parameter name kept, including its typo, for API compatibility.)
    """
    return 1.0*(abs(self.coverage()) > threshhold)
def __init__(self, petro, mgr=None, **kwargs):
    """Initialize instance with manager and petrophysical relation.

    Builds (or accepts via kwargs['petrofop']) a PetroModelling wrapper
    around either a given fop (kwargs['fop']) or one created by the given
    manager. Aborts when no operator can be derived.
    """
    petrofop = kwargs.pop('petrofop', None)

    if petrofop is None:
        fop = kwargs.pop('fop', None)

        if fop is None and mgr is not None:
            # Check! why I can't use mgr.fop
            # fop = mgr.fop
            fop = mgr.createForwardOperator()
            # Borrow the manager's data/error checkers.
            self.checkData = mgr.checkData
            self.checkError = mgr.checkError

        if fop is not None:
            if not isinstance(fop, pg.frameworks.PetroModelling):
                petrofop = pg.frameworks.PetroModelling(fop, petro)

    if petrofop is None:
        print(mgr)
        print(fop)
        pg.critical('implement me')

    super().__init__(fop=petrofop, **kwargs)
def __init__(self, petros, mgrs):
    """Initialize with lists of managers and transformations.

    Wraps each manager's forward operator in a PetroModelling with its
    matching petrophysical relation and joins them into one operator.
    """
    self.mgrs = mgrs

    self.fops = [pg.frameworks.PetroModelling(m.fop, p)
                 for p, m in zip(petros, mgrs)]

    super().__init__(fop=pg.frameworks.JointModelling(self.fops))

    # just hold a local copy
    self.dataTrans = pg.trans.TransCumulative()
def checkData(self, data):
    """Collect data values from all joined managers.

    Expects one data set per manager; concatenates the checked values and
    rebuilds the cumulative data transformation in the same order.
    """
    if len(data) != len(self.mgrs):
        pg.critical("Please provide data for all managers")

    self.dataTrans.clear()
    vals = pg.Vector(0)
    for i, mgr in enumerate(self.mgrs):
        self.dataTrans.add(mgr.inv.dataTrans, data[i].size())
        vals = pg.cat(vals, mgr.checkData(data[i]))

    self.inv.dataTrans = self.dataTrans
    return vals
def connect(server=None, url=None, ip=None, port=None, https=None, verify_ssl_certificates=None, auth=None,
            proxy=None, cookies=None, verbose=True, config=None):
    """
    Connect to an existing H2O server, remote or local.

    There are two ways to connect to a server: either pass a `server` parameter containing an instance of
    an H2OLocalServer, or specify `ip` and `port` of the server that you want to connect to.

    :param server: An H2OLocalServer instance to connect to (optional).
    :param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
    :param ip: The ip address (or host name) of the server where H2O is running.
    :param port: Port number that H2O service is listening to.
    :param https: Set to True to connect via https:// instead of http://.
    :param verify_ssl_certificates: When using https, setting this to False will disable SSL certificates verification.
    :param auth: Either a (username, password) pair for basic authentication, an instance of h2o.auth.SpnegoAuth
        or one of the requests.auth authenticator objects.
    :param proxy: Proxy server address.
    :param cookies: Cookie (or list of) to add to request
    :param verbose: Set to False to disable printing connection status messages.
    :param config: Connection configuration object encapsulating connection parameters.
    :returns: the new :class:`H2OConnection` object.
    """
    global h2oconn
    if config:
        # A config may either wrap the parameters under "connect_params"
        # or be the parameter mapping itself.
        if "connect_params" in config:
            h2oconn = _connect_with_conf(config["connect_params"])
        else:
            h2oconn = _connect_with_conf(config)
    else:
        h2oconn = H2OConnection.open(server=server, url=url, ip=ip, port=port, https=https,
                                     auth=auth, verify_ssl_certificates=verify_ssl_certificates,
                                     proxy=proxy, cookies=cookies,
                                     verbose=verbose)
        if verbose:
            h2oconn.cluster.show_status()
    return h2oconn
def connection():
    """Return the current :class:`H2OConnection` handler."""
    return h2oconn
def init(url=None, ip=None, port=None, name=None, https=None, insecure=None, username=None, password=None,
         cookies=None, proxy=None, start_h2o=True, nthreads=-1, ice_root=None, log_dir=None, log_level=None,
         enable_assertions=True, max_mem_size=None, min_mem_size=None, strict_version_check=None, ignore_config=False,
         extra_classpath=None, jvm_custom_args=None, bind_to_localhost=True, **kwargs):
    """
    Attempt to connect to a local server, or if not successful start a new server and connect to it.

    :param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
    :param ip: The ip address (or host name) of the server where H2O is running.
    :param port: Port number that H2O service is listening to.
    :param name: cloud name. If None while connecting to an existing cluster it will not check the cloud name.
        If set then will connect only if the target cloud name matches. If no instance is found and decides to start
        a local one then this will be used as the cloud name or a random one will be generated if set to None.
    :param https: Set to True to connect via https:// instead of http://.
    :param insecure: When using https, setting this to True will disable SSL certificates verification.
    :param username: Username and
    :param password: Password for basic authentication.
    :param cookies: Cookie (or list of) to add to each request.
    :param proxy: Proxy server address.
    :param start_h2o: If False, do not attempt to start an h2o server when connection to an existing one failed.
    :param nthreads: "Number of threads" option when launching a new h2o server.
    :param ice_root: Directory for temporary files for the new h2o server.
    :param log_dir: Directory for H2O logs to be stored if a new instance is started. Ignored if connecting to an existing node.
    :param log_level: The logger level for H2O if a new instance is started. One of TRACE,DEBUG,INFO,WARN,ERRR,FATA. Default is INFO. Ignored if connecting to an existing node.
    :param enable_assertions: Enable assertions in Java for the new h2o server.
    :param max_mem_size: Maximum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
    :param min_mem_size: Minimum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
    :param strict_version_check: If True, an error will be raised if the client and server versions don't match.
    :param ignore_config: Indicates whether a processing of a .h2oconfig file should be conducted or not. Default value is False.
    :param extra_classpath: List of paths to libraries that should be included on the Java classpath when starting H2O from Python.
    :param kwargs: (all other deprecated attributes)
    :param jvm_custom_args: Customer, user-defined argument's for the JVM H2O is instantiated in. Ignored if there is an instance of H2O already running and the client connects to it.
    """
    global h2oconn
    assert_is_type(url, str, None)
    assert_is_type(ip, str, None)
    assert_is_type(port, int, str, None)
    assert_is_type(name, str, None)
    assert_is_type(https, bool, None)
    assert_is_type(insecure, bool, None)
    assert_is_type(username, str, None)
    assert_is_type(password, str, None)
    assert_is_type(cookies, str, [str], None)
    assert_is_type(proxy, {str: str}, None)
    assert_is_type(start_h2o, bool, None)
    assert_is_type(nthreads, int)
    assert_is_type(ice_root, str, None)
    assert_is_type(log_dir, str, None)
    assert_is_type(log_level, str, None)
    assert_satisfies(log_level, log_level in [None, "TRACE", "DEBUG", "INFO", "WARN", "ERRR", "FATA"])
    assert_is_type(enable_assertions, bool)
    assert_is_type(max_mem_size, int, str, None)
    assert_is_type(min_mem_size, int, str, None)
    assert_is_type(strict_version_check, bool, None)
    assert_is_type(extra_classpath, [str], None)
    assert_is_type(jvm_custom_args, [str], None)
    assert_is_type(bind_to_localhost, bool)
    assert_is_type(kwargs, {"proxies": {str: str}, "max_mem_size_GB": int, "min_mem_size_GB": int,
                            "force_connect": bool, "as_port": bool})

    def get_mem_size(mmint, mmgb):
        # Normalize a memory-size argument: plain int < 1000 is treated as
        # gigabytes, larger ints as bytes, strings take an M/G/T suffix.
        if not mmint:  # treat 0 and "" as if they were None
            if mmgb is None: return None
            return mmgb << 30
        if is_type(mmint, int):
            # If the user gives some small number just assume it's in Gigabytes...
            if mmint < 1000: return mmint << 30
            return mmint
        if is_type(mmint, str):
            last = mmint[-1].upper()
            num = mmint[:-1]
            if not (num.isdigit() and last in "MGT"):
                raise H2OValueError("Wrong format for a *_memory_size argument: %s (should be a number followed by "
                                    "a suffix 'M', 'G' or 'T')" % mmint)
            if last == "T": return int(num) << 40
            if last == "G": return int(num) << 30
            if last == "M": return int(num) << 20

    scheme = "https" if https else "http"
    proxy = proxy[scheme] if proxy is not None and scheme in proxy else \
        kwargs["proxies"][scheme] if "proxies" in kwargs and scheme in kwargs["proxies"] else None
    mmax = get_mem_size(max_mem_size, kwargs.get("max_mem_size_GB"))
    mmin = get_mem_size(min_mem_size, kwargs.get("min_mem_size_GB"))
    auth = (username, password) if username and password else None
    check_version = True
    verify_ssl_certificates = True

    # Apply the config file if ignore_config=False
    if not ignore_config:
        config = H2OConfigReader.get_config()
        if url is None and ip is None and port is None and https is None and "init.url" in config:
            url = config["init.url"]
        if proxy is None and "init.proxy" in config:
            proxy = config["init.proxy"]
        if cookies is None and "init.cookies" in config:
            cookies = config["init.cookies"].split(";")
        if auth is None and "init.username" in config and "init.password" in config:
            auth = (config["init.username"], config["init.password"])
        if strict_version_check is None:
            if "init.check_version" in config:
                check_version = config["init.check_version"].lower() != "false"
            elif os.environ.get("H2O_DISABLE_STRICT_VERSION_CHECK"):
                check_version = False
        else:
            check_version = strict_version_check

    # Compute SSL verification once so both connection attempts agree
    # (previously the second attempt used `not insecure` directly, and the
    # config-derived value was only honored when ignore_config was False).
    if insecure is None:
        if not ignore_config and "init.verify_ssl_certificates" in config:
            verify_ssl_certificates = config["init.verify_ssl_certificates"].lower() != "false"
    else:
        verify_ssl_certificates = not insecure

    if not start_h2o:
        print("Warning: if you don't want to start local H2O server, then use of `h2o.connect()` is preferred.")
    try:
        h2oconn = H2OConnection.open(url=url, ip=ip, port=port, name=name, https=https,
                                     verify_ssl_certificates=verify_ssl_certificates,
                                     auth=auth, proxy=proxy, cookies=cookies, verbose=True,
                                     _msgs=("Checking whether there is an H2O instance running at {url} ",
                                            "connected.", "not found."))
    except H2OConnectionError:
        # Backward compatibility: in init() port parameter really meant "baseport" when starting a local server...
        if port and not str(port).endswith("+") and not kwargs.get("as_port", False):
            port = str(port) + "+"
        if not start_h2o: raise
        if ip and not (ip == "localhost" or ip == "127.0.0.1"):
            raise H2OConnectionError('Can only start H2O launcher if IP address is localhost.')
        hs = H2OLocalServer.start(nthreads=nthreads, enable_assertions=enable_assertions, max_mem_size=mmax,
                                  min_mem_size=mmin, ice_root=ice_root, log_dir=log_dir, log_level=log_level,
                                  port=port, name=name,
                                  extra_classpath=extra_classpath, jvm_custom_args=jvm_custom_args,
                                  bind_to_localhost=bind_to_localhost)
        h2oconn = H2OConnection.open(server=hs, https=https,
                                     verify_ssl_certificates=verify_ssl_certificates,
                                     auth=auth, proxy=proxy, cookies=cookies, verbose=True)
    if check_version:
        version_check()
    h2oconn.cluster.timezone = "UTC"
    h2oconn.cluster.show_status()
def _import_multi(paths, pattern):
    """Import multiple remote files into H2O, returning destination frames.

    :param paths: list of server-side paths to import.
    :param pattern: optional regex restricting which files are imported.
    :raises ValueError: when the server reports failed paths.
    """
    assert_is_type(paths, [str])
    assert_is_type(pattern, str, None)
    j = api("POST /3/ImportFilesMulti", {"paths": paths, "pattern": pattern})
    # Join with ", " (was "."), so failures list reads as separate paths.
    if j["fails"]: raise ValueError("ImportFiles of '" + ", ".join(paths) + "' failed on " + str(j["fails"]))
    return j["destination_frames"]
def import_file(path=None, destination_frame=None, parse=True, header=0, sep=None, col_names=None, col_types=None,
                na_strings=None, pattern=None, skipped_columns=None):
    """
    Import a dataset that is already on the cluster.

    The path to the data must be a valid path for each node in the H2O cluster. If some node in the H2O cluster
    cannot see the file, then an exception will be thrown by the H2O cluster. Does a parallel/distributed
    multi-threaded pull of the data. The main difference between this method and :func:`upload_file` is that
    the latter works with local files, whereas this method imports remote files (i.e. files local to the server).
    If you are running H2O server on your own machine, then both methods behave the same.

    :param path: path(s) specifying the location of the data to import or a path to a directory of files to import
    :param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will be
        automatically generated.
    :param parse: If True, the file should be parsed after import. If False, then a list is returned containing the file path.
    :param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
    :param sep: The field separator character. Values on each line of the file are separated by
        this character. If not provided, the parser will automatically detect the separator.
    :param col_names: A list of column names for the file.
    :param col_types: A list of types or a dictionary of column names to types to specify whether columns
        should be forced to a certain type upon import parsing. If a list, the types for elements that are
        one will be guessed. The possible types a column may have are:

        - "unknown" - this will force the column to be parsed as all NA
        - "uuid" - the values in the column must be true UUID or will be parsed as NA
        - "string" - force the column to be parsed as a string
        - "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
          data in the optimal manner.
        - "enum" - force the column to be parsed as a categorical column.
        - "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
          list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
          "HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
          Times can also contain "AM" or "PM".

    :param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
        of column names to strings which are to be interpreted as missing values.
    :param pattern: Character string containing a regular expression to match file(s) in the folder if `path` is a
        directory.
    :param skipped_columns: an integer list of column indices to skip and not parsed into the final frame from the import file.
    :returns: a new :class:`H2OFrame` instance.

    :examples:

    >>> # Single file import
    >>> iris = import_file("h2o-3/smalldata/iris.csv")
    >>> # Return all files in the folder iris/ matching the regex r"iris_.*\.csv"
    >>> iris_pattern = h2o.import_file(path = "h2o-3/smalldata/iris",
    ...                                pattern = "iris_.*\.csv")
    """
    coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
                "categorical", "factor", "enum", "time")
    natype = U(str, [str])
    assert_is_type(path, str, [str])
    assert_is_type(pattern, str, None)
    assert_is_type(destination_frame, str, None)
    assert_is_type(parse, bool)
    assert_is_type(header, -1, 0, 1)
    assert_is_type(sep, None, I(str, lambda s: len(s) == 1))
    assert_is_type(col_names, [str], None)
    assert_is_type(col_types, [coltype], {str: coltype}, None)
    assert_is_type(na_strings, [natype], {str: natype}, None)
    assert isinstance(skipped_columns, (type(None), list)), "The skipped_columns should be a list of column names!"
    check_frame_id(destination_frame)
    patharr = path if isinstance(path, list) else [path]
    # "~"-relative paths resolve on the client, not on the server nodes.
    if any(os.path.split(p)[0] == "~" for p in patharr):
        raise H2OValueError("Paths relative to a current user (~) are not valid in the server environment. "
                            "Please use absolute paths if possible.")
    if not parse:
        return lazy_import(path, pattern)
    else:
        return H2OFrame()._import_parse(path, pattern, destination_frame, header, sep, col_names,
                                        col_types, na_strings, skipped_columns)
def import_sql_select(connection_url, select_query, username, password, optimize=True, fetch_mode=None):
    """
    Import the SQL table that is the result of the specified SQL query to H2OFrame in memory.

    Creates a temporary SQL table from the specified sql_query.
    Runs multiple SELECT SQL queries on the temporary table concurrently for parallel ingestion, then drops the table.
    Be sure to start the h2o.jar in the terminal with your downloaded JDBC driver in the classpath::

        java -cp <path_to_h2o_jar>:<path_to_jdbc_driver_jar> water.H2OApp

    Also see h2o.import_sql_table. Currently supported SQL databases are MySQL, PostgreSQL, and MariaDB. Support
    for Oracle 12g and Microsoft SQL Server is forthcoming.

    :param connection_url: URL of the SQL database connection as specified by the Java Database Connectivity (JDBC)
        Driver. For example, "jdbc:mysql://localhost:3306/menagerie?&useSSL=false"
    :param select_query: SQL query starting with `SELECT` that returns rows from one or more database tables.
    :param username: username for SQL server
    :param password: password for SQL server
    :param optimize: DEPRECATED. Ignored - use fetch_mode instead. Optimize import of SQL table for faster imports.
    :param fetch_mode: Set to DISTRIBUTED to enable distributed import. Set to SINGLE to force a sequential read
        by a single node from the database.
    :returns: an :class:`H2OFrame` containing data of the specified SQL query.

    :examples:

    >>> conn_url = "jdbc:mysql://172.16.2.178:3306/ingestSQL?&useSSL=false"
    >>> select_query = "SELECT bikeid from citibike20k"
    >>> username = "root"
    >>> password = "abc123"
    >>> my_citibike_data = h2o.import_sql_select(conn_url, select_query,
    ...                                          username, password,
    ...                                          fetch_mode="DISTRIBUTED")
    """
    assert_is_type(connection_url, str)
    assert_is_type(select_query, str)
    assert_is_type(username, str)
    assert_is_type(password, str)
    assert_is_type(optimize, bool)
    assert_is_type(fetch_mode, str, None)
    # Note: `optimize` is deliberately not sent — it is deprecated.
    p = {"connection_url": connection_url, "select_query": select_query, "username": username, "password": password,
         "fetch_mode": fetch_mode}
    j = H2OJob(api("POST /99/ImportSQLTable", data=p), "Import SQL Table").poll()
    return get_frame(j.dest_key)
def parse_raw(setup, id=None, first_line_is_header=0):
    """
    Parse dataset using the parse setup structure.

    :param setup: Result of ``h2o.parse_setup()``
    :param id: an id for the frame.
    :param first_line_is_header: -1, 0, 1 if the first line is to be used as the header
    :returns: an :class:`H2OFrame` object.
    """
    assert_is_type(setup, dict)
    assert_is_type(id, str, None)
    assert_is_type(first_line_is_header, -1, 0, 1)
    check_frame_id(id)
    if id:
        setup["destination_frame"] = id
    # Bug fix: the original guard was `first_line_is_header != (-1, 0, 1)`,
    # an int-vs-tuple comparison that is always True. Validate membership
    # and then record the header flag.
    if first_line_is_header not in (-1, 0, 1):
        raise ValueError("first_line_is_header should be -1, 0, or 1")
    setup["check_header"] = first_line_is_header
    fr = H2OFrame()
    fr._parse_raw(setup)
    return fr
def deep_copy(data, xid):
    """
    Create a deep clone of the frame ``data``.

    :param data: an H2OFrame to be cloned
    :param xid: (internal) id to be assigned to the new frame.
    :returns: new :class:`H2OFrame` which is the clone of the passed frame.
    """
    assert_is_type(data, H2OFrame)
    assert_is_type(xid, str)
    assert_satisfies(xid, xid != data.frame_id)
    check_frame_id(xid)
    # Identity apply forces a materialized copy, then the copy is bound
    # to the requested key.
    duplicate = data.apply(lambda x: x)
    duplicate._ex = ExprNode("assign", xid, duplicate)._eval_driver(False)
    duplicate._ex._cache._id = xid
    duplicate._ex._children = None
    return duplicate
def get_grid(grid_id):
    """
    Return the specified grid.

    :param grid_id: The grid identification in h2o
    :returns: an :class:`H2OGridSearch` instance.
    """
    assert_is_type(grid_id, str)
    grid_json = api("GET /99/Grids/%s" % grid_id)
    models = [get_model(key["name"]) for key in grid_json["model_ids"]]
    # get first model returned in list of models from grid search to get model class (binomial, multinomial, etc)
    first_model_json = api("GET /3/Models/%s" % grid_json["model_ids"][0]["name"])["models"][0]
    gs = H2OGridSearch(None, {}, grid_id)
    gs._resolve_grid(grid_id, grid_json, first_model_json)
    gs.models = models
    hyper_params = {param: set() for param in gs.hyper_names}
    for param in gs.hyper_names:
        for model in models:
            if isinstance(model.full_parameters[param]["actual_value"], list):
                hyper_params[param].add(model.full_parameters[param]["actual_value"][0])
            else:
                hyper_params[param].add(model.full_parameters[param]["actual_value"])
    hyper_params = {str(param): list(vals) for param, vals in hyper_params.items()}
    gs.hyper_params = hyper_params
    # NOTE(review): `model` here is the loop variable left over from the
    # last iteration — fails with NameError when the grid has no models or
    # no hyper names; presumably any model's class works. TODO confirm.
    gs.model = model.__class__()
    return gs
def no_progress():
    """
    Disable the progress bar from flushing to stdout.

    The completed progress bar is printed when a job is complete so as to demarcate a log file.
    """
    H2OJob.__PROGRESS_BAR__ = False
def enable_expr_optimizations(flag):
    """Enable (or disable) expression tree local optimizations.

    :param flag: True to enable, False to disable.
    """
    ExprNode.__ENABLE_EXPR_OPTIMIZATIONS__ = flag
def log_and_echo(message=""):
    """
    Log a message on the server-side logs.

    This is helpful when running several pieces of work one after the other on a single H2O
    cluster and you want to make a notation in the H2O server side log where one piece of
    work ends and the next piece of work begins.

    Sends a message to H2O for logging. Generally used for debugging purposes.

    :param message: message to write to the log.
    """
    assert_is_type(message, str)
    api("POST /3/LogAndEcho", data={"message": str(message)})
def remove_all():
    """Remove all objects from H2O."""
    api("DELETE /3/DKV")
def ls():
    """List keys on an H2O Cluster.

    :returns: a pandas DataFrame of keys.
    """
    return H2OFrame._expr(expr=ExprNode("ls")).as_data_frame(use_pandas=True)
def frames():
    """
    Retrieve all the Frames.

    :returns: Meta information on the frames
    """
    return api("GET /3/Frames")
def download_csv(data, filename):
    """
    Download an H2O data set to a CSV file on the local disk.

    Warning: Files located on the H2O server may be very large! Make sure you have enough
    hard drive space to accommodate the entire file.

    :param data: an H2OFrame object to be downloaded.
    :param filename: name for the CSV file where the data should be saved to.
    """
    assert_is_type(data, H2OFrame)
    assert_is_type(filename, str)
    url = h2oconn.make_url("DownloadDataset", 3) + "?frame_id={}&hex_string=false".format(data.frame_id)
    # NOTE(review): `urlopen()` is called with no args first — presumably a
    # py2/py3 compatibility shim returning the opener; confirm before touching.
    with open(filename, "wb") as f:
        f.write(urlopen()(url).read())
def save_model(model, path="", force=False):
    """
    Save an H2O Model object to disk. (Note that ensemble binary models can now be saved using this method.)

    :param model: The model object to save.
    :param path: a path to save the model at (hdfs, s3, local)
    :param force: if True overwrite destination directory in case it exists, or throw exception if set to False.
    :returns: the path of the saved model

    :examples:

    >>> path = h2o.save_model(my_model, path=my_path)
    """
    assert_is_type(model, ModelBase)
    assert_is_type(path, str)
    assert_is_type(force, bool)
    # Empty path means "current working directory".
    path = os.path.join(os.getcwd() if path == "" else path, model.model_id)
    return api("GET /99/Models.bin/%s" % model.model_id, data={"dir": path, "force": force})["dir"]
def export_file(frame, path, force=False, parts=1):
    """
    Export a given H2OFrame to a path on the machine this python session is currently connected to.

    :param frame: the Frame to save to disk.
    :param path: the path to the save point on disk.
    :param force: if True, overwrite any preexisting file with the same path
    :param parts: enables export to multiple 'part' files instead of just a single file.
        Convenient for large datasets that take too long to store in a single file.
        Use parts=-1 to instruct H2O to determine the optimal number of part files or
        specify your desired maximum number of part files. Path needs to be a directory
        when exporting to multiple files, also that directory must be empty.
        Default is ``parts = 1``, which is to export to a single file.
    """
    assert_is_type(frame, H2OFrame)
    assert_is_type(path, str)
    assert_is_type(force, bool)
    assert_is_type(parts, int)
    # Blocks until the server-side export job finishes.
    H2OJob(api("POST /3/Frames/%s/export" % (frame.frame_id), data={"path": path, "num_parts": parts, "force": force}),
           "Export File").poll()
def create_frame(frame_id=None, rows=10000, cols=10, randomize=True,
                 real_fraction=None, categorical_fraction=None, integer_fraction=None,
                 binary_fraction=None, time_fraction=None, string_fraction=None,
                 value=0, real_range=100, factors=100, integer_range=100,
                 binary_ones_fraction=0.02, missing_fraction=0.01,
                 has_response=False, response_factors=2, positive_response=False,
                 seed=None, seed_for_column_types=None):
    """
    Create a new frame with random data.

    Creates a data frame in H2O with real-valued, categorical, integer, and binary columns specified by the user.

    :param frame_id: the destination key. If empty, this will be auto-generated.
    :param rows: the number of rows of data to generate.
    :param cols: the number of columns of data to generate. Excludes the response column if has_response is True.
    :param randomize: If True, data values will be randomly generated. This must be True if either
        categorical_fraction or integer_fraction is non-zero.
    :param value: if randomize is False, then all real-valued entries will be set to this value.
    :param real_range: the range of randomly generated real values.
    :param real_fraction: the fraction of columns that are real-valued.
    :param categorical_fraction: the fraction of total columns that are categorical.
    :param factors: the number of (unique) factor levels in each categorical column.
    :param integer_fraction: the fraction of total columns that are integer-valued.
    :param integer_range: the range of randomly generated integer values.
    :param binary_fraction: the fraction of total columns that are binary-valued.
    :param binary_ones_fraction: the fraction of values in a binary column that are set to 1.
    :param time_fraction: the fraction of randomly created date/time columns.
    :param string_fraction: the fraction of randomly created string columns.
    :param missing_fraction: the fraction of total entries in the data frame that are set to NA.
    :param has_response: A logical value indicating whether an additional response column should be prepended to the
        final H2O data frame. If set to True, the total number of columns will be ``cols + 1``.
    :param response_factors: if has_response is True, then this variable controls the type of the "response" column:
        setting response_factors to 1 will generate real-valued response, any value greater or equal than 2 will
        create categorical response with that many categories.
    :param positive_response: when response variable is present and of real type, this will control whether it
        contains positive values only, or both positive and negative.
    :param seed: a seed used to generate random values when ``randomize`` is True.
    :param seed_for_column_types: a seed used to generate random column types when ``randomize`` is True.

    :returns: an :class:`H2OFrame` object
    """
    # Validator for any per-type column fraction: either unset (None) or in [0, 1].
    t_fraction = U(None, BoundNumeric(0, 1))
    assert_is_type(frame_id, str, None)
    assert_is_type(rows, BoundInt(1))
    assert_is_type(cols, BoundInt(1))
    assert_is_type(randomize, bool)
    assert_is_type(value, numeric)
    assert_is_type(real_range, BoundNumeric(0))
    assert_is_type(real_fraction, t_fraction)
    assert_is_type(categorical_fraction, t_fraction)
    assert_is_type(integer_fraction, t_fraction)
    assert_is_type(binary_fraction, t_fraction)
    assert_is_type(time_fraction, t_fraction)
    assert_is_type(string_fraction, t_fraction)
    assert_is_type(missing_fraction, t_fraction)
    assert_is_type(binary_ones_fraction, t_fraction)
    assert_is_type(factors, BoundInt(1))
    assert_is_type(integer_range, BoundInt(1))
    assert_is_type(has_response, bool)
    assert_is_type(response_factors, None, BoundInt(1))
    assert_is_type(positive_response, bool)
    assert_is_type(seed, int, None)
    assert_is_type(seed_for_column_types, int, None)
    check_frame_id(frame_id)

    if randomize and value:
        raise H2OValueError("Cannot set data to a `value` if `randomize` is true")

    if (categorical_fraction or integer_fraction) and not randomize:
        raise H2OValueError("`randomize` should be True when either categorical or integer columns are used.")

    # The total column fraction that the user has specified explicitly. This sum should not exceed 1. We will respect
    # all explicitly set fractions, and will auto-select the remaining fractions.
    frcs = [real_fraction, categorical_fraction, integer_fraction, binary_fraction, time_fraction, string_fraction]
    # Relative weights used to split the unallocated remainder among unset fractions
    # (real columns get the largest share; time/string default to 0).
    wgts = [0.5, 0.2, 0.2, 0.1, 0.0, 0.0]
    sum_explicit_fractions = sum(0 if f is None else f for f in frcs)
    count_explicit_fractions = sum(0 if f is None else 1 for f in frcs)
    remainder = 1 - sum_explicit_fractions
    # 1e-10 tolerance absorbs float rounding when user-supplied fractions sum to ~1.
    if sum_explicit_fractions >= 1 + 1e-10:
        raise H2OValueError("Fractions of binary, integer, categorical, time and string columns should add up "
                            "to a number less than 1.")
    elif sum_explicit_fractions >= 1 - 1e-10:
        # The fractions already add up to almost 1. No need to do anything (the server will absorb the tiny
        # remainder into the real_fraction column).
        pass
    else:
        # sum_explicit_fractions < 1 => distribute the remainder among the columns that were not set explicitly
        if count_explicit_fractions == 6:
            # All six fractions were given explicitly, yet they do not reach 1 — nothing left to auto-fill.
            # NOTE(review): the message wording ("add up to a number less than 1") reads oddly for this
            # under-allocation case — presumably it should say the fractions must sum to 1; confirm upstream.
            raise H2OValueError("Fraction of binary, integer, categorical, time and string columns add up to a "
                                "number less than 1.")
        # Each column type receives a certain part (proportional to column's "weight") of the remaining fraction.
        sum_implicit_weights = sum(wgts[i] if frcs[i] is None else 0 for i in range(6))
        for i, f in enumerate(frcs):
            if frcs[i] is not None: continue
            if sum_implicit_weights == 0:
                # No weighted candidates left (remaining unset slots all have weight 0): give
                # the whole remainder to this slot.
                frcs[i] = remainder
            else:
                frcs[i] = remainder * wgts[i] / sum_implicit_weights
            # Shrink the pool as each implicit fraction is assigned, so later slots split what's left.
            remainder -= frcs[i]
            sum_implicit_weights -= wgts[i]
        for i, f in enumerate(frcs):
            if f is None:
                frcs[i] = 0
    real_fraction, categorical_fraction, integer_fraction, binary_fraction, time_fraction, string_fraction = frcs

    parms = {"dest": frame_id if frame_id else py_tmp_key(append=h2oconn.session_id),
             "rows": rows,
             "cols": cols,
             "randomize": randomize,
             "categorical_fraction": categorical_fraction,
             "integer_fraction": integer_fraction,
             "binary_fraction": binary_fraction,
             "time_fraction": time_fraction,
             "string_fraction": string_fraction,
             # "real_fraction" is not provided, the backend computes it as 1 - sum(5 other fractions)
             "value": value,
             "real_range": real_range,
             "factors": factors,
             "integer_range": integer_range,
             "binary_ones_fraction": binary_ones_fraction,
             "missing_fraction": missing_fraction,
             "has_response": has_response,
             "response_factors": response_factors,
             "positive_response": positive_response,
             # -1 signals "no seed" to the backend.
             "seed": -1 if seed is None else seed,
             "seed_for_column_types": -1 if seed_for_column_types is None else seed_for_column_types,
             }
    H2OJob(api("POST /3/CreateFrame", data=parms), "Create Frame").poll()
    return get_frame(parms["dest"])
6169,
1943,
6169,
208,
1393862887
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.