code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
name: Create Release
on:
workflow_dispatch:
inputs:
tags:
description: 'Version'
required: true
default: 'v'
push:
# Sequence of patterns matched against refs/tags
tags:
- 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10
jobs:
version:
runs-on: ubuntu-latest
outputs:
version: ${{ steps.get_version.outputs.VERSION }}
steps:
- name: Explode GITHUB_REF
id: github_ref
run: |
echo ::set-output name=PUSH_TAG::${GITHUB_REF/refs\/tags\//}
- name: Echo GITHUB_REF parts
env:
PUSH_TAG: ${{ steps.github_ref.outputs.PUSH_TAG }}
run: |
echo $PUSH_TAG
- name: Set release name based on originating event
id: get_version
env:
EVENT_NAME: ${{ github.event_name }}
INPUT_VERSION: ${{ github.event.inputs.tags }}
PUSH_TAG: ${{ steps.github_ref.outputs.PUSH_TAG }}
run: |
if [[ "$EVENT_NAME" == "workflow_dispatch" ]]; then output="$INPUT_VERSION"; else output="$PUSH_TAG"; fi
echo ::set-output name=VERSION::$output
- name: Echo release name
run: echo "release name evaluated to '${{ steps.get_version.outputs.VERSION }}'"
release:
needs: version
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Echo release.dat
run: echo "Writing 'version = ${{ needs.version.outputs.version }}' to release.dat"
- name: Update release.dat
uses: "finnp/create-file-action@master"
env:
FILE_NAME: "release.dat"
FILE_DATA: "version = ${{ needs.version.outputs.version }}"
- name: Compile LaTeX document
uses: xu-cheng/latex-action@v2
with:
root_file: cv.tex
latexmk_use_lualatex: true
- name: Create Release
id: create_release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ needs.version.outputs.version }}
release_name: Release ${{ needs.version.outputs.version }}
draft: false
prerelease: true
- name: Upload Release Asset
id: upload-release-asset
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing its ID to get its outputs object, which includes an `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
asset_path: ./cv.pdf
asset_name: cv.pdf
asset_content_type: application/pdf
- name: Trigger workflow 'Publish PDF release to website'
uses: mvasigh/dispatch-action@main
if: ${{ github.event_name=='push' }}
with:
token: ${{ secrets.PERSONAL_TOKEN_REPO_WORKFLOW }}
event_type: dispatch_publish
|
.github/workflows/release.yml
|
postgres_installed_locally: false
postgres_global_config:
- option: listen_addresses
value: "*"
- option: port
value: "5432"
- option: max_connections
value: "100"
- option: unix_socket_directories
value: '/var/run/postgresql'
- option: ssl
value: "true"
- option: shared_buffers
value: "24MB"
- option: "log_line_prefix"
value: '%t '
- option: datestyle
value: 'iso, mdy'
- option: lc_messages
value: 'en_US.utf8'
- option: lc_monetary
value: 'en_US.utf8'
- option: lc_numeric
value: 'en_US.utf8'
- option: lc_time
value: 'en_US.utf8'
- option: default_text_search_config
value: 'pg_catalog.english'
postgres_hba_entries:
- {type: local, database: all, user: postgres, auth_method: peer}
- {type: local, database: all, user: all, auth_method: peer}
- {type: host, database: all, user: all, address: '127.0.0.1/32', auth_method: md5}
- {type: host, database: all, user: all, address: '::1/128', auth_method: md5}
- {type: host, database: ckan_default, user: ckan_default, address: "{{ postgres_trusted_ip_range }}", auth_method: md5}
- {type: host, database: datastore_default, user: ckan_default, address: "{{ postgres_trusted_ip_range }}", auth_method: md5}
- {type: host, database: datastore_default, user: datastore_default, address: "{{ postgres_trusted_ip_range }}", auth_method: md5}
- {type: host, database: drupal8, user: drupal8, address: "{{ postgres_trusted_ip_range }}", auth_method: md5}
- {type: host, database: ckan_test, user: ckan_test, address: "{{ postgres_trusted_ip_range }}", auth_method: md5}
postgres_users:
- username: ckan_default
password: <PASSWORD>
- username: datastore_default
password: <PASSWORD>
- username: drupal8
password: <PASSWORD>
postgres_databases:
- name: ckan_default # ckan
owner: ckan_default
- name: datastore_default # ckan_datastore
owner: ckan_default
- name: drupal8
owner: drupal8
postgres_spatial_databases:
- name: ckan_default
owner: ckan_default
|
ansible/inventories/group_vars/all/postgres.yml
|
title: |-
Deploy to Huawei App Gallery
summary: |
Uploads APK/AAB to Huawei App Gallery
description: |
This step is based on [Huawei Publishing API](https://developer.huawei.com/consumer/en/service/hms/catalog/AGCConnectAPI.html?page=hmssdk_appGalleryConnect_getstarted)
website: https://github.com/FutureMind/bitrise-step-huawei-app-gallery-apk-distribution
source_code_url: https://github.com/FutureMind/bitrise-step-huawei-app-gallery-apk-distribution
support_url: https://github.com/FutureMind/bitrise-step-huawei-app-gallery-apk-distribution/issues
host_os_tags:
- osx-10.10
- ubuntu-16.04
# If this step should be available only for certain project types
# just uncomment this `project_type_tags` section and include all the
# project types supported by the step. If the step can be used for all
# project types then you can just remove this section.
# If no `project_type_tags` specified (or specified as an empty array)
# that means the step can be used for any project type.
# You can find more information about project type tags in the Step Development Guideline:
# https://github.com/bitrise-io/bitrise/blob/master/_docs/step-development-guideline.md
#
project_type_tags:
# - ios
# - macos
- android
- xamarin
- react-native
- flutter
# - cordova
# - ionic
# Type tags are used for categorizing steps, for easier step discovery in Step Libraries.
# You can find more information about type tags in the Step Development Guideline:
# https://github.com/bitrise-io/bitrise/blob/master/_docs/step-development-guideline.md
type_tags:
- deploy
is_requires_admin_user: true
is_always_run: false
is_skippable: false
run_if: ""
deps:
brew:
- name: jq
apt_get:
- name: jq
toolkit:
bash:
entry_file: step.sh
inputs:
- file_path: $BITRISE_APK_PATH
opts:
category: Config
title: File path
description: Path to generated APK/AAB file to deploy.
is_expand: true
is_required: true
- huawei_filename: build_${BITRISE_BUILD_NUMBER}.apk
opts:
category: Config
title: File name
description: Unique name of APK/AAB file upload.
is_expand: true
is_required: true
- huawei_app_id:
opts:
category: Config
title: App ID
description: App ID can be found in App information tab.
is_expand: true
is_required: true
- huawei_client_id:
opts:
category: Config
title: Client ID
description: Client ID generated in `Users and permissions -> Api Key -> AppGalleryConnect API`
is_expand: true
is_required: true
- huawei_client_secret:
opts:
category: Config
description: Client Secret generated in `Users and permissions -> Api Key -> AppGalleryConnect API`
title: Client Secret
is_expand: true
is_required: true
is_sensitive: true
- lang: en-EN
opts:
category: Config
title: Language Type
description: For more languages refer to [Language Types](https://developer.huawei.com/consumer/en/doc/development/AppGallery-connect-Guides/agcapi-reference-langtype)
is_expand: true
is_required: true
- show_debug_logs: "no"
opts:
category: Debug
title: "Show debug logs?"
description: |
If debug=yes the step will print debug info
value_options:
- "no"
- "yes"
|
step.yml
|
nameWithType: PersonGroups.createAsync
type: method
members:
- fullName: com.microsoft.azure.cognitiveservices.vision.faceapi.PersonGroups.createAsync(String personGroupId, CreatePersonGroupsOptionalParameter createOptionalParameter)
name: createAsync(String personGroupId, CreatePersonGroupsOptionalParameter createOptionalParameter)
nameWithType: PersonGroups.createAsync(String personGroupId, CreatePersonGroupsOptionalParameter createOptionalParameter)
parameters:
- description: <p>Id referencing a particular person group. </p>
name: personGroupId
type: <xref href="String?alt=String&text=String" data-throw-if-not-resolved="False"/>
- description: <p>the object representing the optional parameters to be set before calling this API </p>
name: createOptionalParameter
type: <xref href="com.microsoft.azure.cognitiveservices.vision.faceapi.models.CreatePersonGroupsOptionalParameter?alt=com.microsoft.azure.cognitiveservices.vision.faceapi.models.CreatePersonGroupsOptionalParameter&text=CreatePersonGroupsOptionalParameter" data-throw-if-not-resolved="False"/>
exceptions:
- type: <xref href="IllegalArgumentException?alt=IllegalArgumentException&text=IllegalArgumentException" data-throw-if-not-resolved="False"/>
description: <p>thrown if parameters fail the validation </p>
returns:
description: <p>a representation of the deferred computation of this call if successful. </p>
type: <xref href="Observable%3CVoid%3E?alt=Observable%3CVoid%3E&text=Observable%3CVoid%3E" data-throw-if-not-resolved="False"/>
summary: >-
<p>Create a new person group with specified personGroupId, name and user-provided userData.</p>
<p></p>
syntax: public Observable<Void> createAsync(String personGroupId, CreatePersonGroupsOptionalParameter createOptionalParameter)
uid: com.microsoft.azure.cognitiveservices.vision.faceapi.PersonGroups.createAsync(String,CreatePersonGroupsOptionalParameter)
uid: com.microsoft.azure.cognitiveservices.vision.faceapi.PersonGroups.createAsync*
fullName: com.microsoft.azure.cognitiveservices.vision.faceapi.PersonGroups.createAsync
name: createAsync(String personGroupId, CreatePersonGroupsOptionalParameter createOptionalParameter)
package: com.microsoft.azure.cognitiveservices.vision.faceapi
metadata: {}
|
legacy/docs-ref-autogen/com.microsoft.azure.cognitiveservices.vision.faceapi.PersonGroups.createAsync.yml
|
name: generate_changelog
on:
workflow_run:
workflows:
- "split_monorepo0"
types:
- completed
jobs:
generate_changelog:
name: "Generate Changelog"
runs-on: ubuntu-latest
steps:
-
if: "startsWith(github.ref, 'refs/tags/')"
name: "Checkout"
uses: "actions/checkout@v2"
-
if: "startsWith(github.ref, 'refs/tags/')"
name: "Write Changelog"
uses: heinrichreimer/github-changelog-generator-action@v2.1.1
with:
token: ${{ secrets.CHANGELOG_TOKEN }}
user: pheature-flags
project: pheature-flags
output: ./CHANGELOG.md
maxIssues: 9999999
issues: true
issuesWoLabels: true
pullRequests: true
prWoLabels: true
unreleased: true
filterByMilestone: false
compareLink: true
author: true
usernamesAsGithubLogins: true
-
if: "startsWith(github.ref, 'refs/tags/')"
name: "Commit changes"
uses: EndBug/add-and-commit@v7 # You can change this to use a specific version
with:
# Determines the way the action fills missing author name and email. Three options are available:
# - github_actor -> UserName <<EMAIL>>
# - user_info -> Your Display Name <<EMAIL>>
# - github_actions -> github-actions <email associated with the github logo>
# Default:
default_author: github_actor
# The name of the user that will be displayed as the author of the commit
# Default: depends on the default_author input
author_name: ${{ github.event.pusher.name }}
# The email of the user that will be displayed as the author of the commit
# Default: depends on the default_author input
author_email: ${{ github.event.pusher.email }}
# The message for the commit
# Default: 'Commit from GitHub Actions (name of the workflow)'
message: 'update changelog'
# The flag used on the pull strategy. Use NO-PULL to avoid the action pulling at all.
# Default: '--no-rebase'
pull_strategy: '--no-rebase'
branch: 1.0.x
# Whether to use the --signoff option on `git commit` (only `true` and `false` are accepted)
# Default: false
signoff: true
push: --force-with-lease
|
.github/workflows/generate_changelog.yaml
|
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
image.openshift.io/triggers: '[{"from":{"kind":"ImageStreamTag","name":"product-catalog-fuse:latest","namespace":"fuse-api-demo"},"fieldPath":"spec.template.spec.containers[?(@.name==\"product-catalog-fuse\")].image"}]'
app.openshift.io/connects-to: database
app.openshift.io/vcs-ref: master
app.openshift.io/vcs-uri: 'https://github.com/gnunn1/product-catalog-fuse'
labels:
app: product-catalog-fuse
app.kubernetes.io/component: product-catalog-fuse
app.kubernetes.io/instance: product-catalog-fuse
app.kubernetes.io/name: product-catalog-fuse
app.kubernetes.io/part-of: product-catalog
app.openshift.io/runtime: camel
name: product-catalog-fuse
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
deployment: product-catalog-fuse
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
annotations:
openshift.io/generated-by: OpenShiftNewApp
creationTimestamp: null
labels:
deployment: product-catalog-fuse
spec:
containers:
- image: image-registry.openshift-image-registry.svc:5000/fuse-api-demo/product-catalog-fuse@sha256:408b70eaccf234014f96594d1909293771dceda9279f5f742d1e10e51b6ef8f8
imagePullPolicy: Always
name: product-catalog-fuse
ports:
- containerPort: 8080
protocol: TCP
- containerPort: 8081
protocol: TCP
- containerPort: 8778
protocol: TCP
name: jolokia
- containerPort: 9779
protocol: TCP
name: prometheus
env:
- name: MARIADB_SERVICE
value: productdb
- name: MARIADB_USER
valueFrom:
secretKeyRef:
name: productdb
key: database-user
- name: MARIADB_PASSWORD
valueFrom:
secretKeyRef:
name: productdb
key: database-password
- name: MARIADB_DATABASE
valueFrom:
secretKeyRef:
name: productdb
key: database-name
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
|
manifests/app/fuse-deploy.yaml
|
name: 🐞 Provider Issue Report
description: Report a source issue
labels: [provider]
body:
- type: input
id: source
attributes:
label: Source information
description: |
You can find the source name in the navigation drawer.
placeholder: |
Example: "Bflix"
validations:
required: true
- type: input
id: source-url
attributes:
label: Source link
placeholder: |
Example:
"www.example.org"
validations:
required: true
- type: textarea
id: reproduce-steps
attributes:
label: Steps to reproduce
description: Provide an example of the issue.
placeholder: |
Example:
1. First step
2. Second step
3. Issue here
validations:
required: true
- type: input
id: cloudstream-version
attributes:
label: CloudStream version
description: |
You can find your CloudStream version in **Settings**.
placeholder: |
Example: "2.8.16"
validations:
required: true
- type: input
id: android-version
attributes:
label: Android version
description: |
You can find this somewhere in your Android settings.
placeholder: |
Example: "Android 12"
validations:
required: true
- type: textarea
id: other-details
attributes:
label: Other details
placeholder: |
Additional details and attachments.
- type: checkboxes
id: acknowledgements
attributes:
label: Acknowledgements
description: Your issue will be closed if you haven't done these steps.
options:
- label: I have searched the existing issues and this is a new ticket, **NOT** a duplicate or related to another open issue.
required: true
- label: I have written a short but informative title.
required: true
- label: I have updated the app to pre-release version **[Latest](https://github.com/LagradOst/CloudStream-3/releases)**.
required: true
- label: If related to a provider, I have checked the site and it works, but not the app.
required: true
- label: I will fill out all of the requested information in this form.
required: true
|
.github/ISSUE_TEMPLATE/provider-bug.yml
|
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:{{ .Values.name }}
labels:
{{- include "labels.common" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- podsecuritypolicies
verbs:
- use
resourceNames:
- {{ .Values.name }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
{{- include "labels.common" . | nindent 4 }}
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
# API Server Extension Role binding
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ .Values.name }}-auth-reader
namespace: kube-system
labels:
{{- include "labels.common" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount.name }}
namespace: {{ .Values.namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Values.name }}:system:auth-delegator
namespace: {{ .Values.namespace }}
labels:
{{- include "labels.common" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount.name }}
namespace: {{ .Values.namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:{{ .Values.name }}
labels:
{{- include "labels.common" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:{{ .Values.name }}
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount.name }}
namespace: {{ .Values.namespace }}
{{- end -}}
|
helm/metrics-server-app/templates/rbac.yaml
|
beef:
version: '0.4.4.3-alpha'
debug: false
restrictions:
# subnet of browser ip addresses that can hook to the framework
permitted_hooking_subnet: "0.0.0.0/0"
# subnet of browser ip addresses that can connect to the UI
# permitted_ui_subnet: "127.0.0.1/32"
permitted_ui_subnet: "0.0.0.0/0"
http:
debug: false #Thin::Logging.debug, very verbose. Also prints the full exception stack trace.
host: "0.0.0.0"
port: "3000"
# Decrease this setting up to 1000 if you want more responsiveness when sending modules and retrieving results.
# It's not advised to decrease it with tons of hooked browsers (more than 50),
# because it might impact performance. Also, enabling WebSockets is generally better.
xhr_poll_timeout: 5000
# if running behind a nat set the public ip address here
#public: ""
#public_port: "" # port setting is experimental
dns: "localhost"
panel_path: "/ui/panel"
hook_file: "/hook.js"
hook_session_name: "BEEFHOOK"
session_cookie_name: "BEEFSESSION"
# Prefer WebSockets over XHR-polling when possible.
websocket:
enable: false
secure: true # use WebSocketSecure; works only on an https domain and with https support enabled in BeEF
port: 61985 # WS: good success rate through proxies
secure_port: 61986 # WSSecure
ws_poll_timeout: 1000 # poll BeEF every second
# Imitate a specified web server (default root page, 404 default error page, 'Server' HTTP response header)
web_server_imitation:
enable: false
type: "apache" #supported: apache, iis
# Experimental HTTPS support for the hook / admin / all other Thin managed web services
https:
enable: false
# In production environments, be sure to use a valid certificate signed for the value
# used in beef.http.dns (the domain name of the server where you run BeEF)
key: "beef_key.pem"
cert: "beef_cert.pem"
database:
# For information on using other databases please read the
# README.databases file
# supported DBs: sqlite, mysql, postgres
# NOTE: you must change the Gemfile adding a gem require line like:
# gem "dm-postgres-adapter"
# or
# gem "dm-mysql-adapter"
# if you want to switch drivers from sqlite to postgres (or mysql).
# Finally, run a 'bundle install' command and start BeEF.
driver: "sqlite"
# db_file is only used for sqlite
db_file: "beef.db"
# db connection information is only used for mysql/postgres
db_host: "localhost"
db_name: "beef"
db_user: "beef"
db_passwd: "<PASSWORD>"
db_encoding: "UTF-8"
# Credentials to authenticate in BeEF. Used by both the RESTful API and the Admin_UI extension
credentials:
user: "beef"
passwd: "<PASSWORD>"
# Autorun modules as soon as the browser is hooked.
# NOTE: only modules with target type 'working' or 'user_notify' can be run automatically.
autorun:
enable: true
# set this to FALSE if you don't want to allow auto-run execution for modules with target->user_notify
allow_user_notify: true
crypto_default_value_length: 80
# You may override default extension configuration parameters here
extension:
requester:
enable: true
proxy:
enable: true
metasploit:
enable: false
social_engineering:
enable: true
evasion:
enable: false
console:
shell:
enable: false
ipec:
enable: true
|
config.yaml
|
---
name: "qt"
suites:
- "precise"
architectures:
- "amd64"
packages:
- "mingw-w64"
- "g++-mingw-w64"
- "zip"
- "unzip"
- "faketime"
- "libz-dev"
reference_datetime: "2011-01-30 00:00:00"
remotes: []
files:
- "qt-everywhere-opensource-src-5.2.0.tar.gz"
- "bitcoin-deps-win32-gitian-r10.zip"
- "bitcoin-deps-win64-gitian-r10.zip"
script: |
# Defines
export TZ=UTC
INDIR=$HOME/build
# Integrity Check
echo "395ec72277c5786c65b8163ef5817fd03d0a1f524a6d47f53624baf8056f1081 qt-everywhere-opensource-src-5.2.0.tar.gz" | sha256sum -c
for BITS in 32 64; do # for architectures
#
INSTALLPREFIX=$HOME/staging${BITS}
BUILDDIR=$HOME/build${BITS}
DEPSDIR=$HOME/deps${BITS}
if [ "$BITS" == "32" ]; then
HOST=i686-w64-mingw32
else
HOST=x86_64-w64-mingw32
fi
#
mkdir -p $INSTALLPREFIX $INSTALLPREFIX/host/bin $DEPSDIR $BUILDDIR
#
# Need mingw-compiled openssl from bitcoin-deps:
cd $DEPSDIR
unzip $INDIR/bitcoin-deps-win${BITS}-gitian-r10.zip
#
cd $BUILDDIR
#
tar xzf $INDIR/qt-everywhere-opensource-src-5.2.0.tar.gz
cd qt-everywhere-opensource-src-5.2.0
SPECNAME="win32-g++"
SPECFILE="qtbase/mkspecs/${SPECNAME}/qmake.conf"
sed 's/$TODAY/2011-01-30/' -i configure
sed --posix "s|QMAKE_CFLAGS = -pipe -fno-keep-inline-dllexport|QMAKE_CFLAGS\t\t= -pipe -fno-keep-inline-dllexport -isystem /usr/$HOST/include/ -frandom-seed=qtbuild -I$DEPSDIR/include|" -i ${SPECFILE}
sed --posix "s|QMAKE_LFLAGS =|QMAKE_LFLAGS\t\t= -L$DEPSDIR/lib|" -i ${SPECFILE}
# ar adds timestamps to every object file included in the static library
# providing -D as ar argument is supposed to solve it, but doesn't work as qmake strips off the arguments and adds -M to pass a script...
# which somehow cannot be combined with other flags.
# use faketime only for ar, as it confuses make/qmake into hanging sometimes
sed --posix "s|QMAKE_LIB = \\\$\\\${CROSS_COMPILE}ar -ru|QMAKE_LIB\t\t= $HOME/ar -Dr|" -i ${SPECFILE}
echo '#!/bin/bash' > $HOME/ar
echo 'export LD_PRELOAD=/usr/lib/faketime/libfaketime.so.1' >> $HOME/ar
echo "$HOST-ar \"\$@\"" >> $HOME/ar
chmod +x $HOME/ar
# Don't load faketime while compiling Qt, qmake will get stuck in nearly infinite loops
#export LD_PRELOAD=/usr/lib/faketime/libfaketime.so.1
export FAKETIME=$REFERENCE_DATETIME
#
# Compile static libraries, and use statically linked openssl (-openssl-linked):
OPENSSL_LIBS="-L$DEPSDIR/lib -lssl -lcrypto -lgdi32" ./configure -prefix $INSTALLPREFIX -bindir $INSTALLPREFIX/host/bin -confirm-license -release -opensource -static -xplatform $SPECNAME -device-option CROSS_COMPILE="$HOST-" -no-audio-backend -no-javascript-jit -no-sql-sqlite -no-sql-odbc -no-nis -no-cups -no-iconv -no-dbus -no-gif -no-opengl -no-compile-examples -no-feature-style-windowsce -no-feature-style-windowsmobile -no-qml-debug -openssl-linked -skip qtsvg -skip qtwebkit -skip qtwebkit-examples -skip qtserialport -skip qtdeclarative -skip qtmultimedia -skip qtimageformats -skip qtlocation -skip qtsensors -skip qtquick1 -skip qtquickcontrols -skip qtactiveqt -skip qtconnectivity -skip qtwinextras -skip qtxmlpatterns -skip qtscript -skip qtdoc -system-libpng -system-zlib
make $MAKEOPTS install
#
cd $INSTALLPREFIX
# as zip stores file timestamps, use faketime to intercept stat calls to set dates for all files to reference date
export LD_PRELOAD=/usr/lib/faketime/libfaketime.so.1
zip -r $OUTDIR/qt-win${BITS}-5.2.0-gitian-r1.zip *
unset LD_PRELOAD
unset FAKETIME
done # for BITS in
|
contrib/gitian-descriptors/qt-win.yml
|
server:
port: 0
spring:
nacos:
# host: 172.16.30.100:8848
# namespace: 10d8c97e-d556-413f-bbf8-72d1e5b538b0
group: @project.version@
host: 172.16.30.130:8848
namespace: 7d03f927-32d1-4fa3-810d-9a5311ec5bf1
application:
name: @project.artifactId@
cloud:
nacos:
config:
server-addr: ${spring.nacos.host}
file-extension: yaml
namespace: ${spring.nacos.namespace}
group: ${spring.nacos.group}
extension-configs:
- data-id: lion-common.${spring.cloud.nacos.config.file-extension}
refresh: true
group: GLOBAL_GROUP
- data-id: lion-datasource-common.${spring.cloud.nacos.config.file-extension}
refresh: true
group: GLOBAL_GROUP
- data-id: lion-redis-common.${spring.cloud.nacos.config.file-extension}
refresh: true
group: GLOBAL_GROUP
- data-id: lion-dubbo-common.${spring.cloud.nacos.config.file-extension}
refresh: true
group: GLOBAL_GROUP
#security:
# user:
# name: user
# password: password
# role: USER
# http://localhost:8081/oauth/authorize?response_type=code&client_id=console 授权端点
# http://console:console@127.0.0.1:8081/oauth/token?code=eqE1OE&grant_type=authorization_code 令牌端点
# /oauth/confirm_access 用户批准授权的端点
# /oauth/error 用于渲染授权服务器的错误
# /oauth/check_token 资源服务器解码access token
# /oauth/check_token 当使用JWT的时候,暴露公钥的端点
# OAuth2.0错误响应中的错误码定义如下表所示:
# 错误码(error) 错误编号(error_code) 错误描述(error_description)
# redirect_uri_mismatch 21322 重定向地址不匹配
# invalid_request 21323 请求不合法
# invalid_client 21324 client_id或client_secret参数无效
# invalid_grant 21325 提供的Access Grant是无效的、过期的或已撤销的
# unauthorized_client 21326 客户端没有权限
# expired_token <PASSWORD> token过期
# unsupported_grant_type 21328 不支持的 GrantType
# unsupported_response_type 21329 不支持的 ResponseType
# access_denied 21330 用户或授权服务器拒绝授予数据访问权限
# temporarily_unavailable 21331 服务暂时无法访问
|
lion-oauth2-authorization-server/src/main/resources/bootstrap.yml
|
- name: Prepare random number
set_fact:
rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
run_once: yes
- name: Create MySQL Server
azure_rm_mysqlserver:
resource_group: "{{ resource_group }}"
name: mysqlsrv{{ rpfx }}
sku:
name: GP_Gen4_2
tier: GeneralPurpose
location: westus
version: 5.6
enforce_ssl: True
admin_username: zimxyz
admin_password: <PASSWORD>!
storage_mb: 51200
- name: Create instance of Firewall Rule -- check mode
azure_rm_mysqlfirewallrule:
resource_group: "{{ resource_group }}"
server_name: mysqlsrv{{ rpfx }}
name: firewallrule{{ rpfx }}
start_ip_address: 172.28.10.136
end_ip_address: 172.28.10.138
check_mode: yes
register: output
- name: Assert the resource instance is well created
assert:
that:
- output.changed
- name: Create instance of Firewall Rule
azure_rm_mysqlfirewallrule:
resource_group: "{{ resource_group }}"
server_name: mysqlsrv{{ rpfx }}
name: firewallrule{{ rpfx }}
start_ip_address: 172.28.10.136
end_ip_address: 172.28.10.138
register: output
- name: Assert the resource instance is well created
assert:
that:
- output.changed
- name: Create again instance of Firewall Rule
azure_rm_mysqlfirewallrule:
resource_group: "{{ resource_group }}"
server_name: mysqlsrv{{ rpfx }}
name: firewallrule{{ rpfx }}
start_ip_address: 172.28.10.136
end_ip_address: 172.28.10.138
register: output
- name: Assert the state has not changed
assert:
that:
- output.changed == false
- name: Delete instance of Firewall Rule -- check mode
azure_rm_mysqlfirewallrule:
resource_group: "{{ resource_group }}"
server_name: mysqlsrv{{ rpfx }}
name: firewallrule{{ rpfx }}
state: absent
check_mode: yes
register: output
- name: Assert the state has changed
assert:
that:
- output.changed
- name: Delete instance of Firewall Rule
azure_rm_mysqlfirewallrule:
resource_group: "{{ resource_group }}"
server_name: mysqlsrv{{ rpfx }}
name: firewallrule{{ rpfx }}
state: absent
register: output
- name: Assert the state has changed
assert:
that:
- output.changed
- name: Delete unexisting instance of Firewall Rule
azure_rm_mysqlfirewallrule:
resource_group: "{{ resource_group }}"
server_name: mysqlsrv{{ rpfx }}
name: firewallrule{{ rpfx }}
state: absent
register: output
- name: Assert the state has changed
assert:
that:
- output.changed == false
- name: Delete instance of MySQL Server
azure_rm_mysqlserver:
resource_group: "{{ resource_group }}"
name: mysqlsrv{{ rpfx }}
state: absent
|
tests/integration/targets/azure_rm_mysqlfirewallrule/tasks/main.yml
|
text_yes: "&aSim"
text_no: "&cNão"
###################
# Beehive GUI
###################
beehive_info_gui_title: "&8Informação da colmeia"
beehive_info_gui_honey_capacity: "&fCapacidade de mel"
beehive_info_gui_honey_capacity_desc: "&aCapacidade: &7%current%/%maximum%"
beehive_info_gui_bee_capacity: "&fCapacidade de abelhas"
beehive_info_gui_bee_capacity_desc: "&aCapacidade: &7%current%/%maximum% abelhas"
beehive_info_gui_sedated: "&aSedada"
beehive_info_gui_not_sedated: "&cNão sedada"
beehive_info_gui_flower: "&fFlor"
# Use || to create a new line
beehive_info_gui_no_target_flower_desc: "&aEsta colmeia não||&atem uma flor!"
honey_low: "Baixo"
honey_medium: "Médio"
honey_high: "Alto"
honey_very_high: "Muito alto"
###################
# Bee GUI
###################
bee_info_gui_title: "&8Informação da abelha"
bee_info_gui_age: "&fIdade"
bee_info_gui_age_adult: "&aAdulta"
bee_info_gui_age_baby: "&aBébé"
bee_info_gui_anger: "&fRaiva"
# Use %level% as a placeholder for the bee's anger level
bee_info_gui_anger_level_desc: "&aNível de raiva: &7%level%"
bee_info_gui_hive_location: "&fLocalização da colmeia"
# Use || to create a new line
bee_info_gui_no_hive_desc: "&aEsta abelha não||&atem uma colmeia!"
bee_info_gui_ride: "&fMontar"
bee_info_gui_ride_no_permission: "&cNão tens permissão para montar abelhas!"
bee_info_gui_ride_angry: "&cNão podes montar abelhas irritadas!"
bee_info_gui_ride_already: "&cAlguem já está a montar esta abelha!"
bee_info_gui_ride_too_far: "&cEstá muito longe para montar a abelha!"
bee_info_gui_has_stung: "&fJá picou?"
bee_info_gui_has_nectar: "&fTem nectar?"
bee_info_gui_health: "&fVida"
# Use %health% to represent the bee's health
bee_info_gui_health_desc: "&aVida: &7%health% ❤"
###################
# Riding a bee title
# Use %name% as a placeholder for the player's name
###################
ride_bee_title: "&6Estás a montar"
ride_bee_subtitle: "&6uma abelha &8%name%&6!"
###################
# Protection suit name
###################
bee_protection_helmet: "&6Proteção contra abelhas"
bee_protection_chestplate: "&6Proteção contra abelhas"
bee_protection_leggings: "&6Proteção contra abelhas"
bee_protection_boots: "&6Proteção contra abelhas"
###################
# Beehive upgrade item
###################
beehive_upgrade_item_name: "&6Melhorar Colmeia"
# Use || to create a new line
beehive_upgrade_item_lore: "&7População de abelhas: &a+3||&8(Clique direito para usar)"
beehive_upgrade_success: "&aColmeia melhorada! Nova população: &7%beesno%&a abelhas"
beehive_upgrade_max: "&cErro: Esta colmeia atingiu a população máxima permitida!"
|
src/main/resources/locale/pt.yml
|
trigger:
- sandbox
pr: none
variables:
- group: azure-birthday-bot-testing-var-group
#APPSETTINGS
- name: BambooHRUsersFileName
value: $(AzureDevops-BambooHRUsersFileName)
- name: BlobStorageConversationContainer
value: $(AzureDevops-BlobStorageConversationContainer)
- name: BlobStorageStringConnection
value: $(AzureDevops-BlobStorageStringConnection)
- name: EnabledNotifications
value: $(AzureDevops-EnabledNotifications)
- name: QueueName
value: $(AzureDevops-QueueName)
- name: QueueStorageStringConnection
value: $(AzureDevops-QueueStorageStringConnection)
- name: SlackBotToken
value: $(AzureDevops-SlackBotToken)
- name: SlackClientSigningSecret
value: $(AzureDevops-SlackClientSigningSecret)
- name: SlackVerificationToken
value: $(AzureDevops-SlackVerificationToken)
- name: SpecificChannelName
value: $(AzureDevops-SpecificChannelName)
- name: BlobStorageDataUsersContainer
value: $(AzureDevops-BlobStorageDataUsersContainer)
stages:
- stage: Build
jobs:
- job: Build
pool:
vmImage: 'windows-latest'
variables:
solution: '**/Birthday-Bot.sln'
buildPlatform: 'Any CPU'
buildConfiguration: 'Release'
steps:
- task: NuGetToolInstaller@1
- task: NuGetCommand@2
inputs:
restoreSolution: '$(solution)'
- task: VSBuild@1
inputs:
solution: '$(solution)'
msbuildArgs: '/p:DeployOnBuild=true /p:WebPublishMethod=Package /p:PackageAsSingleFile=true /p:SkipInvalidConfigurations=true /p:PackageLocation="$(build.artifactStagingDirectory)"'
platform: '$(buildPlatform)'
configuration: '$(buildConfiguration)'
- stage: Deploy
jobs:
- job: Deploy
pool:
vmImage: 'windows-latest'
steps:
- task: FileTransform@1
displayName: 'App settings transform'
inputs:
folderPath: '$(System.DefaultWorkingDirectory)\birthday-bot'
fileType: 'json'
targetFiles: 'appsettings.json'
- task: PowerShell@2
displayName: 'Login'
continueOnError: true
inputs:
targetType: inline
script: >-
az login --service-principal --username "$(AzureDevOps-ServicePrincipal-Username)" --password "$(AzureDevOps-ServicePrincipal-Secret)" --tenant "$(AzureDevOps-ServicePrincipal-Tenant)"
- task: PowerShell@2
displayName: 'Birthday Bot Publish'
inputs:
targetType: inline
script: .\birthday-bot\Deployment\Scripts\publish.ps1 -name "$(BirthdayBotName)" -resourceGroup "$(BirthdayBotResourceGroup)" -projFolder ".\birthday-bot"
errorActionPreference: continue
failOnStderr: false
pwsh: true
|
birthday-bot/Pipeline/Azure-Birthday-Bot-Build.yml
|
fields:
- fullName: com.microsoft.azure.management.storage.KeyPermission.FULL
name: FULL
nameWithType: KeyPermission.FULL
summary: <p>Enum value FULL. </p>
uid: com.microsoft.azure.management.storage.KeyPermission.FULL
- fullName: com.microsoft.azure.management.storage.KeyPermission.READ
name: READ
nameWithType: KeyPermission.READ
summary: <p>Enum value READ. </p>
uid: com.microsoft.azure.management.storage.KeyPermission.READ
inheritances:
- <xref href="java.lang.Object" data-throw-if-not-resolved="False"/>
- <xref href="java.lang.Enum<KeyPermission>" data-throw-if-not-resolved="False"/>
methods:
- fullName: com.microsoft.azure.management.storage.KeyPermission.fromString(String value)
name: fromString(String value)
nameWithType: KeyPermission.fromString(String value)
parameters:
- description: <p>the serialized value to parse. </p>
name: value
type: <xref href="String?alt=String&text=String" data-throw-if-not-resolved="False"/>
returns:
description: <p>the parsed KeyPermission object, or null if unable to parse. </p>
type: <xref href="com.microsoft.azure.management.storage.KeyPermission?alt=com.microsoft.azure.management.storage.KeyPermission&text=KeyPermission" data-throw-if-not-resolved="False"/>
summary: >-
<p>Parses a serialized value to a KeyPermission instance.</p>
<p></p>
syntax: public static KeyPermission fromString(String value)
uid: com.microsoft.azure.management.storage.KeyPermission.fromString(String)
- fullName: com.microsoft.azure.management.storage.KeyPermission.toString()
name: toString()
nameWithType: KeyPermission.toString()
returns:
type: <xref href="String?alt=String&text=String" data-throw-if-not-resolved="False"/>
syntax: public String toString()
uid: com.microsoft.azure.management.storage.KeyPermission.toString()
nameWithType: KeyPermission
syntax: public enum KeyPermission
uid: com.microsoft.azure.management.storage.KeyPermission
fullName: com.microsoft.azure.management.storage.KeyPermission
name: KeyPermission
package: com.microsoft.azure.management.storage
summary: <p>Defines values for KeyPermission. </p>
metadata: {}
|
legacy/docs-ref-autogen/com.microsoft.azure.management.storage.KeyPermission.yml
|
description: >-
Person detector (ShuffleNetv2 backbone and FCOS head) for ASL scenario
task_type: detection
files:
- name: FP32/person-detection-asl-0001.xml
size: 989419
sha256: 4c4e57fab55363e6e6051d5030dbd282bb3e2debc0f8bbbd888884e7cc8ba495
source: https://storage.openvinotoolkit.org/repositories/open_model_zoo/2021.3/models_bin/2/person-detection-asl-0001/FP32/person-detection-asl-0001.xml
- name: FP32/person-detection-asl-0001.bin
size: 4026304
sha256: 0211de411259cf3601bab2529f16df42547fc3378b37994f17f4fe84f18d9599
source: https://storage.openvinotoolkit.org/repositories/open_model_zoo/2021.3/models_bin/2/person-detection-asl-0001/FP32/person-detection-asl-0001.bin
- name: FP16/person-detection-asl-0001.xml
size: 989270
sha256: 9e7f419488cee38566495c1c4a1ad0bb83c8483d34069a3d5995774cf9e731cb
source: https://storage.openvinotoolkit.org/repositories/open_model_zoo/2021.3/models_bin/2/person-detection-asl-0001/FP16/person-detection-asl-0001.xml
- name: FP16/person-detection-asl-0001.bin
size: 2013330
sha256: 2aab4b519ea48a90f9c0ad8d00ca11aa128d8ffe9233f8e281e2a78d1382a019
source: https://storage.openvinotoolkit.org/repositories/open_model_zoo/2021.3/models_bin/2/person-detection-asl-0001/FP16/person-detection-asl-0001.bin
- name: FP16-INT8/person-detection-asl-0001.xml
size: 1597906
sha256: 55e82a51b4211fe894ba0cff9fc09322f8d7a380f50b2365ecbef6db7d7d9395
source: https://storage.openvinotoolkit.org/repositories/open_model_zoo/2021.3/models_bin/2/person-detection-asl-0001/FP16-INT8/person-detection-asl-0001.xml
- name: FP16-INT8/person-detection-asl-0001.bin
size: 1055660
sha256: 4be7605680dd4f2ca92da8eb6edb8e333091712e803f1e454967e4d96cb71972
source: https://storage.openvinotoolkit.org/repositories/open_model_zoo/2021.3/models_bin/2/person-detection-asl-0001/FP16-INT8/person-detection-asl-0001.bin
framework: dldt
license: https://raw.githubusercontent.com/openvinotoolkit/open_model_zoo/master/LICENSE
|
models/intel/person-detection-asl-0001/model.yml
|
on:
push:
tags:
# Only match TagIt tags, which always start with this prefix
- 'v20*'
name: TagIt
jobs:
build:
name: Release
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Archive project
id: archive_project
run: |
FILE_NAME=${GITHUB_REPOSITORY#*/}-${GITHUB_REF##*/}
git archive ${{ github.ref }} -o ${FILE_NAME}.zip
git archive ${{ github.ref }} -o ${FILE_NAME}.tar.gz
echo "::set-output name=file_name::${FILE_NAME}"
- name: Compute digests
id: compute_digests
run: |
echo "::set-output name=tgz_256::$(openssl dgst -sha256 ${{ steps.archive_project.outputs.file_name }}.tar.gz)"
echo "::set-output name=tgz_512::$(openssl dgst -sha512 ${{ steps.archive_project.outputs.file_name }}.tar.gz)"
echo "::set-output name=zip_256::$(openssl dgst -sha256 ${{ steps.archive_project.outputs.file_name }}.zip)"
echo "::set-output name=zip_512::$(openssl dgst -sha512 ${{ steps.archive_project.outputs.file_name }}.zip)"
- name: Create Release
id: create_release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ github.ref }}
release_name: ${{ github.ref }}
body: |
Automated release from TagIt
<details>
<summary>File Hashes</summary>
<ul>
<li>${{ steps.compute_digests.outputs.zip_256 }}</li>
<li>${{ steps.compute_digests.outputs.zip_512 }}</li>
<li>${{ steps.compute_digests.outputs.tgz_256 }}</li>
<li>${{ steps.compute_digests.outputs.tgz_512 }}</li>
</ul>
</details>
draft: false
prerelease: false
- name: Upload zip
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./${{ steps.archive_project.outputs.file_name }}.zip
asset_name: ${{ steps.archive_project.outputs.file_name }}.zip
asset_content_type: application/zip
- name: Upload tar.gz
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./${{ steps.archive_project.outputs.file_name }}.tar.gz
asset_name: ${{ steps.archive_project.outputs.file_name }}.tar.gz
asset_content_type: application/gzip
|
.github/workflows/TagIt.yml
|
clusterName: awcc-kube-aws-k8s-cluster
s3URI: s3://kube-aws-klefevre
amiId: "ami-0772233ad155871ff"
disableContainerLinuxAutomaticUpdates: true
keyName: ocelot
region: eu-west-1
kmsKeyArn: "arn:aws:kms:eu-west-1:161285725140:key/130f8fa4-4380-4d68-bceb-29087998e75a"
apiEndpoints:
- name: default
dnsName: kube-aws-showcase.archifleks.net
loadBalancer:
subnets:
- name: ExistingPublicSubnet1
- name: ExistingPublicSubnet2
- name: ExistingPublicSubnet3
type: network
hostedZone:
id: Z1QX2F9WHERETA
controller:
count: 1
instanceType: t3.medium
instanceTags:
instanceRole: controller
rootVolume:
size: 30
type: gp2
subnets:
- name: ExistingPrivateSubnet1
- name: ExistingPrivateSubnet2
- name: ExistingPrivateSubnet3
worker:
nodePools:
- name: default
subnets:
- name: ExistingPrivateSubnet1
- name: ExistingPrivateSubnet2
- name: ExistingPrivateSubnet3
instanceType: t3.medium
instanceTags:
instanceRole: worker
rootVolume:
size: 30
type: gp2
autoScalingGroup:
minSize: 1
maxSize: 3
autoscaling:
clusterAutoscaler:
enabled: true
etcd:
count: 1
instanceType: t3.medium
instanceTags:
instanceRole: etcd
rootVolume:
size: 30
type: gp2
subnets:
- name: ExistingPrivateSubnet1
- name: ExistingPrivateSubnet2
- name: ExistingPrivateSubnet3
version: 3.2.18
vpc:
idFromStackOutput: awcc-kube-aws-showcase-VPCid
subnets:
- name: ExistingPublicSubnet1
availabilityZone: eu-west-1a
idFromStackOutput: awcc-kube-aws-showcase-PublicSubnet1
- name: ExistingPublicSubnet2
availabilityZone: eu-west-1b
idFromStackOutput: awcc-kube-aws-showcase-PublicSubnet2
- name: ExistingPublicSubnet3
availabilityZone: eu-west-1c
idFromStackOutput: awcc-kube-aws-showcase-PublicSubnet3
- name: ExistingPrivateSubnet1
availabilityZone: eu-west-1a
idFromStackOutput: awcc-kube-aws-showcase-PrivateSubnet1
- name: ExistingPrivateSubnet2
availabilityZone: eu-west-1b
idFromStackOutput: awcc-kube-aws-showcase-PrivateSubnet2
- name: ExistingPrivateSubnet3
availabilityZone: eu-west-1c
idFromStackOutput: awcc-kube-aws-showcase-PrivateSubnet3
tlsCADurationDays: 36500
tlsCertDurationDays: 3650
kubernetesVersion: v1.11.5
kubernetes:
encryptionAtRest:
enabled: false
networking:
selfHosting:
type: canal # either "canal" or "flannel"
typha: false # enable for type 'canal' for 50+ node clusters
kubeSystemNamespaceLabels:
name: kube-system
kubernetesDashboard:
adminPrivileges: true
insecureLogin: false
enabled: true
kubeDns:
provider: coredns
autoscaler:
coresPerReplica: 256
nodesPerReplica: 16
min: 2
kubeProxy:
ipvsMode:
enabled: false
scheduler: rr
syncPeriod: 300s
minSyncPeriod: 60s
addons:
clusterAutoscaler:
enabled: true
rescheduler:
enabled: false
metricsServer:
enabled: true
prometheus:
securityGroupsEnabled: true
experimental:
admission:
podSecurityPolicy:
enabled: false
alwaysPullImages:
enabled: false
denyEscalatingExec:
enabled: false
initializers:
enabled: false
priority:
enabled: false
mutatingAdmissionWebhook:
enabled: false
validatingAdmissionWebhook:
enabled: false
OwnerReferencesPermissionEnforcement:
enabled: false
persistentVolumeClaimResize:
enabled: true
awsEnvironment:
enabled: false
environment:
CFNSTACK: '{ "Ref" : "AWS::StackId" }'
auditLog:
enabled: false
logPath: /var/log/kube-apiserver-audit.log
maxAge: 30
maxBackup: 1
maxSize: 100
authentication:
webhook:
enabled: false
cacheTTL: 1m0s
configBase64: base64-encoded-webhook-yaml
awsNodeLabels:
enabled: false
clusterAutoscalerSupport:
enabled: true
tlsBootstrap:
enabled: true
nodeAuthorizer:
enabled: true
ephemeralImageStorage:
enabled: false
kiamSupport:
enabled: true
image:
repo: quay.io/uswitch/kiam
tag: v2.7
rktPullDocker: false
sessionDuration: 15m
serverAddresses:
serverAddress: localhost:443
agentAddress: kiam-server:443
kube2IamSupport:
enabled: false
nodeDrainer:
enabled: false
drainTimeout: 5
iamRole:
arn: ""
oidc:
enabled: false
issuerUrl: "https://accounts.google.com"
clientId: "kubernetes"
usernameClaim: "email"
groupsClaim: "groups"
disableSecurityGroupIngress: false
kubelet:
RotateCerts:
enabled: true
hostOS:
|
assets/kube-aws-v0.12.0-cluster.yaml
|
--- !<MAP_PACK>
contentType: "MAP_PACK"
firstIndex: "2018-11-01 11:53"
game: "Unreal Tournament"
name: "TMH Map Pack Part 3"
author: "Various"
description: "None"
releaseDate: "2013-07"
attachments:
- type: "IMAGE"
name: "TMH_Map_Pack_Part_3_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/MapPacks/T/TMH_Map_Pack_Part_3_shot_1.png"
- type: "IMAGE"
name: "TMH_Map_Pack_Part_3_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/MapPacks/T/TMH_Map_Pack_Part_3_shot_2.png"
originalFilename: "TMH Map Pack Part 3.zip"
hash: "f070ced97600b515cd441584c1bdbbdb211ba984"
fileSize: 4177929
files:
- name: "TMH-Elsinore.unr"
fileSize: 3159815
hash: "8f4bf4dea7bf6e9f1e8d9587deee897e56233308"
- name: "Neurosis.umx"
fileSize: 425274
hash: "7efa73e895be12ee97104455f134d8817240b3d4"
- name: "TMH-Shakrah1on1CB.unr"
fileSize: 11720552
hash: "b91b529f511447709ca167de014a67cf01494103"
otherFiles: 0
dependencies:
TMH-Shakrah1on1CB.unr:
- status: "MISSING"
name: "TeamMH"
- status: "MISSING"
name: "MonsterHunt"
TMH-Elsinore.unr:
- status: "MISSING"
name: "TeamMH"
- status: "OK"
name: "Neurosis"
downloads:
- url: "http://medor.no-ip.org/index.php?dir=Maps/TeamMonsterHunt&file=TMH+Map+Pack+Part+3.zip"
main: false
repack: false
state: "OK"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/MapPacks/T/TMH%20Map%20Pack%20Part%203.zip"
main: true
repack: false
state: "OK"
- url: "http://ut-files.com/index.php?dir=Maps/TeamMonsterHunt/&file=TMH%20Map%20Pack%20Part%203.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/MapPacks/T/f/0/70ced9/TMH%2520Map%2520Pack%2520Part%25203.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/MapPacks/T/f/0/70ced9/TMH%2520Map%2520Pack%2520Part%25203.zip"
main: false
repack: false
state: "OK"
deleted: false
maps:
- name: "TMH-Shakrah1on1CB"
title: "Shakrah SE"
author: "Derdak2rot"
- name: "TMH-Elsinore"
title: "Elsinore"
author: "-}Night_Terror{-"
gametype: "Team Monster Hunt"
themes:
Nali Temple: 0.4
Nali Castle: 0.6
|
content/Unreal Tournament/MapPacks/T/f/0/70ced9/tmh-map-pack-part-3_[f070ced9].yml
|
{{- $masterCount := int .Values.master.replicaCount }}
{{- $isEmbedded := (eq .Values.journal.type "EMBEDDED") }}
{{- $isHaEmbedded := and $isEmbedded (gt $masterCount 1) }}
{{- $isMonitored := (.Values.monitoring) }}
{{- $metricsLabel := .Values.monitoring }}
{{- $release := .Release }}
{{- $masterRpcPort := .Values.master.ports.rpc }}
{{- $masterWebPort := .Values.master.ports.web }}
{{- $jobMasterRpcPort := .Values.jobMaster.ports.rpc }}
{{- $jobMasterWebPort := .Values.jobMaster.ports.web }}
{{- $masterEmbeddedPort := .Values.master.ports.embedded }}
{{- $jobMasterEmbeddedPort := .Values.jobMaster.ports.embedded }}
{{- $isAPIGatewayEnabled := .Values.apiGateway.enabled }}
{{- $apiGatewayPort := .Values.apiGateway.ports.rest }}
{{- $name := include "goosefs.name" . }}
{{- $fullName := include "goosefs.fullname" . }}
{{- $chart := include "goosefs.chart" . }}
{{- range $i := until $masterCount }}
{{- $masterName := printf "master-%v" $i }}
{{- $masterJavaOpts := printf " -Dgoosefs.master.hostname=%v-%v " $fullName $masterName }}
apiVersion: v1
kind: Service
metadata:
name: {{ $fullName }}-{{ $masterName }}
labels:
app: {{ $name }}
chart: {{ $chart }}
release: {{ $release.Name }}
heritage: {{ $release.Service }}
role: goosefs-master
{{- if $isMonitored }}
monitor: {{ $metricsLabel }}
{{- end }}
spec:
ports:
- port: {{ $masterRpcPort }}
name: rpc
- port: {{ $masterWebPort }}
name: web
- port: {{ $jobMasterRpcPort }}
name: job-rpc
- port: {{ $jobMasterWebPort }}
name: job-web
{{- if $isAPIGatewayEnabled }}
- port: {{ $apiGatewayPort }}
name: rest
{{- end }}
{{- if $isHaEmbedded }}
- port: {{ $masterEmbeddedPort }}
name: embedded
- port: {{ $jobMasterEmbeddedPort }}
name: job-embedded
{{- end }}
clusterIP: None
selector:
role: goosefs-master
app: {{ $name }}
release: {{ $release.Name }}
statefulset.kubernetes.io/pod-name: {{ $fullName }}-{{ $masterName }}
---
{{- end }}
|
charts/goosefs/templates/master/service.yaml
|
version: '2'
services:
bft_node1:
container_name: bft_node1
image: mutadev/muta:latest
hostname: bft_node1
environment:
- RUST_LOG
- RUST_BACKTRACE
volumes:
- ./config-bft-1.toml:/app/devtools/chain/config.toml
- ./genesis-bft.toml:/app/devtools/chain/genesis.toml
- /tmp/data/bft-1:/app/devtools/chain/data
networks:
bft:
aliases:
- bft_node1
ipv4_address: 192.168.3.11
command: |
sh -c '
cd /app;
./muta-chain;'
bft_node2:
container_name: bft_node2
image: mutadev/muta:latest
hostname: bft_node2
environment:
- RUST_LOG
- RUST_BACKTRACE
depends_on:
- bft_node1
volumes:
- ./config-bft-2.toml:/app/devtools/chain/config.toml
- ./genesis-bft.toml:/app/devtools/chain/genesis.toml
- /tmp/data/bft-2:/app/devtools/chain/data
networks:
bft:
aliases:
- bft_node2
ipv4_address: 192.168.3.12
command: |
sh -c '
cd /app;
./muta-chain;'
bft_node3:
container_name: bft_node3
image: mutadev/muta:latest
hostname: bft_node3
environment:
- RUST_LOG
- RUST_BACKTRACE
depends_on:
- bft_node1
volumes:
- ./config-bft-3.toml:/app/devtools/chain/config.toml
- ./genesis-bft.toml:/app/devtools/chain/genesis.toml
- /tmp/data/bft-3:/app/devtools/chain/data
networks:
bft:
aliases:
- bft_node3
ipv4_address: 192.168.3.13
command: |
sh -c '
cd /app;
./muta-chain;'
bft_node4:
container_name: bft_node4
image: mutadev/muta:latest
hostname: bft_node4
environment:
- RUST_LOG
- RUST_BACKTRACE
depends_on:
- bft_node1
volumes:
- ./config-bft-4.toml:/app/devtools/chain/config.toml
- ./genesis-bft.toml:/app/devtools/chain/genesis.toml
- /tmp/data/bft-4:/app/devtools/chain/data
networks:
bft:
aliases:
- bft_node4
ipv4_address: 192.168.3.14
command: |
sh -c '
cd /app;
./muta-chain;'
networks:
bft:
ipam:
config:
- subnet: 192.168.3.0/24
|
devtools/docker/dockercompose/docker-compose-bft.yaml
|
title: Suspicious GrantedAccess Flags on LSASS Access
id: a18dd26b-6450-46de-8c91-9659150cf088
related:
- id: 32d0d3e2-e58d-4d41-926b-18b520b2b32d
type: obsoletes
status: experimental
description: Detects process access to LSASS memory with suspicious access flags
author: <NAME>
date: 2021/11/22
modified: 2022/03/13
references:
- https://docs.microsoft.com/en-us/windows/win32/procthread/process-security-and-access-rights
- https://onedrive.live.com/view.aspx?resid=D026B4699190F1E6!2843&ithint=file%2cpptx&app=PowerPoint&authkey=!AMvCRTKB_V1J5ow
- https://cyberwardog.blogspot.com/2017/03/chronicles-of-threat-hunter-hunting-for_22.html
- https://www.slideshare.net/heirhabarov/hunting-for-credentials-dumping-in-windows-environment
- http://security-research.dyndns.org/pub/slides/FIRST2017/FIRST-2017_Tom-Ueltschi_Sysmon_FINAL_notes.pdf
tags:
- attack.credential_access
- attack.t1003.001
- attack.s0002
logsource:
category: process_access
product: windows
detection:
selection:
TargetImage|endswith: '\lsass.exe'
GrantedAccess|endswith:
# - '10' # covered in rule 678dfc63-fefb-47a5-a04c-26bcf8cc9f65
- '30'
- '50'
- '70'
- '90'
- 'B0'
- 'D0'
- 'F0'
- '18'
- '38'
- '58'
- '78'
- '98'
- 'B8'
- 'D8'
- 'F8'
- '1A'
- '3A'
- '5A'
- '7A'
- '9A'
- 'BA'
- 'DA'
- 'FA'
- '0x14C2' # https://github.com/b4rtik/ATPMiniDump/blob/master/ATPMiniDump/ATPMiniDump.c
# Absolute paths to programs that cause false positives
filter_absolute:
SourceImage:
- 'C:\WINDOWS\system32\taskmgr.exe'
- 'C:\Program Files\Malwarebytes\Anti-Malware\MBAMService.exe'
- 'C:\PROGRAMDATA\MALWAREBYTES\MBAMSERVICE\ctlrupdate\mbupdatr.exe'
- 'C:\WINDOWS\system32\taskhostw.exe'
- 'C:\Users\\*\AppData\Local\Programs\Microsoft VS Code\Code.exe'
- 'C:\Program Files\Windows Defender\MsMpEng.exe'
- 'C:\Windows\SysWOW64\msiexec.exe'
- 'C:\Windows\System32\msiexec.exe'
- 'C:\Windows\System32\lsass.exe'
- 'C:\WINDOWS\System32\perfmon.exe'
- 'C:\Windows\System32\MRT.exe'
# Windows Defender
filter1:
SourceImage|startswith: 'C:\ProgramData\Microsoft\Windows Defender\'
SourceImage|endswith: '\MsMpEng.exe'
# VMware Tools
filter2:
SourceImage|startswith: 'C:\ProgramData\VMware\VMware Tools\'
SourceImage|endswith: '\vmtoolsd.exe'
# Antivirus and EDR agents
filter3:
SourceImage|startswith:
- 'C:\Program Files\'
- 'C:\Program Files (x86)\'
SourceImage|contains:
- 'Antivirus'
filter_mrt:
SourceImage: 'C:\WINDOWS\system32\MRT.exe'
GrantedAccess: '0x1418'
filter_mcafee:
SourceImage: 'C:\Program Files\Common Files\McAfee\MMSSHost\MMSSHOST.exe'
filter_nextron:
SourceImage|startswith: 'C:\Windows\Temp\asgard2-agent\'
SourceImage|endswith:
- '\thor64.exe'
- '\thor.exe'
GrantedAccess: '0x1fffff'
condition: selection and not 1 of filter*
fields:
- User
- SourceImage
- GrantedAccess
falsepositives:
- Legitimate software accessing LSASS process for legitimate reason
level: high
|
images/rules/windows/process_access/proc_access_win_susp_proc_access_lsass.yml
|
이 번역본의 제작은 HERO님과 아무런 관련이 없습니다. 모든 대사와 관련된 권리는 원본 만화의 저작권자에게 있으며 번역자는 번역본에 대한
모든 권리를 저작권자에게 양도합니다. 번역본은 오로지 HERO의 사이트 "読解アヘン"의 가독성을 향상시키는 데에만 사용할 수 있습니다.
만화의 저작권자는 언제든 이 번역본의 배포자에게 번역본과 이 조항들의 삭제, 변경을 포함한 모든 것을 요청할 수 있습니다. 이 모든 조항을
준수할 때에는 번역본의 배포 및 변형이 가능하며 이 모든 조항을 번역본과 함께 포함하여야 합니다.
This translation is not related to HERO. The copyright holder of the original
comic has all dialogue-related rights, and the translator transfers all rights
to the translation to the copyright holder. The translation can only be used
to improve the readability of HERO's website "読解アヘン". The copyright holder of
the comic can at any time make any request of the distributor of this
translation, including deletion or modification of the translation and of
these clauses. Provided that all of these provisions are observed, the
translation may be distributed and modified, and all of these provisions
must be included along with the translation.
この翻訳版の製作はHEROと何ら関連がありません。
すべての台詞に関する権利は原本の漫画の著作権者にあり、翻訳者は翻訳版に対するすべての権利を著作権者に譲渡します。
翻訳版はHEROのサイト「読解アヘン」の可読性を向上させることにのみ使用できます。
漫画の著作権者は、いつでもこの翻訳版の配布者に対し、翻訳版とこれらの条項の削除、変更を含むすべてを要請することができます。
この全ての条項を遵守する場合、翻訳版の配布及び変形が可能であり、この全ての条項を翻訳版と共に告知しなければなりません。
hm121_150/122/hori_1794.gif:
- - 호리 선배는요- 초콜릿 좋아해요-?
- - 어... 밸런타인?
- text: 혹시
type: plain
- 직접 만들 거에요-!!
- - 사와다, 보통은 남자에게 주는 게 아닐
- 저 열심히 만들 거예요!! 기다려 주세요!
- - 기다려 주세요-!!
- text: 안 듣네...
type: thought
- 으, 응, 기다릴게-
hm121_150/122/hori_1795.gif:
- - 나 초코 케이크 나눠 주려고!
- - 하... 정말?
- text: 방금 「하지 마」라고 하려고 했어
type: thought
- 초코 케이크 말이지-
- text: 시선
type: plain
- 케이크구나-
- - 있잖아, 먹어 줄 거지?
- 응?
- 응?
- 응?
- - 직접 만들게?
hm121_150/122/hori_1796.gif:
- - 당연하지!!
- 요시카와, 학교 뒤에 있는 백화점에서 지금 밸런타인 행사한대!! 딱 맞아! 거기 가!!
- text: 250엔~ 박스도 종류도 잔뜩♥
type: plain
- - ''
- - text: ♪
type: plain
- text: 수제 초콜릿
type: plain
- - 레미 신나 보이네
- 그게~ 밸런타인이잖아, 벌써 너무 신나~!!
hm121_150/122/hori_1797.gif:
- - 있잖아, 센고쿠♡
- - 투구벌레랑
- - 도마뱀 중에
- - 어떤 게 더 좋아-?
hm121_150/122/hori_1798.gif:
- - text: 투구... <small>아니...</small>
type: speech
- 그냥 사각형이나 하트
- 우리집엔 그런 모양이 없어
- 어... 그래...
- - text: 침착해... 괜찮아... 진짜로 투구벌레나 도마뱀이 들어 있는 것도 아니니까...
type: thought
- text: 힘내, 센고쿠
type: plain
- 어...
- - 그, 그렇지, 모처럼 밸런타인 초콜릿인데
- 난 투구벌레니 도마뱀이니... 바보 같이... 미안해...
- - 아니... 레미... 그런 속셈으로 말한 건...
hm121_150/122/hori_1799.gif:
- - 거미로 할래
- - ''
- - text: <b>제기라아아알!!</b>
type: speech
- text: 드르륵
type: plain
- text: 턱!!
type: stroke
- text: 타다다다...
type: plain
- 앗, 센고쿠!! 어디 가는 거야!!
- - ''
hm121_150/122/hori_1800.gif:
- - 나도 도우려고- 가게에서 초콜릿 만드는 거
- - 그럼 카타기리 근처 남자들은 대부분 미야무라가 만든 초콜릿을 먹겠네... 얄밉다...
- 아니 그렇게 마음을 담아서 만드는 게 아니니까...!!
- - 아빠도 초콜릿 받고 싶네-
- - 초등학생도 아니고, 안 줘-
- 뭐?
hm121_150/122/hori_1801.gif:
- - 네 얘길 하는 게 아니라! 미야무라 말이야
- 네?
- - 당신에겐 평생 초콜릿 안 줄 거야
- 만약 그게 눈보라가 몰아치는 산 속이라고 해도
- 죽을 것 같으면 그 땐 줘
- - 호리도 줄까? 초콜릿...
- 가게 거지만
- - 줘!
- text: 받는구나...
type: thought
- 제일 큰 걸로!
- text: 악착스럽게
type: square
hm121_150/122/hori_1802.gif:
- - 아! 그러고보니 사와다한테서도 받을 거야
- - 다 먹을 수 있으려나-
- 아빠가 먹어 줄게!
- .....
- 조용히 해
- - text: 기껏 수제 초콜릿은 크기가 알려져 있고... 크게 해도 홀 케이크 정도
type: thought
- text: 우리 집에서 가장 큰 건... 아니 그거라면 임팩트가...
type: thought
- - 미야무라 왜 그래?
- 1미터에서 1미터 50센티 정도려나...
- text: 중얼중얼
type: plain
- 뭐가?
- 초콜릿
hm121_150/122/hori_1803.gif:
- - 머어!?
- text: 하지만 사와다가 주는 초콜릿<b>보다야</b> 크게 하고 싶은데
type: speech
- 바보 아니야? 보통 사이즈로 줘!!
- - 하지만 사와다 초콜릿이 2미터에 내 초콜릿이 1미터면 호리는 2미터를 고를 거잖아!!
- 싫어, 2미터 초콜릿은!!
- - 아, 아냐, 1미터도 싫어!!
- 괴물 아니야, 그 정도면
- text: 호리<big><big>I</big> <small>I</small></big>초콜릿
type: speech
- 그그그 그래도
- - 80센티... 80센티면 어때...?
- 80센티라니 너 진짜...
- text: 80...?
type: plain
hm121_150/122/hori_1804.gif:
- - 자, 두 사람 초콜릿
- 그냥 주는 거야
- - 고마워 코야나기
- text: 녹였다가 굳힌 거 뿐이야
type: plain
- 받아둘게, 주는 건 거절해도 할 수 없으니
- - 거절 안 할거면... 자! 내가 주는 초콜렛도 받아!
- text: <b>필요 없어</b>, 그런 건!!
type: speech
- text: 짠-
type: plain
- - 방금 받는다고 했잖아!!
- text: 내게 수치심을 안겨주지 마!
type: plain
- text: 자 자
type: plain
- 말은 했지만...!!
- 호리, 그거 반에서 받은 초콜릿 아니야?
hm121_150/122/hori_1805.gif:
- - 어, 신도, 저 가게 들러도 될까?
- text: 100엔 숍
type: plain
- 괜찮은데 뭐 사려고?
- - 고무
- text: 길어져서
type: plain
- '// 고무: 콘돔'
- 고무가 그런 데 있겠어, 편의점이나 약국 가!
- - 그 고무가 아니야, 너 바보냐?
- 뒈질래?
- - text: 농담 좀 했다고 이런 공격...
type: square
- text: 뒈지라고까지...
type: thought
- text: 그래도 좋아
type: plain
- text: 울 거야...
type: square
- 신도, 가자-
|
translations/hm121_150/pict_com_133.yaml
|
---
### INSTRUCTIONS AND INFORMATION ###
#
# 1. Run the play with the command: 'ansible-playbook agent_clamav/deploy_agent.yml --extra-vars "env=<hostlist>"'
#
# Author: <NAME>
# Objective: Deploy ClamAV Scanner across all servers
# Date: 7/15/2019
#
###
- hosts: "{{ env }}"
remote_user: root
gather_facts: True
vars:
workdir: clamav_files/
logdir: /var/log/clamav/
tasks:
- name: "Ensuring that logging {{ logdir }} directory exists. Create it if it doesn't exist."
file:
path: "{{ logdir }}"
state: directory
mode: '0755'
- name: Ensure all packages are installed for ClamAV (RHEL 7)
yum:
name:
- clamav
- clamav-server
- clamav-devel
- clamav-lib
- clamav-update
- clamav-scanner-systemd
- clamav-server-systemd
- clamd
- clamav-filesystem
- clamav-data
state: latest
disable_gpg_check: yes
when: ansible_distribution_major_version == "7"
- name: Ensure all packages are installed for ClamAV (RHEL 6)
yum:
name:
- clamav
- clamd
state: latest
disable_gpg_check: yes
when: ansible_distribution_major_version == "6"
- name: Push over freshclam systemctl startup script (RHEL 7)
copy:
src: "{{ workdir }}freshclam.service"
dest: /usr/lib/systemd/system/freshclam.service
mode: '0644'
when: ansible_distribution_major_version == "7"
- name: Start freshclam daemon to update virus database every 12 hours (RHEL 7)
service:
name: freshclam
state: started
when: ansible_distribution_major_version == "7"
- name: Set the clamd@scan service so that it is ready for on demand scanning (On Demand Scanning is disabled)
lineinfile:
path: /lib/systemd/system/clamd@.service
regexp: "^ExecStart"
line: "ExecStart = /usr/bin/ionice -c3 /usr/bin/nice -n19 /usr/sbin/clamd -c /etc/clamd.d/%i.conf"
when: ansible_distribution_major_version == "7"
- name: Push over config for Clamd Scanning (Disabled, but ready to run)
copy:
src: "{{ workdir }}scan.conf"
dest: /etc/clamd.d/scan.conf
mode: '0644'
- name: Push over cron.daily clam scanner (Scanner is at approximately 3:05AM, at 19 nice setting)
copy:
src: "{{ workdir }}clamav.cron"
dest: /etc/cron.daily/clamav
mode: '0755'
|
deploy_clamav.yml
|
name: CI
on:
push:
branches:
- master
pull_request:
branches:
- master
jobs:
build:
if: "!contains(github.event.head_commit.message, 'skip ci')"
name: Build Test
runs-on: ubuntu-latest
strategy:
matrix:
node-version: [12.x, 14.x]
steps:
- uses: actions/checkout@v2
- name: Cache pnpm modules
uses: actions/cache@v2
env:
cache-name: cache-pnpm-modules
with:
path: ~/.pnpm-store
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ matrix.node-version }}-${{ hashFiles('**/package.json') }}
restore-keys: |
${{ runner.os }}-build-${{ env.cache-name }}-${{ matrix.node-version }}-
- uses: pnpm/action-setup@v2.0.1
with:
version: 6.2.5
run_install: true
- name: Copy config for build sample
run: |
cp ./src/config.ts.example ./src/config.ts
cp ./.env.example ./.env
- name: Start build on ${{ matrix.node-version }}
run: pnpm run build
tests:
if: "!contains(github.event.head_commit.message, 'skip ci')"
name: Unittests
runs-on: ubuntu-latest
strategy:
matrix:
node-version: [12.x, 14.x]
steps:
- uses: actions/checkout@v2
- name: Cache pnpm modules
uses: actions/cache@v2
env:
cache-name: cache-pnpm-modules
with:
path: ~/.pnpm-store
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ matrix.node-version }}-${{ hashFiles('**/package.json') }}
restore-keys: |
${{ runner.os }}-build-${{ env.cache-name }}-${{ matrix.node-version }}-
- uses: pnpm/action-setup@v2.0.1
with:
version: 6.2.5
run_install: true
- name: Start testing on ${{ matrix.node-version }}
run: pnpm run testall
- name: Upload Coverage to Codecov
if: matrix.node-version == '12.x'
uses: codecov/codecov-action@v1.0.14
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: unittests
name: ihaapi-codecov
lint:
if: "!contains(github.event.head_commit.message, 'skip ci')"
name: Lint Check
runs-on: ubuntu-latest
strategy:
matrix:
node-version: [12.x]
steps:
- uses: actions/checkout@v2
- name: Cache pnpm modules
uses: actions/cache@v2
env:
cache-name: cache-pnpm-modules
with:
path: ~/.pnpm-store
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ matrix.node-version }}-${{ hashFiles('**/package.json') }}
restore-keys: |
${{ runner.os }}-build-${{ env.cache-name }}-${{ matrix.node-version }}-
- uses: pnpm/action-setup@v2.0.1
with:
version: 6.2.5
run_install: true
- name: Run Lint
run: pnpm run lint
deploy:
name: Deploy to Production
if: |
!contains(github.event.head_commit.message, 'skip ci') &&
!contains(github.event.head_commit.message, 'skip deploy')
environment:
name: Production
url: https://api.ihateani.me/
needs: [build, tests]
runs-on: ubuntu-latest
steps:
- name: Execute deployment script
uses: appleboy/ssh-action@master
with:
host: ${{ secrets.SSH_HOST }}
username: ${{ secrets.SSH_USERNAME }}
key: ${{ secrets.SSH_KEY }}
passphrase: ${{ secrets.SSH_PASSPHRASE }}
port: 22
script: |
curl -H "Authorization: token ${{ secrets.GH_TOKEN }}" ${{ secrets.CONFIG_PATH }} > /var/www/ihaapi-ts/src/config.ts
sh /var/www/ihaapi-ts/deploy.sh
|
.github/workflows/ci.yml
|
name: CI
on: push
jobs:
appinspect-api:
name: AppInspect API Validation
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- run: |
find . -name '*.pyc' -delete
tar --exclude="utbox/local" --exclude="utbox/metadata/local.meta" -zcvf utbox.tgz utbox
- uses: splunk/appinspect-api-action@v2
with:
filePath: utbox.tgz
splunkUser: ${{ secrets.SPLUNK_USER }}
splunkPassword: ${{ secrets.SPLUNK_PASS }}
includedTags: cloud
failOnError: true
failOnWarning: false
splunk-test:
needs: appinspect-api
runs-on: ubuntu-latest
strategy:
matrix:
version: ["7.3.9","8.1","8.2"]
services:
splunk:
image: splunk/splunk:${{ matrix.version }}
env:
SPLUNK_START_ARGS: --accept-license
SPLUNK_PASSWORD: password
options: >-
--health-interval 30s
--health-timeout 5s
--health-retries 5
ports:
- 8000:8000
- 8089:8089
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Copy app to service container
run: docker cp utbox ${{ job.services.splunk.id }}:/opt/splunk/etc/apps/utbox
- run: docker exec -i ${{ job.services.splunk.id }} ls -a /opt/splunk/etc/apps/utbox
- run: docker exec -i ${{ job.services.splunk.id }} ls -a /opt/splunk/etc/apps
- name: Restart instance
run: |
curl -k -u admin:password https://localhost:8089/services/server/control/restart -X POST
sleep 30
- name: Print App Configuration
run: |
curl -k -u admin:password https://localhost:8089/services/apps/local/utbox
- name: Test ut_parse_simple
run:
curl -k -u admin:password https://localhost:8089/services/search/jobs/export -d search="|makeresults n = 1 | eval cs_url = \"https://splunk.com\", list = \"*\" | \`ut_parse_simple(cs_url)\`" -d output_mode=json | jq -e '(.result.ut_scheme | contains("https"))'
- name: Test ut_parse_extended
run:
curl -k -u admin:password https://localhost:8089/services/search/jobs/export -d search="|makeresults n = 1 | eval cs_url = \"https://splunk.com/test\", list = \"*\" | \`ut_parse_extended(cs_url, list)\`" -d output_mode=json | jq -e '(.result.ut_path | contains("test"))'
|
.github/workflows/ci.yml
|
uid: "com.azure.data.tables.TableServiceClientBuilder"
fullName: "com.azure.data.tables.TableServiceClientBuilder"
name: "TableServiceClientBuilder"
nameWithType: "TableServiceClientBuilder"
summary: "This class provides a fluent builder API to help aid the configuration and instantiation of <xref uid=\"com.azure.data.tables.TableServiceClient\" data-throw-if-not-resolved=\"false\" data-raw-source=\"TableServiceClient\"></xref> and <xref uid=\"com.azure.data.tables.TableServiceAsyncClient\" data-throw-if-not-resolved=\"false\" data-raw-source=\"TableServiceAsyncClient\"></xref> objects. Call <xref uid=\"com.azure.data.tables.TableServiceClientBuilder.buildClient()\" data-throw-if-not-resolved=\"false\" data-raw-source=\"#buildClient()\"></xref> or <xref uid=\"com.azure.data.tables.TableServiceClientBuilder.buildAsyncClient()\" data-throw-if-not-resolved=\"false\" data-raw-source=\"#buildAsyncClient()\"></xref>, respectively, to construct an instance of the desired client."
inheritances:
- "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />"
inheritedMembers:
- "java.lang.Object.clone()"
- "java.lang.Object.equals(java.lang.Object)"
- "java.lang.Object.finalize()"
- "java.lang.Object.getClass()"
- "java.lang.Object.hashCode()"
- "java.lang.Object.notify()"
- "java.lang.Object.notifyAll()"
- "java.lang.Object.toString()"
- "java.lang.Object.wait()"
- "java.lang.Object.wait(long)"
- "java.lang.Object.wait(long,int)"
syntax: "public class TableServiceClientBuilder"
constructors:
- "com.azure.data.tables.TableServiceClientBuilder.TableServiceClientBuilder()"
methods:
- "com.azure.data.tables.TableServiceClientBuilder.addPolicy(com.azure.core.http.policy.HttpPipelinePolicy)"
- "com.azure.data.tables.TableServiceClientBuilder.buildAsyncClient()"
- "com.azure.data.tables.TableServiceClientBuilder.buildClient()"
- "com.azure.data.tables.TableServiceClientBuilder.clientOptions(com.azure.core.util.ClientOptions)"
- "com.azure.data.tables.TableServiceClientBuilder.configuration(com.azure.core.util.Configuration)"
- "com.azure.data.tables.TableServiceClientBuilder.connectionString(java.lang.String)"
- "com.azure.data.tables.TableServiceClientBuilder.credential(com.azure.core.credential.AzureSasCredential)"
- "com.azure.data.tables.TableServiceClientBuilder.credential(com.azure.core.credential.TokenCredential)"
- "com.azure.data.tables.TableServiceClientBuilder.credential(com.azure.data.tables.TablesSharedKeyCredential)"
- "com.azure.data.tables.TableServiceClientBuilder.endpoint(java.lang.String)"
- "com.azure.data.tables.TableServiceClientBuilder.httpClient(com.azure.core.http.HttpClient)"
- "com.azure.data.tables.TableServiceClientBuilder.httpLogOptions(com.azure.core.http.policy.HttpLogOptions)"
- "com.azure.data.tables.TableServiceClientBuilder.pipeline(com.azure.core.http.HttpPipeline)"
- "com.azure.data.tables.TableServiceClientBuilder.retryOptions(com.azure.storage.common.policy.RequestRetryOptions)"
- "com.azure.data.tables.TableServiceClientBuilder.sasToken(java.lang.String)"
- "com.azure.data.tables.TableServiceClientBuilder.serviceVersion(com.azure.data.tables.TablesServiceVersion)"
type: "class"
metadata: {}
package: "com.azure.data.tables"
artifact: com.azure:azure-data-tables:12.0.0-beta.5
|
preview/docs-ref-autogen/com.azure.data.tables.TableServiceClientBuilder.yml
|
album: Music Information Retrieval
artist: Tabla Breakbeat Science
composer:
- <NAME>
excerpt: 'no'
genre: Electronic/Fusion
has_bleed: 'no'
instrumental: 'yes'
mix_filename: TablaBreakbeatScience_MoodyPlucks_MIX.wav
origin: Dolan Studio
producer:
- <NAME>
raw_dir: TablaBreakbeatScience_MoodyPlucks_RAW
stem_dir: TablaBreakbeatScience_MoodyPlucks_STEMS
stems:
S01:
component: ''
filename: TablaBreakbeatScience_MoodyPlucks_STEM_01.wav
instrument: drum machine
raw:
R01:
filename: TablaBreakbeatScience_MoodyPlucks_RAW_01_01.wav
instrument: drum machine
S02:
component: ''
filename: TablaBreakbeatScience_MoodyPlucks_STEM_02.wav
instrument: fx/processed sound
raw:
R01:
filename: TablaBreakbeatScience_MoodyPlucks_RAW_02_01.wav
instrument: fx/processed sound
S03:
component: ''
filename: TablaBreakbeatScience_MoodyPlucks_STEM_03.wav
instrument: fx/processed sound
raw:
R01:
filename: TablaBreakbeatScience_MoodyPlucks_RAW_03_01.wav
instrument: fx/processed sound
S04:
component: ''
filename: TablaBreakbeatScience_MoodyPlucks_STEM_04.wav
instrument: synthesizer
raw:
R01:
filename: TablaBreakbeatScience_MoodyPlucks_RAW_04_01.wav
instrument: synthesizer
S05:
component: ''
filename: TablaBreakbeatScience_MoodyPlucks_STEM_05.wav
instrument: synthesizer
raw:
R01:
filename: TablaBreakbeatScience_MoodyPlucks_RAW_05_01.wav
instrument: synthesizer
S06:
component: ''
filename: TablaBreakbeatScience_MoodyPlucks_STEM_06.wav
instrument: harp
raw:
R01:
filename: TablaBreakbeatScience_MoodyPlucks_RAW_06_01.wav
instrument: harp
R02:
filename: TablaBreakbeatScience_MoodyPlucks_RAW_06_02.wav
instrument: harp
S07:
component: ''
filename: TablaBreakbeatScience_MoodyPlucks_STEM_07.wav
instrument: synthesizer
raw:
R01:
filename: TablaBreakbeatScience_MoodyPlucks_RAW_07_01.wav
instrument: synthesizer
R02:
filename: TablaBreakbeatScience_MoodyPlucks_RAW_07_02.wav
instrument: synthesizer
S08:
component: ''
filename: TablaBreakbeatScience_MoodyPlucks_STEM_08.wav
instrument: fx/processed sound
raw:
R01:
filename: TablaBreakbeatScience_MoodyPlucks_RAW_08_01.wav
instrument: fx/processed sound
S09:
component: ''
filename: TablaBreakbeatScience_MoodyPlucks_STEM_09.wav
instrument: clean electric guitar
raw:
R01:
filename: TablaBreakbeatScience_MoodyPlucks_RAW_09_01.wav
instrument: clean electric guitar
S10:
component: ''
filename: TablaBreakbeatScience_MoodyPlucks_STEM_10.wav
instrument: tabla
raw:
R01:
filename: TablaBreakbeatScience_MoodyPlucks_RAW_10_01.wav
instrument: tabla
R02:
filename: TablaBreakbeatScience_MoodyPlucks_RAW_10_02.wav
instrument: tabla
S11:
component: ''
filename: TablaBreakbeatScience_MoodyPlucks_STEM_11.wav
instrument: tabla
raw:
R01:
filename: TablaBreakbeatScience_MoodyPlucks_RAW_11_01.wav
instrument: tabla
R02:
filename: TablaBreakbeatScience_MoodyPlucks_RAW_11_02.wav
instrument: tabla
title: Moody Plucks
version: 1.2
website:
- www.ethanhein.com
|
medleydb/data/Metadata/TablaBreakbeatScience_MoodyPlucks_METADATA.yaml
|
---
- name: set variable overrides
set_fact:
_gitlab_requirements: "{{ gitlab_requirements | default(gitlab_requirements_default | sorted_get(overrides)) }}"
vars:
overrides:
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_release | lower }}"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version }}"
- "{{ ansible_distribution | lower }}"
- "{{ ansible_os_family | lower }}"
- "default"
tags:
- always
- name: load installation script url
set_fact:
gitlab_repository_installation_script_url: "{{ gitlab_repository_installation_script_url_default[ansible_os_family | lower] }}"
when: gitlab_repository_installation_script_url is not defined
- name: check existing configuration
stat:
path: /etc/gitlab/gitlab.rb
register: gitlab_config_file
- name: check existing installation
stat:
path: /usr/bin/gitlab-ctl
register: gitlab_file
- name: install gitlab dependencies
package:
name: "{{ _gitlab_requirements }}"
state: "{{ gitlab_package_state }}"
- name: create temporary target directory
tempfile:
state: directory
prefix: gitlab
register: tmpdir
changed_when: false
- name: register installation script location
set_fact:
install_script: "{{ tmpdir.path }}/gitlab_install_repository.sh"
- name: download gitlab repository installation script
get_url:
url: "{{ gitlab_repository_installation_script_url }}"
dest: "{{ install_script }}"
validate_certs: "{{ gitlab_download_validate_certs }}"
    mode: 0755
when: not gitlab_file.stat.exists
- name: install gitlab repository
command: "{{ install_script }}"
when: not gitlab_file.stat.exists
- block:
- name: import apt gpg key
apt_key:
url: https://packages.gitlab.com/gpg.key
- name: update apt package cache
apt:
update_cache: true
- name: update ca certificates
command: update-ca-certificates
changed_when: false
when: ansible_os_family | lower == 'debian'
- name: install gitlab
package:
name: "{{ gitlab_package | default(gitlab_package_default) }}"
notify: restart gitlab-runsvdir service
- name: flush handlers
meta: flush_handlers
- name: manage gitlab configuration
template:
src: "{{ gitlab_conf_template }}"
dest: "{{ gitlab_conf_path }}"
owner: "{{ gitlab_user }}"
group: "{{ gitlab_group }}"
mode: 0600
notify: reconfigure gitlab
- name: reconfigure gitlab (first run)
command: gitlab-ctl reconfigure
args:
creates: /var/opt/gitlab/bootstrapped
when: not gitlab_file.stat.exists
- name: remove temporary directories
file:
path: "{{ tmpdir.path }}"
state: absent
changed_when: false
- block:
- name: create gitlab backup script
template:
src: "{{ gitlab_backup_template }}"
dest: "{{ gitlab_backup_bin }}"
owner: root
group: root
mode: 0750
tags:
- backup
- restore
- name: create gitlab restore script
template:
src: "{{ gitlab_restore_template }}"
dest: "{{ gitlab_restore_bin }}"
owner: root
group: root
mode: 0750
tags:
- backup
- restore
- name: manage gitlab backup cronjob
cron:
name: gitlab-backup-create
job: "{{ gitlab_backup_bin }}"
hour: "{{ gitlab_cron_hour_setting }}"
minute: "{{ gitlab_cron_minute_setting }}"
tags:
- backup
- restore
- name: create gitlab backup motd pointers for debian
copy:
dest: /etc/update-motd.d/99-gitlab
content: |
#!/usr/bin/env bash
echo
echo run gitlab-backup to manually force gitlab backup creation
echo run gitlab-restore to manually restore latest gitlab backup
echo
mode: 0755
when: ansible_os_family | lower == 'debian'
tags:
- backup
- restore
- name: create gitlab backup motd pointers for redhat
copy:
    dest: /etc/profile.d/99-gitlab.sh
content: |
#!/usr/bin/env bash
echo
echo run gitlab-backup to manually force gitlab backup creation
echo run gitlab-restore to manually restore latest gitlab backup
echo
mode: 0755
when: ansible_os_family | lower == 'redhat'
tags:
- backup
- restore
when: gitlab_backup | default(True)
|
tasks/main.yml
|
name: schedule
on:
push:
branches:
- master
pull_request:
branches:
- master
schedule:
- cron: '0 */4 * * *'
jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-latest, windows-latest ]
steps:
- name: Checkout
uses: actions/checkout@master
- name: Build on Ubuntu
if: matrix.os == 'ubuntu-latest'
run: |
sudo apt-get update
sudo apt-get install -y unixodbc unixodbc-dev libcurl4-openssl-dev libglfw3-dev
bash auto-build.sh
bash package_tools/deb/make.sh
cp *.deb covscript-amd64.deb
- name: Download MinGW-w64
if: matrix.os == 'windows-latest'
uses: i3h/download-release-asset@v1.2.0
with:
owner: covscript-archives
repo: mingw-w64
tag: gcc-10.2.0-mingw-w64-8.0.0-r8
file: gcc-10.2.0-mingw-w64-8.0.0-r8-covscript.org.zip
- name: Download Wix Toolset
if: matrix.os == 'windows-latest'
uses: i3h/download-release-asset@v1.2.0
with:
owner: wixtoolset
repo: wix3
tag: wix3112rtm
file: wix311-binaries.zip
- name: Install Toolset in Windows
if: matrix.os == 'windows-latest'
run: |
rm C:/ProgramData/chocolatey/bin/gcc.exe -force
rm C:/ProgramData/chocolatey/bin/g++.exe -force
rm C:/Strawberry/c/bin/gcc.exe -force
rm C:/Strawberry/c/bin/g++.exe -force
7z x gcc-10.2.0-mingw-w64-8.0.0-r8-covscript.org.zip -oC:\
7z x wix311-binaries.zip -oC:\Wix
- name: Build on Windows
if: matrix.os == 'windows-latest'
run: |
.github/workflows/build.bat
cp *.msi covscript-amd64.msi
- name: Run tests on Ubuntu
if: matrix.os == 'ubuntu-latest'
run: |
sudo dpkg -i covscript-amd64.deb
cs -v
cs_dbg -v
cspkg install --import
cspkg list
cd build-cache/covscript/examples
cs benchmark.csc
- name: Package CSPKG Repo on Ubuntu
if: matrix.os == 'ubuntu-latest'
uses: montudor/action-zip@v0.1.1
with:
args: zip -qq -r cspkg-repo.zip cspkg-repo
- name: Package Build files on Ubuntu
if: matrix.os == 'ubuntu-latest'
uses: montudor/action-zip@v0.1.1
with:
args: zip -qq -r covscript-linux.zip build
- name: Package CSPKG Repo and Build files on Windows
if: matrix.os == 'windows-latest'
run: |
7z a -mmt4 -mx9 cspkg-repo.7z cspkg-repo
7z a -mmt4 -mx9 covscript-win32.7z build
- name: Automatic Release MSI
if: matrix.os == 'windows-latest'
uses: marvinpinto/action-automatic-releases@master
with:
repo_token: "${{ secrets.GITHUB_TOKEN }}"
automatic_release_tag: "windows-schedule"
title: "Windows Schedule Build"
prerelease: false
files: |
*.msi
*.7z
- name: Automatic Release DEB
if: matrix.os == 'ubuntu-latest'
uses: marvinpinto/action-automatic-releases@master
with:
repo_token: "${{ secrets.GITHUB_TOKEN }}"
automatic_release_tag: "ubuntu-schedule"
title: "Ubuntu Schedule Build"
prerelease: false
files: |
*.deb
*.zip
|
.github/workflows/schedule.yml
|
uid: "com.azure.resourcemanager.compute.models.VirtualMachineScaleSetNetworkConfiguration.withDnsSettings*"
fullName: "com.azure.resourcemanager.compute.models.VirtualMachineScaleSetNetworkConfiguration.withDnsSettings"
name: "withDnsSettings"
nameWithType: "VirtualMachineScaleSetNetworkConfiguration.withDnsSettings"
members:
- uid: "com.azure.resourcemanager.compute.models.VirtualMachineScaleSetNetworkConfiguration.withDnsSettings(com.azure.resourcemanager.compute.models.VirtualMachineScaleSetNetworkConfigurationDnsSettings)"
fullName: "com.azure.resourcemanager.compute.models.VirtualMachineScaleSetNetworkConfiguration.withDnsSettings(VirtualMachineScaleSetNetworkConfigurationDnsSettings dnsSettings)"
name: "withDnsSettings(VirtualMachineScaleSetNetworkConfigurationDnsSettings dnsSettings)"
nameWithType: "VirtualMachineScaleSetNetworkConfiguration.withDnsSettings(VirtualMachineScaleSetNetworkConfigurationDnsSettings dnsSettings)"
summary: "Set the dnsSettings property: The dns settings to be applied on the network interfaces."
parameters:
- description: "the dnsSettings value to set."
name: "dnsSettings"
type: "<xref href=\"com.azure.resourcemanager.compute.models.VirtualMachineScaleSetNetworkConfigurationDnsSettings?alt=com.azure.resourcemanager.compute.models.VirtualMachineScaleSetNetworkConfigurationDnsSettings&text=VirtualMachineScaleSetNetworkConfigurationDnsSettings\" data-throw-if-not-resolved=\"False\" />"
syntax: "public VirtualMachineScaleSetNetworkConfiguration withDnsSettings(VirtualMachineScaleSetNetworkConfigurationDnsSettings dnsSettings)"
returns:
description: "the VirtualMachineScaleSetNetworkConfiguration object itself."
type: "<xref href=\"com.azure.resourcemanager.compute.models.VirtualMachineScaleSetNetworkConfiguration?alt=com.azure.resourcemanager.compute.models.VirtualMachineScaleSetNetworkConfiguration&text=VirtualMachineScaleSetNetworkConfiguration\" data-throw-if-not-resolved=\"False\" />"
type: "method"
metadata: {}
package: "com.azure.resourcemanager.compute.models"
artifact: com.azure.resourcemanager:azure-resourcemanager-compute:2.2.0
|
docs-ref-autogen/com.azure.resourcemanager.compute.models.VirtualMachineScaleSetNetworkConfiguration.withDnsSettings.yml
|
uid: "com.microsoft.store.partnercenter.invoices.InvoiceLineItemCollectionOperations.get*"
fullName: "com.microsoft.store.partnercenter.invoices.InvoiceLineItemCollectionOperations.get"
name: "get"
nameWithType: "InvoiceLineItemCollectionOperations.get"
members:
- uid: "com.microsoft.store.partnercenter.invoices.InvoiceLineItemCollectionOperations.get()"
fullName: "com.microsoft.store.partnercenter.invoices.InvoiceLineItemCollectionOperations.get()"
name: "get()"
nameWithType: "InvoiceLineItemCollectionOperations.get()"
summary: "Retrieves invoice line items for a specific billing provider and invoice line item type"
syntax: "public ResourceCollection<InvoiceLineItem> get()"
returns:
description: "The collection of invoice line items."
type: "<xref href=\"com.microsoft.store.partnercenter.models.ResourceCollection?alt=com.microsoft.store.partnercenter.models.ResourceCollection&text=ResourceCollection\" data-throw-if-not-resolved=\"False\" /><<xref href=\"com.microsoft.store.partnercenter.models.invoices.InvoiceLineItem?alt=com.microsoft.store.partnercenter.models.invoices.InvoiceLineItem&text=InvoiceLineItem\" data-throw-if-not-resolved=\"False\" />>"
- uid: "com.microsoft.store.partnercenter.invoices.InvoiceLineItemCollectionOperations.get(int,int)"
fullName: "com.microsoft.store.partnercenter.invoices.InvoiceLineItemCollectionOperations.get(int size, int offset)"
name: "get(int size, int offset)"
nameWithType: "InvoiceLineItemCollectionOperations.get(int size, int offset)"
summary: "Retrieves invoice line items for a specific billing provider and invoice line item type and allows paging"
parameters:
- name: "size"
type: "<xref href=\"int?alt=int&text=int\" data-throw-if-not-resolved=\"False\" />"
- name: "offset"
type: "<xref href=\"int?alt=int&text=int\" data-throw-if-not-resolved=\"False\" />"
syntax: "public ResourceCollection<InvoiceLineItem> get(int size, int offset)"
returns:
description: "The collection of invoice line items."
type: "<xref href=\"com.microsoft.store.partnercenter.models.ResourceCollection?alt=com.microsoft.store.partnercenter.models.ResourceCollection&text=ResourceCollection\" data-throw-if-not-resolved=\"False\" /><<xref href=\"com.microsoft.store.partnercenter.models.invoices.InvoiceLineItem?alt=com.microsoft.store.partnercenter.models.invoices.InvoiceLineItem&text=InvoiceLineItem\" data-throw-if-not-resolved=\"False\" />>"
type: "method"
metadata: {}
package: "com.microsoft.store.partnercenter.invoices"
artifact: com.microsoft.store:partnercenter:1.15.3
|
docs-ref-autogen/com.microsoft.store.partnercenter.invoices.InvoiceLineItemCollectionOperations.get.yml
|
Urbem\CoreBundle\Entity\Pessoal\ContratoPensionista:
type: entity
table: pessoal.contrato_pensionista
id:
codContrato:
type: integer
generator:
strategy: NONE
column: cod_contrato
fields:
codContratoCedente:
type: integer
nullable: false
column: cod_contrato_cedente
codDependencia:
type: integer
nullable: false
column: cod_dependencia
codPensionista:
type: integer
nullable: false
column: cod_pensionista
numBeneficio:
type: string
nullable: true
length: 15
column: num_beneficio
percentualPagamento:
type: decimal
nullable: true
precision: 5
scale: 2
column: percentual_pagamento
dtInicioBeneficio:
type: date
nullable: false
column: dt_inicio_beneficio
dtEncerramento:
type: date
nullable: true
column: dt_encerramento
motivoEncerramento:
type: string
nullable: true
length: 200
column: motivo_encerramento
manyToOne:
fkPessoalPensionista:
targetEntity: Urbem\CoreBundle\Entity\Pessoal\Pensionista
inversedBy: fkPessoalContratoPensionistas
joinColumns:
cod_pensionista:
referencedColumnName: cod_pensionista
cod_contrato_cedente:
referencedColumnName: cod_contrato_cedente
fkPessoalTipoDependencia:
targetEntity: Urbem\CoreBundle\Entity\Pessoal\TipoDependencia
inversedBy: fkPessoalContratoPensionistas
joinColumns:
cod_dependencia:
referencedColumnName: cod_dependencia
oneToMany:
fkPessoalAtributoContratoPensionistas:
cascade:
- persist
- remove
orphanRemoval: true
targetEntity: Urbem\CoreBundle\Entity\Pessoal\AtributoContratoPensionista
mappedBy: fkPessoalContratoPensionista
fkPessoalContratoPensionistaContaSalarios:
cascade:
- persist
- remove
orphanRemoval: true
targetEntity: Urbem\CoreBundle\Entity\Pessoal\ContratoPensionistaContaSalario
mappedBy: fkPessoalContratoPensionista
fkPessoalContratoPensionistaOrgoes:
cascade:
- persist
- remove
orphanRemoval: true
targetEntity: Urbem\CoreBundle\Entity\Pessoal\ContratoPensionistaOrgao
mappedBy: fkPessoalContratoPensionista
fkPessoalContratoPensionistaPrevidencias:
cascade:
- persist
- remove
orphanRemoval: true
targetEntity: Urbem\CoreBundle\Entity\Pessoal\ContratoPensionistaPrevidencia
mappedBy: fkPessoalContratoPensionista
oneToOne:
fkPessoalContratoPensionistaProcesso:
cascade:
- persist
- remove
targetEntity: Urbem\CoreBundle\Entity\Pessoal\ContratoPensionistaProcesso
mappedBy: fkPessoalContratoPensionista
joinColumns:
cod_contrato:
referencedColumnName: cod_contrato
fkPessoalContratoPensionistaCasoCausa:
cascade:
- persist
- remove
targetEntity: Urbem\CoreBundle\Entity\Pessoal\ContratoPensionistaCasoCausa
mappedBy: fkPessoalContratoPensionista
joinColumns:
cod_contrato:
referencedColumnName: cod_contrato
fkPessoalContrato:
targetEntity: Urbem\CoreBundle\Entity\Pessoal\Contrato
inversedBy: fkPessoalContratoPensionista
joinColumns:
cod_contrato:
referencedColumnName: cod_contrato
|
src/Urbem/CoreBundle/Resources/config/doctrine/Pessoal.ContratoPensionista.orm.yml
|
items:
- uid: azure-arm-eventgrid.JsonInputSchemaMapping
name: JsonInputSchemaMapping
fullName: JsonInputSchemaMapping
children:
- azure-arm-eventgrid.JsonInputSchemaMapping.dataVersion
- azure-arm-eventgrid.JsonInputSchemaMapping.eventTime
- azure-arm-eventgrid.JsonInputSchemaMapping.eventType
- azure-arm-eventgrid.JsonInputSchemaMapping.id
- azure-arm-eventgrid.JsonInputSchemaMapping.inputSchemaMappingType
- azure-arm-eventgrid.JsonInputSchemaMapping.subject
- azure-arm-eventgrid.JsonInputSchemaMapping.topic
langs:
- typeScript
type: interface
summary: Isso permite a publicação para a grade de eventos usando um esquema de entrada personalizado. Isso pode ser usado para mapear propriedades de um esquema personalizado de JSON de entrada para o esquema de evento da grade de eventos.
extends:
name: azure-arm-eventgrid.InputSchemaMapping
package: azure-arm-eventgrid
- uid: azure-arm-eventgrid.JsonInputSchemaMapping.dataVersion
name: dataVersion
fullName: dataVersion
children: []
langs:
- typeScript
type: property
summary: As informações de mapeamento para a propriedade DataVersion do evento de grade de eventos.
optional: true
syntax:
content: 'dataVersion?: JsonFieldWithDefault'
return:
type:
- azure-arm-eventgrid.JsonFieldWithDefault
package: azure-arm-eventgrid
- uid: azure-arm-eventgrid.JsonInputSchemaMapping.eventTime
name: eventTime
fullName: eventTime
children: []
langs:
- typeScript
type: property
summary: As informações de mapeamento para a propriedade EventTime do evento de grade de eventos.
optional: true
syntax:
content: 'eventTime?: JsonField'
return:
type:
- azure-arm-eventgrid.JsonField
package: azure-arm-eventgrid
- uid: azure-arm-eventgrid.JsonInputSchemaMapping.eventType
name: eventType
fullName: eventType
children: []
langs:
- typeScript
type: property
summary: As informações de mapeamento para a propriedade EventType do evento de grade de eventos.
optional: true
syntax:
content: 'eventType?: JsonFieldWithDefault'
return:
type:
- azure-arm-eventgrid.JsonFieldWithDefault
package: azure-arm-eventgrid
- uid: azure-arm-eventgrid.JsonInputSchemaMapping.id
name: id
fullName: id
children: []
langs:
- typeScript
type: property
summary: As informações de mapeamento para a propriedade de Id do evento de grade de eventos.
optional: true
syntax:
content: 'id?: JsonField'
return:
type:
- azure-arm-eventgrid.JsonField
package: azure-arm-eventgrid
- uid: azure-arm-eventgrid.JsonInputSchemaMapping.inputSchemaMappingType
name: inputSchemaMappingType
fullName: inputSchemaMappingType
children: []
langs:
- typeScript
type: property
summary: Discriminador polimórfico
syntax:
content: 'inputSchemaMappingType: string'
return:
type:
- string
package: azure-arm-eventgrid
- uid: azure-arm-eventgrid.JsonInputSchemaMapping.subject
name: subject
fullName: subject
children: []
langs:
- typeScript
type: property
summary: As informações de mapeamento para a propriedade de assunto do evento de grade de eventos.
optional: true
syntax:
content: 'subject?: JsonFieldWithDefault'
return:
type:
- azure-arm-eventgrid.JsonFieldWithDefault
package: azure-arm-eventgrid
- uid: azure-arm-eventgrid.JsonInputSchemaMapping.topic
name: topic
fullName: topic
children: []
langs:
- typeScript
type: property
summary: As informações de mapeamento para a propriedade de tópico do evento de grade de eventos.
optional: true
syntax:
content: 'topic?: JsonField'
return:
type:
- azure-arm-eventgrid.JsonField
package: azure-arm-eventgrid
references:
- uid: azure-arm-eventgrid.InputSchemaMapping
name: InputSchemaMapping
spec.typeScript:
- name: InputSchemaMapping
fullName: InputSchemaMapping
uid: azure-arm-eventgrid.InputSchemaMapping
- uid: azure-arm-eventgrid.JsonFieldWithDefault
name: JsonFieldWithDefault
spec.typeScript:
- name: JsonFieldWithDefault
fullName: JsonFieldWithDefault
uid: azure-arm-eventgrid.JsonFieldWithDefault
- uid: azure-arm-eventgrid.JsonField
name: JsonField
spec.typeScript:
- name: JsonField
fullName: JsonField
uid: azure-arm-eventgrid.JsonField
|
docs-ref-autogen/azure-arm-eventgrid/JsonInputSchemaMapping.yml
|
---
# vim: filetype=yaml.ansible
# --------------------------
- name: Mark the packages-install sub-role as IN PROGRESS
shell: echo 'IN PROGRESS' > /etc/.setup/packages-install
# -----------------------------------------------------------------------------
- name: Prepare complete list of packages to be installed
set_fact:
packages_list: "{{ packages_list | default('') }} {{ item }}"
with_items:
- "{{ packages_default | difference(packages_default_mask) }}"
- "{{ packages_extra_laptop }}"
- "{{ packages_extra_user }}"
- "{{ packages_extra_work }}"
- "{{ packages_extra_theme }}"
- "{{ packages_extra_codecs }}"
- "{{ packages_extra_plugins }}"
- "{{ packages_extra_utilities }}"
# -----------------------------------------------------------------------------
- name: Cancel any running rpm-ostree transactions
command: rpm-ostree cancel
- name: Run complete cleanup for rpm-ostree
command: rpm-ostree cleanup --base --pending --repomd
when: drivers_ostree_cleanup is not defined
async: 300
poll: 5
- name: Force refresh the repositories metadata for rpm-ostree
command: rpm-ostree refresh-md --force
when: drivers_ostree_refresh is not defined
async: 600
poll: 5
# NOTE: To prevent installation failures we need to upgrade all the ostree layers.
- name: Upgrade system via rpm-ostree (might take a long time)
command: rpm-ostree upgrade
when: drivers_upgrade_reg is not defined
register: upgrade_reg
  changed_when: "'No upgrade available' not in upgrade_reg.stdout"
async: 900
poll: 5
- name: Uninstall all packages requested by the user (might take a long time)
command: rpm-ostree uninstall --reboot --allow-inactive --idempotent {{ packages_to_remove | join(' ') }}
when: packages_to_remove | length > 0
register: uninstall_reg
  changed_when: "'No change' not in uninstall_reg.stdout"
async: 900
poll: 5
- name: Install all packages requested by the user (might take a long time)
command: rpm-ostree install --reboot --allow-inactive --idempotent {{ packages_list }}
register: install_reg
  changed_when: "'No change' not in install_reg.stdout"
async: 1800
poll: 5
- name: Remove packages from OSTree base layer (and reboot system if needed)
command: rpm-ostree override remove --reboot {{ base_packages_to_remove | join(' ') }}
when: base_packages_to_remove | length > 0
register: base_layer_remove_reg
failed_when: base_layer_remove_reg.stdout|regex_search('Override already exists') == ""
changed_when: base_layer_remove_reg.stdout|regex_search('Override already exists') == ""
async: 600
poll: 5
# -----------------------------------------------------------------------------
- name: Mark the packages-install sub-role as DONE
shell: echo 'DONE' > /etc/.setup/packages-install
|
silverblue/roles/layered-packages/tasks/packages-install.yml
|
layout: sidebar
style: dark
plugins:
- jekyll-octicons
- jekyll-github-metadata
- jemoji
permalink: /:year/:month/:day/:title/
defaults:
-
scope:
path: "" # an empty string here means all files in the project
type: "posts"
values:
layout: "post"
projects:
sort_by: pushed
# sort_by options:
# - pushed
# - stars
limit: 9
exclude:
archived: true
forks: true
projects:
# - repo-name
# social_media:
# behance: your_username
# dribbble: your_username
# facebook: your_username
# hackerrank: your_username
# instagram: your_username
# keybase: your_username
# linkedin: your_username
# mastodon: your_username
# medium: your_username
# stackoverflow: your_user_id
# telegram: your_username
# twitter: your_username
# unsplash: your_username
# vk: your_username
# website: http://your_website_url
# youtube: your_username
#topics:
#
# - name: Data Engineering
# web_url: https://github.com/topics/data-engineering
# image_url: http://wfarm2.dataknet.com/static/resources/icons/set113/52641ba2.png
#
# - name: Rust
# web_url: https://github.com/topics/rust
# image_url: https://raw.githubusercontent.com/github/explore/80688e429a7d4ef2fca1e82350fe8e3517d3494d/topics/rust/rust.png
#
# - name: Pytorch
# web_url: https://github.com/topics/pytorch
# image_url: https://pytorch.org/assets/images/pytorch-logo.png
#
# - name: ML Ops / Production pipelines
# web_url: https://github.com/topics/ml-ops
# image_url: https://www.contentcaptureservices.co.uk/wp-content/uploads/2018/11/Machine_Learning-300x300.png
#
# - name: Philosophy and logic
# web_url: https://plato.stanford.edu/entries/russell-paradox/
# image_url: https://cdn0.iconfinder.com/data/icons/network-and-communication-1-8/66/87-512.png
#
# - name: Investing and Algorithmic Trading
# web_url: https://www.investopedia.com/terms/m/macd.asp
# image_url: https://cdn.onlinewebfonts.com/svg/img_547960.png
#thoughts:
|
_config.yml
|
#------------------------------------------------------------------------------
# Assert before launch instances
#------------------------------------------------------------------------------
- name: Check redis instance existence
tags: [ redis_ins , redis_precheck ]
block:
- name: Check for redis instance port
any_errors_fatal: true
command: 'ss -tp state listening sport = :{{ port }}'
register: check_redis_port_result
- name: Set fact whether redis port is open
tags: [ redis_ins , redis_precheck ]
set_fact:
redis_exists: "{{ 'redis' in check_redis_port_result.stdout }}"
# when any running instance exists, abort play in two cases:
# 1. redis_disable_purge = true (global flag to prevent accidentally purge)
# 2. redis_exists_action == 'abort'
- name: Abort due to existing redis instance
when: redis_exists and redis_disable_purge or redis_exists and redis_exists_action == 'abort'
any_errors_fatal: true
fail: msg="Abort because redis instance {{ redis_cluster }}-{{ redis_node }}-{{ port }} is running"
- debug:
msg: "{{ redis_cluster }}-{{ redis_node }}-{{ port }} @ {{ inventory_hostname }}:{{ port }} {% if redis_exists %}is running{% else %}is not running{% endif %} {% if redis_exists and redis_exists_action == 'clean' %} WILL BE PURGED!{% endif %}"
#------------------------------------------------------------------------------
# Clean [DANGEROUS!]
#------------------------------------------------------------------------------
# if redis_exists_action == 'clean' and purge is not disabled, clean before running
- name: Clean existing redis instance
tags: [ redis_ins, redis_clean ]
when: redis_exists_action == 'clean' and not redis_disable_purge
block:
- name: Shut redis instance systemd service
ignore_errors: true
systemd:
name: "{{ redis_cluster }}-{{ redis_node }}-{{ port }}.service"
state: stopped
#enabled: no
- name: Remove redis instance data dir
file: path={{ redis_fs_main }}/redis/{{ redis_cluster }}-{{ redis_node }}-{{ port }} state=absent owner=redis mode=0700
#------------------------------------------------------------------------------
# Config
#------------------------------------------------------------------------------
- name: Create redis resource dir
tags: [ redis_ins, redis_config ]
become: yes
block:
# data dir @ /data/redis/<port> (default)
- name: Create redis instance data dir
file: path={{ redis_fs_main }}/redis/{{ redis_cluster }}-{{ redis_node }}-{{ port }} state=directory owner=redis mode=0700
# config @ /etc/redis/<port>.conf
- name: Render redis instance config
template: src={{ config_template }} dest=/etc/redis/{{ redis_cluster }}-{{ redis_node }}-{{ port }}.conf owner=redis mode=0700
vars:
upstream: "{{ conf['replica_of']|default(None) }}"
config_template: "{% if redis_mode == 'sentinel' %}redis-sentinel.conf{% else %}{{ redis_conf }}{% endif %}"
# services name examples: redis-test-1-6379 , redis-sentinel-2-26379
- name: Render redis systemd service file
template: src={{ service_template }} dest=/usr/lib/systemd/system/{{ svcname }} owner=root mode=644
vars:
svcname: "{{ redis_cluster }}-{{ redis_node }}-{{ port }}.service"
service_template: "{% if redis_mode == 'sentinel' %}redis-sentinel.service{% else %}redis.service{% endif %}"
#------------------------------------------------------------------------------
# launch
#------------------------------------------------------------------------------
# if existing data dir not cleaned, it will RESTART with existing rdb file
- name: Restart redis-server primary systemd service
tags: [ redis_ins, redis_launch, redis_primary ]
when: conf.replica_of is not defined
shell: systemctl daemon-reload;systemctl restart "{{ redis_cluster }}-{{ redis_node }}-{{ port }}"
# systemd:
# name: "{{ redis_cluster }}-{{ redis_node }}-{{ port }}"
# daemon_reload: on
# state: restarted
# enabled: yes
- name: Restart redis-server replica systemd service
tags: [ redis_ins, redis_launch, redis_replica ]
when: conf.replica_of is defined
shell: systemctl daemon-reload; systemctl restart "{{ redis_cluster }}-{{ redis_node }}-{{ port }}"
# systemd:
# name: "{{ redis_cluster }}-{{ redis_node }}-{{ port }}"
# daemon_reload: yes
# state: restarted
# enabled: yes
|
roles/redis/tasks/instance.yml
|
name: .NET Core CI
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
build-and-test-mongo:
    name: Test against supported versions of the MongoDB.Driver package
runs-on: ubuntu-latest
strategy:
matrix:
dotnet-version: [3.1.x]
mongo-driver-version: [ '2.4.4', '2.5.1', '2.6.1', '2.7.3', '2.8.1', '2.9.3', '2.10.4', '2.11.4' ]
steps:
- uses: actions/checkout@v2
- name: Setup .NET Core ${{ matrix.dotnet-version }}
uses: actions/setup-dotnet@v1
with:
dotnet-version: ${{ matrix.dotnet-version }}
- name: Set MongoDB.Driver version to ${{ matrix.mongo-driver-version }}
run: sed -i -E "s/(<PackageReference Include=\"MongoDB.Driver\" Version=\").*(\" \/>)/\1${{ matrix.mongo-driver-version }}\2/" ./tests/Elastic.Apm.Mongo.IntegrationTests/Elastic.Apm.Mongo.IntegrationTests.csproj
- name: Print replaced content
run: cat ./tests/Elastic.Apm.Mongo.IntegrationTests/Elastic.Apm.Mongo.IntegrationTests.csproj
- name: Install dependencies
run: dotnet restore
- name: Build
run: dotnet build --configuration Release --no-restore
- name: Test
run: dotnet test --configuration Release --no-build --verbosity normal /p:CollectCoverage=true /p:CoverletOutputFormat=opencover
build-and-test-apm:
name: Test against supported versions of Elastic APM
runs-on: ubuntu-latest
strategy:
matrix:
dotnet-version: [ 3.1.x ]
apm-agent-version: [ '1.7.0' ]
steps:
- uses: actions/checkout@v2
- name: Setup .NET Core ${{ matrix.dotnet-version }}
uses: actions/setup-dotnet@v1
with:
dotnet-version: ${{ matrix.dotnet-version }}
- name: Set Elastic.APM version to ${{ matrix.apm-agent-version }}
run: sed -i -E "s/(<PackageReference Include=\"Elastic.Apm\" Version=\").*(\" \/>)/\1${{ matrix.apm-agent-version }}\2/" ./tests/Elastic.Apm.Mongo.IntegrationTests/Elastic.Apm.Mongo.IntegrationTests.csproj
- name: Print replaced content
run: cat ./tests/Elastic.Apm.Mongo.IntegrationTests/Elastic.Apm.Mongo.IntegrationTests.csproj
- name: Install dependencies
run: dotnet restore
- name: Build
run: dotnet build --configuration Release --no-restore
- name: Test
run: dotnet test --configuration Release --no-build --verbosity normal /p:CollectCoverage=true /p:CoverletOutputFormat=opencover
artifacts:
name: Generate NuGet packages
runs-on: ubuntu-latest
needs: [build-and-test-mongo, build-and-test-apm]
strategy:
matrix:
dotnet-version: [ 3.1.x ]
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Setup .NET Core ${{ matrix.dotnet-version }}
uses: actions/setup-dotnet@v1
with:
dotnet-version: ${{ matrix.dotnet-version }}
- name: Install GitVersion
uses: gittools/actions/gitversion/setup@v0.9.7
with:
versionSpec: '5.5.x'
- name: Use GitVersion
id: gitversion # step id used as reference for output values
uses: gittools/actions/gitversion/execute@v0.9.7
with:
updateAssemblyInfo: true
updateAssemblyInfoFilename: ./src/Elastic.Apm.Mongo/Properties/AssemblyInfo.cs
- name: Pack
run: dotnet pack --configuration Release --include-symbols --output nupkgs -p:PackageVersion=${{ steps.gitversion.outputs.nuGetVersionV2 }}
- uses: actions/upload-artifact@v2
with:
name: packages
path: |
nupkgs/*.nupkg
nupkgs/*.snupkg
# - script: |
# COMMAND="$(git branch --contains tags/$BUILD_SOURCEBRANCHNAME)"
# echo "##vso[task.setvariable variable=branch]${COMMAND}"
# displayName: Identify Branch Name of tag
# condition: startsWith(variables['Build.SourceBranch'], 'refs/tags/')
#
# # DotNetCoreCLI@2 cannot push nuget packages to external NuGet feeds with authentication using an encrypted Api Key
# # https://github.com/Microsoft/azure-pipelines-tasks/issues/7160
# - task: DotNetCoreCLI@2
# displayName: Publish NuGet package to NuGet
# # 'git branch --contains' command returns list of branches. In addition, two space characters are in name.
# condition: and(in(variables['branch'], 'master', ' master'), startsWith(variables['Build.SourceBranch'], 'refs/tags/'))
# inputs:
# command: custom
# custom: nuget
# arguments: >
# push $(Build.ArtifactStagingDirectory)/ElasticApm.MongoDB.$(Build.SourceBranchName).nupkg
# -s https://api.nuget.org/v3/index.json
# -k $(NuGetApiKey)
|
.github/workflows/dotnetcore.yml
|
---
# Create a project
- os_project:
auth:
auth_url: {{ cs9_os_auth_url }}
username: {{ cs9_os_auth_user }}
password: {{ <PASSWORD> }}
project_name: demo
cloud: {{ cs9_os_cloud }}
state: present
name: {{ project_name }}
description: CS9 Project
domain_id: {{ project_os_domainid }}
enabled: True
tags:
- project-openstack
# Create a tenant network for the project (internal only: external is FALSE).
- os_network:
auth:
auth_url: {{ cs9_os_auth_url }}
username: {{ cs9_os_auth_user }}
password: {{ <PASSWORD> }}
project_name: {{ project_name }}
cloud: {{ cs9_os_cloud }}
shared: no
state: present
name: tenant-{{ project_name }}
external: FALSE
validate_certs: FALSE
    wait: yes
tags:
- project-openstack
# Create a new (or update an existing) subnet on the specified network
- os_subnet:
auth:
auth_url: {{ cs9_os_auth_url }}
username: {{ cs9_os_auth_user }}
password: {{ <PASSWORD> }}
project_name: {{ project_name }}
state: present
network_name: tenant-{{ project_name }}
name: tenant-{{ project_name }}-subnet
cidr: "{{ project_os_network_cidr }}"
dns_nameservers: {{ project_os_network_dnssrv }}
enable_dhcp: True
allocation_pool_start: "{{ project_os_network_alloc_start }}"
allocation_pool_end: "{{ project_os_network_alloc_end }}"
tags:
- project-openstack
# Creates a router attached to ext_network1 on an IPv4 subnet and one
# internal subnet interface.
- os_router:
auth:
auth_url: {{ cs9_os_auth_url }}
username: {{ cs9_os_auth_user }}
password: {{ <PASSWORD> }}
project_name: {{ project_name }}
cloud: {{ cs9_os_cloud }}
state: present
name: router-{{ project_name }}
network: {{ cs9_os_external_network_subnet }}
external_fixed_ips:
- subnet: {{ cs9_os_external_network_subnet }}
interfaces:
- tenant-{{ project_name }}-subnet
tags:
- project-openstack
# Create a security group
- os_security_group:
auth:
auth_url: {{ cs9_os_auth_url }}
username: {{ cs9_os_auth_user }}
password: {{ <PASSWORD> }}
project_name: {{ project_name }}
cloud: {{ cs9_os_cloud }}
state: present
name: sec-{{ project_name }}
description: security group for {{ project_name }} servers
tags:
- project-openstack
# Create a security group rule for ping
- os_security_group_rule:
cloud: {{ cs9_os_cloud }}
security_group: sec-{{ project_name }}
protocol: icmp
remote_ip_prefix: 0.0.0.0/0
# Creates a new key pair and the private key returned after the run.
- os_keypair:
auth:
auth_url: {{ cs9_os_auth_url }}
username: {{ cs9_os_auth_user }}
password: {{ <PASSWORD> }}
project_name: {{ project_name }}
cloud: {{ cs9_os_cloud }}
state: present
name: {{ project_name }}_key
|
tasks/openstack.yml
|
- division : Software
float : left
background-img : https://cdn3.iconfinder.com/data/icons/illustricon-tech/512/development.desktop-512.png
members :
- name : <NAME>
id : Arpit
img : arpit.jpg
division : Software Team
work :
facebook : https://www.facebook.com/Arpit159
github : https://github.com/arpit15/
Experience :
- name: <NAME>
id : Jayant
img : JayantAgarwal.jpg
division: Software
work:
facebook: https://www.facebook.com/ag.jayant
github: http://github.com/agjayant
Experience: I have been with AUV IITK since the first semester. We got the opportunity to acquire some basic skills in various domains which turned out to be really helpful in the coming semesters. It was because I joined AUV that I could explore and find my interest. It has been a great learning experience for me and I sincerely recommend freshers to consider joining the team.
- name: <NAME>
id : Soumye
img : SoumyeSinghal.jpg
division: Software
work: Motion Control
facebook: https://www.facebook.com/singhalsoumye
github : https://github.com/soumye
Experience:
- name : <NAME>
id : Shikher
img : shikher.jpg
division : Software Team
work :
facebook : https://www.facebook.com/shikherverma42?ref=br_rs
github : https://github.com/ShikherVerma
Experience :
- name: <NAME>
id : Siddhartha
img : SiddharthaSaxena.jpeg
division: Software
work: Computer Vision
facebook: https://www.facebook.com/siddhartha.saxena.71
github: http://github.com/siddsax
Experience: AUV-IITK was the first technical project in which I was involved and it definitely had a big impact. I learned stuffs ranging from doing code in OpenCV to carrying heavy stuffs at 45 degrees heat of IIT Kanpur to the Swimming pool but above all made awesome friends with like minded interests which I feel is the best part of being involved in any club activity.
- name: <NAME>
id : Prakhar
img : PrakharAgarwal.png
division: Software
work: Line Detection, Line following, Bouy Detection, Gate Detection, Torpedo Shooting
facebook: https://www.facebook.com/prakhar.agarwal.754365
github: https://github.com/pkhrag
Experience: It was really nice to work in AUV. I learnt a lot from it. It helped me developing interest in systems and development. Working with the team and achieving our goal (NIOT competition) was really a great experience.
- division : Mechanical
float : right
background-img : https://image.freepik.com/icones-gratis/engrenagens-variante-com-chave-ferramenta_318-47662.jpg
members :
- name : <NAME>
id : Abhishek
img : shastri.jpg
division : Mechanical Team
work :
facebook :
github :
Experience :
- name : <NAME>
id : Aditya
img : chopra.jpg
division : Mechanical Team
work :
facebook : https://www.facebook.com/killeraditya30
github :
Experience :
- name : <NAME>
id : Divyanshu
img : divyanshu.jpg
division : Mechanical Team
work :
facebook : https://www.facebook.com/divyanshu.patel
github :
Experience :
- name : <NAME>
id : Mohit
img : mohit.jpg
division : Mechanical Team
work :
facebook : https://www.facebook.com/mohit.pandey.1800
github :
Experience :
- name : <NAME>
id : Pankaj
img : pankaj.jpg
division : Mechanical Team
work :
facebook : https://www.facebook.com/profile.php?id=100009456651000
github :
Experience :
- name : <NAME>
id : Pawan
img : pawan.jpg
division : Mechanical Team
work :
facebook : https://www.facebook.com/Pawan.agrawal.4
github :
Experience :
- division : Electrical
float : left
background-img : http://www.electronicsb2b.com/wp-content/uploads/2017/02/ELCINA.jpg
members :
- name : <NAME>
id : Harsh
img : harsh.jpg
division : Electrical Team
work :
facebook : https://www.facebook.com/harsh.vardhan.834
github :
Experience :
- name : <NAME>
id : Atharva
img : muli.jpg
division : Electrical Team
work :
facebook : https://www.facebook.com/atharva.mulmuley
github :
Experience :
|
_data/alumni_detail/blocks.yml
|
name: build actions
on:
push:
branches:
- '**'
jobs:
build:
runs-on: ubuntu-latest
name: build actions with ncc
steps:
- uses: actions/checkout@v2
- uses: actions/setup-node@v1.4.4
with:
node-version: '13.x'
- run: npm i -g @zeit/ncc
- name: Install git-commit
uses: jactor-rises/actions/node-build-action@v2-node-build-action
with:
artifact_folder: actions/git-commit
- name: Install git-commit-n-tag
uses: jactor-rises/actions/node-build-action@v2-node-build-action
with:
artifact_folder: actions/git-commit-n-tag
- name: Install git-tag
uses: jactor-rises/actions/node-build-action@v2-node-build-action
with:
artifact_folder: actions/git-tag
- name: Install git-tag-snapshot
uses: jactor-rises/actions/node-build-action@v2-node-build-action
with:
artifact_folder: actions/git-tag-snapshot
- name: Install maven-deploy
uses: jactor-rises/actions/node-build-action@v2-node-build-action
with:
artifact_folder: actions/maven-deploy
- name: Install maven-prepare-deploy
uses: jactor-rises/actions/node-build-action@v2-node-build-action
with:
artifact_folder: actions/maven-prepare-deploy
- name: Install maven-setup
uses: jactor-rises/actions/node-build-action@v2-node-build-action
with:
artifact_folder: actions/maven-setup
- name: Install maven-verify-dependencies
uses: jactor-rises/actions/node-build-action@v2-node-build-action
with:
artifact_folder: actions/maven-verify-dependencies
- name: Install maven-verify-deploy
uses: jactor-rises/actions/node-build-action@v2-node-build-action
with:
artifact_folder: actions/maven-verify-deploy
- name: Install node-build-action
uses: jactor-rises/actions/node-build-action@v2-node-build-action
with:
artifact_folder: actions/node-build-action
- name: Generate timestamp
id: now
run: echo ::set-output name=timestamp::$(date +"%Y-%m-%d:%T")
- uses: jactor-rises/actions/git-commit@v2-git-commit
with:
commit_message: "node-build-action@${{ steps.now.outputs.timestamp }}"
pattern: .
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
.github/workflows/ci.yaml
|
api_name: []
items:
- children:
- azure.batch.models.batch_service_client_enums.CachingType.none
- azure.batch.models.batch_service_client_enums.CachingType.read_only
- azure.batch.models.batch_service_client_enums.CachingType.read_write
class: azure.batch.models.batch_service_client_enums.CachingType
fullName: azure.batch.models.batch_service_client_enums.CachingType
inheritance:
- inheritance:
- type: builtins.object
type: builtins.str
- inheritance:
- type: builtins.object
type: enum.Enum
langs:
- python
module: azure.batch.models.batch_service_client_enums
name: CachingType
source:
id: CachingType
path: azure-batch\azure\batch\models\batch_service_client_enums.py
remote:
branch: master
path: azure-batch\azure\batch\models\batch_service_client_enums.py
repo: https://github.com/Azure/azure-sdk-for-python.git
startLine: 90
summary: 'An enumeration.
'
syntax: {}
type: class
uid: azure.batch.models.batch_service_client_enums.CachingType
- class: azure.batch.models.batch_service_client_enums.CachingType
fullName: azure.batch.models.batch_service_client_enums.CachingType.none
langs:
- python
module: azure.batch.models.batch_service_client_enums
name: none
source:
id: none
path: null
remote:
branch: master
path: null
repo: https://github.com/Azure/azure-sdk-for-python.git
startLine: null
summary: 'The caching mode for the disk is not enabled.
'
syntax:
content: none = 'none'
type: attribute
uid: azure.batch.models.batch_service_client_enums.CachingType.none
- class: azure.batch.models.batch_service_client_enums.CachingType
fullName: azure.batch.models.batch_service_client_enums.CachingType.read_only
langs:
- python
module: azure.batch.models.batch_service_client_enums
name: read_only
source:
id: read_only
path: null
remote:
branch: master
path: null
repo: https://github.com/Azure/azure-sdk-for-python.git
startLine: null
summary: 'The caching mode for the disk is read only.
'
syntax:
content: read_only = 'readonly'
type: attribute
uid: azure.batch.models.batch_service_client_enums.CachingType.read_only
- class: azure.batch.models.batch_service_client_enums.CachingType
fullName: azure.batch.models.batch_service_client_enums.CachingType.read_write
langs:
- python
module: azure.batch.models.batch_service_client_enums
name: read_write
source:
id: read_write
path: null
remote:
branch: master
path: null
repo: https://github.com/Azure/azure-sdk-for-python.git
startLine: null
summary: 'The caching mode for the disk is read and write.
'
syntax:
content: read_write = 'readwrite'
type: attribute
uid: azure.batch.models.batch_service_client_enums.CachingType.read_write
references:
- fullName: azure.batch.models.batch_service_client_enums.CachingType.none
isExternal: false
name: none
parent: azure.batch.models.batch_service_client_enums.CachingType
uid: azure.batch.models.batch_service_client_enums.CachingType.none
- fullName: azure.batch.models.batch_service_client_enums.CachingType.read_only
isExternal: false
name: read_only
parent: azure.batch.models.batch_service_client_enums.CachingType
uid: azure.batch.models.batch_service_client_enums.CachingType.read_only
- fullName: azure.batch.models.batch_service_client_enums.CachingType.read_write
isExternal: false
name: read_write
parent: azure.batch.models.batch_service_client_enums.CachingType
uid: azure.batch.models.batch_service_client_enums.CachingType.read_write
|
docs-ref-autogen/azure-batch/azure.batch.models.batch_service_client_enums.CachingType.yml
|
aliases:
- &restore-npm-cache
keys:
- v3-dependencies-{{ checksum "package-lock.json" }}
- v3-dependencies-master
- v3-dependencies-
- &restore-dist-cache
keys:
- v3-dist-{{ .Environment.CIRCLE_SHA1 }}
- v3-dist-master
- v3-dist-
- &restore-ios-cache
keys:
- v3-dist-{{ .Environment.CIRCLE_SHA1 }}
- v3-dist-master
- v3-dist-
defaults: &defaults
working_directory: /home/circleci/repo # need to use full path if using different docker images
docker:
- image: circleci/node:10
version: 2
jobs:
install-dependencies:
<<: *defaults
steps:
- checkout
- restore_cache: *restore-npm-cache
- run:
name: Install Node modules
          command: npm ci
- save_cache:
key: v3-dependencies-{{ checksum "package-lock.json" }}
paths:
- node_modules
build-pwa:
<<: *defaults
steps:
- checkout
- restore_cache: *restore-npm-cache
- run:
name: Run linter
command: npm run lint
- run:
name: Build BEEP
command: npm run build
- save_cache:
key: v3-dist-{{ .Environment.CIRCLE_SHA1 }}
paths:
- dist
gimbal:
<<: *defaults
docker:
- image: moduscreate/gimbal:1.0.4
steps:
- checkout
- restore_cache: *restore-dist-cache
- run:
name: Run Gimbal
command: gimbal
- store_artifacts:
path: ./artifacts
sonarqube:
<<: *defaults
steps:
- checkout
# SonarQube download, config and scanning
- run: echo "Starting SonarQube scan"
- run: wget https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-3.2.0.1227-linux.zip
- run: unzip sonar-scanner-cli-3.2.0.1227-linux.zip
- run: echo "sonar.host.url=https://sonarcloud.io" > sonar-scanner-3.2.0.1227-linux/conf/sonar-scanner.properties
- run: echo "sonar.login=$sonarqubekey" >> sonar-project.properties
- run: echo "sonar.organization=$sonarorg" >> sonar-project.properties
- run:
name: Run SonarQube
command: |
[[ -z "$sonarqubekey" || -z "$sonarorg" ]] && exit 0 || sonar-scanner-3.2.0.1227-linux/bin/sonar-scanner
- run: echo "Scan complete, data sent to sonarcloud"
build-ios:
working_directory: /home/circleci/repo
macos:
xcode: '9.4.0'
steps:
- checkout
- restore_cache: *restore-npm-cache
- restore_cache: *restore-dist-cache
- run:
name: Build iOS project
command: npm run build-ios
- save_cache:
key: v3-ios-{{ .Environment.CIRCLE_SHA1 }}
paths:
- ios
deploy-testflight:
macos:
xcode: '9.4.0'
working_directory: /home/circleci/repo/ios/App
shell: /bin/bash --login -o pipefail
steps:
- restore_cache: *restore-ios-cache
- run: bundle install
- run:
name: Deploy to TestFlight
          command: bundle exec fastlane beta
workflows:
version: 2
build-and-deploy:
jobs:
- install-dependencies
- build-pwa:
requires:
- install-dependencies
- gimbal:
requires:
- build-pwa
- sonarqube
|
.circleci/config.yml
|
items:
- uid: com.microsoft.azure.management.apimanagement.v2019_01_01.NotificationContract.Update
id: Update
artifact: com.microsoft.azure.apimanagement.v2019_01_01:azure-mgmt-apimanagement:1.0.0-beta-1
parent: com.microsoft.azure.management.apimanagement.v2019_01_01
langs:
- java
name: NotificationContract.Update
nameWithType: NotificationContract.Update
fullName: com.microsoft.azure.management.apimanagement.v2019_01_01.NotificationContract.Update
type: Interface
package: com.microsoft.azure.management.apimanagement.v2019_01_01
summary: The template for a NotificationContract update operation, containing all the settings that can be modified.
syntax:
content: public static interface NotificationContract.Update extends Appliable<NotificationContract>, NotificationContract.UpdateStages.WithIfMatch
implements:
- com.microsoft.azure.arm.model.Appliable<com.microsoft.azure.management.apimanagement.v2019_01_01.NotificationContract>
- com.microsoft.azure.management.apimanagement.v2019_01_01.NotificationContract.UpdateStages.WithIfMatch
references:
- uid: com.microsoft.azure.arm.model.Appliable<com.microsoft.azure.management.apimanagement.v2019_01_01.NotificationContract>
name: Appliable<NotificationContract>
nameWithType: Appliable<NotificationContract>
fullName: com.microsoft.azure.arm.model.Appliable<com.microsoft.azure.management.apimanagement.v2019_01_01.NotificationContract>
- uid: com.microsoft.azure.management.apimanagement.v2019_01_01.NotificationContract.UpdateStages.WithIfMatch
name: NotificationContract.UpdateStages.WithIfMatch
nameWithType: NotificationContract.UpdateStages.WithIfMatch
fullName: com.microsoft.azure.management.apimanagement.v2019_01_01.NotificationContract.UpdateStages.WithIfMatch
- uid: com.microsoft.azure.management.apimanagement.v2019_01_01.NotificationContract
name: NotificationContract
nameWithType: NotificationContract
fullName: com.microsoft.azure.management.apimanagement.v2019_01_01.NotificationContract
- uid: com.microsoft.azure.arm.model.Appliable
name: Appliable
nameWithType: Appliable
fullName: com.microsoft.azure.arm.model.Appliable
|
docs-ref-autogen/com.microsoft.azure.management.apimanagement.v2019_01_01.NotificationContract.Update.yml
|
site_name: PHOTONAI - Graph
repo_name: 'photonai-graph'
repo_url: 'https://github.com/wwu-mmll/photonai-graph'
site_dir: '../docs'
edit_uri: 'edit/master/documentation/docs'
# Social media
extra:
social:
- icon: fontawesome/brands/twitter
link: 'https://twitter.com/wwu_mmll'
# Theme configuration
theme:
name: 'material'
palette:
primary: 'teal'
accent: 'teal'
font:
text: 'Roboto'
code: 'Space Mono'
logo: assets/img/favicon.ico
favicon: assets/img/favicon.ico
features:
- toc.integrate
# Latex
extra_javascript:
- assets/javascript/mathjax.js
- https://polyfill.io/v3/polyfill.min.js?features=es6
- https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js
# extensions
markdown_extensions:
- pymdownx.arithmatex:
generic: true
- pymdownx.highlight:
auto_title: true
- pymdownx.inlinehilite
- pymdownx.snippets
- pymdownx.superfences:
custom_fences:
- name: mermaid
class: mermaid
format: !!python/name:pymdownx.superfences.fence_code_format
- codehilite
- admonition
- pymdownx.details
# Site structure
nav:
- Home: 'index.md'
- Installation: 'installation.md'
- Getting Started: 'getting_started.md'
- PHOTONAI Graph Workflow: 'workflow.md'
- Extending PHOTONAI Graph: 'extending_photonai_graph.md'
- API:
- API Introduction: 'api/introduction.md'
- Graph Construction: 'api/graph_construction.md'
- Graph Conversions: 'api/graph_conversions.md'
- Population Averaging: 'api/population_averaging.md'
- Dynamic Utilities: 'api/dynamic_utils.md'
- Graph Measures: 'api/graph_measures.md'
- Graph Embeddings: 'api/graph_embeddings.md'
- Graph Kernels: 'api/graph_kernels.md'
- Graph Neural Networks: 'api/graph_convnets.md'
- Graph Utilities: 'api/graph_utilities.md'
- Graph Controllability: 'api/graph_controllability.md'
- Examples:
- Networkx Loading: 'examples/networkx_pipeline.ipynb'
- Graph Embeddings:
- Graph Embedding LEM: 'examples/GraphEmbedding_lem_example.ipynb'
- Graph Embedding LLE: 'examples/GraphEmbedding_lle_example.ipynb'
- Graph Neural Networks:
- GNN Classification: 'examples/GraphNN_classification_example.ipynb'
- GNN Regression: 'examples/GraphNN_regression_example.ipynb'
- Graph Kernels:
- PyramidMatch: 'examples/GraphKernel_PyramidMatch_example.ipynb'
- SVMtheta: 'examples/GraphKernel_SVMtheta_example.ipynb'
- Graph Measures:
- Extraction Example: 'examples/GraphMeasure_extraction_example.ipynb'
- Pipeline Example: 'examples/GraphMeasure_pipeline_example.ipynb'
- Graph Litmus Test: 'examples/GraphLitmusTest.ipynb'
extra_css:
- assets/stylesheets/material.css
plugins:
- mkdocstrings:
handlers:
python:
selection:
docstring_style: numpy
rendering:
show_source: false
- mkdocs-jupyter:
execute: True
|
documentation/mkdocs.yml
|
data:
path_data_state: data/data_state_sim_nProteins=1100.txt
path_mapping_peptides: data/mapping_peptides_sim_nProteins=1100.txt
path_names_proteins: data/names_proteins_sim_nProteins=1100.txt
path_names_peptides: data/names_peptides_sim_nProteins=1100.txt
sep_input: " "
# Section for output information for MCMC
output:
# Compression for HDF5 output
compress: gzip
# Path for master results
path_results_master: "test/draws_sim-big_master.hdf5"
# Pattern (sprintf-style) for worker-specific results
pattern_results_worker: "test/draws_sim-big_worker-%03d.hdf5"
# Path for combined and serial results
path_results_combined: "test/draws_sim-big.hdf5"
# Path for posterior summaries and diagnostics
path_summaries: "test/summaries_sim-big.hdf5"
# Pattern (sprintf-style) for distributed
pattern_results_distributed: "test/draws_sim-big_distributed-%03d.hdf5"
# Path for protein-level text results
path_protein_summaries_txt: test/protein_summaries_sim-big.txt
# Path for peptide-level text results
path_peptide_summaries_txt: test/peptide_summaries_sim-big.txt
# Section for prior parameters
priors:
# Random censoring probability
p_rnd_cen:
prior_a: 1.
prior_b: 1.
# Hyperparameters for n_states model
n_states_dist:
prior_a: 1.
prior_b: 1.
prior_mean_log: 2.65
prior_prec_log: 1.
# Hyperparameters for state-level variance distribution:
sigmasq_dist:
prior_shape: 1.
prior_rate: 1.
prior_mean_log: 2.65
prior_prec_log: 2.35
# Hyperparameters for peptide-level variance distribution
tausq_dist:
prior_shape: 1.
prior_rate: 1.
prior_mean_log: 2.65
prior_prec_log: 2.35
# Protein-level means
mu:
prior_mean: 0.
prior_prec: 0.
# Section for initializations
init:
# Probability of random censoring
p_rnd_cen: 0.1
# Coefficients for intensity-based censoring model
eta:
mean: [-2.5, 0.5]
sd: [0., 0.]
cor: 0.
# Hyperparameters for state-level variance distribution:
sigmasq_dist:
shape: 4.
rate: 2.
# Hyperparameters for peptide-level variance distribution
tausq_dist:
shape: 4.
rate: 2.
# Section for algorithmic settings for MCMC
settings:
burn_in: 100
n_iterations: 1000
propDf: 3.
verbose: 1
verbose_interval: 50
n_strata: 10
seed_load_data: 0
|
examples/big.yml
|
---
actions:
- utter_greet
- action_all_tickets
- action_create_tickets
- action_update_tickets
- action_updatedtickets
forms:
- create_ticket_form
- update_ticket_form
intents:
- greet
- show_all_tickets
- create_ticket
- update_ticket
slots:
description:
auto_fill: false
type: unfeaturized
email:
auto_fill: false
type: unfeaturized
priority:
auto_fill: false
type: unfeaturized
priority-up:
auto_fill: false
type: unfeaturized
status:
auto_fill: false
type: unfeaturized
status-up:
auto_fill: false
type: unfeaturized
subject:
auto_fill: false
type: unfeaturized
ticket_id:
auto_fill: false
type: unfeaturized
templates:
utter_ask_description:
-
text: "Please provide description to your ticket"
utter_ask_email:
-
text: "Email address of the requester. If no contact exists with this email address in Freshdesk, it will be added as a new contact."
utter_ask_priority:
-
buttons:
-
payload: 1
title: "Low "
-
payload: 3
title: High
-
payload: 4
title: Urgent
text: "Priority of the ticket"
utter_ask_priority-up:
-
buttons:
-
payload: 1
title: "Low "
-
payload: 3
title: High
-
payload: 4
title: Urgent
text: "Please select a priority to update"
utter_ask_status:
-
buttons:
-
payload: 2
title: "Open "
-
payload: 3
title: Pending
-
payload: 5
title: Closed
text: "Status of the ticket"
utter_ask_status-up:
-
buttons:
-
payload: 2
title: "Open "
-
payload: 3
title: Pending
-
payload: 5
title: Closed
text: "Please select a status to update"
utter_ask_subject:
-
text: "subject of ticket"
utter_greet:
-
buttons:
-
payload: /show_all_tickets
title: "Show All Tickets "
-
payload: /create_ticket
title: "Create Tickets"
-
payload: /update_ticket
title: "Update Tickets"
      text: "Hey! I am a virtual assistant. I can help you with Ticket Creation, Update Ticket, Show All Tickets in Freshdesk(Help Desk)"
utter_ask_ticket_id:
-
text: "Please enter ticket id "
|
10_freshdesk_customer_support_bot/src/domain.yml
|
encoding: UTF-8
title: <NAME> # your site title
author: johndelara1 # You :P
description: > # Meta description of Homepage
Neste portfólio você poderá acompanhar meus projetos, análises e estudos relacionado à Ciência de Dados,
aplicação de Machine Learning, entre outros.
baseurl: "/portfolioweb" # the subpath of your site, e.g. /blog
url: "https://johndelara1.github.io/" # the base hostname & protocol for your site, e.g. http://example.com
# SEO PLUGIN
social:
name: <NAME>
links:
#- https://twitter.com/BenBalter
#- https://www.facebook.com/ben.balter
- https://www.linkedin.com/in/johndelara1
#- https://plus.google.com/+BenBalter
- https://github.com/johndelara1
#- https://keybase.io/benbalter
# THEME SETTINGS
navigation: # Navigation links
- {name: 'Início', link: '/'}
- {name: 'Sobre', link: '/sobre'}
- {name: 'Contato', link: '/contato'}
tagline: >
Aqui você encontrará meus projetos, análises e estudos relacionado à Ciência de Dados, aplicação de Machine Learning, entre outros.
hero_img: posts/trajetoria_lg.jpg # Main background image: use the path from /assets/img/ => This will output /assets/img/posts/sleek_lg.jpg
hero_img1: posts/sleek_lg.jpg
email: <EMAIL> # email for form submission
twitter_username: johndelara01 # replace with your_twitter_username or comment to remove link from footer
github_username: johndelara1 # replace with your_github_username or comment to remove link from footer
instagram_username: johndelara1 # replace with your_instagram_username or comment to remove link from footer
disqus:
shortname: john # to enable comments replace with your_disqus_shortname
google_tag_manager: GTM-5Z9JZD7 # Uncomment to enable GTM. Replace GTM-XXXXXXX with your GTM container ID
service_worker: true
# BUILD SETTINGS
markdown: kramdown
highlighter: rouge # or pygments or null
# theme: jekyll-sleek # Uncomment if you're using the theme as a ruby gem
exclude:
- README.md
- node_modules
- script
- package.json
- package-lock.json
- Gemfile
- Gemfile.lock
- vendor
- npm-debug.log
- gulpfile.js
- sleek.jpg
include: ['_pages']
permalink: /:title/
compress_html:
clippings: all
plugins:
- jekyll-sitemap
- jekyll-seo-tag
theme: jekyll-theme-slate
|
_config.yml
|
name: View Fama Functional Profile - v1.1
tooltip: |
View a functional profile generated by Fama
screenshots: []
icon: fama-green.png
#
# define a set of similar apps that might be useful to the user
#
suggestions:
apps:
related:
[]
next:
[]
#
# Configure the display and description of parameters
#
parameters :
func_profile_ref :
ui-name : |
Functional profile
short-hint : |
Select a functional profile
long-hint : |
Select a functional profile generated by Fama
description : |
<p><strong>SUMMARY</strong></p>
<p>Functional profile captures genetic potential of microbial community for biological processes of interest, just like taxonomic profile captures microbial diversity. This app generates visualization of a functional profile generated by the "Run Fama Read Profiling" app. Depending on the type and size of the input read libraries, Fama Read Profiling generates either normalized scores or raw read counts.</p>
<p>For comparison between functions and between samples, raw read counts are normalized by library size, target gene size and predicted average genome size in the sample. For normalization by average genome size, Fama employs MicrobeCensus tool. The normalization metric for single-read libraries is ERPKG (number of reads per kb of effective gene length per genome-equivalent):</p>
    <p>ERPKG = (reads mapped to gene) / (effective gene length in kb) / (genome equivalents),</p>
    <p>where effective gene length = (actual gene length) + (read length) - 2 * (minimal alignment length) + 1,<br/>
genome equivalents = (number of reads in library) / (average genome size)</p>
    <p>The normalization metric for paired-end read libraries is EFPKG (number of fragments per kb of effective gene length per genome-equivalent), which is calculated similarly to ERPKG, but with fragment count instead of read count, and some minor differences in calculation of effective gene length.</p>
<p>If calculation of normalized scores is not possible (because of small number of reads etc.), only read counts and fragment counts are reported for single read and paired-end read libraries, respectively.</p>
<p><strong>INPUT</strong></p>
<p>Functional profile object, output of the "Run Fama Read Profiling" app.</p>
<p><strong>OUTPUT</strong></p>
<p>HTML page with two tabs: "Profile Statistics" and "Profile Heatmap". The "Profile Statistics" tab shows aggregating statistics for rows and columns. The "Profile Heatmap" tab contains interactive 2D heatmap of scores with samples in columns and functions in rows.</p>
<p><strong>Additional resources</strong></p>
<ul><li><a href="https://iseq.lbl.gov/fama/">More info about Fama</a></li>
<li><a href="https://narrative.kbase.us/narrative/74172">Example narrative</a></li></ul>
    <p><strong>Team members who implemented App in KBase:</strong> <a href="https://narrative.kbase.us/#people/kazakov"><NAME></a>. For questions, please <a href="mailto:<EMAIL>?subject=ViewFamaFunctionalProfile">contact us</a>.</p>
publications :
-
display-text: |
<NAME>, <NAME>. Fama: a computational tool for comparative analysis of shotgun metagenomic data. Great Lakes Bioinformatics conference (poster presentation). 2019.
link: https://iseq.lbl.gov/mydocs/fama_glbio2019_poster.pdf
-
pmid: 25402007
display-text: |
<NAME>, <NAME>, <NAME>. Fast and sensitive protein alignment using DIAMOND. Nature Methods. 2015;12: 59-60. doi: 10.1038/nmeth.3176. Publication about third-party program used by Fama.
link: https://pubmed.ncbi.nlm.nih.gov/25402007/
-
pmid: 25853934
display-text: |
<NAME>, <NAME>. Average genome size estimation improves comparative metagenomics and sheds light on the functional ecology of the human microbiome. Genome Biology. 2015;16: 51. doi: 10.1186/s13059-015-0611-7. Publication about third-party program used by Fama.
link: https://www.ncbi.nlm.nih.gov/pmc/articles/pmid/25853934/
-
pmid: 21961884
display-text: |
<NAME>, <NAME> al. Interactive metagenomic visualization in a Web browser. BMC Bioinformatics. 2011;12: 385. doi: 10.1186/1471-2105-12-385. Publication about third-party program used by Fama.
link: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3190407/
|
ui/narrative/methods/view_FamaFunctionalProfile/display.yaml
|
version: 2
workflows:
version: 2
test:
jobs:
- test-full-3.6
- test-full-3.5
- test-minimal-3.6
- test-minimal-3.5
jobs:
# define the "test-full-3.6" job and register it as a template
test-full-3.6: &test-full-template
docker:
# Specify the base docker image
- image: circleci/python:3.6.1
working_directory: ~/repo-test-full-3.6
steps:
- checkout
# <FAST DEPENDENCIES>
# We will install the dependencies and then cache them for very fast CI tests
# To do this we first try to restore the cache, if that doesn't work we simply
# execute the pip command to install the deps. Lastly we cache the state.
# This is very analogous to ub.Cacher where cfgstr is the checksum of the
# requirements.
- restore_cache:
keys:
- v1-dependencies-{{ checksum "requirements.txt" }}
# fallback to using the latest cache if no exact match is found
- v1-dependencies-
- run:
name: install dependencies
command: |
python -m venv venv || virtualenv -v venv # first command is python3 || second is python2
. venv/bin/activate
# The "full" tests install all requirements
pip install -r requirements.txt
- save_cache:
paths:
- ./venv
key: v1-dependencies-{{ checksum "requirements.txt" }}
# </FAST DEPENDENCIES>
# run tests!
- run:
name: run tests
command: |
. venv/bin/activate
python run_tests.py
- store_artifacts:
path: test-reports
destination: test-reports
# DEFINE minimal dependency install and tests
# define the "test-minimal-3.6" job and register it as a template
test-minimal-3.6: &test-minimal-template
docker:
- image: circleci/python:3.6.1
working_directory: ~/repo-test-minimal-3.6
steps:
- checkout
# <FAST DEPENDENCIES>
- restore_cache:
keys:
- v1-dependencies-{{ checksum "requirements/runtime.txt" }}-{{ checksum "requirements/tests.txt" }}
- v1-dependencies-
- run:
name: install dependencies
command: |
python -m venv venv || virtualenv venv # first command is python3 || second is python2
. venv/bin/activate
          # The "minimal" tests install only the runtime and test requirements
pip install -r requirements/runtime.txt
pip install -r requirements/tests.txt
- save_cache:
paths:
- ./venv
key: v1-dependencies-{{ checksum "requirements/runtime.txt" }}-{{ checksum "requirements/tests.txt" }}
# </FAST DEPENDENCIES>
# run tests!
- run:
name: run tests
command: |
. venv/bin/activate
python run_tests.py
- store_artifacts:
path: test-reports
destination: test-reports
  # Define tests for the other python versions using the "test3.6" template
# and indicating what needs to be modified.
test-full-3.5:
<<: *test-full-template
docker:
# All we need to do is change the base docker image so python is the
# version we want we can reuse everything else from the template
- image: circleci/python:3.5
working_directory: ~/repo-test-full-3.5
test-minimal-3.5:
<<: *test-minimal-template
docker:
# All we need to do is change the base docker image so python is the
# version we want we can reuse everything else from the template
- image: circleci/python:3.5
working_directory: ~/repo-test-minimal-3.5
|
.circleci/config.yml
|
---
##################
# Below config params are used to configure contrailctl
# All of below variables are a dict which form individual section of
# contrailctl/*.conf (controller.conf, agent.conf, etc)
# they are just a dictionary form of config files found under contrailctl/*
# i.e below mentioned global_config is a dictionary form of [GLOBAL]
# section in contrailct/*.conf - since this is
# global_config, they are available to all config files where,
# control_config is dictionary form of [CONTROL] section in contrailctl/controller.conf
# webui_config is dictionary form of [WEBUI] section in contrailctl/controller.conf
# for example, if one add configuration "webui_config: {http_listen_port: 8085}",
# [WEBUI] section of contrailctl/controller.conf will be configured as below -
# which infact configure webui to listen 8085 for http
# instead of 8080 which is default
#
# [WEBUI]
# http_listen_port = 8085
#
# REFERENCE: For configuration details, please refer example configurations found in
# https://github.com/Juniper/contrail-docker/tree/master/tools/python-contrailctl/examples/configs/controller.conf
#
##################
#### *** NOTE: Entries in this file can be part of all.yml *** ####
####
# Controller specific config vars - this will be added to contrailctl/controller.conf
####
# controller_config: controller specific global configs - contrailctl/controller.conf
# will have [GLOBAL] section with
# configs added in global_config + controller_config
# controller_config: {}
# control_config: config params for control - these will go to [CONTROL] section
# of contrailctl/controller.conf
# control_config: {}
# dns_config: config params for dns - these will go to [DNS] section of
# contrailctl/controller.conf
# dns_config: {}
# config_config: config params for config - these will go to [CONFIG] section
# of contrailctl/controller.conf
# config_config: {}
# cassandra_config: config params for cassandra - these will go to [CASSANDRA]
# section of contrailctl/controller.conf
# cassandra_config: {}
# api_config: config params for api - these will go to [API] section
# of contrailctl/controller.conf
# api_config: {}
# schema_config: config params for schema - these will go to [SCHEMA] section of
# contrailctl/controller.conf
# schema_config: {}
# device_manager_config: config params for device_manager - these will go to
# [DEVICE_MANAGER] section of contrailctl/controller.conf
# device_manager_config: {}
# svc_monitor_config: config params for svc_monitor - these will go to
# [SVC_MONITOR] section of contrailctl/controller.conf
# svc_monitor_config: {}
# webui_config: config params for webui - these will go to [WEBUI] section
# of contrailctl/controller.conf
# webui_config: {}
|
contrail-ansible/playbooks/inventory/examples/fully_commented_inventory/group_vars/contrail-controllers.yml
|
- term: Mark
slug: mark
desc: |
Lock Seam Rollformer section of ProFabriduct line. We use "Mark" to reference the rollformer and any mechanism that positions the rollformer(s) that produce the seam.
<br>
This machinery produces Large and Small Pittsburgh as well as Snaplock female seams and may also include the forming of the male with button punch depending on purchased options.
cats: [equipment]
- term: Array
slug: array
  desc: A serialized (enumerable) data object type that hosts lists of scalar or enumerable objects. Sometimes capitalized along with other data object types.
cats: [data-types]
- term: AsciiDoc
slug: asciidoc
desc: A programmatic markup language for structuring _and_ formatting all kinds of technical writing for conversion to various rich-text and other formats.
xref:
also: [markup,markdown]
refs:
- text:
href:
- term: flat files
slug: flat-files
desc: Files containing only ASCII or UTF-8 text.
also: [binary-files]
- term: inline semantics
slug: inline-semantics
desc: Stylized text or inserted icons meant to tag specific content with a purpose or kind.
xref: docs/style/asciidoc/semantics
- term: lightweight markup
slug: markup-lightweight
desc: Tag-free plaintext syntax formats like AsciiDoc, Markdown, reStructuredText, lwDITA, and most Wiki markups; often an abstraction of an XML-tagged format (DocBook, HTML, DITA).
cats: [markup]
- term: Markdown
slug: markdown
desc: A lightweight markup that shorthands a limited number of non-semantic HTML elements, which many find to be adequate for technical documentation.
- term: markup
slug: markup
desc: |
Plaintext syntax intended to designate text with formatting, semantic, or functional intent, to be parsed into rich-text or other more-complex output.
also: [lightweight]
- term: data object
slug: data-object
desc: |
An instance of any data type.
A string is a data object of the String data type.
- term: data type
slug: data-type
desc: |
A classification of data objects based on form.
- term: open block (AsciiDoc)
slug: open-block-asciidoc
desc: A non-semantic container element that can be assigned block options and contain other block elements (except other open blocks).
refs:
- href: https://docs.asciidoctor.org/asciidoc/latest/blocks/open-blocks/
- term: scalar
slug: scalar
desc: A simple, non-enumerable data type such as String, Number, or Boolean.
- term: inclusion
slug: inclusion
desc: |
The process of embedding code from another file into the current file for processing.
Known as "`transclusion`" in some languages, invoked in AsciiDoc using the `include::[]` macro and in Liquid using the `{% include %}` tag.
- term: versioning
slug: versioning
desc: |
Versioning is the division of a subject according to how it is
|
_docs/_data/terms.yml
|
---
result: SUCCESS
timestamp: 2016-05-12 00:21:35 UTC
url: http://manhattan.ci.chef.co/job/chefdk-test/164/
trigger_url: http://manhattan.ci.chef.co/job/chefdk-trigger-ad_hoc/269/
duration: 58m13s
runs:
windows-2008r2-i386:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=i386,platform=windows-2008r2,project=chefdk,role=tester/164/
duration: 10m11s
acceptance:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=acceptance,project=chefdk,role=tester/164/
duration: 58m12s
chef_acceptance_timing:
- top-cookbooks:
provision: 50m29s
verify: 28s
destroy: 29s
Total: 54m53s
Run:
Total: 54m53s
- trivial:
provision: 1m29s
verify: 13s
destroy: 7s
Total: 2m4s
Run:
Total: 2m4s
debian-6:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=debian-6,project=chefdk,role=tester/164/
duration: 16m13s
debian-7:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=debian-7,project=chefdk,role=tester/164/
duration: 12m30s
debian-8:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=debian-8,project=chefdk,role=tester/164/
duration: 13m21s
el-6:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=el-6,project=chefdk,role=tester/164/
duration: 27m37s
el-7:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=el-7,project=chefdk,role=tester/164/
duration: 10m57s
mac_os_x-10.10:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=mac_os_x-10.10,project=chefdk,role=tester/164/
duration: 9m48s
mac_os_x-10.11:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=mac_os_x-10.11,project=chefdk,role=tester/164/
duration: 7m54s
mac_os_x-10.9:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=mac_os_x-10.9,project=chefdk,role=tester/164/
duration: 9m58s
ubuntu-12.04:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=ubuntu-12.04,project=chefdk,role=tester/164/
duration: 11m49s
ubuntu-14.04:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=ubuntu-14.04,project=chefdk,role=tester/164/
duration: 15m28s
|
reports/stages/manhattan.ci.chef.co/job/chefdk-test/164.yaml
|
--- !<MODEL>
contentType: "MODEL"
firstIndex: "2018-12-26 12:19"
game: "Unreal Tournament 2004"
name: "CalamityOL"
author: "Unknown"
description: "None"
releaseDate: "2005-08"
attachments:
- type: "IMAGE"
name: "CalamityOL_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Models/C/CalamityOL_shot_2.png"
- type: "IMAGE"
name: "CalamityOL_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Models/C/CalamityOL_shot_1.png"
originalFilename: "ut2k4mdl_calamity.zip"
hash: "0b93071048bc6f65b6b7f2b0f66babcf79413d9d"
fileSize: 8673139
files:
- name: "Calamity.utx"
fileSize: 13427833
hash: "5ab564ccfa70cd64c6a30b27f01522b82af53fac"
- name: "CalamityAnim.ukx"
fileSize: 2107917
hash: "bdb99be10a77918dada29f726b82decda3001974"
- name: "Calamity.ka"
fileSize: 26167
hash: "1fc9553dc24582fd44cd86b88925aacccdee5569"
- name: "CalamityForOnLine.upl"
fileSize: 1638
hash: "7e014ace970a4b6a92b46b317f40d822acc4c592"
- name: "Calamity.upl"
fileSize: 1602
hash: "9ac70538f103db71d2776d7341db64330badadf2"
- name: "SPECIES_Calamity.u"
fileSize: 270508
hash: "cd1c929c8c03fe1f36146799aecd78fcabbd2734"
otherFiles: 1
dependencies: {}
downloads:
- url: "https://gamefront.online/files2/service/thankyou?id=4047719"
main: false
repack: false
state: "MISSING"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament%202004/Models/C/ut2k4mdl_calamity.zip"
main: true
repack: false
state: "OK"
- url: "http://ut2004.ut-files.com/index.php?dir=UT2004Models/PlayerModels/ModelsC/"
main: false
repack: false
state: "OK"
- url: "http://ut2004.ut-files.com/index.php?dir=Skins/SkinsU/&file=ut2k4mdl-calamity.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament%202004/Models/C/0/b/930710/ut2k4mdl_calamity.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament%202004/Models/C/0/b/930710/ut2k4mdl_calamity.zip"
main: false
repack: false
state: "OK"
deleted: false
models:
- "CalamityOL"
skins:
- "CalamityOL"
- "Calamity"
|
content/Unreal Tournament 2004/Models/C/0/b/930710/calamityol_[0b930710].yml
|
---
# Copyright 2016, Rackspace US, Inc.
# Copyright 2018, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
tempest_distro_packages:
- git
- libxslt # required by the python module unittest2
- gcc # required to build tempest plugins (TODO) remove this when the repo server can satisfy the tempest plugin requirements
- iputils
tempest_service_distro_packages:
- openstack-tempest
- python-subunit
- subunit-filters
tempest_devel_distro_packages:
- python3-devel # required for netifaces module for stackviz
tempest_plugin_distro_packages:
- "{{ (tempest_service_available_aodh | bool) | ternary('python-telemetry-tests-tempest', '') }}"
- "{{ (tempest_service_available_barbican | bool) | ternary('python-barbican-tests-tempest', '') }}"
- "{{ (tempest_service_available_ceilometer | bool) | ternary('python-telemetry-tests-tempest', '') }}"
- "{{ (tempest_service_available_cinder | bool) | ternary('python-cinder-tests-tempest', '') }}"
- "{{ (tempest_service_available_congress | bool) | ternary('python-congress-tests-tempest', '') }}"
- "{{ (tempest_service_available_designate | bool) | ternary('python-designate-tests-tempest', '') }}"
- "{{ (tempest_service_available_heat | bool) | ternary('python-heat-tests-tempest', '') }}"
- "{{ tempest_service_available_horizon | bool | ternary('python-horizon-tests-tempest', '') }}"
- "{{ (tempest_service_available_ironic | bool) | ternary('python-ironic-tests-tempest', '') }}"
- python-keystone-tests-tempest
- "{{ (tempest_service_available_magnum | bool) | ternary('python-magnum-tests-tempest', '') }}"
- "{{ (tempest_service_available_manila | bool) | ternary('python-manila-tests-tempest', '') }}"
- "{{ (tempest_service_available_mistral | bool) | ternary('python-mistral-tests-tempest', '') }}"
- "{{ (tempest_service_available_murano | bool) | ternary('python-murano-tests-tempest', '') }}"
- "{{ (tempest_service_available_neutron | bool) | ternary('python-neutron-tests-tempest', '') }}"
- "{{ (tempest_service_available_neutron_vpnaas | bool) | ternary('python-networking-vpnaas-tests', '') }}"
- "{{ (tempest_service_available_novajoin | bool) | ternary('python-novajoin-tests-tempest', '') }}"
- "{{ (tempest_service_available_octavia | bool) | ternary('python-octavia-tests-tempest', '') }}"
- "{{ (tempest_service_available_sahara | bool) | ternary('python-sahara-tests-tempest', '') }}"
- "{{ (tempest_service_available_zaqar | bool) | ternary('python-zaqar-tests-tempest', '') }}"
|
vars/redhat-7.yml
|
uid: azure.mgmt.datafactory.models.LookupActivity
name: LookupActivity
fullName: azure.mgmt.datafactory.models.LookupActivity
module: azure.mgmt.datafactory.models
inheritances:
- azure.mgmt.datafactory.models._models_py3.ExecutionActivity
summary: 'Lookup activity.
All required parameters must be populated in order to send to Azure.'
constructor:
syntax: 'LookupActivity(*, name: str, source, dataset, additional_properties=None,
description: str = None, depends_on=None, user_properties=None, linked_service_name=None,
policy=None, first_row_only=None, **kwargs) -> None'
parameters:
- name: additional_properties
description: 'Unmatched properties from the message are
deserialized this collection'
types:
- <xref:dict>[<xref:str>, <xref:object>]
- name: name
description: Required. Activity name.
types:
- <xref:str>
- name: description
description: Activity description.
types:
- <xref:str>
- name: depends_on
description: Activity depends on condition.
types:
- <xref:list>[<xref:azure.mgmt.datafactory.models.ActivityDependency>]
- name: user_properties
description: Activity user properties.
types:
- <xref:list>[<xref:azure.mgmt.datafactory.models.UserProperty>]
- name: type
description: Required. Constant filled by server.
types:
- <xref:str>
- name: linked_service_name
description: Linked service reference.
types:
- <xref:azure.mgmt.datafactory.models.LinkedServiceReference>
- name: policy
description: Activity policy.
types:
- <xref:azure.mgmt.datafactory.models.ActivityPolicy>
- name: source
description: 'Required. Dataset-specific source properties, same as copy
activity source.'
types:
- <xref:azure.mgmt.datafactory.models.CopySource>
- name: dataset
description: Required. Lookup activity dataset reference.
types:
- <xref:azure.mgmt.datafactory.models.DatasetReference>
- name: first_row_only
description: 'Whether to return first row or all rows. Default
value is true. Type: boolean (or Expression with resultType boolean).'
types:
- <xref:object>
|
docs-ref-autogen/azure-mgmt-datafactory/azure.mgmt.datafactory.models.LookupActivity.yml
|
version: '3.7'
x-app: &app
build:
context: .
dockerfile: ./.docker/Dockerfile
args:
RUBY_VERSION: ${RUBY_VERSION}
BUNDLER_VERSION: ${BUNDLER_VERSION}
NODE_MAJOR: ${NODE_MAJOR}
YARN_VERSION: ${YARN_VERSION}
POSTGRES_MAJOR: ${POSTGRES_MAJOR}
environment: &env
NODE_ENV: ${NODE_ENV}
RAILS_ENV: ${RAILS_ENV}
image: ruby-on-whales:1.0.0
tmpfs:
- /tmp
x-backend: &backend
<<: *app
stdin_open: true
tty: true
volumes:
- .:/app:cached
- rails_cache:/app/tmp/cache
- bundle:/usr/local/bundle
- node_modules:/app/node_modules
- packs:/app/public/packs
- .docker/.psqlrc:/root/.psqlrc:ro
environment:
<<: *env
REDIS_URL: redis://redis:6379/
DATABASE_URL: postgres://postgres:${POSTGRES_PASSWORD}@postgres:5432/
BOOTSNAP_CACHE_DIR: /bundle/bootsnap
WEBPACKER_DEV_SERVER_HOST: webpacker
WEB_CONCURRENCY: 1
HISTFILE: /app/log/.bash_history
PSQL_HISTFILE: /app/log/.psql_history
EDITOR: vi
services:
shell:
<<: *backend
command: /bin/bash
rails:
<<: *backend
command: bash -c "which rails && bundle exec rails server -b 0.0.0.0"
ports:
- 3000:3000
sidekiq:
<<: *backend
command: bash -c "which sidekiq && bundle exec sidekiq -C config/sidekiq.yml"
webpacker:
<<: *app
command: bash -c "test -x /app/bin/webpack-dev-server && /app/bin/webpack-dev-server"
ports:
      - 3035:3035
volumes:
- .:/app:cached
- bundle:/usr/local/bundle
- node_modules:/app/node_modules
- packs:/app/public/packs
environment:
<<: *env
WEBPACKER_DEV_SERVER_HOST: 0.0.0.0
postgres:
image: postgres:${POSTGRES_MAJOR}
volumes:
- postgres:/var/lib/postgresql/data
- .docker/.psqlrc:/root/.psqlrc:ro
- ./log:/root/log:cached
environment:
PSQL_HISTFILE: /root/log/.psql_history
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
ports:
- 5432:5432
redis:
image: redis:5.0-alpine
volumes:
- redis:/data
ports:
- 6379:6379
volumes:
postgres:
redis:
bundle:
node_modules:
rails_cache:
packs:
|
docker-compose.yml
|
fields:
active:
label: olabs.oims::lang.coupons.active
type: switch
span: full
default: true
code:
label: olabs.oims::lang.coupons.code
comment: olabs.oims::lang.coupons.code_comment
span: full
disabled: true
valid_from:
label: olabs.oims::lang.coupons.valid_from
comment: olabs.oims::lang.coupons.valid_to_comment
span: auto
type: datepicker
mode: datetime
valid_to:
label: olabs.oims::lang.coupons.valid_to
comment: olabs.oims::lang.coupons.valid_to_comment
span: auto
type: datepicker
mode: datetime
count:
label: olabs.oims::lang.coupons.count
comment: olabs.oims::lang.coupons.count_comment
type: number
span: auto
default: 0
used_count:
label: olabs.oims::lang.coupons.used_count
type: number
span: auto
disabled: true
value:
label: olabs.oims::lang.coupons.value
type: number
span: auto
type_value:
label: olabs.oims::lang.coupons.type_value
type: dropdown
span: auto
# users
users_section:
label: olabs.oims::lang.coupons.users
type: section
users:
type: partial
path: ~/plugins/olabs/oims/models/coupon/_users_relation.htm
global_section:
label: olabs.oims::lang.coupons.global_section
comment: olabs.oims::lang.coupons.global_section_comment
type: section
minimum_value_basket:
label: olabs.oims::lang.coupons.minimum_value_basket
comment: olabs.oims::lang.coupons.minimum_value_basket_comment
type: number
span: full
default: 0
# categories
categories_section:
label: olabs.oims::lang.coupons.categories
type: section
categories:
type: partial
path: ~/plugins/olabs/oims/models/coupon/_categories_relation.htm
# products
products_section:
label: olabs.oims::lang.coupons.products
type: section
products:
type: partial
path: ~/plugins/olabs/oims/models/coupon/_products_relation.htm
|
plugins/olabs/oims/models/coupon/fields.yaml
|
uid: "com.azure.resourcemanager.mediaservices.models.StreamingPolicyStreamingProtocol"
fullName: "com.azure.resourcemanager.mediaservices.models.StreamingPolicyStreamingProtocol"
name: "StreamingPolicyStreamingProtocol"
nameWithType: "StreamingPolicyStreamingProtocol"
summary: "Defines values for StreamingPolicyStreamingProtocol."
inheritances:
- "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />"
- "<xref href=\"com.azure.core.util.ExpandableStringEnum\" data-throw-if-not-resolved=\"False\" />"
inheritedMembers:
- "com.azure.core.util.ExpandableStringEnum.<T>fromString(java.lang.String,java.lang.Class<T>)"
- "com.azure.core.util.ExpandableStringEnum.<T>values(java.lang.Class<T>)"
- "com.azure.core.util.ExpandableStringEnum.equals(java.lang.Object)"
- "com.azure.core.util.ExpandableStringEnum.hashCode()"
- "com.azure.core.util.ExpandableStringEnum.toString()"
- "java.lang.Object.clone()"
- "java.lang.Object.finalize()"
- "java.lang.Object.getClass()"
- "java.lang.Object.notify()"
- "java.lang.Object.notifyAll()"
- "java.lang.Object.wait()"
- "java.lang.Object.wait(long)"
- "java.lang.Object.wait(long,int)"
syntax: "public final class StreamingPolicyStreamingProtocol extends ExpandableStringEnum<StreamingPolicyStreamingProtocol>"
constructors:
- "com.azure.resourcemanager.mediaservices.models.StreamingPolicyStreamingProtocol.StreamingPolicyStreamingProtocol()"
fields:
- "com.azure.resourcemanager.mediaservices.models.StreamingPolicyStreamingProtocol.DASH"
- "com.azure.resourcemanager.mediaservices.models.StreamingPolicyStreamingProtocol.DOWNLOAD"
- "com.azure.resourcemanager.mediaservices.models.StreamingPolicyStreamingProtocol.HLS"
- "com.azure.resourcemanager.mediaservices.models.StreamingPolicyStreamingProtocol.SMOOTH_STREAMING"
methods:
- "com.azure.resourcemanager.mediaservices.models.StreamingPolicyStreamingProtocol.fromString(java.lang.String)"
- "com.azure.resourcemanager.mediaservices.models.StreamingPolicyStreamingProtocol.values()"
type: "class"
metadata: {}
package: "com.azure.resourcemanager.mediaservices.models"
artifact: com.azure.resourcemanager:azure-resourcemanager-mediaservices:1.0.0-beta.2
|
preview/docs-ref-autogen/com.azure.resourcemanager.mediaservices.models.StreamingPolicyStreamingProtocol.yml
|
version: '3.7'
services:
etcd1:
image: nexcpu/etcd:latest
container_name: etcd1
ports:
- ${ETCD1_CLIENT_PORT}:${ETCD1_CLIENT_PORT}
- ${ETCD1_PEER_PORT}:${ETCD1_PEER_PORT}
entrypoint: bash -c "/opt/etcd/etcd --name etcd1 --initial-advertise-peer-urls https://etcd1:${ETCD1_PEER_PORT}
--listen-peer-urls https://${HOST_IP}:${ETCD1_PEER_PORT} --advertise-client-urls https://${HOST_IP}:${ETCD1_CLIENT_PORT}
--listen-client-urls https://${HOST_IP}:${ETCD1_CLIENT_PORT} --initial-cluster-state new
--initial-cluster etcd1=https://etcd1:${ETCD1_PEER_PORT},etcd2=https://etcd2:${ETCD2_PEER_PORT},etcd3=https://etcd3:${ETCD3_PEER_PORT}
--initial-cluster-token ${CLUSTER_TOKEN}
--auto-tls --peer-auto-tls --peer-client-cert-auth=false --client-cert-auth=false"
etcd2:
image: nexcpu/etcd:latest
container_name: etcd2
ports:
- ${ETCD2_CLIENT_PORT}:${ETCD2_CLIENT_PORT}
- ${ETCD2_PEER_PORT}:${ETCD2_PEER_PORT}
entrypoint: bash -c "/opt/etcd/etcd --name etcd2 --initial-advertise-peer-urls https://etcd2:${ETCD2_PEER_PORT}
--listen-peer-urls https://${HOST_IP}:${ETCD2_PEER_PORT} --advertise-client-urls https://${HOST_IP}:${ETCD2_CLIENT_PORT}
--listen-client-urls https://${HOST_IP}:${ETCD2_CLIENT_PORT} --initial-cluster-state new
--initial-cluster etcd1=https://etcd1:${ETCD1_PEER_PORT},etcd2=https://etcd2:${ETCD2_PEER_PORT},etcd3=https://etcd3:${ETCD3_PEER_PORT}
--initial-cluster-token ${CLUSTER_TOKEN}
--auto-tls --peer-auto-tls --peer-client-cert-auth=false --client-cert-auth=false"
depends_on:
- etcd1
etcd3:
image: nexcpu/etcd:latest
container_name: etcd3
ports:
- ${ETCD3_CLIENT_PORT}:${ETCD3_CLIENT_PORT}
- ${ETCD3_PEER_PORT}:${ETCD3_PEER_PORT}
entrypoint: bash -c "/opt/etcd/etcd --name etcd3 --initial-advertise-peer-urls https://etcd3:${ETCD3_PEER_PORT}
--listen-peer-urls https://${HOST_IP}:${ETCD3_PEER_PORT} --advertise-client-urls https://${HOST_IP}:${ETCD3_CLIENT_PORT}
--listen-client-urls https://${HOST_IP}:${ETCD3_CLIENT_PORT} --initial-cluster-state new
--initial-cluster etcd1=https://etcd1:${ETCD1_PEER_PORT},etcd2=https://etcd2:${ETCD2_PEER_PORT},etcd3=https://etcd3:${ETCD3_PEER_PORT}
--initial-cluster-token ${CLUSTER_TOKEN}
--auto-tls --peer-auto-tls --peer-client-cert-auth=false --client-cert-auth=false"
depends_on:
- etcd1
- etcd2
|
docker/docker-compose.yml
|
items:
- uid: "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithConsumerGroup"
id: "WithConsumerGroup"
parent: "com.microsoft.azure.management.eventhub"
children:
- "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithConsumerGroup.withNewConsumerGroup(java.lang.String)"
- "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithConsumerGroup.withNewConsumerGroup(java.lang.String,java.lang.String)"
langs:
- "java"
name: "EventHub.DefinitionStages.WithConsumerGroup"
nameWithType: "EventHub.DefinitionStages.WithConsumerGroup"
fullName: "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithConsumerGroup"
type: "Interface"
package: "com.microsoft.azure.management.eventhub"
summary: "The stage of the event hub definition allowing to add consumer group for the event hub."
syntax:
content: "public static interface EventHub.DefinitionStages.WithConsumerGroup"
- uid: "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithConsumerGroup.withNewConsumerGroup(java.lang.String)"
id: "withNewConsumerGroup(java.lang.String)"
parent: "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithConsumerGroup"
langs:
- "java"
name: "withNewConsumerGroup(String name)"
nameWithType: "EventHub.DefinitionStages.WithConsumerGroup.withNewConsumerGroup(String name)"
fullName: "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithConsumerGroup.withNewConsumerGroup(String name)"
overload: "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithConsumerGroup.withNewConsumerGroup*"
type: "Method"
package: "com.microsoft.azure.management.eventhub"
summary: "Specifies that a new consumer group should be created for the event hub."
syntax:
content: "public abstract EventHub.DefinitionStages.WithCreate withNewConsumerGroup(String name)"
parameters:
- id: "name"
type: "java.lang.String"
description: "consumer group name"
return:
type: "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithCreate"
description: "next stage of the event hub definition"
- uid: "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithConsumerGroup.withNewConsumerGroup(java.lang.String,java.lang.String)"
id: "withNewConsumerGroup(java.lang.String,java.lang.String)"
parent: "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithConsumerGroup"
langs:
- "java"
name: "withNewConsumerGroup(String name, String metadata)"
nameWithType: "EventHub.DefinitionStages.WithConsumerGroup.withNewConsumerGroup(String name, String metadata)"
fullName: "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithConsumerGroup.withNewConsumerGroup(String name, String metadata)"
overload: "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithConsumerGroup.withNewConsumerGroup*"
type: "Method"
package: "com.microsoft.azure.management.eventhub"
summary: "Specifies that a new consumer group should be created for the event hub."
syntax:
content: "public abstract EventHub.DefinitionStages.WithCreate withNewConsumerGroup(String name, String metadata)"
parameters:
- id: "name"
type: "java.lang.String"
description: "consumer group name"
- id: "metadata"
type: "java.lang.String"
description: "consumer group metadata"
return:
type: "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithCreate"
description: "next stage of the event hub definition"
references:
- uid: "java.lang.String"
spec.java:
- uid: "java.lang.String"
name: "String"
fullName: "java.lang.String"
- uid: "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithCreate"
name: "EventHub.DefinitionStages.WithCreate"
nameWithType: "EventHub.DefinitionStages.WithCreate"
fullName: "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithCreate"
- uid: "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithConsumerGroup.withNewConsumerGroup*"
name: "withNewConsumerGroup"
nameWithType: "EventHub.DefinitionStages.WithConsumerGroup.withNewConsumerGroup"
fullName: "com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithConsumerGroup.withNewConsumerGroup"
package: "com.microsoft.azure.management.eventhub"
|
docs-ref-autogen/com.microsoft.azure.management.eventhub.EventHub.DefinitionStages.WithConsumerGroup.yml
|
{% set version = "2.0.0" %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-reactablefmtr
version: {{ version|replace("-", "_") }}
source:
url:
- {{ cran_mirror }}/src/contrib/reactablefmtr_{{ version }}.tar.gz
- {{ cran_mirror }}/src/contrib/Archive/reactablefmtr/reactablefmtr_{{ version }}.tar.gz
sha256: 24ddff264ae3e0cb902c53b87a629eb9d3e1e06cb070fa9cd04a0755912406e5
build:
merge_build_host: true # [win]
number: 0
noarch: generic
rpaths:
- lib/R/lib/
- lib/
requirements:
build:
- {{ posix }}zip # [win]
- cross-r-base {{ r_base }} # [build_platform != target_platform]
host:
- r-base
- r-dplyr
- r-htmltools
- r-htmlwidgets >=1.5.3
- r-magrittr
- r-purrr
- r-reactable >=0.2.0
- r-sass >=0.4.0
- r-shiny
- r-stringr >=1.4.0
- r-tippy >=0.1.0
- r-webshot
run:
- r-base
- r-dplyr
- r-htmltools
- r-htmlwidgets >=1.5.3
- r-magrittr
- r-purrr
- r-reactable >=0.2.0
- r-sass >=0.4.0
- r-shiny
- r-stringr >=1.4.0
- r-tippy >=0.1.0
- r-webshot
test:
commands:
- $R -e "library('reactablefmtr')" # [not win]
- "\"%R%\" -e \"library('reactablefmtr')\"" # [win]
about:
home: https://kcuilla.github.io/reactablefmtr/, https://github.com/kcuilla/reactablefmtr
license: MIT
summary: Enhance the styling of interactive reactable tables with easy-to-use and highly-customizable functions. Apply conditional formatting to cells with data bars, color scales, and icon sets. Utilize custom table themes inspired by popular websites and bootstrap themes. Increase the portability and reproducibility
of reactable tables by embedding images from the web directly into cells. Save the final table output as a static image or interactive file (note this feature requires the 'webshot2' package which can be downloaded from <https://github.com/rstudio/webshot2>).
license_family: MIT
license_file:
- {{ environ["PREFIX"] }}/lib/R/share/licenses/MIT
- LICENSE
extra:
recipe-maintainers:
- conda-forge/r
|
recipe/meta.yaml
|
- hosts: localhost
gather_facts: true
tasks:
- name: test connection
ping:
- debug:
var: ansible_facts['os_family']
- debug:
var: ansible_kernel
- debug:
var: ansible_python_interpreter
- debug:
msg: "Need kernel version > 4.19.121 for gpu wsl2 gpu acceleration {{ ansible_kernel is version('4.19.121', '>=') }}"
#- hosts: windows
# tasks:
# - name: Remove Appx packages (and their hindering file assocations)
# win_shell: |
# Get-AppxPackage -name "Microsoft.ZuneMusic" | Remove-AppxPackage
# Get-AppxPackage -name "Microsoft.ZuneVideo" | Remove-AppxPackage
- hosts: localhost
roles:
- role: wsl2
become: true
when:
- ansible_facts['os_family'] == 'Debian'
- '"Microsoft" in ansible_kernel'
- hosts: localhost
vars_prompt:
- name: mas_email
prompt: "What is your Apple ID?"
private: no
- name: mas_password
prompt: "What is your password?"
vars:
- mas_installed_apps:
- { id: 411643860, name: "DaisyDisk (4.3.2)" }
- { id: 585829637, name: "Todoist: To-Do List & Tasks (7.5)"}
- { id: 450527929, name: "djay - DJ App & AI Mixer (3.6)"}
- { id: 1278508951, name: "Trello" }
- { id: 803453959, name: "Slack"}
- { id: 1147396723, name: "WhatsApp Desktop"}
- { id: 497799835, name: 'Xcode (12.0.1)'}
- homebrew_cask_apps:
# - font-anonymice-powerline
# - font-consolas-for-powerline
- font-dejavu-sans-mono-for-powerline
# - font-droid-sans-mono-for-powerline
# - font-fira-mono-for-powerline
# - font-inconsolata-dz-for-powerline
# - font-inconsolata-for-powerline
# - font-inconsolata-for-powerline-bold
# - font-inconsolata-g-for-powerline
# - font-liberation-mono-for-powerline
#- font-menlo-for-powerline
# - font-meslo-for-powerline
# - font-monofur-for-powerline
# - font-noto-mono-for-powerline
# - font-powerline-symbols
# - font-roboto-mono-for-powerline
# - font-source-code-pro-for-powerline
# - font-ubuntu-mono-derivative-powerline
- google-chrome
- visual-studio-code
- iterm2
- spectacle
- alfred
- istat-menus
- hyperswitch
- paintcode
- docker
- lens
- 1password-cli
- zoom
- spotify
- 1password
- vlc
- private-internet-access
- github
- homebrew_installed_packages:
- cocoapods
- neovim
- pv
- jq
- fastlane
- powerline-go
- nvm
- svn
- fortune
- cowsay
- gh
- awscli
- homebrew_taps:
- homebrew/cask-fonts
- homebrew/cask
- homebrew/core
# vars:
# mas_installed_apps:
# - { id: 497799835, name: "Xcode (8.1)" }
# command: check if commandline tools are present "xcode-select -p 1>/dev/null;echo $?" 0 if installed else 2
tasks:
- name: Symlinks
file:
src: '{{ item.src }}'
dest: '{{ item.dest }}'
state: link
with_items:
# - { src: "~/dotfiles/.ansible", dest: "~/.ansible" }
- { src: "~/dotfiles/.oh-my-zsh", dest: "~/.oh-my-zsh" }
- { src: "~/dotfiles/.zshrc", dest: "~/.zshrc" }
- { src: "~/dotfiles/.vscode", dest: "~/Library/Application Support/Code/User/settings.json"}
- name: Include the some_role role
include_role:
name: "{{ item }}"
loop:
- geerlingguy.homebrew
- geerlingguy.mas
when: "ansible_facts['os_family'] == 'Darwin'"
# - name: nvm current
# command: "nvm current"
# register: nvm_current
# - debug:
# vars: nvm_current
# roles:
# - role: rolegeerlingguy.homebrew
# when:
# - ansible_facts['os_family'] == 'Darwin'
# - role: geerlingguy.mas
# when:
# - ansible_facts['os_family'] == 'Darwin'
# - role: macosx
# when:
# - ansible_facts['os_family'] == 'Darwin'
#Depending on your platform, the user settings file is located here:
# Windows %APPDATA%\Code\User\settings.json
# macOS $HOME/Library/Application Support/Code/User/settings.json
# Linux $HOME/.config/Code/User/settings.json
# The workspace settings file is located under the .vscode folder in your root folder.
|
playbook/site.yml
|
name: '&bUncommon'
weight: 20
value: 1000
inheritance:
- 'common'
items:
chiseled_stone_bricks:
name: CHISELED_STONE_BRICKS
amount: 32
value: 90
slime_block:
name: SLIME_BLOCK
amount: 4
value: 100
magma_block:
name: MAGMA_BLOCK
amount: 32
value: 100
obsidian:
name: OBSIDIAN
amount: 8
value: 160
bookshelf:
name: BOOKSHELF
amount: 5
value: 100
end_rod:
name: END_ROD
amount: 8
value: 80
nether_wart:
name: NETHER_WART
amount: 16
value: 60
magma_cream:
name: MAGMA_CREAM
amount: 64
value: 50
ghast_tear:
name: GHAST_TEAR
amount: 8
value: 100
glistering_melon_slice:
name: GLISTERING_MELON_SLICE
amount: 8
value: 20
blaze_powder:
name: BLAZE_POWDER
amount: 8
value: 60
brewing_stand:
name: BREWING_STAND
value: 40
cauldron:
name: CAULDRON
value: 70
pufferfish:
name: PUFFERFISH
amount: 8
value: 80
white_shulker_box:
name: WHITE_SHULKER_BOX
value: 20
weight: 6
orange_shulker_box:
name: ORANGE_SHULKER_BOX
value: 20
weight: 6
magenta_shulker_box:
name: MAGENTA_SHULKER_BOX
value: 20
weight: 6
light_blue_shulker_box:
name: LIGHT_BLUE_SHULKER_BOX
value: 20
weight: 6
yellow_shulker_box:
name: YELLOW_SHULKER_BOX
value: 20
weight: 6
lime_shulker_box:
name: LIME_SHULKER_BOX
value: 20
weight: 6
pink_shulker_box:
name: PINK_SHULKER_BOX
value: 20
weight: 6
gray_shulker_box:
name: GRAY_SHULKER_BOX
value: 20
weight: 6
light_gray_shulker_box:
name: LIGHT_GRAY_SHULKER_BOX
value: 20
weight: 6
cyan_shulker_box:
name: CYAN_SHULKER_BOX
value: 20
weight: 6
purple_shulker_box:
name: PURPLE_SHULKER_BOX
value: 20
weight: 6
blue_shulker_box:
name: BLUE_SHULKER_BOX
value: 20
weight: 6
brown_shulker_box:
name: BROWN_SHULKER_BOX
value: 20
weight: 6
green_shulker_box:
name: GREEN_SHULKER_BOX
value: 20
weight: 6
red_shulker_box:
name: RED_SHULKER_BOX
value: 20
weight: 6
black_shulker_box:
name: BLACK_SHULKER_BOX
value: 20
weight: 6
piston:
name: PISTON
value: 22
elytra:
name: ELYTRA
value: 200
ender_pearl:
name: ENDER_PEARL
amount: 4
value: 120
shield:
name: SHIELD
amount: 1
value: 14
iron_helmet:
name: IRON_HELMET
value: 50
iron_chestplate:
name: IRON_CHESTPLATE
value: 80
iron_leggings:
name: IRON_LEGGINGS
value: 70
iron_boots:
name: IRON_BOOTS
value: 40
iron_ingot:
name: IRON_INGOT
amount: 8
value: 80
gold_ingot:
name: GOLD_INGOT
amount: 8
value: 80
|
src/main/resources/treasure/uncommon.yml
|
version: '3'
services:
dsw_server:
image: datastewardshipwizard/wizard-server:2.5.0
restart: always
hostname: dsw_server
depends_on:
- rabbitmq
- mongo
volumes:
- ./dsw-server/application.yml:/application/engine-wizard/config/application.yml:ro
- ./dsw-server/integration.yml:/application/engine-wizard/config/integration.yml:ro
dsw_client:
image: datastewardshipwizard/wizard-client:2.5.0
restart: always
hostname: dsw_client
environment:
# (!) Change API_URL
# API_URL: <dsw-url>
API_URL: https://api.dsw.vodan.fairdatapoint.org
volumes:
- ./dsw-client/variables.scss:/src/scss/customizations/_variables.scss:ro
- ./dsw-client/assets:/usr/share/nginx/html/assets:ro
- ./dsw-client/configuration/provisioning.json:/configuration/provisioning.json:ro
- ./dsw-client/favicon.ico:/usr/share/nginx/html/favicon.ico:ro
docworker:
image: datastewardshipwizard/document-worker:2.5.0
restart: always
depends_on:
- rabbitmq
volumes:
- ./docworker/config.yml:/app/config.yml:ro
mongo:
image: mongo:4.2.3
restart: always
ports:
- 127.0.0.1:27017:27017
environment:
MONGO_INITDB_DATABASE: wizard
volumes:
- ./mongo/data:/data/db
- ./mongo/init-mongo.js:/docker-entrypoint-initdb.d/init-mongo.js:ro
rabbitmq:
image: rabbitmq:3.8.2-management
restart: always
environment:
RABBITMQ_DEFAULT_USER: guest
RABBITMQ_DEFAULT_PASS: guest
fdp:
image: fairdata/fairdatapoint:1.6.0
restart: always
hostname: fdp
volumes:
- ./fdp/application.yml:/fdp/application.yml:ro
fdp_client:
image: fairdata/fairdatapoint-client:1.6.0
restart: always
hostname: fdp_client
environment:
- FDP_HOST=fdp
volumes:
- ./fdp-client/variables.scss:/src/scss/custom/_variables.scss:ro
- ./fdp-client/assets:/usr/share/nginx/html/assets:ro
- ./fdp-client/favicon.ico:/usr/share/nginx/html/favicon.ico:ro
blazegraph:
image: metaphacts/blazegraph-basic:2.2.0-20160908.003514-6
restart: always
volumes:
- ./blazegraph:/blazegraph-data
agraph:
image: franzinc/agraph:v7.0.1
restart: always
ports:
- 10000-10035:10000-10035
- 8443:8443
hostname: agraph
shm_size: '1gb'
volumes:
- ./allegrograph/data:/agraph/data/
- ./allegrograph/agraph.cfg:/agraph/etc/agraph.cfg
- ./allegrograph/cert.pem:/certs/cert.pem
json_server:
image: clue/json-server
restart: always
hostname: json_server
volumes:
- ./json-server/db.json:/data/db.json:ro
submission_service:
image: datastewardshipwizard/triple-store-submission-service:1.1.0
restart: always
hostname: submission_service
volumes:
- ./submission-service/config.yml:/app/config.yml:ro
proxy:
image: nginx:1.17
restart: always
ports:
- 80:80
- 443:443
volumes:
- ./proxy/nginx:/etc/nginx:ro
- ./proxy/letsencrypt:/var/www/letsencrypt:ro
- /etc/letsencrypt:/etc/letsencrypt:ro
|
docker-compose.yml
|
LGC_Flask4:
Item_Information:
Item_Name: "&6&oUndying Rage"
Item_Type: 437
Sounds_Acquired: ENTITY_ITEM_PICKUP-1.0-1-0
Remove_Unused_Tag: true
Shooting:
Right_Click_To_Shoot: true
Cancel_Left_Click_Block_Damage: true
Cancel_Right_Click_Interactions: true
Delay_Between_Shots: 300
Recoil_Amount: 0
Projectile_Amount: 1
Projectile_Type: energy
Projectile_Subtype: 2-2-NONE-0
Projectile_Speed: 1
Projectile_Damage: 0
Bullet_Spread: 1.0
Reset_Fall_Distance: true
Sounds_Shoot: ENTITY_HOSTILE_SWIM-1-1-0,ENTITY_GHAST_SHOOT-1-2-0,ENTITY_GHAST_SHOOT-1-1-0,ENTITY_GHAST_SHOOT-1-0-0
Potion_Effects:
Activation: shoot
Potion_Effect_Shooter: INCREASE_DAMAGE-200-200,REGENERATION-1-1,ABSORPTION-1-1
Abilities:
Reset_Hit_Cooldown: true
No_Fall_Damage: true
LGC_Flask5:
Item_Information:
Item_Name: "&6&oStoneheart Brew"
Item_Type: 437
Sounds_Acquired: ENTITY_ITEM_PICKUP-1.0-1-0
Remove_Unused_Tag: true
Shooting:
Right_Click_To_Shoot: true
Cancel_Left_Click_Block_Damage: true
Cancel_Right_Click_Interactions: true
Delay_Between_Shots: 400
Recoil_Amount: 0
Projectile_Amount: 1
Projectile_Type: energy
Projectile_Subtype: 2-2-NONE-0
Projectile_Speed: 1
Projectile_Damage: 0
Bullet_Spread: 1.0
Reset_Fall_Distance: true
Sounds_Shoot: ENTITY_HOSTILE_SWIM-1-1-0,BLOCK_ANVIL_LAND-1-2-0,BLOCK_ANVIL_LAND-1-1-0,BLOCK_ANVIL_LAND-1-0-0
Potion_Effects:
Activation: shoot
Potion_Effect_Shooter: DAMAGE_RESISTANCE-200-5,SLOW-60-10,WEAKNESS-60-10
Abilities:
Reset_Hit_Cooldown: true
No_Fall_Damage: true
LGC_Flask11:
Item_Information:
Item_Name: "&6&oFlask of Lifeforce"
Item_Type: 437
Sounds_Acquired: ENTITY_ITEM_PICKUP-1.0-1-0
Remove_Unused_Tag: true
Shooting:
Right_Click_To_Shoot: true
Cancel_Left_Click_Block_Damage: true
Cancel_Right_Click_Interactions: true
Delay_Between_Shots: 1200
Recoil_Amount: 0
Projectile_Amount: 1
Projectile_Type: energy
Projectile_Subtype: 2-2-NONE-0
Projectile_Speed: 1
Projectile_Damage: 0
Bullet_Spread: 1.0
Reset_Fall_Distance: true
Sounds_Shoot: ENTITY_HOSTILE_SWIM-0.8-1-0,ENTITY_HOSTILE_SWIM-1-2-0,ENTITY_HOSTILE_SWIM-0.5-0-0
Potion_Effects:
Activation: shoot
Potion_Effect_Shooter: HEALTH_BOOST-6000-5,REGENERATION-200-10
Abilities:
Reset_Hit_Cooldown: true
No_Fall_Damage: true
LGC_VFlask_L1:
Item_Information:
Item_Name: "&6&oSunfoil Brew"
Item_Type: 437
Sounds_Acquired: ENTITY_ITEM_PICKUP-1.0-1-0
Remove_Unused_Tag: true
Shooting:
Right_Click_To_Shoot: true
Cancel_Left_Click_Block_Damage: true
Cancel_Right_Click_Interactions: true
Delay_Between_Shots: 600
Recoil_Amount: 0
Projectile_Amount: 1
Projectile_Type: energy
Projectile_Subtype: 2-2-NONE-0
Projectile_Speed: 1
Projectile_Damage: 0
Bullet_Spread: 1.0
Reset_Fall_Distance: true
Sounds_Shoot: ENTITY_HOSTILE_SWIM-1-1-0,ENTITY_WITHER_DEATH-0.5-1-0,ENTITY_ENDERDRAGON_GROWL-0.2-1-500,ENTITY_ENDERDRAGON_GROWL-0.2-2-500
Potion_Effects:
Activation: shoot
Potion_Effect_Shooter: ABSORPTION-400-50,DAMAGE_RESISTANCE-1-1
Abilities:
Reset_Hit_Cooldown: true
No_Fall_Damage: true
|
CrackShot/weapons/CS_Legacy_Flasks.yml
|
name: Test project
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
test:
strategy:
matrix:
python-version: [3.6, 3.7, 3.8]
os: ["ubuntu-latest", "macos-latest"]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
git clone https://github.com/martinus/robin-hood-hashing ripser/robinhood
python -m pip install --upgrade pip
pip install flake8 pytest-cov Cython
pip install -e ".[testing]"
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: |
pytest --cov ripser
- name: Upload coverage results
run: |
bash <(curl -s https://codecov.io/bash)
test-conda:
runs-on: macos-latest
strategy:
fail-fast: false
matrix:
python-version: [3.6, 3.7, 3.8]
steps:
- uses: actions/checkout@v2
- uses: conda-incubator/setup-miniconda@v2
with:
auto-update-conda: true
python-version: ${{ matrix.python-version }}
activate-environment: test
- name: Set up conda
shell: bash -l {0}
run: |
conda install git numpy pandas scipy matplotlib pytest cython
git clone https://github.com/martinus/robin-hood-hashing ripser/robinhood
python setup.py install
- name: Test with pytest
shell: bash -l {0}
run: |
pytest .
test-on-windows:
runs-on: windows-latest
strategy:
fail-fast: false
matrix:
python-version: [3.6, 3.7, 3.8]
steps:
- uses: actions/checkout@v2
- uses: conda-incubator/setup-miniconda@v2
with:
auto-update-conda: true
python-version: ${{ matrix.python-version }}
activate-environment: test
- name: Set up conda
shell: bash -l {0}
run: |
echo -e "[build]\ncompiler=msvc\n" >> setup.cfg
conda install git numpy pandas scipy matplotlib pytest libpython cython
git clone https://github.com/martinus/robin-hood-hashing ripser/robinhood
python setup.py install
- name: Test with pytest
shell: bash -l {0}
run: |
pytest .
|
.github/workflows/python-app.yml
|
name: 'spellbook'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'spellbook'
# These configurations specify where dbt should look for different types of files.
# You don't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
target-path: "target" # directory which will store compiled SQL files
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
models:
spellbook:
nft:
+schema: nft
+materialized: view
uniswap:
+schema: uniswap
+materialized: view
ethereum:
+schema: uniswap_ethereum
+materialized: view
opensea:
+schema: opensea
+materialized: view
ethereum:
+schema: opensea_ethereum
+materialized: view
solana:
+schema: opensea_solana
+materialized: view
magiceden:
+schema: magiceden
+materialized: view
solana:
+schema: magiceden_solana
+materialized: view
balances:
+schema: balances
+materialized: view
ethereum:
+schema: balances_ethereum
+materialized: view
tokens:
+schema: tokens
+materialized: view
ethereum:
+schema: tokens_ethereum
+materialized: view
transfers:
+schema: transfers
+materialized: view
ethereum:
+schema: transfers_ethereum
+materialized: view
seeds:
spellbook:
balances:
ethereum:
+enabled: true
+schema: test_data
balances_ethereum_erc20_latest_entries:
+column_types:
timestamp: timestamp
wallet_address: string
token_address: string
amount_raw: string
balances_ethereum_erc20_specific_wallet:
+column_types:
timestamp: timestamp
wallet_address: string
token_address: string
token_symbol: string
amount_raw: string
amount: string
opensea:
ethereum:
+enabled: true
+schema: test_data
opensea_ethereum_trades_postgres:
+column_types:
evt_block_times: timestamp
evt_tx_hash: string
price: string
solana:
+enabled: true
+schema: test_data
opensea_solana_trades_postgres:
+column_types:
block_time: timestamp
tx_hash: string
amount: string
magiceden:
solana:
+enabled: true
+schema: test_data
magiceden_solana_trades_postgres:
+column_types:
block_time: timestamp
tx_hash: string
amount: string
|
spellbook/dbt_project.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-11-01 21:17"
variationOf: "fc5787c4ded3831520bd6a1cc8c29ff4e6605904"
game: "Unreal Tournament 2004"
name: "DM-Isarku"
author: "Pipux"
description: "Serpentines of Isarku mountain range surely lead those who dont pay\
\ enough attention into their certain deaths."
releaseDate: "2006-08"
attachments:
- type: "IMAGE"
name: "dm-isarku_shot_90b60fdd_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/I/dm-isarku_shot_90b60fdd_1.png"
- type: "IMAGE"
name: "dm-isarku_shot_90b60fdd_3.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/I/dm-isarku_shot_90b60fdd_3.png"
- type: "IMAGE"
name: "dm-isarku_shot_90b60fdd_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/I/dm-isarku_shot_90b60fdd_2.png"
- type: "IMAGE"
name: "dm-isarku_shot_90b60fdd_4.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/I/dm-isarku_shot_90b60fdd_4.png"
originalFilename: "dm-isarku.rar"
hash: "90b60fdd6d1475914e7344727d3606cf317d4fc8"
fileSize: 1964398
files:
- name: "DM-Isarku.ut2"
fileSize: 5866653
hash: "5457fb205299e0e068769408a9ecc03d30ee46c3"
otherFiles: 4
dependencies: {}
downloads:
- url: "https://gamebanana.com/maps/download/15680"
main: false
repack: false
state: "OK"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament%202004/Maps/DeathMatch/I/dm-isarku.rar"
main: true
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament%202004/Maps/DeathMatch/I/9/0/b60fdd/dm-isarku.rar"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament%202004/Maps/DeathMatch/I/9/0/b60fdd/dm-isarku.rar"
main: false
repack: false
state: "OK"
deleted: false
gametype: "DeathMatch"
title: "Isarku"
playerCount: "4-6"
themes:
Natural: 1.0
bots: true
|
content/Unreal Tournament 2004/Maps/DeathMatch/I/9/0/b60fdd/dm-isarku_[90b60fdd].yml
|
image: "ruby:2.4.1"
# Cache gems in between builds (Disabled)
# cache:
# paths:
# - vendor/bundle
before_script:
- ruby -v # Print out ruby version for debugging
- gem install bundler --no-ri --no-rdoc # Bundler is not installed with the image
- bundle install -j $(nproc) --path vendor # Install dependencies into ./vendor/ruby
stages:
- Build
- Package
Build fluentd:
stage: Build
script:
- bundle exec rake build
- mv pkg/*gem .
- echo FLUENTD_GEM=$CI_PROJECT_URL/builds/$CI_JOB_ID/artifacts/download >> release.env
artifacts:
name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
untracked: true
expire_in: 4 weeks
paths:
- "*gem"
- "vendor"
- Gemfile.lock
- release.env
Container:
stage: Package
script:
- apt update && apt -y install jq
- cat release.env
- source release.env
- echo $FLUENTD_GEM
- PROJECT_PIPELINE=$(curl -X POST -F token=$FLUENTD_DOCKER_CI_JOB_TOKEN -F ref=ci-master -F "variables[FLUENTD_GEM]=$FLUENTD_GEM" -F "variables[Dockerfile]=v0.12/debian" https://gitlab.cncf.ci/api/v4/projects/32/trigger/pipeline | jq '.id')
- until [ "$JOB_STATUS" == '"success"' ]; do JOB_STATUS="$(curl -s --header "PRIVATE-TOKEN:${TOKEN}" "https://gitlab.cncf.ci/api/v4/projects/32/pipelines/${PROJECT_PIPELINE}/jobs?scope=success" | jq '.[] | select(.name=="Container") | .status')" ; sleep 0.5 ; done ; echo 'Container is Ready'
- PROJECT_JOB=$(curl --header "PRIVATE-TOKEN:${TOKEN}" "https://gitlab.cncf.ci/api/v4/projects/32/pipelines/${PROJECT_PIPELINE}/jobs?scope=success" | jq '.[] | select(.name=="Container") | .id')
- export BASE_URL=${BASE_URL:-$(echo $CI_PROJECT_URL | cut -d'/' -f1-3)}
- curl -s -o fluentd.env -L "$BASE_URL/fluent/fluentd-docker-image/-/jobs/${PROJECT_JOB}/artifacts/raw/release.env"
- cat fluentd.env
artifacts:
name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
untracked: true
expire_in: 4 weeks
paths:
- fluentd.env
|
.gitlab-ci.yml
|
---
- name: Detect if Docker is Quarantined
ignore_errors: yes
changed_when: false
command: xattr -p com.apple.quarantine /Applications/Docker.app
register: quarantined_docker
- name: Remove Docker from MacOS Quarantine
command: xattr -d -r com.apple.quarantine /Applications/Docker.app
when: quarantined_docker is success
# xattr:
# path: /Applications/Docker.app
# key: com.apple.quarantine
# state: absent
- name: Detect if Docker is already running
ignore_errors: yes
changed_when: false
command: pgrep -xq -- "Docker"
register: running_docker
- name: Stop Docker gracefully
when: running_docker is success
command: osascript -e 'quit app "Docker"'
- name: Install Docker Networking Components - PrivilegedHelperTools
become: true
copy:
src: "{{ docker_app_paths_for_privilegedhelpertools.src }}"
dest: "{{ docker_app_paths_for_privilegedhelpertools.dest }}"
remote_src: yes
- name: Install Docker Networking Components - PrivilegedHelperTools Ownership
become: true
file:
path: "{{ docker_app_paths_for_privilegedhelpertools.dest }}"
owner: root
group: wheel
mode: 0544
- name: Install Docker Networking Components - LaunchDaemons
become: true
copy:
src: "{{ docker_app_paths_for_launchdaemons.src }}"
dest: "{{ docker_app_paths_for_launchdaemons.dest }}"
remote_src: yes
- name: Install Docker Networking Components - LaunchDaemons Ownership
become: true
file:
path: "{{ docker_app_paths_for_launchdaemons.dest }}"
owner: root
group: wheel
mode: 0644
- name: Launch Docker Networking Components
become: true
changed_when: false
command: launchctl load "{{ docker_app_paths_for_launchdaemons.dest }}"
- name: Detect if Docker is running
ignore_errors: yes
changed_when: false
command: pgrep -xq -- "Docker"
register: running_docker
- name: Launch Docker
when: running_docker is failed
command: open --background -a Docker
|
tasks/main.yml
|
- name: app_state
type: group
release: beta
description: >
application state
fields:
- name: start_date
type: date
description: >
The start date
- name: end_date
type: date
description: >
The end date
- name: requests_count.sum
type: float
description: >
Request count
- name: requests_failed.sum
type: float
description: >
Request failed count
- name: users_count.unique
type: float
description: >
User count
- name: sessions_count.unique
type: float
description: >
Session count
- name: users_authenticated.unique
type: float
description: >
Authenticated users count
- name: browser_timings_network_duration.avg
type: float
description: >
Browser timings network duration
- name: browser_timings_send_duration.avg
type: float
description: >
Browser timings send duration
- name: browser_timings_receive_uration.avg
type: float
description: >
Browser timings receive duration
- name: browser_timings_processing_duration.avg
type: float
description: >
Browser timings processing duration
- name: browser_timings_total_duration.avg
type: float
description: >
Browser timings total duration
- name: exceptions_count.sum
type: float
description: >
Exception count
- name: exceptions_browser.sum
type: float
description: >
Exception count at browser level
- name: exceptions_server.sum
type: float
description: >
Exception count at server level
- name: performance_counters_memory_available_bytes.avg
type: float
description: >
Performance counters memory available bytes
- name: performance_counters_process_private_bytes.avg
type: float
description: >
Performance counters process private bytes
- name: performance_counters_process_cpu_percentage_total.avg
type: float
description: >
Performance counters process cpu percentage total
- name: performance_counters_process_cpu_percentage.avg
type: float
description: >
Performance counters process cpu percentage
- name: performance_counters_processiobytes_per_second.avg
type: float
description: >
Performance counters process IO bytes per second
|
x-pack/metricbeat/module/azure/app_state/_meta/fields.yml
|
logging.level.root: INFO
spring:
security:
user:
user: orcatag
password: <PASSWORD>
jpa:
show-sql: true
# servlet:
# multipart:
# max-file-size: 10Mb
# max-request-size: 10Mb
s3:
rootBucket: "orcatag-dev"
endpointUrl: https://s3.eu-west-1.amazonaws.com
accessKey: ${AWS_ACCESS_KEY_ID}
secretKey: ${AWS_SECRET_ACCESS_KEY}
#spring:
# jpa:
# properties:
# hibernate:
# ddl-auto: none
# dialect: org.hibernate.dialect.MySQL5Dialect
#
# datasource:
# url: jdbc:mysql://localhost:3306/db_example?useSSL=false
# username: springuser
# password: <PASSWORD>
# driverClassName: com.mysql.jdbc.Driver
## H2
#spring.h2.console.enabled=true
#spring.h2.console.path=/h2
## Datasource
#spring.datasource.url=jdbc:h2:file:~/test
#spring.datasource.username=sa
#spring.datasource.password=
#spring.datasource.driver-class-name=org.h2.Driver
## Spring DATASOURCE (DataSourceAutoConfiguration & DataSourceProperties)
spring.datasource.url: jdbc:postgresql://localhost:5432/postgres
spring.datasource.username: postgres
spring.datasource.password: <PASSWORD>
# The SQL dialect makes Hibernate generate better SQL for the chosen database
spring.jpa.properties.hibernate.dialect: org.hibernate.dialect.PostgreSQLDialect
# https://stackoverflow.com/questions/4588755/disabling-contextual-lob-creation-as-createclob-method-threw-error
spring.jpa.properties.hibernate.temp.use_jdbc_metadata_defaults: false
# Hibernate ddl auto (create, create-drop, validate, update)
spring.jpa.hibernate.ddl-auto: update
#spring:
# jpa:
# properties:
# hibernate:
# ddl-auto: none
# dialect: org.hibernate.dialect.MySQL5Dialect
#
# datasource:
# url: jdbc:mysql://localhost:3306/db_example?useSSL=false
# username: springuser
# password: <PASSWORD>
# driverClassName: com.mysql.jdbc.Driver
## H2
#spring.h2.console.enabled=true
#spring.h2.console.path=/h2
## Datasource
#spring.datasource.url=jdbc:h2:file:~/test
#spring.datasource.username=sa
#spring.datasource.password=
#spring.datasource.driver-class-name=org.h2.Driver
|
source/rest-service/src/main/resources/application.yml
|
variables:
DOCKER_DRIVER: overlay2
stages:
- build
before_script:
- rm -fr build SOURCES RPMS
.centoscommon: &centoscommon
stage: build
tags:
- moonshot
script:
- yum -y upgrade
- sh autogen.sh
- ./configure
- make dist
- mkdir -p SOURCES
- mv libradsec*.tar.gz SOURCES
- rpmbuild -bb libradsec.spec --define "_topdir `pwd`"
artifacts:
expire_in: 6 months
paths:
- RPMS
.debiancommon: &debiancommon
stage: build
tags:
- moonshot
script:
- apt-get -y update && apt-get -y dist-upgrade
- sed -i "s/DIST/$CI_JOB_NAME/g" debian/changelog
- autoreconf -fi
- ./configure
- make dist
- mv libradsec-0.0.7.tar.gz ../libradsec_0.0.7.orig.tar.gz
- debuild -us -uc
- mkdir build
- cp ../*.deb build
artifacts:
expire_in: 6 months
paths:
- build/*.deb
.debian9common: &debian9common
stage: build
tags:
- moonshot
script:
# Rebuild libevent with ssl1.0
- apt-get -y update && apt-get -y dist-upgrade
- echo "deb-src $SRC_REPO" >> /etc/apt/sources.list
- apt-get update
- useradd tmpuser
- chown tmpuser .
- su tmpuser -c "apt-get source -y libevent"
- cd libevent-$VERSION
- sed -i 's/libssl-dev/libssl1.0-dev/g' debian/control
- dch -l moonshot "Build with OpenSSL 1.0"
- debuild -us -uc
- dpkg -i ../*.deb
- cp ../*.deb ../../
- cd ..
- git clean -xdf
# libradsec build
- sed -i "s/DIST/$CI_JOB_NAME/g" debian/changelog
- autoreconf -fi
- ./configure
- make dist
- mv libradsec-0.0.7.tar.gz ../libradsec_0.0.7.orig.tar.gz
- debuild -us -uc
- mkdir build
- cp ../*.deb build
artifacts:
expire_in: 6 months
paths:
- build/*.deb
.alpinecommon: &alpinecommon
stage: build
tags:
- moonshot
script:
- adduser -D tmpuser -s /bin/sh
- adduser tmpuser abuild
- sh autogen.sh
- sed -i "s/GZIP_ENV = --best//g" Makefile.in
- ./configure
- make dist
- sudo -u tmpuser abuild checksum
- sudo -u tmpuser abuild-keygen -an
- sudo -u tmpuser abuild -r
- mkdir apks
- mv /home/tmpuser/packages/moonshot/* apks
artifacts:
expire_in: 6 months
paths:
- apks/*/*.apk
centos6:
image: registry.ci.ti.ja.net/jisc/moonshot-build/centos6:latest
<<: *centoscommon
centos7:
image: registry.ci.ti.ja.net/jisc/moonshot-build/centos7:latest
<<: *centoscommon
debian8:
image: registry.ci.ti.ja.net/jisc/moonshot-build/debian8:latest
<<: *debiancommon
raspbian8:
image: registry.ci.ti.ja.net/jisc/moonshot-build/raspbian8:latest
<<: *debiancommon
debian9:
image: registry.ci.ti.ja.net/jisc/moonshot-build/debian9:latest
variables:
SRC_REPO: http://deb.debian.org/debian stretch main
VERSION: 2.0.21-stable
<<: *debian9common
raspbian9:
image: registry.ci.ti.ja.net/jisc/moonshot-build/raspbian9:latest
variables:
SRC_REPO: http://archive.raspbian.org/raspbian/ stretch main
VERSION: 2.0.21-stable
<<: *debian9common
ubuntu14:
image: registry.ci.ti.ja.net/jisc/moonshot-build/ubuntu14:latest
<<: *debiancommon
ubuntu16:
image: registry.ci.ti.ja.net/jisc/moonshot-build/ubuntu16:latest
<<: *debiancommon
ubuntu18:
image: registry.ci.ti.ja.net/jisc/moonshot-build/ubuntu18:latest
variables:
SRC_REPO: http://archive.ubuntu.com/ubuntu/ bionic main restricted
VERSION: 2.1.8-stable
<<: *debian9common
alpine38:
image: registry.ci.ti.ja.net/jisc/moonshot-build/alpine38:latest
<<: *alpinecommon
alpine38_armhf:
image: registry.ci.ti.ja.net/jisc/moonshot-build/alpine38_arm:latest
<<: *alpinecommon
|
.gitlab-ci.yml
|
--- !ruby/hash:SeasonHash
title: GAMBA ガンバと仲間たち
watchable: true
thumbnail_url: https://cs1.anime.dmkt-sp.jp/anime_kv/img/23/19/9/23199_1_8_8b.png?1577079033000
outline: 都会の片隅で、町ネズミのガンバとマンプクは楽しく暮らしていた。ある日ガンバは、「海は世界で一番広くて大きい」と聞き、海を目指す旅に出る。二匹は港で船乗りネズミの宴に参加するが、そこへ弱りきった島ネズミの忠太が助けを求めやってきた。しかし船乗りネズミたちは、敵が白イタチの「ノロイ」だと聞いたとたん、勝ち目がないと逃げ出してしまう。絶望する忠太を見たガンバは、自分だけでも島ネズミを助けようと決心する。夜が明け、出航のとき。なんとボーボ、ヨイショ、ガクシャ、イカサマ、そしてマンプクもガンバの心意気にひかれ、船に乗り込んでいた。いよいよ冒険のはじまり。ガンバと仲間たちはどうやってノロイに立ち向かうのか!?そして彼らの運命は・・・!?
tags: !ruby/array:Hashie::Array
- !ruby/hash:TagHash
name: アクション/バトル
type: genre
- !ruby/hash:TagHash
name: SF/ファンタジー
type: genre
- !ruby/hash:TagHash
name: 梶裕貴
type: cast
- !ruby/hash:TagHash
name: 神田沙也加
type: cast
- !ruby/hash:TagHash
name: 野村萬斎
type: cast
- !ruby/hash:TagHash
name: 監督:河村友宏
type: staff
- !ruby/hash:TagHash
name: 監督:小川洋一
type: staff
- !ruby/hash:TagHash
name: 原作:斎藤惇夫
type: staff
- !ruby/hash:TagHash
name: 原作:薮内正幸
type: staff
- !ruby/hash:TagHash
name: 監督:小森啓裕
type: staff
- !ruby/hash:TagHash
name: 音楽:ベンジャミン・ウォルフィッシュ
type: staff
- !ruby/hash:TagHash
name: 製作年代:2010年代
type: other
- !ruby/hash:TagHash
name: 製作年:2015年
type: other
- !ruby/hash:TagHash
name: 劇場公開作品
type: other
episodes: !ruby/array:Hashie::Array
- !ruby/hash:EpisodeHash
episode_no: Chapter.1
title: ''
description: 都会の片隅で、町ネズミのガンバとマンプクは楽しく暮らしていた。ある日ガンバは、「海は世界で一番広くて大きい」と聞き、海を目指す旅に出る。二匹は港で船乗りネズミの宴に参加するが、そこへ弱りきった島ネズミの忠太が助けを求めやってきた。しかし船乗りネズミたちは、敵が白イタチの「ノロイ」だと聞いたとたん、勝ち目がないと逃げ出してしまう。絶望する忠太を見たガンバは、自分だけでも島ネズミを助けようと決心する。夜が明け、出航のとき。なんとボーボ、ヨイショ、ガクシャ、イカサマ、そしてマンプクもガンバの心意気にひかれ、船に乗り込んでいた。いよいよ冒険のはじまり。ガンバと仲間たちはどうやってノロイに立ち向かうのか!?そして彼らの運命は・・・!?<br><br>ガンバ:梶裕貴/潮路:神田沙也加/ノロイ:野村萬斎<br><br>原作:冒険者たち
ガンバと 15ひきの仲間(斎藤惇夫 作 薮内正幸 画 岩波書店刊)/監督:河村友宏、小森啓裕/プロデューサー:紀伊宗之、藤村哲也、早船健一郎/脚本:古沢 良太/音楽:ベンジャミン・ウォルフィッシュ/製作総指揮:島村達雄/エグゼクティブプロデューサー:アヴィ・アラッド/総監督:小川洋一/CGテクニカル・スーパーバイザー:初鹿雄太/CGキャラクター・スーパーバイザー:大橋真矢/製作会社:株式会社
白組<br><br>©SHIROGUMI INC., GAMBA<br><br>次話→so36129264
length_seconds: 1777
content_id: so36129392
default_thread_id: 1577184243
channel_id: 2632720
thumbnail_url: https://nicovideo.cdn.nimg.jp/thumbnails/36129392/36129392.66489373
- !ruby/hash:EpisodeHash
episode_no: Chapter.2
title: ''
description: 都会の片隅で、町ネズミのガンバとマンプクは楽しく暮らしていた。ある日ガンバは、「海は世界で一番広くて大きい」と聞き、海を目指す旅に出る。二匹は港で船乗りネズミの宴に参加するが、そこへ弱りきった島ネズミの忠太が助けを求めやってきた。しかし船乗りネズミたちは、敵が白イタチの「ノロイ」だと聞いたとたん、勝ち目がないと逃げ出してしまう。絶望する忠太を見たガンバは、自分だけでも島ネズミを助けようと決心する。夜が明け、出航のとき。なんとボーボ、ヨイショ、ガクシャ、イカサマ、そしてマンプクもガンバの心意気にひかれ、船に乗り込んでいた。いよいよ冒険のはじまり。ガンバと仲間たちはどうやってノロイに立ち向かうのか!?そして彼らの運命は・・・!?<br><br>ガンバ:梶裕貴/潮路:神田沙也加/ノロイ:野村萬斎<br><br>原作:冒険者たち
ガンバと 15ひきの仲間(斎藤惇夫 作 薮内正幸 画 岩波書店刊)/監督:河村友宏、小森啓裕/プロデューサー:紀伊宗之、藤村哲也、早船健一郎/脚本:古沢 良太/音楽:ベンジャミン・ウォルフィッシュ/製作総指揮:島村達雄/エグゼクティブプロデューサー:アヴィ・アラッド/総監督:小川洋一/CGテクニカル・スーパーバイザー:初鹿雄太/CGキャラクター・スーパーバイザー:大橋真矢/製作会社:株式会社
白組<br><br>©SHIROGUMI INC., GAMBA<br><br>so36129392←前話|次話→so36129148 第一話→so36129392
length_seconds: 1766
content_id: so36129264
default_thread_id: 1577183223
channel_id: 2632720
thumbnail_url: https://nicovideo.cdn.nimg.jp/thumbnails/36129264/36129264.30966100
- !ruby/hash:EpisodeHash
episode_no: Chapter.3
title: ''
description: 都会の片隅で、町ネズミのガンバとマンプクは楽しく暮らしていた。ある日ガンバは、「海は世界で一番広くて大きい」と聞き、海を目指す旅に出る。二匹は港で船乗りネズミの宴に参加するが、そこへ弱りきった島ネズミの忠太が助けを求めやってきた。しかし船乗りネズミたちは、敵が白イタチの「ノロイ」だと聞いたとたん、勝ち目がないと逃げ出してしまう。絶望する忠太を見たガンバは、自分だけでも島ネズミを助けようと決心する。夜が明け、出航のとき。なんとボーボ、ヨイショ、ガクシャ、イカサマ、そしてマンプクもガンバの心意気にひかれ、船に乗り込んでいた。いよいよ冒険のはじまり。ガンバと仲間たちはどうやってノロイに立ち向かうのか!?そして彼らの運命は・・・!?<br><br>ガンバ:梶裕貴/潮路:神田沙也加/ノロイ:野村萬斎<br><br>原作:冒険者たち
ガンバと 15ひきの仲間(斎藤惇夫 作 薮内正幸 画 岩波書店刊)/監督:河村友宏、小森啓裕/プロデューサー:紀伊宗之、藤村哲也、早船健一郎/脚本:古沢 良太/音楽:ベンジャミン・ウォルフィッシュ/製作総指揮:島村達雄/エグゼクティブプロデューサー:アヴィ・アラッド/総監督:小川洋一/CGテクニカル・スーパーバイザー:初鹿雄太/CGキャラクター・スーパーバイザー:大橋真矢/製作会社:株式会社
白組<br><br>©SHIROGUMI INC., GAMBA<br><br>so36129264←前話 第一話→so36129392
length_seconds: 2108
content_id: so36129148
default_thread_id: 1577181783
channel_id: 2632720
thumbnail_url: https://nicovideo.cdn.nimg.jp/thumbnails/36129148/36129148.79363466
cast: "[キャスト]<br>ガンバ:梶裕貴/潮路:神田沙也加/ノロイ:野村萬斎"
staff: "[スタッフ]<br>原作:冒険者たち ガンバと 15ひきの仲間(斎藤惇夫 作 薮内正幸 画 岩波書店刊)/監督:河村友宏、小森啓裕/プロデューサー:紀伊宗之、藤村哲也、早船健一郎/脚本:古沢
良太/音楽:ベンジャミン・ウォルフィッシュ/製作総指揮:島村達雄/エグゼクティブプロデューサー:アヴィ・アラッド/総監督:小川洋一/CGテクニカル・スーパーバイザー:初鹿雄太/CGキャラクター・スーパーバイザー:大橋真矢/製作会社:株式会社
白組"
produced_year: "[製作年]<br>2015年"
copyright: "©SHIROGUMI INC., GAMBA"
related_seasons: !ruby/array:Hashie::Array
- !ruby/hash:SeasonHash
title: ベビーガンバ
|
db/fixtures/seasons/season_02684.yml
|
version: "3.7"
services:
php_fpm:
build:
context: ./docker
dockerfile: ./php/Dockerfile
args:
- XDEBUG_REMOTE_HOST=${XDEBUG_REMOTE_HOST}
- XDEBUG_REMOTE_PORT=${XDEBUG_REMOTE_PORT}
- TIMEZONE=${TIMEZONE}
restart: always
container_name: symfony_demo_php_fpm
environment:
PHP_IDE_CONFIG: "serverName=docker"
volumes:
- type: bind
source: ./
target: /var/www/symfony_demo
networks:
backend:
ipv4_address: 11.10.10.5
labels:
- "traefik.enable=false"
nginx:
image: nginx
restart: always
container_name: symfony_demo_nginx
depends_on:
- php_fpm
volumes:
- type: bind
source: ./docker/nginx/default.nginx
target: /etc/nginx/conf.d/default.conf
- type: bind
source: ./
target: /var/www/symfony_demo
networks:
backend:
ipv4_address: 11.10.10.2
labels:
- "traefik.enable=true"
- "traefik.http.routers.nginx.rule=Host(`symfony_demo.local`)"
- "traefik.http.routers.nginx.entrypoints=web"
# Login:admin Password:password
portainer:
image: portainer/portainer:1.22.2
container_name: symfony_demo_portainer
restart: always
command: --admin-password '<PASSWORD>' -H unix:///var/run/docker.sock
volumes:
- type: bind
source: /var/run/docker.sock
target: /var/run/docker.sock
networks:
backend:
ipv4_address: 11.10.10.3
labels:
- "traefik.enable=true"
- "traefik.http.routers.portainer.rule=Host(`portainer.local`)"
- "traefik.http.services.portainer.loadbalancer.server.port=9000"
- "traefik.http.routers.portainer.entrypoints=web"
traefik:
image: traefik:v2.0
restart: always
command:
- "--api.insecure=true"
- "--providers.docker=true"
- "--providers.docker.exposedbydefault=false"
- "--entrypoints.web.address=:80"
container_name: symfony_demo_traefik
ports:
- "80:80"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
networks:
backend:
ipv4_address: 11.10.10.4
labels:
- "traefik.enable=true"
- "traefik.http.routers.traefik.rule=Host(`traefik.local`)"
- "traefik.http.services.traefik.loadbalancer.server.port=8080"
- "traefik.http.routers.traefik.entrypoints=web"
networks:
backend:
ipam:
driver: default
config:
- subnet: "11.10.10.0/24"
|
docker-compose.yml
|
const std = @import("std");
const mem = std.mem;
const math = std.math;
const assert = std.debug.assert;
const Air = @import("Air.zig");
const Zir = @import("Zir.zig");
const Liveness = @import("Liveness.zig");
const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
const TypedValue = @import("TypedValue.zig");
const link = @import("link.zig");
const Module = @import("Module.zig");
const Compilation = @import("Compilation.zig");
const ErrorMsg = Module.ErrorMsg;
const Target = std.Target;
const Allocator = mem.Allocator;
const trace = @import("tracy.zig").trace;
const DW = std.dwarf;
const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
const RegisterManager = @import("register_manager.zig").RegisterManager;
const X8664Encoder = @import("codegen/x86_64.zig").Encoder;
/// Outcome of generating machine code for a single function.
pub const FnResult = union(enum) {
    /// The `code` parameter passed to `generateSymbol` has the value appended.
    appended: void,
    /// Code generation failed; the payload describes why.
    fail: *ErrorMsg,
};
/// Outcome of generating the byte representation of a non-function symbol.
pub const Result = union(enum) {
    /// The `code` parameter passed to `generateSymbol` has the value appended.
    appended: void,
    /// The value is available externally, `code` is unused.
    externally_managed: []const u8,
    /// Symbol generation failed; the payload describes why.
    fail: *ErrorMsg,
};
/// Errors that symbol generation can return directly. Codegen-specific
/// failures are instead reported through the `fail` variants of
/// `Result` / `FnResult`.
pub const GenerateSymbolError = error{
    OutOfMemory,
    /// A Decl that this symbol depends on had a semantic analysis failure.
    AnalysisFail,
};
/// Destination for debug information emitted alongside generated code.
pub const DebugInfoOutput = union(enum) {
    /// Emit DWARF. The byte buffers and the type-relocation table are
    /// owned by the linker (`link.File`); codegen only appends to them.
    dwarf: struct {
        dbg_line: *std.ArrayList(u8),
        dbg_info: *std.ArrayList(u8),
        dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
    },
    /// Do not emit any debug info.
    none,
};
/// Dispatch machine-code generation for `func` to the backend instantiated
/// for the target CPU architecture (`Function(arch)`). wasm targets are
/// unreachable here because they have their own code path. Architectures
/// whose cases are commented out are deliberately disabled — see the
/// `else` panic message below.
pub fn generateFunction(
    bin_file: *link.File,
    src_loc: Module.SrcLoc,
    func: *Module.Fn,
    air: Air,
    liveness: Liveness,
    code: *std.ArrayList(u8),
    debug_output: DebugInfoOutput,
) GenerateSymbolError!FnResult {
    switch (bin_file.options.target.cpu.arch) {
        .wasm32 => unreachable, // has its own code path
        .wasm64 => unreachable, // has its own code path
        .arm => return Function(.arm).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        .armeb => return Function(.armeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        .aarch64 => return Function(.aarch64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        .aarch64_be => return Function(.aarch64_be).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        .aarch64_32 => return Function(.aarch64_32).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.arc => return Function(.arc).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.avr => return Function(.avr).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.bpfel => return Function(.bpfel).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.bpfeb => return Function(.bpfeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.hexagon => return Function(.hexagon).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.mips => return Function(.mips).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.mipsel => return Function(.mipsel).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.mips64 => return Function(.mips64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.mips64el => return Function(.mips64el).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.msp430 => return Function(.msp430).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.powerpc => return Function(.powerpc).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.powerpc64 => return Function(.powerpc64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.powerpc64le => return Function(.powerpc64le).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.r600 => return Function(.r600).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.amdgcn => return Function(.amdgcn).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.riscv32 => return Function(.riscv32).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        .riscv64 => return Function(.riscv64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.sparc => return Function(.sparc).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.sparcv9 => return Function(.sparcv9).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.sparcel => return Function(.sparcel).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.s390x => return Function(.s390x).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.tce => return Function(.tce).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.tcele => return Function(.tcele).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.thumb => return Function(.thumb).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.thumbeb => return Function(.thumbeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.i386 => return Function(.i386).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        .x86_64 => return Function(.x86_64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.xcore => return Function(.xcore).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.nvptx => return Function(.nvptx).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.nvptx64 => return Function(.nvptx64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.le32 => return Function(.le32).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.le64 => return Function(.le64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.amdil => return Function(.amdil).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.amdil64 => return Function(.amdil64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.hsail => return Function(.hsail).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.hsail64 => return Function(.hsail64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.spir => return Function(.spir).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.spir64 => return Function(.spir64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.kalimba => return Function(.kalimba).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.shave => return Function(.shave).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.lanai => return Function(.lanai).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.renderscript32 => return Function(.renderscript32).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.renderscript64 => return Function(.renderscript64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.ve => return Function(.ve).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. Eventually these will be completed, but stage1 is slow and a memory hog."),
    }
}
/// Generate the binary representation of a non-function value `typed_value`
/// for the target described by `bin_file`, appending the bytes to `code`.
/// Returns `.appended` when bytes were written into `code`,
/// `.externally_managed` when the bytes already exist elsewhere and `code`
/// was left untouched, or `.fail` with a diagnostic message.
pub fn generateSymbol(
    bin_file: *link.File,
    src_loc: Module.SrcLoc,
    typed_value: TypedValue,
    code: *std.ArrayList(u8),
    debug_output: DebugInfoOutput,
) GenerateSymbolError!Result {
    const tracy = trace(@src());
    defer tracy.end();
    switch (typed_value.ty.zigTypeTag()) {
        .Fn => {
            // Function pointers are not emitted yet.
            return Result{
                .fail = try ErrorMsg.create(
                    bin_file.allocator,
                    src_loc,
                    "TODO implement generateSymbol function pointers",
                    .{},
                ),
            };
        },
        .Array => {
            // TODO populate .debug_info for the array
            if (typed_value.val.castTag(.bytes)) |payload| {
                if (typed_value.ty.sentinel()) |sentinel| {
                    // Copy the element bytes, then recurse once to emit the
                    // sentinel element directly after them. Capacity is
                    // reserved up front so the AssumeCapacity appends below
                    // cannot fail.
                    try code.ensureCapacity(code.items.len + payload.data.len + 1);
                    code.appendSliceAssumeCapacity(payload.data);
                    switch (try generateSymbol(bin_file, src_loc, .{
                        .ty = typed_value.ty.elemType(),
                        .val = sentinel,
                    }, code, debug_output)) {
                        .appended => return Result{ .appended = {} },
                        .externally_managed => |slice| {
                            code.appendSliceAssumeCapacity(slice);
                            return Result{ .appended = {} };
                        },
                        .fail => |em| return Result{ .fail = em },
                    }
                } else {
                    // No sentinel: the payload bytes can be referenced as-is
                    // without copying them into `code`.
                    return Result{ .externally_managed = payload.data };
                }
            }
            return Result{
                .fail = try ErrorMsg.create(
                    bin_file.allocator,
                    src_loc,
                    "TODO implement generateSymbol for more kinds of arrays",
                    .{},
                ),
            };
        },
        .Pointer => switch (typed_value.ty.ptrSize()) {
            .Slice => {
                return Result{
                    .fail = try ErrorMsg.create(
                        bin_file.allocator,
                        src_loc,
                        "TODO implement generateSymbol for slice {}",
                        .{typed_value.val},
                    ),
                };
            },
            else => {
                // TODO populate .debug_info for the pointer
                if (typed_value.val.castTag(.decl_ref)) |payload| {
                    const decl = payload.data;
                    if (decl.analysis != .complete) return error.AnalysisFail;
                    // Referencing the decl keeps it alive through linking.
                    decl.alive = true;
                    // TODO handle the dependency of this symbol on the decl's vaddr.
                    // If the decl changes vaddr, then this symbol needs to get regenerated.
                    const vaddr = bin_file.getDeclVAddr(decl);
                    const endian = bin_file.options.target.cpu.arch.endian();
                    // Write the virtual address as a pointer-sized integer in
                    // the target's endianness.
                    switch (bin_file.options.target.cpu.arch.ptrBitWidth()) {
                        16 => {
                            try code.resize(2);
                            mem.writeInt(u16, code.items[0..2], @intCast(u16, vaddr), endian);
                        },
                        32 => {
                            try code.resize(4);
                            mem.writeInt(u32, code.items[0..4], @intCast(u32, vaddr), endian);
                        },
                        64 => {
                            try code.resize(8);
                            mem.writeInt(u64, code.items[0..8], vaddr, endian);
                        },
                        else => unreachable,
                    }
                    return Result{ .appended = {} };
                }
                return Result{
                    .fail = try ErrorMsg.create(
                        bin_file.allocator,
                        src_loc,
                        "TODO implement generateSymbol for pointer {}",
                        .{typed_value.val},
                    ),
                };
            },
        },
        .Int => {
            // TODO populate .debug_info for the integer
            const endian = bin_file.options.target.cpu.arch.endian();
            const info = typed_value.ty.intInfo(bin_file.options.target);
            // <= 8 bits: a single byte, endianness-independent.
            if (info.bits <= 8) {
                const x = @intCast(u8, typed_value.val.toUnsignedInt());
                try code.append(x);
                return Result{ .appended = {} };
            }
            if (info.bits > 64) {
                return Result{
                    .fail = try ErrorMsg.create(
                        bin_file.allocator,
                        src_loc,
                        "TODO implement generateSymbol for big ints ('{}')",
                        .{typed_value.ty},
                    ),
                };
            }
            // 9..64 bits: round up to the next power-of-two storage size
            // (2, 4, or 8 bytes) and write in the target's endianness.
            switch (info.signedness) {
                .unsigned => {
                    if (info.bits <= 16) {
                        const x = @intCast(u16, typed_value.val.toUnsignedInt());
                        mem.writeInt(u16, try code.addManyAsArray(2), x, endian);
                    } else if (info.bits <= 32) {
                        const x = @intCast(u32, typed_value.val.toUnsignedInt());
                        mem.writeInt(u32, try code.addManyAsArray(4), x, endian);
                    } else {
                        const x = typed_value.val.toUnsignedInt();
                        mem.writeInt(u64, try code.addManyAsArray(8), x, endian);
                    }
                },
                .signed => {
                    if (info.bits <= 16) {
                        const x = @intCast(i16, typed_value.val.toSignedInt());
                        mem.writeInt(i16, try code.addManyAsArray(2), x, endian);
                    } else if (info.bits <= 32) {
                        const x = @intCast(i32, typed_value.val.toSignedInt());
                        mem.writeInt(i32, try code.addManyAsArray(4), x, endian);
                    } else {
                        const x = typed_value.val.toSignedInt();
                        mem.writeInt(i64, try code.addManyAsArray(8), x, endian);
                    }
                },
            }
            return Result{ .appended = {} };
        },
        else => |t| {
            return Result{
                .fail = try ErrorMsg.create(
                    bin_file.allocator,
                    src_loc,
                    "TODO implement generateSymbol for type '{s}'",
                    .{@tagName(t)},
                ),
            };
        },
    }
}
/// Errors used internally by the per-architecture `Function` implementation;
/// `CodegenFail` indicates `err_msg` has been populated with a diagnostic.
const InnerError = error{
    OutOfMemory,
    CodegenFail,
};
fn Function(comptime arch: std.Target.Cpu.Arch) type {
const writeInt = switch (arch.endian()) {
.Little => mem.writeIntLittle,
.Big => mem.writeIntBig,
};
return struct {
gpa: *Allocator,
air: Air,
liveness: Liveness,
bin_file: *link.File,
target: *const std.Target,
mod_fn: *const Module.Fn,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
err_msg: ?*ErrorMsg,
args: []MCValue,
ret_mcv: MCValue,
fn_type: Type,
arg_index: usize,
src_loc: Module.SrcLoc,
stack_align: u32,
prev_di_line: u32,
prev_di_column: u32,
/// Byte offset within the source file of the ending curly.
end_di_line: u32,
end_di_column: u32,
/// Relative to the beginning of `code`.
prev_di_pc: usize,
/// The value is an offset into the `Function` `code` from the beginning.
/// To perform the reloc, write 32-bit signed little-endian integer
/// which is a relative jump, based on the address following the reloc.
exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .{},
/// Whenever there is a runtime branch, we push a Branch onto this stack,
/// and pop it off when the runtime branch joins. This provides an "overlay"
/// of the table of mappings from instructions to `MCValue` from within the branch.
/// This way we can modify the `MCValue` for an instruction in different ways
/// within different branches. Special consideration is needed when a branch
/// joins with its parent, to make sure all instructions have the same MCValue
/// across each runtime branch upon joining.
branch_stack: *std.ArrayList(Branch),
// Key is the block instruction
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},
register_manager: RegisterManager(Self, Register, &callee_preserved_regs) = .{},
/// Maps offset to what is stored there.
stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{},
/// Offset from the stack base, representing the end of the stack frame.
max_end_stack: u32 = 0,
/// Represents the current end stack offset. If there is no existing slot
/// to place a new stack allocation, it goes here, and then bumps `max_end_stack`.
next_stack_offset: u32 = 0,
/// Debug field, used to find bugs in the compiler.
air_bookkeeping: @TypeOf(air_bookkeeping_init) = air_bookkeeping_init,
const air_bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {};
const MCValue = union(enum) {
/// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc.
/// TODO Look into deleting this tag and using `dead` instead, since every use
/// of MCValue.none should be instead looking at the type and noticing it is 0 bits.
none,
/// Control flow will not allow this value to be observed.
unreach,
/// No more references to this value remain.
dead,
/// The value is undefined.
undef,
/// A pointer-sized integer that fits in a register.
/// If the type is a pointer, this is the pointer address in virtual address space.
immediate: u64,
/// The constant was emitted into the code, at this offset.
/// If the type is a pointer, it means the pointer address is embedded in the code.
embedded_in_code: usize,
/// The value is a pointer to a constant which was emitted into the code, at this offset.
ptr_embedded_in_code: usize,
/// The value is in a target-specific register.
register: Register,
/// The value is in memory at a hard-coded address.
/// If the type is a pointer, it means the pointer address is at this memory location.
memory: u64,
/// The value is one of the stack variables.
/// If the type is a pointer, it means the pointer address is in the stack at this offset.
stack_offset: u32,
/// The value is a pointer to one of the stack variables (payload is stack offset).
ptr_stack_offset: u32,
/// The value is in the compare flags assuming an unsigned operation,
/// with this operator applied on top of it.
compare_flags_unsigned: math.CompareOperator,
/// The value is in the compare flags assuming a signed operation,
/// with this operator applied on top of it.
compare_flags_signed: math.CompareOperator,
fn isMemory(mcv: MCValue) bool {
return switch (mcv) {
.embedded_in_code, .memory, .stack_offset => true,
else => false,
};
}
fn isImmediate(mcv: MCValue) bool {
return switch (mcv) {
.immediate => true,
else => false,
};
}
fn isMutable(mcv: MCValue) bool {
return switch (mcv) {
.none => unreachable,
.unreach => unreachable,
.dead => unreachable,
.immediate,
.embedded_in_code,
.memory,
.compare_flags_unsigned,
.compare_flags_signed,
.ptr_stack_offset,
.ptr_embedded_in_code,
.undef,
=> false,
.register,
.stack_offset,
=> true,
};
}
};
const Branch = struct {
inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
fn deinit(self: *Branch, gpa: *Allocator) void {
self.inst_table.deinit(gpa);
self.* = undefined;
}
};
const StackAllocation = struct {
inst: Air.Inst.Index,
/// TODO do we need size? should be determined by inst.ty.abiSize()
size: u32,
};
const BlockData = struct {
relocs: std.ArrayListUnmanaged(Reloc),
/// The first break instruction encounters `null` here and chooses a
/// machine code value for the block result, populating this field.
/// Following break instructions encounter that value and use it for
/// the location to store their block results.
mcv: MCValue,
};
const Reloc = union(enum) {
/// The value is an offset into the `Function` `code` from the beginning.
/// To perform the reloc, write 32-bit signed little-endian integer
/// which is a relative jump, based on the address following the reloc.
rel32: usize,
/// A branch in the ARM instruction set
arm_branch: struct {
pos: usize,
cond: @import("codegen/arm.zig").Condition,
},
};
const BigTomb = struct {
function: *Self,
inst: Air.Inst.Index,
tomb_bits: Liveness.Bpi,
big_tomb_bits: u32,
bit_index: usize,
fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void {
const this_bit_index = bt.bit_index;
bt.bit_index += 1;
const op_int = @enumToInt(op_ref);
if (op_int < Air.Inst.Ref.typed_value_map.len) return;
const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
if (this_bit_index < Liveness.bpi - 1) {
const dies = @truncate(u1, bt.tomb_bits >> @intCast(Liveness.OperandInt, this_bit_index)) != 0;
if (!dies) return;
} else {
const big_bit_index = @intCast(u5, this_bit_index - (Liveness.bpi - 1));
const dies = @truncate(u1, bt.big_tomb_bits >> big_bit_index) != 0;
if (!dies) return;
}
bt.function.processDeath(op_index);
}
fn finishAir(bt: *BigTomb, result: MCValue) void {
const is_used = !bt.function.liveness.isUnused(bt.inst);
if (is_used) {
log.debug("%{d} => {}", .{ bt.inst, result });
const branch = &bt.function.branch_stack.items[bt.function.branch_stack.items.len - 1];
branch.inst_table.putAssumeCapacityNoClobber(bt.inst, result);
}
bt.function.finishAirBookkeeping();
}
};
const Self = @This();
fn generate(
bin_file: *link.File,
src_loc: Module.SrcLoc,
module_fn: *Module.Fn,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
) GenerateSymbolError!FnResult {
if (build_options.skip_non_native and std.Target.current.cpu.arch != arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
assert(module_fn.owner_decl.has_tv);
const fn_type = module_fn.owner_decl.ty;
var branch_stack = std.ArrayList(Branch).init(bin_file.allocator);
defer {
assert(branch_stack.items.len == 1);
branch_stack.items[0].deinit(bin_file.allocator);
branch_stack.deinit();
}
try branch_stack.append(.{});
var function = Self{
.gpa = bin_file.allocator,
.air = air,
.liveness = liveness,
.target = &bin_file.options.target,
.bin_file = bin_file,
.mod_fn = module_fn,
.code = code,
.debug_output = debug_output,
.err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
.fn_type = fn_type,
.arg_index = 0,
.branch_stack = &branch_stack,
.src_loc = src_loc,
.stack_align = undefined,
.prev_di_pc = 0,
.prev_di_line = module_fn.lbrace_line,
.prev_di_column = module_fn.lbrace_column,
.end_di_line = module_fn.rbrace_line,
.end_di_column = module_fn.rbrace_column,
};
defer function.stack.deinit(bin_file.allocator);
defer function.blocks.deinit(bin_file.allocator);
defer function.exitlude_jump_relocs.deinit(bin_file.allocator);
var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
else => |e| return e,
};
defer call_info.deinit(&function);
function.args = call_info.args;
function.ret_mcv = call_info.return_value;
function.stack_align = call_info.stack_align;
function.max_end_stack = call_info.stack_byte_count;
function.gen() catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
else => |e| return e,
};
if (function.err_msg) |em| {
return FnResult{ .fail = em };
} else {
return FnResult{ .appended = {} };
}
}
/// Emits the machine code for the entire function: architecture-specific
/// prologue, the lowered AIR body (via `genBody`), and the epilogue.
/// Frame size and exitlude-jump targets are not known until after the body is
/// generated, so placeholder bytes are reserved up front and backpatched here.
fn gen(self: *Self) !void {
    switch (arch) {
        .x86_64 => {
            // Reserve room for the 11-byte prologue emitted below.
            try self.code.ensureCapacity(self.code.items.len + 11);
            const cc = self.fn_type.fnCallingConvention();
            if (cc != .Naked) {
                // We want to subtract the aligned stack frame size from rsp here, but we don't
                // yet know how big it will be, so we leave room for a 4-byte stack size.
                // TODO During semantic analysis, check if there are no function calls. If there
                // are none, here we can omit the part where we subtract and then add rsp.
                self.code.appendSliceAssumeCapacity(&[_]u8{
                    0x55, // push rbp
                    0x48, 0x89, 0xe5, // mov rbp, rsp
                    0x48, 0x81, 0xec, // sub rsp, imm32 (with reloc)
                });
                const reloc_index = self.code.items.len;
                // Skip over the 4 imm32 bytes; they are patched below once the
                // final frame size is known.
                self.code.items.len += 4;
                try self.dbgSetPrologueEnd();
                try self.genBody(self.air.getMainBody());
                const stack_end = self.max_end_stack;
                if (stack_end > math.maxInt(i32))
                    return self.failSymbol("too much stack used in call parameters", .{});
                const aligned_stack_end = mem.alignForward(stack_end, self.stack_align);
                // Backpatch the `sub rsp, imm32` reserved above.
                mem.writeIntLittle(u32, self.code.items[reloc_index..][0..4], @intCast(u32, aligned_stack_end));
                if (self.code.items.len >= math.maxInt(i32)) {
                    return self.failSymbol("unable to perform relocation: jump too far", .{});
                }
                if (self.exitlude_jump_relocs.items.len == 1) {
                    // A single exitlude jump can only be at the very end of the
                    // body; delete the 5-byte jmp and fall through to the epilogue.
                    self.code.items.len -= 5;
                } else for (self.exitlude_jump_relocs.items) |jmp_reloc| {
                    // rel32 is relative to the end of the 4-byte displacement.
                    const amt = self.code.items.len - (jmp_reloc + 4);
                    const s32_amt = @intCast(i32, amt);
                    mem.writeIntLittle(i32, self.code.items[jmp_reloc..][0..4], s32_amt);
                }
                // Important to be after the possible self.code.items.len -= 5 above.
                try self.dbgSetEpilogueBegin();
                // Epilogue worst case: 7 bytes (add rsp, imm32) + pop + ret.
                try self.code.ensureCapacity(self.code.items.len + 9);
                // add rsp, x
                if (aligned_stack_end > math.maxInt(i8)) {
                    // example: 48 81 c4 ff ff ff 7f  add rsp,0x7fffffff
                    self.code.appendSliceAssumeCapacity(&[_]u8{ 0x48, 0x81, 0xc4 });
                    const x = @intCast(u32, aligned_stack_end);
                    mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), x);
                } else if (aligned_stack_end != 0) {
                    // example: 48 83 c4 7f  add rsp,0x7f
                    const x = @intCast(u8, aligned_stack_end);
                    self.code.appendSliceAssumeCapacity(&[_]u8{ 0x48, 0x83, 0xc4, x });
                }
                self.code.appendSliceAssumeCapacity(&[_]u8{
                    0x5d, // pop rbp
                    0xc3, // ret
                });
            } else {
                // Naked functions get no prologue/epilogue.
                try self.dbgSetPrologueEnd();
                try self.genBody(self.air.getMainBody());
                try self.dbgSetEpilogueBegin();
            }
        },
        .arm, .armeb => {
            const cc = self.fn_type.fnCallingConvention();
            if (cc != .Naked) {
                // Reserve 3 instructions (12 bytes) for the prologue:
                // push {fp, lr}
                // mov fp, sp
                // sub sp, sp, #reloc
                const prologue_reloc = self.code.items.len;
                try self.code.resize(prologue_reloc + 12);
                writeInt(u32, self.code.items[prologue_reloc + 4 ..][0..4], Instruction.mov(.al, .fp, Instruction.Operand.reg(.sp, Instruction.Operand.Shift.none)).toU32());
                try self.dbgSetPrologueEnd();
                try self.genBody(self.air.getMainBody());
                // Backpatch push callee saved regs: only registers the body
                // actually allocated need saving, known only after genBody.
                var saved_regs = Instruction.RegisterList{
                    .r11 = true, // fp
                    .r14 = true, // lr
                };
                inline for (callee_preserved_regs) |reg| {
                    if (self.register_manager.isRegAllocated(reg)) {
                        @field(saved_regs, @tagName(reg)) = true;
                    }
                }
                writeInt(u32, self.code.items[prologue_reloc..][0..4], Instruction.stmdb(.al, .sp, true, saved_regs).toU32());
                // Backpatch stack offset
                const stack_end = self.max_end_stack;
                const aligned_stack_end = mem.alignForward(stack_end, self.stack_align);
                // The size must be encodable as an ARM immediate operand.
                if (Instruction.Operand.fromU32(@intCast(u32, aligned_stack_end))) |op| {
                    writeInt(u32, self.code.items[prologue_reloc + 8 ..][0..4], Instruction.sub(.al, .sp, .sp, op).toU32());
                } else {
                    return self.failSymbol("TODO ARM: allow larger stacks", .{});
                }
                try self.dbgSetEpilogueBegin();
                // exitlude jumps
                if (self.exitlude_jump_relocs.items.len == 1) {
                    // There is only one relocation. Hence,
                    // this relocation must be at the end of
                    // the code. Therefore, we can just delete
                    // the space initially reserved for the
                    // jump
                    self.code.items.len -= 4;
                } else for (self.exitlude_jump_relocs.items) |jmp_reloc| {
                    // +8 accounts for the ARM PC reading two instructions ahead.
                    const amt = @intCast(i32, self.code.items.len) - @intCast(i32, jmp_reloc + 8);
                    if (amt == -4) {
                        // This return is at the end of the
                        // code block. We can't just delete
                        // the space because there may be
                        // other jumps we already relocated to
                        // the address. Instead, insert a nop
                        writeInt(u32, self.code.items[jmp_reloc..][0..4], Instruction.nop().toU32());
                    } else {
                        if (math.cast(i26, amt)) |offset| {
                            writeInt(u32, self.code.items[jmp_reloc..][0..4], Instruction.b(.al, offset).toU32());
                        } else |_| {
                            return self.failSymbol("exitlude jump is too large", .{});
                        }
                    }
                }
                // Epilogue: pop callee saved registers (swap lr with pc in saved_regs)
                saved_regs.r14 = false; // lr
                saved_regs.r15 = true; // pc
                // mov sp, fp
                // pop {fp, pc}
                writeInt(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, .sp, Instruction.Operand.reg(.fp, Instruction.Operand.Shift.none)).toU32());
                writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldm(.al, .sp, true, saved_regs).toU32());
            } else {
                // Naked functions get no prologue/epilogue.
                try self.dbgSetPrologueEnd();
                try self.genBody(self.air.getMainBody());
                try self.dbgSetEpilogueBegin();
            }
        },
        .aarch64, .aarch64_be, .aarch64_32 => {
            const cc = self.fn_type.fnCallingConvention();
            if (cc != .Naked) {
                // TODO Finish function prologue and epilogue for aarch64.
                // stp fp, lr, [sp, #-16]!
                // mov fp, sp
                // sub sp, sp, #reloc
                writeInt(u32, try self.code.addManyAsArray(4), Instruction.stp(
                    .x29,
                    .x30,
                    Register.sp,
                    Instruction.LoadStorePairOffset.pre_index(-16),
                ).toU32());
                // NOTE(review): .xzr appears where sp is intended; in the
                // add/sub-immediate encoding register 31 means sp — confirm
                // Instruction.add/sub encode it that way.
                writeInt(u32, try self.code.addManyAsArray(4), Instruction.add(.x29, .xzr, 0, false).toU32());
                // Reserve one instruction for the stack adjustment, patched below.
                const backpatch_reloc = self.code.items.len;
                try self.code.resize(backpatch_reloc + 4);
                try self.dbgSetPrologueEnd();
                try self.genBody(self.air.getMainBody());
                // Backpatch stack offset
                const stack_end = self.max_end_stack;
                const aligned_stack_end = mem.alignForward(stack_end, self.stack_align);
                // The size must fit the 12-bit unsigned immediate field.
                if (math.cast(u12, aligned_stack_end)) |size| {
                    writeInt(u32, self.code.items[backpatch_reloc..][0..4], Instruction.sub(.xzr, .xzr, size, false).toU32());
                } else |_| {
                    return self.failSymbol("TODO AArch64: allow larger stacks", .{});
                }
                try self.dbgSetEpilogueBegin();
                // exitlude jumps
                if (self.exitlude_jump_relocs.items.len == 1) {
                    // There is only one relocation. Hence,
                    // this relocation must be at the end of
                    // the code. Therefore, we can just delete
                    // the space initially reserved for the
                    // jump
                    self.code.items.len -= 4;
                } else for (self.exitlude_jump_relocs.items) |jmp_reloc| {
                    const amt = @intCast(i32, self.code.items.len) - @intCast(i32, jmp_reloc + 8);
                    if (amt == -4) {
                        // This return is at the end of the
                        // code block. We can't just delete
                        // the space because there may be
                        // other jumps we already relocated to
                        // the address. Instead, insert a nop
                        writeInt(u32, self.code.items[jmp_reloc..][0..4], Instruction.nop().toU32());
                    } else {
                        if (math.cast(i28, amt)) |offset| {
                            writeInt(u32, self.code.items[jmp_reloc..][0..4], Instruction.b(offset).toU32());
                        } else |_| {
                            return self.failSymbol("exitlude jump is too large", .{});
                        }
                    }
                }
                // ldp fp, lr, [sp], #16
                writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldp(
                    .x29,
                    .x30,
                    Register.sp,
                    Instruction.LoadStorePairOffset.post_index(16),
                ).toU32());
                // add sp, sp, #stack_size
                // The u12 cast cannot fail: the math.cast(u12, ...) check above
                // already returned on overflow.
                writeInt(u32, try self.code.addManyAsArray(4), Instruction.add(.xzr, .xzr, @intCast(u12, aligned_stack_end), false).toU32());
                // ret lr
                writeInt(u32, try self.code.addManyAsArray(4), Instruction.ret(null).toU32());
            } else {
                // Naked functions get no prologue/epilogue.
                try self.dbgSetPrologueEnd();
                try self.genBody(self.air.getMainBody());
                try self.dbgSetEpilogueBegin();
            }
        },
        else => {
            // Architectures without prologue/epilogue support yet.
            try self.dbgSetPrologueEnd();
            try self.genBody(self.air.getMainBody());
            try self.dbgSetEpilogueBegin();
        },
    }
    // Drop them off at the rbrace.
    try self.dbgAdvancePCAndLine(self.end_di_line, self.end_di_column);
}
/// Lowers every AIR instruction in `body`, in order, by dispatching on its
/// tag. With runtime safety enabled, verifies that each handler performed its
/// liveness bookkeeping (a `finishAir`/`finishAirBookkeeping` call bumps
/// `air_bookkeeping` exactly once per instruction).
fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
    const air_tags = self.air.instructions.items(.tag);
    for (body) |inst| {
        const old_air_bookkeeping = self.air_bookkeeping;
        // Reserve inst_table capacity up front so handlers may use
        // putAssumeCapacity when processing operand deaths.
        try self.ensureProcessDeathCapacity(Liveness.bpi);
        switch (air_tags[inst]) {
            // zig fmt: off
            .add, .ptr_add   => try self.airAdd(inst),
            .addwrap         => try self.airAddWrap(inst),
            .sub, .ptr_sub   => try self.airSub(inst),
            .subwrap         => try self.airSubWrap(inst),
            .mul             => try self.airMul(inst),
            .mulwrap         => try self.airMulWrap(inst),
            .div             => try self.airDiv(inst),
            .cmp_lt  => try self.airCmp(inst, .lt),
            .cmp_lte => try self.airCmp(inst, .lte),
            .cmp_eq  => try self.airCmp(inst, .eq),
            .cmp_gte => try self.airCmp(inst, .gte),
            .cmp_gt  => try self.airCmp(inst, .gt),
            .cmp_neq => try self.airCmp(inst, .neq),
            .bool_and => try self.airBoolOp(inst),
            .bool_or  => try self.airBoolOp(inst),
            .bit_and  => try self.airBitAnd(inst),
            .bit_or   => try self.airBitOr(inst),
            .xor      => try self.airXor(inst),
            .shr      => try self.airShr(inst),
            .shl      => try self.airShl(inst),
            .alloc           => try self.airAlloc(inst),
            .arg             => try self.airArg(inst),
            .assembly        => try self.airAsm(inst),
            .bitcast         => try self.airBitCast(inst),
            .block           => try self.airBlock(inst),
            .br              => try self.airBr(inst),
            .breakpoint      => try self.airBreakpoint(),
            .call            => try self.airCall(inst),
            .cond_br         => try self.airCondBr(inst),
            .dbg_stmt        => try self.airDbgStmt(inst),
            .floatcast       => try self.airFloatCast(inst),
            .intcast         => try self.airIntCast(inst),
            .trunc           => try self.airTrunc(inst),
            .bool_to_int     => try self.airBoolToInt(inst),
            .is_non_null     => try self.airIsNonNull(inst),
            .is_non_null_ptr => try self.airIsNonNullPtr(inst),
            .is_null         => try self.airIsNull(inst),
            .is_null_ptr     => try self.airIsNullPtr(inst),
            .is_non_err      => try self.airIsNonErr(inst),
            .is_non_err_ptr  => try self.airIsNonErrPtr(inst),
            .is_err          => try self.airIsErr(inst),
            .is_err_ptr      => try self.airIsErrPtr(inst),
            .load            => try self.airLoad(inst),
            .loop            => try self.airLoop(inst),
            .not             => try self.airNot(inst),
            .ptrtoint        => try self.airPtrToInt(inst),
            .ret             => try self.airRet(inst),
            .store           => try self.airStore(inst),
            .struct_field_ptr=> try self.airStructFieldPtr(inst),
            .struct_field_val=> try self.airStructFieldVal(inst),
            .struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
            .struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
            .struct_field_ptr_index_2 => try self.airStructFieldPtrIndex(inst, 2),
            .struct_field_ptr_index_3 => try self.airStructFieldPtrIndex(inst, 3),
            .switch_br       => try self.airSwitch(inst),
            .slice_ptr       => try self.airSlicePtr(inst),
            .slice_len       => try self.airSliceLen(inst),
            .slice_elem_val      => try self.airSliceElemVal(inst),
            .ptr_slice_elem_val  => try self.airPtrSliceElemVal(inst),
            .ptr_elem_val        => try self.airPtrElemVal(inst),
            .ptr_elem_ptr        => try self.airPtrElemPtr(inst),
            .ptr_ptr_elem_val    => try self.airPtrPtrElemVal(inst),
            .constant => unreachable, // excluded from function bodies
            .const_ty => unreachable, // excluded from function bodies
            .unreach  => self.finishAirBookkeeping(),
            .optional_payload           => try self.airOptionalPayload(inst),
            .optional_payload_ptr       => try self.airOptionalPayloadPtr(inst),
            .unwrap_errunion_err        => try self.airUnwrapErrErr(inst),
            .unwrap_errunion_payload    => try self.airUnwrapErrPayload(inst),
            .unwrap_errunion_err_ptr    => try self.airUnwrapErrErrPtr(inst),
            .unwrap_errunion_payload_ptr=> try self.airUnwrapErrPayloadPtr(inst),
            .wrap_optional         => try self.airWrapOptional(inst),
            .wrap_errunion_payload => try self.airWrapErrUnionPayload(inst),
            .wrap_errunion_err     => try self.airWrapErrUnionErr(inst),
            // zig fmt: on
        }
        if (std.debug.runtime_safety) {
            if (self.air_bookkeeping < old_air_bookkeeping + 1) {
                std.debug.panic("in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. Look for a missing call to finishAir.", .{ inst, air_tags[inst] });
            }
        }
    }
}
/// Marks the current position in the DWARF line program as the end of the
/// function prologue, then syncs the PC/line state. No-op without DWARF output.
fn dbgSetPrologueEnd(self: *Self) InnerError!void {
    const dbg_out = switch (self.debug_output) {
        .dwarf => |d| d,
        .none => return,
    };
    try dbg_out.dbg_line.append(DW.LNS_set_prologue_end);
    try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column);
}
/// Marks the current position in the DWARF line program as the beginning of
/// the function epilogue, then syncs the PC/line state. No-op without DWARF output.
fn dbgSetEpilogueBegin(self: *Self) InnerError!void {
    const dbg_out = switch (self.debug_output) {
        .dwarf => |d| d,
        .none => return,
    };
    try dbg_out.dbg_line.append(DW.LNS_set_epilogue_begin);
    try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column);
}
/// Emits DWARF line-program opcodes advancing the PC register to the current
/// code offset and the line register to `line`, then records the new position
/// in `prev_di_*`. State is updated even for `.none` debug output.
fn dbgAdvancePCAndLine(self: *Self, line: u32, column: u32) InnerError!void {
    switch (self.debug_output) {
        .dwarf => |dbg_out| {
            const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line);
            const delta_pc = self.code.items.len - self.prev_di_pc;
            // TODO Look into using the DWARF special opcodes to compress this data.
            // It lets you emit single-byte opcodes that add different numbers to
            // both the PC and the line number at the same time.
            //
            // Reserve the worst case up front so the appendAssumeCapacity calls
            // below are always legal: 1 (advance_pc) + 10 (ULEB128 of a u64
            // delta_pc) + 1 (advance_line) + 5 (SLEB128 of an i32 delta_line)
            // + 1 (copy) = 18 bytes. The previous reservation of 11 could be
            // fully consumed by the ULEB128 alone, making the later
            // appendAssumeCapacity calls exceed the reserved capacity.
            try dbg_out.dbg_line.ensureUnusedCapacity(18);
            dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_advance_pc);
            leb128.writeULEB128(dbg_out.dbg_line.writer(), delta_pc) catch unreachable;
            if (delta_line != 0) {
                dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_advance_line);
                leb128.writeILEB128(dbg_out.dbg_line.writer(), delta_line) catch unreachable;
            }
            dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_copy);
        },
        .none => {},
    }
    self.prev_di_line = line;
    self.prev_di_column = column;
    self.prev_di_pc = self.code.items.len;
}
/// Marks `inst` as dead in the innermost branch and frees the register it
/// occupied, if any. Asserts there is already capacity to insert into top
/// branch inst_table (see `ensureProcessDeathCapacity`).
fn processDeath(self: *Self, inst: Air.Inst.Index) void {
    // Constants are immortal.
    if (self.air.instructions.items(.tag)[inst] == .constant) return;
    // When editing this function, note that the logic must synchronize with `reuseOperand`.
    const old_value = self.getResolvedInstValue(inst);
    const top_branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
    top_branch.inst_table.putAssumeCapacity(inst, .dead);
    if (old_value == .register) {
        // The register manager tracks canonical registers only.
        self.register_manager.freeReg(toCanonicalReg(old_value.register));
    }
    // TODO process stack allocation death
}
/// Called when there are no operands, and the instruction is always unreferenced.
/// Counts one processed instruction for the safety check in `genBody`.
fn finishAirBookkeeping(self: *Self) void {
    if (!std.debug.runtime_safety) return;
    self.air_bookkeeping += 1;
}
/// Standard epilogue for an instruction handler: processes operand deaths
/// encoded in the instruction's tomb bits, records `result` for `inst` in the
/// innermost branch when the instruction is referenced, and counts the
/// instruction for bookkeeping. Unused operand slots are `.none`.
fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
    var tomb_bits = self.liveness.getTombBits(inst);
    for (operands) |op| {
        // Low bit set => this operand dies at this instruction.
        const dies = @truncate(u1, tomb_bits) != 0;
        tomb_bits >>= 1;
        if (!dies) continue;
        const op_int = @enumToInt(op);
        // Refs below typed_value_map.len denote interned values, not
        // instructions — nothing to process for those.
        if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
        const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
        self.processDeath(op_index);
    }
    // After shifting past the operand bits, the remaining low bit indicates
    // whether the instruction itself is unreferenced.
    const is_used = @truncate(u1, tomb_bits) == 0;
    if (is_used) {
        log.debug("%{d} => {}", .{ inst, result });
        const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
        branch.inst_table.putAssumeCapacityNoClobber(inst, result);
        switch (result) {
            .register => |reg| {
                // In some cases (such as bitcast), an operand
                // may be the same MCValue as the result. If
                // that operand died and was a register, it
                // was freed by processDeath. We have to
                // "re-allocate" the register.
                if (self.register_manager.isRegFree(reg)) {
                    self.register_manager.getRegAssumeFree(reg, inst);
                }
            },
            else => {},
        }
    }
    self.finishAirBookkeeping();
}
/// Pre-reserves room in the innermost branch's inst_table so that subsequent
/// `processDeath` calls may use putAssumeCapacity.
fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
    const top_index = self.branch_stack.items.len - 1;
    const table = &self.branch_stack.items[top_index].inst_table;
    try table.ensureUnusedCapacity(self.gpa, additional_count);
}
/// Adds a Type to the .debug_info at the current position. The bytes will be populated later,
/// after codegen for this symbol is done.
fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void {
    const dbg_out = switch (self.debug_output) {
        .dwarf => |d| d,
        .none => return,
    };
    assert(ty.hasCodeGenBits());
    // Reserve the 4-byte slot now; it is patched once the type's offset is known.
    const reloc_offset = dbg_out.dbg_info.items.len;
    try dbg_out.dbg_info.resize(reloc_offset + 4); // DW.AT_type, DW.FORM_ref4
    const gop = try dbg_out.dbg_info_type_relocs.getOrPut(self.gpa, ty);
    if (!gop.found_existing) {
        gop.value_ptr.* = .{
            .off = undefined,
            .relocs = .{},
        };
    }
    try gop.value_ptr.relocs.append(self.gpa, @intCast(u32, reloc_offset));
}
/// Reserves `abi_size` bytes of stack at `abi_align` alignment for `inst`,
/// widening the frame alignment and high-water mark as needed.
/// Returns the allocation's frame offset.
fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 {
    if (self.stack_align < abi_align)
        self.stack_align = abi_align;
    // TODO find a free slot instead of always appending
    const aligned_offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align);
    const new_end = aligned_offset + abi_size;
    self.next_stack_offset = new_end;
    if (self.max_end_stack < new_end)
        self.max_end_stack = new_end;
    try self.stack.putNoClobber(self.gpa, aligned_offset, .{
        .inst = inst,
        .size = abi_size,
    });
    return aligned_offset;
}
/// Use a pointer instruction as the basis for allocating stack memory:
/// the allocation is sized and aligned for the pointer's pointee type.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
    const pointee_ty = self.air.typeOfIndex(inst).elemType();
    const size = math.cast(u32, pointee_ty.abiSize(self.target.*)) catch {
        return self.fail("type '{}' too big to fit into stack frame", .{pointee_ty});
    };
    // TODO swap this for inst.ty.ptrAlign
    const alignment = pointee_ty.abiAlignment(self.target.*);
    return self.allocMem(inst, size, alignment);
}
/// Allocates a home for `inst`'s value: a register when `reg_ok` is set and
/// the type fits in one and a register is free, otherwise a stack slot.
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
    const elem_ty = self.air.typeOfIndex(inst);
    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
        return self.fail("type '{}' too big to fit into stack frame", .{elem_ty});
    };
    const abi_align = elem_ty.abiAlignment(self.target.*);
    if (self.stack_align < abi_align)
        self.stack_align = abi_align;
    if (reg_ok) {
        // Make sure the type can fit in a register before we try to allocate one.
        const ptr_bytes: u64 = @divExact(arch.ptrBitWidth(), 8);
        if (abi_size <= ptr_bytes) {
            if (self.register_manager.tryAllocReg(inst, &.{})) |reg| {
                return MCValue{ .register = registerAlias(reg, abi_size) };
            }
        }
    }
    // Register allocation declined or failed; fall back to the stack.
    const stack_offset = try self.allocMem(inst, abi_size, abi_align);
    return MCValue{ .stack_offset = stack_offset };
}
/// Evicts `inst` from register `reg` into a fresh stack slot, updating the
/// innermost branch's table and emitting the store.
pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
    const spill_mcv = try self.allocRegOrMem(inst, false);
    log.debug("spilling {d} to stack mcv {any}", .{ inst, spill_mcv });
    const reg_mcv = self.getResolvedInstValue(inst);
    // The caller must be spilling the register that actually holds `inst`.
    assert(reg == toCanonicalReg(reg_mcv.register));
    const top_branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
    try top_branch.inst_table.put(self.gpa, inst, spill_mcv);
    try self.genSetStack(self.air.typeOfIndex(inst), spill_mcv.stack_offset, reg_mcv);
}
/// Copies a value to a register without tracking the register. The register is not considered
/// allocated. A second call to `copyToTmpRegister` may return the same register.
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
    const tmp_reg = try self.register_manager.allocReg(null, &.{});
    try self.genSetReg(ty, tmp_reg, mcv);
    return tmp_reg;
}
/// Allocates a new register and copies `mcv` into it.
/// `reg_owner` is the instruction that gets associated with the register in the register table.
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue {
    const dst_reg = try self.register_manager.allocReg(reg_owner, &.{});
    try self.genSetReg(self.air.typeOfIndex(reg_owner), dst_reg, mcv);
    return MCValue{ .register = dst_reg };
}
/// Lowers `alloc`: reserves a stack slot for the pointee and yields a pointer to it.
fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
    const offset = try self.allocMemPtr(inst);
    return self.finishAir(inst, MCValue{ .ptr_stack_offset = offset }, .{ .none, .none, .none });
}
/// Lowers `floatcast`. Not yet implemented for any architecture.
fn airFloatCast(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    switch (arch) {
        else => return self.fail("TODO implement floatCast for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers `intcast`. Same-width casts forward the operand unchanged; widening
/// and narrowing are not yet implemented.
fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    const src_ty = self.air.typeOf(ty_op.operand);
    const operand = try self.resolveInst(ty_op.operand);
    const src_info = src_ty.intInfo(self.target.*);
    const dst_info = self.air.typeOfIndex(inst).intInfo(self.target.*);
    if (src_info.signedness != dst_info.signedness)
        return self.fail("TODO gen intcast sign safety in semantic analysis", .{});
    if (src_info.bits == dst_info.bits)
        return self.finishAir(inst, operand, .{ ty_op.operand, .none, .none });
    switch (arch) {
        else => return self.fail("TODO implement intCast for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers `trunc`. Not yet implemented for any architecture.
fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    // Resolve (and discard) the operand, matching the original bookkeeping.
    _ = try self.resolveInst(ty_op.operand);
    switch (arch) {
        else => return self.fail("TODO implement trunc for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers `bool_to_int`: a bool is already 0 or 1, so the operand's MCValue
/// is reused as the result.
fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const operand_mcv = try self.resolveInst(un_op);
    const result: MCValue = if (self.liveness.isUnused(inst)) MCValue.dead else operand_mcv;
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}
/// Lowers `not`. When the operand is a live comparison-flags value, only the
/// recorded condition is inverted — no code is emitted. Otherwise the negation
/// is lowered as a binary op against `bool_true`.
fn airNot(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    const operand = try self.resolveInst(ty_op.operand);
    const result: MCValue = result: {
        switch (operand) {
            .dead => unreachable,
            .unreach => unreachable,
            .compare_flags_unsigned => |cmp_op| break :result MCValue{
                .compare_flags_unsigned = switch (cmp_op) {
                    .gte => .lt,
                    .gt => .lte,
                    .neq => .eq,
                    .lt => .gte,
                    .lte => .gt,
                    .eq => .neq,
                },
            },
            .compare_flags_signed => |cmp_op| break :result MCValue{
                .compare_flags_signed = switch (cmp_op) {
                    .gte => .lt,
                    .gt => .lte,
                    .neq => .eq,
                    .lt => .gte,
                    .lte => .gt,
                    .eq => .neq,
                },
            },
            else => {},
        }
        switch (arch) {
            .x86_64 => break :result try self.genX8664BinMath(inst, ty_op.operand, .bool_true),
            .arm, .armeb => break :result try self.genArmBinOp(inst, ty_op.operand, .bool_true, .not),
            else => return self.fail("TODO implement NOT for {}", .{self.target.cpu.arch}),
        }
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
/// Lowers `add` and `ptr_add`.
fn airAdd(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    const result: MCValue = switch (arch) {
        .x86_64 => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs),
        .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .add),
        else => return self.fail("TODO implement add for {}", .{self.target.cpu.arch}),
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Lowers `addwrap`. Not yet implemented for any architecture.
fn airAddWrap(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    switch (arch) {
        else => return self.fail("TODO implement addwrap for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers `sub` and `ptr_sub`.
fn airSub(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    const result: MCValue = switch (arch) {
        .x86_64 => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs),
        .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .sub),
        else => return self.fail("TODO implement sub for {}", .{self.target.cpu.arch}),
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Lowers `subwrap`. Not yet implemented for any architecture.
fn airSubWrap(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    switch (arch) {
        else => return self.fail("TODO implement subwrap for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers `mul`.
fn airMul(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    const result: MCValue = switch (arch) {
        .x86_64 => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs),
        .arm, .armeb => try self.genArmMul(inst, bin_op.lhs, bin_op.rhs),
        else => return self.fail("TODO implement mul for {}", .{self.target.cpu.arch}),
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Lowers `mulwrap`. Not yet implemented for any architecture.
fn airMulWrap(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    switch (arch) {
        else => return self.fail("TODO implement mulwrap for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers `div`. Not yet implemented for any architecture.
fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    switch (arch) {
        else => return self.fail("TODO implement div for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers `bit_and`.
fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    const result: MCValue = switch (arch) {
        .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_and),
        .x86_64 => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs),
        else => return self.fail("TODO implement bitwise and for {}", .{self.target.cpu.arch}),
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Lowers `bit_or`.
fn airBitOr(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    const result: MCValue = switch (arch) {
        .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_or),
        .x86_64 => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs),
        else => return self.fail("TODO implement bitwise or for {}", .{self.target.cpu.arch}),
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Lowers `xor`.
fn airXor(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    const result: MCValue = switch (arch) {
        .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .xor),
        else => return self.fail("TODO implement xor for {}", .{self.target.cpu.arch}),
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Lowers `shl`.
fn airShl(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    const result: MCValue = switch (arch) {
        .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .shl),
        else => return self.fail("TODO implement shl for {}", .{self.target.cpu.arch}),
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Lowers `shr`.
fn airShr(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    const result: MCValue = switch (arch) {
        .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .shr),
        else => return self.fail("TODO implement shr for {}", .{self.target.cpu.arch}),
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Lowers `optional_payload` (?T -> T). Not yet implemented for any architecture.
fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    switch (arch) {
        else => return self.fail("TODO implement .optional_payload for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers `optional_payload_ptr` (*?T -> *T). Not yet implemented for any architecture.
fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    switch (arch) {
        else => return self.fail("TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers `unwrap_errunion_err` (E!T -> E). Not yet implemented for any architecture.
fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    switch (arch) {
        else => return self.fail("TODO implement unwrap error union error for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers `unwrap_errunion_payload` (E!T -> T). Not yet implemented for any architecture.
fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    switch (arch) {
        else => return self.fail("TODO implement unwrap error union payload for {}", .{self.target.cpu.arch}),
    }
}
// *(E!T) -> E
/// Lowers `unwrap_errunion_err_ptr`. Not yet implemented for any architecture.
fn airUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    switch (arch) {
        else => return self.fail("TODO implement unwrap error union error ptr for {}", .{self.target.cpu.arch}),
    }
}
// *(E!T) -> *T
/// Lowers `unwrap_errunion_payload_ptr`. Not yet implemented for any architecture.
fn airUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    switch (arch) {
        else => return self.fail("TODO implement unwrap error union payload ptr for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers `wrap_optional` (T -> ?T). Only the zero-bit-payload case is
/// implemented, where the optional degenerates to a boolean true.
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    const optional_ty = self.air.typeOfIndex(inst);
    // Optional with a zero-bit payload type is just a boolean true
    if (optional_ty.abiSize(self.target.*) == 1)
        return self.finishAir(inst, MCValue{ .immediate = 1 }, .{ ty_op.operand, .none, .none });
    switch (arch) {
        else => return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}),
    }
}
/// T to E!T
/// Lowers `wrap_errunion_payload`. Not yet implemented for any architecture.
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    switch (arch) {
        else => return self.fail("TODO implement wrap errunion payload for {}", .{self.target.cpu.arch}),
    }
}
/// E to E!T
/// Lowers `wrap_errunion_err`. Not yet implemented for any architecture.
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    switch (arch) {
        else => return self.fail("TODO implement wrap errunion error for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers `slice_ptr`. Not yet implemented for any architecture.
fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    switch (arch) {
        else => return self.fail("TODO implement slice_ptr for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers `slice_len`. Not yet implemented for any architecture.
fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    switch (arch) {
        else => return self.fail("TODO implement slice_len for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers `slice_elem_val`. Not yet implemented for any architecture.
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
    const is_volatile = false; // TODO
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    // A volatile load must be emitted even when its result is unused.
    if (self.liveness.isUnused(inst) and !is_volatile)
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    switch (arch) {
        else => return self.fail("TODO implement slice_elem_val for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers the ptr_slice_elem_val AIR instruction. Not yet implemented for
/// any architecture.
fn airPtrSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
    const is_volatile = false; // TODO
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    // A volatile load must be emitted even if the result is unused.
    if (self.liveness.isUnused(inst) and !is_volatile)
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    switch (arch) {
        else => return self.fail("TODO implement ptr_slice_elem_val for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers the ptr_elem_val AIR instruction (load an element through a
/// pointer). Not yet implemented for any architecture.
fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
    const is_volatile = false; // TODO
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    // A volatile load must be emitted even if the result is unused.
    if (self.liveness.isUnused(inst) and !is_volatile)
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    switch (arch) {
        else => return self.fail("TODO implement ptr_elem_val for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers the ptr_elem_ptr AIR instruction (compute the address of an
/// element through a pointer). Not yet implemented for any architecture.
fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const bin = self.air.extraData(Air.Bin, ty_pl.payload).data;
    // Nothing to emit when the result is never read.
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ bin.lhs, bin.rhs, .none });
    switch (arch) {
        else => return self.fail("TODO implement ptr_elem_ptr for {}", .{self.target.cpu.arch}),
    }
}
/// Lowers the ptr_ptr_elem_val AIR instruction. Not yet implemented for
/// any architecture.
fn airPtrPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
    const is_volatile = false; // TODO
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    // A volatile load must be emitted even if the result is unused.
    if (self.liveness.isUnused(inst) and !is_volatile)
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    switch (arch) {
        else => return self.fail("TODO implement ptr_ptr_elem_val for {}", .{self.target.cpu.arch}),
    }
}
/// Attempts to reuse an operand's machine value (`mcv`) as the result
/// location for `inst`. Returns true on success, in which case ownership of
/// the value transfers to `inst` and the operand is recorded as dead in the
/// current branch. Only operands that die at this instruction and live in a
/// register or at a stack offset can be reused.
fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool {
    if (!self.liveness.operandDies(inst, op_index))
        return false;

    switch (mcv) {
        .register => |reg| {
            // If it's in the registers table, need to associate the register with the
            // new instruction.
            if (reg.allocIndex()) |index| {
                if (!self.register_manager.isRegFree(reg)) {
                    self.register_manager.registers[index] = inst;
                }
            }
            log.debug("%{d} => {} (reused)", .{ inst, reg });
        },
        .stack_offset => |off| {
            log.debug("%{d} => stack offset {d} (reused)", .{ inst, off });
        },
        // Other value kinds (immediates, compare flags, memory, ...) are
        // not reusable result locations.
        else => return false,
    }

    // Prevent the operand deaths processing code from deallocating it.
    self.liveness.clearOperandDeath(inst, op_index);

    // That makes us responsible for doing the rest of the stuff that processDeath would have done.
    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
    branch.inst_table.putAssumeCapacity(Air.refToIndex(operand).?, .dead);

    return true;
}
/// Loads the value pointed to by `ptr` (which has pointer type `ptr_ty`)
/// into the destination machine value `dst_mcv`. Dispatches on where the
/// pointer value currently lives; several cases are still TODO stubs that
/// report a compile error.
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
    const elem_ty = ptr_ty.elemType();
    switch (ptr) {
        // These MCValue states can never hold a pointer value.
        .none => unreachable,
        .undef => unreachable,
        .unreach => unreachable,
        .dead => unreachable,
        .compare_flags_unsigned => unreachable,
        .compare_flags_signed => unreachable,
        // Comptime-known address: load directly from that memory location.
        .immediate => |imm| try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm }),
        // Pointer to a stack slot: the pointee *is* the stack slot.
        .ptr_stack_offset => |off| try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off }),
        .ptr_embedded_in_code => |off| {
            try self.setRegOrMem(elem_ty, dst_mcv, .{ .embedded_in_code = off });
        },
        .embedded_in_code => {
            return self.fail("TODO implement loading from MCValue.embedded_in_code", .{});
        },
        .register => |reg| {
            switch (arch) {
                .arm, .armeb => switch (dst_mcv) {
                    .dead => unreachable,
                    .undef => unreachable,
                    .compare_flags_signed, .compare_flags_unsigned => unreachable,
                    .embedded_in_code => unreachable,
                    .register => |dst_reg| {
                        // ldr dst_reg, [reg]  (zero offset)
                        writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldr(.al, dst_reg, reg, .{ .offset = Instruction.Offset.none }).toU32());
                    },
                    else => return self.fail("TODO load from register into {}", .{dst_mcv}),
                },
                else => return self.fail("TODO implement loading from MCValue.register for {}", .{arch}),
            }
        },
        .memory => |addr| {
            // Materialize the absolute address in a scratch register, then
            // recurse into the `.register` case to do the actual load.
            const reg = try self.register_manager.allocReg(null, &.{});
            try self.genSetReg(ptr_ty, reg, .{ .memory = addr });
            try self.load(dst_mcv, .{ .register = reg }, ptr_ty);
        },
        .stack_offset => {
            return self.fail("TODO implement loading from MCValue.stack_offset", .{});
        },
    }
}
/// Lowers the load AIR instruction: dereference the pointer operand into a
/// new (or reused) result location.
fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const elem_ty = self.air.typeOfIndex(inst);
    const ptr_ty = self.air.typeOf(ty_op.operand);
    const result: MCValue = result: {
        // Zero-bit payloads produce no machine value at all.
        if (!elem_ty.hasCodeGenBits())
            break :result MCValue.none;

        const ptr = try self.resolveInst(ty_op.operand);
        const is_volatile = ptr_ty.isVolatilePtr();
        // Skip the load entirely when nobody reads it — unless volatile.
        if (self.liveness.isUnused(inst) and !is_volatile)
            break :result MCValue.dead;

        // The MCValue that holds the pointer can be re-used as the value
        // when the operand dies here; otherwise allocate a fresh location.
        const dst_mcv: MCValue = if (self.reuseOperand(inst, ty_op.operand, 0, ptr))
            ptr
        else
            try self.allocRegOrMem(inst, true);

        try self.load(dst_mcv, ptr, ptr_ty);
        break :result dst_mcv;
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
/// Lowers the store AIR instruction: write the RHS value through the LHS
/// pointer. Stores produce no result, so the instruction finishes as dead.
fn airStore(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const ptr = try self.resolveInst(bin_op.lhs);
    const value = try self.resolveInst(bin_op.rhs);
    const elem_ty = self.air.typeOf(bin_op.rhs);

    switch (ptr) {
        // These MCValue states can never hold a pointer value.
        .none,
        .undef,
        .unreach,
        .dead,
        .compare_flags_unsigned,
        .compare_flags_signed,
        => unreachable,
        // Comptime-known address: store straight to memory.
        .immediate => |imm| try self.setRegOrMem(elem_ty, .{ .memory = imm }, value),
        // Pointer to a stack slot: store into the slot itself.
        .ptr_stack_offset => |off| try self.genSetStack(elem_ty, off, value),
        .ptr_embedded_in_code => |off| try self.setRegOrMem(elem_ty, .{ .embedded_in_code = off }, value),
        .embedded_in_code => return self.fail("TODO implement storing to MCValue.embedded_in_code", .{}),
        .register => return self.fail("TODO implement storing to MCValue.register", .{}),
        .memory => return self.fail("TODO implement storing to MCValue.memory", .{}),
        .stack_offset => return self.fail("TODO implement storing to MCValue.stack_offset", .{}),
    }

    return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Lowers struct_field_ptr: the field index lives in the extra payload.
fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
    return self.structFieldPtr(struct_field.struct_operand, ty_pl.ty, struct_field.field_index);
}
/// Lowers struct_field_ptr_index_*: the field index is encoded in the
/// instruction tag and passed in as `index` rather than via extra data.
fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
    const data = self.air.instructions.items(.data)[inst];
    return self.structFieldPtr(data.ty_op.operand, data.ty_op.ty, index);
}
/// Common implementation behind airStructFieldPtr and
/// airStructFieldPtrIndex: computes a pointer to field `index` of the
/// struct referenced by `operand`. Not implemented yet.
fn structFieldPtr(self: *Self, operand: Air.Inst.Ref, ty: Air.Inst.Ref, index: u32) !void {
    // Note: the previous `_ = self;` discard was removed — `self` is used
    // by `self.fail` below, making the discard redundant (and a
    // "pointless discard" error in newer Zig compilers).
    _ = operand;
    _ = ty;
    _ = index;
    return self.fail("TODO implement codegen struct_field_ptr", .{});
    //return self.finishAir(inst, result, .{ extra.struct_ptr, .none, .none });
}
/// Lowers struct_field_val (load a field out of a struct value).
/// Not yet implemented.
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const field_info = self.air.extraData(Air.StructField, ty_pl.payload).data;
    _ = field_info; // unused until this is implemented
    return self.fail("TODO implement codegen struct_field_val", .{});
    //return self.finishAir(inst, result, .{ extra.struct_ptr, .none, .none });
}
/// Decides whether an operand of an ARM data-processing instruction must
/// be materialized into a register, or can be encoded directly as a
/// flexible second operand (operand2).
fn armOperandShouldBeRegister(self: *Self, mcv: MCValue) !bool {
    switch (mcv) {
        // None of these states can reach a binary-op operand position.
        .none,
        .undef,
        .dead,
        .unreach,
        .compare_flags_unsigned,
        .compare_flags_signed,
        .ptr_stack_offset,
        .ptr_embedded_in_code,
        => unreachable,
        .immediate => |imm| {
            if (imm > std.math.maxInt(u32)) return self.fail("TODO ARM binary arithmetic immediate larger than u32", .{});
            // Load immediate into register if it doesn't fit
            // in an operand
            return Instruction.Operand.fromU32(@intCast(u32, imm)) == null;
        },
        // Registers already are registers; memory-like values must first
        // be loaded into one.
        .register, .stack_offset, .embedded_in_code, .memory => return true,
    }
}
/// Entry point for ARM binary operations: classifies the operand type and
/// forwards integer/bool cases to genArmBinIntOp.
fn genArmBinOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref, op: Air.Inst.Tag) !MCValue {
    // The operation is typed by the LHS: for bitshifts the RHS (shift
    // amount) may have a different type than the result.
    const lhs_ty = self.air.typeOf(op_lhs);
    switch (lhs_ty.zigTypeTag()) {
        .Float => return self.fail("TODO ARM binary operations on floats", .{}),
        .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
        // Booleans are handled as 1-bit unsigned integers.
        .Bool => return self.genArmBinIntOp(inst, op_lhs, op_rhs, op, 1, .unsigned),
        .Int => {
            const info = lhs_ty.intInfo(self.target.*);
            return self.genArmBinIntOp(inst, op_lhs, op_rhs, op, info.bits, info.signedness);
        },
        else => unreachable,
    }
}
/// Shared implementation for ARM integer (and bool) binary operations up
/// to 32 bits. Gets the operands into the shapes the chosen instruction
/// needs (register and/or flexible operand), reusing dying register
/// operands and allocating fresh registers as required, then delegates the
/// actual encoding to genArmBinOpCode. Returns the result's MCValue
/// (always a register).
fn genArmBinIntOp(
    self: *Self,
    inst: Air.Inst.Index,
    op_lhs: Air.Inst.Ref,
    op_rhs: Air.Inst.Ref,
    op: Air.Inst.Tag,
    bits: u16,
    signedness: std.builtin.Signedness,
) !MCValue {
    if (bits > 32) {
        return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
    }

    const lhs = try self.resolveInst(op_lhs);
    const rhs = try self.resolveInst(op_rhs);

    const lhs_is_register = lhs == .register;
    const rhs_is_register = rhs == .register;
    // Shifts can only take their first operand as a register; other ops
    // may accept an encodable immediate instead.
    const lhs_should_be_register = switch (op) {
        .shr, .shl => true,
        else => try self.armOperandShouldBeRegister(lhs),
    };
    const rhs_should_be_register = try self.armOperandShouldBeRegister(rhs);
    const reuse_lhs = lhs_is_register and self.reuseOperand(inst, op_lhs, 0, lhs);
    const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, op_rhs, 1, rhs);
    // Shifts are not commutative; all other ops handled here can have
    // their operands swapped (sub is compensated with rsb in the encoder).
    const can_swap_lhs_and_rhs = switch (op) {
        .shr, .shl => false,
        else => true,
    };

    // Destination must be a register
    var dst_mcv: MCValue = undefined;
    var lhs_mcv = lhs;
    var rhs_mcv = rhs;
    var swap_lhs_and_rhs = false;

    // Allocate registers for operands and/or destination
    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
    if (reuse_lhs) {
        // Allocate 0 or 1 registers
        if (!rhs_is_register and rhs_should_be_register) {
            rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?, &.{lhs.register}) };
            branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv);
        }
        dst_mcv = lhs;
    } else if (reuse_rhs and can_swap_lhs_and_rhs) {
        // Allocate 0 or 1 registers
        if (!lhs_is_register and lhs_should_be_register) {
            lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?, &.{rhs.register}) };
            branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv);
        }
        dst_mcv = rhs;
        swap_lhs_and_rhs = true;
    } else {
        // Allocate 1 or 2 registers
        if (lhs_should_be_register and rhs_should_be_register) {
            if (lhs_is_register and rhs_is_register) {
                dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{ lhs.register, rhs.register }) };
            } else if (lhs_is_register) {
                // Move RHS to register
                dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{lhs.register}) };
                rhs_mcv = dst_mcv;
            } else if (rhs_is_register) {
                // Move LHS to register
                dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{rhs.register}) };
                lhs_mcv = dst_mcv;
            } else {
                // Move LHS and RHS to register
                const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? }, &.{});
                lhs_mcv = MCValue{ .register = regs[0] };
                rhs_mcv = MCValue{ .register = regs[1] };
                dst_mcv = lhs_mcv;

                branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv);
            }
        } else if (lhs_should_be_register) {
            // RHS is immediate
            if (lhs_is_register) {
                dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{lhs.register}) };
            } else {
                dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{}) };
                lhs_mcv = dst_mcv;
            }
        } else if (rhs_should_be_register and can_swap_lhs_and_rhs) {
            // LHS is immediate
            if (rhs_is_register) {
                dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{rhs.register}) };
            } else {
                dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{}) };
                rhs_mcv = dst_mcv;
            }
            swap_lhs_and_rhs = true;
        } else unreachable; // binary operation on two immediates
    }

    // Move the operands to the newly allocated registers
    if (lhs_mcv == .register and !lhs_is_register) {
        try self.genSetReg(self.air.typeOf(op_lhs), lhs_mcv.register, lhs);
    }
    if (rhs_mcv == .register and !rhs_is_register) {
        try self.genSetReg(self.air.typeOf(op_rhs), rhs_mcv.register, rhs);
    }

    try self.genArmBinOpCode(
        dst_mcv.register,
        lhs_mcv,
        rhs_mcv,
        swap_lhs_and_rhs,
        op,
        signedness,
    );
    return dst_mcv;
}
/// Encodes one ARM data-processing instruction into `self.code`. At least
/// one of lhs_mcv/rhs_mcv must already be a register; the other may be a
/// register or an immediate encodable as operand2. `swap_lhs_and_rhs`
/// records that the register allocator swapped the operands (only legal
/// for commutative ops, plus sub which compensates by emitting rsb).
/// `signedness` selects asr vs lsr for right shifts.
fn genArmBinOpCode(
    self: *Self,
    dst_reg: Register,
    lhs_mcv: MCValue,
    rhs_mcv: MCValue,
    swap_lhs_and_rhs: bool,
    op: Air.Inst.Tag,
    signedness: std.builtin.Signedness,
) !void {
    assert(lhs_mcv == .register or rhs_mcv == .register);

    // op1 is always the register operand; op2 becomes the flexible
    // second operand (register or immediate).
    const op1 = if (swap_lhs_and_rhs) rhs_mcv.register else lhs_mcv.register;
    const op2 = if (swap_lhs_and_rhs) lhs_mcv else rhs_mcv;

    const operand = switch (op2) {
        .none => unreachable,
        .undef => unreachable,
        .dead, .unreach => unreachable,
        .compare_flags_unsigned => unreachable,
        .compare_flags_signed => unreachable,
        .ptr_stack_offset => unreachable,
        .ptr_embedded_in_code => unreachable,
        // Guaranteed encodable: armOperandShouldBeRegister already
        // forced unencodable immediates into registers.
        .immediate => |imm| Instruction.Operand.fromU32(@intCast(u32, imm)).?,
        .register => |reg| Instruction.Operand.reg(reg, Instruction.Operand.Shift.none),
        .stack_offset,
        .embedded_in_code,
        .memory,
        => unreachable,
    };

    switch (op) {
        .add => {
            writeInt(u32, try self.code.addManyAsArray(4), Instruction.add(.al, dst_reg, op1, operand).toU32());
        },
        .sub => {
            // Subtraction is not commutative: when the operands were
            // swapped, use reverse-subtract (rsb) to preserve lhs - rhs.
            if (swap_lhs_and_rhs) {
                writeInt(u32, try self.code.addManyAsArray(4), Instruction.rsb(.al, dst_reg, op1, operand).toU32());
            } else {
                writeInt(u32, try self.code.addManyAsArray(4), Instruction.sub(.al, dst_reg, op1, operand).toU32());
            }
        },
        .bool_and, .bit_and => {
            writeInt(u32, try self.code.addManyAsArray(4), Instruction.@"and"(.al, dst_reg, op1, operand).toU32());
        },
        .bool_or, .bit_or => {
            writeInt(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, dst_reg, op1, operand).toU32());
        },
        .not, .xor => {
            writeInt(u32, try self.code.addManyAsArray(4), Instruction.eor(.al, dst_reg, op1, operand).toU32());
        },
        .cmp_eq => {
            // cmp only sets condition flags; dst_reg is not written.
            writeInt(u32, try self.code.addManyAsArray(4), Instruction.cmp(.al, op1, operand).toU32());
        },
        .shl => {
            assert(!swap_lhs_and_rhs);
            const shift_amout = switch (operand) {
                .Register => |reg_op| Instruction.ShiftAmount.reg(@intToEnum(Register, reg_op.rm)),
                .Immediate => |imm_op| Instruction.ShiftAmount.imm(@intCast(u5, imm_op.imm)),
            };
            writeInt(u32, try self.code.addManyAsArray(4), Instruction.lsl(.al, dst_reg, op1, shift_amout).toU32());
        },
        .shr => {
            assert(!swap_lhs_and_rhs);
            const shift_amout = switch (operand) {
                .Register => |reg_op| Instruction.ShiftAmount.reg(@intToEnum(Register, reg_op.rm)),
                .Immediate => |imm_op| Instruction.ShiftAmount.imm(@intCast(u5, imm_op.imm)),
            };
            // Arithmetic shift for signed values, logical for unsigned.
            const shr = switch (signedness) {
                .signed => Instruction.asr,
                .unsigned => Instruction.lsr,
            };
            writeInt(u32, try self.code.addManyAsArray(4), shr(.al, dst_reg, op1, shift_amout).toU32());
        },
        else => unreachable, // not a binary instruction
    }
}
/// Emits an ARM integer multiply. Unlike genArmBinIntOp, mul requires the
/// destination and *both* operands to be registers, so every non-register
/// operand is moved into a freshly allocated register. Dying register
/// operands are reused as the destination when possible.
fn genArmMul(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref) !MCValue {
    const lhs = try self.resolveInst(op_lhs);
    const rhs = try self.resolveInst(op_rhs);

    const lhs_is_register = lhs == .register;
    const rhs_is_register = rhs == .register;
    const reuse_lhs = lhs_is_register and self.reuseOperand(inst, op_lhs, 0, lhs);
    const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, op_rhs, 1, rhs);

    // Destination must be a register
    // LHS must be a register
    // RHS must be a register
    var dst_mcv: MCValue = undefined;
    var lhs_mcv: MCValue = lhs;
    var rhs_mcv: MCValue = rhs;

    // Allocate registers for operands and/or destination
    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
    if (reuse_lhs) {
        // Allocate 0 or 1 registers
        if (!rhs_is_register) {
            rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?, &.{lhs.register}) };
            branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv);
        }
        dst_mcv = lhs;
    } else if (reuse_rhs) {
        // Allocate 0 or 1 registers
        if (!lhs_is_register) {
            lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?, &.{rhs.register}) };
            branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv);
        }
        dst_mcv = rhs;
    } else {
        // Allocate 1 or 2 registers
        if (lhs_is_register and rhs_is_register) {
            dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{ lhs.register, rhs.register }) };
        } else if (lhs_is_register) {
            // Move RHS to register
            dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{lhs.register}) };
            rhs_mcv = dst_mcv;
        } else if (rhs_is_register) {
            // Move LHS to register
            dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{rhs.register}) };
            lhs_mcv = dst_mcv;
        } else {
            // Move LHS and RHS to register
            const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? }, &.{});
            lhs_mcv = MCValue{ .register = regs[0] };
            rhs_mcv = MCValue{ .register = regs[1] };
            dst_mcv = lhs_mcv;

            branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv);
        }
    }

    // Move the operands to the newly allocated registers
    if (!lhs_is_register) {
        try self.genSetReg(self.air.typeOf(op_lhs), lhs_mcv.register, lhs);
    }
    if (!rhs_is_register) {
        try self.genSetReg(self.air.typeOf(op_rhs), rhs_mcv.register, rhs);
    }

    writeInt(u32, try self.code.addManyAsArray(4), Instruction.mul(.al, dst_mcv.register, lhs_mcv.register, rhs_mcv.register).toU32());
    return dst_mcv;
}
/// Perform "binary" operators, excluding comparisons.
/// Currently, the following ops are supported:
/// ADD, SUB, XOR, OR, AND
/// (MUL is routed to genX8664Imul.)
/// Returns the MCValue holding the result.
fn genX8664BinMath(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref) !MCValue {
    // We'll handle these ops in two steps.
    // 1) Prepare an output location (register or memory)
    //    This location will be the location of the operand that dies (if one exists)
    //    or just a temporary register (if one doesn't exist)
    // 2) Perform the op with the other argument
    // 3) Sometimes, the output location is memory but the op doesn't support it.
    //    In this case, copy that location to a register, then perform the op to that register instead.
    //
    // TODO: make this algorithm less bad
    try self.code.ensureCapacity(self.code.items.len + 8);

    const lhs = try self.resolveInst(op_lhs);
    const rhs = try self.resolveInst(op_rhs);

    // There are 2 operands, destination and source.
    // Either one, but not both, can be a memory operand.
    // Source operand can be an immediate, 8 bits or 32 bits.
    // So, if either one of the operands dies with this instruction, we can use it
    // as the result MCValue.
    var dst_mcv: MCValue = undefined;
    var src_mcv: MCValue = undefined;
    // NOTE(review): src_inst is assigned in every branch below but never
    // read afterwards in this function — candidate for removal.
    var src_inst: Air.Inst.Ref = undefined;
    if (self.reuseOperand(inst, op_lhs, 0, lhs)) {
        // LHS dies; use it as the destination.
        // Both operands cannot be memory.
        src_inst = op_rhs;
        if (lhs.isMemory() and rhs.isMemory()) {
            dst_mcv = try self.copyToNewRegister(inst, lhs);
            src_mcv = rhs;
        } else {
            dst_mcv = lhs;
            src_mcv = rhs;
        }
    } else if (self.reuseOperand(inst, op_rhs, 1, rhs)) {
        // RHS dies; use it as the destination.
        // Both operands cannot be memory.
        src_inst = op_lhs;
        if (lhs.isMemory() and rhs.isMemory()) {
            dst_mcv = try self.copyToNewRegister(inst, rhs);
            src_mcv = lhs;
        } else {
            dst_mcv = rhs;
            src_mcv = lhs;
        }
    } else {
        // Neither operand dies: copy one into a fresh destination register.
        if (lhs.isMemory()) {
            dst_mcv = try self.copyToNewRegister(inst, lhs);
            src_mcv = rhs;
            src_inst = op_rhs;
        } else {
            dst_mcv = try self.copyToNewRegister(inst, rhs);
            src_mcv = lhs;
            src_inst = op_lhs;
        }
    }
    // This instruction supports only signed 32-bit immediates at most. If the immediate
    // value is larger than this, we put it in a register.
    // A potential opportunity for future optimization here would be keeping track
    // of the fact that the instruction is available both as an immediate
    // and as a register.
    switch (src_mcv) {
        .immediate => |imm| {
            if (imm > math.maxInt(u31)) {
                src_mcv = MCValue{ .register = try self.copyToTmpRegister(Type.initTag(.u64), src_mcv) };
            }
        },
        else => {},
    }

    // Now for step 2, we perform the actual op
    const inst_ty = self.air.typeOfIndex(inst);
    const air_tags = self.air.instructions.items(.tag);
    switch (air_tags[inst]) {
        // TODO: Generate wrapping and non-wrapping versions separately
        .add, .addwrap => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 0, 0x00),
        .bool_or, .bit_or => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 1, 0x08),
        .bool_and, .bit_and => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 4, 0x20),
        .sub, .subwrap => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 5, 0x28),
        .xor, .not => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 6, 0x30),
        .mul, .mulwrap => try self.genX8664Imul(inst_ty, dst_mcv, src_mcv),
        else => unreachable,
    }

    return dst_mcv;
}
/// Wrap over Instruction.encodeInto to translate errors: OutOfMemory
/// propagates as-is, anything else becomes a codegen failure message.
fn encodeX8664Instruction(self: *Self, inst: Instruction) !void {
    inst.encodeInto(self.code) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        else => return self.fail("Instruction.encodeInto failed because {s}", .{@errorName(err)}),
    };
}
/// This function encodes a binary operation for x86_64
/// intended for use with the following opcode ranges
/// because they share the same structure.
///
/// Thus not all binary operations can be used here
/// -- multiplication needs to be done with imul,
/// which doesn't have as convenient an interface.
///
/// "opx"-style instructions use the opcode extension field to indicate which instruction to execute:
///
/// opx = /0: add
/// opx = /1: or
/// opx = /2: adc
/// opx = /3: sbb
/// opx = /4: and
/// opx = /5: sub
/// opx = /6: xor
/// opx = /7: cmp
///
/// opcode | operand shape
/// --------+----------------------
/// 80 /opx | *r/m8*, imm8
/// 81 /opx | *r/m16/32/64*, imm16/32
/// 83 /opx | *r/m16/32/64*, imm8
///
/// "mr"-style instructions use the low bits of opcode to indicate shape of instruction:
///
/// mr = 00: add
/// mr = 08: or
/// mr = 10: adc
/// mr = 18: sbb
/// mr = 20: and
/// mr = 28: sub
/// mr = 30: xor
/// mr = 38: cmp
///
/// opcode | operand shape
/// -------+-------------------------
/// mr + 0 | *r/m8*, r8
/// mr + 1 | *r/m16/32/64*, r16/32/64
/// mr + 2 | *r8*, r/m8
/// mr + 3 | *r16/32/64*, r/m16/32/64
/// mr + 4 | *AL*, imm8
/// mr + 5 | *rAX*, imm16/32
///
/// TODO: rotates and shifts share the same structure, so we can potentially implement them
/// at a later date with very similar code.
/// They have "opx"-style instructions, but no "mr"-style instructions.
///
/// opx = /0: rol,
/// opx = /1: ror,
/// opx = /2: rcl,
/// opx = /3: rcr,
/// opx = /4: shl sal,
/// opx = /5: shr,
/// opx = /6: sal shl,
/// opx = /7: sar,
///
/// opcode | operand shape
/// --------+------------------
/// c0 /opx | *r/m8*, imm8
/// c1 /opx | *r/m16/32/64*, imm8
/// d0 /opx | *r/m8*, 1
/// d1 /opx | *r/m16/32/64*, 1
/// d2 /opx | *r/m8*, CL (for context, CL is register 1)
/// d3 /opx | *r/m16/32/64*, CL (for context, CL is register 1)
fn genX8664BinMathCode(
    self: *Self,
    dst_ty: Type,
    dst_mcv: MCValue,
    src_mcv: MCValue,
    opx: u3,
    mr: u8,
) !void {
    switch (dst_mcv) {
        // These states can never be a binary-op destination.
        .none => unreachable,
        .undef => unreachable,
        .dead, .unreach, .immediate => unreachable,
        .compare_flags_unsigned => unreachable,
        .compare_flags_signed => unreachable,
        .ptr_stack_offset => unreachable,
        .ptr_embedded_in_code => unreachable,
        .register => |dst_reg| {
            switch (src_mcv) {
                .none => unreachable,
                .undef => try self.genSetReg(dst_ty, dst_reg, .undef),
                .dead, .unreach => unreachable,
                .ptr_stack_offset => unreachable,
                .ptr_embedded_in_code => unreachable,
                .register => |src_reg| {
                    // for register, register use mr + 1
                    // addressing mode: *r/m16/32/64*, r16/32/64
                    const abi_size = dst_ty.abiSize(self.target.*);
                    const encoder = try X8664Encoder.init(self.code, 3);
                    encoder.rex(.{
                        .w = abi_size == 8,
                        .r = src_reg.isExtended(),
                        .b = dst_reg.isExtended(),
                    });
                    encoder.opcode_1byte(mr + 1);
                    encoder.modRm_direct(
                        src_reg.low_id(),
                        dst_reg.low_id(),
                    );
                },
                .immediate => |imm| {
                    // register, immediate use opx = 81 or 83 addressing modes:
                    // opx = 81: r/m16/32/64, imm16/32
                    // opx = 83: r/m16/32/64, imm8
                    const imm32 = @intCast(i32, imm); // This case must be handled before calling genX8664BinMathCode.
                    if (imm32 <= math.maxInt(i8)) {
                        const abi_size = dst_ty.abiSize(self.target.*);
                        const encoder = try X8664Encoder.init(self.code, 4);
                        encoder.rex(.{
                            .w = abi_size == 8,
                            .b = dst_reg.isExtended(),
                        });
                        encoder.opcode_1byte(0x83);
                        encoder.modRm_direct(
                            opx,
                            dst_reg.low_id(),
                        );
                        encoder.imm8(@intCast(i8, imm32));
                    } else {
                        const abi_size = dst_ty.abiSize(self.target.*);
                        const encoder = try X8664Encoder.init(self.code, 7);
                        encoder.rex(.{
                            .w = abi_size == 8,
                            .b = dst_reg.isExtended(),
                        });
                        encoder.opcode_1byte(0x81);
                        encoder.modRm_direct(
                            opx,
                            dst_reg.low_id(),
                        );
                        encoder.imm32(@intCast(i32, imm32));
                    }
                },
                .embedded_in_code, .memory => {
                    return self.fail("TODO implement x86 ADD/SUB/CMP source memory", .{});
                },
                .stack_offset => |off| {
                    // register, indirect use mr + 3
                    // addressing mode: *r16/32/64*, r/m16/32/64
                    const abi_size = dst_ty.abiSize(self.target.*);
                    const adj_off = off + abi_size;
                    // Fix: validate the *adjusted* offset — adj_off (not
                    // off) is what gets negated and encoded as the
                    // displacement below, and it can exceed i32 even when
                    // off alone does not.
                    if (adj_off > math.maxInt(i32)) {
                        return self.fail("stack offset too large", .{});
                    }
                    const encoder = try X8664Encoder.init(self.code, 7);
                    encoder.rex(.{
                        .w = abi_size == 8,
                        .r = dst_reg.isExtended(),
                    });
                    encoder.opcode_1byte(mr + 3);
                    if (adj_off <= std.math.maxInt(i8)) {
                        encoder.modRm_indirectDisp8(
                            dst_reg.low_id(),
                            Register.ebp.low_id(),
                        );
                        encoder.disp8(-@intCast(i8, adj_off));
                    } else {
                        encoder.modRm_indirectDisp32(
                            dst_reg.low_id(),
                            Register.ebp.low_id(),
                        );
                        encoder.disp32(-@intCast(i32, adj_off));
                    }
                },
                .compare_flags_unsigned => {
                    return self.fail("TODO implement x86 ADD/SUB/CMP source compare flag (unsigned)", .{});
                },
                .compare_flags_signed => {
                    return self.fail("TODO implement x86 ADD/SUB/CMP source compare flag (signed)", .{});
                },
            }
        },
        .stack_offset => |off| {
            switch (src_mcv) {
                .none => unreachable,
                .undef => return self.genSetStack(dst_ty, off, .undef),
                .dead, .unreach => unreachable,
                .ptr_stack_offset => unreachable,
                .ptr_embedded_in_code => unreachable,
                .register => |src_reg| {
                    // stack-slot destination, register source: mr + 1
                    try self.genX8664ModRMRegToStack(dst_ty, off, src_reg, mr + 0x1);
                },
                .immediate => |imm| {
                    _ = imm;
                    return self.fail("TODO implement x86 ADD/SUB/CMP source immediate", .{});
                },
                .embedded_in_code, .memory, .stack_offset => {
                    return self.fail("TODO implement x86 ADD/SUB/CMP source memory", .{});
                },
                .compare_flags_unsigned => {
                    return self.fail("TODO implement x86 ADD/SUB/CMP source compare flag (unsigned)", .{});
                },
                .compare_flags_signed => {
                    return self.fail("TODO implement x86 ADD/SUB/CMP source compare flag (signed)", .{});
                },
            }
        },
        .embedded_in_code, .memory => {
            return self.fail("TODO implement x86 ADD/SUB/CMP destination memory", .{});
        },
    }
}
/// Performs integer multiplication between dst_mcv and src_mcv, storing the result in dst_mcv.
/// Uses the two-operand/three-operand IMUL forms; several source/destination
/// combinations are still TODO stubs.
fn genX8664Imul(
    self: *Self,
    dst_ty: Type,
    dst_mcv: MCValue,
    src_mcv: MCValue,
) !void {
    switch (dst_mcv) {
        // These states can never be a multiply destination.
        .none => unreachable,
        .undef => unreachable,
        .dead, .unreach, .immediate => unreachable,
        .compare_flags_unsigned => unreachable,
        .compare_flags_signed => unreachable,
        .ptr_stack_offset => unreachable,
        .ptr_embedded_in_code => unreachable,
        .register => |dst_reg| {
            switch (src_mcv) {
                .none => unreachable,
                .undef => try self.genSetReg(dst_ty, dst_reg, .undef),
                .dead, .unreach => unreachable,
                .ptr_stack_offset => unreachable,
                .ptr_embedded_in_code => unreachable,
                .register => |src_reg| {
                    // register, register
                    //
                    // Use the following imul opcode
                    // 0F AF /r: IMUL r32/64, r/m32/64
                    const abi_size = dst_ty.abiSize(self.target.*);
                    const encoder = try X8664Encoder.init(self.code, 4);
                    encoder.rex(.{
                        .w = abi_size == 8,
                        .r = dst_reg.isExtended(),
                        .b = src_reg.isExtended(),
                    });
                    encoder.opcode_2byte(0x0f, 0xaf);
                    encoder.modRm_direct(
                        dst_reg.low_id(),
                        src_reg.low_id(),
                    );
                },
                .immediate => |imm| {
                    // register, immediate:
                    // depends on size of immediate.
                    //
                    // immediate fits in i8:
                    // 6B /r ib: IMUL r32/64, r/m32/64, imm8
                    //
                    // immediate fits in i32:
                    // 69 /r id: IMUL r32/64, r/m32/64, imm32
                    //
                    // immediate is huge:
                    // split into 2 instructions
                    // 1) copy the 64 bit immediate into a tmp register
                    // 2) perform register,register mul
                    // 0F AF /r: IMUL r32/64, r/m32/64
                    if (math.minInt(i8) <= imm and imm <= math.maxInt(i8)) {
                        const abi_size = dst_ty.abiSize(self.target.*);
                        const encoder = try X8664Encoder.init(self.code, 4);
                        encoder.rex(.{
                            .w = abi_size == 8,
                            .r = dst_reg.isExtended(),
                            .b = dst_reg.isExtended(),
                        });
                        encoder.opcode_1byte(0x6B);
                        // dst_reg is both destination and r/m source:
                        // dst_reg = dst_reg * imm8
                        encoder.modRm_direct(
                            dst_reg.low_id(),
                            dst_reg.low_id(),
                        );
                        encoder.imm8(@intCast(i8, imm));
                    } else if (math.minInt(i32) <= imm and imm <= math.maxInt(i32)) {
                        const abi_size = dst_ty.abiSize(self.target.*);
                        const encoder = try X8664Encoder.init(self.code, 7);
                        encoder.rex(.{
                            .w = abi_size == 8,
                            .r = dst_reg.isExtended(),
                            .b = dst_reg.isExtended(),
                        });
                        encoder.opcode_1byte(0x69);
                        // dst_reg = dst_reg * imm32
                        encoder.modRm_direct(
                            dst_reg.low_id(),
                            dst_reg.low_id(),
                        );
                        encoder.imm32(@intCast(i32, imm));
                    } else {
                        // Immediate does not fit: go through a register.
                        const src_reg = try self.copyToTmpRegister(dst_ty, src_mcv);
                        return self.genX8664Imul(dst_ty, dst_mcv, MCValue{ .register = src_reg });
                    }
                },
                .embedded_in_code, .memory, .stack_offset => {
                    return self.fail("TODO implement x86 multiply source memory", .{});
                },
                .compare_flags_unsigned => {
                    return self.fail("TODO implement x86 multiply source compare flag (unsigned)", .{});
                },
                .compare_flags_signed => {
                    return self.fail("TODO implement x86 multiply source compare flag (signed)", .{});
                },
            }
        },
        .stack_offset => |off| {
            switch (src_mcv) {
                .none => unreachable,
                .undef => return self.genSetStack(dst_ty, off, .undef),
                .dead, .unreach => unreachable,
                .ptr_stack_offset => unreachable,
                .ptr_embedded_in_code => unreachable,
                .register => |src_reg| {
                    // copy dst to a register
                    const dst_reg = try self.copyToTmpRegister(dst_ty, dst_mcv);
                    // multiply into dst_reg
                    // register, register
                    // Use the following imul opcode
                    // 0F AF /r: IMUL r32/64, r/m32/64
                    const abi_size = dst_ty.abiSize(self.target.*);
                    const encoder = try X8664Encoder.init(self.code, 4);
                    encoder.rex(.{
                        .w = abi_size == 8,
                        .r = dst_reg.isExtended(),
                        .b = src_reg.isExtended(),
                    });
                    encoder.opcode_2byte(0x0f, 0xaf);
                    encoder.modRm_direct(
                        dst_reg.low_id(),
                        src_reg.low_id(),
                    );
                    // copy dst_reg back out
                    return self.genSetStack(dst_ty, off, MCValue{ .register = dst_reg });
                },
                .immediate => |imm| {
                    _ = imm;
                    return self.fail("TODO implement x86 multiply source immediate", .{});
                },
                .embedded_in_code, .memory, .stack_offset => {
                    return self.fail("TODO implement x86 multiply source memory", .{});
                },
                .compare_flags_unsigned => {
                    return self.fail("TODO implement x86 multiply source compare flag (unsigned)", .{});
                },
                .compare_flags_signed => {
                    return self.fail("TODO implement x86 multiply source compare flag (signed)", .{});
                },
            }
        },
        .embedded_in_code, .memory => {
            return self.fail("TODO implement x86 multiply destination memory", .{});
        },
    }
}
/// Emits an x86_64 instruction of the form `opcode r/m, reg` (or the
/// reverse, depending on `opcode`) where the r/m operand is the stack slot
/// `[rbp - (off + abi_size)]`.
fn genX8664ModRMRegToStack(self: *Self, ty: Type, off: u32, reg: Register, opcode: u8) !void {
    const abi_size = ty.abiSize(self.target.*);
    const adj_off = off + abi_size;
    // Fix: validate the *adjusted* offset — adj_off (not off) is what gets
    // negated and encoded as the displacement below.
    if (adj_off > math.maxInt(i32)) {
        return self.fail("stack offset too large", .{});
    }
    const i_adj_off = -@intCast(i32, adj_off);
    const encoder = try X8664Encoder.init(self.code, 7);
    encoder.rex(.{
        .w = abi_size == 8,
        .r = reg.isExtended(),
    });
    encoder.opcode_1byte(opcode);
    // Fix: i_adj_off is always <= 0, so the old test
    // `i_adj_off < maxInt(i8)` was always true, making the disp32 branch
    // unreachable and the @intCast(i8, ...) below trap for any
    // displacement beyond -128. Select disp8 only when the (negative)
    // displacement actually fits in a signed byte.
    if (i_adj_off >= std.math.minInt(i8)) {
        // example: 48 89 55 80          mov QWORD PTR [rbp-0x80],rdx
        encoder.modRm_indirectDisp8(
            reg.low_id(),
            Register.ebp.low_id(),
        );
        encoder.disp8(@intCast(i8, i_adj_off));
    } else {
        // example: 48 89 95 7c ff ff ff mov QWORD PTR [rbp-0x84],rdx
        encoder.modRm_indirectDisp32(
            reg.low_id(),
            Register.ebp.low_id(),
        );
        encoder.disp32(i_adj_off);
    }
}
fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, mcv: MCValue) !void {
    // Emits a DWARF `parameter` entry into .debug_info describing where the
    // function argument `inst` lives (a register or a stack slot). When debug
    // output is disabled (.none) this is a no-op.
    const ty_str = self.air.instructions.items(.data)[inst].ty_str;
    // The argument's name is stored as a null-terminated string in the
    // originating ZIR of the function's owner decl.
    const zir = &self.mod_fn.owner_decl.namespace.file_scope.zir;
    const name = zir.nullTerminatedString(ty_str.str);
    // Keep the trailing NUL: DW.FORM_string is written null-terminated.
    const name_with_null = name.ptr[0 .. name.len + 1];
    const ty = self.air.getRefType(ty_str.ty);
    switch (mcv) {
        .register => |reg| {
            switch (self.debug_output) {
                .dwarf => |dbg_out| {
                    // 3 bytes: abbrev code + exprloc length + register loc op.
                    try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 3);
                    dbg_out.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter);
                    dbg_out.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT_location, DW.FORM_exprloc
                        1, // ULEB128 dwarf expression length
                        reg.dwarfLocOp(),
                    });
                    // 4-byte type ref + abbrev-relative slack + the name bytes.
                    try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 5 + name_with_null.len);
                    try self.addDbgInfoTypeReloc(ty); // DW.AT_type, DW.FORM_ref4
                    dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string
                },
                .none => {},
            }
        },
        .stack_offset => |offset| {
            switch (self.debug_output) {
                .dwarf => |dbg_out| {
                    switch (arch) {
                        // Stack-argument debug info is only implemented for ARM
                        // here; other arches silently emit nothing (else => {}).
                        .arm, .armeb => {
                            const abi_size = math.cast(u32, ty.abiSize(self.target.*)) catch {
                                return self.fail("type '{}' too big to fit into stack frame", .{ty});
                            };
                            // Frame slots sit below the frame base, so the
                            // encoded offset is the negated adjusted offset.
                            const adjusted_stack_offset = math.negateCast(offset + abi_size) catch {
                                return self.fail("Stack offset too large for arguments", .{});
                            };
                            try dbg_out.dbg_info.append(link.File.Elf.abbrev_parameter);
                            // Get length of the LEB128 stack offset
                            var counting_writer = std.io.countingWriter(std.io.null_writer);
                            leb128.writeILEB128(counting_writer.writer(), adjusted_stack_offset) catch unreachable;
                            // DW.AT_location, DW.FORM_exprloc
                            // ULEB128 dwarf expression length
                            try leb128.writeULEB128(dbg_out.dbg_info.writer(), counting_writer.bytes_written + 1);
                            // DW.OP_breg11: address is r11 (ARM frame pointer) plus
                            // the following SLEB128 offset.
                            try dbg_out.dbg_info.append(DW.OP_breg11);
                            try leb128.writeILEB128(dbg_out.dbg_info.writer(), adjusted_stack_offset);
                            try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 5 + name_with_null.len);
                            try self.addDbgInfoTypeReloc(ty); // DW.AT_type, DW.FORM_ref4
                            dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string
                        },
                        else => {},
                    }
                },
                .none => {},
            }
        },
        else => {},
    }
}
fn airArg(self: *Self, inst: Air.Inst.Index) !void {
    // Lowers an `arg` AIR instruction: takes the next calling-convention
    // location, optionally spills register arguments to the stack (on the
    // arches that require it), emits argument debug info, and records the
    // resulting MCValue for this instruction.
    const index = self.arg_index;
    self.arg_index += 1;

    const arg_ty = self.air.typeOfIndex(inst);
    const cc_mcv = self.args[index];

    const final_mcv = switch (arch) {
        // TODO support stack-only arguments on all target architectures
        .arm, .armeb, .aarch64, .aarch64_32, .aarch64_be => switch (cc_mcv) {
            // Copy registers to the stack
            .register => |src_reg| spill: {
                const size = math.cast(u32, arg_ty.abiSize(self.target.*)) catch {
                    return self.fail("type '{}' too big to fit into stack frame", .{arg_ty});
                };
                const alignment = arg_ty.abiAlignment(self.target.*);
                const slot = try self.allocMem(inst, size, alignment);
                try self.genSetStack(arg_ty, slot, MCValue{ .register = src_reg });
                break :spill MCValue{ .stack_offset = slot };
            },
            else => cc_mcv,
        },
        else => cc_mcv,
    };

    try self.genArgDbgInfo(inst, final_mcv);

    if (self.liveness.isUnused(inst))
        return self.finishAirBookkeeping();

    // If the argument stayed in a register, mark that register as owned by
    // this instruction.
    switch (final_mcv) {
        .register => |reg| self.register_manager.getRegAssumeFree(toCanonicalReg(reg), inst),
        else => {},
    }
    return self.finishAir(inst, final_mcv, .{ .none, .none, .none });
}
fn airBreakpoint(self: *Self) !void {
    // Emits the architecture's canonical debug-trap instruction.
    switch (arch) {
        .arm, .armeb => {
            // BKPT #0
            writeInt(u32, try self.code.addManyAsArray(4), Instruction.bkpt(0).toU32());
        },
        .aarch64 => {
            // BRK #1
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.brk(1).toU32());
        },
        .riscv64 => {
            // EBREAK
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ebreak.toU32());
        },
        .i386, .x86_64 => {
            // INT3 (single-byte 0xCC)
            try self.code.append(0xcc);
        },
        else => return self.fail("TODO implement @breakpoint() for {}", .{self.target.cpu.arch}),
    }
    return self.finishAirBookkeeping();
}
fn airCall(self: *Self, inst: Air.Inst.Index) !void {
    // Lowers a function call: moves each argument into its ABI-assigned
    // location (register or stack), emits the call sequence appropriate for
    // the current arch + output format (ELF/COFF, MachO, Plan9), and records
    // where the return value lives. Direct calls go through the GOT so that
    // incremental recompilation can patch the target without re-emitting
    // callers.
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const fn_ty = self.air.typeOf(pl_op.operand);
    const callee = pl_op.operand;
    const extra = self.air.extraData(Air.Call, pl_op.payload);
    const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
    var info = try self.resolveCallingConventionValues(fn_ty);
    defer info.deinit(self);
    // Due to incremental compilation, how function calls are generated depends
    // on linking.
    if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) {
        switch (arch) {
            .x86_64 => {
                // Materialize each argument in the location chosen by
                // resolveCallingConventionValues.
                for (info.args) |mc_arg, arg_i| {
                    const arg = args[arg_i];
                    const arg_ty = self.air.typeOf(arg);
                    const arg_mcv = try self.resolveInst(args[arg_i]);
                    // Here we do not use setRegOrMem even though the logic is similar, because
                    // the function call will move the stack pointer, so the offsets are different.
                    switch (mc_arg) {
                        .none => continue,
                        .register => |reg| {
                            // Reserve the ABI register, then load the argument into it.
                            try self.register_manager.getReg(reg, null);
                            try self.genSetReg(arg_ty, reg, arg_mcv);
                        },
                        .stack_offset => |off| {
                            // Here we need to emit instructions like this:
                            // mov qword ptr [rsp + stack_offset], x
                            try self.genSetStack(arg_ty, off, arg_mcv);
                        },
                        .ptr_stack_offset => {
                            return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
                        },
                        .ptr_embedded_in_code => {
                            return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
                        },
                        // The remaining MCValue tags are never produced as
                        // argument locations by the calling convention.
                        .undef => unreachable,
                        .immediate => unreachable,
                        .unreach => unreachable,
                        .dead => unreachable,
                        .embedded_in_code => unreachable,
                        .memory => unreachable,
                        .compare_flags_signed => unreachable,
                        .compare_flags_unsigned => unreachable,
                    }
                }
                if (self.air.value(callee)) |func_value| {
                    if (func_value.castTag(.function)) |func_payload| {
                        const func = func_payload.data;
                        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
                        const ptr_bytes: u64 = @divExact(ptr_bits, 8);
                        // Compute the address of the callee's GOT slot in the
                        // output file (ELF program header or COFF offset table).
                        const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
                            const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
                            break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
                        } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
                            @intCast(u32, coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes)
                        else
                            unreachable;
                        // ff 14 25 xx xx xx xx    call [addr]
                        try self.code.ensureCapacity(self.code.items.len + 7);
                        self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
                        mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), got_addr);
                    } else if (func_value.castTag(.extern_fn)) |_| {
                        return self.fail("TODO implement calling extern functions", .{});
                    } else {
                        return self.fail("TODO implement calling bitcasted functions", .{});
                    }
                } else {
                    return self.fail("TODO implement calling runtime known function pointer", .{});
                }
            },
            .riscv64 => {
                if (info.args.len > 0) return self.fail("TODO implement fn args for {}", .{self.target.cpu.arch});
                if (self.air.value(callee)) |func_value| {
                    if (func_value.castTag(.function)) |func_payload| {
                        const func = func_payload.data;
                        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
                        const ptr_bytes: u64 = @divExact(ptr_bits, 8);
                        const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
                            const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
                            break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
                        } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
                            coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes
                        else
                            unreachable;
                        // Load the callee address from the GOT into ra, then
                        // jump-and-link through ra.
                        try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr });
                        mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.jalr(.ra, 0, .ra).toU32());
                    } else if (func_value.castTag(.extern_fn)) |_| {
                        return self.fail("TODO implement calling extern functions", .{});
                    } else {
                        return self.fail("TODO implement calling bitcasted functions", .{});
                    }
                } else {
                    return self.fail("TODO implement calling runtime known function pointer", .{});
                }
            },
            .arm, .armeb => {
                for (info.args) |mc_arg, arg_i| {
                    const arg = args[arg_i];
                    const arg_ty = self.air.typeOf(arg);
                    const arg_mcv = try self.resolveInst(args[arg_i]);
                    switch (mc_arg) {
                        .none => continue,
                        .undef => unreachable,
                        .immediate => unreachable,
                        .unreach => unreachable,
                        .dead => unreachable,
                        .embedded_in_code => unreachable,
                        .memory => unreachable,
                        .compare_flags_signed => unreachable,
                        .compare_flags_unsigned => unreachable,
                        .register => |reg| {
                            try self.register_manager.getReg(reg, null);
                            try self.genSetReg(arg_ty, reg, arg_mcv);
                        },
                        .stack_offset => {
                            return self.fail("TODO implement calling with parameters in memory", .{});
                        },
                        .ptr_stack_offset => {
                            return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
                        },
                        .ptr_embedded_in_code => {
                            return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
                        },
                    }
                }
                if (self.air.value(callee)) |func_value| {
                    if (func_value.castTag(.function)) |func_payload| {
                        const func = func_payload.data;
                        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
                        const ptr_bytes: u64 = @divExact(ptr_bits, 8);
                        const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
                            const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
                            break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
                        } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
                            coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes
                        else
                            unreachable;
                        // Load the callee address into lr, then branch through it.
                        try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
                        // TODO: add Instruction.supportedOn
                        // function for ARM
                        if (Target.arm.featureSetHas(self.target.cpu.features, .has_v5t)) {
                            writeInt(u32, try self.code.addManyAsArray(4), Instruction.blx(.al, .lr).toU32());
                        } else {
                            // Pre-v5T has no BLX: set up the return address in lr
                            // manually (mov lr, pc), then branch with BX.
                            writeInt(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, .lr, Instruction.Operand.reg(.pc, Instruction.Operand.Shift.none)).toU32());
                            writeInt(u32, try self.code.addManyAsArray(4), Instruction.bx(.al, .lr).toU32());
                        }
                    } else if (func_value.castTag(.extern_fn)) |_| {
                        return self.fail("TODO implement calling extern functions", .{});
                    } else {
                        return self.fail("TODO implement calling bitcasted functions", .{});
                    }
                } else {
                    return self.fail("TODO implement calling runtime known function pointer", .{});
                }
            },
            .aarch64 => {
                for (info.args) |mc_arg, arg_i| {
                    const arg = args[arg_i];
                    const arg_ty = self.air.typeOf(arg);
                    const arg_mcv = try self.resolveInst(args[arg_i]);
                    switch (mc_arg) {
                        .none => continue,
                        .undef => unreachable,
                        .immediate => unreachable,
                        .unreach => unreachable,
                        .dead => unreachable,
                        .embedded_in_code => unreachable,
                        .memory => unreachable,
                        .compare_flags_signed => unreachable,
                        .compare_flags_unsigned => unreachable,
                        .register => |reg| {
                            try self.register_manager.getReg(reg, null);
                            try self.genSetReg(arg_ty, reg, arg_mcv);
                        },
                        .stack_offset => {
                            return self.fail("TODO implement calling with parameters in memory", .{});
                        },
                        .ptr_stack_offset => {
                            return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
                        },
                        .ptr_embedded_in_code => {
                            return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
                        },
                    }
                }
                if (self.air.value(callee)) |func_value| {
                    if (func_value.castTag(.function)) |func_payload| {
                        const func = func_payload.data;
                        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
                        const ptr_bytes: u64 = @divExact(ptr_bits, 8);
                        const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
                            const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
                            break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
                        } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
                            coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes
                        else
                            unreachable;
                        // Load the callee address into x30 (lr), then BLR x30.
                        try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr });
                        writeInt(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32());
                    } else if (func_value.castTag(.extern_fn)) |_| {
                        return self.fail("TODO implement calling extern functions", .{});
                    } else {
                        return self.fail("TODO implement calling bitcasted functions", .{});
                    }
                } else {
                    return self.fail("TODO implement calling runtime known function pointer", .{});
                }
            },
            else => return self.fail("TODO implement call for {}", .{self.target.cpu.arch}),
        }
    } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
        for (info.args) |mc_arg, arg_i| {
            const arg = args[arg_i];
            const arg_ty = self.air.typeOf(arg);
            const arg_mcv = try self.resolveInst(args[arg_i]);
            // Here we do not use setRegOrMem even though the logic is similar, because
            // the function call will move the stack pointer, so the offsets are different.
            switch (mc_arg) {
                .none => continue,
                .register => |reg| {
                    // TODO prevent this macho if block to be generated for all archs
                    switch (arch) {
                        .x86_64, .aarch64 => try self.register_manager.getReg(reg, null),
                        else => unreachable,
                    }
                    try self.genSetReg(arg_ty, reg, arg_mcv);
                },
                .stack_offset => {
                    // Here we need to emit instructions like this:
                    // mov qword ptr [rsp + stack_offset], x
                    return self.fail("TODO implement calling with parameters in memory", .{});
                },
                .ptr_stack_offset => {
                    return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
                },
                .ptr_embedded_in_code => {
                    return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
                },
                .undef => unreachable,
                .immediate => unreachable,
                .unreach => unreachable,
                .dead => unreachable,
                .embedded_in_code => unreachable,
                .memory => unreachable,
                .compare_flags_signed => unreachable,
                .compare_flags_unsigned => unreachable,
            }
        }
        if (self.air.value(callee)) |func_value| {
            if (func_value.castTag(.function)) |func_payload| {
                const func = func_payload.data;
                // Locate the callee's GOT entry inside the __DATA_CONST
                // segment of the MachO file.
                const got_addr = blk: {
                    const seg = macho_file.load_commands.items[macho_file.data_const_segment_cmd_index.?].Segment;
                    const got = seg.sections.items[macho_file.got_section_index.?];
                    const got_index = macho_file.got_entries_map.get(.{
                        .where = .local,
                        .where_index = func.owner_decl.link.macho.local_sym_index,
                    }) orelse unreachable;
                    break :blk got.addr + got_index * @sizeOf(u64);
                };
                switch (arch) {
                    .x86_64 => {
                        try self.genSetReg(Type.initTag(.u64), .rax, .{ .memory = got_addr });
                        // callq *%rax
                        try self.code.ensureCapacity(self.code.items.len + 2);
                        self.code.appendSliceAssumeCapacity(&[2]u8{ 0xff, 0xd0 });
                    },
                    .aarch64 => {
                        try self.genSetReg(Type.initTag(.u64), .x30, .{ .memory = got_addr });
                        // blr x30
                        writeInt(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32());
                    },
                    else => unreachable, // unsupported architecture on MachO
                }
            } else if (func_value.castTag(.extern_fn)) |func_payload| {
                // Extern call: emit a branch with a zero placeholder target
                // and let the linker fix it up via the recorded relocation.
                const decl = func_payload.data;
                const where_index = try macho_file.addExternFn(mem.spanZ(decl.name));
                const offset = blk: {
                    switch (arch) {
                        .x86_64 => {
                            // callq
                            try self.code.ensureCapacity(self.code.items.len + 5);
                            self.code.appendSliceAssumeCapacity(&[5]u8{ 0xe8, 0x0, 0x0, 0x0, 0x0 });
                            // Relocation applies to the 4-byte rel32 operand.
                            break :blk @intCast(u32, self.code.items.len) - 4;
                        },
                        .aarch64 => {
                            const offset = @intCast(u32, self.code.items.len);
                            // bl
                            writeInt(u32, try self.code.addManyAsArray(4), Instruction.bl(0).toU32());
                            break :blk offset;
                        },
                        else => unreachable, // unsupported architecture on MachO
                    }
                };
                // Add relocation to the decl.
                try macho_file.active_decl.?.link.macho.relocs.append(self.bin_file.allocator, .{
                    .offset = offset,
                    .where = .undef,
                    .where_index = where_index,
                    .payload = .{ .branch = .{
                        .arch = arch,
                    } },
                });
            } else {
                return self.fail("TODO implement calling bitcasted functions", .{});
            }
        } else {
            return self.fail("TODO implement calling runtime known function pointer", .{});
        }
    } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
        switch (arch) {
            .x86_64 => {
                for (info.args) |mc_arg, arg_i| {
                    const arg = args[arg_i];
                    const arg_ty = self.air.typeOf(arg);
                    const arg_mcv = try self.resolveInst(args[arg_i]);
                    // Here we do not use setRegOrMem even though the logic is similar, because
                    // the function call will move the stack pointer, so the offsets are different.
                    switch (mc_arg) {
                        .none => continue,
                        .register => |reg| {
                            try self.register_manager.getReg(reg, null);
                            try self.genSetReg(arg_ty, reg, arg_mcv);
                        },
                        .stack_offset => {
                            // Here we need to emit instructions like this:
                            // mov qword ptr [rsp + stack_offset], x
                            return self.fail("TODO implement calling with parameters in memory", .{});
                        },
                        .ptr_stack_offset => {
                            return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
                        },
                        .ptr_embedded_in_code => {
                            return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
                        },
                        .undef => unreachable,
                        .immediate => unreachable,
                        .unreach => unreachable,
                        .dead => unreachable,
                        .embedded_in_code => unreachable,
                        .memory => unreachable,
                        .compare_flags_signed => unreachable,
                        .compare_flags_unsigned => unreachable,
                    }
                }
                if (self.air.value(callee)) |func_value| {
                    if (func_value.castTag(.function)) |func_payload| {
                        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
                        const ptr_bytes: u64 = @divExact(ptr_bits, 8);
                        // Plan9 keeps function addresses in a data-section
                        // table; index it by the callee's got_index.
                        const got_addr = p9.bases.data;
                        const got_index = func_payload.data.owner_decl.link.plan9.got_index.?;
                        // ff 14 25 xx xx xx xx    call [addr]
                        try self.code.ensureCapacity(self.code.items.len + 7);
                        self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
                        const fn_got_addr = got_addr + got_index * ptr_bytes;
                        mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), @intCast(u32, fn_got_addr));
                    } else return self.fail("TODO implement calling extern fn on plan9", .{});
                } else {
                    return self.fail("TODO implement calling runtime known function pointer", .{});
                }
            },
            .aarch64 => {
                for (info.args) |mc_arg, arg_i| {
                    const arg = args[arg_i];
                    const arg_ty = self.air.typeOf(arg);
                    const arg_mcv = try self.resolveInst(args[arg_i]);
                    switch (mc_arg) {
                        .none => continue,
                        .undef => unreachable,
                        .immediate => unreachable,
                        .unreach => unreachable,
                        .dead => unreachable,
                        .embedded_in_code => unreachable,
                        .memory => unreachable,
                        .compare_flags_signed => unreachable,
                        .compare_flags_unsigned => unreachable,
                        .register => |reg| {
                            try self.register_manager.getReg(reg, null);
                            try self.genSetReg(arg_ty, reg, arg_mcv);
                        },
                        .stack_offset => {
                            return self.fail("TODO implement calling with parameters in memory", .{});
                        },
                        .ptr_stack_offset => {
                            return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
                        },
                        .ptr_embedded_in_code => {
                            return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
                        },
                    }
                }
                if (self.air.value(callee)) |func_value| {
                    if (func_value.castTag(.function)) |func_payload| {
                        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
                        const ptr_bytes: u64 = @divExact(ptr_bits, 8);
                        const got_addr = p9.bases.data;
                        const got_index = func_payload.data.owner_decl.link.plan9.got_index.?;
                        const fn_got_addr = got_addr + got_index * ptr_bytes;
                        try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr });
                        writeInt(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32());
                    } else if (func_value.castTag(.extern_fn)) |_| {
                        return self.fail("TODO implement calling extern functions", .{});
                    } else {
                        return self.fail("TODO implement calling bitcasted functions", .{});
                    }
                } else {
                    return self.fail("TODO implement calling runtime known function pointer", .{});
                }
            },
            else => return self.fail("TODO implement call on plan9 for {}", .{self.target.cpu.arch}),
        }
    } else unreachable;
    // Decide where the call's result lives for the rest of codegen. A return
    // value sitting in a non-allocatable register must be copied out so later
    // code does not clobber it.
    const result: MCValue = result: {
        switch (info.return_value) {
            .register => |reg| {
                if (Register.allocIndex(reg) == null) {
                    // Save function return value in a callee saved register
                    break :result try self.copyToNewRegister(inst, info.return_value);
                }
            },
            else => {},
        }
        break :result info.return_value;
    };
    // Liveness bookkeeping: small calls fit in a single finishAir buffer;
    // larger argument lists use the big-tomb iterator.
    if (args.len <= Liveness.bpi - 2) {
        var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
        buf[0] = callee;
        std.mem.copy(Air.Inst.Ref, buf[1..], args);
        return self.finishAir(inst, result, buf);
    }
    var bt = try self.iterateBigTomb(inst, 1 + args.len);
    bt.feed(callee);
    for (args) |arg| {
        bt.feed(arg);
    }
    return bt.finishAir(result);
}
fn ret(self: *Self, mcv: MCValue) !void {
    // Moves `mcv` into the function's return location, then emits (or
    // reserves space for) the jump to the shared function exitlude.
    const ret_ty = self.fn_type.fnReturnType();
    try self.setRegOrMem(ret_ty, self.ret_mcv, mcv);
    switch (arch) {
        .i386 => {
            try self.code.append(0xc3); // ret
        },
        .x86_64 => {
            // TODO when implementing defer, this will need to jump to the appropriate defer expression.
            // TODO optimization opportunity: figure out when we can emit this as a 2 byte instruction
            // which is available if the jump is 127 bytes or less forward.
            try self.code.resize(self.code.items.len + 5);
            self.code.items[self.code.items.len - 5] = 0xe9; // jmp rel32
            try self.exitlude_jump_relocs.append(self.gpa, self.code.items.len - 4);
        },
        .riscv64 => {
            // jalr zero, 0(ra) -- plain return through ra.
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.jalr(.zero, 0, .ra).toU32());
        },
        .arm, .armeb, .aarch64 => {
            // Reserve one 4-byte instruction slot for the exitlude branch and
            // record its position; it is patched later.
            try self.code.resize(self.code.items.len + 4);
            try self.exitlude_jump_relocs.append(self.gpa, self.code.items.len - 4);
        },
        else => return self.fail("TODO implement return for {}", .{self.target.cpu.arch}),
    }
}
fn airRet(self: *Self, inst: Air.Inst.Index) !void {
    // Resolve the returned operand, emit the return sequence, and mark the
    // instruction dead (a `ret` produces no value).
    const operand_ref = self.air.instructions.items(.data)[inst].un_op;
    const ret_val = try self.resolveInst(operand_ref);
    try self.ret(ret_val);
    return self.finishAir(inst, .dead, .{ operand_ref, .none, .none });
}
fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
    // Lowers a comparison instruction. The result is not a materialized
    // boolean but a compare_flags_* MCValue carrying `op`, so a subsequent
    // branch can consume the CPU flags directly.
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst))
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    const ty = self.air.typeOf(bin_op.lhs);
    // Both operands must have the same type for a direct machine compare.
    assert(ty.eql(self.air.typeOf(bin_op.rhs)));
    if (ty.zigTypeTag() == .ErrorSet)
        return self.fail("TODO implement cmp for errors", .{});
    const lhs = try self.resolveInst(bin_op.lhs);
    const rhs = try self.resolveInst(bin_op.rhs);
    const result: MCValue = switch (arch) {
        .x86_64 => result: {
            try self.code.ensureCapacity(self.code.items.len + 8);
            // There are 2 operands, destination and source.
            // Either one, but not both, can be a memory operand.
            // Source operand can be an immediate, 8 bits or 32 bits.
            const dst_mcv = if (lhs.isImmediate() or (lhs.isMemory() and rhs.isMemory()))
                try self.copyToNewRegister(inst, lhs)
            else
                lhs;
            // This instruction supports only signed 32-bit immediates at most.
            const src_mcv = try self.limitImmediateType(bin_op.rhs, i32);
            // /7, 0x38: the CMP opcode family in genX8664BinMathCode.
            try self.genX8664BinMathCode(Type.initTag(.bool), dst_mcv, src_mcv, 7, 0x38);
            // Signedness picks which condition codes the consumer must use.
            break :result switch (ty.isSignedInt()) {
                true => MCValue{ .compare_flags_signed = op },
                false => MCValue{ .compare_flags_unsigned = op },
            };
        },
        .arm, .armeb => result: {
            const lhs_is_register = lhs == .register;
            const rhs_is_register = rhs == .register;
            // lhs should always be a register
            const rhs_should_be_register = try self.armOperandShouldBeRegister(rhs);
            var lhs_mcv = lhs;
            var rhs_mcv = rhs;
            // Allocate registers
            if (rhs_should_be_register) {
                if (!lhs_is_register and !rhs_is_register) {
                    // Neither operand is in a register yet: grab two at once.
                    const regs = try self.register_manager.allocRegs(2, .{
                        Air.refToIndex(bin_op.rhs).?, Air.refToIndex(bin_op.lhs).?,
                    }, &.{});
                    lhs_mcv = MCValue{ .register = regs[0] };
                    rhs_mcv = MCValue{ .register = regs[1] };
                } else if (!rhs_is_register) {
                    rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.rhs).?, &.{}) };
                }
            }
            if (!lhs_is_register) {
                lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.lhs).?, &.{}) };
            }
            // Move the operands to the newly allocated registers
            // NOTE(review): the branch table records the operands' *old*
            // locations, not the freshly allocated registers — presumably so
            // later code still finds the canonical value; confirm intent.
            const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
            if (lhs_mcv == .register and !lhs_is_register) {
                try self.genSetReg(ty, lhs_mcv.register, lhs);
                branch.inst_table.putAssumeCapacity(Air.refToIndex(bin_op.lhs).?, lhs);
            }
            if (rhs_mcv == .register and !rhs_is_register) {
                try self.genSetReg(ty, rhs_mcv.register, rhs);
                branch.inst_table.putAssumeCapacity(Air.refToIndex(bin_op.rhs).?, rhs);
            }
            // The destination register is not present in the cmp instruction
            // The signedness of the integer does not matter for the cmp instruction
            try self.genArmBinOpCode(undefined, lhs_mcv, rhs_mcv, false, .cmp_eq, undefined);
            break :result switch (ty.isSignedInt()) {
                true => MCValue{ .compare_flags_signed = op },
                false => MCValue{ .compare_flags_unsigned = op },
            };
        },
        else => return self.fail("TODO implement cmp for {}", .{self.target.cpu.arch}),
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
    // Advance the debug-info line table to this statement's source position;
    // produces no machine value.
    const stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
    try self.dbgAdvancePCAndLine(stmt.line, stmt.column);
    return self.finishAirBookkeeping();
}
fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
    // Lowers a conditional branch. Emits a jump-if-false over the `then`
    // body, generates both bodies with saved/restored allocation state, and
    // finally reconciles the two branches' ideas of where each live value
    // resides (the `then` branch is canonical).
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const cond = try self.resolveInst(pl_op.operand);
    const extra = self.air.extraData(Air.CondBr, pl_op.payload);
    const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
    const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
    const liveness_condbr = self.liveness.getCondBr(inst);
    const reloc: Reloc = switch (arch) {
        .i386, .x86_64 => reloc: {
            try self.code.ensureCapacity(self.code.items.len + 6);
            const opcode: u8 = switch (cond) {
                .compare_flags_signed => |cmp_op| blk: {
                    // Here we map to the opposite opcode because the jump is to the false branch.
                    const opcode: u8 = switch (cmp_op) {
                        .gte => 0x8c, // jl
                        .gt => 0x8e, // jle
                        .neq => 0x84, // je
                        .lt => 0x8d, // jge
                        .lte => 0x8f, // jg
                        .eq => 0x85, // jne
                    };
                    break :blk opcode;
                },
                .compare_flags_unsigned => |cmp_op| blk: {
                    // Here we map to the opposite opcode because the jump is to the false branch.
                    const opcode: u8 = switch (cmp_op) {
                        .gte => 0x82, // jb
                        .gt => 0x86, // jbe
                        .neq => 0x84, // je
                        .lt => 0x83, // jae
                        .lte => 0x87, // ja
                        .eq => 0x85, // jne
                    };
                    break :blk opcode;
                },
                .register => |reg| blk: {
                    // Boolean in a register: test its low bit and jump if zero.
                    // test reg, 1
                    // TODO detect al, ax, eax
                    const encoder = try X8664Encoder.init(self.code, 4);
                    encoder.rex(.{
                        // TODO audit this codegen: we force w = true here to make
                        // the value affect the big register
                        .w = true,
                        .b = reg.isExtended(),
                    });
                    encoder.opcode_1byte(0xf6);
                    encoder.modRm_direct(
                        0,
                        reg.low_id(),
                    );
                    encoder.disp8(1);
                    break :blk 0x84; // je (ZF set => low bit was 0 => false)
                },
                else => return self.fail("TODO implement condbr {s} when condition is {s}", .{ self.target.cpu.arch, @tagName(cond) }),
            };
            // 0x0f <opcode> rel32 -- leave a 4-byte hole to patch later.
            self.code.appendSliceAssumeCapacity(&[_]u8{ 0x0f, opcode });
            const reloc = Reloc{ .rel32 = self.code.items.len };
            self.code.items.len += 4;
            break :reloc reloc;
        },
        .arm, .armeb => reloc: {
            const condition: Condition = switch (cond) {
                .compare_flags_signed => |cmp_op| blk: {
                    // Here we map to the opposite condition because the jump is to the false branch.
                    const condition = Condition.fromCompareOperatorSigned(cmp_op);
                    break :blk condition.negate();
                },
                .compare_flags_unsigned => |cmp_op| blk: {
                    // Here we map to the opposite condition because the jump is to the false branch.
                    const condition = Condition.fromCompareOperatorUnsigned(cmp_op);
                    break :blk condition.negate();
                },
                .register => |reg| blk: {
                    // cmp reg, 1
                    // bne ...
                    const op = Instruction.Operand.imm(1, 0);
                    writeInt(u32, try self.code.addManyAsArray(4), Instruction.cmp(.al, reg, op).toU32());
                    break :blk .ne;
                },
                else => return self.fail("TODO implement condbr {} when condition is {s}", .{ self.target.cpu.arch, @tagName(cond) }),
            };
            // Reserve a 4-byte slot for the conditional branch; patched in
            // performReloc once the then-body length is known.
            const reloc = Reloc{
                .arm_branch = .{
                    .pos = self.code.items.len,
                    .cond = condition,
                },
            };
            try self.code.resize(self.code.items.len + 4);
            break :reloc reloc;
        },
        else => return self.fail("TODO implement condbr {}", .{self.target.cpu.arch}),
    };
    // Capture the state of register and stack allocation state so that we can revert to it.
    const parent_next_stack_offset = self.next_stack_offset;
    const parent_free_registers = self.register_manager.free_registers;
    var parent_stack = try self.stack.clone(self.gpa);
    defer parent_stack.deinit(self.gpa);
    const parent_registers = self.register_manager.registers;
    try self.branch_stack.append(.{});
    // Process deaths scheduled for the then-branch, then generate it.
    try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len);
    for (liveness_condbr.then_deaths) |operand| {
        self.processDeath(operand);
    }
    try self.genBody(then_body);
    // Revert to the previous register and stack allocation state.
    var saved_then_branch = self.branch_stack.pop();
    defer saved_then_branch.deinit(self.gpa);
    self.register_manager.registers = parent_registers;
    self.stack.deinit(self.gpa);
    self.stack = parent_stack;
    parent_stack = .{};
    self.next_stack_offset = parent_next_stack_offset;
    self.register_manager.free_registers = parent_free_registers;
    // Patch the jump-if-false to land here, at the start of the else body.
    try self.performReloc(reloc);
    const else_branch = self.branch_stack.addOneAssumeCapacity();
    else_branch.* = .{};
    try self.ensureProcessDeathCapacity(liveness_condbr.else_deaths.len);
    for (liveness_condbr.else_deaths) |operand| {
        self.processDeath(operand);
    }
    try self.genBody(else_body);
    // At this point, each branch will possibly have conflicting values for where
    // each instruction is stored. They agree, however, on which instructions are alive/dead.
    // We use the first ("then") branch as canonical, and here emit
    // instructions into the second ("else") branch to make it conform.
    // We continue respect the data structure semantic guarantees of the else_branch so
    // that we can use all the code emitting abstractions. This is why at the bottom we
    // assert that parent_branch.free_registers equals the saved_then_branch.free_registers
    // rather than assigning it.
    const parent_branch = &self.branch_stack.items[self.branch_stack.items.len - 2];
    try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, else_branch.inst_table.count());
    const else_slice = else_branch.inst_table.entries.slice();
    const else_keys = else_slice.items(.key);
    const else_values = else_slice.items(.value);
    for (else_keys) |else_key, else_idx| {
        const else_value = else_values[else_idx];
        // Find the canonical location for this instruction: the then-branch's
        // value if it was overridden there too, otherwise the value recorded
        // by some enclosing branch.
        const canon_mcv = if (saved_then_branch.inst_table.fetchSwapRemove(else_key)) |then_entry| blk: {
            // The instruction's MCValue is overridden in both branches.
            parent_branch.inst_table.putAssumeCapacity(else_key, then_entry.value);
            if (else_value == .dead) {
                assert(then_entry.value == .dead);
                continue;
            }
            break :blk then_entry.value;
        } else blk: {
            if (else_value == .dead)
                continue;
            // The instruction is only overridden in the else branch.
            var i: usize = self.branch_stack.items.len - 2;
            while (true) {
                i -= 1; // If this overflows, the question is: why wasn't the instruction marked dead?
                if (self.branch_stack.items[i].inst_table.get(else_key)) |mcv| {
                    assert(mcv != .dead);
                    break :blk mcv;
                }
            }
        };
        log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv });
        // TODO make sure the destination stack offset / register does not already have something
        // going on there.
        try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value);
        // TODO track the new register / stack allocation
    }
    try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count());
    const then_slice = saved_then_branch.inst_table.entries.slice();
    const then_keys = then_slice.items(.key);
    const then_values = then_slice.items(.value);
    for (then_keys) |then_key, then_idx| {
        const then_value = then_values[then_idx];
        // We already deleted the items from this table that matched the else_branch.
        // So these are all instructions that are only overridden in the then branch.
        parent_branch.inst_table.putAssumeCapacity(then_key, then_value);
        if (then_value == .dead)
            continue;
        const parent_mcv = blk: {
            var i: usize = self.branch_stack.items.len - 2;
            while (true) {
                i -= 1;
                if (self.branch_stack.items[i].inst_table.get(then_key)) |mcv| {
                    assert(mcv != .dead);
                    break :blk mcv;
                }
            }
        };
        log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value });
        // TODO make sure the destination stack offset / register does not already have something
        // going on there.
        try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value);
        // TODO track the new register / stack allocation
    }
    self.branch_stack.pop().deinit(self.gpa);
    return self.finishAir(inst, .unreach, .{ pl_op.operand, .none, .none });
}
/// Lower an "is null" check on `operand`.
/// No architecture specializes this yet; the intended default is to
/// evaluate isNonNull and invert the result.
fn isNull(self: *Self, operand: MCValue) !MCValue {
    _ = operand;
    return self.fail("TODO call isNonNull and invert the result", .{});
}
/// Lower an "is non-null" check on `operand`.
/// No architecture specializes this yet; the intended default is to
/// evaluate isNull and invert the result.
fn isNonNull(self: *Self, operand: MCValue) !MCValue {
    _ = operand;
    return self.fail("TODO call isNull and invert the result", .{});
}
/// Lower an "is error" check on `operand`.
/// No architecture specializes this yet; the intended default is to
/// evaluate isNonErr and invert the result.
fn isErr(self: *Self, operand: MCValue) !MCValue {
    _ = operand;
    return self.fail("TODO call isNonErr and invert the result", .{});
}
/// Lower an "is not an error" check on `operand`.
/// No architecture specializes this yet; the intended default is to
/// evaluate isErr and invert the result.
fn isNonErr(self: *Self, operand: MCValue) !MCValue {
    _ = operand;
    return self.fail("TODO call isErr and invert the result", .{});
}
/// Lower the `is_null` AIR instruction (operand is a value, not a pointer).
fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const operand_mcv = try self.resolveInst(un_op);
        break :result try self.isNull(operand_mcv);
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}
/// Lower the `is_null_ptr` AIR instruction: load the pointed-to value,
/// then perform the null check on the loaded value.
fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const ptr_mcv = try self.resolveInst(un_op);
        // Reuse the pointer's MCValue to hold the loaded value when
        // reuseOperand permits it; otherwise allocate a fresh location.
        const val_mcv: MCValue = if (self.reuseOperand(inst, un_op, 0, ptr_mcv))
            ptr_mcv
        else
            try self.allocRegOrMem(inst, true);
        try self.load(val_mcv, ptr_mcv, self.air.typeOf(un_op));
        break :result try self.isNull(val_mcv);
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}
/// Lower the `is_non_null` AIR instruction (operand is a value, not a pointer).
fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const operand_mcv = try self.resolveInst(un_op);
        break :result try self.isNonNull(operand_mcv);
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}
/// Lower the `is_non_null_ptr` AIR instruction: load the pointed-to value,
/// then perform the non-null check on the loaded value.
fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const ptr_mcv = try self.resolveInst(un_op);
        // Reuse the pointer's MCValue to hold the loaded value when
        // reuseOperand permits it; otherwise allocate a fresh location.
        const val_mcv: MCValue = if (self.reuseOperand(inst, un_op, 0, ptr_mcv))
            ptr_mcv
        else
            try self.allocRegOrMem(inst, true);
        try self.load(val_mcv, ptr_mcv, self.air.typeOf(un_op));
        break :result try self.isNonNull(val_mcv);
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}
/// Lower the `is_err` AIR instruction (operand is a value, not a pointer).
fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const operand_mcv = try self.resolveInst(un_op);
        break :result try self.isErr(operand_mcv);
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}
/// Lower the `is_err_ptr` AIR instruction: load the pointed-to value,
/// then perform the error check on the loaded value.
fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const ptr_mcv = try self.resolveInst(un_op);
        // Reuse the pointer's MCValue to hold the loaded value when
        // reuseOperand permits it; otherwise allocate a fresh location.
        const val_mcv: MCValue = if (self.reuseOperand(inst, un_op, 0, ptr_mcv))
            ptr_mcv
        else
            try self.allocRegOrMem(inst, true);
        try self.load(val_mcv, ptr_mcv, self.air.typeOf(un_op));
        break :result try self.isErr(val_mcv);
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}
/// Lower the `is_non_err` AIR instruction (operand is a value, not a pointer).
fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const operand_mcv = try self.resolveInst(un_op);
        break :result try self.isNonErr(operand_mcv);
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}
/// Lower the `is_non_err_ptr` AIR instruction: load the pointed-to value,
/// then perform the non-error check on the loaded value.
fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const ptr_mcv = try self.resolveInst(un_op);
        // Reuse the pointer's MCValue to hold the loaded value when
        // reuseOperand permits it; otherwise allocate a fresh location.
        const val_mcv: MCValue = if (self.reuseOperand(inst, un_op, 0, ptr_mcv))
            ptr_mcv
        else
            try self.allocRegOrMem(inst, true);
        try self.load(val_mcv, ptr_mcv, self.air.typeOf(un_op));
        break :result try self.isNonErr(val_mcv);
    };
    return self.finishAir(inst, result, .{ un_op, .none, .none });
}
/// Lower the `loop` AIR instruction: emit the body, then an unconditional
/// jump back to the first byte of the body.
fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
    const inst_data = self.air.instructions.items(.data)[inst].ty_pl;
    const extra = self.air.extraData(Air.Block, inst_data.payload);
    const body = self.air.extra[extra.end..][0..extra.data.body_len];
    // Remember where the body starts so we can jump back to it.
    const jump_target = self.code.items.len;
    try self.genBody(body);
    try self.jump(jump_target);
    return self.finishAirBookkeeping();
}
/// Send control flow to the `index` of `self.code`.
/// Emits a backward/forward unconditional jump, picking the shortest
/// encoding available on the target.
fn jump(self: *Self, index: usize) !void {
    switch (arch) {
        .i386, .x86_64 => {
            // Worst case is the 5-byte rel32 form.
            try self.code.ensureCapacity(self.code.items.len + 5);
            // x86 displacements are relative to the end of the jump
            // instruction: 2 bytes for rel8, 5 bytes for rel32.
            if (math.cast(i8, @intCast(i32, index) - (@intCast(i32, self.code.items.len + 2)))) |delta| {
                self.code.appendAssumeCapacity(0xeb); // jmp rel8
                self.code.appendAssumeCapacity(@bitCast(u8, delta));
            } else |_| {
                const delta = @intCast(i32, index) - (@intCast(i32, self.code.items.len + 5));
                self.code.appendAssumeCapacity(0xe9); // jmp rel32
                mem.writeIntLittle(i32, self.code.addManyAsArrayAssumeCapacity(4), delta);
            }
        },
        .arm, .armeb => {
            // The +8 accounts for the ARM PC reading two instructions
            // ahead of the branch being executed.
            if (math.cast(i26, @intCast(i32, index) - @intCast(i32, self.code.items.len + 8))) |delta| {
                writeInt(u32, try self.code.addManyAsArray(4), Instruction.b(.al, delta).toU32());
            } else |_| {
                return self.fail("TODO: enable larger branch offset", .{});
            }
        },
        .aarch64, .aarch64_be, .aarch64_32 => {
            // NOTE(review): AArch64 B is PC-relative to the branch
            // instruction itself (no +8 skew like 32-bit ARM); this +8
            // mirrors the ARM case above — verify against the encoding
            // performed by Instruction.b.
            if (math.cast(i28, @intCast(i32, index) - @intCast(i32, self.code.items.len + 8))) |delta| {
                writeInt(u32, try self.code.addManyAsArray(4), Instruction.b(delta).toU32());
            } else |_| {
                return self.fail("TODO: enable larger branch offset", .{});
            }
        },
        else => return self.fail("TODO implement jump for {}", .{self.target.cpu.arch}),
    }
}
/// Lower the `block` AIR instruction. A block is a setup to be able to
/// jump to its end; `br` instructions inside the body record relocations
/// here, and the first `br` chooses the block's result location.
fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
    // Register the block before generating the body so nested `br`
    // instructions can find it. `.mcv = .none` is a sentinel meaning no
    // break has picked a result location yet; the first break overwrites
    // it and subsequent breaks store into it.
    try self.blocks.putNoClobber(self.gpa, inst, .{
        .relocs = .{},
        .mcv = MCValue{ .none = {} },
    });
    const block_data = self.blocks.getPtr(inst).?;
    defer block_data.relocs.deinit(self.gpa);
    const inst_data = self.air.instructions.items(.data)[inst].ty_pl;
    const block = self.air.extraData(Air.Block, inst_data.payload);
    const body = self.air.extra[block.end..][0..block.data.body_len];
    try self.genBody(body);
    // Now that the end of the block is known, patch every jump-to-end
    // emitted by br/brVoid.
    for (block_data.relocs.items) |reloc| try self.performReloc(reloc);
    // NOTE(review): same-type @bitCast — presumably forces a copy out of
    // the hash map entry before finishAir; confirm before simplifying.
    const result = @bitCast(MCValue, block_data.mcv);
    return self.finishAir(inst, result, .{ .none, .none, .none });
}
/// Lower the `switch_br` AIR instruction. Not yet implemented for any
/// architecture.
fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    switch (arch) {
        else => return self.fail("TODO airSwitch for {}", .{self.target.cpu.arch}),
    }
    return self.finishAir(inst, .dead, .{ pl_op.operand, .none, .none });
}
/// Back-patch a previously emitted jump so that it targets the current end
/// of `self.code`.
fn performReloc(self: *Self, reloc: Reloc) !void {
    switch (reloc) {
        .rel32 => |pos| {
            // `pos` points at the 4 displacement bytes; the displacement
            // is relative to the first byte after them.
            const amt = self.code.items.len - (pos + 4);
            // Here it would be tempting to implement testing for amt == 0 and then elide the
            // jump. However, that will cause a problem because other jumps may assume that they
            // can jump to this code. Or maybe I didn't understand something when I was debugging.
            // It could be worth another look. Anyway, that's why that isn't done here. Probably the
            // best place to elide jumps will be in semantic analysis, by inlining blocks that
            // only have 1 break instruction.
            const s32_amt = math.cast(i32, amt) catch
                return self.fail("unable to perform relocation: jump too far", .{});
            mem.writeIntLittle(i32, self.code.items[pos..][0..4], s32_amt);
        },
        .arm_branch => |info| {
            switch (arch) {
                .arm, .armeb => {
                    // The +8 accounts for the ARM PC reading two
                    // instructions ahead of the branch being executed.
                    const amt = @intCast(i32, self.code.items.len) - @intCast(i32, info.pos + 8);
                    if (math.cast(i26, amt)) |delta| {
                        writeInt(u32, self.code.items[info.pos..][0..4], Instruction.b(info.cond, delta).toU32());
                    } else |_| {
                        return self.fail("TODO: enable larger branch offset", .{});
                    }
                },
                else => unreachable, // attempting to perform an ARM relocation on a non-ARM target arch
            }
        },
    }
}
/// Lower the `br` AIR instruction: forward the break operand to the
/// enclosing block, then emit the jump out of it.
fn airBr(self: *Self, inst: Air.Inst.Index) !void {
    const br_data = self.air.instructions.items(.data)[inst].br;
    try self.br(br_data.block_inst, br_data.operand);
    return self.finishAir(inst, .dead, .{ br_data.operand, .none, .none });
}
/// Lower the `bool_and` / `bool_or` AIR instructions.
fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    const air_tags = self.air.instructions.items(.tag);
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
        .x86_64 => switch (air_tags[inst]) {
            // AND and OR share the generic x86_64 binary-math lowering.
            .bool_and, .bool_or => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs),
            else => unreachable, // Not a boolean operation
        },
        .arm, .armeb => switch (air_tags[inst]) {
            .bool_and => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_and),
            .bool_or => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_or),
            else => unreachable, // Not a boolean operation
        },
        else => return self.fail("TODO implement boolean operations for {}", .{self.target.cpu.arch}),
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Route a break operand into `block`'s result location, then emit the
/// jump out of the block.
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
    const block_data = self.blocks.getPtr(block).?;
    // Only operands with runtime bits need materializing.
    if (self.air.typeOf(operand).hasCodeGenBits()) {
        const src_mcv = try self.resolveInst(operand);
        const dst_mcv = block_data.mcv;
        if (dst_mcv == .none) {
            // First break seen: adopt this operand's location as the
            // block's result location.
            block_data.mcv = src_mcv;
        } else {
            try self.setRegOrMem(self.air.typeOfIndex(block), dst_mcv, src_mcv);
        }
    }
    return self.brVoid(block);
}
/// Emit an unconditional jump out of `block` and record a relocation for
/// it; the target is patched by `performReloc` once the block's end offset
/// is known.
fn brVoid(self: *Self, block: Air.Inst.Index) !void {
    const block_data = self.blocks.getPtr(block).?;
    // Emit a jump with a relocation. It will be patched up after the block ends.
    try block_data.relocs.ensureCapacity(self.gpa, block_data.relocs.items.len + 1);
    switch (arch) {
        .i386, .x86_64 => {
            // TODO optimization opportunity: figure out when we can emit this as a 2 byte instruction
            // which is available if the jump is 127 bytes or less forward.
            try self.code.resize(self.code.items.len + 5);
            self.code.items[self.code.items.len - 5] = 0xe9; // jmp rel32
            // Leave the jump offset undefined; record where the 4
            // displacement bytes live so the relocation can fill them in.
            block_data.relocs.appendAssumeCapacity(.{ .rel32 = self.code.items.len - 4 });
        },
        .arm, .armeb => {
            // Reserve 4 bytes; the branch instruction itself is written
            // later by performReloc.
            try self.code.resize(self.code.items.len + 4);
            block_data.relocs.appendAssumeCapacity(.{
                .arm_branch = .{
                    .pos = self.code.items.len - 4,
                    .cond = .al,
                },
            });
        },
        else => return self.fail("TODO implement brvoid for {}", .{self.target.cpu.arch}),
    }
}
/// Lower an inline assembly AIR instruction.
/// Reads the assembly source and constraint strings back out of ZIR,
/// binds each input argument to its requested physical register, emits
/// the (currently very limited) set of recognized instruction patterns,
/// and yields the output register (if any) as the result MCValue.
fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
    const air_datas = self.air.instructions.items(.data);
    const air_extra = self.air.extraData(Air.Asm, air_datas[inst].ty_pl.payload);
    // Constraint and source strings still live in ZIR at this stage.
    const zir = self.mod_fn.owner_decl.namespace.file_scope.zir;
    const extended = zir.instructions.items(.data)[air_extra.data.zir_index].extended;
    const zir_extra = zir.extraData(Zir.Inst.Asm, extended.operand);
    const asm_source = zir.nullTerminatedString(zir_extra.data.asm_source);
    // `extended.small` packs, low to high: outputs_len (5 bits),
    // args_len (5 bits), clobbers_len (5 bits), is_volatile (1 bit).
    const outputs_len = @truncate(u5, extended.small);
    const args_len = @truncate(u5, extended.small >> 5);
    const clobbers_len = @truncate(u5, extended.small >> 10);
    _ = clobbers_len; // TODO honor these
    const is_volatile = @truncate(u1, extended.small >> 15) != 0;
    const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[air_extra.end..][0..outputs_len]);
    const args = @bitCast([]const Air.Inst.Ref, self.air.extra[air_extra.end + outputs.len ..][0..args_len]);
    if (outputs_len > 1) {
        return self.fail("TODO implement codegen for asm with more than 1 output", .{});
    }
    // Running cursor into the ZIR extra array; advanced as each
    // output/input record is decoded below.
    var extra_i: usize = zir_extra.end;
    // At most one output (enforced above); fetch its constraint if present.
    const output_constraint: ?[]const u8 = out: {
        var i: usize = 0;
        while (i < outputs_len) : (i += 1) {
            const output = zir.extraData(Zir.Inst.Asm.Output, extra_i);
            extra_i = output.end;
            break :out zir.nullTerminatedString(output.data.constraint);
        }
        break :out null;
    };
    // Non-volatile asm whose result is unused can be elided entirely.
    const dead = !is_volatile and self.liveness.isUnused(inst);
    const result: MCValue = if (dead) .dead else switch (arch) {
        .arm, .armeb => result: {
            // Bind each input to the register named by its "{reg}" constraint.
            for (args) |arg| {
                const input = zir.extraData(Zir.Inst.Asm.Input, extra_i);
                extra_i = input.end;
                const constraint = zir.nullTerminatedString(input.data.constraint);
                if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') {
                    return self.fail("unrecognized asm input constraint: '{s}'", .{constraint});
                }
                const reg_name = constraint[1 .. constraint.len - 1];
                const reg = parseRegName(reg_name) orelse
                    return self.fail("unrecognized register: '{s}'", .{reg_name});
                const arg_mcv = try self.resolveInst(arg);
                try self.register_manager.getReg(reg, null);
                try self.genSetReg(self.air.typeOf(arg), reg, arg_mcv);
            }
            // Only the syscall pattern is recognized so far.
            if (mem.eql(u8, asm_source, "svc #0")) {
                writeInt(u32, try self.code.addManyAsArray(4), Instruction.svc(.al, 0).toU32());
            } else {
                return self.fail("TODO implement support for more arm assembly instructions", .{});
            }
            // An "={reg}" output constraint maps the result to that register.
            if (output_constraint) |output| {
                if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') {
                    return self.fail("unrecognized asm output constraint: '{s}'", .{output});
                }
                const reg_name = output[2 .. output.len - 1];
                const reg = parseRegName(reg_name) orelse
                    return self.fail("unrecognized register: '{s}'", .{reg_name});
                break :result MCValue{ .register = reg };
            } else {
                break :result MCValue{ .none = {} };
            }
        },
        .aarch64 => result: {
            for (args) |arg| {
                const input = zir.extraData(Zir.Inst.Asm.Input, extra_i);
                extra_i = input.end;
                const constraint = zir.nullTerminatedString(input.data.constraint);
                if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') {
                    return self.fail("unrecognized asm input constraint: '{s}'", .{constraint});
                }
                const reg_name = constraint[1 .. constraint.len - 1];
                const reg = parseRegName(reg_name) orelse
                    return self.fail("unrecognized register: '{s}'", .{reg_name});
                const arg_mcv = try self.resolveInst(arg);
                try self.register_manager.getReg(reg, null);
                try self.genSetReg(self.air.typeOf(arg), reg, arg_mcv);
            }
            // Two syscall immediates recognized (0 for Linux, 0x80 for Darwin-style).
            if (mem.eql(u8, asm_source, "svc #0")) {
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.svc(0x0).toU32());
            } else if (mem.eql(u8, asm_source, "svc #0x80")) {
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.svc(0x80).toU32());
            } else {
                return self.fail("TODO implement support for more aarch64 assembly instructions", .{});
            }
            if (output_constraint) |output| {
                if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') {
                    return self.fail("unrecognized asm output constraint: '{s}'", .{output});
                }
                const reg_name = output[2 .. output.len - 1];
                const reg = parseRegName(reg_name) orelse
                    return self.fail("unrecognized register: '{s}'", .{reg_name});
                break :result MCValue{ .register = reg };
            } else {
                break :result MCValue{ .none = {} };
            }
        },
        .riscv64 => result: {
            for (args) |arg| {
                const input = zir.extraData(Zir.Inst.Asm.Input, extra_i);
                extra_i = input.end;
                const constraint = zir.nullTerminatedString(input.data.constraint);
                if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') {
                    return self.fail("unrecognized asm input constraint: '{s}'", .{constraint});
                }
                const reg_name = constraint[1 .. constraint.len - 1];
                const reg = parseRegName(reg_name) orelse
                    return self.fail("unrecognized register: '{s}'", .{reg_name});
                const arg_mcv = try self.resolveInst(arg);
                try self.register_manager.getReg(reg, null);
                try self.genSetReg(self.air.typeOf(arg), reg, arg_mcv);
            }
            if (mem.eql(u8, asm_source, "ecall")) {
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ecall.toU32());
            } else {
                return self.fail("TODO implement support for more riscv64 assembly instructions", .{});
            }
            if (output_constraint) |output| {
                if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') {
                    return self.fail("unrecognized asm output constraint: '{s}'", .{output});
                }
                const reg_name = output[2 .. output.len - 1];
                const reg = parseRegName(reg_name) orelse
                    return self.fail("unrecognized register: '{s}'", .{reg_name});
                break :result MCValue{ .register = reg };
            } else {
                break :result MCValue{ .none = {} };
            }
        },
        .x86_64, .i386 => result: {
            for (args) |arg| {
                const input = zir.extraData(Zir.Inst.Asm.Input, extra_i);
                extra_i = input.end;
                const constraint = zir.nullTerminatedString(input.data.constraint);
                if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') {
                    return self.fail("unrecognized asm input constraint: '{s}'", .{constraint});
                }
                const reg_name = constraint[1 .. constraint.len - 1];
                const reg = parseRegName(reg_name) orelse
                    return self.fail("unrecognized register: '{s}'", .{reg_name});
                const arg_mcv = try self.resolveInst(arg);
                try self.register_manager.getReg(reg, null);
                try self.genSetReg(self.air.typeOf(arg), reg, arg_mcv);
            }
            {
                // Assemble each line independently; only syscall/push/pop
                // patterns are recognized.
                var iter = std.mem.tokenize(u8, asm_source, "\n\r");
                while (iter.next()) |ins| {
                    if (mem.eql(u8, ins, "syscall")) {
                        try self.code.appendSlice(&[_]u8{ 0x0f, 0x05 });
                    } else if (mem.indexOf(u8, ins, "push")) |_| {
                        const arg = ins[4..];
                        if (mem.indexOf(u8, arg, "$")) |l| {
                            // push imm8 (opcode 0x6a)
                            const n = std.fmt.parseInt(u8, ins[4 + l + 1 ..], 10) catch return self.fail("TODO implement more inline asm int parsing", .{});
                            try self.code.appendSlice(&.{ 0x6a, n });
                        } else if (mem.indexOf(u8, arg, "%%")) |l| {
                            // push r64: opcode 0x50 + register id, with a
                            // REX.B prefix (0x41) for extended registers.
                            const reg_name = ins[4 + l + 2 ..];
                            const reg = parseRegName(reg_name) orelse
                                return self.fail("unrecognized register: '{s}'", .{reg_name});
                            const low_id: u8 = reg.low_id();
                            if (reg.isExtended()) {
                                try self.code.appendSlice(&.{ 0x41, 0b1010000 | low_id });
                            } else {
                                try self.code.append(0b1010000 | low_id);
                            }
                        } else return self.fail("TODO more push operands", .{});
                    } else if (mem.indexOf(u8, ins, "pop")) |_| {
                        const arg = ins[3..];
                        if (mem.indexOf(u8, arg, "%%")) |l| {
                            // pop r64: opcode 0x58 + register id, with a
                            // REX.B prefix (0x41) for extended registers.
                            const reg_name = ins[3 + l + 2 ..];
                            const reg = parseRegName(reg_name) orelse
                                return self.fail("unrecognized register: '{s}'", .{reg_name});
                            const low_id: u8 = reg.low_id();
                            if (reg.isExtended()) {
                                try self.code.appendSlice(&.{ 0x41, 0b1011000 | low_id });
                            } else {
                                try self.code.append(0b1011000 | low_id);
                            }
                        } else return self.fail("TODO more pop operands", .{});
                    } else {
                        return self.fail("TODO implement support for more x86 assembly instructions", .{});
                    }
                }
            }
            if (output_constraint) |output| {
                if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') {
                    return self.fail("unrecognized asm output constraint: '{s}'", .{output});
                }
                const reg_name = output[2 .. output.len - 1];
                const reg = parseRegName(reg_name) orelse
                    return self.fail("unrecognized register: '{s}'", .{reg_name});
                break :result MCValue{ .register = reg };
            } else {
                break :result MCValue{ .none = {} };
            }
        },
        else => return self.fail("TODO implement inline asm support for more architectures", .{}),
    };
    // Fast path: operand deaths fit in the inline Liveness buffer.
    if (outputs.len + args.len <= Liveness.bpi - 1) {
        var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
        std.mem.copy(Air.Inst.Ref, &buf, outputs);
        std.mem.copy(Air.Inst.Ref, buf[outputs.len..], args);
        return self.finishAir(inst, result, buf);
    }
    // Slow path: feed each operand through the big-tomb iterator.
    var bt = try self.iterateBigTomb(inst, outputs.len + args.len);
    for (outputs) |output| {
        bt.feed(output);
    }
    for (args) |arg| {
        bt.feed(arg);
    }
    return bt.finishAir(result);
}
fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigTomb {
try self.ensureProcessDeathCapacity(operand_count + 1);
return BigTomb{
.function = self,
.inst = inst,
.tomb_bits = self.liveness.getTombBits(inst),
.big_tomb_bits = self.liveness.special.get(inst) orelse 0,
.bit_index = 0,
};
}
/// Store `val` into the location described by `loc`, dispatching to the
/// register or stack helper. Performs no register-allocation or
/// stack-allocation bookkeeping.
fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
    switch (loc) {
        .none => return,
        .register => |reg| return self.genSetReg(ty, reg, val),
        .stack_offset => |off| return self.genSetStack(ty, off, val),
        .memory => return self.fail("TODO implement setRegOrMem for memory", .{}),
        else => unreachable,
    }
}
/// Store `mcv` into the stack slot at `stack_offset` (a positive offset;
/// the emitted addressing is negative relative to the frame pointer).
/// Values not directly storable are first copied into a temporary register
/// and this function recurses with the `.register` case.
fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
    switch (arch) {
        .arm, .armeb => switch (mcv) {
            .dead => unreachable,
            .ptr_stack_offset => unreachable,
            .ptr_embedded_in_code => unreachable,
            .unreach, .none => return, // Nothing to do.
            .undef => {
                if (!self.wantSafety())
                    return; // The already existing value will do just fine.
                // TODO Upgrade this to a memset call when we have that available.
                // 0xaa bytes are the debug "undefined" fill pattern.
                switch (ty.abiSize(self.target.*)) {
                    1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
                    2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
                    4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
                    8 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }),
                    else => return self.fail("TODO implement memset", .{}),
                }
            },
            .compare_flags_unsigned,
            .compare_flags_signed,
            .immediate,
            => {
                // Materialize in a register, then store via the .register case.
                const reg = try self.copyToTmpRegister(ty, mcv);
                return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
            },
            .embedded_in_code => |code_offset| {
                _ = code_offset;
                return self.fail("TODO implement set stack variable from embedded_in_code", .{});
            },
            .register => |reg| {
                const abi_size = ty.abiSize(self.target.*);
                // Store at fp minus (stack_offset + size); see .positive = false below.
                const adj_off = stack_offset + abi_size;
                switch (abi_size) {
                    1, 4 => {
                        // Immediate offsets for str/strb are limited to 12 bits;
                        // larger offsets go through a register.
                        const offset = if (math.cast(u12, adj_off)) |imm| blk: {
                            break :blk Instruction.Offset.imm(imm);
                        } else |_| Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0);
                        const str = switch (abi_size) {
                            1 => Instruction.strb,
                            4 => Instruction.str,
                            else => unreachable,
                        };
                        writeInt(u32, try self.code.addManyAsArray(4), str(.al, reg, .fp, .{
                            .offset = offset,
                            .positive = false,
                        }).toU32());
                    },
                    2 => {
                        // strh uses the extra load/store form: 8-bit immediate max.
                        const offset = if (adj_off <= math.maxInt(u8)) blk: {
                            break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, adj_off));
                        } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }));
                        writeInt(u32, try self.code.addManyAsArray(4), Instruction.strh(.al, reg, .fp, .{
                            .offset = offset,
                            .positive = false,
                        }).toU32());
                    },
                    else => return self.fail("TODO implement storing other types abi_size={}", .{abi_size}),
                }
            },
            .memory => |vaddr| {
                _ = vaddr;
                return self.fail("TODO implement set stack variable from memory vaddr", .{});
            },
            .stack_offset => |off| {
                if (stack_offset == off)
                    return; // Copy stack variable to itself; nothing to do.
                const reg = try self.copyToTmpRegister(ty, mcv);
                return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
            },
        },
        .x86_64 => switch (mcv) {
            .dead => unreachable,
            .ptr_stack_offset => unreachable,
            .ptr_embedded_in_code => unreachable,
            .unreach, .none => return, // Nothing to do.
            .undef => {
                if (!self.wantSafety())
                    return; // The already existing value will do just fine.
                // TODO Upgrade this to a memset call when we have that available.
                // 0xaa bytes are the debug "undefined" fill pattern.
                switch (ty.abiSize(self.target.*)) {
                    1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
                    2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
                    4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
                    8 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }),
                    else => return self.fail("TODO implement memset", .{}),
                }
            },
            .compare_flags_unsigned => |op| {
                _ = op;
                return self.fail("TODO implement set stack variable with compare flags value (unsigned)", .{});
            },
            .compare_flags_signed => |op| {
                _ = op;
                return self.fail("TODO implement set stack variable with compare flags value (signed)", .{});
            },
            .immediate => |x_big| {
                const abi_size = ty.abiSize(self.target.*);
                const adj_off = stack_offset + abi_size;
                // Only disp8 addressing is emitted below, so the offset must
                // fit an i8 after negation.
                if (adj_off > 128) {
                    return self.fail("TODO implement set stack variable with large stack offset", .{});
                }
                try self.code.ensureCapacity(self.code.items.len + 8);
                switch (abi_size) {
                    1 => {
                        return self.fail("TODO implement set abi_size=1 stack variable with immediate", .{});
                    },
                    2 => {
                        return self.fail("TODO implement set abi_size=2 stack variable with immediate", .{});
                    },
                    4 => {
                        const x = @intCast(u32, x_big);
                        // We have a positive stack offset value but we want a twos complement negative
                        // offset from rbp, which is at the top of the stack frame.
                        const negative_offset = @intCast(i8, -@intCast(i32, adj_off));
                        const twos_comp = @bitCast(u8, negative_offset);
                        // mov DWORD PTR [rbp+offset], immediate
                        self.code.appendSliceAssumeCapacity(&[_]u8{ 0xc7, 0x45, twos_comp });
                        mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), x);
                    },
                    8 => {
                        // We have a positive stack offset value but we want a twos complement negative
                        // offset from rbp, which is at the top of the stack frame.
                        const negative_offset = @intCast(i8, -@intCast(i32, adj_off));
                        const twos_comp = @bitCast(u8, negative_offset);
                        // 64 bit write to memory would take two mov's anyways so we
                        // insted just use two 32 bit writes to avoid register allocation
                        try self.code.ensureCapacity(self.code.items.len + 14);
                        var buf: [8]u8 = undefined;
                        mem.writeIntLittle(u64, &buf, x_big);
                        // mov DWORD PTR [rbp+offset+4], immediate
                        self.code.appendSliceAssumeCapacity(&[_]u8{ 0xc7, 0x45, twos_comp + 4 });
                        self.code.appendSliceAssumeCapacity(buf[4..8]);
                        // mov DWORD PTR [rbp+offset], immediate
                        self.code.appendSliceAssumeCapacity(&[_]u8{ 0xc7, 0x45, twos_comp });
                        self.code.appendSliceAssumeCapacity(buf[0..4]);
                    },
                    else => {
                        return self.fail("TODO implement set abi_size=large stack variable with immediate", .{});
                    },
                }
            },
            .embedded_in_code => {
                // TODO this and `.stack_offset` below need to get improved to support types greater than
                // register size, and do general memcpy
                const reg = try self.copyToTmpRegister(ty, mcv);
                return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
            },
            .register => |reg| {
                // 0x89: mov r/m, r
                try self.genX8664ModRMRegToStack(ty, stack_offset, reg, 0x89);
            },
            .memory => |vaddr| {
                _ = vaddr;
                return self.fail("TODO implement set stack variable from memory vaddr", .{});
            },
            .stack_offset => |off| {
                // TODO this and `.embedded_in_code` above need to get improved to support types greater than
                // register size, and do general memcpy
                if (stack_offset == off)
                    return; // Copy stack variable to itself; nothing to do.
                const reg = try self.copyToTmpRegister(ty, mcv);
                return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
            },
        },
        .aarch64, .aarch64_be, .aarch64_32 => switch (mcv) {
            .dead => unreachable,
            .ptr_stack_offset => unreachable,
            .ptr_embedded_in_code => unreachable,
            .unreach, .none => return, // Nothing to do.
            .undef => {
                if (!self.wantSafety())
                    return; // The already existing value will do just fine.
                // TODO Upgrade this to a memset call when we have that available.
                // 0xaa bytes are the debug "undefined" fill pattern.
                switch (ty.abiSize(self.target.*)) {
                    1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
                    2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
                    4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
                    8 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }),
                    else => return self.fail("TODO implement memset", .{}),
                }
            },
            .compare_flags_unsigned,
            .compare_flags_signed,
            .immediate,
            => {
                // Materialize in a register, then store via the .register case.
                const reg = try self.copyToTmpRegister(ty, mcv);
                return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
            },
            .embedded_in_code => |code_offset| {
                _ = code_offset;
                return self.fail("TODO implement set stack variable from embedded_in_code", .{});
            },
            .register => |reg| {
                const abi_size = ty.abiSize(self.target.*);
                const adj_off = stack_offset + abi_size;
                switch (abi_size) {
                    1, 2, 4, 8 => {
                        // i9 is the signed immediate range for this offset form;
                        // larger offsets go through a register.
                        const offset = if (math.cast(i9, adj_off)) |imm|
                            Instruction.LoadStoreOffset.imm_post_index(-imm)
                        else |_|
                            Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u64), MCValue{ .immediate = adj_off }));
                        // Base register is the frame pointer (x29/w29).
                        const rn: Register = switch (arch) {
                            .aarch64, .aarch64_be => .x29,
                            .aarch64_32 => .w29,
                            else => unreachable,
                        };
                        const str = switch (abi_size) {
                            1 => Instruction.strb,
                            2 => Instruction.strh,
                            4, 8 => Instruction.str,
                            else => unreachable, // unexpected abi size
                        };
                        writeInt(u32, try self.code.addManyAsArray(4), str(reg, rn, .{
                            .offset = offset,
                        }).toU32());
                    },
                    else => return self.fail("TODO implement storing other types abi_size={}", .{abi_size}),
                }
            },
            .memory => |vaddr| {
                _ = vaddr;
                return self.fail("TODO implement set stack variable from memory vaddr", .{});
            },
            .stack_offset => |off| {
                if (stack_offset == off)
                    return; // Copy stack variable to itself; nothing to do.
                const reg = try self.copyToTmpRegister(ty, mcv);
                return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
            },
        },
        // NOTE(review): message says "getSetStack"; likely meant "genSetStack".
        else => return self.fail("TODO implement getSetStack for {}", .{self.target.cpu.arch}),
    }
}
/// Materialize `mcv` into the register `reg`, emitting whatever machine code
/// the target architecture requires.
///
/// `ty` is the type of the value being loaded; it selects load widths for
/// memory/stack sources and is forwarded on recursive calls. `.dead`,
/// `.ptr_stack_offset` and `.ptr_embedded_in_code` are caller errors
/// (asserted unreachable); `.unreach`/`.none` are no-ops.
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
    switch (arch) {
        .arm, .armeb => switch (mcv) {
            .dead => unreachable,
            .ptr_stack_offset => unreachable,
            .ptr_embedded_in_code => unreachable,
            .unreach, .none => return, // Nothing to do.
            .undef => {
                if (!self.wantSafety())
                    return; // The already existing value will do just fine.
                // Write the debug undefined value.
                return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaa });
            },
            .compare_flags_unsigned,
            .compare_flags_signed,
            => |op| {
                const condition = switch (mcv) {
                    .compare_flags_unsigned => Condition.fromCompareOperatorUnsigned(op),
                    .compare_flags_signed => Condition.fromCompareOperatorSigned(op),
                    else => unreachable,
                };
                // mov reg, 0
                // moveq reg, 1
                const zero = Instruction.Operand.imm(0, 0);
                const one = Instruction.Operand.imm(1, 0);
                writeInt(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, reg, zero).toU32());
                writeInt(u32, try self.code.addManyAsArray(4), Instruction.mov(condition, reg, one).toU32());
            },
            .immediate => |x| {
                if (x > math.maxInt(u32)) return self.fail("ARM registers are 32-bit wide", .{});
                // Try a single mov with a modified immediate, then a single
                // mvn with the bitwise complement, before falling back to
                // multi-instruction sequences.
                if (Instruction.Operand.fromU32(@intCast(u32, x))) |op| {
                    writeInt(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, reg, op).toU32());
                } else if (Instruction.Operand.fromU32(~@intCast(u32, x))) |op| {
                    writeInt(u32, try self.code.addManyAsArray(4), Instruction.mvn(.al, reg, op).toU32());
                } else if (x <= math.maxInt(u16)) {
                    if (Target.arm.featureSetHas(self.target.cpu.features, .has_v7)) {
                        writeInt(u32, try self.code.addManyAsArray(4), Instruction.movw(.al, reg, @intCast(u16, x)).toU32());
                    } else {
                        writeInt(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, reg, Instruction.Operand.imm(@truncate(u8, x), 0)).toU32());
                        writeInt(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 8), 12)).toU32());
                    }
                } else {
                    // TODO write constant to code and load
                    // relative to pc
                    if (Target.arm.featureSetHas(self.target.cpu.features, .has_v7)) {
                        // immediate: 0xaaaabbbb
                        // movw reg, #0xbbbb
                        // movt reg, #0xaaaa
                        writeInt(u32, try self.code.addManyAsArray(4), Instruction.movw(.al, reg, @truncate(u16, x)).toU32());
                        writeInt(u32, try self.code.addManyAsArray(4), Instruction.movt(.al, reg, @truncate(u16, x >> 16)).toU32());
                    } else {
                        // immediate: 0xaabbccdd
                        // mov reg, #0xaa
                        // orr reg, reg, #0xbb, 24
                        // orr reg, reg, #0xcc, 16
                        // orr reg, reg, #0xdd, 8
                        writeInt(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, reg, Instruction.Operand.imm(@truncate(u8, x), 0)).toU32());
                        writeInt(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 8), 12)).toU32());
                        writeInt(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 16), 8)).toU32());
                        writeInt(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 24), 4)).toU32());
                    }
                }
            },
            .register => |src_reg| {
                // If the registers are the same, nothing to do.
                if (src_reg.id() == reg.id())
                    return;
                // mov reg, src_reg
                writeInt(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, reg, Instruction.Operand.reg(src_reg, Instruction.Operand.Shift.none)).toU32());
            },
            .memory => |addr| {
                // The value is in memory at a hard-coded address.
                // If the type is a pointer, it means the pointer address is at this memory location.
                try self.genSetReg(ty, reg, .{ .immediate = addr });
                writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldr(.al, reg, reg, .{ .offset = Instruction.Offset.none }).toU32());
            },
            .stack_offset => |unadjusted_off| {
                // TODO: maybe addressing from sp instead of fp
                const abi_size = ty.abiSize(self.target.*);
                const adj_off = unadjusted_off + abi_size;
                switch (abi_size) {
                    1, 4 => {
                        const offset = if (adj_off <= math.maxInt(u12)) blk: {
                            break :blk Instruction.Offset.imm(@intCast(u12, adj_off));
                        } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0);
                        const ldr = switch (abi_size) {
                            1 => Instruction.ldrb,
                            4 => Instruction.ldr,
                            else => unreachable,
                        };
                        writeInt(u32, try self.code.addManyAsArray(4), ldr(.al, reg, .fp, .{
                            .offset = offset,
                            .positive = false,
                        }).toU32());
                    },
                    2 => {
                        const offset = if (adj_off <= math.maxInt(u8)) blk: {
                            break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, adj_off));
                        } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }));
                        writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldrh(.al, reg, .fp, .{
                            .offset = offset,
                            .positive = false,
                        }).toU32());
                    },
                    else => return self.fail("TODO a type of size {} is not allowed in a register", .{abi_size}),
                }
            },
            else => return self.fail("TODO implement genSetReg for arm {}", .{mcv}),
        },
        .aarch64 => switch (mcv) {
            .dead => unreachable,
            .ptr_stack_offset => unreachable,
            .ptr_embedded_in_code => unreachable,
            .unreach, .none => return, // Nothing to do.
            .undef => {
                if (!self.wantSafety())
                    return; // The already existing value will do just fine.
                // Write the debug undefined value.
                switch (reg.size()) {
                    32 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaa }),
                    64 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }),
                    else => unreachable, // unexpected register size
                }
            },
            .immediate => |x| {
                // movz + up to three movk, depending on how many 16-bit
                // chunks of the immediate are non-trivial.
                if (x <= math.maxInt(u16)) {
                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movz(reg, @intCast(u16, x), 0).toU32());
                } else if (x <= math.maxInt(u32)) {
                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movz(reg, @truncate(u16, x), 0).toU32());
                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movk(reg, @intCast(u16, x >> 16), 16).toU32());
                } else if (x <= math.maxInt(u48)) {
                    // Fixed: this bound was erroneously `maxInt(u32)` (same as
                    // the previous branch), which made this 3-instruction path
                    // for 33-48 bit immediates dead code.
                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movz(reg, @truncate(u16, x), 0).toU32());
                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movk(reg, @truncate(u16, x >> 16), 16).toU32());
                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movk(reg, @intCast(u16, x >> 32), 32).toU32());
                } else {
                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movz(reg, @truncate(u16, x), 0).toU32());
                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movk(reg, @truncate(u16, x >> 16), 16).toU32());
                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movk(reg, @truncate(u16, x >> 32), 32).toU32());
                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movk(reg, @intCast(u16, x >> 48), 48).toU32());
                }
            },
            .register => |src_reg| {
                // If the registers are the same, nothing to do.
                if (src_reg.id() == reg.id())
                    return;
                // mov reg, src_reg
                writeInt(u32, try self.code.addManyAsArray(4), Instruction.orr(
                    reg,
                    .xzr,
                    src_reg,
                    Instruction.Shift.none,
                ).toU32());
            },
            .memory => |addr| {
                if (self.bin_file.options.pie) {
                    // PC-relative displacement to the entry in the GOT table.
                    // adrp
                    const offset = @intCast(u32, self.code.items.len);
                    mem.writeIntLittle(
                        u32,
                        try self.code.addManyAsArray(4),
                        Instruction.adrp(reg, 0).toU32(),
                    );
                    // ldr reg, reg, offset
                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(reg, .{
                        .register = .{
                            .rn = reg,
                            .offset = Instruction.LoadStoreOffset.imm(0),
                        },
                    }).toU32());
                    if (self.bin_file.cast(link.File.MachO)) |macho_file| {
                        // TODO this is super awkward. We are reversing the address of the GOT entry here.
                        // We should probably have it cached or move the reloc adding somewhere else.
                        const got_addr = blk: {
                            const seg = macho_file.load_commands.items[macho_file.data_const_segment_cmd_index.?].Segment;
                            const got = seg.sections.items[macho_file.got_section_index.?];
                            break :blk got.addr;
                        };
                        const where_index = blk: for (macho_file.got_entries.items) |key, id| {
                            if (got_addr + id * @sizeOf(u64) == addr) break :blk key.where_index;
                        } else unreachable;
                        const decl = macho_file.active_decl.?;
                        // Page reloc for adrp instruction.
                        try decl.link.macho.relocs.append(self.bin_file.allocator, .{
                            .offset = offset,
                            .where = .local,
                            .where_index = where_index,
                            .payload = .{ .page = .{ .kind = .got } },
                        });
                        // Pageoff reloc for adrp instruction.
                        try decl.link.macho.relocs.append(self.bin_file.allocator, .{
                            .offset = offset + 4,
                            .where = .local,
                            .where_index = where_index,
                            .payload = .{ .page_off = .{ .kind = .got } },
                        });
                    } else {
                        return self.fail("TODO implement genSetReg for PIE GOT indirection on this platform", .{});
                    }
                } else {
                    // The value is in memory at a hard-coded address.
                    // If the type is a pointer, it means the pointer address is at this memory location.
                    try self.genSetReg(Type.initTag(.usize), reg, .{ .immediate = addr });
                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(reg, .{ .register = .{ .rn = reg } }).toU32());
                }
            },
            .stack_offset => |unadjusted_off| {
                // TODO: maybe addressing from sp instead of fp
                const abi_size = ty.abiSize(self.target.*);
                const adj_off = unadjusted_off + abi_size;
                const rn: Register = switch (arch) {
                    .aarch64, .aarch64_be => .x29,
                    .aarch64_32 => .w29,
                    else => unreachable,
                };
                const offset = if (math.cast(i9, adj_off)) |imm|
                    Instruction.LoadStoreOffset.imm_post_index(-imm)
                else |_|
                    Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u64), MCValue{ .immediate = adj_off }));
                switch (abi_size) {
                    1, 2 => {
                        const ldr = switch (abi_size) {
                            1 => Instruction.ldrb,
                            2 => Instruction.ldrh,
                            else => unreachable, // unexpected abi size
                        };
                        writeInt(u32, try self.code.addManyAsArray(4), ldr(reg, rn, .{
                            .offset = offset,
                        }).toU32());
                    },
                    4, 8 => {
                        writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldr(reg, .{ .register = .{
                            .rn = rn,
                            .offset = offset,
                        } }).toU32());
                    },
                    else => return self.fail("TODO implement genSetReg other types abi_size={}", .{abi_size}),
                }
            },
            else => return self.fail("TODO implement genSetReg for aarch64 {}", .{mcv}),
        },
        .riscv64 => switch (mcv) {
            .dead => unreachable,
            .ptr_stack_offset => unreachable,
            .ptr_embedded_in_code => unreachable,
            .unreach, .none => return, // Nothing to do.
            .undef => {
                if (!self.wantSafety())
                    return; // The already existing value will do just fine.
                // Write the debug undefined value.
                return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa });
            },
            .immediate => |unsigned_x| {
                const x = @bitCast(i64, unsigned_x);
                if (math.minInt(i12) <= x and x <= math.maxInt(i12)) {
                    // Fits in a single addi from the zero register.
                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.addi(reg, .zero, @truncate(i12, x)).toU32());
                    return;
                }
                if (math.minInt(i32) <= x and x <= math.maxInt(i32)) {
                    const lo12 = @truncate(i12, x);
                    // lui loads the upper 20 bits; addi sign-extends its
                    // operand, so carry into the upper part when lo12 < 0.
                    const carry: i32 = if (lo12 < 0) 1 else 0;
                    const hi20 = @truncate(i20, (x >> 12) +% carry);
                    // TODO: add test case for 32-bit immediate
                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.lui(reg, hi20).toU32());
                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.addi(reg, reg, lo12).toU32());
                    return;
                }
                // li rd, immediate
                // "Myriad sequences"
                return self.fail("TODO genSetReg 33-64 bit immediates for riscv64", .{}); // glhf
            },
            .memory => |addr| {
                // The value is in memory at a hard-coded address.
                // If the type is a pointer, it means the pointer address is at this memory location.
                try self.genSetReg(ty, reg, .{ .immediate = addr });
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ld(reg, 0, reg).toU32());
                // LOAD imm=[i12 offset = 0], rs1 =
                // return self.fail("TODO implement genSetReg memory for riscv64");
            },
            else => return self.fail("TODO implement genSetReg for riscv64 {}", .{mcv}),
        },
        .x86_64 => switch (mcv) {
            .dead => unreachable,
            .ptr_stack_offset => unreachable,
            .ptr_embedded_in_code => unreachable,
            .unreach, .none => return, // Nothing to do.
            .undef => {
                if (!self.wantSafety())
                    return; // The already existing value will do just fine.
                // Write the debug undefined value.
                switch (reg.size()) {
                    8 => return self.genSetReg(ty, reg, .{ .immediate = 0xaa }),
                    16 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaa }),
                    32 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaa }),
                    64 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }),
                    else => unreachable,
                }
            },
            .compare_flags_unsigned => |op| {
                const encoder = try X8664Encoder.init(self.code, 7);
                // TODO audit this codegen: we force w = true here to make
                // the value affect the big register
                encoder.rex(.{
                    .w = true,
                    .b = reg.isExtended(),
                });
                // SETcc r/m8: 0x0F 0x9x, condition code chosen by operator.
                encoder.opcode_2byte(0x0f, switch (op) {
                    .gte => 0x93,
                    .gt => 0x97,
                    .neq => 0x95,
                    .lt => 0x92,
                    .lte => 0x96,
                    .eq => 0x94,
                });
                encoder.modRm_direct(
                    0,
                    reg.low_id(),
                );
            },
            .compare_flags_signed => |op| {
                _ = op;
                return self.fail("TODO set register with compare flags value (signed)", .{});
            },
            .immediate => |x| {
                // 32-bit moves zero-extend to 64-bit, so xoring the 32-bit
                // register is the fastest way to zero a register.
                if (x == 0) {
                    // The encoding for `xor r32, r32` is `0x31 /r`.
                    const encoder = try X8664Encoder.init(self.code, 3);
                    // If we're accessing e.g. r8d, we need to use a REX prefix before the actual operation. Since
                    // this is a 32-bit operation, the W flag is set to zero. X is also zero, as we're not using a SIB.
                    // Both R and B are set, as we're extending, in effect, the register bits *and* the operand.
                    encoder.rex(.{
                        .r = reg.isExtended(),
                        .b = reg.isExtended(),
                    });
                    encoder.opcode_1byte(0x31);
                    // Section 3.1.1.1 of the Intel x64 Manual states that "/r indicates that the
                    // ModR/M byte of the instruction contains a register operand and an r/m operand."
                    encoder.modRm_direct(
                        reg.low_id(),
                        reg.low_id(),
                    );
                    return;
                }
                if (x <= math.maxInt(i32)) {
                    // Next best case: if we set the lower four bytes, the upper four will be zeroed.
                    //
                    // The encoding for `mov IMM32 -> REG` is (0xB8 + R) IMM.
                    const encoder = try X8664Encoder.init(self.code, 6);
                    // Just as with XORing, we need a REX prefix. This time though, we only
                    // need the B bit set, as we're extending the opcode's register field,
                    // and there is no Mod R/M byte.
                    encoder.rex(.{
                        .b = reg.isExtended(),
                    });
                    encoder.opcode_withReg(0xB8, reg.low_id());
                    // no ModR/M byte
                    // IMM
                    encoder.imm32(@intCast(i32, x));
                    return;
                }
                // Worst case: we need to load the 64-bit register with the IMM. GNU's assemblers calls
                // this `movabs`, though this is officially just a different variant of the plain `mov`
                // instruction.
                //
                // This encoding is, in fact, the *same* as the one used for 32-bit loads. The only
                // difference is that we set REX.W before the instruction, which extends the load to
                // 64-bit and uses the full bit-width of the register.
                {
                    const encoder = try X8664Encoder.init(self.code, 10);
                    encoder.rex(.{
                        .w = true,
                        .b = reg.isExtended(),
                    });
                    encoder.opcode_withReg(0xB8, reg.low_id());
                    encoder.imm64(x);
                }
            },
            .embedded_in_code => |code_offset| {
                // We need the offset from RIP in a signed i32 twos complement.
                // The instruction is 7 bytes long and RIP points to the next instruction.
                // 64-bit LEA is encoded as REX.W 8D /r.
                const rip = self.code.items.len + 7;
                const big_offset = @intCast(i64, code_offset) - @intCast(i64, rip);
                const offset = @intCast(i32, big_offset);
                const encoder = try X8664Encoder.init(self.code, 7);
                // byte 1, always exists because w = true
                encoder.rex(.{
                    .w = true,
                    .r = reg.isExtended(),
                });
                // byte 2
                encoder.opcode_1byte(0x8D);
                // byte 3
                encoder.modRm_RIPDisp32(reg.low_id());
                // byte 4-7
                encoder.disp32(offset);
                // Double check that we haven't done any math errors
                assert(rip == self.code.items.len);
            },
            .register => |src_reg| {
                // If the registers are the same, nothing to do.
                if (src_reg.id() == reg.id())
                    return;
                // This is a variant of 8B /r.
                const abi_size = ty.abiSize(self.target.*);
                const encoder = try X8664Encoder.init(self.code, 3);
                encoder.rex(.{
                    .w = abi_size == 8,
                    .r = reg.isExtended(),
                    .b = src_reg.isExtended(),
                });
                encoder.opcode_1byte(0x8B);
                encoder.modRm_direct(reg.low_id(), src_reg.low_id());
            },
            .memory => |x| {
                if (self.bin_file.options.pie) {
                    // RIP-relative displacement to the entry in the GOT table.
                    const abi_size = ty.abiSize(self.target.*);
                    const encoder = try X8664Encoder.init(self.code, 10);
                    // LEA reg, [<offset>]
                    // We encode the instruction FIRST because prefixes may or may not appear.
                    // After we encode the instruction, we will know that the displacement bytes
                    // for [<offset>] will be at self.code.items.len - 4.
                    encoder.rex(.{
                        .w = true, // force 64 bit because loading an address (to the GOT)
                        .r = reg.isExtended(),
                    });
                    encoder.opcode_1byte(0x8D);
                    encoder.modRm_RIPDisp32(reg.low_id());
                    encoder.disp32(0);
                    const offset = @intCast(u32, self.code.items.len);
                    if (self.bin_file.cast(link.File.MachO)) |macho_file| {
                        // TODO this is super awkward. We are reversing the address of the GOT entry here.
                        // We should probably have it cached or move the reloc adding somewhere else.
                        const got_addr = blk: {
                            const seg = macho_file.load_commands.items[macho_file.data_const_segment_cmd_index.?].Segment;
                            const got = seg.sections.items[macho_file.got_section_index.?];
                            break :blk got.addr;
                        };
                        const where_index = blk: for (macho_file.got_entries.items) |key, id| {
                            if (got_addr + id * @sizeOf(u64) == x) break :blk key.where_index;
                        } else unreachable;
                        const decl = macho_file.active_decl.?;
                        // Load reloc for LEA instruction.
                        try decl.link.macho.relocs.append(self.bin_file.allocator, .{
                            .offset = offset - 4,
                            .where = .local,
                            .where_index = where_index,
                            .payload = .{ .load = .{ .kind = .got } },
                        });
                    } else {
                        return self.fail("TODO implement genSetReg for PIE GOT indirection on this platform", .{});
                    }
                    // MOV reg, [reg]
                    encoder.rex(.{
                        .w = abi_size == 8,
                        .r = reg.isExtended(),
                        .b = reg.isExtended(),
                    });
                    encoder.opcode_1byte(0x8B);
                    encoder.modRm_indirectDisp0(reg.low_id(), reg.low_id());
                } else if (x <= math.maxInt(i32)) {
                    // Moving from memory to a register is a variant of `8B /r`.
                    // Since we're using 64-bit moves, we require a REX.
                    // This variant also requires a SIB, as it would otherwise be RIP-relative.
                    // We want mode zero with the lower three bits set to four to indicate an SIB with no other displacement.
                    // The SIB must be 0x25, to indicate a disp32 with no scaled index.
                    // 0b00RRR100, where RRR is the lower three bits of the register ID.
                    // The instruction is thus eight bytes; REX 0x8B 0b00RRR100 0x25 followed by a four-byte disp32.
                    const abi_size = ty.abiSize(self.target.*);
                    const encoder = try X8664Encoder.init(self.code, 8);
                    encoder.rex(.{
                        .w = abi_size == 8,
                        .r = reg.isExtended(),
                    });
                    encoder.opcode_1byte(0x8B);
                    // effective address = [SIB]
                    encoder.modRm_SIBDisp0(reg.low_id());
                    // SIB = disp32
                    encoder.sib_disp32();
                    encoder.disp32(@intCast(i32, x));
                } else {
                    // If this is RAX, we can use a direct load; otherwise, we need to load the address, then indirectly load
                    // the value.
                    if (reg.id() == 0) {
                        // REX.W 0xA1 moffs64*
                        // moffs64* is a 64-bit offset "relative to segment base", which really just means the
                        // absolute address for all practical purposes.
                        const encoder = try X8664Encoder.init(self.code, 10);
                        encoder.rex(.{
                            .w = true,
                        });
                        encoder.opcode_1byte(0xA1);
                        encoder.writeIntLittle(u64, x);
                    } else {
                        // This requires two instructions; a move imm as used above, followed by an indirect load using the register
                        // as the address and the register as the destination.
                        //
                        // This cannot be used if the lower three bits of the id are equal to four or five, as there
                        // is no way to possibly encode it. This means that RSP, RBP, R12, and R13 cannot be used with
                        // this instruction.
                        const id3 = @truncate(u3, reg.id());
                        assert(id3 != 4 and id3 != 5);
                        // Rather than duplicate the logic used for the move, we just use a self-call with a new MCValue.
                        try self.genSetReg(ty, reg, MCValue{ .immediate = x });
                        // Now, the register contains the address of the value to load into it
                        // Currently, we're only allowing 64-bit registers, so we need the `REX.W 8B /r` variant.
                        // TODO: determine whether to allow other sized registers, and if so, handle them properly.
                        // mov reg, [reg]
                        const abi_size = ty.abiSize(self.target.*);
                        const encoder = try X8664Encoder.init(self.code, 3);
                        encoder.rex(.{
                            .w = abi_size == 8,
                            .r = reg.isExtended(),
                            .b = reg.isExtended(),
                        });
                        encoder.opcode_1byte(0x8B);
                        encoder.modRm_indirectDisp0(reg.low_id(), reg.low_id());
                    }
                }
            },
            .stack_offset => |unadjusted_off| {
                const abi_size = ty.abiSize(self.target.*);
                const off = unadjusted_off + abi_size;
                if (off < std.math.minInt(i32) or off > std.math.maxInt(i32)) {
                    return self.fail("stack offset too large", .{});
                }
                const ioff = -@intCast(i32, off);
                const encoder = try X8664Encoder.init(self.code, 3);
                encoder.rex(.{
                    .w = abi_size == 8,
                    .r = reg.isExtended(),
                });
                encoder.opcode_1byte(0x8B);
                if (std.math.minInt(i8) <= ioff and ioff <= std.math.maxInt(i8)) {
                    // Example: 48 8b 4d 7f           mov    rcx,QWORD PTR [rbp+0x7f]
                    encoder.modRm_indirectDisp8(reg.low_id(), Register.ebp.low_id());
                    encoder.disp8(@intCast(i8, ioff));
                } else {
                    // Example: 48 8b 8d 80 00 00 00  mov    rcx,QWORD PTR [rbp+0x80]
                    encoder.modRm_indirectDisp32(reg.low_id(), Register.ebp.low_id());
                    encoder.disp32(ioff);
                }
            },
        },
        else => return self.fail("TODO implement genSetReg for {}", .{self.target.cpu.arch}),
    }
}
/// Lower a ptr-to-int AIR instruction. At the machine level this is a no-op:
/// the operand's MCValue is reused directly as the result.
fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void {
    const operand = self.air.instructions.items(.data)[inst].un_op;
    const operand_mcv = try self.resolveInst(operand);
    return self.finishAir(inst, operand_mcv, .{ operand, .none, .none });
}
/// Lower a bitcast AIR instruction. The bit pattern is unchanged, so the
/// operand's MCValue is forwarded as this instruction's result.
fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
    const data = self.air.instructions.items(.data)[inst].ty_op;
    const operand_mcv = try self.resolveInst(data.operand);
    return self.finishAir(inst, operand_mcv, .{ data.operand, .none, .none });
}
/// Resolve an AIR reference to the MCValue that holds it.
/// Comptime-known refs come from the fixed typed-value table; `constant`
/// instructions are memoized in the outermost branch table; everything else
/// is looked up in the branch stack.
fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
    // The first ref indices are reserved for a fixed table of well-known
    // comptime values.
    const ref_int = @enumToInt(inst);
    if (ref_int < Air.Inst.Ref.typed_value_map.len) {
        const tv = Air.Inst.Ref.typed_value_map[ref_int];
        if (!tv.ty.hasCodeGenBits()) return MCValue{ .none = {} };
        return self.genTypedValue(tv);
    }

    // If the type has no codegen bits, no need to store it.
    const inst_ty = self.air.typeOf(inst);
    if (!inst_ty.hasCodeGenBits()) return MCValue{ .none = {} };

    const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
    const tag = self.air.instructions.items(.tag)[inst_index];
    if (tag == .const_ty) unreachable;
    if (tag != .constant) return self.getResolvedInstValue(inst_index);

    // Constants have static lifetimes, so they are always memoized in the
    // outer most table.
    const branch = &self.branch_stack.items[0];
    const gop = try branch.inst_table.getOrPut(self.gpa, inst_index);
    if (!gop.found_existing) {
        const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl;
        gop.value_ptr.* = try self.genTypedValue(.{
            .ty = inst_ty,
            .val = self.air.values[ty_pl.payload],
        });
    }
    return gop.value_ptr.*;
}
/// Look up a previously-resolved instruction value, searching the branch
/// stack from the innermost "layer" outwards. The value must be present in
/// some layer, so the loop never falls through.
fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
    var i: usize = self.branch_stack.items.len - 1;
    while (true) : (i -= 1) {
        if (self.branch_stack.items[i].inst_table.get(inst)) |mcv| {
            assert(mcv != .dead);
            return mcv;
        }
    }
}
/// If the MCValue is an immediate, and it does not fit within this type,
/// we put it in a register.
/// A potential opportunity for future optimization here would be keeping track
/// of the fact that the instruction is available both as an immediate
/// and as a register.
fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCValue {
    const mcv = try self.resolveInst(operand);
    const int_info = @typeInfo(T).Int;
    if (mcv != .immediate) return mcv;
    const imm = mcv.immediate;
    // This immediate is unsigned; reserve one bit when T is signed.
    const U = std.meta.Int(.unsigned, int_info.bits - @boolToInt(int_info.signedness == .signed));
    if (imm < math.maxInt(U)) return mcv;
    return MCValue{ .register = try self.copyToTmpRegister(Type.initTag(.usize), mcv) };
}
/// Lower a comptime-known `TypedValue` to an `MCValue`.
/// Undefined values become `.undef`; most scalar values become `.immediate`;
/// Decl references become `.memory` pointing at the Decl's GOT entry.
/// Unsupported cases return a "TODO" codegen failure.
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
    if (typed_value.val.isUndef())
        return MCValue{ .undef = {} };
    const ptr_bits = self.target.cpu.arch.ptrBitWidth();
    const ptr_bytes: u64 = @divExact(ptr_bits, 8);
    switch (typed_value.ty.zigTypeTag()) {
        .Pointer => switch (typed_value.ty.ptrSize()) {
            .Slice => {
                var buf: Type.Payload.ElemType = undefined;
                const ptr_type = typed_value.ty.slicePtrFieldType(&buf);
                const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val });
                const slice_len = typed_value.val.sliceLen();
                // Codegen can't handle some kinds of indirection. If the wrong union field is accessed here it may mean
                // the Sema code needs to use anonymous Decls or alloca instructions to store data.
                const ptr_imm = ptr_mcv.memory;
                _ = slice_len;
                _ = ptr_imm;
                // We need more general support for const data being stored in memory to make this work.
                return self.fail("TODO codegen for const slices", .{});
            },
            else => {
                // A pointer to a Decl resolves to the address of that Decl's
                // GOT (or offset table) entry in the output file.
                if (typed_value.val.castTag(.decl_ref)) |payload| {
                    const decl = payload.data;
                    decl.alive = true;
                    if (self.bin_file.cast(link.File.Elf)) |elf_file| {
                        const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
                        const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
                        return MCValue{ .memory = got_addr };
                    } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
                        const got_addr = blk: {
                            const seg = macho_file.load_commands.items[macho_file.data_const_segment_cmd_index.?].Segment;
                            const got = seg.sections.items[macho_file.got_section_index.?];
                            const got_index = macho_file.got_entries_map.get(.{
                                .where = .local,
                                .where_index = decl.link.macho.local_sym_index,
                            }) orelse unreachable;
                            break :blk got.addr + got_index * ptr_bytes;
                        };
                        return MCValue{ .memory = got_addr };
                    } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
                        const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
                        return MCValue{ .memory = got_addr };
                    } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
                        const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
                        return MCValue{ .memory = got_addr };
                    } else {
                        return self.fail("TODO codegen non-ELF const Decl pointer", .{});
                    }
                }
                // Integer-valued pointers (e.g. @intToPtr results) become
                // plain immediates.
                if (typed_value.val.tag() == .int_u64) {
                    return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
                }
                return self.fail("TODO codegen more kinds of const pointers", .{});
            },
        },
        .Int => {
            const info = typed_value.ty.intInfo(self.target.*);
            if (info.bits > ptr_bits or info.signedness == .signed) {
                return self.fail("TODO const int bigger than ptr and signed int", .{});
            }
            return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
        },
        .Bool => {
            return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
        },
        .ComptimeInt => unreachable, // semantic analysis prevents this
        .ComptimeFloat => unreachable, // semantic analysis prevents this
        .Optional => {
            if (typed_value.ty.isPtrLikeOptional()) {
                // Pointer-like optional: null is the zero address, otherwise
                // lower the child payload directly.
                if (typed_value.val.isNull())
                    return MCValue{ .immediate = 0 };
                var buf: Type.Payload.ElemType = undefined;
                return self.genTypedValue(.{
                    .ty = typed_value.ty.optionalChild(&buf),
                    .val = typed_value.val,
                });
            } else if (typed_value.ty.abiSize(self.target.*) == 1) {
                // One-byte optional: only the is-null flag is stored.
                return MCValue{ .immediate = @boolToInt(typed_value.val.isNull()) };
            }
            return self.fail("TODO non pointer optionals", .{});
        },
        .Enum => {
            if (typed_value.val.castTag(.enum_field_index)) |field_index| {
                switch (typed_value.ty.tag()) {
                    .enum_simple => {
                        // Simple enums use the field index itself as the tag value.
                        return MCValue{ .immediate = field_index.data };
                    },
                    .enum_full, .enum_nonexhaustive => {
                        const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data;
                        if (enum_full.values.count() != 0) {
                            const tag_val = enum_full.values.keys()[field_index.data];
                            return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val });
                        } else {
                            return MCValue{ .immediate = field_index.data };
                        }
                    },
                    else => unreachable,
                }
            } else {
                // Not a field index: lower the value as its integer tag type.
                var int_tag_buffer: Type.Payload.Bits = undefined;
                const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
                return self.genTypedValue(.{ .ty = int_tag_ty, .val = typed_value.val });
            }
        },
        .ErrorSet => {
            switch (typed_value.val.tag()) {
                .@"error" => {
                    // Error values lower to their index in the module-wide
                    // global error set.
                    const err_name = typed_value.val.castTag(.@"error").?.data.name;
                    const module = self.bin_file.options.module.?;
                    const global_error_set = module.global_error_set;
                    const error_index = global_error_set.get(err_name).?;
                    return MCValue{ .immediate = error_index };
                },
                else => {
                    // In this case we are rendering an error union which has a 0 bits payload.
                    return MCValue{ .immediate = 0 };
                },
            }
        },
        .ErrorUnion => {
            const error_type = typed_value.ty.errorUnionSet();
            const payload_type = typed_value.ty.errorUnionPayload();
            // NOTE(review): this unconditionally unwraps `.eu_payload`; a
            // comptime error value of this type would hit the `.?` — confirm
            // Sema guarantees the value is always a payload here.
            const sub_val = typed_value.val.castTag(.eu_payload).?.data;
            if (!payload_type.hasCodeGenBits()) {
                // We use the error type directly as the type.
                return self.genTypedValue(.{ .ty = error_type, .val = sub_val });
            }
            return self.fail("TODO implement error union const of type '{}'", .{typed_value.ty});
        },
        else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty}),
    }
}
/// Resolved machine-code locations for a function call's arguments and
/// return value under a particular calling convention.
const CallMCValues = struct {
    /// One MCValue per parameter; allocated with the function's gpa.
    args: []MCValue,
    /// Where the return value is placed (or `.unreach` / `.none`).
    return_value: MCValue,
    /// Bytes of stack consumed by stack-passed arguments.
    stack_byte_count: u32,
    /// Required alignment of the argument stack area.
    stack_align: u32,

    /// Frees `args` and poisons the struct. Must be called by the owner.
    fn deinit(self: *CallMCValues, func: *Self) void {
        func.gpa.free(self.args);
        self.* = undefined;
    }
};
/// Compute where each parameter and the return value of a function with type
/// `fn_ty` live, per the target architecture's calling convention.
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
    const cc = fn_ty.fnCallingConvention();
    const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
    defer self.gpa.free(param_types);
    fn_ty.fnParamTypes(param_types);
    var result: CallMCValues = .{
        .args = try self.gpa.alloc(MCValue, param_types.len),
        // These undefined values must be populated before returning from this function.
        .return_value = undefined,
        .stack_byte_count = undefined,
        .stack_align = undefined,
    };
    errdefer self.gpa.free(result.args);

    const ret_ty = fn_ty.fnReturnType();

    // First pass: assign argument locations.
    switch (arch) {
        .x86_64 => {
            switch (cc) {
                .Naked => {
                    assert(result.args.len == 0);
                    result.return_value = .{ .unreach = {} };
                    result.stack_byte_count = 0;
                    result.stack_align = 1;
                    return result;
                },
                .Unspecified, .C => {
                    var next_int_reg: usize = 0;
                    var next_stack_offset: u32 = 0;

                    for (param_types) |ty, i| {
                        if (!ty.hasCodeGenBits()) {
                            // Zero-bit parameters only occur for the
                            // unspecified convention.
                            assert(cc != .C);
                            result.args[i] = .{ .none = {} };
                            continue;
                        }
                        const param_size = @intCast(u32, ty.abiSize(self.target.*));
                        const pass_in_reg = switch (ty.zigTypeTag()) {
                            .Bool => true,
                            .Int => param_size <= 8,
                            .Pointer => ty.ptrSize() != .Slice,
                            .Optional => ty.isPtrLikeOptional(),
                            else => false,
                        };
                        if (pass_in_reg) {
                            if (next_int_reg >= c_abi_int_param_regs.len) {
                                // Out of integer registers; spill to the stack.
                                result.args[i] = .{ .stack_offset = next_stack_offset };
                                next_stack_offset += param_size;
                            } else {
                                const aliased_reg = registerAlias(
                                    c_abi_int_param_regs[next_int_reg],
                                    param_size,
                                );
                                result.args[i] = .{ .register = aliased_reg };
                                next_int_reg += 1;
                            }
                        } else {
                            // For simplicity of codegen, slices and other types are always pushed onto the stack.
                            // TODO: look into optimizing this by passing things as registers sometimes,
                            // such as ptr and len of slices as separate registers.
                            // TODO: also we need to honor the C ABI for relevant types rather than passing on
                            // the stack here.
                            result.args[i] = .{ .stack_offset = next_stack_offset };
                            next_stack_offset += param_size;
                        }
                    }
                    result.stack_byte_count = next_stack_offset;
                    result.stack_align = 16;
                },
                else => return self.fail("TODO implement function parameters for {} on x86_64", .{cc}),
            }
        },
        .arm, .armeb => {
            switch (cc) {
                .Naked => {
                    assert(result.args.len == 0);
                    result.return_value = .{ .unreach = {} };
                    result.stack_byte_count = 0;
                    result.stack_align = 1;
                    return result;
                },
                .Unspecified, .C => {
                    // ARM Procedure Call Standard, Chapter 6.5
                    var ncrn: usize = 0; // Next Core Register Number
                    var nsaa: u32 = 0; // Next stacked argument address

                    for (param_types) |ty, i| {
                        // 8-byte-aligned arguments start at an even register.
                        if (ty.abiAlignment(self.target.*) == 8)
                            ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2);

                        const param_size = @intCast(u32, ty.abiSize(self.target.*));
                        if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) {
                            if (param_size <= 4) {
                                result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] };
                                ncrn += 1;
                            } else {
                                return self.fail("TODO MCValues with multiple registers", .{});
                            }
                        } else if (ncrn < 4 and nsaa == 0) {
                            return self.fail("TODO MCValues split between registers and stack", .{});
                        } else {
                            ncrn = 4;
                            if (ty.abiAlignment(self.target.*) == 8)
                                nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8);

                            result.args[i] = .{ .stack_offset = nsaa };
                            nsaa += param_size;
                        }
                    }

                    result.stack_byte_count = nsaa;
                    result.stack_align = 8;
                },
                else => return self.fail("TODO implement function parameters for {} on arm", .{cc}),
            }
        },
        .aarch64 => {
            switch (cc) {
                .Naked => {
                    assert(result.args.len == 0);
                    result.return_value = .{ .unreach = {} };
                    result.stack_byte_count = 0;
                    result.stack_align = 1;
                    return result;
                },
                .Unspecified, .C => {
                    // ARM64 Procedure Call Standard
                    var ncrn: usize = 0; // Next Core Register Number
                    var nsaa: u32 = 0; // Next stacked argument address

                    for (param_types) |ty, i| {
                        // We round up NCRN only for non-Apple platforms which allow the 16-byte aligned
                        // values to spread across odd-numbered registers.
                        if (ty.abiAlignment(self.target.*) == 16 and !self.target.isDarwin()) {
                            // Round up NCRN to the next even number
                            ncrn += ncrn % 2;
                        }

                        const param_size = @intCast(u32, ty.abiSize(self.target.*));
                        if (std.math.divCeil(u32, param_size, 8) catch unreachable <= 8 - ncrn) {
                            if (param_size <= 8) {
                                result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] };
                                ncrn += 1;
                            } else {
                                return self.fail("TODO MCValues with multiple registers", .{});
                            }
                        } else if (ncrn < 8 and nsaa == 0) {
                            return self.fail("TODO MCValues split between registers and stack", .{});
                        } else {
                            ncrn = 8;
                            // TODO Apple allows the arguments on the stack to be non-8-byte aligned provided
                            // that the entire stack space consumed by the arguments is 8-byte aligned.
                            if (ty.abiAlignment(self.target.*) == 8) {
                                if (nsaa % 8 != 0) {
                                    nsaa += 8 - (nsaa % 8);
                                }
                            }

                            result.args[i] = .{ .stack_offset = nsaa };
                            nsaa += param_size;
                        }
                    }

                    result.stack_byte_count = nsaa;
                    result.stack_align = 16;
                },
                else => return self.fail("TODO implement function parameters for {} on aarch64", .{cc}),
            }
        },
        else => if (param_types.len != 0)
            return self.fail("TODO implement codegen parameters for {}", .{self.target.cpu.arch}),
    }

    // Second pass: assign the return-value location.
    if (ret_ty.zigTypeTag() == .NoReturn) {
        result.return_value = .{ .unreach = {} };
    } else if (!ret_ty.hasCodeGenBits()) {
        result.return_value = .{ .none = {} };
    } else switch (arch) {
        .x86_64 => switch (cc) {
            .Naked => unreachable,
            .Unspecified, .C => {
                const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
                const aliased_reg = registerAlias(c_abi_int_return_regs[0], ret_ty_size);
                result.return_value = .{ .register = aliased_reg };
            },
            else => return self.fail("TODO implement function return values for {}", .{cc}),
        },
        .arm, .armeb => switch (cc) {
            .Naked => unreachable,
            .Unspecified, .C => {
                const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
                if (ret_ty_size <= 4) {
                    result.return_value = .{ .register = c_abi_int_return_regs[0] };
                } else {
                    return self.fail("TODO support more return types for ARM backend", .{});
                }
            },
            else => return self.fail("TODO implement function return values for {}", .{cc}),
        },
        .aarch64 => switch (cc) {
            .Naked => unreachable,
            .Unspecified, .C => {
                const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
                if (ret_ty_size <= 8) {
                    result.return_value = .{ .register = c_abi_int_return_regs[0] };
                } else {
                    // Fixed copy-paste: this message previously said "ARM backend".
                    return self.fail("TODO support more return types for aarch64 backend", .{});
                }
            },
            else => return self.fail("TODO implement function return values for {}", .{cc}),
        },
        else => return self.fail("TODO implement codegen return values for {}", .{self.target.cpu.arch}),
    }
    return result;
}
/// TODO support scope overrides. Also note this logic is duplicated with `Module.wantSafety`.
/// Safety checks are on for Debug and ReleaseSafe builds, off otherwise.
fn wantSafety(self: *Self) bool {
    const mode = self.bin_file.options.optimize_mode;
    return switch (mode) {
        .Debug, .ReleaseSafe => true,
        .ReleaseFast, .ReleaseSmall => false,
    };
}
/// Records a codegen error message and bails out with `error.CodegenFail`.
/// Exactly one error message may be attached per codegen attempt.
fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError {
    @setCold(true);
    // A previously recorded failure must never be silently overwritten.
    assert(self.err_msg == null);
    const msg = try ErrorMsg.create(self.bin_file.allocator, self.src_loc, format, args);
    self.err_msg = msg;
    return error.CodegenFail;
}
/// Same contract as `fail`: records a single error message against this
/// codegen attempt and returns `error.CodegenFail`.
fn failSymbol(self: *Self, comptime format: []const u8, args: anytype) InnerError {
    @setCold(true);
    assert(self.err_msg == null); // only one failure may be recorded
    const allocator = self.bin_file.allocator;
    self.err_msg = try ErrorMsg.create(allocator, self.src_loc, format, args);
    return error.CodegenFail;
}
// Architecture-specific register type, resolved at comptime from the matching
// backend file. Architectures without a dedicated backend here fall back to a
// placeholder enum with a single register that can never be allocated.
const Register = switch (arch) {
    .i386 => @import("codegen/x86.zig").Register,
    .x86_64 => @import("codegen/x86_64.zig").Register,
    .riscv64 => @import("codegen/riscv64.zig").Register,
    .arm, .armeb => @import("codegen/arm.zig").Register,
    .aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").Register,
    else => enum {
        dummy,
        // The fallback register is never eligible for allocation.
        pub fn allocIndex(self: Register) ?u4 {
            _ = self;
            return null;
        }
    },
};
// Architecture-specific machine instruction representation; `void` for
// architectures that have no instruction encoder in this backend.
const Instruction = switch (arch) {
    .riscv64 => @import("codegen/riscv64.zig").Instruction,
    .arm, .armeb => @import("codegen/arm.zig").Instruction,
    .aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").Instruction,
    else => void,
};
// Condition codes are only modeled for the ARM backend so far.
const Condition = switch (arch) {
    .arm, .armeb => @import("codegen/arm.zig").Condition,
    else => void,
};
// Registers the callee must save/restore, per the target's calling convention;
// empty for architectures without a register set defined here.
const callee_preserved_regs = switch (arch) {
    .i386 => @import("codegen/x86.zig").callee_preserved_regs,
    .x86_64 => @import("codegen/x86_64.zig").callee_preserved_regs,
    .riscv64 => @import("codegen/riscv64.zig").callee_preserved_regs,
    .arm, .armeb => @import("codegen/arm.zig").callee_preserved_regs,
    .aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").callee_preserved_regs,
    else => [_]Register{},
};
// Integer registers used to pass parameters under the C ABI.
const c_abi_int_param_regs = switch (arch) {
    .i386 => @import("codegen/x86.zig").c_abi_int_param_regs,
    .x86_64 => @import("codegen/x86_64.zig").c_abi_int_param_regs,
    .arm, .armeb => @import("codegen/arm.zig").c_abi_int_param_regs,
    .aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").c_abi_int_param_regs,
    else => [_]Register{},
};
// Integer registers used to return values under the C ABI.
const c_abi_int_return_regs = switch (arch) {
    .i386 => @import("codegen/x86.zig").c_abi_int_return_regs,
    .x86_64 => @import("codegen/x86_64.zig").c_abi_int_return_regs,
    .arm, .armeb => @import("codegen/arm.zig").c_abi_int_return_regs,
    .aarch64, .aarch64_be, .aarch64_32 => @import("codegen/aarch64.zig").c_abi_int_return_regs,
    else => [_]Register{},
};
/// Parses a register from its textual name. Prefers the arch-specific
/// `parseRegName` when the Register type provides one; otherwise falls back
/// to matching the enum tag names directly.
fn parseRegName(name: []const u8) ?Register {
    // `@hasDecl` is comptime-known, so only the applicable branch is analyzed.
    return if (@hasDecl(Register, "parseRegName"))
        Register.parseRegName(name)
    else
        std.meta.stringToEnum(Register, name);
}
/// Returns the alias of `reg` matching `size_bytes`. Only x86_64 has
/// differently sized aliases of the same register; every other architecture
/// returns `reg` unchanged.
fn registerAlias(reg: Register, size_bytes: u32) Register {
    return switch (arch) {
        // x86_64 registers have 8/16/32/64-bit aliases; select by ABI size.
        .x86_64 => switch (size_bytes) {
            1 => reg.to8(),
            2 => reg.to16(),
            4 => reg.to32(),
            8 => reg.to64(),
            else => unreachable,
        },
        else => reg,
    };
}
/// For most architectures this does nothing. For x86_64 it resolves any aliased
/// registers to the 64-bit wide ones.
fn toCanonicalReg(reg: Register) Register {
    // `arch` is comptime-known, so the x86_64-only call is eliminated elsewhere.
    if (arch == .x86_64) return reg.to64();
    return reg;
}
};
}
|
src/codegen.zig
|
const std = @import("std");
const path = std.fs.path;
const Allocator = std.mem.Allocator;
const Compilation = @import("Compilation.zig");
const build_options = @import("build_options");
const target_util = @import("target.zig");
/// Builds the WASI libc sysroot by compiling, in order:
///   1. the crt sources into a "crt" object file, and
///   2. dlmalloc + libc-bottom-half + libc-top-half into a single "c" library.
/// The C sources are compiled through Clang, so this fails early when the
/// compiler was built without LLVM extensions.
pub fn buildWasiLibcSysroot(comp: *Compilation) !void {
    if (!build_options.have_llvm) {
        return error.ZigCompilerNotBuiltWithLLVMExtensions;
    }
    const gpa = comp.gpa;
    // All flag strings and joined paths below are arena-allocated and freed
    // together once the sysroot build finishes.
    var arena_allocator = std.heap.ArenaAllocator.init(gpa);
    defer arena_allocator.deinit();
    const arena = &arena_allocator.allocator;
    {
        // Compile crt sources. crt is built with -Os (want_O3 == false).
        var args = std.ArrayList([]const u8).init(arena);
        try addCCArgs(comp, arena, &args, false);
        try args.appendSlice(&[_][]const u8{
            "-I",
            try comp.zig_lib_directory.join(arena, &[_][]const u8{
                "libc",
                "wasi",
                "libc-bottom-half",
                "headers",
                "private",
            }),
            "-I",
            try comp.zig_lib_directory.join(arena, &[_][]const u8{
                "libc",
                "wasi",
                "libc-bottom-half",
                "cloudlibc",
                "src",
                "include",
            }),
            "-I",
            try comp.zig_lib_directory.join(arena, &[_][]const u8{
                "libc",
                "wasi",
                "libc-bottom-half",
                "cloudlibc",
                "src",
            }),
        });
        var comp_sources = std.ArrayList(Compilation.CSourceFile).init(arena);
        for (crt_src_files) |file_path| {
            try comp_sources.append(.{
                // `sanitize` rewrites '/' into the native path separator.
                .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
                    "libc", try sanitize(arena, file_path),
                }),
                .extra_flags = args.items,
            });
        }
        try comp.build_crt_file("crt", .Obj, comp_sources.items);
    }
    {
        // Compile WASI libc (sysroot). All three components below accumulate
        // into one source list, built into a single "c" static library.
        var comp_sources = std.ArrayList(Compilation.CSourceFile).init(arena);
        {
            // Compile dlmalloc.
            var args = std.ArrayList([]const u8).init(arena);
            try addCCArgs(comp, arena, &args, true);
            try args.appendSlice(&[_][]const u8{
                "-I",
                try comp.zig_lib_directory.join(arena, &[_][]const u8{
                    "libc",
                    "wasi",
                    "dlmalloc",
                    "include",
                }),
            });
            for (dlmalloc_src_files) |file_path| {
                try comp_sources.append(.{
                    .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
                        "libc", try sanitize(arena, file_path),
                    }),
                    .extra_flags = args.items,
                });
            }
        }
        {
            // Compile libc-bottom-half.
            var args = std.ArrayList([]const u8).init(arena);
            try addCCArgs(comp, arena, &args, true);
            try args.appendSlice(&[_][]const u8{
                "-I",
                try comp.zig_lib_directory.join(arena, &[_][]const u8{
                    "libc",
                    "wasi",
                    "libc-bottom-half",
                    "headers",
                    "private",
                }),
                "-I",
                try comp.zig_lib_directory.join(arena, &[_][]const u8{
                    "libc",
                    "wasi",
                    "libc-bottom-half",
                    "cloudlibc",
                    "src",
                }),
                "-I",
                try comp.zig_lib_directory.join(arena, &[_][]const u8{
                    "libc",
                    "wasi",
                    "libc-bottom-half",
                    "cloudlibc",
                    "src",
                    "include",
                }),
            });
            for (libc_bottom_half_src_files) |file_path| {
                try comp_sources.append(.{
                    .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
                        "libc", try sanitize(arena, file_path),
                    }),
                    .extra_flags = args.items,
                });
            }
        }
        {
            // Compile libc-top-half.
            var args = std.ArrayList([]const u8).init(arena);
            try addCCArgs(comp, arena, &args, true);
            try args.appendSlice(&[_][]const u8{
                "-I",
                try comp.zig_lib_directory.join(arena, &[_][]const u8{
                    "libc",
                    "wasi",
                    "libc-top-half",
                    "musl",
                    "src",
                    "include",
                }),
                "-I",
                try comp.zig_lib_directory.join(arena, &[_][]const u8{
                    "libc",
                    "wasi",
                    "libc-top-half",
                    "musl",
                    "src",
                    "internal",
                }),
                "-I",
                try comp.zig_lib_directory.join(arena, &[_][]const u8{
                    "libc",
                    "wasi",
                    "libc-top-half",
                    "musl",
                    "arch",
                    "wasm32",
                }),
                "-I",
                try comp.zig_lib_directory.join(arena, &[_][]const u8{
                    "libc",
                    "wasi",
                    "libc-top-half",
                    "musl",
                    "arch",
                    "generic",
                }),
                "-I",
                try comp.zig_lib_directory.join(arena, &[_][]const u8{
                    "libc",
                    "wasi",
                    "libc-top-half",
                    "headers",
                    "private",
                }),
            });
            for (libc_top_half_src_files) |file_path| {
                try comp_sources.append(.{
                    .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
                        "libc", try sanitize(arena, file_path),
                    }),
                    .extra_flags = args.items,
                });
            }
        }
        try comp.build_crt_file("c", .Lib, comp_sources.items);
    }
}
/// Rewrites '/' separators in `file_path` to the native path separator,
/// duplicating the string only when a rewrite is actually needed.
/// TODO do this at comptime on the comptime data rather than at runtime;
/// probably best to wait until self-hosted is done and our comptime execution
/// is faster and uses less memory.
fn sanitize(arena: *Allocator, file_path: []const u8) ![]const u8 {
    // On '/'-separated systems the path is already in native form.
    if (path.sep == '/') return file_path;
    const native = try arena.dupe(u8, file_path);
    for (native) |*byte| {
        if (byte.* == '/') byte.* = path.sep;
    }
    return native;
}
/// Appends the Clang flags shared by every WASI libc compilation unit:
/// language standard, codegen flags, optimization level, thread model, and
/// the musl sysroot include directory for the current target.
fn addCCArgs(
    comp: *Compilation,
    arena: *Allocator,
    args: *std.ArrayList([]const u8),
    want_O3: bool,
) error{OutOfMemory}!void {
    const target = comp.getTarget();
    const triple = try std.fmt.allocPrint(arena, "{s}-{s}-musl", .{
        target_util.archMuslName(target.cpu.arch),
        @tagName(target.os.tag),
    });
    // Libc is compiled for size by default; callers opt in to -O3.
    const opt_flag: []const u8 = if (want_O3) "-O3" else "-Os";
    try args.appendSlice(&[_][]const u8{
        "-std=gnu17",
        "-fno-trapping-math",
        "-fno-stack-protector",
        "-w", // ignore all warnings
        opt_flag,
        "-mthread-model",
        "single",
        "-iwithsysroot",
        try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "include", triple }),
    });
}
// dlmalloc provides the malloc implementation bundled into the "c" library.
// Paths are relative to lib/libc and use '/' — `sanitize` converts them to
// the native separator at build time.
const dlmalloc_src_files = [_][]const u8{
    "wasi/dlmalloc/src/dlmalloc.c",
};
// libc-bottom-half sources (cloudlibc syscall wrappers plus the wasi-libc
// "sources" glue). Paths are relative to lib/libc and use '/'; `sanitize`
// converts them to the native separator at build time.
const libc_bottom_half_src_files = [_][]const u8{
    "wasi/libc-bottom-half/cloudlibc/src/libc/dirent/closedir.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/dirent/dirfd.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/dirent/fdopendir.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/dirent/readdir.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/dirent/scandirat.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/dirent/telldir.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/dirent/fdclosedir.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/dirent/opendirat.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/dirent/rewinddir.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/dirent/seekdir.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/errno/errno.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/fcntl/fcntl.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/fcntl/openat.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/fcntl/posix_fadvise.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/fcntl/posix_fallocate.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/poll/poll.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sched/sched_yield.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/stdio/renameat.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/stdlib/_Exit.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sys/ioctl/ioctl.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sys/select/pselect.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sys/select/select.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sys/socket/getsockopt.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sys/socket/recv.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sys/socket/send.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sys/socket/shutdown.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sys/stat/fstatat.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sys/stat/fstat.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sys/stat/futimens.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sys/stat/mkdirat.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sys/stat/utimensat.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sys/time/gettimeofday.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sys/uio/preadv.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sys/uio/pwritev.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sys/uio/readv.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/sys/uio/writev.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/time/clock_getres.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/time/clock_gettime.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/time/CLOCK_MONOTONIC.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/time/clock_nanosleep.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/time/CLOCK_PROCESS_CPUTIME_ID.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/time/CLOCK_REALTIME.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/time/CLOCK_THREAD_CPUTIME_ID.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/time/nanosleep.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/time/time.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/unistd/close.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/unistd/faccessat.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/unistd/fdatasync.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/unistd/fsync.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/unistd/ftruncate.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/unistd/linkat.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/unistd/lseek.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/unistd/pread.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/unistd/pwrite.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/unistd/read.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/unistd/readlinkat.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/unistd/sleep.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/unistd/symlinkat.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/unistd/unlinkat.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/unistd/usleep.c",
    "wasi/libc-bottom-half/cloudlibc/src/libc/unistd/write.c",
    "wasi/libc-bottom-half/sources/abort.c",
    "wasi/libc-bottom-half/sources/at_fdcwd.c",
    "wasi/libc-bottom-half/sources/complex-builtins.c",
    "wasi/libc-bottom-half/sources/environ.c",
    "wasi/libc-bottom-half/sources/errno.c",
    "wasi/libc-bottom-half/sources/getcwd.c",
    "wasi/libc-bottom-half/sources/getentropy.c",
    "wasi/libc-bottom-half/sources/isatty.c",
    "wasi/libc-bottom-half/sources/__main_argc_argv.c",
    "wasi/libc-bottom-half/sources/__main_void.c",
    "wasi/libc-bottom-half/sources/__original_main.c",
    "wasi/libc-bottom-half/sources/posix.c",
    "wasi/libc-bottom-half/sources/preopens.c",
    "wasi/libc-bottom-half/sources/reallocarray.c",
    "wasi/libc-bottom-half/sources/sbrk.c",
    "wasi/libc-bottom-half/sources/truncate.c",
    "wasi/libc-bottom-half/sources/__wasilibc_fd_renumber.c",
    "wasi/libc-bottom-half/sources/__wasilibc_initialize_environ.c",
    "wasi/libc-bottom-half/sources/__wasilibc_real.c",
    "wasi/libc-bottom-half/sources/__wasilibc_rmdirat.c",
    "wasi/libc-bottom-half/sources/__wasilibc_tell.c",
    "wasi/libc-bottom-half/sources/__wasilibc_unlinkat.c",
    "wasi/libc-bottom-half/sources/math/fmin-fmax.c",
    "wasi/libc-bottom-half/sources/math/math-builtins.c",
    // TODO apparently, due to a bug in LLD, the weak refs are garbled
    // unless chdir.c is last in the archive
    // https://reviews.llvm.org/D85567
    "wasi/libc-bottom-half/sources/chdir.c",
};
const libc_top_half_src_files = [_][]const u8{
"wasi/libc-top-half/musl/src/misc/a64l.c",
"wasi/libc-top-half/musl/src/misc/basename.c",
"wasi/libc-top-half/musl/src/misc/dirname.c",
"wasi/libc-top-half/musl/src/misc/ffs.c",
"wasi/libc-top-half/musl/src/misc/ffsl.c",
"wasi/libc-top-half/musl/src/misc/ffsll.c",
"wasi/libc-top-half/musl/src/misc/fmtmsg.c",
"wasi/libc-top-half/musl/src/misc/getdomainname.c",
"wasi/libc-top-half/musl/src/misc/gethostid.c",
"wasi/libc-top-half/musl/src/misc/getopt.c",
"wasi/libc-top-half/musl/src/misc/getopt_long.c",
"wasi/libc-top-half/musl/src/misc/getsubopt.c",
"wasi/libc-top-half/musl/src/misc/uname.c",
"wasi/libc-top-half/musl/src/misc/nftw.c",
"wasi/libc-top-half/musl/src/errno/strerror.c",
"wasi/libc-top-half/musl/src/network/htonl.c",
"wasi/libc-top-half/musl/src/network/htons.c",
"wasi/libc-top-half/musl/src/network/ntohl.c",
"wasi/libc-top-half/musl/src/network/ntohs.c",
"wasi/libc-top-half/musl/src/network/inet_ntop.c",
"wasi/libc-top-half/musl/src/network/inet_pton.c",
"wasi/libc-top-half/musl/src/network/inet_aton.c",
"wasi/libc-top-half/musl/src/network/in6addr_any.c",
"wasi/libc-top-half/musl/src/network/in6addr_loopback.c",
"wasi/libc-top-half/musl/src/fenv/fenv.c",
"wasi/libc-top-half/musl/src/fenv/fesetround.c",
"wasi/libc-top-half/musl/src/fenv/feupdateenv.c",
"wasi/libc-top-half/musl/src/fenv/fesetexceptflag.c",
"wasi/libc-top-half/musl/src/fenv/fegetexceptflag.c",
"wasi/libc-top-half/musl/src/fenv/feholdexcept.c",
"wasi/libc-top-half/musl/src/exit/exit.c",
"wasi/libc-top-half/musl/src/exit/atexit.c",
"wasi/libc-top-half/musl/src/exit/assert.c",
"wasi/libc-top-half/musl/src/exit/quick_exit.c",
"wasi/libc-top-half/musl/src/exit/at_quick_exit.c",
"wasi/libc-top-half/musl/src/time/strftime.c",
"wasi/libc-top-half/musl/src/time/asctime.c",
"wasi/libc-top-half/musl/src/time/asctime_r.c",
"wasi/libc-top-half/musl/src/time/ctime.c",
"wasi/libc-top-half/musl/src/time/ctime_r.c",
"wasi/libc-top-half/musl/src/time/wcsftime.c",
"wasi/libc-top-half/musl/src/time/strptime.c",
"wasi/libc-top-half/musl/src/time/difftime.c",
"wasi/libc-top-half/musl/src/time/timegm.c",
"wasi/libc-top-half/musl/src/time/ftime.c",
"wasi/libc-top-half/musl/src/time/gmtime.c",
"wasi/libc-top-half/musl/src/time/gmtime_r.c",
"wasi/libc-top-half/musl/src/time/timespec_get.c",
"wasi/libc-top-half/musl/src/time/getdate.c",
"wasi/libc-top-half/musl/src/time/localtime.c",
"wasi/libc-top-half/musl/src/time/localtime_r.c",
"wasi/libc-top-half/musl/src/time/mktime.c",
"wasi/libc-top-half/musl/src/time/__tm_to_secs.c",
"wasi/libc-top-half/musl/src/time/__month_to_secs.c",
"wasi/libc-top-half/musl/src/time/__secs_to_tm.c",
"wasi/libc-top-half/musl/src/time/__year_to_secs.c",
"wasi/libc-top-half/musl/src/time/__tz.c",
"wasi/libc-top-half/musl/src/fcntl/creat.c",
"wasi/libc-top-half/musl/src/dirent/alphasort.c",
"wasi/libc-top-half/musl/src/dirent/versionsort.c",
"wasi/libc-top-half/musl/src/env/clearenv.c",
"wasi/libc-top-half/musl/src/env/getenv.c",
"wasi/libc-top-half/musl/src/env/putenv.c",
"wasi/libc-top-half/musl/src/env/setenv.c",
"wasi/libc-top-half/musl/src/env/unsetenv.c",
"wasi/libc-top-half/musl/src/unistd/posix_close.c",
"wasi/libc-top-half/musl/src/internal/defsysinfo.c",
"wasi/libc-top-half/musl/src/internal/floatscan.c",
"wasi/libc-top-half/musl/src/internal/intscan.c",
"wasi/libc-top-half/musl/src/internal/libc.c",
"wasi/libc-top-half/musl/src/internal/shgetc.c",
"wasi/libc-top-half/musl/src/stdio/asprintf.c",
"wasi/libc-top-half/musl/src/stdio/clearerr.c",
"wasi/libc-top-half/musl/src/stdio/dprintf.c",
"wasi/libc-top-half/musl/src/stdio/ext2.c",
"wasi/libc-top-half/musl/src/stdio/ext.c",
"wasi/libc-top-half/musl/src/stdio/fclose.c",
"wasi/libc-top-half/musl/src/stdio/__fclose_ca.c",
"wasi/libc-top-half/musl/src/stdio/__fdopen.c",
"wasi/libc-top-half/musl/src/stdio/feof.c",
"wasi/libc-top-half/musl/src/stdio/ferror.c",
"wasi/libc-top-half/musl/src/stdio/fflush.c",
"wasi/libc-top-half/musl/src/stdio/fgetc.c",
"wasi/libc-top-half/musl/src/stdio/fgetln.c",
"wasi/libc-top-half/musl/src/stdio/fgetpos.c",
"wasi/libc-top-half/musl/src/stdio/fgets.c",
"wasi/libc-top-half/musl/src/stdio/fgetwc.c",
"wasi/libc-top-half/musl/src/stdio/fgetws.c",
"wasi/libc-top-half/musl/src/stdio/fileno.c",
"wasi/libc-top-half/musl/src/stdio/fmemopen.c",
"wasi/libc-top-half/musl/src/stdio/__fmodeflags.c",
"wasi/libc-top-half/musl/src/stdio/fopen.c",
"wasi/libc-top-half/musl/src/stdio/fopencookie.c",
"wasi/libc-top-half/musl/src/stdio/__fopen_rb_ca.c",
"wasi/libc-top-half/musl/src/stdio/fprintf.c",
"wasi/libc-top-half/musl/src/stdio/fputc.c",
"wasi/libc-top-half/musl/src/stdio/fputs.c",
"wasi/libc-top-half/musl/src/stdio/fputwc.c",
"wasi/libc-top-half/musl/src/stdio/fputws.c",
"wasi/libc-top-half/musl/src/stdio/fread.c",
"wasi/libc-top-half/musl/src/stdio/freopen.c",
"wasi/libc-top-half/musl/src/stdio/fscanf.c",
"wasi/libc-top-half/musl/src/stdio/fseek.c",
"wasi/libc-top-half/musl/src/stdio/fsetpos.c",
"wasi/libc-top-half/musl/src/stdio/ftell.c",
"wasi/libc-top-half/musl/src/stdio/fwide.c",
"wasi/libc-top-half/musl/src/stdio/fwprintf.c",
"wasi/libc-top-half/musl/src/stdio/fwrite.c",
"wasi/libc-top-half/musl/src/stdio/fwscanf.c",
"wasi/libc-top-half/musl/src/stdio/getc.c",
"wasi/libc-top-half/musl/src/stdio/getchar.c",
"wasi/libc-top-half/musl/src/stdio/getchar_unlocked.c",
"wasi/libc-top-half/musl/src/stdio/getc_unlocked.c",
"wasi/libc-top-half/musl/src/stdio/getdelim.c",
"wasi/libc-top-half/musl/src/stdio/getline.c",
"wasi/libc-top-half/musl/src/stdio/getw.c",
"wasi/libc-top-half/musl/src/stdio/getwc.c",
"wasi/libc-top-half/musl/src/stdio/getwchar.c",
"wasi/libc-top-half/musl/src/stdio/ofl_add.c",
"wasi/libc-top-half/musl/src/stdio/ofl.c",
"wasi/libc-top-half/musl/src/stdio/open_memstream.c",
"wasi/libc-top-half/musl/src/stdio/open_wmemstream.c",
"wasi/libc-top-half/musl/src/stdio/__overflow.c",
"wasi/libc-top-half/musl/src/stdio/perror.c",
"wasi/libc-top-half/musl/src/stdio/printf.c",
"wasi/libc-top-half/musl/src/stdio/putc.c",
"wasi/libc-top-half/musl/src/stdio/putchar.c",
"wasi/libc-top-half/musl/src/stdio/putchar_unlocked.c",
"wasi/libc-top-half/musl/src/stdio/putc_unlocked.c",
"wasi/libc-top-half/musl/src/stdio/puts.c",
"wasi/libc-top-half/musl/src/stdio/putw.c",
"wasi/libc-top-half/musl/src/stdio/putwc.c",
"wasi/libc-top-half/musl/src/stdio/putwchar.c",
"wasi/libc-top-half/musl/src/stdio/rewind.c",
"wasi/libc-top-half/musl/src/stdio/scanf.c",
"wasi/libc-top-half/musl/src/stdio/setbuf.c",
"wasi/libc-top-half/musl/src/stdio/setbuffer.c",
"wasi/libc-top-half/musl/src/stdio/setlinebuf.c",
"wasi/libc-top-half/musl/src/stdio/setvbuf.c",
"wasi/libc-top-half/musl/src/stdio/snprintf.c",
"wasi/libc-top-half/musl/src/stdio/sprintf.c",
"wasi/libc-top-half/musl/src/stdio/sscanf.c",
"wasi/libc-top-half/musl/src/stdio/stderr.c",
"wasi/libc-top-half/musl/src/stdio/stdin.c",
"wasi/libc-top-half/musl/src/stdio/__stdio_close.c",
"wasi/libc-top-half/musl/src/stdio/__stdio_exit.c",
"wasi/libc-top-half/musl/src/stdio/__stdio_read.c",
"wasi/libc-top-half/musl/src/stdio/__stdio_seek.c",
"wasi/libc-top-half/musl/src/stdio/__stdio_write.c",
"wasi/libc-top-half/musl/src/stdio/stdout.c",
"wasi/libc-top-half/musl/src/stdio/__stdout_write.c",
"wasi/libc-top-half/musl/src/stdio/swprintf.c",
"wasi/libc-top-half/musl/src/stdio/swscanf.c",
"wasi/libc-top-half/musl/src/stdio/__toread.c",
"wasi/libc-top-half/musl/src/stdio/__towrite.c",
"wasi/libc-top-half/musl/src/stdio/__uflow.c",
"wasi/libc-top-half/musl/src/stdio/ungetc.c",
"wasi/libc-top-half/musl/src/stdio/ungetwc.c",
"wasi/libc-top-half/musl/src/stdio/vasprintf.c",
"wasi/libc-top-half/musl/src/stdio/vdprintf.c",
"wasi/libc-top-half/musl/src/stdio/vfprintf.c",
"wasi/libc-top-half/musl/src/stdio/vfscanf.c",
"wasi/libc-top-half/musl/src/stdio/vfwprintf.c",
"wasi/libc-top-half/musl/src/stdio/vfwscanf.c",
"wasi/libc-top-half/musl/src/stdio/vprintf.c",
"wasi/libc-top-half/musl/src/stdio/vscanf.c",
"wasi/libc-top-half/musl/src/stdio/vsnprintf.c",
"wasi/libc-top-half/musl/src/stdio/vsprintf.c",
"wasi/libc-top-half/musl/src/stdio/vsscanf.c",
"wasi/libc-top-half/musl/src/stdio/vswprintf.c",
"wasi/libc-top-half/musl/src/stdio/vswscanf.c",
"wasi/libc-top-half/musl/src/stdio/vwprintf.c",
"wasi/libc-top-half/musl/src/stdio/vwscanf.c",
"wasi/libc-top-half/musl/src/stdio/wprintf.c",
"wasi/libc-top-half/musl/src/stdio/wscanf.c",
"wasi/libc-top-half/musl/src/string/bcmp.c",
"wasi/libc-top-half/musl/src/string/bcopy.c",
"wasi/libc-top-half/musl/src/string/bzero.c",
"wasi/libc-top-half/musl/src/string/explicit_bzero.c",
"wasi/libc-top-half/musl/src/string/index.c",
"wasi/libc-top-half/musl/src/string/memccpy.c",
"wasi/libc-top-half/musl/src/string/memchr.c",
"wasi/libc-top-half/musl/src/string/memcmp.c",
"wasi/libc-top-half/musl/src/string/memcpy.c",
"wasi/libc-top-half/musl/src/string/memmem.c",
"wasi/libc-top-half/musl/src/string/memmove.c",
"wasi/libc-top-half/musl/src/string/mempcpy.c",
"wasi/libc-top-half/musl/src/string/memrchr.c",
"wasi/libc-top-half/musl/src/string/memset.c",
"wasi/libc-top-half/musl/src/string/rindex.c",
"wasi/libc-top-half/musl/src/string/stpcpy.c",
"wasi/libc-top-half/musl/src/string/stpncpy.c",
"wasi/libc-top-half/musl/src/string/strcasecmp.c",
"wasi/libc-top-half/musl/src/string/strcasestr.c",
"wasi/libc-top-half/musl/src/string/strcat.c",
"wasi/libc-top-half/musl/src/string/strchr.c",
"wasi/libc-top-half/musl/src/string/strchrnul.c",
"wasi/libc-top-half/musl/src/string/strcmp.c",
"wasi/libc-top-half/musl/src/string/strcpy.c",
"wasi/libc-top-half/musl/src/string/strcspn.c",
"wasi/libc-top-half/musl/src/string/strdup.c",
"wasi/libc-top-half/musl/src/string/strerror_r.c",
"wasi/libc-top-half/musl/src/string/strlcat.c",
"wasi/libc-top-half/musl/src/string/strlcpy.c",
"wasi/libc-top-half/musl/src/string/strlen.c",
"wasi/libc-top-half/musl/src/string/strncasecmp.c",
"wasi/libc-top-half/musl/src/string/strncat.c",
"wasi/libc-top-half/musl/src/string/strncmp.c",
"wasi/libc-top-half/musl/src/string/strncpy.c",
"wasi/libc-top-half/musl/src/string/strndup.c",
"wasi/libc-top-half/musl/src/string/strnlen.c",
"wasi/libc-top-half/musl/src/string/strpbrk.c",
"wasi/libc-top-half/musl/src/string/strrchr.c",
"wasi/libc-top-half/musl/src/string/strsep.c",
"wasi/libc-top-half/musl/src/string/strspn.c",
"wasi/libc-top-half/musl/src/string/strstr.c",
"wasi/libc-top-half/musl/src/string/strtok.c",
"wasi/libc-top-half/musl/src/string/strtok_r.c",
"wasi/libc-top-half/musl/src/string/strverscmp.c",
"wasi/libc-top-half/musl/src/string/swab.c",
"wasi/libc-top-half/musl/src/string/wcpcpy.c",
"wasi/libc-top-half/musl/src/string/wcpncpy.c",
"wasi/libc-top-half/musl/src/string/wcscasecmp.c",
"wasi/libc-top-half/musl/src/string/wcscasecmp_l.c",
"wasi/libc-top-half/musl/src/string/wcscat.c",
"wasi/libc-top-half/musl/src/string/wcschr.c",
"wasi/libc-top-half/musl/src/string/wcscmp.c",
"wasi/libc-top-half/musl/src/string/wcscpy.c",
"wasi/libc-top-half/musl/src/string/wcscspn.c",
"wasi/libc-top-half/musl/src/string/wcsdup.c",
"wasi/libc-top-half/musl/src/string/wcslen.c",
"wasi/libc-top-half/musl/src/string/wcsncasecmp.c",
"wasi/libc-top-half/musl/src/string/wcsncasecmp_l.c",
"wasi/libc-top-half/musl/src/string/wcsncat.c",
"wasi/libc-top-half/musl/src/string/wcsncmp.c",
"wasi/libc-top-half/musl/src/string/wcsncpy.c",
"wasi/libc-top-half/musl/src/string/wcsnlen.c",
"wasi/libc-top-half/musl/src/string/wcspbrk.c",
"wasi/libc-top-half/musl/src/string/wcsrchr.c",
"wasi/libc-top-half/musl/src/string/wcsspn.c",
"wasi/libc-top-half/musl/src/string/wcsstr.c",
"wasi/libc-top-half/musl/src/string/wcstok.c",
"wasi/libc-top-half/musl/src/string/wcswcs.c",
"wasi/libc-top-half/musl/src/string/wmemchr.c",
"wasi/libc-top-half/musl/src/string/wmemcmp.c",
"wasi/libc-top-half/musl/src/string/wmemcpy.c",
"wasi/libc-top-half/musl/src/string/wmemmove.c",
"wasi/libc-top-half/musl/src/string/wmemset.c",
"wasi/libc-top-half/musl/src/locale/catclose.c",
"wasi/libc-top-half/musl/src/locale/catgets.c",
"wasi/libc-top-half/musl/src/locale/catopen.c",
"wasi/libc-top-half/musl/src/locale/c_locale.c",
"wasi/libc-top-half/musl/src/locale/duplocale.c",
"wasi/libc-top-half/musl/src/locale/freelocale.c",
"wasi/libc-top-half/musl/src/locale/iconv.c",
"wasi/libc-top-half/musl/src/locale/iconv_close.c",
"wasi/libc-top-half/musl/src/locale/langinfo.c",
"wasi/libc-top-half/musl/src/locale/__lctrans.c",
"wasi/libc-top-half/musl/src/locale/localeconv.c",
"wasi/libc-top-half/musl/src/locale/locale_map.c",
"wasi/libc-top-half/musl/src/locale/__mo_lookup.c",
"wasi/libc-top-half/musl/src/locale/newlocale.c",
"wasi/libc-top-half/musl/src/locale/pleval.c",
"wasi/libc-top-half/musl/src/locale/setlocale.c",
"wasi/libc-top-half/musl/src/locale/strcoll.c",
"wasi/libc-top-half/musl/src/locale/strfmon.c",
"wasi/libc-top-half/musl/src/locale/strxfrm.c",
"wasi/libc-top-half/musl/src/locale/uselocale.c",
"wasi/libc-top-half/musl/src/locale/wcscoll.c",
"wasi/libc-top-half/musl/src/locale/wcsxfrm.c",
"wasi/libc-top-half/musl/src/stdlib/abs.c",
"wasi/libc-top-half/musl/src/stdlib/atof.c",
"wasi/libc-top-half/musl/src/stdlib/atoi.c",
"wasi/libc-top-half/musl/src/stdlib/atol.c",
"wasi/libc-top-half/musl/src/stdlib/atoll.c",
"wasi/libc-top-half/musl/src/stdlib/bsearch.c",
"wasi/libc-top-half/musl/src/stdlib/div.c",
"wasi/libc-top-half/musl/src/stdlib/ecvt.c",
"wasi/libc-top-half/musl/src/stdlib/fcvt.c",
"wasi/libc-top-half/musl/src/stdlib/gcvt.c",
"wasi/libc-top-half/musl/src/stdlib/imaxabs.c",
"wasi/libc-top-half/musl/src/stdlib/imaxdiv.c",
"wasi/libc-top-half/musl/src/stdlib/labs.c",
"wasi/libc-top-half/musl/src/stdlib/ldiv.c",
"wasi/libc-top-half/musl/src/stdlib/llabs.c",
"wasi/libc-top-half/musl/src/stdlib/lldiv.c",
"wasi/libc-top-half/musl/src/stdlib/qsort.c",
"wasi/libc-top-half/musl/src/stdlib/strtod.c",
"wasi/libc-top-half/musl/src/stdlib/strtol.c",
"wasi/libc-top-half/musl/src/stdlib/wcstod.c",
"wasi/libc-top-half/musl/src/stdlib/wcstol.c",
"wasi/libc-top-half/musl/src/search/hsearch.c",
"wasi/libc-top-half/musl/src/search/insque.c",
"wasi/libc-top-half/musl/src/search/lsearch.c",
"wasi/libc-top-half/musl/src/search/tdelete.c",
"wasi/libc-top-half/musl/src/search/tdestroy.c",
"wasi/libc-top-half/musl/src/search/tfind.c",
"wasi/libc-top-half/musl/src/search/tsearch.c",
"wasi/libc-top-half/musl/src/search/twalk.c",
"wasi/libc-top-half/musl/src/multibyte/btowc.c",
"wasi/libc-top-half/musl/src/multibyte/c16rtomb.c",
"wasi/libc-top-half/musl/src/multibyte/c32rtomb.c",
"wasi/libc-top-half/musl/src/multibyte/internal.c",
"wasi/libc-top-half/musl/src/multibyte/mblen.c",
"wasi/libc-top-half/musl/src/multibyte/mbrlen.c",
"wasi/libc-top-half/musl/src/multibyte/mbrtoc16.c",
"wasi/libc-top-half/musl/src/multibyte/mbrtoc32.c",
"wasi/libc-top-half/musl/src/multibyte/mbrtowc.c",
"wasi/libc-top-half/musl/src/multibyte/mbsinit.c",
"wasi/libc-top-half/musl/src/multibyte/mbsnrtowcs.c",
"wasi/libc-top-half/musl/src/multibyte/mbsrtowcs.c",
"wasi/libc-top-half/musl/src/multibyte/mbstowcs.c",
"wasi/libc-top-half/musl/src/multibyte/mbtowc.c",
"wasi/libc-top-half/musl/src/multibyte/wcrtomb.c",
"wasi/libc-top-half/musl/src/multibyte/wcsnrtombs.c",
"wasi/libc-top-half/musl/src/multibyte/wcsrtombs.c",
"wasi/libc-top-half/musl/src/multibyte/wcstombs.c",
"wasi/libc-top-half/musl/src/multibyte/wctob.c",
"wasi/libc-top-half/musl/src/multibyte/wctomb.c",
"wasi/libc-top-half/musl/src/regex/fnmatch.c",
"wasi/libc-top-half/musl/src/regex/glob.c",
"wasi/libc-top-half/musl/src/regex/regcomp.c",
"wasi/libc-top-half/musl/src/regex/regerror.c",
"wasi/libc-top-half/musl/src/regex/regexec.c",
"wasi/libc-top-half/musl/src/regex/tre-mem.c",
"wasi/libc-top-half/musl/src/prng/drand48.c",
"wasi/libc-top-half/musl/src/prng/lcong48.c",
"wasi/libc-top-half/musl/src/prng/lrand48.c",
"wasi/libc-top-half/musl/src/prng/mrand48.c",
"wasi/libc-top-half/musl/src/prng/__rand48_step.c",
"wasi/libc-top-half/musl/src/prng/rand.c",
"wasi/libc-top-half/musl/src/prng/random.c",
"wasi/libc-top-half/musl/src/prng/rand_r.c",
"wasi/libc-top-half/musl/src/prng/__seed48.c",
"wasi/libc-top-half/musl/src/prng/seed48.c",
"wasi/libc-top-half/musl/src/prng/srand48.c",
"wasi/libc-top-half/musl/src/conf/confstr.c",
"wasi/libc-top-half/musl/src/conf/fpathconf.c",
"wasi/libc-top-half/musl/src/conf/legacy.c",
"wasi/libc-top-half/musl/src/conf/pathconf.c",
"wasi/libc-top-half/musl/src/conf/sysconf.c",
"wasi/libc-top-half/musl/src/ctype/__ctype_b_loc.c",
"wasi/libc-top-half/musl/src/ctype/__ctype_get_mb_cur_max.c",
"wasi/libc-top-half/musl/src/ctype/__ctype_tolower_loc.c",
"wasi/libc-top-half/musl/src/ctype/__ctype_toupper_loc.c",
"wasi/libc-top-half/musl/src/ctype/isalnum.c",
"wasi/libc-top-half/musl/src/ctype/isalpha.c",
"wasi/libc-top-half/musl/src/ctype/isascii.c",
"wasi/libc-top-half/musl/src/ctype/isblank.c",
"wasi/libc-top-half/musl/src/ctype/iscntrl.c",
"wasi/libc-top-half/musl/src/ctype/isdigit.c",
"wasi/libc-top-half/musl/src/ctype/isgraph.c",
"wasi/libc-top-half/musl/src/ctype/islower.c",
"wasi/libc-top-half/musl/src/ctype/isprint.c",
"wasi/libc-top-half/musl/src/ctype/ispunct.c",
"wasi/libc-top-half/musl/src/ctype/isspace.c",
"wasi/libc-top-half/musl/src/ctype/isupper.c",
"wasi/libc-top-half/musl/src/ctype/iswalnum.c",
"wasi/libc-top-half/musl/src/ctype/iswalpha.c",
"wasi/libc-top-half/musl/src/ctype/iswblank.c",
"wasi/libc-top-half/musl/src/ctype/iswcntrl.c",
"wasi/libc-top-half/musl/src/ctype/iswctype.c",
"wasi/libc-top-half/musl/src/ctype/iswdigit.c",
"wasi/libc-top-half/musl/src/ctype/iswgraph.c",
"wasi/libc-top-half/musl/src/ctype/iswlower.c",
"wasi/libc-top-half/musl/src/ctype/iswprint.c",
"wasi/libc-top-half/musl/src/ctype/iswpunct.c",
"wasi/libc-top-half/musl/src/ctype/iswspace.c",
"wasi/libc-top-half/musl/src/ctype/iswupper.c",
"wasi/libc-top-half/musl/src/ctype/iswxdigit.c",
"wasi/libc-top-half/musl/src/ctype/isxdigit.c",
"wasi/libc-top-half/musl/src/ctype/toascii.c",
"wasi/libc-top-half/musl/src/ctype/tolower.c",
"wasi/libc-top-half/musl/src/ctype/toupper.c",
"wasi/libc-top-half/musl/src/ctype/towctrans.c",
"wasi/libc-top-half/musl/src/ctype/wcswidth.c",
"wasi/libc-top-half/musl/src/ctype/wctrans.c",
"wasi/libc-top-half/musl/src/ctype/wcwidth.c",
"wasi/libc-top-half/musl/src/math/acos.c",
"wasi/libc-top-half/musl/src/math/acosf.c",
"wasi/libc-top-half/musl/src/math/acosh.c",
"wasi/libc-top-half/musl/src/math/acoshf.c",
"wasi/libc-top-half/musl/src/math/acoshl.c",
"wasi/libc-top-half/musl/src/math/acosl.c",
"wasi/libc-top-half/musl/src/math/asin.c",
"wasi/libc-top-half/musl/src/math/asinf.c",
"wasi/libc-top-half/musl/src/math/asinh.c",
"wasi/libc-top-half/musl/src/math/asinhf.c",
"wasi/libc-top-half/musl/src/math/asinhl.c",
"wasi/libc-top-half/musl/src/math/asinl.c",
"wasi/libc-top-half/musl/src/math/atan2.c",
"wasi/libc-top-half/musl/src/math/atan2f.c",
"wasi/libc-top-half/musl/src/math/atan2l.c",
"wasi/libc-top-half/musl/src/math/atan.c",
"wasi/libc-top-half/musl/src/math/atanf.c",
"wasi/libc-top-half/musl/src/math/atanh.c",
"wasi/libc-top-half/musl/src/math/atanhf.c",
"wasi/libc-top-half/musl/src/math/atanhl.c",
"wasi/libc-top-half/musl/src/math/atanl.c",
"wasi/libc-top-half/musl/src/math/cbrt.c",
"wasi/libc-top-half/musl/src/math/cbrtf.c",
"wasi/libc-top-half/musl/src/math/cbrtl.c",
"wasi/libc-top-half/musl/src/math/ceill.c",
"wasi/libc-top-half/musl/src/math/copysignl.c",
"wasi/libc-top-half/musl/src/math/__cos.c",
"wasi/libc-top-half/musl/src/math/cos.c",
"wasi/libc-top-half/musl/src/math/__cosdf.c",
"wasi/libc-top-half/musl/src/math/cosf.c",
"wasi/libc-top-half/musl/src/math/cosh.c",
"wasi/libc-top-half/musl/src/math/coshf.c",
"wasi/libc-top-half/musl/src/math/coshl.c",
"wasi/libc-top-half/musl/src/math/__cosl.c",
"wasi/libc-top-half/musl/src/math/cosl.c",
"wasi/libc-top-half/musl/src/math/erf.c",
"wasi/libc-top-half/musl/src/math/erff.c",
"wasi/libc-top-half/musl/src/math/erfl.c",
"wasi/libc-top-half/musl/src/math/exp10.c",
"wasi/libc-top-half/musl/src/math/exp10f.c",
"wasi/libc-top-half/musl/src/math/exp10l.c",
"wasi/libc-top-half/musl/src/math/exp2.c",
"wasi/libc-top-half/musl/src/math/exp2f.c",
"wasi/libc-top-half/musl/src/math/exp2f_data.c",
"wasi/libc-top-half/musl/src/math/exp2l.c",
"wasi/libc-top-half/musl/src/math/exp.c",
"wasi/libc-top-half/musl/src/math/exp_data.c",
"wasi/libc-top-half/musl/src/math/expf.c",
"wasi/libc-top-half/musl/src/math/expl.c",
"wasi/libc-top-half/musl/src/math/expm1.c",
"wasi/libc-top-half/musl/src/math/expm1f.c",
"wasi/libc-top-half/musl/src/math/expm1l.c",
"wasi/libc-top-half/musl/src/math/__expo2.c",
"wasi/libc-top-half/musl/src/math/__expo2f.c",
"wasi/libc-top-half/musl/src/math/fabsl.c",
"wasi/libc-top-half/musl/src/math/fdim.c",
"wasi/libc-top-half/musl/src/math/fdimf.c",
"wasi/libc-top-half/musl/src/math/fdiml.c",
"wasi/libc-top-half/musl/src/math/finite.c",
"wasi/libc-top-half/musl/src/math/finitef.c",
"wasi/libc-top-half/musl/src/math/floorl.c",
"wasi/libc-top-half/musl/src/math/fma.c",
"wasi/libc-top-half/musl/src/math/fmaf.c",
"wasi/libc-top-half/musl/src/math/fmal.c",
"wasi/libc-top-half/musl/src/math/fmaxl.c",
"wasi/libc-top-half/musl/src/math/fminl.c",
"wasi/libc-top-half/musl/src/math/fmod.c",
"wasi/libc-top-half/musl/src/math/fmodf.c",
"wasi/libc-top-half/musl/src/math/fmodl.c",
"wasi/libc-top-half/musl/src/math/frexp.c",
"wasi/libc-top-half/musl/src/math/frexpf.c",
"wasi/libc-top-half/musl/src/math/frexpl.c",
"wasi/libc-top-half/musl/src/math/hypot.c",
"wasi/libc-top-half/musl/src/math/hypotf.c",
"wasi/libc-top-half/musl/src/math/hypotl.c",
"wasi/libc-top-half/musl/src/math/ilogb.c",
"wasi/libc-top-half/musl/src/math/ilogbf.c",
"wasi/libc-top-half/musl/src/math/ilogbl.c",
"wasi/libc-top-half/musl/src/math/__invtrigl.c",
"wasi/libc-top-half/musl/src/math/j0.c",
"wasi/libc-top-half/musl/src/math/j0f.c",
"wasi/libc-top-half/musl/src/math/j1.c",
"wasi/libc-top-half/musl/src/math/j1f.c",
"wasi/libc-top-half/musl/src/math/jn.c",
"wasi/libc-top-half/musl/src/math/jnf.c",
"wasi/libc-top-half/musl/src/math/ldexp.c",
"wasi/libc-top-half/musl/src/math/ldexpf.c",
"wasi/libc-top-half/musl/src/math/ldexpl.c",
"wasi/libc-top-half/musl/src/math/lgamma.c",
"wasi/libc-top-half/musl/src/math/lgammaf.c",
"wasi/libc-top-half/musl/src/math/lgammaf_r.c",
"wasi/libc-top-half/musl/src/math/lgammal.c",
"wasi/libc-top-half/musl/src/math/lgamma_r.c",
"wasi/libc-top-half/musl/src/math/llrint.c",
"wasi/libc-top-half/musl/src/math/llrintf.c",
"wasi/libc-top-half/musl/src/math/llrintl.c",
"wasi/libc-top-half/musl/src/math/llround.c",
"wasi/libc-top-half/musl/src/math/llroundf.c",
"wasi/libc-top-half/musl/src/math/llroundl.c",
"wasi/libc-top-half/musl/src/math/log10.c",
"wasi/libc-top-half/musl/src/math/log10f.c",
"wasi/libc-top-half/musl/src/math/log10l.c",
"wasi/libc-top-half/musl/src/math/log1p.c",
"wasi/libc-top-half/musl/src/math/log1pf.c",
"wasi/libc-top-half/musl/src/math/log1pl.c",
"wasi/libc-top-half/musl/src/math/log2.c",
"wasi/libc-top-half/musl/src/math/log2_data.c",
"wasi/libc-top-half/musl/src/math/log2f.c",
"wasi/libc-top-half/musl/src/math/log2f_data.c",
"wasi/libc-top-half/musl/src/math/log2l.c",
"wasi/libc-top-half/musl/src/math/logb.c",
"wasi/libc-top-half/musl/src/math/logbf.c",
"wasi/libc-top-half/musl/src/math/logbl.c",
"wasi/libc-top-half/musl/src/math/log.c",
"wasi/libc-top-half/musl/src/math/log_data.c",
"wasi/libc-top-half/musl/src/math/logf.c",
"wasi/libc-top-half/musl/src/math/logf_data.c",
"wasi/libc-top-half/musl/src/math/logl.c",
"wasi/libc-top-half/musl/src/math/lrint.c",
"wasi/libc-top-half/musl/src/math/lrintf.c",
"wasi/libc-top-half/musl/src/math/lrintl.c",
"wasi/libc-top-half/musl/src/math/lround.c",
"wasi/libc-top-half/musl/src/math/lroundf.c",
"wasi/libc-top-half/musl/src/math/lroundl.c",
"wasi/libc-top-half/musl/src/math/__math_divzero.c",
"wasi/libc-top-half/musl/src/math/__math_divzerof.c",
"wasi/libc-top-half/musl/src/math/__math_invalid.c",
"wasi/libc-top-half/musl/src/math/__math_invalidf.c",
"wasi/libc-top-half/musl/src/math/__math_invalidl.c",
"wasi/libc-top-half/musl/src/math/__math_oflow.c",
"wasi/libc-top-half/musl/src/math/__math_oflowf.c",
"wasi/libc-top-half/musl/src/math/__math_uflow.c",
"wasi/libc-top-half/musl/src/math/__math_uflowf.c",
"wasi/libc-top-half/musl/src/math/__math_xflow.c",
"wasi/libc-top-half/musl/src/math/__math_xflowf.c",
"wasi/libc-top-half/musl/src/math/modf.c",
"wasi/libc-top-half/musl/src/math/modff.c",
"wasi/libc-top-half/musl/src/math/modfl.c",
"wasi/libc-top-half/musl/src/math/nan.c",
"wasi/libc-top-half/musl/src/math/nanf.c",
"wasi/libc-top-half/musl/src/math/nanl.c",
"wasi/libc-top-half/musl/src/math/nearbyintl.c",
"wasi/libc-top-half/musl/src/math/nextafter.c",
"wasi/libc-top-half/musl/src/math/nextafterf.c",
"wasi/libc-top-half/musl/src/math/nextafterl.c",
"wasi/libc-top-half/musl/src/math/nexttoward.c",
"wasi/libc-top-half/musl/src/math/nexttowardf.c",
"wasi/libc-top-half/musl/src/math/nexttowardl.c",
"wasi/libc-top-half/musl/src/math/__polevll.c",
"wasi/libc-top-half/musl/src/math/pow.c",
"wasi/libc-top-half/musl/src/math/pow_data.c",
"wasi/libc-top-half/musl/src/math/powf.c",
"wasi/libc-top-half/musl/src/math/powf_data.c",
"wasi/libc-top-half/musl/src/math/powl.c",
"wasi/libc-top-half/musl/src/math/remainder.c",
"wasi/libc-top-half/musl/src/math/remainderf.c",
"wasi/libc-top-half/musl/src/math/remainderl.c",
"wasi/libc-top-half/musl/src/math/__rem_pio2.c",
"wasi/libc-top-half/musl/src/math/__rem_pio2f.c",
"wasi/libc-top-half/musl/src/math/__rem_pio2_large.c",
"wasi/libc-top-half/musl/src/math/__rem_pio2l.c",
"wasi/libc-top-half/musl/src/math/remquo.c",
"wasi/libc-top-half/musl/src/math/remquof.c",
"wasi/libc-top-half/musl/src/math/remquol.c",
"wasi/libc-top-half/musl/src/math/rintl.c",
"wasi/libc-top-half/musl/src/math/round.c",
"wasi/libc-top-half/musl/src/math/roundf.c",
"wasi/libc-top-half/musl/src/math/roundl.c",
"wasi/libc-top-half/musl/src/math/scalb.c",
"wasi/libc-top-half/musl/src/math/scalbf.c",
"wasi/libc-top-half/musl/src/math/scalbln.c",
"wasi/libc-top-half/musl/src/math/scalblnf.c",
"wasi/libc-top-half/musl/src/math/scalblnl.c",
"wasi/libc-top-half/musl/src/math/scalbn.c",
"wasi/libc-top-half/musl/src/math/scalbnf.c",
"wasi/libc-top-half/musl/src/math/scalbnl.c",
"wasi/libc-top-half/musl/src/math/signgam.c",
"wasi/libc-top-half/musl/src/math/significand.c",
"wasi/libc-top-half/musl/src/math/significandf.c",
"wasi/libc-top-half/musl/src/math/__sin.c",
"wasi/libc-top-half/musl/src/math/sin.c",
"wasi/libc-top-half/musl/src/math/sincos.c",
"wasi/libc-top-half/musl/src/math/sincosf.c",
"wasi/libc-top-half/musl/src/math/sincosl.c",
"wasi/libc-top-half/musl/src/math/__sindf.c",
"wasi/libc-top-half/musl/src/math/sinf.c",
"wasi/libc-top-half/musl/src/math/sinh.c",
"wasi/libc-top-half/musl/src/math/sinhf.c",
"wasi/libc-top-half/musl/src/math/sinhl.c",
"wasi/libc-top-half/musl/src/math/__sinl.c",
"wasi/libc-top-half/musl/src/math/sinl.c",
"wasi/libc-top-half/musl/src/math/sqrt_data.c",
"wasi/libc-top-half/musl/src/math/sqrtl.c",
"wasi/libc-top-half/musl/src/math/__tan.c",
"wasi/libc-top-half/musl/src/math/tan.c",
"wasi/libc-top-half/musl/src/math/__tandf.c",
"wasi/libc-top-half/musl/src/math/tanf.c",
"wasi/libc-top-half/musl/src/math/tanh.c",
"wasi/libc-top-half/musl/src/math/tanhf.c",
"wasi/libc-top-half/musl/src/math/tanhl.c",
"wasi/libc-top-half/musl/src/math/__tanl.c",
"wasi/libc-top-half/musl/src/math/tanl.c",
"wasi/libc-top-half/musl/src/math/tgamma.c",
"wasi/libc-top-half/musl/src/math/tgammaf.c",
"wasi/libc-top-half/musl/src/math/tgammal.c",
"wasi/libc-top-half/musl/src/math/truncl.c",
"wasi/libc-top-half/musl/src/complex/cabs.c",
"wasi/libc-top-half/musl/src/complex/cabsf.c",
"wasi/libc-top-half/musl/src/complex/cabsl.c",
"wasi/libc-top-half/musl/src/complex/cacos.c",
"wasi/libc-top-half/musl/src/complex/cacosf.c",
"wasi/libc-top-half/musl/src/complex/cacosh.c",
"wasi/libc-top-half/musl/src/complex/cacoshf.c",
"wasi/libc-top-half/musl/src/complex/cacoshl.c",
"wasi/libc-top-half/musl/src/complex/cacosl.c",
"wasi/libc-top-half/musl/src/complex/carg.c",
"wasi/libc-top-half/musl/src/complex/cargf.c",
"wasi/libc-top-half/musl/src/complex/cargl.c",
"wasi/libc-top-half/musl/src/complex/casin.c",
"wasi/libc-top-half/musl/src/complex/casinf.c",
"wasi/libc-top-half/musl/src/complex/casinh.c",
"wasi/libc-top-half/musl/src/complex/casinhf.c",
"wasi/libc-top-half/musl/src/complex/casinhl.c",
"wasi/libc-top-half/musl/src/complex/casinl.c",
"wasi/libc-top-half/musl/src/complex/catan.c",
"wasi/libc-top-half/musl/src/complex/catanf.c",
"wasi/libc-top-half/musl/src/complex/catanh.c",
"wasi/libc-top-half/musl/src/complex/catanhf.c",
"wasi/libc-top-half/musl/src/complex/catanhl.c",
"wasi/libc-top-half/musl/src/complex/catanl.c",
"wasi/libc-top-half/musl/src/complex/ccos.c",
"wasi/libc-top-half/musl/src/complex/ccosf.c",
"wasi/libc-top-half/musl/src/complex/ccosh.c",
"wasi/libc-top-half/musl/src/complex/ccoshf.c",
"wasi/libc-top-half/musl/src/complex/ccoshl.c",
"wasi/libc-top-half/musl/src/complex/ccosl.c",
"wasi/libc-top-half/musl/src/complex/__cexp.c",
"wasi/libc-top-half/musl/src/complex/cexp.c",
"wasi/libc-top-half/musl/src/complex/__cexpf.c",
"wasi/libc-top-half/musl/src/complex/cexpf.c",
"wasi/libc-top-half/musl/src/complex/cexpl.c",
"wasi/libc-top-half/musl/src/complex/clog.c",
"wasi/libc-top-half/musl/src/complex/clogf.c",
"wasi/libc-top-half/musl/src/complex/clogl.c",
"wasi/libc-top-half/musl/src/complex/conj.c",
"wasi/libc-top-half/musl/src/complex/conjf.c",
"wasi/libc-top-half/musl/src/complex/conjl.c",
"wasi/libc-top-half/musl/src/complex/cpow.c",
"wasi/libc-top-half/musl/src/complex/cpowf.c",
"wasi/libc-top-half/musl/src/complex/cpowl.c",
"wasi/libc-top-half/musl/src/complex/cproj.c",
"wasi/libc-top-half/musl/src/complex/cprojf.c",
"wasi/libc-top-half/musl/src/complex/cprojl.c",
"wasi/libc-top-half/musl/src/complex/csin.c",
"wasi/libc-top-half/musl/src/complex/csinf.c",
"wasi/libc-top-half/musl/src/complex/csinh.c",
"wasi/libc-top-half/musl/src/complex/csinhf.c",
"wasi/libc-top-half/musl/src/complex/csinhl.c",
"wasi/libc-top-half/musl/src/complex/csinl.c",
"wasi/libc-top-half/musl/src/complex/csqrt.c",
"wasi/libc-top-half/musl/src/complex/csqrtf.c",
"wasi/libc-top-half/musl/src/complex/csqrtl.c",
"wasi/libc-top-half/musl/src/complex/ctan.c",
"wasi/libc-top-half/musl/src/complex/ctanf.c",
"wasi/libc-top-half/musl/src/complex/ctanh.c",
"wasi/libc-top-half/musl/src/complex/ctanhf.c",
"wasi/libc-top-half/musl/src/complex/ctanhl.c",
"wasi/libc-top-half/musl/src/complex/ctanl.c",
"wasi/libc-top-half/musl/src/crypt/crypt_blowfish.c",
"wasi/libc-top-half/musl/src/crypt/crypt.c",
"wasi/libc-top-half/musl/src/crypt/crypt_des.c",
"wasi/libc-top-half/musl/src/crypt/crypt_md5.c",
"wasi/libc-top-half/musl/src/crypt/crypt_r.c",
"wasi/libc-top-half/musl/src/crypt/crypt_sha256.c",
"wasi/libc-top-half/musl/src/crypt/crypt_sha512.c",
"wasi/libc-top-half/musl/src/crypt/encrypt.c",
"wasi/libc-top-half/sources/arc4random.c",
};
// C runtime startup sources: the default crt1.c plus the explicit
// "command" and "reactor" entry-point variants used by the two WASI
// execution models.
const crt_src_files = &[_][]const u8{
    "wasi/libc-bottom-half/crt/crt1.c",
    "wasi/libc-bottom-half/crt/crt1-command.c",
    "wasi/libc-bottom-half/crt/crt1-reactor.c",
};
// Sources for the emulated process-clocks library (clock, getrusage, times).
const emulated_process_clocks_src_files = &[_][]const u8{
    "wasi/libc-bottom-half/clocks/clock.c",
    "wasi/libc-bottom-half/clocks/getrusage.c",
    "wasi/libc-bottom-half/clocks/times.c",
};
// Source for the emulated getpid library.
const emulated_getpid_src_files = &[_][]const u8{
    "wasi/libc-bottom-half/getpid/getpid.c",
};
// Source for the emulated memory-mapping (mman) library.
const emulated_mman_src_files = &[_][]const u8{
    "wasi/libc-bottom-half/mman/mman.c",
};
// Source for the emulated signal library.
const emulated_signal_src_files = &[_][]const u8{
    "wasi/libc-bottom-half/signal/signal.c",
};
|
src/wasi_libc.zig
|
const std = @import("std");
const mem = std.mem;
const ascii = std.ascii;
const testing = std.testing;
const assert = std.debug.assert;
// Percent encode the source data
//
// Alphanumerics pass through unchanged, a space becomes '+', and every
// other byte becomes a three-character "%XX" escape (uppercase hex).
// `dest` must be at least `source.len` bytes; escapes need two extra
// bytes each and yield `error.NoSpaceLeft` when `dest` runs out.
// Returns the encoded slice of `dest`.
pub fn encode(dest: []u8, source: []const u8) ![]const u8 {
    assert(dest.len >= source.len);
    var i: usize = 0;
    for (source) |ch| {
        if (ascii.isAlNum(ch)) {
            dest[i] = ch;
            i += 1;
        } else if (ch == ' ') {
            dest[i] = '+';
            i += 1;
        } else {
            const end = i + 3;
            if (end > dest.len) return error.NoSpaceLeft;
            dest[i] = '%';
            // "{X:0>2}" zero-pads to exactly two digits. The previous
            // "{X}" printed a single digit for bytes < 0x10, leaving
            // dest[i + 2] undefined while i still advanced by 3.
            _ = try std.fmt.bufPrint(dest[i + 1 .. end], "{X:0>2}", .{ch});
            i = end;
        }
    }
    return dest[0..i];
}
// Round-trip fixture shared with "url-decode": '/' and '=' must be
// escaped as %2F and %3D, everything alphanumeric passes through.
test "url-encode" {
    var buf: [256]u8 = undefined;
    const encoded = try encode(&buf, "hOlmDALJCWWdjzfBV4ZxJPmrdCLWB/tq7Z/" ++
        "fp4Q/xXbVPPREuMJMVGzKraTuhhNWxCCwi6yFEZg=");
    try testing.expectEqualStrings("hOlmDALJCWWdjzfBV4ZxJPmrdCLWB%2Ftq7Z%2F" ++
        "fp4Q%2FxXbVPPREuMJMVGzKraTuhhNWxCCwi6yFEZg%3D", encoded);
}
// Percent decode the source data
//
// Reverses `encode`: '+' decodes to a space and "%XX" (two hex digits,
// either case) decodes to byte 0xXX; everything else is copied through.
// A truncated escape or a non-hex character after '%' yields
// `error.DecodeError`. `dest` must be at least `source.len` bytes,
// which always suffices since decoding never expands the data.
pub fn decode(dest: []u8, source: []const u8) ![]const u8 {
    assert(dest.len >= source.len);
    var i: usize = 0;
    var j: usize = 0;
    while (i < source.len) : (i += 1) {
        const ch = source[i];
        switch (ch) {
            '%' => {
                // Need two more characters for the hex pair.
                if (i + 3 > source.len) return error.DecodeError;
                // charToDigit accepts exactly one hex digit. Unlike the
                // previous parseInt call it rejects sign characters, so
                // "%+5" is an error instead of silently decoding to 0x05,
                // and every malformed escape maps to DecodeError.
                const hi = std.fmt.charToDigit(source[i + 1], 16) catch return error.DecodeError;
                const lo = std.fmt.charToDigit(source[i + 2], 16) catch return error.DecodeError;
                dest[j] = hi * 16 + lo;
                i += 2;
            },
            '+' => {
                dest[j] = ' ';
            },
            else => {
                dest[j] = ch;
            },
        }
        j += 1;
    }
    return dest[0..j];
}
// Inverse of the "url-encode" fixture: %2F and %3D must decode back to
// '/' and '='.
test "url-decode" {
    var buf: [256]u8 = undefined;
    const decoded = try decode(&buf, "hOlmDALJCWWdjzfBV4ZxJPmrdCLWB%2Ftq7Z%2F" ++
        "fp4Q%2FxXbVPPREuMJMVGzKraTuhhNWxCCwi6yFEZg%3D");
    try testing.expectEqualStrings("hOlmDALJCWWdjzfBV4ZxJPmrdCLWB/tq7Z/" ++
        "fp4Q/xXbVPPREuMJMVGzKraTuhhNWxCCwi6yFEZg=", decoded);
}
// Look for host in a url
//
// Returns the authority component: everything after a "scheme://"
// prefix, up to the first '/', '?' or '#'. A url without "://" is
// returned unchanged (it is assumed to already be a bare host).
pub fn findHost(url: []const u8) []const u8 {
    var host = url;
    if (mem.indexOf(u8, host, "://")) |start| {
        host = host[start + 3 ..];
        // A query or fragment may follow the authority directly with no
        // path (e.g. "http://h?a=1"), so stop at '?' and '#' too, not
        // just '/'.
        if (mem.indexOfAny(u8, host, "/?#")) |end| {
            host = host[0..end];
        }
    }
    return host;
}
// findHost must return the authority both with and without a scheme
// prefix.
test "url-find-host" {
    const url = "http://localhost:9000";
    try testing.expectEqualStrings("localhost:9000", findHost(url));
    const url2 = "localhost:9000";
    try testing.expectEqualStrings("localhost:9000", findHost(url2));
}
|
src/url.zig
|
const kernel = @import("../../kernel.zig");
const PIC = @import("pic.zig");
const IDT = @import("idt.zig");
const GDT = @import("gdt.zig");
const x86_64 = @import("../x86_64.zig");
const interrupts = @This();
const Context = x86_64.Context;
const TODO = kernel.TODO;
const Thread = kernel.scheduler.Thread;
const Virtual = kernel.Virtual;
const log = kernel.log.scoped(.interrupts);
const Handler = fn () callconv(.Naked) void;
export var idt: IDT = undefined;
// Register a handler descriptor for every one of the 256 interrupt
// vectors. The vector number must be comptime-known (each vector gets
// its own generated stub via get_handler_descriptor), so the loop is
// unrolled with `inline while` instead of 256 hand-written calls.
pub fn install_interrupt_handlers() void {
    comptime var interrupt_number: u64 = 0;
    inline while (interrupt_number < 256) : (interrupt_number += 1) {
        // These vectors receive an error code pushed by the CPU:
        // #DF(8), #TS(10), #NP(11), #SS(12), #GP(13), #PF(14), #AC(17).
        // This matches exactly the `true` entries of the original
        // unrolled list; every other vector passes `false`.
        const has_error_code = switch (interrupt_number) {
            8, 10...14, 17 => true,
            else => false,
        };
        idt.add_interrupt_handler(get_handler_descriptor(interrupt_number, has_error_code));
    }
}
// Bring up interrupt handling. The order is deliberate: the PIC is
// disabled first, handlers are installed and the IDT loaded, and only
// then are interrupts re-enabled.
pub fn init() void {
    // Initialize interrupts
    log.debug("Initializing interrupts", .{});
    PIC.disable();
    install_interrupt_handlers();
    log.debug("Installed interrupt handlers", .{});
    idt.load();
    log.debug("Loaded IDT", .{});
    x86_64.enable_interrupts();
    log.debug("Enabled interrupts", .{});
}
/// x86_64 CPU exception vector numbers (0x00-0x1e).
/// NOTE(review): reserved vectors (0x0f, 0x16-0x1b) are intentionally absent,
/// so converting an arbitrary vector number to this enum is only valid for the
/// vectors listed here — see the @intToEnum in `interrupt_handler`.
const Exception = enum(u5) {
    divide_by_zero = 0x00,
    debug = 0x01,
    non_maskable_interrupt = 0x2,
    breakpoint = 0x03,
    overflow = 0x04,
    bound_range_exceeded = 0x05,
    invalid_opcode = 0x06,
    device_not_available = 0x07,
    double_fault = 0x08,
    coprocessor_segment_overrun = 0x09,
    invalid_tss = 0x0a,
    segment_not_present = 0x0b,
    stack_segment_fault = 0x0c,
    general_protection_fault = 0x0d,
    page_fault = 0x0e,
    x87_floating_point_exception = 0x10,
    alignment_check = 0x11,
    machine_check = 0x12,
    simd_floating_point_exception = 0x13,
    virtualization_exception = 0x14,
    control_protection_exception = 0x15,
    hypervisor_injection_exception = 0x1c,
    vmm_communication_exception = 0x1d,
    security_exception = 0x1e,
};
/// Bit positions of the page-fault error code the CPU pushes on #PF
/// (a set bit means the condition applies, e.g. `write` = fault was a write).
const PageFaultErrorCode = kernel.Bitflag(false, enum(u64) {
    present = 0,
    write = 1,
    user = 2,
    reserved_write = 3,
    instruction_fetch = 4,
    protection_key = 5,
    shadow_stack = 6,
    software_guard_extensions = 15,
});
/// Common interrupt dispatcher. Called from the per-vector naked stubs after
/// `prologue` has saved all registers and left a pointer to the saved frame
/// in rdi. Interrupts must stay disabled for the whole call.
export fn interrupt_handler(context: *Context) align(0x10) callconv(.C) void {
    log.debug("Getting 0x{x}", .{context.interrupt_number});
    if (x86_64.are_interrupts_enabled()) {
        @panic("interrupts are enabled");
    }
    // Sanity check: no spinlock may be held while taking an interrupt.
    // NOTE(review): the exemption compares context.cr8 against 0xe, which is
    // the page-fault *vector* — confirm whether interrupt_number was meant.
    if (x86_64.get_current_cpu()) |current_cpu| {
        if (current_cpu.spinlock_count != 0 and context.cr8 != 0xe) {
            @panic("spinlock count bug");
        }
    }
    switch (context.interrupt_number) {
        // CPU exceptions.
        0x0...0x19 => {
            context.debug();
            // NOTE(review): @intToEnum is undefined behavior for the reserved
            // vectors (0x0f, 0x16-0x1b) that `Exception` does not declare.
            const exception = @intToEnum(Exception, context.interrupt_number);
            const usermode = context.cs & 3 != 0; // CPL lives in the low 2 bits of CS
            if (usermode) {
                @panic("usermode not implemented yet");
            } else {
                if (context.cs != @offsetOf(GDT.Table, "code_64")) @panic("invalid cs");
                switch (exception) {
                    .page_fault => {
                        const error_code = PageFaultErrorCode.from_bits(@intCast(u16, context.error_code));
                        // The faulting linear address is reported in CR2.
                        const page_fault_address = x86_64.cr2.read();
                        log.debug("Page fault address: 0x{x}. Error code: {}", .{ page_fault_address, error_code });
                        if (error_code.contains(.reserved_write)) {
                            @panic("reserved write");
                        }
                        log.debug("why are we here", .{});
                        // Deliberate dead end: page-fault recovery not implemented yet.
                        if (true) unreachable;
                        x86_64.disable_interrupts();
                    },
                    else => @panic("ni"),
                }
                log.debug("Exception: {s}", .{@tagName(exception)});
            }
        },
        // Yields to the scheduler.
        0x40 => {
            kernel.scheduler.yield(context);
        },
        // Legacy int 0x80 syscall entry (not implemented yet).
        0x80 => {
            log.debug("We are getting a syscall", .{});
            context.debug();
            unreachable;
        },
        else => {
            log.debug("whaaaaaat", .{});
        },
    }
    context.check(@src());
    if (x86_64.are_interrupts_enabled()) {
        @panic("interrupts should not be enabled");
    }
}
const std = @import("std");
/// Interrupt-entry register save: clears the direction flag, pushes all
/// general-purpose registers plus ds, es and cr8 (the order `epilogue`
/// restores in reverse), and leaves a pointer to the saved frame in rdi as
/// the first argument for `interrupt_handler`.
/// Fixed: "push %% rax" (stray space after %%) was invalid AT&T syntax and
/// could not assemble; it is now "push %%rax".
inline fn prologue() void {
    asm volatile (
        \\cld
        \\push %%rax
        \\push %%rbx
        \\push %%rcx
        \\push %%rdx
        \\push %%rdi
        \\push %%rsi
        \\push %%rbp
        \\push %%r8
        \\push %%r9
        \\push %%r10
        \\push %%r11
        \\push %%r12
        \\push %%r13
        \\push %%r14
        \\push %%r15
        \\xor %%rax, %%rax
        \\mov %%ds, %%rax
        \\push %%rax
        \\mov %%es, %%rax
        \\push %%rax
        \\mov %%cr8, %%rax
        \\push %%rax
        \\mov %%rsp, %%rdi
    );
}
/// Builds a comptime naked stub for one interrupt vector. Vectors without a
/// CPU-pushed error code get a dummy 0 pushed first so the stack layout is
/// identical for every vector; the vector number is pushed next, then control
/// flows prologue -> interrupt_handler -> epilogue (which must iretq).
pub fn get_handler(comptime interrupt_number: u64, comptime has_error_code: bool) fn handler() align(0x10) callconv(.Naked) void {
    return struct {
        pub fn handler() align(0x10) callconv(.Naked) void {
            // Keep the frame uniform: fake error code when the CPU pushes none.
            if (comptime !has_error_code) asm volatile ("push $0");
            asm volatile ("push %[interrupt_number]"
                :
                : [interrupt_number] "i" (interrupt_number),
            );
            prologue();
            asm volatile ("call interrupt_handler");
            epilogue();
            @panic("Interrupt epilogue didn't iret properly");
        }
    }.handler;
}
/// Builds the IDT gate descriptor for one vector, splitting the stub's
/// address across the offset fields as the hardware format requires.
pub fn get_handler_descriptor(comptime interrupt_number: u64, comptime has_error_code: bool) IDT.Descriptor {
    // Enforces that descriptors are registered in strict vector order
    // (IDT.interrupt_i is presumably the next expected index — the assert
    // catches out-of-order registration).
    kernel.assert(@src(), interrupt_number == IDT.interrupt_i);
    const handler_function = get_handler(interrupt_number, has_error_code);
    const handler_address = @ptrToInt(handler_function);
    return IDT.Descriptor{
        .offset_low = @truncate(u16, handler_address),
        .offset_mid = @truncate(u16, handler_address >> 16),
        .offset_high = @truncate(u32, handler_address >> 32),
        .segment_selector = @offsetOf(GDT.Table, "code_64"), // @TODO: this should change as the GDT selector changes
        .interrupt_stack_table = 0,
        .type = .interrupt,
        .descriptor_privilege_level = 0, // ring 0 only
        .present = 1,
    };
}
/// Interrupt-exit restore: exact mirror of `prologue` — restores cr8, the
/// segment registers and all GPRs in reverse push order, then drops the
/// vector number and (real or dummy) error code (0x10 bytes) before iretq.
pub inline fn epilogue() void {
    asm volatile (
        \\cli
        \\pop %%rax
        \\mov %%rax, %%cr8
        \\pop %%rax
        \\mov %%rax, %%es
        \\pop %%rax
        \\mov %%rax, %%ds
        \\pop %%r15
        \\pop %%r14
        \\pop %%r13
        \\pop %%r12
        \\pop %%r11
        \\pop %%r10
        \\pop %%r9
        \\pop %%r8
        \\pop %%rbp
        \\pop %%rsi
        \\pop %%rdi
        \\pop %%rdx
        \\pop %%rcx
        \\pop %%rbx
        \\pop %%rax
        \\add $0x10, %%rsp
        \\iretq
    );
}
|
src/kernel/arch/x86_64/interrupts.zig
|
const expect = std.testing.expect;
const expectEqualSlices = std.testing.expectEqualSlices;
const expectError = std.testing.expectError;
const std = @import("std");
const mqtt_string = @import("../../mqtt_string.zig");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const FixedHeader = @import("../packet.zig").Packet.FixedHeader;
const QoS = @import("../../qos.zig").QoS;
/// MQTT 3.1.1 UNSUBSCRIBE packet: a packet identifier plus one or more
/// topic filters to unsubscribe from.
pub const Unsubscribe = struct {
    packet_id: u16,
    topic_filters: [][]const u8,

    pub const ParseError = error{
        EmptyTopicFilters,
    };

    /// Parses the variable header and payload of an UNSUBSCRIBE packet.
    /// All topic filters are heap-allocated and owned by the result (free
    /// with `deinit`); on failure everything allocated so far is released.
    /// Returns error.EmptyTopicFilters when the payload carries no filter,
    /// which the spec forbids.
    pub fn parse(fixed_header: FixedHeader, allocator: *Allocator, inner_reader: anytype) !Unsubscribe {
        // Hold this so we can query remaining bytes
        var limited_reader = std.io.limitedReader(inner_reader, fixed_header.remaining_length);
        const reader = limited_reader.reader();
        const packet_id = try reader.readIntBig(u16);
        var topic_filters = ArrayList([]const u8).init(allocator);
        errdefer topic_filters.deinit();
        while (limited_reader.bytes_left > 0) {
            // If we fail at any step, cleanup all that was allocated until now
            errdefer {
                for (topic_filters.items) |t| {
                    allocator.free(t);
                }
            }
            var topic_filter = try mqtt_string.read(allocator, reader);
            errdefer allocator.free(topic_filter);
            try topic_filters.append(topic_filter);
        }
        if (topic_filters.items.len == 0) {
            return error.EmptyTopicFilters;
        }
        return Unsubscribe{
            .packet_id = packet_id,
            .topic_filters = topic_filters.toOwnedSlice(),
        };
    }

    /// Writes the packet id followed by each length-prefixed topic filter.
    pub fn serialize(self: Unsubscribe, writer: anytype) !void {
        try writer.writeIntBig(u16, self.packet_id);
        for (self.topic_filters) |topic_filter| {
            try mqtt_string.write(topic_filter, writer);
        }
    }

    /// Number of bytes `serialize` will produce.
    pub fn serializedLength(self: Unsubscribe) u32 {
        var length: u32 = comptime @sizeOf(@TypeOf(self.packet_id));
        for (self.topic_filters) |topic_filter| {
            length += mqtt_string.serializedLength(topic_filter);
        }
        return length;
    }

    /// Fixed-header flags for UNSUBSCRIBE.
    /// Fixed: MQTT 3.1.1 [MQTT-3.10.1-1] requires the reserved flag bits to
    /// be 0b0010 (the previous 0b0000 is a malformed packet that compliant
    /// brokers must reject by closing the connection). The parse tests in
    /// this file already use 0b0010.
    pub fn fixedHeaderFlags(self: Unsubscribe) u4 {
        return 0b0010;
    }

    /// Frees every topic filter and the slice that holds them.
    pub fn deinit(self: *Unsubscribe, allocator: *Allocator) void {
        for (self.topic_filters) |topic_filter| {
            allocator.free(topic_filter);
        }
        allocator.free(self.topic_filters);
    }
};
// Happy path: packet id and two topic filters are parsed and owned by the result.
test "Unsubscribe payload parsing" {
    const allocator = std.testing.allocator;
    const buffer =
        // Packet id, 3
        "\x00\x03" ++
        // Topic filter length, 7
        "\x00\x07" ++
        // Topic filter
        "foo/bar" ++
        // Topic filter length, 5
        "\x00\x05" ++
        // Topic filter
        "baz/#";
    const stream = std.io.fixedBufferStream(buffer).reader();
    const PacketType = @import("../packet.zig").PacketType;
    const fixed_header = FixedHeader{
        .packet_type = PacketType.unsubscribe,
        .flags = 0b0010,
        .remaining_length = @intCast(u32, buffer.len),
    };
    var unsubscribe = try Unsubscribe.parse(fixed_header, allocator, stream);
    defer unsubscribe.deinit(allocator);
    try expect(unsubscribe.packet_id == 3);
    try expect(unsubscribe.topic_filters.len == 2);
    try expectEqualSlices(u8, unsubscribe.topic_filters[0], "foo/bar");
    try expectEqualSlices(u8, unsubscribe.topic_filters[1], "baz/#");
}
// A payload holding only the packet id must be rejected as malformed.
test "Unsubscribe parsing fails with no topic_filters" {
    const allocator = std.testing.allocator;
    const buffer =
        // Packet id, 3
        "\x00\x03";
    const stream = std.io.fixedBufferStream(buffer).reader();
    const PacketType = @import("../packet.zig").PacketType;
    const fixed_header = FixedHeader{
        .packet_type = PacketType.unsubscribe,
        .flags = 0b0010,
        .remaining_length = @intCast(u32, buffer.len),
    };
    const result = Unsubscribe.parse(fixed_header, allocator, stream);
    try expectError(error.EmptyTopicFilters, result);
}
// A truncated filter mid-payload must error without leaking the filters
// already allocated (std.testing.allocator reports leaks).
test "Unsubscribe parsing with error doesn't leak" {
    const allocator = std.testing.allocator;
    const buffer =
        // Packet id, 3
        "\x00\x03" ++
        // Topic filter length, 7
        "\x00\x07" ++
        // Topic filter
        "foo/bar" ++
        // Topic filter length, 9
        "\x00\x09" ++
        // Topic filter, shorter
        "a/b";
    const stream = std.io.fixedBufferStream(buffer).reader();
    const PacketType = @import("../packet.zig").PacketType;
    const fixed_header = FixedHeader{
        .packet_type = PacketType.unsubscribe,
        .flags = 0b0010,
        .remaining_length = @intCast(u32, buffer.len),
    };
    const result = Unsubscribe.parse(fixed_header, allocator, stream);
    try expectError(error.EndOfStream, result);
}
// serialize followed by parse must reproduce the original packet exactly.
test "serialize/parse roundtrip" {
    const allocator = std.testing.allocator;
    var topic_filters = ArrayList([]const u8).init(allocator);
    try topic_filters.append("foo/#");
    try topic_filters.append("bar/baz/+");
    var topic_filters_slice = topic_filters.toOwnedSlice();
    defer allocator.free(topic_filters_slice);
    var unsubscribe = Unsubscribe{
        .packet_id = 42,
        .topic_filters = topic_filters_slice,
    };
    var buffer = [_]u8{0} ** 100;
    var stream = std.io.fixedBufferStream(&buffer);
    var writer = stream.writer();
    try unsubscribe.serialize(writer);
    const written = try stream.getPos();
    stream.reset();
    const reader = stream.reader();
    const PacketType = @import("../packet.zig").PacketType;
    const fixed_header = FixedHeader{
        .packet_type = PacketType.unsubscribe,
        .flags = 0b0010,
        .remaining_length = @intCast(u32, written),
    };
    var deser_unsubscribe = try Unsubscribe.parse(fixed_header, allocator, reader);
    defer deser_unsubscribe.deinit(allocator);
    try expect(unsubscribe.packet_id == deser_unsubscribe.packet_id);
    try expect(unsubscribe.topic_filters.len == deser_unsubscribe.topic_filters.len);
    try expectEqualSlices(u8, unsubscribe.topic_filters[0], deser_unsubscribe.topic_filters[0]);
    try expectEqualSlices(u8, unsubscribe.topic_filters[1], deser_unsubscribe.topic_filters[1]);
}
|
src/mqtt4/packet/unsubscribe.zig
|
const std = @import("std");
const trie = @import("trie.zig");
const Request = @import("request.zig").Request;
const Response = @import("response.zig").Response;
const RequestHandler = @import("server.zig").RequestHandler;
/// Contains a path, an HTTP method and a handler function that is invoked
/// when a request matches both the method and the path.
pub const Route = struct {
    /// Path by which the route is triggered
    path: []const u8,
    /// The handler function that will be called when triggered
    handler: anytype,
    /// http method
    method: Request.Method,
};
/// Generic function that inserts each route's path into a radix tree
/// to retrieve the right route when a request has been made.
/// One tree per HTTP method; tree index 9 (presumably the `.any` method —
/// confirm against Request.Method) acts as a wildcard fallback.
pub fn router(comptime routes: []const Route) RequestHandler {
    comptime var trees: [10]trie.Trie(u8) = undefined;
    inline for (trees) |*t| t.* = trie.Trie(u8){};
    // Validate every handler signature at compile time and index it by its
    // position in `routes`.
    inline for (routes) |r, i| {
        if (@typeInfo(@TypeOf(r.handler)) != .Fn) @compileError("Handler must be a function");
        const args = @typeInfo(@TypeOf(r.handler)).Fn.args;
        if (args.len < 2) @compileError("Handler must have atleast 2 arguments");
        if (args[0].arg_type.? != *Response) @compileError("First parameter must be of type " ++ @typeName(*Response));
        if (args[1].arg_type.? != Request) @compileError("Second parameter must be of type " ++ @typeName(Request));
        trees[@enumToInt(r.method)].insert(r.path, i);
    }
    return struct {
        // Invokes `route.handler`, converting the captured path parameters
        // into whatever type the handler's optional third argument declares
        // (slice, optional slice, integer, optional integer, or a struct
        // whose fields are matched against parameter names).
        fn handle(
            comptime route: Route,
            params: []const trie.Entry,
            res: *Response,
            req: Request,
        ) !void {
            const Fn = @typeInfo(@TypeOf(route.handler)).Fn;
            const args = Fn.args;
            if (args.len == 2) return route.handler(res, req);
            comptime const ArgType = args[2].arg_type orelse return route.handler(res, req, {});
            const param: ArgType = switch (ArgType) {
                []const u8 => if (params.len > 0) params[0].value else &[_]u8{},
                ?[]const u8 => if (params.len > 0) params[0].value else null,
                else => switch (@typeInfo(ArgType)) {
                    .Struct => |info| blk: {
                        var new_struct: ArgType = undefined;
                        inline for (info.fields) |field| {
                            for (params) |p| {
                                if (std.mem.eql(u8, field.name, p.key)) {
                                    const FieldType = @TypeOf(@field(new_struct, field.name));
                                    @field(new_struct, field.name) = switch (FieldType) {
                                        []const u8, ?[]const u8 => p.value,
                                        else => switch (@typeInfo(FieldType)) {
                                            // NOTE(review): parse failures silently become 0/null.
                                            .Int => std.fmt.parseInt(FieldType, p.value, 10) catch 0,
                                            .Optional => |child| if (@typeInfo(child) == .Int)
                                                std.fmt.parseInt(FieldType, p.value, 10) catch null
                                            else
                                                @compileError("Unsupported optional type " ++ @typeName(child)),
                                            else => @compileError("Unsupported type " ++ @typeName(FieldType)),
                                        },
                                    };
                                }
                            }
                        }
                        break :blk new_struct;
                    },
                    .Int => std.fmt.parseInt(ArgType, params[0].value, 10) catch 0,
                    .Optional => |child| if (@typeInfo(child) == .Int)
                        std.fmt.parseInt(ArgType, params[0].value, 10) catch null
                    else
                        @compileError("Unsupported optional type " ++ @typeName(child)),
                    else => @compileError("Unsupported type " ++ @typeName(ArgType)),
                },
            };
            return route.handler(res, req, param);
        }
        // The actual RequestHandler: looks the path up in the tree for the
        // request's method, falling back to the wildcard tree, else 404.
        fn serve(response: *Response, request: Request) !void {
            switch (trees[@enumToInt(request.method)].get(request.url.path)) {
                .none => {
                    // if nothing was found for current method, try the wildcard
                    switch (trees[9].get(request.url.path)) {
                        .none => return response.notFound(),
                        .static => |index| {
                            inline for (routes) |route, i|
                                if (index == i) return handle(route, &.{}, response, request);
                        },
                        .with_params => |object| {
                            inline for (routes) |route, i| {
                                if (object.data == i)
                                    return handle(route, object.params[0..object.param_count], response, request);
                            }
                        },
                    }
                },
                .static => |index| {
                    inline for (routes) |route, i| {
                        if (index == i) return handle(route, &.{}, response, request);
                    }
                },
                .with_params => |object| {
                    inline for (routes) |route, i| {
                        if (object.data == i)
                            return handle(route, object.params[0..object.param_count], response, request);
                    }
                },
            }
        }
    }.serve;
}
/// Builds a `Route` that fires when a request matches `method` and `path`.
/// The `handler` function must accept at least two arguments:
///   @TypeOf(arg[0]) == *Response
///   @TypeOf(arg[1]) == Request
///
/// A third argument may be declared when `path` contains parameters such as
/// ':<name>'; the captured parameters are parsed into that argument's type.
pub fn handle(
    comptime method: Request.Method,
    comptime path: []const u8,
    comptime handler: anytype,
) Route {
    return .{
        .method = method,
        .path = path,
        .handler = handler,
    };
}
/// Shorthand for `handle` with method 'GET'
pub fn get(comptime path: []const u8, comptime handler: anytype) Route {
    return handle(.get, path, handler);
}
/// Shorthand for `handle` with method 'POST'
pub fn post(comptime path: []const u8, comptime handler: anytype) Route {
    return handle(.post, path, handler);
}
/// Shorthand for `handle` with method 'PATCH'
pub fn patch(comptime path: []const u8, comptime handler: anytype) Route {
    return handle(.patch, path, handler);
}
/// Shorthand for `handle` with method 'PUT'
pub fn put(comptime path: []const u8, comptime handler: anytype) Route {
    return handle(.put, path, handler);
}
/// Shorthand for `handle` with method 'HEAD'
pub fn head(comptime path: []const u8, comptime handler: anytype) Route {
    return handle(.head, path, handler);
}
/// Shorthand for `handle` with method 'DELETE'
pub fn delete(comptime path: []const u8, comptime handler: anytype) Route {
    return handle(.delete, path, handler);
}
/// Shorthand for `handle` with method 'CONNECT'
pub fn connect(comptime path: []const u8, comptime handler: anytype) Route {
    return handle(.connect, path, handler);
}
/// Shorthand for `handle` with method 'OPTIONS'
pub fn options(comptime path: []const u8, comptime handler: anytype) Route {
    return handle(.options, path, handler);
}
/// Shorthand for `handle` with method 'TRACE'
pub fn trace(comptime path: []const u8, comptime handler: anytype) Route {
    return handle(.trace, path, handler);
}
/// Shorthand function to create a `Route` which will be matched to any
/// request method. It is still recommended to use the other specific methods
pub fn any(comptime path: []const u8, comptime handler: anytype) Route {
    return handle(.any, path, handler);
}
|
src/router.zig
|
const std = @import("std");
const NOPS = 16; // number of instructions
//////// Instruction-field decoding helpers for the 16-bit instruction word
//////// (register layout suggests an LC-3-style VM — see `regist` below).
// Opcode: top 4 bits.
inline fn OPC(i: u16) u16 {
    return i >> 12;
}
// Branch condition flags: bits 11-9.
inline fn FCND(i: u16) u16 {
    return i >> 9 & 0x7;
}
// Gets the 5th bit of i
// We shift to the right with 5 bits so we can have bit on the last position
inline fn FIMM(i: u16) u16 {
    return i >> 5 & 1;
}
// Get DR (destination register): bits 11-9.
inline fn DR(i: u16) u16 {
    return i >> 9 & 0x7;
}
// First source register: bits 8-6.
inline fn SR1(i: u16) u16 {
    return i >> 6 & 0x7;
}
// Second source register: bits 2-0.
inline fn SR2(i: u16) u16 {
    return i & 0x7;
}
// 5-bit immediate: bits 4-0.
inline fn IMM(i: u16) u16 {
    return i & 0x1F;
}
// Sign-extended 5-bit immediate.
inline fn SEXTIMM(i: u16) u16 {
    return sext(IMM(i), 5);
}
/// As a convention, we should start loading programs
/// into the main memory from 0x3000 onwards.
const PC_START = 0x3000;
/// MAIN MEMORY: one word per 16-bit address.
/// Fixed: the array must hold maxInt(u16) + 1 (65536) words — with only
/// maxInt(u16) entries, the highest address 0xFFFF would index out of
/// bounds in `mr`/`mw`.
var mem: [std.math.maxInt(u16) + 1]u16 = undefined;
/// Registers Types
/// R0 is a general-purpose register
/// We are going to also use it for reading/writing data from/to stdin/stdout;
/// R1, R2,..R7 are general purpose registers;
/// RPC is the program counter register. It contains
/// the memory address of the next instruction we will execute.
/// RCND is the conditional register.
/// The conditional flag gives us information about the
/// previous operation that happened at ALU level in the CPU.
/// RCNT is the register count (not a real register).
/// to access a register, we simply: reg[@enumToInt(.R0)]
const regist = enum(u8) { R0 = 0, R1, R2, R3, R4, R5, R6, R7, RPC, RCND, RCNT };
/// Register file: RCNT (= 10) words.
var reg: [@enumToInt(regist.RCNT)]u16 = undefined;
/// Condition flags stored in the RCND register (positive / zero / negative).
const flags = enum(u8) { FP = 1 << 0, FZ = 1 << 1, FN = 1 << 2 };
/// Update RCND ("update flags") from the value currently held in register r.
fn uf(r: u16) void {
    if (reg[r] == 0) {
        reg[@enumToInt(regist.RCND)] = @enumToInt(flags.FZ); // the value in r is zero
    } else if ((reg[r] >> 15) > 0) {
        reg[@enumToInt(regist.RCND)] = @enumToInt(flags.FN); // the value in r is a negative number (sign bit set)
    } else {
        reg[@enumToInt(regist.RCND)] = @enumToInt(flags.FP); // the value in r is a positive number
    }
}
/// read a word from main memory
inline fn mr(address: u16) u16 {
    return mem[address];
}
/// write a word to main memory
inline fn mw(address: u16, val: u16) void {
    mem[address] = val;
}
/// OpCode functions
const op_ex_f = fn (i: u16) void;
/// ADD: DR = SR1 + (sign-extended IMM5 | SR2), then update condition flags.
fn add(i: u16) void {
    reg[DR(i)] = reg[SR1(i)] +
        // If the 5th bit is 1
        // we sign extend IMM5 and we add it to SR1 (add2)
        // else we add the value of SR2 to SR1 (add1)
        if (FIMM(i) > 0) SEXTIMM(i) else reg[SR2(i)];
    uf(DR(i));
}
/// AND: DR = SR1 & (sign-extended IMM5 | SR2), then update condition flags.
fn and_(i: u16) void {
    reg[DR(i)] = reg[SR1(i)] &
        // If the 5th bit is 1
        // we sign extend IMM5 and we AND it with SR1 (and2)
        // else we AND the value of SR2 with SR1 (and1)
        (if (FIMM(i) > 0) SEXTIMM(i) else reg[SR2(i)]);
    uf(DR(i));
}
/// BR: branch when any requested condition flag is set in RCND.
/// NOTE(review): LC-3 BR adds the sign-extended 9-bit PC offset to RPC;
/// advancing by a fixed 1 looks like a placeholder — confirm before use.
fn br(i: u16) void {
    if ((reg[@enumToInt(regist.RCND)] & FCND(i)) > 0) {
        reg[@enumToInt(regist.RPC)] += 1;
    }
}
// Dispatch table indexed by opcode.
// NOTE(review): [NOPS] declares 16 entries but only 2 initializers are
// provided; Zig requires the counts to match, so this fails to compile as
// soon as `op_ex` is referenced. Left as-is until the remaining opcode
// handlers exist (it is currently unreferenced, hence lazily unanalyzed).
const op_ex = [NOPS]op_ex_f{ br, add };
/// Sign-extend the low `b` bits of `n` to 16 bits.
/// Fixed: the old mask `0xFFFF << b` is a comptime_int that exceeds u16
/// (0x1FFFE0 for b == 5) and cannot coerce for the bitwise OR; shifting an
/// explicit u16 keeps the mask in range (0xFFE0 for b == 5).
fn sext(n: u16, b: comptime_int) u16 {
    return if ((n >> (b - 1) & 1) > 0) n | (~@as(u16, 0) << b) else n;
}
//////////////////////////////////////////////
// Dummy handler used to verify op_ex_f function tables are callable.
fn test_op_ex_f(i: u16) void {
    std.debug.print("...{d}....", .{i});
}
// Smoke tests: address-space size, register-file size, array mutation,
// and calling through a function-pointer table.
test "max u16" {
    try std.testing.expect(std.math.maxInt(u16) == 65535);
    try std.testing.expect(reg.len == 10);
    var arr = [_]u16{ 1, 2 };
    arr[1] += 1;
    try std.testing.expect(arr[1] == 3);
    const op_ex_test = [_]op_ex_f{test_op_ex_f};
    op_ex_test[0](17);
}
|
src/vm.zig
|
const std = @import("std");
const Allocator = std.mem.Allocator;
const terminfo = @import("terminfo.zig");
// Mouse reporting on/off escape sequences (not stored in terminfo).
const enter_mouse_seq = "\x1b[?1000h\x1b[?1002h\x1b[?1015h\x1b[?1006h";
const exit_mouse_seq = "\x1b[?1006l\x1b[?1015l\x1b[?1002l\x1b[?1000l";
// terminfo magic numbers.
// Fixed: Zig has no C-style octal literals, so the legacy 16-bit format
// magic (octal 0432, i.e. decimal 282) must be spelled 0o432.
const ti_magic = 0o432;
// Magic of the newer 32-bit number format (decimal 542).
const ti_alt_magic = 542;
const ti_header_length = 12;
// Indices into the terminfo string table for the capabilities we need;
// order must match the TermFunc enum / key tables respectively.
const ti_funcs = [_]i16{
    28, 40, 16, 13, 5, 39, 36, 27, 26, 34, 89, 88,
};
const ti_keys = [_]i16{
    66, 68, 69, 70, 71, 72, 73, 74, 75, 67, 216, 217, 77, 59, 76, 164, 82, 81,
    87, 61, 79, 83,
};
const t_keys_num = 22;
const t_funcs_num = 14;
/// Terminal capabilities we use. The declaration order is significant:
/// TermFuncs.get indexes its data array by @enumToInt of this enum.
pub const TermFunc = enum {
    EnterCa,
    ExitCa,
    ShowCursor,
    HideCursor,
    ClearScreen,
    Sgr0,
    Underline,
    Bold,
    Blink,
    Reverse,
    EnterKeypad,
    ExitKeypad,
    EnterMouse,
    ExitMouse,
};
/// Function escape sequences for one terminal, indexed by TermFunc.
/// `allocator` is null for the builtin static tables and non-null when the
/// strings were copied out of terminfo (in which case deinit frees them).
pub const TermFuncs = struct {
    allocator: ?*Allocator,
    data: [t_funcs_num][]const u8,
    const Self = @This();
    /// Frees the sequences when they are heap-allocated.
    pub fn deinit(self: *Self) void {
        if (self.allocator) |a| {
            for (self.data) |x| a.free(x);
        }
    }
    /// Escape sequence for the given capability.
    pub fn get(self: Self, x: TermFunc) []const u8 {
        return self.data[@enumToInt(x)];
    }
};
/// Key escape sequences for one terminal (t_keys_num entries).
/// `allocator` is null for the builtin static tables and non-null when the
/// strings were copied out of terminfo (in which case deinit frees them).
pub const TermKeys = struct {
    allocator: ?*Allocator,
    data: [t_keys_num][]const u8,
    const Self = @This();
    /// Frees the sequences when they are heap-allocated.
    pub fn deinit(self: *Self) void {
        if (self.allocator) |a| {
            for (self.data) |x| a.free(x);
        }
    }
};
/// A terminal definition: its name plus key and function escape-sequence
/// tables, parsed from terminfo or taken from the builtin fallbacks below.
pub const Term = struct {
    name: []const u8,
    keys: TermKeys,
    funcs: TermFuncs,
    const Self = @This();
    /// Returns a builtin definition when `name` occurs as a substring of $TERM.
    fn tryCompatible(term: []const u8, name: []const u8, keys: TermKeys, funcs: TermFuncs) ?Self {
        if (std.mem.indexOf(u8, term, name)) |_| {
            return Self{
                .name = name,
                .keys = keys,
                .funcs = funcs,
            };
        } else {
            return null;
        }
    }
    /// Fallback when no terminfo data could be loaded: exact $TERM match
    /// against `terms` first, then substring heuristics.
    fn initTermBuiltin() !Self {
        const term = std.os.getenv("TERM") orelse return error.UnsupportedTerm;
        for (terms) |t| {
            if (std.mem.eql(u8, term, t.name)) return t;
        }
        // Trying some heuristics
        if (Self.tryCompatible(term, "xterm", xterm_keys, xterm_funcs)) |r| return r;
        if (Self.tryCompatible(term, "rxvt", rxvt_unicode_keys, rxvt_unicode_funcs)) |r| return r;
        if (Self.tryCompatible(term, "linux", linux_keys, linux_funcs)) |r| return r;
        if (Self.tryCompatible(term, "Eterm", eterm_keys, eterm_funcs)) |r| return r;
        if (Self.tryCompatible(term, "screen", screen_keys, screen_funcs)) |r| return r;
        if (Self.tryCompatible(term, "cygwin", xterm_keys, xterm_funcs)) |r| return r;
        return error.UnsupportedTerm;
    }
    /// Load the terminal definition from the terminfo database, falling back
    /// to the builtin tables. All strings in the result are owned by
    /// `allocator`; release them with `deinit`.
    pub fn initTerm(allocator: *Allocator) !Self {
        const data = (try terminfo.loadTerminfo(allocator)) orelse return initTermBuiltin();
        defer allocator.free(data);
        var result: Self = Self{
            .name = "",
            .keys = TermKeys{
                .allocator = allocator,
                .data = undefined,
            },
            .funcs = TermFuncs{
                .allocator = allocator,
                .data = undefined,
            },
        };
        // terminfo header fields; header_0 is the format magic.
        // NOTE(review): readIntNative assumes the host byte order matches the
        // on-disk terminfo byte order — confirm on big-endian targets.
        const header_0 = std.mem.readIntNative(i16, data[0..2]);
        const header_1 = std.mem.readIntNative(i16, data[2..4]);
        var header_2 = std.mem.readIntNative(i16, data[4..6]);
        const header_3 = std.mem.readIntNative(i16, data[6..8]);
        const header_4 = std.mem.readIntNative(i16, data[8..10]);
        // The 32-bit number format stores 4-byte numbers, the legacy one 2-byte.
        const number_sec_len = if (header_0 == ti_alt_magic) @as(i16, 4) else 2;
        // Pad to an even offset before the numbers section.
        if (@mod(header_1 + header_2, 2) != 0) {
            header_2 += 1;
        }
        const str_offset = ti_header_length + header_1 + header_2 + number_sec_len * header_3;
        const table_offset = str_offset + 2 * header_4;
        // Keys
        // NOTE(review): if copyString fails midway, the strings copied so far
        // leak — consider errdefer cleanup.
        for (result.keys.data) |*x, i| {
            x.* = try terminfo.copyString(allocator, data, str_offset + 2 * ti_keys[i], table_offset);
        }
        // Functions
        for (result.funcs.data[0 .. t_funcs_num - 2]) |*x, i| {
            x.* = try terminfo.copyString(allocator, data, str_offset + 2 * ti_funcs[i], table_offset);
        }
        // The mouse sequences are not in terminfo; use our own constants.
        result.funcs.data[t_funcs_num - 2] = try allocator.dupe(u8, enter_mouse_seq);
        result.funcs.data[t_funcs_num - 1] = try allocator.dupe(u8, exit_mouse_seq);
        return result;
    }
    /// Frees all heap-allocated sequences.
    pub fn deinit(self: *Self) void {
        self.keys.deinit();
        self.funcs.deinit();
    }
};
// Builtin terminal databases, used as fallback when terminfo data is
// unavailable (see Term.initTermBuiltin). allocator is null: the strings are
// static and must not be freed.
const rxvt_256color_keys = TermKeys{
    .allocator = null,
    .data = [_][]const u8{
        "\x1B[11~", "\x1B[12~", "\x1B[13~", "\x1B[14~", "\x1B[15~", "\x1B[17~",
        "\x1B[18~", "\x1B[19~", "\x1B[20~", "\x1B[21~", "\x1B[23~", "\x1B[24~",
        "\x1B[2~",  "\x1B[3~",  "\x1B[7~",  "\x1B[8~",  "\x1B[5~",  "\x1B[6~",
        "\x1B[A",   "\x1B[B",   "\x1B[D",   "\x1B[C",
    },
};
const rxvt_256color_funcs = TermFuncs{
    .allocator = null,
    .data = [_][]const u8{
        "\x1B7\x1B[?47h", "\x1B[2J\x1B[?47l\x1B8",
        "\x1B[?25h", "\x1B[?25l",
        "\x1B[H\x1B[2J", "\x1B[m",
        "\x1B[4m", "\x1B[1m",
        "\x1B[5m", "\x1B[7m",
        "\x1B=", "\x1B>",
        enter_mouse_seq, exit_mouse_seq,
    },
};
const eterm_keys = TermKeys{
    .allocator = null,
    .data = [_][]const u8{
        "\x1B[11~", "\x1B[12~", "\x1B[13~", "\x1B[14~", "\x1B[15~", "\x1B[17~",
        "\x1B[18~", "\x1B[19~", "\x1B[20~", "\x1B[21~", "\x1B[23~", "\x1B[24~",
        "\x1B[2~",  "\x1B[3~",  "\x1B[7~",  "\x1B[8~",  "\x1B[5~",  "\x1B[6~",
        "\x1B[A",   "\x1B[B",   "\x1B[D",   "\x1B[C",
    },
};
const eterm_funcs = TermFuncs{
    .allocator = null,
    .data = [_][]const u8{
        "\x1B7\x1B[?47h", "\x1B[2J\x1B[?47l\x1B8", "\x1B[?25h", "\x1B[?25l",
        "\x1B[H\x1B[2J", "\x1B[m", "\x1B[4m", "\x1B[1m",
        "\x1B[5m", "\x1B[7m", "", "",
        "", "",
    },
};
const screen_keys = TermKeys{
    .allocator = null,
    .data = [_][]const u8{
        "\x1BOP",  "\x1BOQ",  "\x1BOR",  "\x1BOS",  "\x1B[15~", "\x1B[17~",
        "\x1B[18~", "\x1B[19~", "\x1B[20~", "\x1B[21~", "\x1B[23~", "\x1B[24~",
        "\x1B[2~",  "\x1B[3~",  "\x1B[1~",  "\x1B[4~",  "\x1B[5~",  "\x1B[6~",
        "\x1BOA",  "\x1BOB",  "\x1BOD",  "\x1BOC",
    },
};
const screen_funcs = TermFuncs{
    .allocator = null,
    .data = [_][]const u8{
        "\x1B[?1049h", "\x1B[?1049l", "\x1B[34h\x1B[?25h", "\x1B[?25l",
        "\x1B[H\x1B[J", "\x1B[m", "\x1B[4m", "\x1B[1m",
        "\x1B[5m", "\x1B[7m", "\x1B[?1h\x1B=", "\x1B[?1l\x1B>",
        enter_mouse_seq, exit_mouse_seq,
    },
};
const rxvt_unicode_keys = TermKeys{
    .allocator = null,
    .data = [_][]const u8{
        "\x1B[11~", "\x1B[12~", "\x1B[13~", "\x1B[14~", "\x1B[15~", "\x1B[17~",
        "\x1B[18~", "\x1B[19~", "\x1B[20~", "\x1B[21~", "\x1B[23~", "\x1B[24~",
        "\x1B[2~",  "\x1B[3~",  "\x1B[7~",  "\x1B[8~",  "\x1B[5~",  "\x1B[6~",
        "\x1B[A",   "\x1B[B",   "\x1B[D",   "\x1B[C",
    },
};
const rxvt_unicode_funcs = TermFuncs{
    .allocator = null,
    .data = [_][]const u8{
        "\x1B[?1049h", "\x1B[r\x1B[?1049l", "\x1B[?25h", "\x1B[?25l",
        "\x1B[H\x1B[2J", "\x1B[m\x1B(B", "\x1B[4m", "\x1B[1m",
        "\x1B[5m", "\x1B[7m", "\x1B=", "\x1B>",
        enter_mouse_seq, exit_mouse_seq,
    },
};
const linux_keys = TermKeys{
    .allocator = null,
    .data = [_][]const u8{
        "\x1B[[A", "\x1B[[B", "\x1B[[C", "\x1B[[D", "\x1B[[E", "\x1B[17~",
        "\x1B[18~", "\x1B[19~", "\x1B[20~", "\x1B[21~", "\x1B[23~", "\x1B[24~",
        "\x1B[2~", "\x1B[3~", "\x1B[1~", "\x1B[4~", "\x1B[5~", "\x1B[6~",
        "\x1B[A", "\x1B[B", "\x1B[D", "\x1B[C",
    },
};
const linux_funcs = TermFuncs{
    .allocator = null,
    .data = [_][]const u8{
        "", "", "\x1B[?25h\x1B[?0c", "\x1B[?25l\x1B[?1c", "\x1B[H\x1B[J",
        "\x1B[0;10m", "\x1B[4m", "\x1B[1m", "\x1B[5m", "\x1B[7m",
        "", "", "", "",
    },
};
const xterm_keys = TermKeys{
    .allocator = null,
    .data = [_][]const u8{
        "\x1BOP", "\x1BOQ", "\x1BOR", "\x1BOS", "\x1B[15~", "\x1B[17~", "\x1B[18~",
        "\x1B[19~", "\x1B[20~", "\x1B[21~", "\x1B[23~", "\x1B[24~", "\x1B[2~", "\x1B[3~",
        "\x1BOH", "\x1BOF", "\x1B[5~", "\x1B[6~", "\x1BOA", "\x1BOB", "\x1BOD",
        "\x1BOC",
    },
};
const xterm_funcs = TermFuncs{
    .allocator = null,
    .data = [_][]const u8{
        "\x1B[?1049h", "\x1B[?1049l", "\x1B[?12l\x1B[?25h", "\x1B[?25l",
        "\x1B[H\x1B[2J", "\x1B(B\x1B[m", "\x1B[4m", "\x1B[1m",
        "\x1B[5m", "\x1B[7m", "\x1B[?1h\x1B=", "\x1B[?1l\x1B>",
        enter_mouse_seq, exit_mouse_seq,
    },
};
// Terminals matched by exact $TERM name in initTermBuiltin.
const terms = [_]Term{
    .{ .name = "rxvt-256color", .keys = rxvt_256color_keys, .funcs = rxvt_256color_funcs },
    .{ .name = "Eterm", .keys = eterm_keys, .funcs = eterm_funcs },
    .{ .name = "screen", .keys = screen_keys, .funcs = screen_funcs },
    .{ .name = "rxvt-unicode", .keys = rxvt_unicode_keys, .funcs = rxvt_unicode_funcs },
    .{ .name = "linux", .keys = linux_keys, .funcs = linux_funcs },
    .{ .name = "xterm", .keys = xterm_keys, .funcs = xterm_funcs },
};
|
src/term.zig
|
const std = @import("std");
// Force a load of a value. This is useful in particular to avoid branches
// being optimized out at compile-time and to force error-inference on
// otherwise empty error functions.
pub fn forceRuntime(comptime T: type, n: T) T {
    // Round-trip the address through a volatile pointer so the compiler
    // cannot treat `n` as a compile-time-known constant.
    var p = @intToPtr(*volatile T, @ptrToInt(&n));
    return p.*;
}
/// Type-safe bit set over an enum `T` whose members are distinct
/// single-bit values (verified at compile time).
pub fn BitSet(comptime T: type) type {
    // Every flag must occupy exactly one bit.
    inline for (@typeInfo(T).Enum.fields) |field| {
        std.debug.assert(@popCount(@TagType(T), field.value) == 1);
    }
    return struct {
        const Self = @This();
        pub const Type = @TagType(T);
        raw: Type,
        /// Empty set.
        pub fn init() Self {
            return Self{ .raw = 0 };
        }
        /// Wrap an existing raw bit pattern.
        pub fn initRaw(raw: Type) Self {
            return Self{ .raw = raw };
        }
        pub fn set(self: *Self, flag: T) void {
            self.raw |= @enumToInt(flag);
        }
        pub fn clear(self: *Self, flag: T) void {
            self.raw &= ~@enumToInt(flag);
        }
        pub fn get(self: Self, flag: T) bool {
            return self.raw & @enumToInt(flag) != 0;
        }
        /// Number of flags currently set.
        /// Fixed: this Zig version's @popCount takes (type, operand) — as the
        /// comptime assert above already does — so the previous one-argument
        /// call could not compile.
        pub fn count(self: Self) u8 {
            return @popCount(Type, self.raw);
        }
    };
}
/// Fixed-capacity circular buffer with `max_length` slots, of which
/// `length` are active; head wraps within the active region.
pub fn FixedQueue(comptime T: type, comptime max_length: usize) type {
    return struct {
        const Self = @This();
        head: usize,
        length: usize,
        buffer: [max_length]T,
        // Caller must call `insert` length times to seed with valid data.
        // NOTE(review): length == 0 would make insert/take/peek divide by
        // zero — callers appear expected to pass length >= 1.
        pub fn init(length: usize) Self {
            std.debug.assert(length <= max_length);
            return Self{
                .head = 0,
                .length = length,
                .buffer = undefined,
            };
        }
        /// Overwrite the slot at head and advance (used for seeding).
        pub fn insert(self: *Self, item: T) void {
            self.buffer[self.head] = item;
            self.head = (self.head + 1) % self.length;
        }
        /// Pop the item at head, replace it with `next`, and advance.
        pub fn take(self: *Self, next: T) T {
            const item = self.buffer[self.head];
            self.buffer[self.head] = next;
            self.head = (self.head + 1) % self.length;
            return item;
        }
        /// Item `i` steps ahead of head (i == 0 is what `take` returns next).
        pub fn peek(self: Self, i: usize) T {
            return self.buffer[(self.head + i) % self.length];
        }
    };
}
// Simple fixed-point storage for a UQ8.24 type (8 whole bits, 24 fractional).
pub const uq8p24 = struct {
    inner: u32,
    /// Build from whole part `w` and fractional part `f` (value = w + f/2^24).
    pub fn init(w: u8, f: u24) uq8p24 {
        return uq8p24{ .inner = (@intCast(u32, w) << 24) | f };
    }
    /// Approximate the ratio a/b as a UQ8.24 value.
    /// Fixed: `u64(a)` is a C-style cast that is not valid Zig; widen with
    /// @as before shifting so the << 24 cannot overflow 32 bits.
    pub fn initFraction(a: u32, b: u32) uq8p24 {
        return uq8p24{ .inner = @truncate(u32, (@as(u64, a) << 24) / b) };
    }
    /// Whole (integer) part.
    pub fn whole(self: uq8p24) u8 {
        return @intCast(u8, self.inner >> 24);
    }
    /// Fractional part (in units of 2^-24).
    pub fn frac(self: uq8p24) u24 {
        return @truncate(u24, self.inner);
    }
    pub fn add(a: uq8p24, b: uq8p24) uq8p24 {
        // TODO: Handle overflow case.
        return uq8p24{ .inner = a.inner + b.inner };
    }
};
|
src/utility.zig
|
const std = @import("std");
// Arena-backed allocator, assigned in main().
var a: *std.mem.Allocator = undefined;
const stdout = std.io.getStdOut().writer(); //prepare stdout to write in
// Size of the sliding window each value is validated against (AoC day 9).
const PREAMBLE_SIZE: usize = 25;
/// Returns true when `target` is the sum of two *different* entries of
/// `window` (null entries are skipped).
/// Fixed: the inner loop previously started at `window[idx..]`, pairing an
/// element with itself, so any target equal to twice a window value was
/// falsely accepted; it now starts at idx + 1. The unchecked `elt2.?` is
/// also replaced with a null-safe unwrap.
fn check_valid(window: []?u64, target: u64) bool {
    for (window) |elt1_opt, idx| {
        if (elt1_opt) |elt1| {
            for (window[idx + 1 ..]) |elt2_opt| {
                const elt2 = elt2_opt orelse continue;
                if (elt1 + elt2 == target) {
                    return true;
                }
            }
        }
    }
    return false;
}
/// Returns the first value in `input` that is not the sum of two of the
/// preceding PREAMBLE_SIZE values, or 0 when every line validates.
fn run(input: [:0]u8) u64 {
    var all_lines_it = std.mem.tokenize(input, "\n");
    // NOTE(review): capacity is hard-coded to 1000 lines; longer inputs
    // would index out of bounds.
    var parsed = [_]?u64{null} ** 1000;
    var counter: usize = 0;
    var i: usize = 0;
    // Seed the window with the preamble.
    // NOTE(review): the literal 25 should probably be PREAMBLE_SIZE.
    while (i < 25) : (i += 1) {
        parsed[counter] = std.fmt.parseInt(u64, all_lines_it.next().?, 10) catch unreachable;
        counter += 1;
    }
    while (all_lines_it.next()) |line| {
        const target: u64 = std.fmt.parseInt(u64, line, 10) catch unreachable;
        // Validate against the window of the previous PREAMBLE_SIZE values.
        if (!check_valid(parsed[counter - PREAMBLE_SIZE .. counter], target)) {
            return target;
        }
        parsed[counter] = target;
        counter += 1;
    }
    return 0;
}
/// Entry point: reads the puzzle input from argv[1], runs the solver, and
/// prints the elapsed milliseconds plus the answer in the AoC-runner format.
pub fn main() !void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); // create memory allocator for strings
    defer arena.deinit(); // clear memory
    var arg_it = std.process.args();
    _ = arg_it.skip(); // skip over exe name
    a = &arena.allocator; // get ref to allocator
    const input: [:0]u8 = try (arg_it.next(a)).?; // get the first argument
    const start: i128 = std.time.nanoTimestamp(); // start time
    const answer = run(input); // compute answer
    const elapsed_nano: f128 = @intToFloat(f128, std.time.nanoTimestamp() - start);
    const elapsed_milli: f64 = @floatCast(f64, @divFloor(elapsed_nano, 1_000_000));
    try stdout.print("_duration:{d}\n{}\n", .{ elapsed_milli, answer }); // emit actual lines parsed by AOC
}
// End-to-end check: 100 is the first value with no valid pair in its window
// (after the 25-value preamble, 26 = 1+25 and 49 = 23+26 validate; 100 does not).
test "aoc input 1" {
    const i =
        \\1
        \\2
        \\3
        \\4
        \\5
        \\6
        \\7
        \\8
        \\9
        \\10
        \\11
        \\12
        \\13
        \\14
        \\15
        \\16
        \\17
        \\18
        \\19
        \\20
        \\21
        \\22
        \\23
        \\24
        \\25
        \\26
        \\49
        \\100
    ;
    // Copy into a mutable buffer because run() takes a mutable slice.
    var b = i.*;
    std.testing.expect(run(&b) == 100);
}
|
day-09/part-1/lelithium.zig
|
const builtin = @import("builtin");
const std = @import("std");
const Builder = std.build.Builder;
/// Compiles cimgui/imgui into `exe` and links the platform libraries or
/// frameworks the bindings need (Win32, macOS frameworks, or X11).
pub fn linkArtifact(b: *Builder, exe: *std.build.LibExeObjStep, target: std.zig.CrossTarget, comptime prefix_path: []const u8) void {
    exe.linkLibC();
    exe.linkSystemLibrary("c++");
    if (target.isWindows()) {
        exe.linkSystemLibrary("user32");
        exe.linkSystemLibrary("gdi32");
    } else if (target.isDarwin()) {
        const frameworks_dir = macosFrameworksDir(b) catch unreachable;
        exe.addFrameworkDir(frameworks_dir);
        exe.linkFramework("Foundation");
        exe.linkFramework("Cocoa");
        exe.linkFramework("Quartz");
        exe.linkFramework("QuartzCore");
        exe.linkFramework("Metal");
        exe.linkFramework("MetalKit");
        exe.linkFramework("OpenGL");
        // Fixed: the framework is named "AudioToolbox" (capital T); the
        // lowercase spelling fails to resolve on case-sensitive file systems.
        exe.linkFramework("AudioToolbox");
        exe.linkFramework("CoreAudio");
    } else {
        exe.linkSystemLibrary("X11");
    }
    exe.addIncludeDir(prefix_path ++ "src/deps/imgui");
    exe.addIncludeDir(prefix_path ++ "src/deps/imgui/cimgui");
    // Silence the clang warning triggered by cimgui's extern "C" wrappers.
    const cpp_args = [_][]const u8{"-Wno-return-type-c-linkage"};
    exe.addCSourceFile(prefix_path ++ "src/deps/imgui/cimgui/imgui/imgui.cpp", &cpp_args);
    exe.addCSourceFile(prefix_path ++ "src/deps/imgui/cimgui/imgui/imgui_demo.cpp", &cpp_args);
    exe.addCSourceFile(prefix_path ++ "src/deps/imgui/cimgui/imgui/imgui_draw.cpp", &cpp_args);
    exe.addCSourceFile(prefix_path ++ "src/deps/imgui/cimgui/imgui/imgui_widgets.cpp", &cpp_args);
    exe.addCSourceFile(prefix_path ++ "src/deps/imgui/cimgui/cimgui.cpp", &cpp_args);
    exe.addCSourceFile(prefix_path ++ "src/deps/imgui/temporary_hacks.cpp", &cpp_args);
}
// helper function to get SDK path on Mac
/// Query xcrun for the active macOS SDK path and return
/// "<sdk>/System/Library/Frameworks", allocated with the builder's allocator.
fn macosFrameworksDir(b: *Builder) ![]u8 {
    var sdk_path = try b.exec(&[_][]const u8{ "xcrun", "--show-sdk-path" });
    // xcrun output ends with a newline; truncate at the last one if present.
    if (std.mem.lastIndexOf(u8, sdk_path, "\n")) |newline_index| {
        sdk_path = sdk_path[0..newline_index];
    }
    return std.mem.concat(b.allocator, u8, &[_][]const u8{ sdk_path, "/System/Library/Frameworks" });
}
|
src/deps/imgui/build.zig
|
const Self = @This();
const build_options = @import("build_options");
const std = @import("std");
const assert = std.debug.assert;
const os = std.os;
const math = std.math;
const wlr = @import("wlroots");
const wayland = @import("wayland");
const wl = wayland.server.wl;
const zwlr = wayland.server.zwlr;
const c = @import("c.zig");
const server = &@import("main.zig").server;
const util = @import("util.zig");
const Box = @import("Box.zig");
const Config = @import("Config.zig");
const LayerSurface = @import("LayerSurface.zig");
const Output = @import("Output.zig");
const Seat = @import("Seat.zig");
const View = @import("View.zig");
const ViewStack = @import("view_stack.zig").ViewStack;
const XwaylandUnmanaged = @import("XwaylandUnmanaged.zig");
/// Cursor state machine: exactly one mode is active at a time. down/move/
/// resize lock the cursor onto a single view until the operation ends.
const Mode = union(enum) {
    /// No active operation; events go to whatever surface is under the cursor.
    passthrough: void,
    /// A button is held down over this view; events stay locked to it.
    down: *View,
    move: struct {
        view: *View,
        /// View coordinates are stored as i32s as they are in logical pixels.
        /// However, it is possible to move the cursor by a fraction of a
        /// logical pixel and this happens in practice with low dpi, high
        /// polling rate mice. Therefore we must accumulate the current
        /// fractional offset of the mouse to avoid rounding down tiny
        /// motions to 0.
        delta_x: f64 = 0,
        delta_y: f64 = 0,
    },
    resize: struct {
        view: *View,
        /// Fractional motion accumulators; see the comment on move above.
        delta_x: f64 = 0,
        delta_y: f64 = 0,
        /// Offset from the lower right corner of the view
        offset_x: i32,
        offset_y: i32,
    },
};
/// Default xcursor theme size in pixels.
const default_size = 24;
const log = std.log.scoped(.cursor);
/// Current cursor mode as well as any state needed to implement that mode
mode: Mode = .passthrough,
/// The seat this cursor belongs to.
seat: *Seat,
/// Underlying wlroots cursor (image + position on the output layout).
wlr_cursor: *wlr.Cursor,
/// Protocol global used to forward pinch/swipe gestures to clients.
pointer_gestures: *wlr.PointerGesturesV1,
/// Manager for the loaded xcursor theme; replaced by setTheme().
xcursor_manager: *wlr.XcursorManager,
/// Active pointer constraint, if any.
constraint: ?*wlr.PointerConstraintV1 = null,
/// Number of distinct buttons currently pressed
pressed_count: u32 = 0,
// Event listeners; each is registered in init().
axis: wl.Listener(*wlr.Pointer.event.Axis) = wl.Listener(*wlr.Pointer.event.Axis).init(handleAxis),
frame: wl.Listener(*wlr.Cursor) = wl.Listener(*wlr.Cursor).init(handleFrame),
button: wl.Listener(*wlr.Pointer.event.Button) =
    wl.Listener(*wlr.Pointer.event.Button).init(handleButton),
motion_absolute: wl.Listener(*wlr.Pointer.event.MotionAbsolute) =
    wl.Listener(*wlr.Pointer.event.MotionAbsolute).init(handleMotionAbsolute),
motion: wl.Listener(*wlr.Pointer.event.Motion) =
    wl.Listener(*wlr.Pointer.event.Motion).init(handleMotion),
pinch_begin: wl.Listener(*wlr.Pointer.event.PinchBegin) =
    wl.Listener(*wlr.Pointer.event.PinchBegin).init(handlePinchBegin),
pinch_update: wl.Listener(*wlr.Pointer.event.PinchUpdate) =
    wl.Listener(*wlr.Pointer.event.PinchUpdate).init(handlePinchUpdate),
pinch_end: wl.Listener(*wlr.Pointer.event.PinchEnd) =
    wl.Listener(*wlr.Pointer.event.PinchEnd).init(handlePinchEnd),
request_set_cursor: wl.Listener(*wlr.Seat.event.RequestSetCursor) =
    wl.Listener(*wlr.Seat.event.RequestSetCursor).init(handleRequestSetCursor),
swipe_begin: wl.Listener(*wlr.Pointer.event.SwipeBegin) =
    wl.Listener(*wlr.Pointer.event.SwipeBegin).init(handleSwipeBegin),
swipe_update: wl.Listener(*wlr.Pointer.event.SwipeUpdate) =
    wl.Listener(*wlr.Pointer.event.SwipeUpdate).init(handleSwipeUpdate),
swipe_end: wl.Listener(*wlr.Pointer.event.SwipeEnd) =
    wl.Listener(*wlr.Pointer.event.SwipeEnd).init(handleSwipeEnd),
/// Initialize the cursor for `seat`: create the wlroots cursor and xcursor
/// manager, load the theme, and register all event listeners.
pub fn init(self: *Self, seat: *Seat) !void {
    const wlr_cursor = try wlr.Cursor.create();
    errdefer wlr_cursor.destroy();
    wlr_cursor.attachOutputLayout(server.root.output_layout);
    // This is here so that self.xcursor_manager doesn't need to be an
    // optional pointer. This isn't optimal as it does a needless allocation,
    // but this is not a hot path.
    const xcursor_manager = try wlr.XcursorManager.create(null, default_size);
    errdefer xcursor_manager.destroy();
    self.* = .{
        .seat = seat,
        .wlr_cursor = wlr_cursor,
        .pointer_gestures = try wlr.PointerGesturesV1.create(server.wl_server),
        .xcursor_manager = xcursor_manager,
    };
    // Replaces the placeholder manager created above (null/null = defaults).
    try self.setTheme(null, null);
    // wlr_cursor *only* displays an image on screen. It does not move around
    // when the pointer moves. However, we can attach input devices to it, and
    // it will generate aggregate events for all of them. In these events, we
    // can choose how we want to process them, forwarding them to clients and
    // moving the cursor around. See following post for more detail:
    // https://drewdevault.com/2018/07/17/Input-handling-in-wlroots.html
    wlr_cursor.events.axis.add(&self.axis);
    wlr_cursor.events.button.add(&self.button);
    wlr_cursor.events.frame.add(&self.frame);
    wlr_cursor.events.motion_absolute.add(&self.motion_absolute);
    wlr_cursor.events.motion.add(&self.motion);
    wlr_cursor.events.swipe_begin.add(&self.swipe_begin);
    wlr_cursor.events.swipe_update.add(&self.swipe_update);
    wlr_cursor.events.swipe_end.add(&self.swipe_end);
    wlr_cursor.events.pinch_begin.add(&self.pinch_begin);
    wlr_cursor.events.pinch_update.add(&self.pinch_update);
    wlr_cursor.events.pinch_end.add(&self.pinch_end);
    // request_set_cursor comes from the seat, not the cursor itself.
    seat.wlr_seat.events.request_set_cursor.add(&self.request_set_cursor);
}
/// Destroy the wlroots resources owned by this cursor.
pub fn deinit(self: *Self) void {
    self.xcursor_manager.destroy();
    self.wlr_cursor.destroy();
}
/// Set the cursor theme for the given seat, as well as the xwayland theme if
/// this is the default seat. Either argument may be null, in which case a
/// default will be used.
pub fn setTheme(self: *Self, theme: ?[*:0]const u8, _size: ?u32) !void {
    const size = _size orelse default_size;
    // Swap out the manager wholesale; the old one (from init or a previous
    // setTheme call) is destroyed first.
    self.xcursor_manager.destroy();
    self.xcursor_manager = try wlr.XcursorManager.create(theme, size);
    // For each output, ensure a theme of the proper scale is loaded
    var it = server.root.outputs.first;
    while (it) |node| : (it = node.next) {
        const wlr_output = node.data.wlr_output;
        // Load failure is logged but non-fatal.
        self.xcursor_manager.load(wlr_output.scale) catch
            log.err("failed to load xcursor theme '{s}' at scale {}", .{ theme, wlr_output.scale });
    }
    // If this cursor belongs to the default seat, set the xcursor environment
    // variables and the xwayland cursor theme.
    if (self.seat == server.input_manager.defaultSeat()) {
        const size_str = try std.fmt.allocPrint0(util.gpa, "{}", .{size});
        defer util.gpa.free(size_str);
        // setenv failure is surfaced as OutOfMemory.
        if (c.setenv("XCURSOR_SIZE", size_str, 1) < 0) return error.OutOfMemory;
        if (theme) |t| if (c.setenv("XCURSOR_THEME", t, 1) < 0) return error.OutOfMemory;
        if (build_options.xwayland) {
            // Xwayland takes a raw cursor image at scale 1.
            self.xcursor_manager.load(1) catch {
                log.err("failed to load xcursor theme '{s}' at scale 1", .{theme});
                return;
            };
            const wlr_xcursor = self.xcursor_manager.getXcursor("left_ptr", 1).?;
            const image = wlr_xcursor.images[0];
            server.xwayland.setCursor(
                image.buffer,
                image.width * 4, // stride: 4 bytes per pixel
                image.width,
                image.height,
                @intCast(i32, image.hotspot_x),
                @intCast(i32, image.hotspot_y),
            );
        }
    }
}
/// Called when a view is unmapped: if the current cursor operation targets
/// that view, drop back to passthrough mode and clear cursor focus.
pub fn handleViewUnmap(self: *Self, view: *View) void {
    const targets_view = switch (self.mode) {
        .passthrough => false,
        .down => |target_view| target_view == view,
        .move => |data| data.view == view,
        .resize => |data| data.view == view,
    };
    if (!targets_view) return;
    self.mode = .passthrough;
    self.clearFocus();
}
/// Reset the cursor image to the default and clear pointer focus.
fn clearFocus(self: Self) void {
    self.xcursor_manager.setCursorImage("left_ptr", self.wlr_cursor);
    self.seat.wlr_seat.pointerNotifyClearFocus();
}
/// An axis event is a scroll wheel or similar input.
fn handleAxis(listener: *wl.Listener(*wlr.Pointer.event.Axis), event: *wlr.Pointer.event.Axis) void {
    const self = @fieldParentPtr(Self, "axis", listener);
    self.seat.handleActivity();
    // Forward the scroll to whichever client currently has pointer focus.
    const wlr_seat = self.seat.wlr_seat;
    wlr_seat.pointerNotifyAxis(
        event.time_msec,
        event.orientation,
        event.delta,
        event.delta_discrete,
        event.source,
    );
}
/// Track pressed-button count, leave any active mode when the last button is
/// released, and dispatch presses to the surface under the cursor.
fn handleButton(listener: *wl.Listener(*wlr.Pointer.event.Button), event: *wlr.Pointer.event.Button) void {
    const self = @fieldParentPtr(Self, "button", listener);
    self.seat.handleActivity();
    if (event.state == .pressed) {
        self.pressed_count += 1;
    } else {
        std.debug.assert(self.pressed_count > 0);
        self.pressed_count -= 1;
        // Releasing the last button while in down/move/resize ends the mode;
        // leaveMode also forwards the release where needed, so return here.
        if (self.pressed_count == 0 and self.mode != .passthrough) {
            self.leaveMode(event);
            return;
        }
    }
    if (self.surfaceAt()) |result| {
        switch (result.parent) {
            .view => |view| {
                // If a view has been clicked on, give that view keyboard focus and
                // perhaps enter move/resize mode.
                if (event.state == .pressed and self.pressed_count == 1) {
                    // If there is an active mapping for this button which is
                    // handled we are done here
                    if (self.handlePointerMapping(event, view)) return;
                    // Otherwise enter cursor down mode, giving keyboard focus
                    self.enterMode(.down, view);
                }
            },
            .layer_surface => |layer_surface| {
                // If a keyboard interactive layer surface has been clicked on,
                // give it keyboard focus.
                if (layer_surface.wlr_layer_surface.current.keyboard_interactive == .exclusive) {
                    self.seat.focusOutput(layer_surface.output);
                    self.seat.setFocusRaw(.{ .layer = layer_surface });
                }
            },
            // Unmanaged xwayland surfaces only exist with xwayland enabled.
            .xwayland_unmanaged => assert(build_options.xwayland),
        }
        // Always forward the button to the client with pointer focus.
        _ = self.seat.wlr_seat.pointerNotifyButton(event.time_msec, event.button, event.state);
    }
}
/// Forward the start of a pinch gesture via the pointer-gestures protocol.
fn handlePinchBegin(
    listener: *wl.Listener(*wlr.Pointer.event.PinchBegin),
    event: *wlr.Pointer.event.PinchBegin,
) void {
    const self = @fieldParentPtr(Self, "pinch_begin", listener);
    const gestures = self.pointer_gestures;
    gestures.sendPinchBegin(self.seat.wlr_seat, event.time_msec, event.fingers);
}
/// Forward pinch-gesture progress (translation, scale, rotation) to clients.
fn handlePinchUpdate(
    listener: *wl.Listener(*wlr.Pointer.event.PinchUpdate),
    event: *wlr.Pointer.event.PinchUpdate,
) void {
    const self = @fieldParentPtr(Self, "pinch_update", listener);
    const gestures = self.pointer_gestures;
    gestures.sendPinchUpdate(
        self.seat.wlr_seat,
        event.time_msec,
        event.dx,
        event.dy,
        event.scale,
        event.rotation,
    );
}
/// Forward the end (or cancellation) of a pinch gesture to clients.
fn handlePinchEnd(
    listener: *wl.Listener(*wlr.Pointer.event.PinchEnd),
    event: *wlr.Pointer.event.PinchEnd,
) void {
    const self = @fieldParentPtr(Self, "pinch_end", listener);
    const gestures = self.pointer_gestures;
    gestures.sendPinchEnd(self.seat.wlr_seat, event.time_msec, event.cancelled);
}
/// Forward the start of a swipe gesture via the pointer-gestures protocol.
fn handleSwipeBegin(
    listener: *wl.Listener(*wlr.Pointer.event.SwipeBegin),
    event: *wlr.Pointer.event.SwipeBegin,
) void {
    const self = @fieldParentPtr(Self, "swipe_begin", listener);
    const gestures = self.pointer_gestures;
    gestures.sendSwipeBegin(self.seat.wlr_seat, event.time_msec, event.fingers);
}
/// Forward swipe-gesture progress (delta) to clients.
fn handleSwipeUpdate(
    listener: *wl.Listener(*wlr.Pointer.event.SwipeUpdate),
    event: *wlr.Pointer.event.SwipeUpdate,
) void {
    const self = @fieldParentPtr(Self, "swipe_update", listener);
    const gestures = self.pointer_gestures;
    gestures.sendSwipeUpdate(self.seat.wlr_seat, event.time_msec, event.dx, event.dy);
}
/// Forward the end (or cancellation) of a swipe gesture to clients.
fn handleSwipeEnd(
    listener: *wl.Listener(*wlr.Pointer.event.SwipeEnd),
    event: *wlr.Pointer.event.SwipeEnd,
) void {
    const self = @fieldParentPtr(Self, "swipe_end", listener);
    const gestures = self.pointer_gestures;
    gestures.sendSwipeEnd(self.seat.wlr_seat, event.time_msec, event.cancelled);
}
/// Handle the mapping for the passed button if any. Returns true if there
/// was a mapping and the button was handled.
fn handlePointerMapping(self: *Self, event: *wlr.Pointer.event.Button, view: *View) bool {
    const wlr_keyboard = self.seat.wlr_seat.getKeyboard() orelse return false;
    const modifiers = wlr_keyboard.getModifiers();
    const fullscreen = view.current.fullscreen or view.pending.fullscreen;
    // Scan the pointer mappings of the seat's current mode for a match.
    for (server.config.modes.items[self.seat.mode_id].pointer_mappings.items) |mapping| {
        if (event.button != mapping.event_code) continue;
        if (!std.meta.eql(modifiers, mapping.modifiers)) continue;
        switch (mapping.action) {
            // Fullscreen views may be neither moved nor resized.
            .move => if (!fullscreen) self.enterMode(.move, view),
            .resize => if (!fullscreen) self.enterMode(.resize, view),
        }
        return true;
    }
    return false;
}
/// Frame events are sent after regular pointer events to group multiple
/// events together. For instance, two axis events may happen at the same
/// time, in which case a frame event won't be sent in between.
fn handleFrame(listener: *wl.Listener(*wlr.Cursor), wlr_cursor: *wlr.Cursor) void {
    _ = wlr_cursor; // the cursor handle is not needed; we only notify the seat
    const self = @fieldParentPtr(Self, "frame", listener);
    self.seat.wlr_seat.pointerNotifyFrame();
}
/// Forwarded by the cursor when a pointer emits an _absolute_ motion event,
/// with coordinates in 0..1 on each axis. This happens e.g. when wlroots runs
/// nested under a Wayland window (the mouse may enter from any edge) and with
/// some hardware (tablets, absolute-positioning devices).
fn handleMotionAbsolute(
    listener: *wl.Listener(*wlr.Pointer.event.MotionAbsolute),
    event: *wlr.Pointer.event.MotionAbsolute,
) void {
    const self = @fieldParentPtr(Self, "motion_absolute", listener);
    self.seat.handleActivity();
    // Map the absolute [0..1] coordinates onto the output layout...
    var layout_x: f64 = undefined;
    var layout_y: f64 = undefined;
    self.wlr_cursor.absoluteToLayoutCoords(event.device, event.x, event.y, &layout_x, &layout_y);
    // ...then treat the jump from the current position as a relative delta.
    const delta_x = layout_x - self.wlr_cursor.x;
    const delta_y = layout_y - self.wlr_cursor.y;
    self.processMotion(event.device, event.time_msec, delta_x, delta_y, delta_x, delta_y);
}
/// Forwarded by the cursor when a pointer emits a _relative_ motion event
/// (i.e. a delta); hand both accelerated and unaccelerated deltas on.
fn handleMotion(
    listener: *wl.Listener(*wlr.Pointer.event.Motion),
    event: *wlr.Pointer.event.Motion,
) void {
    const self = @fieldParentPtr(Self, "motion", listener);
    self.seat.handleActivity();
    self.processMotion(
        event.device,
        event.time_msec,
        event.delta_x,
        event.delta_y,
        event.unaccel_dx,
        event.unaccel_dy,
    );
}
/// Apply a client-provided cursor image, but only if that client currently
/// holds pointer focus.
fn handleRequestSetCursor(
    listener: *wl.Listener(*wlr.Seat.event.RequestSetCursor),
    event: *wlr.Seat.event.RequestSetCursor,
) void {
    // This event is raised by the seat when a client provides a cursor image
    const self = @fieldParentPtr(Self, "request_set_cursor", listener);
    const focused_client = self.seat.wlr_seat.pointer_state.focused_client;
    // This can be sent by any client, so we check to make sure this one
    // actually has pointer focus first.
    if (focused_client == event.seat_client) {
        // Once we've vetted the client, we can tell the cursor to use the
        // provided surface as the cursor image. It will set the hardware cursor
        // on the output that it's currently on and continue to do so as the
        // cursor moves between outputs.
        log.debug("focused client set cursor", .{});
        self.wlr_cursor.setSurface(event.surface, event.hotspot_x, event.hotspot_y);
    }
}
/// Result of a cursor hit test: the surface found, the cursor position in
/// surface-local coordinates, and which kind of object owns the surface.
const SurfaceAtResult = struct {
    surface: *wlr.Surface,
    /// Surface-local cursor coordinates.
    sx: f64,
    sy: f64,
    parent: union(enum) {
        view: *View,
        layer_surface: *LayerSurface,
        /// Only a real pointer when compiled with xwayland support.
        xwayland_unmanaged: if (build_options.xwayland) *XwaylandUnmanaged else void,
    },
};
/// Find the surface under the cursor if any, and return information about that
/// surface and the cursor's position in surface local coords.
/// This function must be kept in sync with the rendering order in render.zig.
pub fn surfaceAt(self: Self) ?SurfaceAtResult {
    const lx = self.wlr_cursor.x;
    const ly = self.wlr_cursor.y;
    const wlr_output = server.root.output_layout.outputAt(lx, ly) orelse return null;
    const output = @intToPtr(*Output, wlr_output.data);
    // Get output-local coords from the layout coords
    var ox = lx;
    var oy = ly;
    server.root.output_layout.outputCoords(wlr_output, &ox, &oy);
    // Find the first visible fullscreen view in the stack if there is one
    var it = ViewStack(View).iter(output.views.first, .forward, output.current.tags, surfaceAtFilter);
    const fullscreen_view = while (it.next()) |view| {
        if (view.current.fullscreen) break view;
    } else null;
    // Check surfaces in the reverse order they are rendered in:
    //
    // fullscreen:
    // 1. overlay layer toplevels and popups
    // 2. xwayland unmanaged stuff
    // 3. fullscreen view toplevels and popups
    //
    // non-fullscreen:
    // 1. overlay layer toplevels and popups
    // 2. top, bottom, background layer popups
    // 3. top layer toplevels
    // 4. xwayland unmanaged stuff
    // 5. view toplevels and popups
    // 6. bottom, background layer toplevels
    if (layerSurfaceAt(output.getLayer(.overlay).*, ox, oy)) |s| return s;
    if (fullscreen_view) |view| {
        // Fix: the arguments were previously passed as (ly, lx); the helper
        // takes layout coords as (lx, ly), matching the branch below.
        if (build_options.xwayland) if (xwaylandUnmanagedSurfaceAt(lx, ly)) |s| return s;
        var sx: f64 = undefined;
        var sy: f64 = undefined;
        if (view.surfaceAt(ox, oy, &sx, &sy)) |found| {
            return SurfaceAtResult{
                .surface = found,
                .sx = sx,
                .sy = sy,
                .parent = .{ .view = view },
            };
        }
    } else {
        for ([_]zwlr.LayerShellV1.Layer{ .top, .bottom, .background }) |layer| {
            if (layerPopupSurfaceAt(output.getLayer(layer).*, ox, oy)) |s| return s;
        }
        if (layerSurfaceAt(output.getLayer(.top).*, ox, oy)) |s| return s;
        if (build_options.xwayland) if (xwaylandUnmanagedSurfaceAt(lx, ly)) |s| return s;
        if (viewSurfaceAt(output, ox, oy)) |s| return s;
        for ([_]zwlr.LayerShellV1.Layer{ .bottom, .background }) |layer| {
            if (layerSurfaceAt(output.getLayer(layer).*, ox, oy)) |s| return s;
        }
    }
    return null;
}
/// Find the topmost popup surface on the given layer at ox,oy.
fn layerPopupSurfaceAt(layer: std.TailQueue(LayerSurface), ox: f64, oy: f64) ?SurfaceAtResult {
    var node_it = layer.first;
    while (node_it) |node| : (node_it = node.next) {
        const layer_surface = &node.data;
        // Translate to coordinates local to this layer surface's box.
        const local_x = ox - @intToFloat(f64, layer_surface.box.x);
        const local_y = oy - @intToFloat(f64, layer_surface.box.y);
        var sx: f64 = undefined;
        var sy: f64 = undefined;
        if (layer_surface.wlr_layer_surface.popupSurfaceAt(local_x, local_y, &sx, &sy)) |found| {
            return SurfaceAtResult{
                .surface = found,
                .sx = sx,
                .sy = sy,
                .parent = .{ .layer_surface = layer_surface },
            };
        }
    }
    return null;
}
/// Find the topmost surface (or popup surface) on the given layer at ox,oy.
fn layerSurfaceAt(layer: std.TailQueue(LayerSurface), ox: f64, oy: f64) ?SurfaceAtResult {
    var node_it = layer.first;
    while (node_it) |node| : (node_it = node.next) {
        const layer_surface = &node.data;
        // Translate to coordinates local to this layer surface's box.
        const local_x = ox - @intToFloat(f64, layer_surface.box.x);
        const local_y = oy - @intToFloat(f64, layer_surface.box.y);
        var sx: f64 = undefined;
        var sy: f64 = undefined;
        if (layer_surface.wlr_layer_surface.surfaceAt(local_x, local_y, &sx, &sy)) |found| {
            return SurfaceAtResult{
                .surface = found,
                .sx = sx,
                .sy = sy,
                .parent = .{ .layer_surface = layer_surface },
            };
        }
    }
    return null;
}
/// Find the topmost visible view surface (incl. popups) at ox,oy.
/// Views are checked in the reverse of the order they are rendered:
/// focused floating, unfocused floating, focused tiled, unfocused tiled.
fn viewSurfaceAt(output: *const Output, ox: f64, oy: f64) ?SurfaceAtResult {
    // The four passes previously inlined here differed only in which views
    // they considered; the shared scan now lives in viewSurfaceAtFiltered().
    if (viewSurfaceAtFiltered(output, ox, oy, true, true)) |s| return s;
    if (viewSurfaceAtFiltered(output, ox, oy, false, true)) |s| return s;
    if (viewSurfaceAtFiltered(output, ox, oy, true, false)) |s| return s;
    if (viewSurfaceAtFiltered(output, ox, oy, false, false)) |s| return s;
    return null;
}
/// Scan the view stack for a surface at ox,oy, considering only views whose
/// focus state and floating state match the given flags.
fn viewSurfaceAtFiltered(
    output: *const Output,
    ox: f64,
    oy: f64,
    focused: bool,
    float: bool,
) ?SurfaceAtResult {
    var it = ViewStack(View).iter(output.views.first, .forward, output.current.tags, surfaceAtFilter);
    while (it.next()) |view| {
        // focus is a count; nonzero means the view holds focus somewhere.
        if ((view.current.focus != 0) != focused or view.current.float != float) continue;
        var sx: f64 = undefined;
        var sy: f64 = undefined;
        if (view.surfaceAt(ox, oy, &sx, &sy)) |found| {
            return SurfaceAtResult{
                .surface = found,
                .sx = sx,
                .sy = sy,
                .parent = .{ .view = view },
            };
        }
    }
    return null;
}
/// Find the topmost unmanaged xwayland surface at layout coords lx,ly.
fn xwaylandUnmanagedSurfaceAt(lx: f64, ly: f64) ?SurfaceAtResult {
    var node_it = server.root.xwayland_unmanaged_views.first;
    while (node_it) |node| : (node_it = node.next) {
        const xwayland_surface = node.data.xwayland_surface;
        // Translate to coordinates local to this xwayland surface.
        const local_x = lx - @intToFloat(f64, xwayland_surface.x);
        const local_y = ly - @intToFloat(f64, xwayland_surface.y);
        var sx: f64 = undefined;
        var sy: f64 = undefined;
        if (xwayland_surface.surface.?.surfaceAt(local_x, local_y, &sx, &sy)) |found| {
            return SurfaceAtResult{
                .surface = found,
                .sx = sx,
                .sy = sy,
                .parent = .{ .xwayland_unmanaged = &node.data },
            };
        }
    }
    return null;
}
/// Only consider views that are mapped and visible on the current tags.
fn surfaceAtFilter(view: *View, filter_tags: u32) bool {
    // TODO(wlroots): we can remove this view.surface != null check as surfaceAt
    // will start filtering by mapped views by default in 0.15.0
    if (view.surface == null) return false;
    return view.current.tags & filter_tags != 0;
}
/// Enter down, move or resize mode targeting `view` (never passthrough).
/// Gives the view keyboard focus; move/resize additionally float the view
/// if a layout controls it, lock out pointer focus, and swap the cursor image.
pub fn enterMode(self: *Self, mode: std.meta.Tag((Mode)), view: *View) void {
    log.debug("enter {s} cursor mode", .{@tagName(mode)});
    self.seat.focus(view);
    switch (mode) {
        .passthrough => unreachable,
        .down => {
            self.mode = .{ .down = view };
            server.root.startTransaction();
        },
        .move, .resize => {
            switch (mode) {
                .passthrough, .down => unreachable,
                .move => self.mode = .{ .move = .{ .view = view } },
                .resize => {
                    const cur_box = &view.current.box;
                    // Remember the cursor's offset from the bottom-right
                    // corner so resizing tracks the grab point.
                    self.mode = .{ .resize = .{
                        .view = view,
                        .offset_x = cur_box.x + @intCast(i32, cur_box.width) - @floatToInt(i32, self.wlr_cursor.x),
                        .offset_y = cur_box.y + @intCast(i32, cur_box.height) - @floatToInt(i32, self.wlr_cursor.y),
                    } };
                    view.setResizing(true);
                },
            }
            // Automatically float all views being moved by the pointer, if
            // their dimensions are set by a layout generator. If however the views
            // are unarranged, leave them as non-floating so the next active
            // layout can affect them.
            if (!view.current.float and view.output.current.layout != null) {
                view.pending.float = true;
                view.float_box = view.current.box;
                view.applyPending();
            } else {
                // The View.applyPending() call in the other branch starts
                // the transaction needed after the seat.focus() call above.
                server.root.startTransaction();
            }
            // Clear cursor focus, so that the surface does not receive events
            self.seat.wlr_seat.pointerNotifyClearFocus();
            self.xcursor_manager.setCursorImage(
                if (mode == .move) "move" else "se-resize",
                self.wlr_cursor,
            );
        },
    }
}
/// Return from down/move/resize to passthrough
fn leaveMode(self: *Self, event: *wlr.Pointer.event.Button) void {
    log.debug("leave {s} mode", .{@tagName(self.mode)});
    switch (self.mode) {
        .passthrough => unreachable,
        .down => {
            // If we were in down mode, we need pass along the release event
            _ = self.seat.wlr_seat.pointerNotifyButton(event.time_msec, event.button, event.state);
        },
        .move => {},
        // Tell the view the interactive resize is over.
        .resize => |resize| resize.view.setResizing(false),
    }
    self.mode = .passthrough;
    // Re-evaluate what is under the cursor now that the mode has ended.
    self.passthrough(event.time_msec);
}
/// Central motion handler: emits relative-pointer events, applies any active
/// pointer constraint, then acts according to the current cursor mode.
/// `delta_x/y` are the accelerated deltas; `unaccel_dx/dy` the raw ones.
fn processMotion(self: *Self, device: *wlr.InputDevice, time: u32, delta_x: f64, delta_y: f64, unaccel_dx: f64, unaccel_dy: f64) void {
    // Relative-pointer protocol wants the timestamp in microseconds.
    server.input_manager.relative_pointer_manager.sendRelativeMotion(
        self.seat.wlr_seat,
        @as(u64, time) * 1000,
        delta_x,
        delta_y,
        unaccel_dx,
        unaccel_dy,
    );
    var dx: f64 = delta_x;
    var dy: f64 = delta_y;
    // Constraints only apply in passthrough/down mode. A .locked constraint
    // swallows all motion; a region constraint clamps the delta so the
    // cursor stays confined.
    if (self.constraint) |constraint| {
        if (self.mode == .passthrough or self.mode == .down) {
            if (constraint.type == .locked) return;
            const result = self.surfaceAt() orelse return;
            if (result.surface != constraint.surface) return;
            const sx = result.sx;
            const sy = result.sy;
            var sx_con: f64 = undefined;
            var sy_con: f64 = undefined;
            if (!wlr.region.confine(&constraint.region, sx, sy, sx + dx, sy + dy, &sx_con, &sy_con)) {
                return;
            }
            dx = sx_con - sx;
            dy = sy_con - sy;
        }
    }
    switch (self.mode) {
        .passthrough => {
            self.wlr_cursor.move(device, dx, dy);
            // Optionally follow the cursor with keyboard focus.
            if (self.surfaceAt()) |result| {
                const focus_change = self.seat.wlr_seat.pointer_state.focused_surface != result.surface;
                if (server.config.focus_follows_cursor == .normal and focus_change) {
                    switch (result.parent) {
                        .view => |view| {
                            if (self.seat.focused != .view or self.seat.focused.view != view) {
                                self.seat.focusOutput(view.output);
                                self.seat.focus(view);
                                server.root.startTransaction();
                            }
                        },
                        .layer_surface => {},
                        .xwayland_unmanaged => assert(build_options.xwayland),
                    }
                }
            }
            self.passthrough(time);
        },
        .down => |view| {
            self.wlr_cursor.move(device, dx, dy);
            // This takes surface-local coordinates
            const output_box = server.root.output_layout.getBox(view.output.wlr_output).?;
            self.seat.wlr_seat.pointerNotifyMotion(
                time,
                self.wlr_cursor.x - @intToFloat(f64, output_box.x + view.current.box.x - view.surface_box.x),
                self.wlr_cursor.y - @intToFloat(f64, output_box.y + view.current.box.y - view.surface_box.y),
            );
        },
        .move => |*data| {
            // Accumulate fractional motion so tiny deltas are not lost
            // (see the comment on Mode.move).
            dx += data.delta_x;
            dy += data.delta_y;
            data.delta_x = dx - @trunc(dx);
            data.delta_y = dy - @trunc(dy);
            const view = data.view;
            view.move(@floatToInt(i32, dx), @floatToInt(i32, dy));
            // Move the cursor by exactly what the view actually moved.
            self.wlr_cursor.move(
                device,
                @intToFloat(f64, view.pending.box.x - view.current.box.x),
                @intToFloat(f64, view.pending.box.y - view.current.box.y),
            );
            view.applyPending();
        },
        .resize => |*data| {
            // Same fractional accumulation as move mode.
            dx += data.delta_x;
            dy += data.delta_y;
            data.delta_x = dx - @trunc(dx);
            data.delta_y = dy - @trunc(dy);
            const border_width = if (data.view.draw_borders) server.config.border_width else 0;
            // Set width/height of view, clamp to view size constraints and output dimensions
            const box = &data.view.pending.box;
            box.width = @intCast(u32, math.max(0, @intCast(i32, box.width) + @floatToInt(i32, dx)));
            box.height = @intCast(u32, math.max(0, @intCast(i32, box.height) + @floatToInt(i32, dy)));
            data.view.applyConstraints();
            const output_resolution = data.view.output.getEffectiveResolution();
            box.width = math.min(box.width, output_resolution.width - border_width - @intCast(u32, box.x));
            box.height = math.min(box.height, output_resolution.height - border_width - @intCast(u32, box.y));
            data.view.applyPending();
            // Keep cursor locked to the original offset from the bottom right corner
            self.wlr_cursor.warpClosest(
                device,
                @intToFloat(f64, box.x + @intCast(i32, box.width) - data.offset_x),
                @intToFloat(f64, box.y + @intCast(i32, box.height) - data.offset_y),
            );
        },
    }
}
/// Handle potential change in location of views on the output, as well as
/// the target view of a cursor operation potentially being moved to a
/// non-visible tag, becoming fullscreen, etc.
pub fn updateState(self: *Self) void {
    if (!self.shouldPassthrough()) return;
    self.mode = .passthrough;
    // Synthesize a timestamp for the passthrough notification.
    var now: os.timespec = undefined;
    os.clock_gettime(os.CLOCK_MONOTONIC, &now) catch @panic("CLOCK_MONOTONIC not supported");
    const msec = @intCast(u32, now.tv_sec * std.time.ms_per_s +
        @divFloor(now.tv_nsec, std.time.ns_per_ms));
    self.passthrough(msec);
}
/// Decide whether the current mode must fall back to passthrough after view
/// state may have changed (tags, layout, fullscreen).
fn shouldPassthrough(self: Self) bool {
    switch (self.mode) {
        .passthrough => {
            // Not locked to a target view: what is under the cursor may have
            // changed, so a passthrough update is always needed.
            return true;
        },
        .down => |target| {
            // Leave down mode if the target view is no longer visible.
            return target.current.tags & target.output.current.tags == 0;
        },
        .move, .resize => {
            const target = switch (self.mode) {
                .move => |data| data.view,
                .resize => |data| data.view,
                else => unreachable,
            };
            // Abort the operation if the target is hidden, got claimed by a
            // layout, or went fullscreen.
            if (target.current.tags & target.output.current.tags == 0) return true;
            if (!target.current.float and target.output.current.layout != null) return true;
            return target.current.fullscreen;
        },
    }
}
/// Pass an event on to the surface under the cursor, if any.
fn passthrough(self: *Self, time: u32) void {
    assert(self.mode == .passthrough);
    const result = self.surfaceAt() orelse {
        // No surface under the cursor: reset the image and clear focus.
        self.clearFocus();
        return;
    };
    // If input is allowed on the surface, send pointer enter and motion
    // events. wlroots suppresses the enter event if already entered.
    if (server.input_manager.inputAllowed(result.surface)) {
        self.seat.wlr_seat.pointerNotifyEnter(result.surface, result.sx, result.sy);
        self.seat.wlr_seat.pointerNotifyMotion(time, result.sx, result.sy);
    }
}
|
source/river-0.1.0/river/Cursor.zig
|
const std = @import("std");
const Allocator = std.mem.Allocator;
const AllMetadata = @import("metadata.zig").AllMetadata;
/// Collects de-duplicated metadata values (currently artists) across all
/// parsed tags, with results allocated in an internal arena.
pub const Collator = struct {
    metadata: *AllMetadata,
    allocator: Allocator,
    // Owns all memory handed out by this collator; freed in deinit().
    arena: std.heap.ArenaAllocator,
    const Self = @This();
    pub fn init(allocator: Allocator, metadata: *AllMetadata) Self {
        return Self{
            .metadata = metadata,
            .allocator = allocator,
            .arena = std.heap.ArenaAllocator.init(allocator),
        };
    }
    pub fn deinit(self: *Self) void {
        self.arena.deinit();
    }
    /// Gather the set of distinct artist names across all tags, preferring
    /// non-id3v1 sources. The returned slice is arena-owned; the strings it
    /// points to reference the underlying metadata — assumes `self.metadata`
    /// outlives the result (NOTE(review): confirm).
    pub fn artists(self: *Self) ![][]const u8 {
        var artist_set = CollatedTextSet.init(self.arena.allocator());
        defer artist_set.deinit();
        // Each tag format stores the artist under a different key.
        for (self.metadata.tags) |*tag| {
            switch (tag.*) {
                .id3v1 => {},
                .flac => |*flac_meta| {
                    var artist_it = flac_meta.map.valueIterator("ARTIST");
                    while (artist_it.next()) |artist| {
                        try artist_set.put(artist);
                    }
                },
                .vorbis => |*vorbis_meta| {
                    var artist_it = vorbis_meta.map.valueIterator("ARTIST");
                    while (artist_it.next()) |artist| {
                        try artist_set.put(artist);
                    }
                },
                .id3v2 => |*id3v2_meta| {
                    var artist_it = id3v2_meta.metadata.map.valueIterator("TPE1");
                    while (artist_it.next()) |artist| {
                        try artist_set.put(artist);
                    }
                },
                .ape => |*ape_meta| {
                    var artist_it = ape_meta.metadata.map.valueIterator("Artist");
                    while (artist_it.next()) |artist| {
                        try artist_set.put(artist);
                    }
                },
            }
        }
        // id3v1 is a last resort
        if (artist_set.count() == 0) {
            if (self.metadata.getLastMetadataOfType(.id3v1)) |id3v1_meta| {
                if (id3v1_meta.map.getFirst("artist")) |artist| {
                    try artist_set.put(artist);
                }
            }
        }
        return try self.arena.allocator().dupe([]const u8, artist_set.values.items);
    }
};
// TODO: Some sort of CollatedSet that does:
// Trimming, empty value detection, case-insensitivity,
// maybe startsWith detection
/// Ordered set of strings, de-duplicated case-insensitively (ASCII only).
const CollatedTextSet = struct {
    // Insertion-ordered distinct values, as first seen (trimmed).
    values: std.ArrayListUnmanaged([]const u8),
    // TODO: UTF-8 normalization, and/or a comparison function
    // that does proper UTF-8 case-insensitive comparisons
    // Maps lowercased value -> index into `values`.
    normalized_set: std.StringArrayHashMapUnmanaged(usize),
    arena: Allocator,
    const Self = @This();
    /// Allocator must be an arena that will get cleaned up outside of
    /// this struct (this struct's deinit will not handle cleaning up the arena)
    pub fn init(arena: Allocator) Self {
        return .{
            .values = std.ArrayListUnmanaged([]const u8){},
            .normalized_set = std.StringArrayHashMapUnmanaged(usize){},
            .arena = arena,
        };
    }
    pub fn deinit(self: *Self) void {
        // TODO: If this uses an arena, this isn't necessary
        self.values.deinit(self.arena);
        self.normalized_set.deinit(self.arena);
    }
    /// Insert `value` (trimmed of spaces) unless an ASCII-case-insensitive
    /// duplicate is already present. Empty/whitespace-only values are ignored.
    /// NOTE(review): the stored slice points into the caller's `value` buffer,
    /// not the arena — assumes that buffer outlives this set; confirm.
    pub fn put(self: *Self, value: []const u8) !void {
        const trimmed = std.mem.trim(u8, value, " ");
        if (trimmed.len != 0) {
            // TODO: this isn't actually ascii, need UTF-8 lowering/normalizing
            const normalized = try std.ascii.allocLowerString(self.arena, trimmed);
            const result = try self.normalized_set.getOrPut(self.arena, normalized);
            if (!result.found_existing) {
                const index = self.values.items.len;
                try self.values.append(self.arena, trimmed);
                result.value_ptr.* = index;
            }
        }
    }
    /// Number of distinct values stored so far.
    pub fn count(self: Self) usize {
        return self.values.items.len;
    }
};
|
src/collate.zig
|
const std = @import("std");
const util = @import("util.zig");
// Raw puzzle input, embedded at compile time.
const data = @embedFile("../data/day16.txt");
const ParseError = error{ InvalidInput, OutOfMemory, EndOfStream };
// Big-endian bit reader over an in-memory buffer.
const Reader = std.io.BitReader(.Big, std.io.FixedBufferStream([]u8).Reader);
const BitReader = struct {
    const Self = @This();
    reader: Reader,
    /// Total number of bits successfully consumed so far; used by
    /// Operator.deserialize to honor bit-length-delimited sub-packets.
    bits_read: usize = 0,
    /// Bit-reading function that records the number of bits read.
    /// The counter is advanced only after a successful read, so a failing
    /// read (e.g. EndOfStream) no longer corrupts `bits_read`.
    pub fn readBitsNoEof(self: *Self, comptime U: type, bits: usize) !U {
        const result = try self.reader.readBitsNoEof(U, bits);
        self.bits_read += bits;
        return result;
    }
};
pub const PacketHeader = struct {
    const Self = @This();
    version: u3,
    type_id: u3,
    /// Reads the 6-bit packet header: a 3-bit version followed by a
    /// 3-bit type id, in that order.
    pub fn deserialize(reader: *BitReader) ParseError!Self {
        const version = try reader.readBitsNoEof(u3, 3);
        const type_id = try reader.readBitsNoEof(u3, 3);
        return Self{ .version = version, .type_id = type_id };
    }
};
pub const Literal = struct {
    const Self = @This();
    /// Decoded literal value. Widened from u32 to u64: the varint
    /// encoding below can carry up to 16 nibble groups (64 bits), and a
    /// u32 silently truncated longer literals (Packet.value() already
    /// returns u64).
    value: u64,
    /// Decodes a varint literal: 5-bit groups where the lead bit of each
    /// group signals whether another group follows.
    pub fn deserialize(reader: *BitReader) ParseError!Self {
        var self = Self{ .value = 0 };
        while (true) {
            // Read lead bit of varint
            const lead_bit = try reader.readBitsNoEof(u1, 1);
            // Read group of 4 into number
            self.value <<= 4;
            self.value |= try reader.readBitsNoEof(u4, 4);
            // Exit on last group
            if (lead_bit == 0) break;
        }
        return self;
    }
};
pub const Operator = struct {
    const Self = @This();
    /// Child packets; owned by this struct and freed in deinit.
    operands: []Packet,
    allocator: *util.Allocator,
    /// Reads the length-type bit and then the sub-packets it describes.
    pub fn deserialize(reader: *BitReader, allocator: *util.Allocator) ParseError!Self {
        var operands = util.List(Packet).init(allocator);
        errdefer operands.deinit();
        const length_type_id = try reader.readBitsNoEof(u1, 1);
        switch (length_type_id) {
            // Next 15 bits are a number == total length in bits of sub-packets
            0 => {
                const bit_length = try reader.readBitsNoEof(u15, 15);
                const starting_length = reader.bits_read;
                while (reader.bits_read - starting_length < bit_length) {
                    const packet = try Packet.deserialize(reader, allocator);
                    try operands.append(packet);
                }
                // A sub-packet straddling the advertised length means bad input.
                if (reader.bits_read - starting_length != bit_length) return error.InvalidInput;
            },
            // Next 11 bits are a number == number of sub-packets immediately contained in this packet.
            1 => {
                const num_packets = try reader.readBitsNoEof(u11, 11);
                while (operands.items.len < num_packets) {
                    const packet = try Packet.deserialize(reader, allocator);
                    try operands.append(packet);
                }
            },
        }
        return Self{
            .operands = operands.toOwnedSlice(),
            .allocator = allocator,
        };
    }
    /// Recursively frees every child packet, then the slice itself.
    pub fn deinit(self: *Self) void {
        for (self.operands) |*operand| {
            operand.deinit();
        }
        self.allocator.free(self.operands);
    }
};
/// Payload of a packet: either a literal number or an operator over
/// sub-packets (selected by the header's type id, see Packet.deserialize).
pub const PacketData = union(enum) {
    literal: Literal,
    operator: Operator,
};
pub const Packet = struct {
    const Self = @This();
    header: PacketHeader,
    data: PacketData,
    /// Reads one full packet (header + payload), recursing into
    /// Operator.deserialize for non-literal packets.
    pub fn deserialize(reader: *BitReader, allocator: *util.Allocator) ParseError!Self {
        const header = try PacketHeader.deserialize(reader);
        return Self{
            .header = header,
            // Type id 4 is a literal; every other id is an operator.
            .data = switch (header.type_id) {
                4 => PacketData{ .literal = try Literal.deserialize(reader) },
                else => PacketData{ .operator = try Operator.deserialize(reader, allocator) },
            },
        };
    }
    pub fn deinit(self: *Self) void {
        switch (self.data) {
            .operator => |*op| op.deinit(),
            .literal => {},
        }
    }
    /// Recursively evaluates the packet expression tree; the header's
    /// type id selects the operation applied to the operand values.
    pub fn value(self: Self) ParseError!u64 {
        switch (self.data) {
            .literal => |val| return val.value,
            .operator => |op_data| {
                switch (self.header.type_id) {
                    // Add
                    0 => {
                        var sum: u64 = 0;
                        for (op_data.operands) |operand| {
                            sum += try operand.value();
                        }
                        return sum;
                    },
                    // Product
                    1 => {
                        var prod: u64 = 1;
                        for (op_data.operands) |operand| {
                            prod *= try operand.value();
                        }
                        return prod;
                    },
                    // Min
                    // NOTE(review): yields maxInt(u64) for zero operands —
                    // presumably the format guarantees at least one; confirm.
                    2 => {
                        var min: u64 = std.math.maxInt(u64);
                        for (op_data.operands) |operand| {
                            const val = try operand.value();
                            if (val < min) min = val;
                        }
                        return min;
                    },
                    // Max
                    3 => {
                        var max: u64 = 0;
                        for (op_data.operands) |operand| {
                            const val = try operand.value();
                            if (val > max) max = val;
                        }
                        return max;
                    },
                    // Literal (not an operator)
                    4 => unreachable,
                    // Greater than
                    5 => {
                        if (op_data.operands.len != 2) return error.InvalidInput;
                        const val1 = try op_data.operands[0].value();
                        const val2 = try op_data.operands[1].value();
                        return if (val1 > val2) 1 else 0;
                    },
                    // Less than
                    6 => {
                        if (op_data.operands.len != 2) return error.InvalidInput;
                        const val1 = try op_data.operands[0].value();
                        const val2 = try op_data.operands[1].value();
                        return if (val1 < val2) 1 else 0;
                    },
                    // Equality
                    7 => {
                        if (op_data.operands.len != 2) return error.InvalidInput;
                        const val1 = try op_data.operands[0].value();
                        const val2 = try op_data.operands[1].value();
                        return if (val1 == val2) 1 else 0;
                    },
                }
            },
        }
    }
};
/// Adds up the header version of `root_packet` and of every packet
/// nested anywhere beneath it.
pub fn sumPacketVersions(root_packet: Packet) usize {
    var total: usize = root_packet.header.version;
    switch (root_packet.data) {
        .literal => {},
        .operator => |op| {
            for (op.operands) |child| total += sumPacketVersions(child);
        },
    }
    return total;
}
/// Decodes the embedded hex transmission and prints both puzzle answers.
pub fn main() !void {
    defer {
        const leaks = util.gpa_impl.deinit();
        std.debug.assert(!leaks);
    }
    // Allocate buffer for hex value
    var buf = try util.gpa.alloc(u8, data.len / 2);
    defer util.gpa.free(buf);
    // Get bytes for hex number and a reader to read big-endian numbers from it.
    // The stream must live in a named local: the BitReader keeps a pointer to
    // it, so building it from a temporary would dangle.
    const bytes = try std.fmt.hexToBytes(buf, data);
    var stream = std.io.fixedBufferStream(bytes);
    var reader = BitReader{ .reader = std.io.bitReader(.Big, stream.reader()) };
    // Parse root packet
    var root_packet = try Packet.deserialize(&reader, util.gpa);
    defer root_packet.deinit();
    // Part 1
    const version_sum = sumPacketVersions(root_packet);
    util.print("Part 1: {d}\n", .{version_sum});
    // Part 2: value() returns an error union, so unwrap with `try`
    // before formatting it with {d}.
    const value = try root_packet.value();
    util.print("Part 2: {d}\n", .{value});
}
|
src/day16.zig
|
pub const sabaton = @import("../../sabaton.zig");
const std = @import("std");
const uefi = std.os.uefi;
const fs = @import("fs.zig");
// Console output protocol; null until located in main(), and reset to null
// after boot services are exited (UEFI must not be called past that point).
var conout: ?*uefi.protocols.SimpleTextOutputProtocol = null;
// Re-export sabaton's panic handler as this root file's panic.
pub const panic = sabaton.panic;
pub const io = struct {
    /// Writes one character to the UEFI console when it is available,
    /// translating '\n' into a "\r\n" sequence.
    pub fn putchar(ch: u8) void {
        const co = conout orelse return;
        if (ch == '\n')
            putchar('\r');
        const pair = [2]u16{ ch, 0 };
        _ = co.outputString(@ptrCast(*const [1:0]u16, &pair));
    }
};
/// Asks UEFI boot services for an instance of protocol `T` (by `T.guid`).
/// Returns null if the protocol is not present.
pub inline fn locateProtocol(comptime T: type) ?*T {
    var ptr: *T = undefined;
    // Copy the GUID to an 8-byte-aligned local for the firmware call.
    const guid: std.os.uefi.Guid align(8) = T.guid;
    if (uefi.system_table.boot_services.?.locateProtocol(&guid, null, @ptrCast(*?*c_void, &ptr)) != .Success) {
        return null;
    }
    return ptr;
}
/// Asks UEFI boot services for protocol `T` on a specific `handle`.
/// Returns null if the handle does not support the protocol.
pub inline fn handleProtocol(handle: uefi.Handle, comptime T: type) ?*T {
    var ptr: *T = undefined;
    // Copy the GUID to an 8-byte-aligned local for the firmware call.
    const guid: std.os.uefi.Guid align(8) = T.guid;
    if (uefi.system_table.boot_services.?.handleProtocol(handle, &guid, @ptrCast(*?*c_void, &ptr)) != .Success) {
        return null;
    }
    return ptr;
}
/// Scans the UEFI configuration table for an entry whose vendor GUID
/// matches `guid`; returns its table pointer, or null when absent.
pub fn locateConfiguration(guid: uefi.Guid) ?*c_void {
    const table = uefi.system_table;
    for (table.configuration_table[0..table.number_of_table_entries]) |entry| {
        if (entry.vendor_guid.eql(guid))
            return entry.vendor_table;
    }
    return null;
}
/// Comptime-expands a byte string into a zero-terminated array of u16 by
/// widening each byte.
/// NOTE(review): despite the name, this is only correct for ASCII input —
/// confirm no non-ASCII literals are ever passed.
pub fn toUtf16(comptime ascii: []const u8) [ascii.len:0]u16 {
    const curr = [1:0]u16{ascii[0]};
    if (ascii.len == 1) return curr;
    // Recursively append the widened tail.
    return curr ++ toUtf16(ascii[1..]);
}
/// Panics (printing the status tag and `context`) unless `status` is
/// .Success; otherwise a no-op.
pub fn uefiVital(status: uefi.Status, context: [*:0]const u8) void {
    switch (status) {
        .Success => {},
        else => |t| {
            sabaton.puts("Fatal error: ");
            sabaton.print_str(@tagName(t));
            sabaton.puts(", while: ");
            sabaton.puts(context);
            @panic("");
        },
    }
}
/// Like uefiVital, but for statuses already known to be failures.
/// NOTE(review): assumes `status` != .Success — if it were Success,
/// uefiVital would return and execution would hit `unreachable`.
pub fn uefiVitalFail(status: uefi.Status, context: [*:0]const u8) noreturn {
    uefiVital(status, context);
    unreachable;
}
/// Allocator vtable backed by UEFI boot services. Only `alloc` is really
/// supported; resize/free deliberately panic (this bootloader never frees).
pub const allocator_impl = struct {
    vtab: std.mem.Allocator.VTable = .{
        .alloc = allocate,
        .resize = resize,
        .free = free,
    },
    fn allocate(_: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
        _ = ret_addr;
        _ = len_align;
        var ptr: [*]u8 = undefined;
        // allocatePool only guarantees 8-byte alignment, so fall back to
        // whole pages for stricter alignment requests.
        if (ptr_align > 8) {
            uefiVital(uefi.system_table.boot_services.?.allocatePages(.AllocateAnyPages, .LoaderData, (len + 0xFFF) / 0x1000, @ptrCast(*[*]align(0x1000) u8, &ptr)), "Allocating pages");
        } else {
            uefiVital(uefi.system_table.boot_services.?.allocatePool(.LoaderData, len, @ptrCast(*[*]align(8) u8, &ptr)), "Allocating memory");
        }
        return ptr[0..len];
    }
    // Unsupported: log and abort so misuse is caught immediately.
    fn resize(_: *c_void, old_mem: []u8, old_align: u29, new_size: usize, len_align: u29, ret_addr: usize) ?usize {
        _ = ret_addr;
        _ = len_align;
        _ = new_size;
        _ = old_align;
        _ = old_mem;
        sabaton.puts("allocator resize!!\n");
        @panic("");
    }
    // Unsupported: log and abort so misuse is caught immediately.
    fn free(_: *c_void, old_mem: []u8, old_align: u29, ret_addr: usize) void {
        _ = ret_addr;
        _ = old_align;
        _ = old_mem;
        sabaton.puts("allocator free!!\n");
        @panic("");
    }
}{};
/// Global allocator routed through allocator_impl (UEFI boot services);
/// resize/free panic by design. `ptr` is unused by the vtable functions.
pub var allocator = std.mem.Allocator{
    .ptr = undefined,
    .vtable = &allocator_impl.vtab,
};
/// Opens and returns the root directory of the filesystem this image was
/// loaded from. Panics if any protocol along the way is unavailable.
fn findFSRoot() *uefi.protocols.FileProtocol {
    // zig fmt: off
    const loaded_image = handleProtocol(uefi.handle, uefi.protocols.LoadedImageProtocol)
        orelse @panic("findFSRoot(): Could not get loaded image protocol");
    const device = loaded_image.device_handle orelse @panic("findFSRoot(): No loaded file device handle!");
    const simple_file_proto = handleProtocol(device, uefi.protocols.SimpleFileSystemProtocol)
        orelse @panic("findFSRoot(): Could not get simple file system");
    // zig fmt: on
    var file_proto: *uefi.protocols.FileProtocol = undefined;
    switch (simple_file_proto.openVolume(&file_proto)) {
        .Success => return file_proto,
        else => @panic("findFSRoot(): openVolume failed!"),
    }
}
// Page size used for all mappings; overwritten in main() with the
// detected size before any mapping happens.
var page_size: u64 = 0x1000;
pub fn get_page_size() u64 {
    return page_size;
}
// Kernel page tables: built up during main() and applied just before
// entering the kernel.
var paging_root: sabaton.paging.Root = undefined;
// O(n^2) but who cares, it's really small code
/// Selection-sorts the 0x18-byte stivale2 memory map entries in place,
/// by ascending base address (the first 8 bytes of each entry).
fn sortStivale2Memmap(data_c: []align(8) u8) void {
    var data = data_c;
    while (true) {
        const num_entries = data.len / 0x18;
        if (num_entries < 2)
            return;
        var curr_min_i: usize = 0;
        var curr_min_addr = std.mem.readIntNative(u64, data[0..8]);
        // First let's find the smallest addr among the rest
        var i: usize = 1;
        while (i < num_entries) : (i += 1) {
            const curr_addr = std.mem.readIntNative(u64, data[i * 0x18 ..][0..8]);
            if (curr_addr < curr_min_addr) {
                curr_min_addr = curr_addr;
                curr_min_i = i;
            }
        }
        // Swap the current entry with the smallest one
        std.mem.swap([0x18]u8, data[0..0x18], data[curr_min_i * 0x18 ..][0..0x18]);
        // First slot is now final; sort the remainder.
        data = data[0x18..];
    }
}
/// Wrapper around the UEFI memory map: fetching it, iterating it, and
/// converting it into a stivale2 memory map for the kernel.
const MemoryMap = struct {
    /// Raw descriptor buffer; entries are `desc_size` bytes apart.
    memory_map: []align(8) u8,
    /// Map key required by exitBootServices; invalidated by allocations.
    key: usize,
    desc_size: usize,
    desc_version: u32,
    // Fixed-size backing buffer for the firmware's map.
    const memory_map_size = 64 * 1024;
    /// Walks the descriptor buffer in `desc_size` steps.
    const Iterator = struct {
        map: *const MemoryMap,
        curr_offset: usize = 0,
        fn next(self: *@This()) ?*uefi.tables.MemoryDescriptor {
            // Stop once a full descriptor no longer fits in the buffer.
            if (self.curr_offset + @offsetOf(uefi.tables.MemoryDescriptor, "attribute") >= self.map.memory_map.len)
                return null;
            const result = @ptrCast(*uefi.tables.MemoryDescriptor, @alignCast(8, self.map.memory_map.ptr + self.curr_offset));
            self.curr_offset += self.map.desc_size;
            return result;
        }
    };
    /// (Re)reads the current memory map from firmware into `memory_map`,
    /// shrinking `memory_map.len` to the actual size and updating `key`.
    fn fetch(self: *@This()) void {
        self.memory_map.len = memory_map_size;
        uefiVital(uefi.system_table.boot_services.?.getMemoryMap(
            &self.memory_map.len,
            @ptrCast([*]uefi.tables.MemoryDescriptor, @alignCast(8, self.memory_map.ptr)), // Cast is workaround for the wrong zig type annotation
            &self.key,
            &self.desc_size,
            &self.desc_version,
        ), "Getting UEFI memory map");
    }
    /// Returns true when `addr` falls inside any descriptor's range.
    pub fn containsAddr(self: *const @This(), addr: usize) bool {
        var iter = Iterator{ .map = self };
        while (iter.next()) |e| {
            const start_addr = e.physical_start;
            const end_addr = start_addr + e.number_of_pages * page_size;
            if (start_addr <= addr and addr < end_addr)
                return true;
        }
        return false;
    }
    /// Writes a stivale2 memory map tag into `stivale2buf`: identifier,
    /// entry count, then 0x18-byte entries, finally sorted by address.
    fn parse_to_stivale2(self: *const @This(), stivale2buf: []align(8) u8) void {
        var iter = Iterator{ .map = self };
        // stivale2 memmap tag identifier.
        std.mem.writeIntNative(u64, stivale2buf[0x00..0x08], 0x2187F79E8612DE07);
        //std.mem.writeIntNative(u64, stivale2buf[0x08..0x10], 0); // Next ptr
        const num_entries = @ptrCast(*u64, &stivale2buf[0x10]);
        num_entries.* = 0;
        var stivale2ents = stivale2buf[0x18..];
        while (iter.next()) |e| : ({
            num_entries.* += 1;
            stivale2ents = stivale2ents[0x18..];
        }) {
            std.mem.writeIntNative(u64, stivale2ents[0x00..0x08], e.physical_start);
            std.mem.writeIntNative(u64, stivale2ents[0x08..0x10], e.number_of_pages * page_size);
            //std.mem.writeIntNative(u32, stivale2ents[0x14..0x18], stiavle2_reserved);
            // Translate the UEFI memory type into the stivale2 type code.
            std.mem.writeIntNative(u32, stivale2ents[0x10..0x14], @as(u32, switch (e.type) {
                .ReservedMemoryType,
                .UnusableMemory,
                .MemoryMappedIO,
                .MemoryMappedIOPortSpace,
                .PalCode,
                .PersistentMemory,
                .RuntimeServicesCode,
                .RuntimeServicesData,
                => 2, // RESERVED
                // We load all kernel code segments as LoaderData, should probably be changed to reclaim more memory here
                .LoaderData => 0x1001, // KERNEL_AND_MODULES
                .LoaderCode => 0x1000, // BOOTLOADER_RECLAIMABLE
                // Boot services entries are marked as usable since we've
                // already exited boot services when we enter the kernel
                .BootServicesCode,
                .BootServicesData,
                => 1, // USABLE
                .ConventionalMemory => 1, // USABLE
                .ACPIReclaimMemory => 3, // ACPI_RECLAIMABLE
                .ACPIMemoryNVS => 4, // ACPI_NVS
                else => @panic("Bad memory map type"),
            }));
        }
        sortStivale2Memmap(stivale2buf[0x18..]);
    }
    /// Maps every region both identity-mapped and at the higher-half
    /// offset into `root`, choosing memory type and permissions per entry.
    fn map_everything(self: *const @This(), root: *sabaton.paging.Root) void {
        var iter = Iterator{ .map = self };
        while (iter.next()) |e| {
            if (sabaton.safety) {
                sabaton.print_hex(e.physical_start);
                sabaton.puts(", ");
                sabaton.print_hex(e.physical_start + e.number_of_pages * page_size);
                sabaton.puts(": ");
                sabaton.print_str(@tagName(e.type));
                sabaton.puts("\n");
            }
            const memory_type: sabaton.paging.MemoryType = switch (e.type) {
                .ReservedMemoryType,
                .LoaderCode,
                .LoaderData,
                .BootServicesCode,
                .BootServicesData,
                .RuntimeServicesCode,
                .RuntimeServicesData,
                .ConventionalMemory,
                .UnusableMemory,
                .ACPIReclaimMemory,
                .PersistentMemory,
                => .memory,
                .ACPIMemoryNVS,
                .MemoryMappedIO,
                .MemoryMappedIOPortSpace,
                => .mmio,
                // Unknown types are simply not mapped.
                else => continue,
            };
            const perms: sabaton.paging.Perms = switch (e.type) {
                .ReservedMemoryType,
                .LoaderData,
                .BootServicesCode,
                .BootServicesData,
                .RuntimeServicesData,
                .ConventionalMemory,
                .UnusableMemory,
                .ACPIReclaimMemory,
                .PersistentMemory,
                .ACPIMemoryNVS,
                .MemoryMappedIO,
                .MemoryMappedIOPortSpace,
                => .rw,
                .LoaderCode,
                .RuntimeServicesCode,
                => .rwx,
                else => continue,
            };
            sabaton.paging.map(e.physical_start, e.physical_start, e.number_of_pages * page_size, perms, memory_type, root);
            sabaton.paging.map(sabaton.upper_half_phys_base + e.physical_start, e.physical_start, e.number_of_pages * page_size, perms, memory_type, root);
        }
    }
    /// Allocates the backing buffer and does the first fetch.
    fn init(self: *@This()) void {
        self.memory_map.ptr = @alignCast(8, sabaton.vital(allocator.alloc(u8, memory_map_size), "Allocating for UEFI memory map", true).ptr);
        self.fetch();
    }
};
// EL2 -> EL1 transition stub (called via maybe_switch_EL): mirrors SCTLR
// into EL1, enables aarch64 EL1 in HCR, hands counters and FP/SIMD to
// EL1, then ERETs to the local label EL1 keeping the same stack pointer.
comptime {
    asm (
    // zig fmt: off
        \\switch_el2_to_el1:
        \\ MRS X1, SCTLR_EL2
        \\ MSR SCTLR_EL1, X1
        \\ // aarch64 in EL1
        \\ ORR X1, XZR, #(1 << 31)
        \\ ORR X1, X1, #(1 << 1)
        \\ MSR HCR_EL2, X1
        \\ // Counters in EL1
        \\ MRS X1, CNTHCTL_EL2
        \\ ORR X1, X1, #3
        \\ MSR CNTHCTL_EL2, X1
        \\ MSR CNTVOFF_EL2, XZR
        \\ // FP/SIMD in EL1
        \\ MOV X1, #0x33FF
        \\ MSR CPTR_EL2, X1
        \\ MSR HSTR_EL2, XZR
        \\ MOV X1, #0x300000
        \\ MSR CPACR_EL1, X1
        \\ // Get the fuck out of EL2 into EL1
        \\ ADR X1, EL1
        \\ MSR ELR_EL2, X1
        \\ MOV X1, #0x3C5
        \\ MSR SPSR_EL2, X1
        \\ MOV X1, SP
        \\ ERET
        \\EL1:
        \\ MOV SP, X1
        \\ RET
    // zig fmt: on
    );
}
// Provided by the comptime asm block above.
extern fn switch_el2_to_el1() void;
/// Reads CurrentEL and, if running above EL1, drops the CPU to EL1.
/// Starting in EL3 is not supported.
fn maybe_switch_EL() void {
    // CurrentEL holds the exception level in bits [3:2].
    const current_el = (asm volatile ("MRS %[el], CurrentEL"
        : [el] "=r" (-> u64)
    ) >> 2) & 0x3;
    if (current_el == 3)
        unreachable; // Todo: implement
    if (current_el > 1)
        switch_el2_to_el1();
}
// Global memory map instance; initialized in main().
pub var memmap: MemoryMap = undefined;
/// Bootloader entry point: loads the kernel ELF from the boot volume,
/// builds paging and the stivale2 tags, exits boot services, and jumps
/// into the kernel. The ordering here is load-bearing — see comments.
pub fn main() noreturn {
    if (locateProtocol(uefi.protocols.SimpleTextOutputProtocol)) |proto| {
        conout = proto;
    }
    page_size = sabaton.paging.detect_page_size();
    // Find RSDP
    @import("acpi.zig").init();
    // Find the root FS we booted from
    const root = findFSRoot();
    const kernel_file_bytes = sabaton.vital(fs.loadFile(root, "kernel.elf"), "Loading kernel ELF (esp\\kernel.elf)", true);
    // Create the stivale2 tag for the kernel ELF file
    sabaton.kernel_file_tag.kernel_addr = @ptrToInt(kernel_file_bytes.ptr);
    sabaton.add_tag(&sabaton.kernel_file_tag.tag);
    var kernel_elf_file = sabaton.Elf{
        .data = kernel_file_bytes.ptr,
    };
    kernel_elf_file.init();
    var kernel_stivale2_header: sabaton.Stivale2hdr = undefined;
    _ = sabaton.vital(
        kernel_elf_file.load_section(".stivale2hdr", sabaton.util.to_byte_slice(&kernel_stivale2_header)),
        "loading .stivale2hdr",
        true,
    );
    const kernel_memory_bytes = sabaton.vital(allocator.alignedAlloc(u8, 0x1000, kernel_elf_file.paged_bytes()), "Allocating kernel memory", true);
    // Prepare a paging root for the kernel
    paging_root = sabaton.paging.init_paging();
    // Load the kernel into memory
    kernel_elf_file.load(kernel_memory_bytes, &paging_root);
    // Ought to be enough for any firmwares crappy memory layout, right?
    const stivale2_memmap_bytes = @alignCast(8, sabaton.vital(allocator.alloc(u8, 64 * 1024), "Allocating for stivale2 memory map", true));
    // Get the memory map to calculate a max address used by UEFI
    memmap.init();
    // Get a framebuffer
    @import("framebuffer.zig").init(&paging_root);
    sabaton.log_hex("Framebuffer at ", sabaton.fb.addr);
    memmap.map_everything(&paging_root);
    // Now we need a memory map to exit boot services
    // (re-fetched here because earlier allocations invalidated the key).
    memmap.fetch();
    memmap.parse_to_stivale2(stivale2_memmap_bytes);
    sabaton.add_tag(@ptrCast(*sabaton.Stivale2tag, stivale2_memmap_bytes.ptr));
    uefiVital(uefi.system_table.boot_services.?.exitBootServices(uefi.handle, memmap.key), "Exiting boot services");
    // We can't call UEFI after exiting boot services
    conout = null;
    uefi.system_table.boot_services = null;
    sabaton.paging.apply_paging(&paging_root);
    maybe_switch_EL();
    sabaton.enterKernel(&kernel_elf_file, kernel_stivale2_header.stack);
}
|
src/platform/uefi_aarch64/main.zig
|
const std = @import("std");
const c = @import("internal/c.zig");
const internal = @import("internal/internal.zig");
const log = std.log.scoped(.git);
const git = @import("git.zig");
/// A data buffer for exporting data from libgit2
pub const Buf = extern struct {
    ptr: ?[*]u8 = null,
    asize: usize = 0,
    size: usize = 0,

    // Shared empty sentinel so toSlice can always hand out a
    // zero-terminated slice, even for an empty buffer.
    const zero_array = [_]u8{0};
    const zero_slice = zero_array[0..0 :0];

    /// Views the buffer contents as a zero-terminated slice.
    pub fn toSlice(self: Buf) [:0]const u8 {
        return if (self.size == 0)
            zero_slice
        else
            self.ptr.?[0..self.size :0];
    }

    /// Free the memory referred to by the `Buf`
    ///
    /// *Note*: This will not free the memory if it looks like it was not allocated by libgit2, but it will clear the buffer back
    /// to the empty state.
    pub fn deinit(self: *Buf) void {
        log.debug("Buf.deinit called", .{});
        c.git_buf_dispose(@ptrCast(*c.git_buf, self));
        log.debug("Buf freed successfully", .{});
    }

    /// Resize the buffer allocation to make more space.
    ///
    /// If the buffer refers to memory that was not allocated by libgit2, then `ptr` will be replaced with a newly allocated block
    /// of data. Be careful so that memory allocated by the caller is not lost.
    /// If you pass `target_size` = 0 and the memory is not allocated by libgit2, this will allocate a new buffer of size `size`
    /// and copy the external data into it.
    ///
    /// Currently, this will never shrink a buffer, only expand it.
    ///
    /// If the allocation fails, this will return an error and the buffer will be marked as invalid for future operations,
    /// invaliding the contents.
    pub fn grow(self: *Buf, target_size: usize) !void {
        log.debug("Buf.grow called, target_size: {}", .{target_size});
        try internal.wrapCall("git_buf_grow", .{ @ptrCast(*c.git_buf, self), target_size });
        log.debug("Buf grown successfully", .{});
    }

    /// Asks libgit2 whether the buffer looks like binary data.
    pub fn isBinary(self: Buf) bool {
        log.debug("Buf.isBinary called", .{});
        const ret = c.git_buf_is_binary(@ptrCast(*const c.git_buf, &self)) == 1;
        log.debug("Buf is binary: {}", .{ret});
        return ret;
    }

    /// Returns true if the buffer contains a NUL byte.
    pub fn containsNull(self: Buf) bool {
        // Fixed copy-pasted log message (previously said "Buf.isBinary called").
        log.debug("Buf.containsNull called", .{});
        const ret = std.mem.indexOfScalar(u8, self.toSlice(), 0) != null;
        log.debug("Buf contains null: {}", .{ret});
        return ret;
    }

    test {
        try std.testing.expectEqual(@sizeOf(c.git_buf), @sizeOf(Buf));
        try std.testing.expectEqual(@bitSizeOf(c.git_buf), @bitSizeOf(Buf));
    }

    comptime {
        std.testing.refAllDecls(@This());
    }
};
// Force semantic analysis of every declaration in this file when testing.
comptime {
    std.testing.refAllDecls(@This());
}
|
src/buffer.zig
|
const std = @import("std");
const testing = std.testing;
const lir = @import("ir.zig");
/// Opcode for the virtual machine
pub const Opcode = enum(u8) {
    // general ops
    load_integer = 0,
    load_string = 1,
    load_func = 2,
    // NOTE(review): value 3 is unassigned.
    load_true = 4,
    load_false = 5,
    load_nil = 6,
    load_void = 7,
    load_global = 8,
    bind_global = 9,
    load_local = 10,
    bind_local = 11,
    assign_global = 12,
    assign_local = 13,
    jump_false = 14,
    jump = 15,
    call = 16,
    @"return" = 17,
    return_value = 18,
    load_module = 19,
    iter_next = 20,
    make_array = 21,
    make_map = 22,
    make_iter = 23,
    make_range = 24,
    // NOTE(review): 54/55 are out of numeric sequence — presumably appended
    // later to keep previously assigned opcode values stable; confirm.
    make_enum = 54,
    make_slice = 55,
    //bin op
    add = 25,
    sub = 26,
    mul = 27,
    div = 28,
    mod = 29,
    equal = 30,
    not_equal = 31,
    greater_than = 32,
    greater_than_equal = 33,
    less_than = 34,
    less_than_equal = 35,
    minus = 36,
    not = 37,
    bitwise_or = 38,
    bitwise_xor = 39,
    bitwise_and = 40,
    bitwise_not = 41,
    shift_left = 42,
    shift_right = 43,
    @"and" = 44,
    @"or" = 45,
    assign_add = 46,
    assign_sub = 47,
    assign_mul = 48,
    assign_div = 49,
    // same as equal but does not pop the lhs value for switch prong
    match = 50,
    // referencing
    get_by_index = 51,
    set_by_index = 52,
    /// specifically removes a value from the stack
    pop = 53,
};
/// Utility struct with helper functions to make it easier
/// to manage instructions
pub const Instructions = struct {
    /// Flat list of emitted bytecode instructions.
    list: std.ArrayListUnmanaged(Instruction),
    /// Allocator used for `list`, duplicated strings and loop jump lists.
    gpa: *std.mem.Allocator,
    /// Innermost scope; tracks the enclosing loop while emitting.
    scope: Scope = Scope.none,
    pub const Error = error{OutOfMemory};
    const Id = enum { none, loop };
    /// Tracks the enclosing loop (if any) so `break`/`continue` can be
    /// emitted and later patched to the right positions.
    const Scope = union(Id) {
        none,
        /// `start`: instruction index to jump back to for `continue`;
        /// `jumps`: positions of `break` jumps, patched to the loop's end.
        loop: struct { start: u32, jumps: std.ArrayListUnmanaged(u32) },
        /// Creates a new loop Scope
        fn createLoop(start: u32) Scope {
            return .{
                .loop = .{
                    .start = start,
                    .jumps = std.ArrayListUnmanaged(u32){},
                },
            };
        }
    };
/// Creates a new, empty `Instructions` that allocates through `gpa`.
pub fn init(gpa: *std.mem.Allocator) Instructions {
    return Instructions{
        .list = std.ArrayListUnmanaged(Instruction){},
        .gpa = gpa,
    };
}
/// Creates a new instance of `Instructions` as well as generating
/// bytecode instructions from Luf's IR compile unit.
pub fn fromCu(gpa: *std.mem.Allocator, cu: lir.CompileUnit) Error!ByteCode {
    var instructions = init(gpa);
    // Lower every top-level IR instruction, then pack into final ByteCode.
    for (cu.instructions) |inst| {
        try instructions.gen(inst);
    }
    return instructions.final();
}
/// Generates and appends instructions based on the given IR instruction.
/// Dispatches on the IR tag to the matching emit* helper.
fn gen(self: *Instructions, inst: *lir.Inst) Error!void {
    switch (inst.tag) {
        .add,
        .sub,
        .mul,
        .div,
        .eql,
        .nql,
        .lt,
        .gt,
        .assign_add,
        .assign_sub,
        .assign_mul,
        .assign_div,
        .bitwise_xor,
        .bitwise_or,
        .bitwise_and,
        .shift_left,
        .shift_right,
        .@"and",
        .@"or",
        .eql_lt,
        .eql_gt,
        .mod,
        => try self.emitInfix(inst.as(lir.Inst.Double)),
        .not, .bitwise_not, .negate => try self.emitPrefix(inst.as(lir.Inst.Single)),
        .int => try self.emitInt(inst.as(lir.Inst.Int)),
        .string => try self.emitString(inst.as(lir.Inst.String)),
        .primitive => try self.emitPrim(inst.as(lir.Inst.Primitive)),
        .ident => try self.emitIdent(inst.as(lir.Inst.Ident)),
        .expr => try self.emitExpr(inst.as(lir.Inst.Single)),
        .decl => try self.emitDecl(inst.as(lir.Inst.Decl)),
        .@"return" => try self.emitRet(inst.as(lir.Inst.Single)),
        .assign => try self.emitAssign(inst.as(lir.Inst.Double)),
        .store => try self.emitStore(inst.as(lir.Inst.Triple)),
        .load => try self.emitLoad(inst.as(lir.Inst.Double)),
        .list, .map => try self.emitList(inst.as(lir.Inst.DataStructure)),
        .pair => try self.emitPair(inst.as(lir.Inst.Double)),
        .range => try self.emitRange(inst.as(lir.Inst.Double)),
        .import => try self.emitModule(inst.as(lir.Inst.String)),
        .@"enum" => try self.emitEnum(inst.as(lir.Inst.Enum)),
        .condition => try self.emitCond(inst.as(lir.Inst.Condition)),
        .block => try self.emitBlock(inst.as(lir.Inst.Block)),
        .func => try self.emitFunc("", inst.as(lir.Inst.Function)),
        .call => try self.emitCall(inst.as(lir.Inst.Call)),
        .@"while" => try self.emitWhile(inst.as(lir.Inst.Double)),
        .@"switch" => try self.emitSwitch(inst.as(lir.Inst.Switch)),
        .branch => try self.emitBranch(inst.as(lir.Inst.Double)),
        // break/continue use the active loop scope.
        // NOTE(review): accessing `self.scope.loop` assumes the IR only
        // produces break/continue inside loops — confirm.
        .@"break" => try self.scope.loop.jumps.append(self.gpa, try self.label(.jump)),
        .@"continue" => try self.emitPtr(.jump, self.scope.loop.start),
        .@"for" => try self.emitLoop(inst.as(lir.Inst.Loop)),
        .slice => try self.emitSlice(inst.as(lir.Inst.Triple)),
        .comment, .type_def, .func_arg => {}, //VM doesn't do anything with this
    }
}
/// Return the amount of instructions emitted so far.
fn len(self: Instructions) u32 {
    const total = self.list.items.len;
    return @intCast(u32, total);
}
/// Appends a new `Instruction` to the list.
fn append(self: *Instructions, inst: Instruction) !void {
    try self.list.append(self.gpa, inst);
}
/// Appends a new `Instruction` and returns its position in the list.
fn appendRetPos(self: *Instructions, inst: Instruction) !u32 {
    const pos = self.len();
    try self.list.append(self.gpa, inst);
    return pos;
}
/// Returns true when the most recently emitted instruction has opcode `op`.
/// Previously this indexed `items.len - 1` unconditionally, which
/// underflows/panics when nothing has been emitted yet; an empty list now
/// simply reports false.
fn lastIs(self: Instructions, op: Opcode) bool {
    if (self.list.items.len == 0) return false;
    return self.list.items[self.list.items.len - 1].getOp() == op;
}
/// Replaces the ptr of the instruction at `pos`; this is how forward
/// jumps get their destinations filled in after the target is known.
fn patch(self: *Instructions, pos: u32, ptr: u32) void {
    self.list.items[pos].ptr.pos = ptr;
}
/// Replaces the opcode of the last instruction
/// Asserts there's atleast 1 instruction saved
fn replaceLastOp(self: *Instructions, op: Opcode) void {
    std.debug.assert(self.list.items.len > 0);
    self.list.items[self.len() - 1].op = op;
}
/// Pops the last instruction (no-op when the list is empty).
fn pop(self: *Instructions) void {
    _ = self.list.popOrNull();
}
/// Emits a single opcode with no operand.
fn emit(self: *Instructions, op: Opcode) !void {
    try self.append(Instruction.gen(op));
}
/// Emits an opcode that contains an aditional index/pointer to a length/object/position
fn emitPtr(self: *Instructions, op: Opcode, ptr: u32) !void {
    try self.append(Instruction.genPtr(op, ptr));
}
/// Emits `op` with a placeholder pointer of 0x0 and returns the position
/// of the new instruction so the pointer can be patched later.
fn label(self: *Instructions, op: Opcode) !u32 {
    const placeholder = Instruction.genPtr(op, 0x0);
    return self.appendRetPos(placeholder);
}
/// Emits a load of an integer constant.
fn emitInt(self: *Instructions, int: *lir.Inst.Int) !void {
    try self.append(Instruction.genInteger(int.value));
}
/// Emits a load of a string constant; the string is duplicated so the
/// instruction owns its copy.
fn emitString(self: *Instructions, string: *lir.Inst.String) !void {
    try self.append(Instruction.genString(try self.gpa.dupe(u8, string.value)));
}
/// Emits a function: a jump over the body, then the body, then the
/// function object whose entry point is the body's first instruction.
fn emitFunc(self: *Instructions, name: []const u8, func: *lir.Inst.Function) !void {
    // initial jump over the body; patched below once the end is known.
    // (const-correctness fix: was `var` but is never reassigned.)
    const jump = try self.label(.jump);
    const entry_point = self.len();
    try self.gen(func.body);
    // incase no return statement was inside the body, append our own that returns void
    if (!self.lastIs(.return_value)) try self.emit(.@"return");
    const end = try self.appendRetPos(Instruction.genFunction(
        try self.gpa.dupe(u8, name),
        func.locals.len,
        func.args.len,
        entry_point,
    ));
    // jump to end of our function
    self.patch(jump, end);
}
/// Generates bytecode for an arithmetic operation:
/// lhs, then rhs, then the operator opcode itself.
fn emitInfix(self: *Instructions, double: *lir.Inst.Double) !void {
    try self.gen(double.lhs);
    try self.gen(double.rhs);
    try self.emit(switch (double.base.tag) {
        .add => .add,
        .mul => .mul,
        .sub => .sub,
        .div => .div,
        .lt => .less_than,
        .gt => .greater_than,
        .eql => .equal,
        .nql => .not_equal,
        .eql_lt => .less_than_equal,
        .eql_gt => .greater_than_equal,
        .mod => .mod,
        .@"and" => .@"and",
        .@"or" => .@"or",
        .bitwise_xor => .bitwise_xor,
        .bitwise_or => .bitwise_or,
        .bitwise_and => .bitwise_and,
        // NOTE(review): infix .not lowering to .bitwise_not looks odd next
        // to the prefix handling in emitPrefix — confirm intended.
        .not => .bitwise_not,
        .shift_left => .shift_left,
        .shift_right => .shift_right,
        .assign_add => .assign_add,
        .assign_sub => .assign_sub,
        .assign_mul => .assign_mul,
        .assign_div => .assign_div,
        else => unreachable,
    });
}
/// Emits bytecode for a prefix (unary) operator: operand first,
/// then the operator opcode.
fn emitPrefix(self: *Instructions, single: *lir.Inst.Single) !void {
    try self.gen(single.rhs);
    try self.emit(switch (single.base.tag) {
        .negate => .minus,
        .not => .not,
        .bitwise_not => .bitwise_not,
        else => unreachable,
    });
}
/// Emits the load opcode for a primitive value (true/false/void/nil).
fn emitPrim(self: *Instructions, prim: *lir.Inst.Primitive) !void {
    try self.emit(switch (prim.prim_type) {
        .@"true" => .load_true,
        .@"false" => .load_false,
        .@"void" => .load_void,
        .nil => .load_nil,
    });
}
/// Generates the bytecode to load an identifier into the vm,
/// choosing global vs local based on the identifier's scope.
fn emitIdent(self: *Instructions, ident: *lir.Inst.Ident) !void {
    try self.emitPtr(
        if (ident.scope == .global) .load_global else .load_local,
        ident.index,
    );
}
/// First emits bytecode of expression's value, and then emits a `pop` instruction
/// at the end of the expression (its value is unused as a statement).
fn emitExpr(self: *Instructions, single: *lir.Inst.Single) !void {
    try self.gen(single.rhs);
    try self.emit(.pop);
}
/// Generates bytecode to bind a value to an identifier.
/// Function values are routed through emitFunc so they carry their name.
fn emitDecl(self: *Instructions, decl: *lir.Inst.Decl) !void {
    if (decl.value.tag == .func)
        try self.emitFunc(decl.name, decl.value.as(lir.Inst.Function))
    else
        try self.gen(decl.value);
    try self.emitPtr(
        if (decl.scope == .global) .bind_global else .bind_local,
        decl.index,
    );
}
/// Generates bytecode for returning a value: the value, then return_value.
fn emitRet(self: *Instructions, single: *lir.Inst.Single) !void {
    try self.gen(single.rhs);
    try self.emit(.return_value);
}
/// Generates bytecode to reassign a global or local variable:
/// new value first, then the scoped assign opcode.
fn emitAssign(self: *Instructions, double: *lir.Inst.Double) !void {
    const ident = double.lhs.as(lir.Inst.Ident);
    try self.gen(double.rhs);
    try self.emitPtr(
        if (ident.scope == .global) .assign_global else .assign_local,
        ident.index,
    );
}
/// Emits bytecode to assign a value to an element inside a map or list
/// lhs is the list, index is the index to retrieve the element from the list
/// and finally, rhs is the new value to assign to the element.
fn emitStore(self: *Instructions, triple: *lir.Inst.Triple) !void {
    try self.gen(triple.lhs);
    try self.gen(triple.index);
    try self.gen(triple.rhs);
    try self.emit(.set_by_index);
}
/// Emits bytecode to retrieve an element from a map or list
/// where lhs is the list and rhs is an index.
fn emitLoad(self: *Instructions, double: *lir.Inst.Double) !void {
    try self.gen(double.lhs);
    try self.gen(double.rhs);
    try self.emit(.get_by_index);
}
/// Generates bytecode from IR to create either a list or map:
/// all elements, then make_array/make_map with the element count.
fn emitList(self: *Instructions, ds: *lir.Inst.DataStructure) !void {
    for (ds.elements) |e| try self.gen(e);
    try self.emitPtr(
        if (ds.base.tag == .list) .make_array else .make_map,
        @intCast(u32, ds.elements.len),
    );
}
/// Emits bytecode to generate a key-value pair for maps
/// (key then value; make_map consumes them as consecutive stack slots).
fn emitPair(self: *Instructions, double: *lir.Inst.Double) !void {
    try self.gen(double.lhs);
    try self.gen(double.rhs);
}
/// Generates bytecode to create a range from its two endpoints.
fn emitRange(self: *Instructions, double: *lir.Inst.Double) !void {
    try self.gen(double.lhs);
    try self.gen(double.rhs);
    try self.emit(.make_range);
}
/// Emits .load_module bytecode with filename of the imported module
fn emitModule(self: *Instructions, string: *lir.Inst.String) !void {
    try self.emitString(string);
    try self.emit(.load_module);
}
/// Emits the bytecode required to build an enum:
/// all member values, then make_enum with the member count.
fn emitEnum(self: *Instructions, enm: *lir.Inst.Enum) !void {
    for (enm.value) |e| try self.gen(e);
    try self.emitPtr(.make_enum, @intCast(u32, enm.value.len));
}
/// Emits the bytecode for an if with optional else statement.
/// Layout: cond, jump_false -> else/end, then-block, jump -> end,
/// [else-block | load_void]; both jumps are back-patched here.
fn emitCond(self: *Instructions, condition: *lir.Inst.Condition) !void {
    try self.gen(condition.cond);
    const false_label = try self.label(.jump_false);
    try self.gen(condition.then_block);
    // Strip the block's trailing pop so the branch leaves a value.
    if (self.lastIs(.pop)) self.pop();
    const jump_label = try self.label(.jump);
    self.patch(false_label, self.len());
    if (condition.else_block) |block| {
        try self.gen(block);
        if (self.lastIs(.pop)) self.pop();
    } else try self.emit(.load_void);
    self.patch(jump_label, self.len());
}
/// Generates bytecode for each expression inside the block
/// Removes last instruction if it ends with a `pop`; otherwise, unless
/// the block already returned, loads void as the block's value.
fn emitBlock(self: *Instructions, block: *lir.Inst.Block) !void {
    for (block.instructions) |inst| try self.gen(inst);
    if (self.lastIs(.pop))
        self.pop()
    else if (!self.lastIs(.return_value))
        try self.emit(.load_void);
}
/// Emits the .call bytecode after emitting the bytecode for the
/// identifier: args first, then the callee, then call with the arg count.
fn emitCall(self: *Instructions, call: *lir.Inst.Call) !void {
    for (call.args) |arg| try self.gen(arg);
    try self.gen(call.func);
    try self.emitPtr(.call, @intCast(u32, call.args.len));
}
/// Generates the bytecode for a while loop.
/// Layout: cond, jump_false -> end, body, pop, jump -> start; break
/// jumps collected in the loop scope are patched to the end.
fn emitWhile(self: *Instructions, loop: *lir.Inst.Double) !void {
    const start = self.len();
    // Save the enclosing scope and enter a fresh loop scope.
    const prev = self.scope;
    self.scope = Scope.createLoop(start);
    try self.gen(loop.lhs);
    const false_jump = try self.label(.jump_false);
    try self.gen(loop.rhs);
    try self.emit(.pop);
    try self.emitPtr(.jump, start);
    self.patch(false_jump, self.len());
    // Patch every `break` emitted inside the body to jump here.
    for (self.scope.loop.jumps.items) |jump_pos| {
        self.patch(jump_pos, self.len());
    }
    self.scope.loop.jumps.deinit(self.gpa);
    self.scope = prev;
}
/// Generates the full bytecode for a switch statement
/// The captured value stays on the stack while each branch matches
/// against it; the final .pop discards it afterwards.
fn emitSwitch(self: *Instructions, sw: *lir.Inst.Switch) !void {
    try self.gen(sw.capture);
    for (sw.branches) |branch| try self.gen(branch);
    try self.emit(.pop);
}
/// Emits the bytecode for a single branch inside a switch statement
/// Pushes the branch's pattern (lhs), emits .match against the captured
/// value, and jumps over the branch body (rhs) when it doesn't match.
fn emitBranch(self: *Instructions, branch: *lir.Inst.Double) !void {
    try self.gen(branch.lhs);
    try self.emit(.match);
    const jump = try self.label(.jump_false);
    try self.gen(branch.rhs);
    self.patch(jump, self.len());
}
/// Emits bytecode to generate a for loop
/// Layout: make_iter, iter_next, jump_false -> end,
///         [assign index], assign capture, body, pop, jump -> iter_next.
fn emitLoop(self: *Instructions, loop: *lir.Inst.Loop) !void {
    // first create our iterator
    try self.gen(loop.it);
    // the operand tells the VM whether the iterator exposes an index
    try self.emitPtr(.make_iter, if (loop.index != null) 1 else 0);
    // start of loop
    try self.emit(.iter_next);
    const prev = self.scope;
    // the loop scope starts at iter_next (len() - 1) so `continue`
    // re-enters via the iterator advance
    self.scope = Scope.createLoop(self.len() - 1);
    const end_jump = try self.label(.jump_false);
    // index and capture
    if (loop.index) |index| {
        try self.emitPtr(.assign_local, index.as(lir.Inst.Ident).index);
        try self.emit(.pop);
    }
    try self.emitPtr(.assign_local, loop.capture.as(lir.Inst.Ident).index);
    try self.emit(.pop);
    try self.gen(loop.block);
    // pop last value from block before we jump to ensure clean loop state
    if (!self.lastIs(.pop)) try self.emit(.pop);
    try self.emitPtr(.jump, self.scope.loop.start);
    self.patch(end_jump, self.len());
    // patch break/continue jumps registered by the body to the loop end
    for (self.scope.loop.jumps.items) |jump_pos| {
        self.patch(jump_pos, self.len());
    }
    // a break skips the body's .pop, so discard its leftover value here
    if (self.scope.loop.jumps.items.len > 0) try self.emit(.pop);
    self.scope.loop.jumps.deinit(self.gpa);
    self.scope = prev;
}
/// Generates the bytecode to create a slice from a string or list
/// Pushes the sliced value, the start index and the end expression,
/// then emits `.make_slice` to build the slice from the three values.
pub fn emitSlice(self: *Instructions, slice: *lir.Inst.Triple) !void {
    try self.gen(slice.lhs);
    try self.gen(slice.index);
    try self.gen(slice.rhs);
    try self.emit(.make_slice);
}
/// Creates a `ByteCode` object from the current instructions
/// NOTE: This makes the instructions list on `self` invalid
/// Ownership of the instruction slice moves to the returned `ByteCode`,
/// which frees it (and its strings/function names) in `deinit`.
pub fn final(self: *Instructions) ByteCode {
    return .{
        .instructions = self.list.toOwnedSlice(self.gpa),
        .allocator = self.gpa,
    };
}
};
/// Instruction generated by the compiler
/// Each instruction is encoded using little endian
pub const Instruction = union(Type) {
    /// bare opcode without operand
    op: Opcode,
    /// opcode plus a u32 operand (jump target, constant index, length, ...)
    ptr: struct {
        op: Opcode,
        pos: u32,
    },
    /// integer literal; encoded as .load_integer followed by 8 bytes
    integer: u64,
    /// string literal; encoded as .load_string + u16 length + bytes
    string: []const u8,
    /// function literal; encoded as .load_func + name + header fields
    function: struct {
        name: []const u8,
        locals: u32,
        arg_len: u8,
        entry: u32,
    },

    const Type = enum { op, ptr, integer, string, function };

    /// Returns the Opcode of the Instruction
    pub fn getOp(self: Instruction) Opcode {
        return switch (self) {
            .op => self.op,
            .ptr => |ptr| ptr.op,
            .integer => .load_integer,
            .string => .load_string,
            .function => .load_func,
        };
    }

    /// Generates a single opcode
    pub fn gen(op: Opcode) Instruction {
        return .{ .op = op };
    }

    /// Generates a `ptr` instruction
    pub fn genPtr(op: Opcode, ptr: u32) Instruction {
        return .{ .ptr = .{ .op = op, .pos = ptr } };
    }

    /// Generates an `integer` instruction
    pub fn genInteger(value: u64) Instruction {
        return .{ .integer = value };
    }

    /// Generates a `string` instruction
    pub fn genString(value: []const u8) Instruction {
        return .{ .string = value };
    }

    /// Generates a `function` Instruction
    /// `locals`/`arg_len` are narrowed to u32/u8; callers must ensure they fit
    pub fn genFunction(name: []const u8, locals: usize, arg_len: usize, entry_point: u32) Instruction {
        return .{
            .function = .{
                .name = name,
                .locals = @intCast(u32, locals),
                .arg_len = @intCast(u8, arg_len),
                .entry = entry_point,
            },
        };
    }
};
/// Bytecode contains the list of instructions
/// Owns the instruction slice plus every `.string` value and
/// `.function` name inside it; all are freed by `deinit`.
pub const ByteCode = struct {
    instructions: []const Instruction,
    allocator: *std.mem.Allocator,

    /// Encodes instructions to bytecode
    /// memory has to be freed by the caller
    pub fn encode(self: ByteCode) ![]const u8 {
        return Encoder.encode(self.allocator, self.instructions);
    }

    /// Encodes the instructions and emits to a writer stream
    pub fn encodeToStream(self: ByteCode, writer: anytype) !void {
        return Encoder.writeToStream(writer, self.instructions);
    }

    /// Decodes the instructions from a stream and creates a new `ByteCode` struct
    /// The returned struct owns all decoded allocations
    pub fn decodeFromStream(gpa: *std.mem.Allocator, reader: anytype) !ByteCode {
        return ByteCode{
            .instructions = try Decoder.decode(reader, gpa),
            .allocator = gpa,
        };
    }

    /// Frees all memory generated by the compiler
    pub fn deinit(self: *ByteCode) void {
        // strings and function names are separately-owned allocations
        for (self.instructions) |i| {
            if (i == .string) self.allocator.free(i.string);
            if (i == .function) self.allocator.free(i.function.name);
        }
        self.allocator.free(self.instructions);
        self.* = undefined;
    }

    /// Dumps human-readable bytecode representation to the given `writer` interface
    pub fn dump(self: ByteCode, writer: anytype) @TypeOf(writer).Error!void {
        for (self.instructions) |inst| {
            switch (inst) {
                .op => |op| try writer.print("{}\n", .{op}),
                .ptr => |ptr| try writer.print("{} {}\n", .{ ptr.op, ptr.pos }),
                .integer => |int| try writer.print("{} {d}\n", .{ inst.getOp(), int }),
                .string => |string| try writer.print("{} {s}\n", .{ inst.getOp(), string }),
                .function => |func| try writer.print("{} {s} {d} {d} {d}\n", .{
                    inst.getOp(),
                    func.name,
                    func.locals,
                    func.arg_len,
                    func.entry,
                }),
            }
        }
    }
};
/// Byte code writer that encodes by little endian.
pub const Encoder = struct {
    /// Encodes the instructions and returns the encoded bytecode for in-memory usage
    /// Memory is owned by the caller
    pub fn encode(allocator: *std.mem.Allocator, instructions: []const Instruction) ![]const u8 {
        var code = std.ArrayList(u8).init(allocator);
        for (instructions) |inst| {
            try emitInstruction(inst, code.writer());
        }
        return code.toOwnedSlice();
    }

    /// Encodes the instructions and writes it to the input stream
    pub fn writeToStream(writer: anytype, instructions: []const Instruction) @TypeOf(writer).Error!void {
        for (instructions) |inst| {
            try emitInstruction(inst, writer);
        }
    }

    /// Dispatches one instruction to the matching emitter
    fn emitInstruction(inst: Instruction, writer: anytype) @TypeOf(writer).Error!void {
        switch (inst) {
            .op => |op| try emitOp(writer, op),
            .ptr => |ptr| try emitPtr(writer, ptr.op, ptr.pos),
            .integer => |int| try emitInteger(writer, int),
            .string => |string| try emitString(writer, string),
            .function => |func| try emitFunc(writer, func.name, .{
                .locals = func.locals,
                .arg_len = func.arg_len,
                .entry = func.entry,
            }),
        }
    }

    /// Emits a single opcode.
    fn emitOp(writer: anytype, op: Opcode) @TypeOf(writer).Error!void {
        return writer.writeIntLittle(u8, @enumToInt(op));
    }

    /// Emits an opcode and a ptr it points to. This could be a constant, array length, etc
    fn emitPtr(writer: anytype, op: Opcode, ptr: u32) @TypeOf(writer).Error!void {
        try emitOp(writer, op);
        return writer.writeIntLittle(u32, ptr);
    }

    /// Emits a load_integer opcode followed by the bytes representing the integer's value
    fn emitInteger(writer: anytype, value: u64) @TypeOf(writer).Error!void {
        try emitOp(writer, .load_integer);
        return writer.writeIntLittle(u64, value);
    }

    /// Emits a load_string opcode followed by the length of the string encoded as u16.
    /// Finalized by writing the value of the string
    /// The max length of the string is 65535 (the length field is a u16).
    fn emitString(writer: anytype, value: []const u8) @TypeOf(writer).Error!void {
        try emitOp(writer, .load_string);
        try writer.writeIntLittle(u16, @intCast(u16, value.len));
        return writer.writeAll(value);
    }

    /// Emits a `load_func` opcode where the struct is encoded as a byte slice
    /// Layout: u8 name length, name bytes, u32 locals, u8 arg_len, u32 entry
    fn emitFunc(
        writer: anytype,
        name: []const u8,
        func: struct {
            locals: u32,
            arg_len: u8,
            entry: u32,
        },
    ) @TypeOf(writer).Error!void {
        try emitOp(writer, .load_func);
        const len: u8 = @intCast(u8, name.len);
        try writer.writeIntLittle(u8, len);
        try writer.writeAll(name);
        try writer.writeIntLittle(u32, func.locals);
        try writer.writeIntLittle(u8, func.arg_len);
        try writer.writeIntLittle(u32, func.entry);
    }
};
/// Decoder for Luf's bytecode
pub const Decoder = struct {
    /// Decodes a stream into a list of `Instruction`
    /// Reads until end-of-stream; any other read error is returned.
    /// On failure, every already-decoded string/function allocation is freed.
    pub fn decode(reader: anytype, allocator: *std.mem.Allocator) ![]Instruction {
        var instructions = std.ArrayList(Instruction).init(allocator);
        errdefer {
            for (instructions.items) |i| {
                if (i == .string) allocator.free(i.string);
                if (i == .function) allocator.free(i.function.name);
            }
            instructions.deinit();
        }
        // NOTE(review): `self` is unused by the load* helpers below;
        // this instance exists only to call them as methods.
        var decoder = Decoder{};
        while (true) {
            const byte = reader.readByte() catch |err| switch (err) {
                error.EndOfStream => break, // normal termination
                else => return err,
            };
            const op = @intToEnum(Opcode, byte);
            const inst = try instructions.addOne();
            switch (op) {
                .load_func => try decoder.loadFunc(reader, inst, allocator),
                .load_string => try decoder.loadString(reader, inst, allocator),
                .load_integer => try decoder.loadInt(reader, inst),
                // opcodes below all carry a u32 operand
                .load_global,
                .bind_global,
                .load_local,
                .bind_local,
                .assign_global,
                .assign_local,
                .jump_false,
                .jump,
                .make_array,
                .make_map,
                .make_enum,
                .call,
                => try decoder.loadPtr(reader, inst, op),
                else => inst.* = .{ .op = op },
            }
        }
        return instructions.toOwnedSlice();
    }

    /// Loads the current opcode into a fuction instruction
    /// Mirrors Encoder.emitFunc's layout: u8 name length, name bytes,
    /// u32 locals, u8 arg_len, u32 entry
    fn loadFunc(self: *Decoder, reader: anytype, inst: *Instruction, allocator: *std.mem.Allocator) !void {
        const name_length = try reader.readIntLittle(u8);
        const name = if (name_length > 0) blk: {
            const string = try allocator.alloc(u8, name_length);
            errdefer allocator.free(string);
            // short read means the stream was truncated mid-instruction
            if ((try reader.readAll(string)) < name_length)
                return error.InvalidBytecode;
            break :blk string;
        } else "";
        inst.* = .{
            .function = .{
                .name = name,
                .locals = try reader.readIntLittle(u32),
                .arg_len = try reader.readIntLittle(u8),
                .entry = try reader.readIntLittle(u32),
            },
        };
    }

    /// Loads a string instruction
    /// Mirrors Encoder.emitString: u16 length followed by the bytes
    fn loadString(self: *Decoder, reader: anytype, inst: *Instruction, allocator: *std.mem.Allocator) !void {
        const string_length = try reader.readIntLittle(u16);
        const string = try allocator.alloc(u8, string_length);
        errdefer allocator.free(string);
        if ((try reader.readAll(string)) < string_length)
            return error.InvalidBytecode;
        inst.* = .{ .string = string };
    }

    /// Loads a 64 bit unsigned integer `Instruction`, it is up to the VM
    /// to bitcast it to a signed integer
    fn loadInt(self: *Decoder, reader: anytype, inst: *Instruction) !void {
        const int = try reader.readIntLittle(u64);
        inst.* = .{ .integer = int };
    }

    /// Loads an instruction that contains the opcode as well as the ptr towards an index
    fn loadPtr(
        self: *Decoder,
        reader: anytype,
        inst: *Instruction,
        op: Opcode,
    ) !void {
        const ptr = try reader.readIntLittle(u32);
        inst.* = .{ .ptr = .{ .op = op, .pos = ptr } };
    }
};
test "Encoding and decoding of instructions" {
    const allocator = testing.allocator;
    // 34 = 1 (op) + 5 (string) + 9 (int) + 5 (ptr) + 14 (func), the exact
    // encoded size of the instructions below
    var buffer: [34]u8 = undefined;
    const instructions = &[_]Instruction{
        .{ .op = .load_false },
        .{ .string = "Hi" },
        .{ .integer = 5 },
        .{ .ptr = .{ .op = .jump, .pos = 5 } },
        .{ .function = .{ .name = "add", .locals = 2, .arg_len = 2, .entry = 1 } },
    };
    var stream = std.io.fixedBufferStream(&buffer);
    const code = try Encoder.encode(allocator, instructions);
    defer allocator.free(code);
    try Encoder.writeToStream(stream.writer(), instructions);
    // expected little-endian encodings; the leading byte is the opcode
    const load_false = "\x05";
    const load_string = "\x01\x02\x00Hi";
    const load_int = "\x00\x05\x00\x00\x00\x00\x00\x00\x00";
    const load_ptr = "\x0F\x05\x00\x00\x00";
    const load_fn = "\x02\x03add\x02\x00\x00\x00\x02\x01\x00\x00\x00";
    // both encode paths must produce identical bytes
    try testing.expectEqualSlices(u8, load_false ++ load_string ++ load_int ++ load_ptr ++ load_fn, code);
    try testing.expectEqualSlices(u8, load_false ++ load_string ++ load_int ++ load_ptr ++ load_fn, stream.getWritten());
    stream.reset();
    // round-trip: decoding the bytes must reproduce the original instructions
    const decoded = try Decoder.decode(stream.reader(), allocator);
    defer allocator.free(decoded);
    try testing.expectEqual(instructions.len, decoded.len);
    for (instructions) |inst, i| {
        switch (inst) {
            .op => try testing.expectEqual(inst.op, decoded[i].op),
            .ptr => try testing.expectEqual(inst.ptr.pos, decoded[i].ptr.pos),
            .string => try testing.expectEqualStrings(inst.string, decoded[i].string),
            .integer => try testing.expectEqual(inst.integer, decoded[i].integer),
            .function => |func| {
                try testing.expectEqualStrings(func.name, decoded[i].function.name);
                try testing.expectEqual(func.locals, decoded[i].function.locals);
                try testing.expectEqual(func.arg_len, decoded[i].function.arg_len);
                try testing.expectEqual(func.entry, decoded[i].function.entry);
            },
        }
    }
    // decode() hands ownership of strings/names to the caller
    for (decoded) |inst| {
        if (inst == .string) allocator.free(inst.string);
        if (inst == .function) allocator.free(inst.function.name);
    }
}
/// Compiles `input` and asserts the generated instruction stream begins
/// with the `expected` opcodes (extra trailing instructions are ignored).
fn testInput(input: []const u8, expected: []const Opcode) !void {
    var alloc = testing.allocator;
    var err = @import("error.zig").Errors.init(alloc);
    defer err.deinit();
    var cu = try @import("compiler.zig").compile(alloc, input, &err);
    defer cu.deinit();
    var result = try Instructions.fromCu(alloc, cu);
    defer result.deinit();
    for (expected) |exp, i| {
        try testing.expectEqual(exp, result.instructions[i].getOp());
    }
}
// Table-driven checks: each infix/prefix expression lowers to its operand
// loads followed by the operator opcode and a trailing .pop (statement value
// is discarded).
test "IR to Bytecode - Arithmetic" {
    const test_cases = .{
        .{
            .input = "1 + 2",
            .opcodes = &[_]Opcode{ .load_integer, .load_integer, .add, .pop },
        },
        .{
            .input = "3 - 1",
            .opcodes = &[_]Opcode{ .load_integer, .load_integer, .sub, .pop },
        },
        .{
            .input = "1 * 2",
            .opcodes = &[_]Opcode{ .load_integer, .load_integer, .mul, .pop },
        },
        .{
            .input = "2 / 2",
            .opcodes = &[_]Opcode{ .load_integer, .load_integer, .div, .pop },
        },
        .{
            .input = "true",
            .opcodes = &[_]Opcode{ .load_true, .pop },
        },
        .{
            .input = "1 > 2",
            .opcodes = &[_]Opcode{ .load_integer, .load_integer, .greater_than, .pop },
        },
        .{
            .input = "1 < 2",
            .opcodes = &[_]Opcode{ .load_integer, .load_integer, .less_than, .pop },
        },
        .{
            .input = "1 == 2",
            .opcodes = &[_]Opcode{ .load_integer, .load_integer, .equal, .pop },
        },
        .{
            .input = "1 != 2",
            .opcodes = &[_]Opcode{ .load_integer, .load_integer, .not_equal, .pop },
        },
        .{
            .input = "true == false",
            .opcodes = &[_]Opcode{ .load_true, .load_false, .equal, .pop },
        },
        .{
            .input = "-1",
            .opcodes = &[_]Opcode{ .load_integer, .minus, .pop },
        },
        .{
            .input = "!true",
            .opcodes = &[_]Opcode{ .load_true, .not, .pop },
        },
    };
    inline for (test_cases) |case| {
        try testInput(case.input, case.opcodes);
    }
}
// Declarations, literal aggregates and assignment: values are pushed,
// then bound/assigned; only re-assignment leaves a value needing a .pop.
test "IR to Bytecode - Non control flow" {
    const test_cases = .{
        .{
            .input = "const x = 5",
            .opcodes = &[_]Opcode{ .load_integer, .bind_global },
        },
        .{
            .input = "const x = \"foo\"",
            .opcodes = &[_]Opcode{ .load_string, .bind_global },
        },
        .{
            .input = "const x = []int{1, 2, 3}",
            .opcodes = &[_]Opcode{ .load_integer, .load_integer, .load_integer, .make_array, .bind_global },
        },
        .{
            .input = "const x = []int:int{1: 2, 2: 1, 5: 6}",
            .opcodes = &[_]Opcode{
                .load_integer,
                .load_integer,
                .load_integer,
                .load_integer,
                .load_integer,
                .load_integer,
                .make_map,
                .bind_global,
            },
        },
        .{
            .input = "const x = 1..5",
            .opcodes = &[_]Opcode{
                .load_integer,
                .load_integer,
                .make_range,
                .bind_global,
            },
        },
        .{
            .input = "const x = enum{first_value, second_value, third_value}",
            .opcodes = &[_]Opcode{
                .load_string,
                .load_string,
                .load_string,
                .make_enum,
                .bind_global,
            },
        },
        .{
            .input = "mut x = 5 x = 10",
            .opcodes = &[_]Opcode{
                .load_integer,
                .bind_global,
                .load_integer,
                .assign_global,
                .pop,
            },
        },
    };
    inline for (test_cases) |case| {
        try testInput(case.input, case.opcodes);
    }
}
// Control-flow lowering: branches/loops use jump + jump_false with patched
// targets; functions are skipped over with a leading .jump and referenced
// via .load_func (see emitCond/emitWhile/emitLoop above).
test "IR to Bytecode - Control flow" {
    const test_cases = .{
        .{
            .input = "if true { 5 } else { 7 } 10",
            .opcodes = &[_]Opcode{
                .load_true,
                .jump_false,
                .load_integer,
                .jump,
                .load_integer,
                .pop,
                .load_integer,
                .pop,
            },
        },
        .{
            .input = "fn() void { 1 + 2 }",
            .opcodes = &[_]Opcode{
                .jump,
                .load_integer,
                .load_integer,
                .add,
                .@"return",
                .load_func,
                .pop,
            },
        },
        .{
            .input = "const x = fn() void { 1 } x()",
            .opcodes = &[_]Opcode{
                .jump,
                .load_integer,
                .@"return",
                .load_func,
                .bind_global,
                .load_global,
                .call,
                .pop,
            },
        },
        .{
            .input = "const func = fn(x: int) int { return x } func(5)",
            .opcodes = &[_]Opcode{
                .jump,
                .load_local,
                .return_value,
                .load_func,
                .bind_global,
                .load_integer,
                .load_global,
                .call,
                .pop,
            },
        },
        .{
            .input = "mut i = 0 while (i > 10) { i = 10 }",
            .opcodes = &[_]Opcode{
                .load_integer,
                .bind_global,
                .load_global,
                .load_integer,
                .greater_than,
                .jump_false,
                .load_integer,
                .assign_global,
                .pop,
                .jump,
            },
        },
        .{
            .input = "switch(5){4: nil, 5: nil}",
            .opcodes = &[_]Opcode{
                .load_integer,
                .load_integer,
                .match,
                .jump_false,
                .load_nil,
                .pop,
                .load_integer,
                .match,
                .jump_false,
                .load_nil,
                .pop,
                .pop,
            },
        },
        .{
            .input = "while true {break continue}",
            .opcodes = &[_]Opcode{
                .load_true,
                .jump_false,
                .jump,
                .jump,
                .load_void,
                .pop,
                .jump,
            },
        },
        .{
            .input = "for x: 0..1 {}",
            .opcodes = &[_]Opcode{
                .load_integer,
                .load_integer,
                .make_range,
                .make_iter,
                .iter_next,
                .jump_false,
                .assign_local,
                .pop,
                .jump,
            },
        },
        .{
            .input = "for x, i: 0..1 {}",
            .opcodes = &[_]Opcode{
                .load_integer,
                .load_integer,
                .make_range,
                .make_iter,
                .iter_next,
                .jump_false,
                .assign_local,
                .pop,
                .assign_local,
                .pop,
                .jump,
            },
        },
        .{
            .input = "const list = []int{0,1} const slice = list[0:1]",
            .opcodes = &[_]Opcode{
                .load_integer,
                .load_integer,
                .make_array,
                .bind_global,
                .load_global,
                .load_integer,
                .load_integer,
                .make_slice,
                .bind_global,
            },
        },
    };
    inline for (test_cases) |case| {
        try testInput(case.input, case.opcodes);
    }
}
|
src/bytecode.zig
|
const std = @import("std");
const mach = @import("mach");
const gpu = @import("gpu");
const glfw = @import("glfw");
const App = @This();
/// Vertex layout for the fullscreen quad: clip-space position + UV.
const Vertex = struct {
    pos: @Vector(4, f32),
    uv: @Vector(2, f32),
};
// Four corners of a fullscreen quad; drawn as two triangles via `indices`.
const vertices = [_]Vertex{
    .{ .pos = .{ -1, -1, 0, 1 }, .uv = .{ 0, 0 } },
    .{ .pos = .{ 1, -1, 0, 1 }, .uv = .{ 1, 0 } },
    .{ .pos = .{ 1, 1, 0, 1 }, .uv = .{ 1, 1 } },
    .{ .pos = .{ -1, 1, 0, 1 }, .uv = .{ 0, 1 } },
};
const indices = [_]u16{ 0, 1, 2, 2, 3, 0 };
/// Uniform data uploaded once per frame in update().
const UniformBufferObject = struct {
    resolution: @Vector(2, f32),
    time: f32,
};
// Started in init(); drives the `time` uniform.
var timer: std.time.Timer = undefined;
// App state (this file is the App struct via `const App = @This()`).
pipeline: gpu.RenderPipeline,
queue: gpu.Queue,
vertex_buffer: gpu.Buffer,
index_buffer: gpu.Buffer,
uniform_buffer: gpu.Buffer,
bind_group: gpu.BindGroup,
// Open handle to shaderexp/frag.wgsl, polled for hot reload in update().
fragment_shader_file: std.fs.File,
fragment_shader_code: [:0]const u8,
// mtime of the shader file at last (re)load, for change detection.
last_mtime: i128,
/// Sets up GPU resources (buffers, pipeline, bind group) and opens the
/// fragment shader file for hot reloading. Returns early (without error)
/// if the shader file cannot be opened or stat'ed.
pub fn init(app: *App, engine: *mach.Engine) !void {
    timer = try std.time.Timer.start();
    // On linux if we don't set a minimum size, you can squish the window to 0 pixels of width and height,
    // this makes some strange effects when that happens, so it's better to leave a minimum size to avoid that,
    // this doesn't prevent you from minimizing the window.
    try engine.setOptions(.{
        .size_min = .{ .width = 20, .height = 20 },
    });
    var fragment_file: std.fs.File = undefined;
    var last_mtime: i128 = undefined;
    if (std.fs.cwd().openFile("shaderexp/frag.wgsl", .{ .mode = .read_only })) |file| {
        fragment_file = file;
        if (file.stat()) |stat| {
            last_mtime = stat.mtime;
        } else |err| {
            std.debug.print("Something went wrong when attempting to stat file: {}\n", .{err});
            return;
        }
    } else |e| {
        std.debug.print("Something went wrong when attempting to open file: {}\n", .{e});
        return;
    }
    // sentinel-terminated (align 1, terminator 0) because WGSL source is
    // passed as [:0]const u8; NOTE(review): size cap here is maxInt(u16)
    // but update() uses maxInt(u32) — confirm which limit is intended
    var code = try fragment_file.readToEndAllocOptions(engine.allocator, std.math.maxInt(u16), null, 1, 0);
    const queue = engine.device.getQueue();
    // vertex buffer is filled once at creation and never written again
    const vertex_buffer = engine.device.createBuffer(&.{
        .usage = .{ .vertex = true },
        .size = @sizeOf(Vertex) * vertices.len,
        .mapped_at_creation = true,
    });
    var vertex_mapped = vertex_buffer.getMappedRange(Vertex, 0, vertices.len);
    std.mem.copy(Vertex, vertex_mapped, vertices[0..]);
    vertex_buffer.unmap();
    const index_buffer = engine.device.createBuffer(&.{
        .usage = .{ .index = true },
        .size = @sizeOf(u16) * indices.len,
        .mapped_at_creation = true,
    });
    var index_mapped = index_buffer.getMappedRange(@TypeOf(indices[0]), 0, indices.len);
    std.mem.copy(u16, index_mapped, indices[0..]);
    index_buffer.unmap();
    // We need a bgl to bind the UniformBufferObject, but it is also needed for creating
    // the RenderPipeline, so we pass it to recreatePipeline as a pointer
    var bgl: gpu.BindGroupLayout = undefined;
    const pipeline = recreatePipeline(engine, code, &bgl);
    // uniform buffer is re-written every frame via queue copy (copy_dst)
    const uniform_buffer = engine.device.createBuffer(&.{
        .usage = .{ .copy_dst = true, .uniform = true },
        .size = @sizeOf(UniformBufferObject),
        .mapped_at_creation = false,
    });
    const bind_group = engine.device.createBindGroup(
        &gpu.BindGroup.Descriptor{
            .layout = bgl,
            .entries = &.{
                gpu.BindGroup.Entry.buffer(0, uniform_buffer, 0, @sizeOf(UniformBufferObject)),
            },
        },
    );
    app.pipeline = pipeline;
    app.queue = queue;
    app.vertex_buffer = vertex_buffer;
    app.index_buffer = index_buffer;
    app.uniform_buffer = uniform_buffer;
    app.bind_group = bind_group;
    app.fragment_shader_file = fragment_file;
    app.fragment_shader_code = code;
    app.last_mtime = last_mtime;
    // the bind group holds its own reference; our handle can be dropped now
    bgl.release();
}
/// Releases the GPU objects and host allocations owned by the App.
pub fn deinit(app: *App, engine: *mach.Engine) void {
    app.fragment_shader_file.close();
    engine.allocator.free(app.fragment_shader_code);
    // Fix: the render pipeline created in init() was never released,
    // leaking a GPU object (WebGPU objects are reference counted).
    app.pipeline.release();
    app.vertex_buffer.release();
    app.index_buffer.release();
    app.uniform_buffer.release();
    app.bind_group.release();
}
/// Per-frame tick: handles input, hot-reloads the fragment shader when the
/// file changed on disk, uploads the uniforms and draws the fullscreen quad.
pub fn update(app: *App, engine: *mach.Engine) !void {
    while (engine.pollEvent()) |event| {
        switch (event) {
            .key_press => |ev| {
                if (ev.key == .space)
                    engine.setShouldClose(true);
            },
            else => {},
        }
    }
    // Hot reload: re-read the shader and rebuild the pipeline when the
    // file's mtime advances past what we last saw.
    if (app.fragment_shader_file.stat()) |stat| {
        if (app.last_mtime < stat.mtime) {
            std.log.info("The fragment shader has been changed", .{});
            app.last_mtime = stat.mtime;
            app.fragment_shader_file.seekTo(0) catch unreachable;
            const new_code = app.fragment_shader_file.readToEndAllocOptions(engine.allocator, std.math.maxInt(u32), null, 1, 0) catch |err| {
                std.log.err("Err: {}", .{err});
                return engine.setShouldClose(true);
            };
            // Fix: the previous shader source and the previous pipeline
            // were overwritten without being freed/released, leaking an
            // allocation and a GPU object on every reload.
            engine.allocator.free(app.fragment_shader_code);
            app.fragment_shader_code = new_code;
            const new_pipeline = recreatePipeline(engine, app.fragment_shader_code, null);
            app.pipeline.release();
            app.pipeline = new_pipeline;
        }
    } else |err| {
        std.log.err("Something went wrong when attempting to stat file: {}\n", .{err});
    }
    const back_buffer_view = engine.swap_chain.?.getCurrentTextureView();
    const color_attachment = gpu.RenderPassColorAttachment{
        .view = back_buffer_view,
        .resolve_target = null,
        .clear_value = std.mem.zeroes(gpu.Color),
        .load_op = .clear,
        .store_op = .store,
    };
    const encoder = engine.device.createCommandEncoder(null);
    const render_pass_info = gpu.RenderPassEncoder.Descriptor{
        .color_attachments = &.{color_attachment},
        .depth_stencil_attachment = null,
    };
    // seconds since init(), fed to the shader as the `time` uniform
    const time = @intToFloat(f32, timer.read()) / @as(f32, std.time.ns_per_s);
    const ubo = UniformBufferObject{
        .resolution = .{ @intToFloat(f32, engine.current_desc.width), @intToFloat(f32, engine.current_desc.height) },
        .time = time,
    };
    encoder.writeBuffer(app.uniform_buffer, 0, UniformBufferObject, &.{ubo});
    const pass = encoder.beginRenderPass(&render_pass_info);
    pass.setVertexBuffer(0, app.vertex_buffer, 0, @sizeOf(Vertex) * vertices.len);
    pass.setIndexBuffer(app.index_buffer, .uint16, 0, @sizeOf(u16) * indices.len);
    pass.setPipeline(app.pipeline);
    pass.setBindGroup(0, app.bind_group, &.{0});
    pass.drawIndexed(indices.len, 1, 0, 0, 0);
    pass.end();
    pass.release();
    var command = encoder.finish(null);
    encoder.release();
    app.queue.submit(&.{command});
    command.release();
    engine.swap_chain.?.present();
    back_buffer_view.release();
}
/// Builds a render pipeline from the given WGSL fragment source.
/// Falls back to the embedded black_screen_frag.wgsl when the shader fails
/// validation (bad source, or missing `main` entrypoint).
/// If `bgl` is non-null, the created bind group layout is handed out through
/// it (caller releases); otherwise it is released here.
fn recreatePipeline(engine: *mach.Engine, fragment_shader_code: [:0]const u8, bgl: ?*gpu.BindGroupLayout) gpu.RenderPipeline {
    const vs_module = engine.device.createShaderModule(&.{
        .label = "my vertex shader",
        .code = .{ .wgsl = @embedFile("vert.wgsl") },
    });
    defer vs_module.release();
    const vertex_attributes = [_]gpu.VertexAttribute{
        .{ .format = .float32x4, .offset = @offsetOf(Vertex, "pos"), .shader_location = 0 },
        .{ .format = .float32x2, .offset = @offsetOf(Vertex, "uv"), .shader_location = 1 },
    };
    const vertex_buffer_layout = gpu.VertexBufferLayout{
        .array_stride = @sizeOf(Vertex),
        .step_mode = .vertex,
        .attribute_count = vertex_attributes.len,
        .attributes = &vertex_attributes,
    };
    // Check wether the fragment shader code compiled successfully, if not
    // print the validation layer error and show a black screen
    engine.device.pushErrorScope(.validation);
    var fs_module = engine.device.createShaderModule(&gpu.ShaderModule.Descriptor{
        .label = "my fragment shader",
        .code = .{ .wgsl = fragment_shader_code },
    });
    var error_occurred: bool = false;
    // popErrorScope() returns always true, (unless maybe it fails to capture the error scope?)
    _ = engine.device.popErrorScope(&gpu.ErrorCallback.init(*bool, &error_occurred, struct {
        fn callback(ctx: *bool, typ: gpu.ErrorType, message: [*:0]const u8) void {
            if (typ != .noError) {
                std.debug.print("🔴🔴🔴🔴:\n{s}\n", .{message});
                ctx.* = true;
            }
        }
    }.callback));
    if (error_occurred) {
        // shader failed validation: substitute the known-good black screen
        fs_module = engine.device.createShaderModule(&gpu.ShaderModule.Descriptor{
            .label = "my fragment shader",
            .code = .{ .wgsl = @embedFile("black_screen_frag.wgsl") },
        });
    }
    defer fs_module.release();
    // straight replace blend (src * 1 + dst * 0)
    const blend = gpu.BlendState{
        .color = .{
            .operation = .add,
            .src_factor = .one,
            .dst_factor = .zero,
        },
        .alpha = .{
            .operation = .add,
            .src_factor = .one,
            .dst_factor = .zero,
        },
    };
    const color_target = gpu.ColorTargetState{
        .format = engine.swap_chain_format,
        .blend = &blend,
        .write_mask = gpu.ColorWriteMask.all,
    };
    const fragment = gpu.FragmentState{
        .module = fs_module,
        .entry_point = "main",
        .targets = &.{color_target},
        .constants = null,
    };
    // binding 0: the UniformBufferObject, fragment-stage only
    const bgle = gpu.BindGroupLayout.Entry.buffer(0, .{ .fragment = true }, .uniform, true, 0);
    // bgl is needed outside, for the creation of the uniform_buffer in main
    const bgl_tmp = engine.device.createBindGroupLayout(
        &gpu.BindGroupLayout.Descriptor{
            .entries = &.{bgle},
        },
    );
    defer {
        // In frame we don't need to use bgl, so we can release it inside this function, else we pass bgl
        if (bgl == null) {
            bgl_tmp.release();
        } else {
            bgl.?.* = bgl_tmp;
        }
    }
    const bind_group_layouts = [_]gpu.BindGroupLayout{bgl_tmp};
    const pipeline_layout = engine.device.createPipelineLayout(&.{
        .bind_group_layouts = &bind_group_layouts,
    });
    defer pipeline_layout.release();
    const pipeline_descriptor = gpu.RenderPipeline.Descriptor{
        .fragment = &fragment,
        .layout = pipeline_layout,
        .depth_stencil = null,
        .vertex = .{
            .module = vs_module,
            .entry_point = "main",
            .buffers = &.{vertex_buffer_layout},
        },
        .multisample = .{
            .count = 1,
            .mask = 0xFFFFFFFF,
            .alpha_to_coverage_enabled = false,
        },
        .primitive = .{
            .front_face = .ccw,
            .cull_mode = .none,
            .topology = .triangle_list,
            .strip_index_format = .none,
        },
    };
    // Create the render pipeline. Even if the shader compilation succeeded, this could fail if the
    // shader is missing a `main` entrypoint.
    engine.device.pushErrorScope(.validation);
    const pipeline = engine.device.createRenderPipeline(&pipeline_descriptor);
    // popErrorScope() returns always true, (unless maybe it fails to capture the error scope?)
    _ = engine.device.popErrorScope(&gpu.ErrorCallback.init(*bool, &error_occurred, struct {
        fn callback(ctx: *bool, typ: gpu.ErrorType, message: [*:0]const u8) void {
            if (typ != .noError) {
                std.debug.print("🔴🔴🔴🔴:\n{s}\n", .{message});
                ctx.* = true;
            }
        }
    }.callback));
    if (error_occurred) {
        // Retry with black_screen_frag which we know will work.
        return recreatePipeline(engine, @embedFile("black_screen_frag.wgsl"), bgl);
    }
    return pipeline;
}
|
shaderexp/main.zig
|
usingnamespace @import("root").preamble;
pub const PixelFormat = enum {
    rgb, // 24 bpp rgb
    rgba, // 32 bpp with alpha
    rgbx, // 32 bpp, alpha ignored

    /// Storage size of one pixel in bytes.
    pub fn bytesPerPixel(fmt: @This()) usize {
        return if (fmt == .rgb) 3 else 4;
    }

    /// Number of bytes per pixel that carry channel data (padding excluded).
    pub fn meaningfulBytesPerPixel(fmt: @This()) usize {
        return if (fmt == .rgba) 4 else 3;
    }

    /// Whether a buffer of `actual` pixels can be reinterpreted wholesale
    /// as `candidate` pixels: only for identical formats, or rgba read as
    /// rgbx (alpha byte simply ignored).
    pub fn canReadManyAs(actual: @This(), candidate: @This()) bool {
        return actual == candidate or (candidate == .rgbx and actual == .rgba);
    }

    /// Whether a single `actual` pixel can be read as `candidate`,
    /// allowing per-pixel conversion (any 4-byte format reads as rgb).
    pub fn canReadAs(actual: @This(), candidate: @This()) bool {
        if (actual.canReadManyAs(candidate)) return true;
        return candidate == .rgb and (actual == .rgba or actual == .rgbx);
    }

    // @TODO: Check if the following two always are correct... I think they are??
    pub fn canWriteMany(actual: @This(), candidate: @This()) bool {
        return candidate.canReadManyAs(actual);
    }

    pub fn canWrite(actual: @This(), candidate: @This()) bool {
        return candidate.canReadAs(actual);
    }

    /// Only rgba carries meaningful alpha.
    pub fn hasAlpha(fmt: @This()) bool {
        return fmt == .rgba;
    }
};
// Per-pixel conversion: any 4-byte format may be read as rgb.
test "canReadAs" {
    try std.testing.expect(PixelFormat.rgba.canReadAs(.rgb));
    try std.testing.expect(PixelFormat.rgbx.canReadAs(.rgb));
    try std.testing.expect(PixelFormat.rgba.canReadAs(.rgbx));
    try std.testing.expect(!PixelFormat.rgbx.canReadAs(.rgba));
}
// Bulk reinterpretation: only rgba -> rgbx is allowed (stride must match).
test "canReadManyAs" {
    try std.testing.expect(!PixelFormat.rgba.canReadManyAs(.rgb));
    try std.testing.expect(!PixelFormat.rgbx.canReadManyAs(.rgb));
    try std.testing.expect(PixelFormat.rgba.canReadManyAs(.rgbx));
    try std.testing.expect(!PixelFormat.rgbx.canReadManyAs(.rgba));
}
|
lib/graphics/pixel_format.zig
|
const std = @import("std");
/// Optional device features that may be requested at adapter/device
/// creation. NOTE(review): values presumably mirror Dawn's WGPUFeatureName
/// constants (0x3e8+ are Dawn extensions) — confirm against the C header.
pub const Feature = enum(u32) {
    depth24_unorm_stencil8 = 0x00000002,
    depth32_float_stencil8 = 0x00000003,
    timestamp_query = 0x00000004,
    pipeline_statistics_query = 0x00000005,
    texture_compression_bc = 0x00000006,
    texture_compression_etc2 = 0x00000007,
    texture_compression_astc = 0x00000008,
    indirect_first_instance = 0x00000009,
    depth_clamping = 0x000003e8,
    dawn_shader_float16 = 0x000003e9,
    dawn_internal_usages = 0x000003ea,
    dawn_multi_planar_formats = 0x000003eb,
    dawn_native = 0x000003ec,
};
/// Sampler behavior for texture coordinates outside [0, 1].
pub const AddressMode = enum(u32) {
    repeat = 0x00000000,
    mirror_repeat = 0x00000001,
    clamp_to_edge = 0x00000002,
};
/// Swap chain presentation strategy (vsync behavior).
pub const PresentMode = enum(u32) {
    immediate = 0x00000000,
    mailbox = 0x00000001,
    fifo = 0x00000002,
};
pub const AlphaMode = enum(u32) {
    premultiplied = 0x00000000,
    unpremultiplied = 0x00000001,
};
/// Blend equation factors.
/// NOTE(review): `oneMinusSrcAlpha` breaks this file's snake_case naming
/// (cf. `one_minus_src`); renaming would break existing callers, so it is
/// left as-is — consider a coordinated rename.
pub const BlendFactor = enum(u32) {
    zero = 0x00000000,
    one = 0x00000001,
    src = 0x00000002,
    one_minus_src = 0x00000003,
    src_alpha = 0x00000004,
    oneMinusSrcAlpha = 0x00000005,
    dst = 0x00000006,
    one_minus_dst = 0x00000007,
    dst_alpha = 0x00000008,
    one_minus_dst_alpha = 0x00000009,
    src_alpha_saturated = 0x0000000A,
    constant = 0x0000000B,
    one_minus_constant = 0x0000000C,
};
pub const BlendOperation = enum(u32) {
    add = 0x00000000,
    subtract = 0x00000001,
    reverse_subtract = 0x00000002,
    min = 0x00000003,
    max = 0x00000004,
};
/// Depth/stencil comparison; `none` means no comparison configured.
pub const CompareFunction = enum(u32) {
    none = 0x00000000,
    never = 0x00000001,
    less = 0x00000002,
    less_equal = 0x00000003,
    greater = 0x00000004,
    greater_equal = 0x00000005,
    equal = 0x00000006,
    not_equal = 0x00000007,
    always = 0x00000008,
};
pub const ComputePassTimestampLocation = enum(u32) {
    beginning = 0x00000000,
    end = 0x00000001,
};
pub const CullMode = enum(u32) {
    none = 0x00000000,
    front = 0x00000001,
    back = 0x00000002,
};
/// Which error class an error scope (pushErrorScope) captures.
pub const ErrorFilter = enum(u32) {
    validation = 0x00000000,
    out_of_memory = 0x00000001,
};
/// Error classification reported to error callbacks.
pub const ErrorType = enum(u32) {
    noError = 0x00000000,
    validation = 0x00000001,
    out_of_memory = 0x00000002,
    unknown = 0x00000003,
    device_lost = 0x00000004,
};
pub const FilterMode = enum(u32) {
    nearest = 0x00000000,
    linear = 0x00000001,
};
/// Winding order that defines a front-facing triangle.
pub const FrontFace = enum(u32) {
    ccw = 0x00000000,
    cw = 0x00000001,
};
pub const IndexFormat = enum(u32) {
    none = 0x00000000,
    uint16 = 0x00000001,
    uint32 = 0x00000002,
};
/// What happens to an attachment at the start of a render pass.
pub const LoadOp = enum(u32) {
    none = 0x00000000,
    clear = 0x00000001,
    load = 0x00000002,
};
pub const LoggingType = enum(u32) {
    verbose = 0x00000000,
    info = 0x00000001,
    warning = 0x00000002,
    err = 0x00000003,
};
pub const PipelineStatistic = enum(u32) {
    vertex_shader_invocations = 0x00000000,
    clipper_invocations = 0x00000001,
    clipper_primitives_out = 0x00000002,
    fragment_shader_invocations = 0x00000003,
    compute_shader_invocations = 0x00000004,
};
pub const PowerPreference = enum(u32) {
    none = 0x00000000,
    low_power = 0x00000001,
    high_performance = 0x00000002,
};
pub const PredefinedColorSpace = enum(u32) {
    none = 0x00000000,
    srgb = 0x00000001,
};
pub const PrimitiveTopology = enum(u32) {
    point_list = 0x00000000,
    line_list = 0x00000001,
    line_strip = 0x00000002,
    triangle_list = 0x00000003,
    triangle_strip = 0x00000004,
};
pub const QueryType = enum(u32) {
    occlusion = 0x00000000,
    pipeline_statistics = 0x00000001,
    timestamp = 0x00000002,
};
pub const RenderPassTimestampLocation = enum(u32) {
    beginning = 0x00000000,
    end = 0x00000001,
};
pub const StencilOperation = enum(u32) {
    keep = 0x00000000,
    zero = 0x00000001,
    replace = 0x00000002,
    invert = 0x00000003,
    increment_clamp = 0x00000004,
    decrement_clamp = 0x00000005,
    increment_wrap = 0x00000006,
    decrement_wrap = 0x00000007,
};
pub const StorageTextureAccess = enum(u32) {
    none = 0x00000000,
    write_only = 0x00000001,
};
/// What happens to an attachment at the end of a render pass.
pub const StoreOp = enum(u32) {
    none = 0x00000000,
    store = 0x00000001,
    discard = 0x00000002,
};
/// Vertex attribute data layout: component type x component count.
pub const VertexFormat = enum(u32) {
    none = 0x00000000,
    uint8x2 = 0x00000001,
    uint8x4 = 0x00000002,
    sint8x2 = 0x00000003,
    sint8x4 = 0x00000004,
    unorm8x2 = 0x00000005,
    unorm8x4 = 0x00000006,
    snorm8x2 = 0x00000007,
    snorm8x4 = 0x00000008,
    uint16x2 = 0x00000009,
    uint16x4 = 0x0000000A,
    sint16x2 = 0x0000000B,
    sint16x4 = 0x0000000C,
    unorm16x2 = 0x0000000D,
    unorm16x4 = 0x0000000E,
    snorm16x2 = 0x0000000F,
    snorm16x4 = 0x00000010,
    float16x2 = 0x00000011,
    float16x4 = 0x00000012,
    float32 = 0x00000013,
    float32x2 = 0x00000014,
    float32x3 = 0x00000015,
    float32x4 = 0x00000016,
    uint32 = 0x00000017,
    uint32x2 = 0x00000018,
    uint32x3 = 0x00000019,
    uint32x4 = 0x0000001A,
    sint32 = 0x0000001B,
    sint32x2 = 0x0000001C,
    sint32x3 = 0x0000001D,
    sint32x4 = 0x0000001E,
};
/// Whether a vertex buffer advances per vertex or per instance.
pub const VertexStepMode = enum(u32) {
    vertex = 0x00000000,
    instance = 0x00000001,
};
/// Buffer usage values. NOTE(review): these are power-of-two bit flags
/// that WebGPU combines with OR, but an exhaustive `enum(u32)` cannot
/// represent a combined value — confirm whether a packed struct / flags
/// type is intended instead.
pub const BufferUsage = enum(u32) {
    none = 0x00000000,
    map_read = 0x00000001,
    map_write = 0x00000002,
    copy_src = 0x00000004,
    copy_dst = 0x00000008,
    index = 0x00000010,
    vertex = 0x00000020,
    uniform = 0x00000040,
    storage = 0x00000080,
    indirect = 0x00000100,
    query_resolve = 0x00000200,
};
/// Per-channel write mask bits; `all` is the OR of the four channels.
pub const ColorWriteMask = enum(u32) {
    none = 0x00000000,
    red = 0x00000001,
    green = 0x00000002,
    blue = 0x00000004,
    alpha = 0x00000008,
    all = 0x0000000F,
};
/// Shader stage visibility bits (same bit-flag caveat as BufferUsage).
pub const ShaderStage = enum(u32) {
    none = 0x00000000,
    vertex = 0x00000001,
    fragment = 0x00000002,
    compute = 0x00000004,
};
test "name" {
    try std.testing.expect(std.mem.eql(u8, @tagName(Feature.timestamp_query), "timestamp_query"));
}
// Reference every declaration so `zig test` semantically analyzes them all.
test {
    _ = Feature;
    _ = AddressMode;
    _ = PresentMode;
    _ = AlphaMode;
    _ = BlendFactor;
    _ = BlendOperation;
    _ = CompareFunction;
    _ = ComputePassTimestampLocation;
    _ = CullMode;
    _ = ErrorFilter;
    _ = ErrorType;
    _ = FilterMode;
    _ = FrontFace;
    _ = IndexFormat;
    _ = LoadOp;
    _ = LoggingType;
    _ = PipelineStatistic;
    _ = PowerPreference;
    _ = PredefinedColorSpace;
    _ = PrimitiveTopology;
    _ = QueryType;
    _ = RenderPassTimestampLocation;
    _ = StencilOperation;
    _ = StorageTextureAccess;
    _ = StoreOp;
    _ = VertexFormat;
    _ = VertexStepMode;
    _ = BufferUsage;
    _ = ColorWriteMask;
    _ = ShaderStage;
}
|
gpu/src/enums.zig
|
const std = @import("std");
const mem = std.mem;
// This file *is* the struct: fields below are instance fields of DecimalNumber.
const DecimalNumber = @This();
// Allocator used to allocate (and later free) `array`.
allocator: *mem.Allocator,
// Membership table: array[cp - lo] is true iff cp has Unicode general
// category Decimal_Number (Nd).
array: []bool,
// Lowest covered code point: U+0030 '0'.
lo: u21 = 48,
// Highest covered code point (auto-generated bound).
hi: u21 = 130041,
// Allocates and fills the Decimal_Number membership table.
// The table covers code points lo..hi; entry [cp - lo] is true when cp is
// a decimal digit. Caller must release with deinit().
pub fn init(allocator: *mem.Allocator) !DecimalNumber {
    var self = DecimalNumber{
        .allocator = allocator,
        .array = try allocator.alloc(bool, 129994),
    };
    mem.set(bool, self.array, false);
    // Inclusive [first, last] runs of table indices (code point minus lo)
    // that belong to the Decimal_Number category.
    const ranges = [_][2]u21{
        .{ 0, 9 },
        .{ 1584, 1593 },
        .{ 1728, 1737 },
        .{ 1936, 1945 },
        .{ 2358, 2367 },
        .{ 2486, 2495 },
        .{ 2614, 2623 },
        .{ 2742, 2751 },
        .{ 2870, 2879 },
        .{ 2998, 3007 },
        .{ 3126, 3135 },
        .{ 3254, 3263 },
        .{ 3382, 3391 },
        .{ 3510, 3519 },
        .{ 3616, 3625 },
        .{ 3744, 3753 },
        .{ 3824, 3833 },
        .{ 4112, 4121 },
        .{ 4192, 4201 },
        .{ 6064, 6073 },
        .{ 6112, 6121 },
        .{ 6422, 6431 },
        .{ 6560, 6569 },
        .{ 6736, 6745 },
        .{ 6752, 6761 },
        .{ 6944, 6953 },
        .{ 7040, 7049 },
        .{ 7184, 7193 },
        .{ 7200, 7209 },
        .{ 42480, 42489 },
        .{ 43168, 43177 },
        .{ 43216, 43225 },
        .{ 43424, 43433 },
        .{ 43456, 43465 },
        .{ 43552, 43561 },
        .{ 43968, 43977 },
        .{ 65248, 65257 },
        .{ 66672, 66681 },
        .{ 68864, 68873 },
        .{ 69686, 69695 },
        .{ 69824, 69833 },
        .{ 69894, 69903 },
        .{ 70048, 70057 },
        .{ 70336, 70345 },
        .{ 70688, 70697 },
        .{ 70816, 70825 },
        .{ 71200, 71209 },
        .{ 71312, 71321 },
        .{ 71424, 71433 },
        .{ 71856, 71865 },
        .{ 71968, 71977 },
        .{ 72736, 72745 },
        .{ 72992, 73001 },
        .{ 73072, 73081 },
        .{ 92720, 92729 },
        .{ 92960, 92969 },
        .{ 120734, 120783 },
        .{ 123152, 123161 },
        .{ 123584, 123593 },
        .{ 125216, 125225 },
        .{ 129984, 129993 },
    };
    for (ranges) |range| {
        var i: u21 = range[0];
        while (i <= range[1]) : (i += 1) {
            self.array[i] = true;
        }
    }
    // Placeholder: 0. Struct name, 1. Code point kind
    return self;
}
// Releases the membership table allocated in init().
pub fn deinit(self: *DecimalNumber) void {
    self.allocator.free(self.array);
}
// isDecimalNumber reports whether code point cp has the Decimal_Number
// (Nd) general category. Code points outside [lo, hi] (or past the end of
// the table) are never decimal digits.
pub fn isDecimalNumber(self: DecimalNumber, cp: u21) bool {
    if (cp < self.lo) return false;
    if (cp > self.hi) return false;
    const offset = cp - self.lo;
    if (offset >= self.array.len) return false;
    return self.array[offset];
}
|
src/components/autogen/DerivedGeneralCategory/DecimalNumber.zig
|
const std = @import("std");
const mem = std.mem;
const testing = std.testing;
const ascii = std.ascii;
const nodes = @import("nodes.zig");
const htmlentities = @import("htmlentities");
const zunicode = @import("zunicode");
// Returns true when ch terminates a line (LF or CR).
pub fn isLineEndChar(ch: u8) bool {
    return ch == '\n' or ch == '\r';
}
// Returns true for the two horizontal-whitespace characters CommonMark
// treats as indentation: space and tab.
pub fn isSpaceOrTab(ch: u8) bool {
    return ch == ' ' or ch == '\t';
}
// A line is blank when it contains only spaces/tabs up to its first line
// ending (or up to the end of the slice). Any other character makes it
// non-blank; an empty slice is blank.
pub fn isBlank(s: []const u8) bool {
    for (s) |ch| {
        if (ch == '\n' or ch == '\r') return true;
        if (ch != ' ' and ch != '\t') return false;
    }
    return true;
}
test "isBlank" {
    try testing.expect(isBlank(""));
    // Blank up to the first line ending; content after it is ignored.
    try testing.expect(isBlank("\nx"));
    try testing.expect(isBlank(" \t\t \r"));
    try testing.expect(!isBlank("e"));
    try testing.expect(!isBlank(" \t e "));
}
// ASCII whitespace set used by the trim helpers: tab, LF, VT, FF, CR, space.
const SPACES = "\t\n\x0b\x0c\r ";
// Returns s with leading SPACES characters removed (no allocation; returns
// a subslice of s).
pub fn ltrim(s: []const u8) []const u8 {
    return mem.trimLeft(u8, s, SPACES);
}
test "ltrim" {
    try testing.expectEqualStrings("abc", ltrim("abc"));
    try testing.expectEqualStrings("abc", ltrim("  abc"));
    try testing.expectEqualStrings("abc", ltrim(" \n\n \t\r abc"));
    try testing.expectEqualStrings("abc \n zz \n   ", ltrim("\nabc \n zz \n   "));
}
// Returns s with trailing SPACES characters removed (subslice of s).
pub fn rtrim(s: []const u8) []const u8 {
    return mem.trimRight(u8, s, SPACES);
}
test "rtrim" {
    try testing.expectEqualStrings("abc", rtrim("abc"));
    try testing.expectEqualStrings("abc", rtrim("abc  "));
    try testing.expectEqualStrings("abc", rtrim("abc \n\n \t\r "));
    try testing.expectEqualStrings(" \nabc \n zz", rtrim(" \nabc \n zz \n"));
}
// Returns s with both leading and trailing SPACES removed (subslice of s).
pub fn trim(s: []const u8) []const u8 {
    return mem.trim(u8, s, SPACES);
}
test "trim" {
    try testing.expectEqualStrings("abc", trim("abc"));
    try testing.expectEqualStrings("abc", trim("  abc "));
    try testing.expectEqualStrings("abc", trim(" abc \n\n \t\r "));
    try testing.expectEqualStrings("abc \n zz", trim(" \nabc \n zz \n"));
}
// Trims al's contents in place: computes the trimmed subslice, and when it
// differs from the full buffer, shifts it to the front and shrinks the
// length. No-op (no copy) when nothing needs trimming.
pub fn trimIt(al: *std.ArrayList(u8)) void {
    const trimmed = trim(al.items);
    const unchanged = al.items.ptr == trimmed.ptr and al.items.len == trimmed.len;
    if (unchanged) return;
    // Forward copy is safe: trimmed points at or after al.items.ptr.
    std.mem.copy(u8, al.items, trimmed);
    al.items.len = trimmed.len;
}
test "trimIt" {
    var buf = std.ArrayList(u8).init(std.testing.allocator);
    defer buf.deinit();
    // Already trimmed: list untouched.
    try buf.appendSlice("abc");
    trimIt(&buf);
    try std.testing.expectEqualStrings("abc", buf.items);
    // Leading whitespace only.
    buf.items.len = 0;
    try buf.appendSlice("  \tabc");
    trimIt(&buf);
    try std.testing.expectEqualStrings("abc", buf.items);
    // Whitespace on both sides.
    buf.items.len = 0;
    try buf.appendSlice(" \r abc \n ");
    trimIt(&buf);
    try std.testing.expectEqualStrings("abc", buf.items);
}
// Removes a trailing ATX-heading closing sequence ("## …") from s, per
// CommonMark: the trailing '#' run is only removed when it is preceded by a
// space or tab. A string consisting entirely of '#' is returned unchanged.
pub fn chopTrailingHashtags(s: []const u8) []const u8 {
    const r = rtrim(s);
    if (r.len == 0) return r;
    // n = length of the prefix before the trailing '#' run.
    var n: usize = r.len;
    while (n > 0 and r[n - 1] == '#') n -= 1;
    if (n == 0) return r; // nothing but hashtags: keep as-is
    if (n == r.len) return r; // no trailing hashtags at all
    if (!isSpaceOrTab(r[n - 1])) return r; // run not preceded by whitespace
    return rtrim(r[0 .. n - 1]);
}
test "chopTrailingHashtags" {
    try testing.expectEqualStrings("xyz", chopTrailingHashtags("xyz"));
    // '#' not preceded by whitespace stays.
    try testing.expectEqualStrings("xyz#", chopTrailingHashtags("xyz#"));
    try testing.expectEqualStrings("xyz###", chopTrailingHashtags("xyz###"));
    try testing.expectEqualStrings("xyz###", chopTrailingHashtags("xyz###  "));
    try testing.expectEqualStrings("xyz###", chopTrailingHashtags("xyz### #"));
    try testing.expectEqualStrings("xyz", chopTrailingHashtags("xyz  "));
    // Whitespace before the run: run and whitespace both removed.
    try testing.expectEqualStrings("xyz", chopTrailingHashtags("xyz  ##"));
    try testing.expectEqualStrings("xyz", chopTrailingHashtags("xyz ##"));
}
// Normalizes the text of an inline code span: each line ending ('\n',
// '\r', or '\r\n') becomes a single space, then one leading and one
// trailing space are stripped when both are present and the content is not
// all spaces (CommonMark "code spans"). Caller owns the returned slice.
pub fn normalizeCode(allocator: *mem.Allocator, s: []const u8) ![]u8 {
    var code = try std.ArrayList(u8).initCapacity(allocator, s.len);
    errdefer code.deinit();
    var i: usize = 0;
    var contains_nonspace = false;
    while (i < s.len) {
        switch (s[i]) {
            '\r' => {
                // For "\r\n" emit nothing here — the '\n' branch emits the
                // single replacement space. A lone '\r' emits its own.
                if (i + 1 == s.len or s[i + 1] != '\n') {
                    try code.append(' ');
                }
            },
            '\n' => {
                try code.append(' ');
            },
            else => try code.append(s[i]),
        }
        if (s[i] != ' ') {
            contains_nonspace = true;
        }
        i += 1;
    }
    // Strip exactly one space from each end, only when both ends are spaces.
    if (contains_nonspace and code.items.len != 0 and code.items[0] == ' ' and code.items[code.items.len - 1] == ' ') {
        _ = code.orderedRemove(0);
        _ = code.pop();
    }
    return code.toOwnedSlice();
}
// Input/expected-output pair for table-driven string-function tests.
const Case = struct {
    in: []const u8,
    out: []const u8,
};
// Runs `function` over every case with the testing allocator, freeing each
// result, and asserts the output matches.
fn testCases(function: fn (*mem.Allocator, []const u8) anyerror![]u8, cases: []const Case) !void {
    for (cases) |case| {
        const result = try function(std.testing.allocator, case.in);
        defer std.testing.allocator.free(result);
        try testing.expectEqualStrings(case.out, result);
    }
}
test "normalizeCode" {
    try testCases(normalizeCode, &[_]Case{
        .{ .in = "qwe", .out = "qwe" },
        .{ .in = " qwe ", .out = "qwe" },
        // Only one space is stripped from each end.
        .{ .in = "  qwe  ", .out = " qwe " },
        // "\r\n" collapses to a single space; lone '\r' becomes one space.
        .{ .in = " abc\rdef'\r\ndef ", .out = "abc def' def" },
    });
}
// Removes trailing blank lines from `line` in place: scans back over
// spaces, tabs and line endings; if everything is blank the list is
// emptied, otherwise the list is cut at the first line ending that follows
// the last non-blank character.
pub fn removeTrailingBlankLines(line: *std.ArrayList(u8)) void {
    // Fix: guard against empty input — `line.items.len - 1` on a zero-length
    // list underflows usize and panics.
    if (line.items.len == 0) return;
    var i = line.items.len - 1;
    // Walk backwards while we only see blank-line characters.
    while (true) : (i -= 1) {
        const c = line.items[i];
        if (c != ' ' and c != '\t' and !isLineEndChar(c)) {
            break;
        }
        if (i == 0) {
            // Entirely blank: drop everything.
            line.items.len = 0;
            return;
        }
    }
    // i now sits on the last non-blank character; truncate at the next
    // line ending after it.
    while (i < line.items.len) : (i += 1) {
        if (!isLineEndChar(line.items[i])) continue;
        line.items.len = i;
        break;
    }
}
test "removeTrailingBlankLines" {
    const cases = [_]Case{
        // Entirely blank input is emptied.
        .{ .in = "\n\n   \r\t\n  ", .out = "" },
        // Content kept; truncated at the line ending after the last text.
        .{ .in = "yep\nok\n\n  ", .out = "yep\nok" },
        // Trailing spaces with no line ending are preserved.
        .{ .in = "yep  ", .out = "yep  " },
    };
    var line = std.ArrayList(u8).init(std.testing.allocator);
    defer line.deinit();
    for (cases) |case| {
        line.items.len = 0;
        try line.appendSlice(case.in);
        removeTrailingBlankLines(&line);
        try testing.expectEqualStrings(case.out, line.items);
    }
}
// Appends the UTF-8 encoding of in_cp to al, substituting U+FFFD for
// values that cannot or should not be encoded.
fn encodeUtf8Into(in_cp: u21, al: *std.ArrayList(u8)) !void {
    // utf8Encode throws:
    // - Utf8CannotEncodeSurrogateHalf, which we guard against by
    //   rewriting 0xd800..0xdfff to 0xfffd.
    // - CodepointTooLarge, which we guard against by rewriting 0x110000+
    //   to 0xfffd.
    // NUL (0) is also rewritten to 0xfffd.
    var cp = in_cp;
    if (cp == 0 or (cp >= 0xd800 and cp <= 0xdfff) or cp >= 0x110000) {
        cp = 0xFFFD;
    }
    var sequence = [4]u8{ 0, 0, 0, 0 };
    // Cannot fail: all problematic values were rewritten above.
    const len = std.unicode.utf8Encode(cp, &sequence) catch unreachable;
    try al.appendSlice(sequence[0..len]);
}
// Bounds on the entity name length accepted by unescapeInto (not counting
// the leading '&' / trailing ';').
const ENTITY_MIN_LENGTH: u8 = 2;
const ENTITY_MAX_LENGTH: u8 = 32;
// Attempts to decode one HTML entity at the start of `text` (which is the
// input *after* a '&'). On success appends the decoded characters to `out`
// and returns the number of input bytes consumed (through the ';');
// returns null when no valid entity is found.
pub fn unescapeInto(text: []const u8, out: *std.ArrayList(u8)) !?usize {
    // Numeric character reference: "#123;" or "#x1F4A9;".
    if (text.len >= 3 and text[0] == '#') {
        var codepoint: u32 = 0;
        var i: usize = 0;
        const num_digits = block: {
            if (ascii.isDigit(text[1])) {
                i = 1;
                while (i < text.len and ascii.isDigit(text[i])) {
                    codepoint = (codepoint * 10) + (@as(u32, text[i]) - '0');
                    // Clamp so overflow can't occur; 0x110000 is already
                    // out of Unicode range and becomes U+FFFD downstream.
                    codepoint = std.math.min(codepoint, 0x11_0000);
                    i += 1;
                }
                break :block i - 1;
            } else if (text[1] == 'x' or text[1] == 'X') {
                i = 2;
                while (i < text.len and ascii.isXDigit(text[i])) {
                    // (c | 32) % 39 - 9 maps ASCII hex digits ('0'-'9',
                    // 'a'-'f'/'A'-'F') to their numeric value 0..15.
                    codepoint = (codepoint * 16) + (@as(u32, text[i]) | 32) % 39 - 9;
                    codepoint = std.math.min(codepoint, 0x11_0000);
                    i += 1;
                }
                break :block i - 2;
            }
            break :block 0;
        };
        // Valid only with 1..8 digits and a terminating ';'.
        if (num_digits >= 1 and num_digits <= 8 and i < text.len and text[i] == ';') {
            try encodeUtf8Into(@truncate(u21, codepoint), out);
            return i + 1;
        }
    }
    // Named entity: scan for ';' within the maximum entity length.
    const size = std.math.min(text.len, ENTITY_MAX_LENGTH);
    var i = ENTITY_MIN_LENGTH;
    while (i < size) : (i += 1) {
        if (text[i] == ' ')
            return null;
        if (text[i] == ';') {
            // Build "&name;" for the lookup: '&' + text[0..i] + ';' (the
            // buffer is pre-filled with ';' so key[i + 1] is the closer).
            var key = [_]u8{'&'} ++ [_]u8{';'} ** (ENTITY_MAX_LENGTH + 1);
            mem.copy(u8, key[1..], text[0..i]);
            if (htmlentities.lookup(key[0 .. i + 2])) |item| {
                try out.appendSlice(item.characters);
                return i + 1;
            }
        }
    }
    return null;
}
// Copies `html` into `out`, decoding HTML entities as it goes. Text with
// no '&' at all is appended in a single slice copy.
fn unescapeHtmlInto(html: []const u8, out: *std.ArrayList(u8)) !void {
    var size = html.len;
    var i: usize = 0;
    while (i < size) {
        const org = i;
        // Copy the literal run up to the next '&'.
        while (i < size and html[i] != '&') : (i += 1) {}
        if (i > org) {
            if (org == 0 and i >= size) {
                // Fast path: no '&' anywhere.
                try out.appendSlice(html);
                return;
            }
            try out.appendSlice(html[org..i]);
        }
        if (i >= size)
            return;
        // Skip the '&' and try to decode an entity after it; on failure
        // the '&' is emitted literally and scanning resumes.
        i += 1;
        if (try unescapeInto(html[i..], out)) |unescaped_size| {
            i += unescaped_size;
        } else {
            try out.append('&');
        }
    }
}
// Returns a newly-allocated copy of `html` with HTML entities decoded.
// Caller owns the returned slice.
pub fn unescapeHtml(allocator: *mem.Allocator, html: []const u8) ![]u8 {
    var out = std.ArrayList(u8).init(allocator);
    errdefer out.deinit();
    try unescapeHtmlInto(html, &out);
    return out.toOwnedSlice();
}
test "unescapeHtml" {
    try testCases(unescapeHtml, &[_]Case{
        .{ .in = "test", .out = "test" },
        .{ .in = "テスト", .out = "テスト" },
        .{ .in = "test", .out = "test" },
        .{ .in = "テスト", .out = "テスト" },
        // "Although HTML5 does accept some entity references without a trailing semicolon
        // (such as ©), these are not recognized here, because it makes the grammar too
        // ambiguous:"
        .{ .in = "…éÉ⇉Ⓢ", .out = "…éÉ⇉Ⓢ" },
    });
}
// Produces the destination URL for an autolink: trims surrounding
// whitespace, prefixes "mailto:" for email autolinks, and decodes HTML
// entities. Returns an empty slice for all-whitespace input. Caller owns
// the returned slice (the empty-slice case needs no free).
pub fn cleanAutolink(allocator: *mem.Allocator, url: []const u8, kind: nodes.AutolinkType) ![]u8 {
    var trimmed = trim(url);
    if (trimmed.len == 0)
        return &[_]u8{};
    var buf = try std.ArrayList(u8).initCapacity(allocator, trimmed.len);
    errdefer buf.deinit();
    if (kind == .Email)
        try buf.appendSlice("mailto:");
    try unescapeHtmlInto(trimmed, &buf);
    return buf.toOwnedSlice();
}
test "cleanAutolink" {
    var email = try cleanAutolink(std.testing.allocator, "  hello@world.example ", .Email);
    defer std.testing.allocator.free(email);
    // Fix: the expected value previously contained the placeholder
    // "mailto:<EMAIL>", which can never match — cleanAutolink trims the
    // input and prepends "mailto:", yielding the address itself.
    try testing.expectEqualStrings("mailto:hello@world.example", email);
    var uri = try cleanAutolink(std.testing.allocator, " www.com   ", .URI);
    defer std.testing.allocator.free(uri);
    try testing.expectEqualStrings("www.com", uri);
}
// Resolves backslash escapes: "\\X" where X is ASCII punctuation becomes
// just "X"; any other backslash is kept literally. Caller owns the result.
fn unescape(allocator: *mem.Allocator, s: []const u8) ![]u8 {
    var out = try std.ArrayList(u8).initCapacity(allocator, s.len);
    errdefer out.deinit();
    var i: usize = 0;
    while (i < s.len) : (i += 1) {
        const escapes_next = s[i] == '\\' and i + 1 < s.len and ascii.isPunct(s[i + 1]);
        if (escapes_next) i += 1; // skip the backslash, emit the punctuation
        try out.append(s[i]);
    }
    return out.toOwnedSlice();
}
// Normalizes a link destination: trims whitespace, decodes HTML entities,
// then resolves backslash escapes. Empty/whitespace-only input yields an
// empty slice. Caller owns the returned slice.
pub fn cleanUrl(allocator: *mem.Allocator, url: []const u8) ![]u8 {
    var trimmed = trim(url);
    if (trimmed.len == 0)
        return &[_]u8{};
    // Intermediate entity-decoded buffer is freed before returning.
    var b = try unescapeHtml(allocator, trimmed);
    defer allocator.free(b);
    return unescape(allocator, b);
}
test "cleanUrl" {
    // Trim + backslash-escape resolution in one pass.
    var url = try cleanUrl(std.testing.allocator, "  \\(hello\\)@world ");
    defer std.testing.allocator.free(url);
    try testing.expectEqualStrings("(hello)@world", url);
}
// Normalizes a link title: strips one pair of matching delimiters
// ('...', (...) or "..."), decodes HTML entities, then resolves backslash
// escapes. Caller owns the returned slice.
pub fn cleanTitle(allocator: *mem.Allocator, title: []const u8) ![]u8 {
    if (title.len == 0)
        return &[_]u8{};
    const first = title[0];
    const last = title[title.len - 1];
    // Fix: only strip delimiters when there are at least two characters.
    // Previously a one-character title such as "'" matched (first == last)
    // and produced the invalid slice title[1 .. 0].
    const delimited = title.len >= 2 and
        ((first == '\'' and last == '\'') or (first == '(' and last == ')') or (first == '"' and last == '"'));
    var b = if (delimited)
        try unescapeHtml(allocator, title[1 .. title.len - 1])
    else
        try unescapeHtml(allocator, title);
    defer allocator.free(b);
    return unescape(allocator, b);
}
test "cleanTitle" {
    try testCases(cleanTitle, &[_]Case{
        // Escaped quote is not a delimiter pair.
        .{ .in = "\\'title", .out = "'title" },
        .{ .in = "'title'", .out = "title" },
        .{ .in = "(test)", .out = "test" },
        .{ .in = "\"テスト\"", .out = "テスト" },
        // Entities inside delimiters are decoded.
        .{ .in = "'…éÉ⇉Ⓢ'", .out = "…éÉ⇉Ⓢ" },
    });
}
// Normalizes a link-reference label for case-insensitive matching: trims
// the ends, collapses each run of Unicode whitespace to a single ASCII
// space, and lowercases every code point. Caller owns the result.
// NOTE(review): Utf8View.initUnchecked assumes s is valid UTF-8 — invalid
// input is not rejected here; confirm callers guarantee this.
pub fn normalizeLabel(allocator: *mem.Allocator, s: []const u8) ![]u8 {
    var trimmed = trim(s);
    var buffer = try std.ArrayList(u8).initCapacity(allocator, trimmed.len);
    errdefer buffer.deinit();
    var last_was_whitespace = false;
    var view = std.unicode.Utf8View.initUnchecked(trimmed);
    var it = view.iterator();
    while (it.nextCodepoint()) |cp| {
        var rune = @intCast(i32, cp);
        if (zunicode.isSpace(rune)) {
            // Emit one space per whitespace run.
            if (!last_was_whitespace) {
                last_was_whitespace = true;
                try buffer.append(' ');
            }
        } else {
            last_was_whitespace = false;
            var lower = zunicode.toLower(rune);
            try encodeUtf8Into(@intCast(u21, lower), &buffer);
        }
    }
    return buffer.toOwnedSlice();
}
test "normalizeLabel" {
    try testCases(normalizeLabel, &[_]Case{
        .{ .in = "Hello", .out = "hello" },
        // Interior whitespace runs collapse to single spaces.
        .{ .in = "   Y  E  S  ", .out = "y e s" },
        .{ .in = "yÉs", .out = "yés" },
    });
}
// Returns a newly-allocated lowercase copy of s, lowercasing per code
// point via zunicode. Unlike normalizeLabel, invalid UTF-8 is rejected
// (Utf8View.init returns an error). Caller owns the result.
pub fn toLower(allocator: *mem.Allocator, s: []const u8) ![]u8 {
    var buffer = try std.ArrayList(u8).initCapacity(allocator, s.len);
    errdefer buffer.deinit();
    var view = try std.unicode.Utf8View.init(s);
    var it = view.iterator();
    while (it.nextCodepoint()) |cp| {
        var rune = @intCast(i32, cp);
        var lower = zunicode.toLower(rune);
        try encodeUtf8Into(@intCast(u21, lower), &buffer);
    }
    return buffer.toOwnedSlice();
}
test "toLower" {
    try testCases(toLower, &[_]Case{
        .{ .in = "Hello", .out = "hello" },
        // Greek and Cyrillic case mappings go through zunicode.
        .{ .in = "ΑαΒβΓγΔδΕεΖζΗηΘθΙιΚκΛλΜμ", .out = "ααββγγδδεεζζηηθθιικκλλμμ" },
        .{ .in = "АаБбВвГгДдЕеЁёЖжЗзИиЙйКкЛлМмНнОоПпРрСсТтУуФфХхЦцЧчШшЩщЪъЫыЬьЭэЮюЯя", .out = "ааббввггддееёёжжззииййккллммннооппррссттууффххццччшшщщъъыыььээююяя" },
    });
}
// Builds a 256-entry byte-membership table: map[c] is true iff c appears
// in `chars`. Usable at comptime.
pub fn createMap(chars: []const u8) [256]bool {
    var map = [_]bool{false} ** 256;
    for (chars) |ch| {
        map[ch] = true;
    }
    return map;
}
test "createMap" {
    // Evaluated at comptime to confirm the table can be built there.
    comptime {
        const m = createMap("abcxyz");
        try testing.expect(m['a']);
        try testing.expect(m['b']);
        try testing.expect(m['c']);
        try testing.expect(!m['d']);
        try testing.expect(!m['e']);
        try testing.expect(!m['f']);
        try testing.expect(m['x']);
        try testing.expect(!m[0]);
    }
}
|
src/strings.zig
|
const std = @import("std");
const builtin = @import("builtin");
const command = @import("command.zig");
const Builder = @import("Builder.zig");
const SrcEntry = @import("SrcEntry.zig");
const Command = command.Command;
// Top-level usage text printed by `fimbs help`.
const help =
    \\Usage:
    \\  fimbs [command]
    \\  fimbs [command] <ARGS>
    \\
    \\Command:
    \\  help                Display help
    \\  clean               Clean the build dir
    \\  run                 Run the project. Be sure to build it first
    \\  build <ARGS>        Build the project.
    \\  new_project <name>  Create new project
    \\
    \\Run fimbs [command] help to see related args
;
// Usage text for the `build` sub-command.
const build_cmd_help =
    \\Usage:
    \\  fimbs build <ARGS>
    \\
    \\Args:
    \\  debug      Build the project in debug mode
    \\  release    Build the project in release mode
;
/// Entry point: parses argv and dispatches to the matching sub-command
/// (help / clean / run / build / new_project).
pub fn main() anyerror!void {
    var allocator = std.heap.page_allocator;
    const argv = try std.process.argsAlloc(allocator);
    defer std.process.argsFree(allocator, argv);
    var cmd = command.parse(argv[1..], &[_]Command{
        .{ .name = "help", .takes_arg = false },
        .{ .name = "clean", .takes_arg = false },
        .{ .name = "run", .takes_arg = false },
        .{ .name = "build", .takes_arg = true },
        .{ .name = "new_project", .takes_arg = true },
    }) catch |err| {
        // In debug builds surface the raw error (with trace); in release
        // builds print a friendly message and exit.
        if (builtin.mode == .Debug) return err;
        switch (err) {
            error.ExpectedACommand => {
                // Fix: message typo "commad" -> "command".
                fatal("expected a command argument", .{});
            },
            error.UnknownCommand => {
                fatal("unknown command", .{});
            },
            else => |e| return e,
        }
    };
    if (cmd.matches("help")) {
        displayHelp(help);
        return;
    }
    if (cmd.matches("clean")) {
        try deleteBuildDir(Builder.DEFAULT_BUILD_DIR);
        return;
    }
    if (cmd.matches("run")) {
        std.log.info("Run feature is yet to implement", .{});
        return;
    }
    if (cmd.argsOf("new_project")) |args| {
        var args_result = try args.parse(&[_]Command.Arg{
            .{ .name = "name", .takes_value = true },
        });
        if (args_result.valueOf("name")) |project_name| {
            try createNewProject(project_name);
            return;
        }
        // No project name supplied: nothing to do.
        return;
    }
    if (cmd.argsOf("build")) |args_parser| {
        var args = try args_parser.parse(&[_]Command.Arg{
            .{ .name = "help", .takes_value = false },
            .{ .name = "debug", .takes_value = false },
            .{ .name = "release", .takes_value = false },
        });
        if (args.matches("help")) {
            displayHelp(build_cmd_help);
            return;
        }
        var builder = Builder.init(allocator, null, null, null);
        defer builder.deinit();
        // debug/release select the C compiler flags.
        if (args.matches("debug")) {
            builder.cflags = "-DDEBUG";
        }
        if (args.matches("release")) {
            builder.cflags = "-DNDEBUG -O2";
        }
        try scanSourceDir(allocator, "src", &builder.srcs);
        try builder.build();
        return;
    }
}
// Prints a usage text followed by a newline to stderr.
pub fn displayHelp(help_text: []const u8) void {
    std.debug.print("{s}\n", .{help_text});
}
// Logs an error message and terminates the process with exit code 1.
fn fatal(comptime format: []const u8, args: anytype) noreturn {
    std.log.err(format, args);
    std.os.exit(1);
}
// Recursively deletes the build directory (no-op if it does not exist,
// per deleteTree semantics).
fn deleteBuildDir(build_dir: []const u8) !void {
    try std.fs.cwd().deleteTree(build_dir);
}
// Recursively walks `src_dir_name` and appends a SrcEntry for every
// regular file with a ".c" extension to `srcs_arr_list`.
fn scanSourceDir(
    allocator: std.mem.Allocator,
    src_dir_name: []const u8,
    srcs_arr_list: *std.ArrayList(SrcEntry),
) !void {
    var src_dir = try std.fs.cwd().openDir(src_dir_name, .{
        .access_sub_paths = true,
        .iterate = true,
    });
    // Fix: the directory handle was never closed (fd leak).
    defer src_dir.close();
    var src_dir_walker = try src_dir.walk(allocator);
    defer src_dir_walker.deinit();
    while (try src_dir_walker.next()) |entry| {
        if (entry.kind == .File) {
            if (std.mem.eql(u8, std.fs.path.extension(entry.basename), ".c")) {
                var src_entry = try SrcEntry.init(
                    allocator,
                    entry.basename,
                    entry.path,
                    src_dir_name,
                );
                try srcs_arr_list.append(src_entry);
            }
        }
    }
}
// Creates a new project skeleton: <name>/src/main.c containing a
// hello-world program. Fails if the directory already exists.
fn createNewProject(project_name: []const u8) !void {
    var cwd = std.fs.cwd();
    try cwd.makeDir(project_name);
    var project_dir = try cwd.openDir(project_name, .{});
    defer project_dir.close();
    try project_dir.makeDir("src");
    var main_c_file = try project_dir.createFile("src/main.c", .{ .read = true });
    defer main_c_file.close();
    try main_c_file.writeAll(
        \\#include <stdio.h>
        \\
        \\int main(int argc, char **argv)
        \\{
        \\    printf("Hello, World!!\n");
        \\    return 0;
        \\}
    );
}
// Smoke test so `zig test` has something to run for this file.
test "basic test" {
    try std.testing.expectEqual(10, 3 + 7);
}
|
src/main.zig
|
const std = @import("std");
const zang = @import("zang");
const wav = @import("wav");
const common = @import("common.zig");
const c = @import("common/c.zig");
// Audio output configuration consumed by the example harness.
pub const AUDIO_FORMAT: zang.AudioFormat = .signed16_lsb;
pub const AUDIO_SAMPLE_RATE = 44100;
pub const AUDIO_BUFFER_SIZE = 1024;
// On-screen help text for this example.
pub const DESCRIPTION =
    \\example_sampler
    \\
    \\Loop a WAV file.
    \\
    \\Press spacebar to reset the sampler with a randomly
    \\selected speed between 50% and 150%.
    \\
    \\Press 'b' to do the same, but with the sound playing
    \\in reverse.
    \\
    \\Press 'd' to toggle distortion.
;
// Parses a WAV file embedded at compile time and returns a zang.Sample
// whose data slice points directly into the embedded buffer (no copy).
fn readWav(comptime filename: []const u8) !zang.Sample {
    const buf = @embedFile(filename);
    var fbs = std.io.fixedBufferStream(buf);
    var stream = fbs.inStream();
    const Loader = wav.Loader(@TypeOf(stream), true);
    // Reads the header only; fbs.pos afterwards is the start of sample data.
    const preloaded = try Loader.preload(&stream);
    // don't call Loader.load because we're working on a slice, so we can just
    // take a subslice of it
    return zang.Sample{
        .num_channels = preloaded.num_channels,
        .sample_rate = preloaded.sample_rate,
        .format = switch (preloaded.format) {
            .unsigned8 => .unsigned8,
            .signed16_lsb => .signed16_lsb,
            .signed24_lsb => .signed24_lsb,
            .signed32_lsb => .signed32_lsb,
        },
        .data = buf[fbs.pos .. fbs.pos + preloaded.getNumBytes()],
    };
}
// Example module wired into the common example harness: loops a WAV sample
// through a zang.Sampler, with optional distortion and speed changes.
pub const MainModule = struct {
    // Harness interface: one mono output, one temp buffer.
    pub const num_outputs = 1;
    pub const num_temps = 1;
    pub const output_audio = common.AudioOut{ .mono = 0 };
    pub const output_visualize = 0;
    // Decoded (embedded) WAV sample.
    sample: zang.Sample,
    // Queue of note-on impulses feeding the sampler.
    iq: zang.Notes(zang.Sampler.Params).ImpulseQueue,
    idgen: zang.IdGenerator,
    sampler: zang.Sampler,
    trigger: zang.Trigger(zang.Sampler.Params),
    distortion: zang.Distortion,
    // PRNG for the random playback speed (fixed seed 0).
    r: std.rand.Xoroshiro128,
    // Whether the distortion effect is currently enabled ('d' toggles).
    distort: bool,
    // True until the first paint() call, which pushes the initial loop.
    first: bool,
    pub fn init() MainModule {
        return .{
            .sample = readWav("drumloop.wav") catch unreachable,
            .iq = zang.Notes(zang.Sampler.Params).ImpulseQueue.init(),
            .idgen = zang.IdGenerator.init(),
            .sampler = zang.Sampler.init(),
            .trigger = zang.Trigger(zang.Sampler.Params).init(),
            .distortion = zang.Distortion.init(),
            .r = std.rand.DefaultPrng.init(0),
            .distort = false,
            .first = true,
        };
    }
    pub fn paint(
        self: *MainModule,
        span: zang.Span,
        outputs: [num_outputs][]f32,
        temps: [num_temps][]f32,
    ) void {
        // Kick off the looping sample on the very first paint call.
        if (self.first) {
            self.first = false;
            self.iq.push(0, self.idgen.nextId(), .{
                .sample_rate = AUDIO_SAMPLE_RATE,
                .sample = self.sample,
                .channel = 0,
                .loop = true,
            });
        }
        // Render the sampler into the temp buffer, per queued impulse.
        zang.zero(span, temps[0]);
        var ctr = self.trigger.counter(span, self.iq.consume());
        while (self.trigger.next(&ctr)) |result| {
            self.sampler.paint(
                result.span,
                .{temps[0]},
                .{},
                result.note_id_changed,
                result.params,
            );
        }
        // Boost gain before the optional distortion stage.
        zang.multiplyWithScalar(span, temps[0], 2.5);
        if (self.distort) {
            self.distortion.paint(span, .{outputs[0]}, .{}, false, .{
                .input = temps[0],
                .type = .overdrive,
                .ingain = 0.9,
                .outgain = 0.5,
                .offset = 0.0,
            });
        } else {
            zang.addInto(span, outputs[0], temps[0]);
        }
    }
    pub fn keyEvent(self: *MainModule, key: i32, down: bool, impulse_frame: usize) bool {
        // Space: restart at a random speed in [0.5, 1.5) x normal.
        if (down and key == c.SDLK_SPACE) {
            self.iq.push(impulse_frame, self.idgen.nextId(), .{
                .sample_rate = AUDIO_SAMPLE_RATE *
                    (0.5 + 1.0 * self.r.random.float(f32)),
                .sample = self.sample,
                .channel = 0,
                .loop = true,
            });
        }
        // 'b': same, but negative sample_rate plays the sample in reverse.
        if (down and key == c.SDLK_b) {
            self.iq.push(impulse_frame, self.idgen.nextId(), .{
                .sample_rate = AUDIO_SAMPLE_RATE *
                    -(0.5 + 1.0 * self.r.random.float(f32)),
                .sample = self.sample,
                .channel = 0,
                .loop = true,
            });
        }
        // 'd': toggle the distortion stage.
        if (down and key == c.SDLK_d) {
            self.distort = !self.distort;
        }
        return false;
    }
};
|
examples/example_sampler.zig
|