diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 00000000..e69de29b
diff --git a/404.html b/404.html
new file mode 100644
index 00000000..db45f28b
--- /dev/null
+++ b/404.html
@@ -0,0 +1,2316 @@
+[... 2,316 lines of generated Material for MkDocs markup for the 404 page — title "Computational Light Laboratory at University College London", body text "404 - Not found"; full HTML omitted ...]
\ No newline at end of file
diff --git a/CNAME b/CNAME
new file mode 100644
index 00000000..e2599df7
--- /dev/null
+++ b/CNAME
@@ -0,0 +1 @@
+complightlab.com
diff --git a/assets/_mkdocstrings.css b/assets/_mkdocstrings.css
new file mode 100644
index 00000000..e69de29b
diff --git a/assets/images/favicon.png b/assets/images/favicon.png
new file mode 100644
index 00000000..1cf13b9f
Binary files /dev/null and b/assets/images/favicon.png differ
diff --git a/assets/javascripts/bundle.83f73b43.min.js b/assets/javascripts/bundle.83f73b43.min.js
new file mode 100644
index 00000000..43d8b70f
--- /dev/null
+++ b/assets/javascripts/bundle.83f73b43.min.js
@@ -0,0 +1,16 @@
+[... 16 lines of minified JavaScript: the Material for MkDocs theme bundle, inlining the focus-visible polyfill, escape-html (MIT, © TJ Holowaychuk, Andreas Lubbe, Tiancheng "Timothy" Gu), clipboard.js v2.0.11 (MIT © Zeno Rocha, https://clipboardjs.com/), RxJS-style observable utilities, and the theme's component code for search, tooltips, code annotations, content tabs, Mermaid diagrams, the color palette, and the version selector; minified source omitted, and the hunk is truncated in this excerpt ...]
f=="undefined"?S:h(f,"toggle").pipe(W(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(w(l=>o.next(l)),_(()=>o.complete()),m(l=>$({ref:e},l)))}function ms(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=ye();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function mi(e,t){let r=new g,o=r.pipe(Z(),ie(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),h(e,"click").pipe(W(o)).subscribe(n=>n.preventDefault()),ms(e,t).pipe(w(n=>r.next(n)),_(()=>r.complete()),m(n=>$({ref:e},n)))}function fi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=O(h(n,"keydown"),h(n,"focus")).pipe(ve(se),m(()=>n.value),K());return o.pipe(He(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(w(s=>o.next(s)),_(()=>o.complete()),m(()=>({ref:e})))}function ui(e,{index$:t,keyboard$:r}){let o=xe();try{let n=ai(o.search,t),i=Se("search-query",e),a=Se("search-result",e);h(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Ie();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of P(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,d])=>d-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...P(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Ie()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=pi(i,{worker$:n});return O(s,li(a,{worker$:n,query$:s})).pipe(Re(...ae("search-share",e).map(p=>mi(p,{query$:s})),...ae("search-suggest",e).map(p=>fi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ye}}function di(e,{index$:t,location$:r}){return z([t,r.pipe(Q(ye()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>ii(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function fs(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Zr(e,o){var n=o,{header$:t}=n,r=so(n,["header$"]);let i=R(".md-sidebar__scrollwrap",e),{y:a}=Ve(i);return C(()=>{let s=new g,p=s.pipe(Z(),ie(!0)),c=s.pipe(Me(0,me));return 
c.pipe(re(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of P(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=ce(f);f.scrollTo({top:u-d/2})}}}),ue(P("label[tabindex]",e)).pipe(ne(l=>h(l,"click").pipe(ve(se),m(()=>l),W(p)))).subscribe(l=>{let f=R(`[id="${l.htmlFor}"]`);R(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),fs(e,r).pipe(w(l=>s.next(l)),_(()=>s.complete()),m(l=>$({ref:e},l)))})}function hi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return st(je(`${r}/releases/latest`).pipe(de(()=>S),m(o=>({version:o.tag_name})),De({})),je(r).pipe(de(()=>S),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return je(r).pipe(m(o=>({repositories:o.public_repos})),De({}))}}function bi(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return st(je(`${r}/releases/permalink/latest`).pipe(de(()=>S),m(({tag_name:o})=>({version:o})),De({})),je(r).pipe(de(()=>S),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}function vi(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return hi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return bi(r,o)}return S}var us;function ds(e){return us||(us=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return S}return vi(e.href).pipe(w(o=>__md_set("__source",o,sessionStorage)))}).pipe(de(()=>S),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function gi(e){let t=R(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(_n(o)),t.classList.add("md-source__repository--active")}),ds(e).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}function hs(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),ee("hidden"))}function yi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):hs(e,t)).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}function bs(e,{viewport$:t,header$:r}){let o=new Map,n=P(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(ee("height"),m(({height:s})=>{let p=Se("main"),c=R(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(ee("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let d=f.offsetParent;for(;d;d=d.offsetParent)u+=d.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),He(i),v(([p,c])=>t.pipe(Fr(([l,f],{offset:{y:u},size:d})=>{let y=u+d.height>=Math.floor(s.height);for(;f.length;){let[,L]=f[0];if(L-c=u&&!y)f=[l.pop(),...f];else 
break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Be(2,1),m(([s,p])=>s.prev.length{let i=new g,a=i.pipe(Z(),ie(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let s=O(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),He(o.pipe(ve(se))),re(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=ce(f);f.scrollTo({top:u-d/2,behavior:c})}}})}return B("navigation.tracking")&&t.pipe(W(a),ee("offset"),_e(250),Ce(1),W(n.pipe(Ce(1))),ct({delay:250}),re(i)).subscribe(([,{prev:s}])=>{let p=ye(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),bs(e,{viewport$:t,header$:r}).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))})}function vs(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Be(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),W(o.pipe(Ce(1))),ie(!0),ct({delay:250}),m(a=>({hidden:a})))}function Ei(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(Z(),ie(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(W(a),ee("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),h(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),vs(e,{viewport$:t,main$:o,target$:n}).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))}function wi({document$:e,viewport$:t}){e.pipe(v(()=>P(".md-ellipsis")),ne(r=>tt(r).pipe(W(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?mt(n,{viewport$:t}).pipe(W(e.pipe(Ce(1))),_(()=>n.removeAttribute("title"))):S})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>P(".md-status")),ne(r=>mt(r,{viewport$:t}))).subscribe()}function Ti({document$:e,tablet$:t}){e.pipe(v(()=>P(".md-toggle--indeterminate")),w(r=>{r.indeterminate=!0,r.checked=!1}),ne(r=>h(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),re(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function gs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Si({document$:e}){e.pipe(v(()=>P("[data-md-scrollfix]")),w(t=>t.removeAttribute("data-md-scrollfix")),b(gs),ne(t=>h(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function Oi({viewport$:e,tablet$:t}){z([ze("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),re(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of 
Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function ys(){return location.protocol==="file:"?Tt(`${new URL("search/search_index.js",eo.base)}`).pipe(m(()=>__index),G(1)):je(new URL("search/search_index.json",eo.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Go(),Ut=sn(),Lt=ln(Ut),to=an(),Oe=gn(),hr=Pt("(min-width: 960px)"),Mi=Pt("(min-width: 1220px)"),_i=mn(),eo=xe(),Ai=document.forms.namedItem("search")?ys():Ye,ro=new g;Zn({alert$:ro});var oo=new g;B("navigation.instant")&&oi({location$:Ut,viewport$:Oe,progress$:oo}).subscribe(ot);var Li;((Li=eo.version)==null?void 0:Li.provider)==="mike"&&ci({document$:ot});O(Ut,Lt).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});to.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&<(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&<(r);break;case"Enter":let o=Ie();o instanceof HTMLLabelElement&&o.click()}});wi({viewport$:Oe,document$:ot});Ti({document$:ot,tablet$:hr});Si({document$:ot});Oi({viewport$:Oe,tablet$:hr});var rt=Kn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Gn(e,{viewport$:Oe,header$:rt})),G(1)),xs=O(...ae("consent").map(e=>En(e,{target$:Lt})),...ae("dialog").map(e=>qn(e,{alert$:ro})),...ae("palette").map(e=>Jn(e)),...ae("progress").map(e=>Xn(e,{progress$:oo})),...ae("search").map(e=>ui(e,{index$:Ai,keyboard$:to})),...ae("source").map(e=>gi(e))),Es=C(()=>O(...ae("announce").map(e=>xn(e)),...ae("content").map(e=>zn(e,{viewport$:Oe,target$:Lt,print$:_i})),...ae("content").map(e=>B("search.highlight")?di(e,{index$:Ai,location$:Ut}):S),...ae("header").map(e=>Yn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("header-title").map(e=>Bn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Mi,()=>Zr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Zr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>yi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>xi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Lt})),...ae("top").map(e=>Ei(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Lt})))),Ci=ot.pipe(v(()=>Es),Re(xs),G(1));Ci.subscribe();window.document$=ot;window.location$=Ut;window.target$=Lt;window.keyboard$=to;window.viewport$=Oe;window.tablet$=hr;window.screen$=Mi;window.print$=_i;window.alert$=ro;window.progress$=oo;window.component$=Ci;})(); +//# sourceMappingURL=bundle.83f73b43.min.js.map + diff --git a/assets/javascripts/bundle.83f73b43.min.js.map b/assets/javascripts/bundle.83f73b43.min.js.map new file mode 100644 index 00000000..fe920b7d --- /dev/null +++ b/assets/javascripts/bundle.83f73b43.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/escape-html/index.js", 
"node_modules/clipboard/dist/clipboard.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/tslib/tslib.es6.mjs", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", 
"node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", 
"node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", 
"src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/findurl/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. 
mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? 
html + str.substring(lastIndex, index)\n : html;\n}\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && 
value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName 
=== 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF 
ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? 
fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? 
at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/******************************************************************************\nCopyright (c) Microsoft Corporation.\n\nPermission to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\nAND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\nPERFORMANCE OF THIS SOFTWARE.\n***************************************************************************** */\n/* global Reflect, Promise, SuppressedError, Symbol, Iterator */\n\nvar extendStatics = function(d, b) {\n extendStatics = Object.setPrototypeOf ||\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\n return extendStatics(d, b);\n};\n\nexport function __extends(d, b) {\n if (typeof b !== \"function\" && b !== null)\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\n extendStatics(d, b);\n function __() { this.constructor = d; }\n d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __());\n}\n\nexport var __assign = function() {\n __assign = Object.assign || function __assign(t) {\n for (var s, i = 1, n = arguments.length; i < n; i++) {\n s = arguments[i];\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\n }\n return t;\n }\n return __assign.apply(this, arguments);\n}\n\nexport function __rest(s, e) {\n var t = {};\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\n t[p] = s[p];\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\n t[p[i]] = s[p[i]];\n }\n return t;\n}\n\nexport function __decorate(decorators, target, key, desc) {\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\n return c > 3 && r && Object.defineProperty(target, key, r), r;\n}\n\nexport function __param(paramIndex, decorator) {\n return function (target, key) { decorator(target, key, paramIndex); }\n}\n\nexport function __esDecorate(ctor, descriptorIn, decorators, contextIn, initializers, extraInitializers) {\n function accept(f) { if (f !== void 0 && typeof f !== \"function\") throw new TypeError(\"Function expected\"); return f; }\n var kind = contextIn.kind, key = kind === \"getter\" ? \"get\" : kind === \"setter\" ? \"set\" : \"value\";\n var target = !descriptorIn && ctor ? contextIn[\"static\"] ? ctor : ctor.prototype : null;\n var descriptor = descriptorIn || (target ? Object.getOwnPropertyDescriptor(target, contextIn.name) : {});\n var _, done = false;\n for (var i = decorators.length - 1; i >= 0; i--) {\n var context = {};\n for (var p in contextIn) context[p] = p === \"access\" ? {} : contextIn[p];\n for (var p in contextIn.access) context.access[p] = contextIn.access[p];\n context.addInitializer = function (f) { if (done) throw new TypeError(\"Cannot add initializers after decoration has completed\"); extraInitializers.push(accept(f || null)); };\n var result = (0, decorators[i])(kind === \"accessor\" ? { get: descriptor.get, set: descriptor.set } : descriptor[key], context);\n if (kind === \"accessor\") {\n if (result === void 0) continue;\n if (result === null || typeof result !== \"object\") throw new TypeError(\"Object expected\");\n if (_ = accept(result.get)) descriptor.get = _;\n if (_ = accept(result.set)) descriptor.set = _;\n if (_ = accept(result.init)) initializers.unshift(_);\n }\n else if (_ = accept(result)) {\n if (kind === \"field\") initializers.unshift(_);\n else descriptor[key] = _;\n }\n }\n if (target) Object.defineProperty(target, contextIn.name, descriptor);\n done = true;\n};\n\nexport function __runInitializers(thisArg, initializers, value) {\n var useValue = arguments.length > 2;\n for (var i = 0; i < initializers.length; i++) {\n value = useValue ? initializers[i].call(thisArg, value) : initializers[i].call(thisArg);\n }\n return useValue ? value : void 0;\n};\n\nexport function __propKey(x) {\n return typeof x === \"symbol\" ? 
x : \"\".concat(x);\n};\n\nexport function __setFunctionName(f, name, prefix) {\n if (typeof name === \"symbol\") name = name.description ? \"[\".concat(name.description, \"]\") : \"\";\n return Object.defineProperty(f, \"name\", { configurable: true, value: prefix ? \"\".concat(prefix, \" \", name) : name });\n};\n\nexport function __metadata(metadataKey, metadataValue) {\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\n}\n\nexport function __awaiter(thisArg, _arguments, P, generator) {\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\n return new (P || (P = Promise))(function (resolve, reject) {\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\n step((generator = generator.apply(thisArg, _arguments || [])).next());\n });\n}\n\nexport function __generator(thisArg, body) {\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g = Object.create((typeof Iterator === \"function\" ? Iterator : Object).prototype);\n return g.next = verb(0), g[\"throw\"] = verb(1), g[\"return\"] = verb(2), typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\n function verb(n) { return function (v) { return step([n, v]); }; }\n function step(op) {\n if (f) throw new TypeError(\"Generator is already executing.\");\n while (g && (g = 0, op[0] && (_ = 0)), _) try {\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\n if (y = 0, t) op = [op[0] & 2, t.value];\n switch (op[0]) {\n case 0: case 1: t = op; break;\n case 4: _.label++; return { value: op[1], done: false };\n case 5: _.label++; y = op[1]; op = [0]; continue;\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\n default:\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\n if (t[2]) _.ops.pop();\n _.trys.pop(); continue;\n }\n op = body.call(thisArg, _);\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\n }\n}\n\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n var desc = Object.getOwnPropertyDescriptor(m, k);\n if (!desc || (\"get\" in desc ? 
!m.__esModule : desc.writable || desc.configurable)) {\n desc = { enumerable: true, get: function() { return m[k]; } };\n }\n Object.defineProperty(o, k2, desc);\n}) : (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n o[k2] = m[k];\n});\n\nexport function __exportStar(m, o) {\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\n}\n\nexport function __values(o) {\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\n if (m) return m.call(o);\n if (o && typeof o.length === \"number\") return {\n next: function () {\n if (o && i >= o.length) o = void 0;\n return { value: o && o[i++], done: !o };\n }\n };\n throw new TypeError(s ? \"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\n}\n\nexport function __read(o, n) {\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\n if (!m) return o;\n var i = m.call(o), r, ar = [], e;\n try {\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\n }\n catch (error) { e = { error: error }; }\n finally {\n try {\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\n }\n finally { if (e) throw e.error; }\n }\n return ar;\n}\n\n/** @deprecated */\nexport function __spread() {\n for (var ar = [], i = 0; i < arguments.length; i++)\n ar = ar.concat(__read(arguments[i]));\n return ar;\n}\n\n/** @deprecated */\nexport function __spreadArrays() {\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\n r[k] = a[j];\n return r;\n}\n\nexport function __spreadArray(to, from, pack) {\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\n if (ar || !(i in from)) {\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\n ar[i] = from[i];\n }\n }\n return to.concat(ar || Array.prototype.slice.call(from));\n}\n\nexport function __await(v) {\n return this instanceof __await ? (this.v = v, this) : new __await(v);\n}\n\nexport function __asyncGenerator(thisArg, _arguments, generator) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\n return i = Object.create((typeof AsyncIterator === \"function\" ? AsyncIterator : Object).prototype), verb(\"next\"), verb(\"throw\"), verb(\"return\", awaitReturn), i[Symbol.asyncIterator] = function () { return this; }, i;\n function awaitReturn(f) { return function (v) { return Promise.resolve(v).then(f, reject); }; }\n function verb(n, f) { if (g[n]) { i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; if (f) i[n] = f(i[n]); } }\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\n function fulfill(value) { resume(\"next\", value); }\n function reject(value) { resume(\"throw\", value); }\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\n}\n\nexport function __asyncDelegator(o) {\n var i, p;\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? 
{ value: __await(o[n](v)), done: false } : f ? f(v) : v; } : f; }\n}\n\nexport function __asyncValues(o) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var m = o[Symbol.asyncIterator], i;\n return m ? m.call(o) : (o = typeof __values === \"function\" ? __values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\n}\n\nexport function __makeTemplateObject(cooked, raw) {\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\n return cooked;\n};\n\nvar __setModuleDefault = Object.create ? (function(o, v) {\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\n}) : function(o, v) {\n o[\"default\"] = v;\n};\n\nexport function __importStar(mod) {\n if (mod && mod.__esModule) return mod;\n var result = {};\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\n __setModuleDefault(result, mod);\n return result;\n}\n\nexport function __importDefault(mod) {\n return (mod && mod.__esModule) ? mod : { default: mod };\n}\n\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\n}\n\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\n}\n\nexport function __classPrivateFieldIn(state, receiver) {\n if (receiver === null || (typeof receiver !== \"object\" && typeof receiver !== \"function\")) throw new TypeError(\"Cannot use 'in' operator on non-object\");\n return typeof state === \"function\" ? 
receiver === state : state.has(receiver);\n}\n\nexport function __addDisposableResource(env, value, async) {\n if (value !== null && value !== void 0) {\n if (typeof value !== \"object\" && typeof value !== \"function\") throw new TypeError(\"Object expected.\");\n var dispose, inner;\n if (async) {\n if (!Symbol.asyncDispose) throw new TypeError(\"Symbol.asyncDispose is not defined.\");\n dispose = value[Symbol.asyncDispose];\n }\n if (dispose === void 0) {\n if (!Symbol.dispose) throw new TypeError(\"Symbol.dispose is not defined.\");\n dispose = value[Symbol.dispose];\n if (async) inner = dispose;\n }\n if (typeof dispose !== \"function\") throw new TypeError(\"Object not disposable.\");\n if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };\n env.stack.push({ value: value, dispose: dispose, async: async });\n }\n else if (async) {\n env.stack.push({ async: true });\n }\n return value;\n}\n\nvar _SuppressedError = typeof SuppressedError === \"function\" ? SuppressedError : function (error, suppressed, message) {\n var e = new Error(message);\n return e.name = \"SuppressedError\", e.error = error, e.suppressed = suppressed, e;\n};\n\nexport function __disposeResources(env) {\n function fail(e) {\n env.error = env.hasError ? new _SuppressedError(e, env.error, \"An error was suppressed during disposal.\") : e;\n env.hasError = true;\n }\n var r, s = 0;\n function next() {\n while (r = env.stack.pop()) {\n try {\n if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);\n if (r.dispose) {\n var result = r.dispose.call(r.value);\n if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });\n }\n else s |= 1;\n }\n catch (e) {\n fail(e);\n }\n }\n if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();\n if (env.hasError) throw env.error;\n }\n return next();\n}\n\nexport default {\n __extends,\n __assign,\n __rest,\n __decorate,\n __param,\n __metadata,\n __awaiter,\n __generator,\n __createBinding,\n __exportStar,\n __values,\n __read,\n __spread,\n __spreadArrays,\n __spreadArray,\n __await,\n __asyncGenerator,\n __asyncDelegator,\n __asyncValues,\n __makeTemplateObject,\n __importStar,\n __importDefault,\n __classPrivateFieldGet,\n __classPrivateFieldSet,\n __classPrivateFieldIn,\n __addDisposableResource,\n __disposeResources,\n};\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. 
Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
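Once a Subscriber has been stopped by `error` or `complete`, later notifications are no longer delivered to the destination; they are routed to `handleStoppedNotification` (and to `config.onStoppedNotification`, if configured). A small illustration of that behavior:

```ts
import { Observable } from 'rxjs';

const src$ = new Observable<number>((subscriber) => {
  subscriber.next(1);
  subscriber.complete();
  subscriber.next(2); // the subscriber is stopped: this value is dropped
});

src$.subscribe({
  next: (v) => console.log('next', v),
  complete: () => console.log('complete'),
});
// Logs: 'next 1', then 'complete'; the trailing next(2) never reaches the observer.
```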
In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
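`SafeSubscriber` accepts either separate callbacks or a partial observer; when no `error` handler is present, failures fall through to the unhandled-error path described earlier, so it is usually worth supplying one. For example:

```ts
import { throwError } from 'rxjs';

// With only a next handler the error would be reported out-of-band;
// adding an error handler keeps it inside the observer.
throwError(() => new Error('boom')).subscribe({
  next: () => console.log('never called'),
  error: (err) => console.error('handled:', err.message),
});
```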
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. 
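Besides the `Observable.prototype.pipe` method, the standalone `pipe()` above composes unary functions left to right, which makes it handy for building reusable operator chains. A small sketch (operator imports from the `'rxjs'` root follow the same convention as the examples above):

```ts
import { of, pipe, filter, map } from 'rxjs';

// pipe(f, g) returns x => g(f(x)); applied to operators this yields a
// reusable OperatorFunction that can be dropped into any .pipe() call.
const evensDoubled = pipe(
  filter((n: number) => n % 2 === 0),
  map((n) => n * 2)
);

of(1, 2, 3, 4).pipe(evensDoubled).subscribe(console.log); // 4, 8
```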
Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. 
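The constructor parameter documented above runs once per subscription and may return teardown logic. A minimal sketch:

```ts
import { Observable } from 'rxjs';

const ticker$ = new Observable<number>((subscriber) => {
  let i = 0;
  const id = setInterval(() => subscriber.next(i++), 1000);
  // TeardownLogic: runs on unsubscribe (and after error/complete).
  return () => clearInterval(id);
});

const sub = ticker$.subscribe((n) => console.log(n));
setTimeout(() => sub.unsubscribe(), 3500); // logs 0, 1, 2 and then stops
```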
You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. 
Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
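Since `toPromise` is deprecated in favour of `firstValueFrom` and `lastValueFrom` (per the deprecation notes above), a short sketch of the replacements:

```ts
import { firstValueFrom, lastValueFrom, of } from 'rxjs';

async function demo() {
  const first = await firstValueFrom(of(1, 2, 3)); // resolves with 1
  const last = await lastValueFrom(of(1, 2, 3));   // resolves with 3 on complete
  console.log(first, last);
}

// Unlike toPromise(), both helpers reject with EmptyError if the source
// completes without ever emitting.
demo();
```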
Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. 
(DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
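`operate` and `OperatorSubscriber` above are internal helpers, and the `lift` deprecation note earlier recommends building new operators by returning `new Observable()` directly instead. A minimal sketch of that recommended style (the `double` operator is a made-up example):

```ts
import { Observable } from 'rxjs';
import type { OperatorFunction } from 'rxjs';

// A hand-rolled operator: doubles each value, passing errors and
// completion straight through to the downstream subscriber.
function double(): OperatorFunction<number, number> {
  return (source) =>
    new Observable<number>((subscriber) =>
      source.subscribe({
        next: (value) => subscriber.next(value * 2),
        error: (err) => subscriber.error(err),
        complete: () => subscriber.complete(),
      })
    );
}
```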
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
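As the `ObjectUnsubscribedError` documentation above describes, using a Subject after it has been unsubscribed throws that error. For example:

```ts
import { Subject, ObjectUnsubscribedError } from 'rxjs';

const subject = new Subject<number>();
subject.unsubscribe(); // marks the subject as closed

try {
  subject.next(1); // _throwIfClosed rejects further use
} catch (err) {
  console.log(err instanceof ObjectUnsubscribedError); // true
}
```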
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected _checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. 
`ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. 
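Following the `bufferSize`/`windowTime` configuration and the BehaviorSubject comparison above, a short sketch of both replay behaviours:

```ts
import { BehaviorSubject, ReplaySubject } from 'rxjs';

// BehaviorSubject requires an initial value and re-emits only the latest one.
const current$ = new BehaviorSubject<number>(0);
current$.next(1);
current$.subscribe((v) => console.log('behavior:', v)); // behavior: 1

// ReplaySubject(3, 2000): keep at most 3 values, each for at most 2 seconds.
const replay$ = new ReplaySubject<number>(3, 2000);
replay$.next(1);
replay$.next(2);
replay$.next(3);
replay$.next(4); // 1 falls out of the 3-element buffer
replay$.subscribe((v) => console.log('replay:', v)); // replay: 2, 3, 4
```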
An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. 
This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... */\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? 
e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. 
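In practice most code never calls `schedule()` on a Scheduler directly; schedulers are passed to creation functions or operators instead. A small sketch using the public `asyncScheduler` (assuming a recent v7 where operators are re-exported from the `'rxjs'` root; otherwise import `observeOn` from `'rxjs/operators'`):

```ts
import { of, asyncScheduler, observeOn } from 'rxjs';

// observeOn re-emits each notification on the given scheduler,
// deferring delivery off the current call stack.
of(1, 2, 3).pipe(observeOn(asyncScheduler)).subscribe((v) => console.log(v));
console.log('synchronous code runs first');
// Logs: 'synchronous code runs first', then 1, 2, 3 on later macrotasks.
```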
May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed an stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction extends AsyncAction {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instanceo of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. 
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? 
args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! : defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an

+ +
+

Research Highlights

+
+ + +
+
+
+

Most downloaded paper award from Nature's Light: Science & Applications.

+
+
+ + +
+
+
+

Multi-color holograms improve brightness in holographic displays (SIGGRAPH ASIA 2023)

+
+
+ + +
+
+
+

Ahmet Güzel received the best poster award at the UKRI AI CDT conference.

+
+
+ + +
+
+
+

HoloBeam: Paper-Thin Near-Eye Displays (IEEE VR 2023)

+
+
+ + +
+
+
+

Realistic Defocus Blur for Multiplane Computer-Generated Holography (IEEE VR 2023)

+
+
+ + +
+
+
+

ChromaCorrect: Perceptual Prescription Correction in Virtual Reality (Optics Express)

+
+
+ +
+
+
+

Optimizing vision and visuals (SIGGRAPH 2022)

+
+
+ + +
+
+
+

Unrolled Primal-Dual Networks for Lensless Imaging (Optics Express)

+
+
+ + +
+
+
+

Metameric Varifocal Holograms (IEEE VR 2022)

+
+
+ + +
+
+
+

Learned Holographic Light Transport (Applied Optics)

+
+
+ + +
+
+
+

Telelife: the future of remote living (Frontiers in VR)

+
+
+ + +
+
+
+

SensiCut: material-aware laser cutting using speckle sensing and deep learning (UIST 2021)

+
+
+ + +
+
+
+

Beaming Displays (IEEE VR 2021)

+
+
+ + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/javascripts/config.js b/javascripts/config.js new file mode 100644 index 00000000..06dbf38b --- /dev/null +++ b/javascripts/config.js @@ -0,0 +1,16 @@ +window.MathJax = { + tex: { + inlineMath: [["\\(", "\\)"]], + displayMath: [["\\[", "\\]"]], + processEscapes: true, + processEnvironments: true + }, + options: { + ignoreHtmlClass: ".*|", + processHtmlClass: "arithmatex" + } +}; + +document$.subscribe(() => { + MathJax.typesetPromise() +}) diff --git a/lectures/index.html b/lectures/index.html new file mode 100644 index 00000000..f9790cf6 --- /dev/null +++ b/lectures/index.html @@ -0,0 +1,2412 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Secret link - Computational Light Laboratory at University College London + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
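The `javascripts/config.js` file introduced in this diff configures MathJax v3 delimiters for the `arithmatex` Markdown extension and re-typesets math whenever the Material for MkDocs `document$` observable emits. A minimal sketch of the same wiring is shown below, assuming MathJax and the Material theme are loaded on the page; the readiness guard around `MathJax.startup.promise` and the `console.warn` fallback are illustrative assumptions, not part of the site's file.

```js
// Minimal sketch (assumes MathJax v3 and Material for MkDocs are loaded on the
// page); mirrors the wiring in javascripts/config.js with an added, assumed
// guard that waits for MathJax to finish its startup before typesetting.
window.MathJax = {
  tex: {
    inlineMath: [["\\(", "\\)"]],   // delimiters emitted by arithmatex in generic mode
    displayMath: [["\\[", "\\]"]],
    processEscapes: true,
    processEnvironments: true
  },
  options: {
    ignoreHtmlClass: ".*|",
    processHtmlClass: "arithmatex"  // only typeset elements produced by arithmatex
  }
};

// `document$` is an observable provided by the Material for MkDocs theme; it
// emits once per page view, including instant-navigation page swaps.
document$.subscribe(() => {
  if (window.MathJax && MathJax.startup) {
    // MathJax v3 exposes a startup promise that resolves once it is ready.
    MathJax.startup.promise.then(() => MathJax.typesetPromise());
  } else {
    console.warn("MathJax not loaded yet; skipping typeset for this page view.");
  }
});
```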
+ + + +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

A secret page

+

Aha! You found a secret page.

+

Pssst, if Kaan told you this, click this link!

+ + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/logo/door_tag.pdf b/logo/door_tag.pdf new file mode 100644 index 00000000..be27d438 Binary files /dev/null and b/logo/door_tag.pdf differ diff --git a/logo/door_tag.svg b/logo/door_tag.svg new file mode 100644 index 00000000..16944b7a --- /dev/null +++ b/logo/door_tag.svg @@ -0,0 +1,502 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + c + c + + ComputationalLightLaboratory + Lead: Assoc. Prof. Kaan Akşit (https://kaanaksit.com)E-mail: kaanaksit@kaanaksit.comWebsite: https://complightlab.com + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Warning Laser Hazard!DO NOT ENTER WITHOUT PERMISSION! + + diff --git a/logo/logo.png b/logo/logo.png new file mode 100644 index 00000000..66cc5825 Binary files /dev/null and b/logo/logo.png differ diff --git a/logo/logo.svg b/logo/logo.svg new file mode 100644 index 00000000..6f1f96f1 --- /dev/null +++ b/logo/logo.svg @@ -0,0 +1,373 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + c + c + + diff --git a/media/acm.png b/media/acm.png new file mode 100644 index 00000000..6d72cf56 Binary files /dev/null and b/media/acm.png differ diff --git a/media/ahmet_guzel_poster_award.jpg b/media/ahmet_guzel_poster_award.jpg new file mode 100644 index 00000000..53bac086 Binary files /dev/null and b/media/ahmet_guzel_poster_award.jpg differ diff --git a/media/ahmet_guzel_poster_award_2.png b/media/ahmet_guzel_poster_award_2.png new file mode 100644 index 00000000..d0133726 Binary files /dev/null and b/media/ahmet_guzel_poster_award_2.png differ diff --git a/media/chi2022.png b/media/chi2022.png new file mode 100644 index 00000000..1063f653 Binary files /dev/null and b/media/chi2022.png differ diff --git a/media/egsr2022.png b/media/egsr2022.png new file mode 100644 index 00000000..a0fc0954 Binary files /dev/null and b/media/egsr2022.png differ diff --git a/media/eu_horizon2020.png b/media/eu_horizon2020.png new file mode 100644 index 00000000..34f362c2 Binary files /dev/null and b/media/eu_horizon2020.png differ diff --git a/media/frontiers.png b/media/frontiers.png new file mode 100644 index 00000000..bdafa336 Binary files /dev/null and b/media/frontiers.png differ diff --git a/media/frontiers_in_optics.png b/media/frontiers_in_optics.png new file mode 100644 index 00000000..641363de Binary files /dev/null and b/media/frontiers_in_optics.png differ diff --git a/media/graphics_replicability_stamp_initiative.png b/media/graphics_replicability_stamp_initiative.png new file mode 100644 index 00000000..dd7f9a26 Binary files /dev/null and b/media/graphics_replicability_stamp_initiative.png differ diff --git a/media/huawei.png b/media/huawei.png new file mode 100644 index 00000000..6dff6b9c Binary files /dev/null and b/media/huawei.png differ diff --git a/media/ieee.png b/media/ieee.png new file mode 100644 index 00000000..3c8a8bb7 Binary files /dev/null and b/media/ieee.png differ diff --git a/media/ieee_vgtc_2022_award.png b/media/ieee_vgtc_2022_award.png new file mode 100644 index 00000000..11f51296 Binary files /dev/null and b/media/ieee_vgtc_2022_award.png differ diff --git a/media/ieeevr2022.png b/media/ieeevr2022.png new file mode 100644 index 00000000..451c175d Binary files /dev/null and b/media/ieeevr2022.png differ diff --git a/media/jom_ar_vr_mr_call.png b/media/jom_ar_vr_mr_call.png 
new file mode 100644 index 00000000..c43698eb Binary files /dev/null and b/media/jom_ar_vr_mr_call.png differ diff --git a/media/jst_forest.png b/media/jst_forest.png new file mode 100644 index 00000000..8596bc62 Binary files /dev/null and b/media/jst_forest.png differ diff --git a/media/meta_reality_labs.png b/media/meta_reality_labs.png new file mode 100644 index 00000000..98837c7a Binary files /dev/null and b/media/meta_reality_labs.png differ diff --git a/media/nature.png b/media/nature.png new file mode 100644 index 00000000..8cf15915 Binary files /dev/null and b/media/nature.png differ diff --git a/media/nature_light_most_download.jpeg b/media/nature_light_most_download.jpeg new file mode 100644 index 00000000..9c776681 Binary files /dev/null and b/media/nature_light_most_download.jpeg differ diff --git a/media/optica.png b/media/optica.png new file mode 100644 index 00000000..859ca185 Binary files /dev/null and b/media/optica.png differ diff --git a/media/oracle.png b/media/oracle.png new file mode 100644 index 00000000..5bfdff91 Binary files /dev/null and b/media/oracle.png differ diff --git a/media/petg_settings.png b/media/petg_settings.png new file mode 100644 index 00000000..72c89b16 Binary files /dev/null and b/media/petg_settings.png differ diff --git a/media/photonics_west.png b/media/photonics_west.png new file mode 100644 index 00000000..6e248d22 Binary files /dev/null and b/media/photonics_west.png differ diff --git a/media/print_settings.PNG b/media/print_settings.PNG new file mode 100644 index 00000000..5c29bfcd Binary files /dev/null and b/media/print_settings.PNG differ diff --git a/media/printer.jpg b/media/printer.jpg new file mode 100644 index 00000000..2684d91c Binary files /dev/null and b/media/printer.jpg differ diff --git a/media/printer_USB.jpg b/media/printer_USB.jpg new file mode 100644 index 00000000..b5e45c52 Binary files /dev/null and b/media/printer_USB.jpg differ diff --git a/media/research_statement_future.png b/media/research_statement_future.png new file mode 100644 index 00000000..3c6ff350 Binary files /dev/null and b/media/research_statement_future.png differ diff --git a/media/royal_society.png b/media/royal_society.png new file mode 100644 index 00000000..b308e881 Binary files /dev/null and b/media/royal_society.png differ diff --git a/media/siggraph2022.png b/media/siggraph2022.png new file mode 100644 index 00000000..e87cf545 Binary files /dev/null and b/media/siggraph2022.png differ diff --git a/media/siggraph_asia_2023.png b/media/siggraph_asia_2023.png new file mode 100644 index 00000000..64871946 Binary files /dev/null and b/media/siggraph_asia_2023.png differ diff --git a/media/stanford_university.png b/media/stanford_university.png new file mode 100644 index 00000000..d81875eb Binary files /dev/null and b/media/stanford_university.png differ diff --git a/media/the_next_byte.png b/media/the_next_byte.png new file mode 100644 index 00000000..422f4c18 Binary files /dev/null and b/media/the_next_byte.png differ diff --git a/media/tubitak.png b/media/tubitak.png new file mode 100644 index 00000000..dddfaceb Binary files /dev/null and b/media/tubitak.png differ diff --git a/media/ucl.png b/media/ucl.png new file mode 100644 index 00000000..4a0e3430 Binary files /dev/null and b/media/ucl.png differ diff --git a/media/ucl_logo_modified.png b/media/ucl_logo_modified.png new file mode 100644 index 00000000..5e9eaaed Binary files /dev/null and b/media/ucl_logo_modified.png differ diff --git a/media/ucl_osaka.png b/media/ucl_osaka.png new file mode 
100644 index 00000000..9bf63923 Binary files /dev/null and b/media/ucl_osaka.png differ diff --git a/media/ucl_vecg.png b/media/ucl_vecg.png new file mode 100644 index 00000000..521e959a Binary files /dev/null and b/media/ucl_vecg.png differ diff --git a/media/university_of_rochester.png b/media/university_of_rochester.png new file mode 100644 index 00000000..bd8529a3 Binary files /dev/null and b/media/university_of_rochester.png differ diff --git a/outreach/index.html b/outreach/index.html new file mode 100644 index 00000000..b2295320 --- /dev/null +++ b/outreach/index.html @@ -0,0 +1,5073 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Outreach - Computational Light Laboratory at University College London + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Outreach

+

Research Hub

+
+

Info

+

We started a public Slack group dedicated to scientists researching computer graphics, human visual perception, computational photography and computational displays. We aim to build a single hub for everyone and provide all members with a gateway to:

+
    +
  • meet others in the field,
  • +
  • find collaborators worldwide,
  • +
  • introduce open-source tools for research,
  • +
  • announce and plan events in major conferences (e.g., SIGGRAPH, CVPR, IEEE VR, SPIE PW),
  • +
  • advertise opportunities for others (e.g., internships, jobs, initiatives, grants),
  • +
  • promote their most recent research,
  • +
  • find subjects for their experiments.
  • +
+

But most of all, the primary goal is to stay connected in order to sustain a healthy research field. To join our Slack channel and contribute to future conversations, please use the link provided below:

+

Subscribe to our Slack

+

Please do not hesitate to share the invitation link with other people in your field. If you encounter any issues with the link, please reach us at kaanaksit@kaanaksit.com.

+
+

Seminars

+

We organize a seminar series named High-Beams. The High-Beams seminar series is an exclusive event where we host experts from across industry and academia. Overall, the seminars are a blend of internal and external presenters.

+
+

Question

+

If you are wondering how to get an invitation to the next seminar series, please do not hesitate to email Kaan Akşit or subscribe to our mailing list (open to the public).

+

Subscribe to our mailing list

+
+

2024

+

These seminars are organized by Kaan Akşit.

+

Manu Gopakumar (Stanford University)

+
+Details +

Date: +16th October 2024

+

Presenter: Manu Gopakumar, Ph.D. Candidate, Stanford University

+

Title: Full-color 3D holographic augmented reality displays with metasurface waveguides

+

Watch: Recording (Password protected)

+
+

Guosheng Hu (University of Bristol)

+
+Details +

Date: +10th October 2024

+

Presenter: Guosheng Hu, Senior Lecturer, University of Bristol

+

Title: Reduce AI’s Carbon Footprint

+

Watch: Recording (Password protected)

+
+

Binglun Wang (University College London)

+
+Details +

Date: +2nd October 2024

+

Presenter: Binglun Wang, Ph.D. candidate at University College London

+

Title: 3D Editings using Diffusion Models

+

Watch: Recording (Password protected)

+
+

Henry Fuchs (University of North Carolina at Chapel Hill)

+
+Details +

Date: +20th June 2024

+

Presenter: Henry Fuchs, Professor at the University of North Carolina at Chapel Hill

+

Title: Everyday Augmented Reality Glasses: Past Predictions, Present Problems, Future Possibilities

+

Watch: Not recorded

+
+

Zian Wang (University of Toronto and NVIDIA)

+
+Details +

Date: +24th April 2024

+

Presenter: Zian Wang, PhD student at the University of Toronto

+

Title: Hybrid Rendering: Bridging Volumetric and Surface Representations for Efficient 3D Content Modeling

+

Watch: Recording (Password protected)

+
+

Litu Rout (The University of Texas at Austin)

+
+Details +

Date: +10th April 2024

+

Presenter: Litu Rout, PhD student at the University of Texas at Austin

+

Title: On Solving Inverse Problems using Latent Diffusion

+

Watch: Recording (Password protected)

+
+

Yingsi Qin (Carnegie Mellon University)

+
+Details +

Date: +3rd April 2024

+

Presenter: Yingsi Qin, PhD Candidate at Carnegie Mellon University

+

Title: Split-Lohmann Multifocal Displays

+

Watch: Recording (Password protected)

+
+

Seung-Hwan Baek (POSTECH)

+
+Details +

Date: +20th March 2024

+

Presenter: Seung-Hwan Baek, Assistant Professor at POSTECH

+

Title: High-dimensional Visual Computing

+

Watch: Recording (Password protected)

+
+

Divya Kothandaraman (University of Maryland College Park)

+
+Details +

Date: +13th March 2024

+

Presenter: Divya Kothandaraman, PhD student at the University of Maryland College Park

+

Title: Text Controlled Aerial-View Synthesis from a Single Image using Diffusion Models

+

Watch: Recording (Password protected)

+
+

Cheng Zheng (Massachusetts Institute of Technology)

+
+Details +

Date: +6th March 2024

+

Presenter: Cheng Zheng, PhD student at Massachusetts Institute of Technology

+

Title: Neural Lithography: Close the Design to Manufacturing Gap in Computational Optics

+

Watch: Recording (Password protected)

+
+

Taimoor Tariq (Università della Svizzera Italiana)

+
+Details +

Date: +28th February 2024

+

Presenter: Taimoor Tariq, PhD student at Università della Svizzera Italiana

+

Title: It's all in the Eyes: Towards Perceptually Optimized Real-Time VR

+

Watch: Recording (Password protected)

+
+

Mose Sakashita (Cornell University)

+
+Details +

Date: +21st February 2024

+

Presenter: Mose Sakashita, PhD student at Cornell University

+

Title: Enhancing Remote Design Collaboration through Motion-Controlled Telepresence Robots

+

Watch: Recording (Password protected)

+
+

Ruoshi Liu (Columbia University)

+
+Details +

Date: +14th February 2024

+

Presenter: Ruoshi Liu, PhD student at Columbia University

+

Title: Neural Network Inversion for Imaging, Vision, Robotics, and Beyond

+

Watch: Recording (Password protected)

+
+

Madalina Nicolae (Saarland University and Polytechnic Institute of Paris)

+
+Details +

Date: +7th February 2024

+

Presenter: Madalina Nicolae, PhD student at Saarland University and Polytechnic Institute of Paris

+

Title: Towards Digital Biofabrication and Sustainable Innovation

+

Watch: Recording (Password protected)

+
+

2023

+

These seminars are organized by Kaan Akşit. +Simon Julier invited Stephen Ellis and moderated the session.

+

Daisuke Iwai (Osaka University)

+
+Details +

Date: +29th November 2023

+

Presenter: Daisuke Iwai, Associate Professor at Osaka University

+

Title: Computational displays in projection mapping

+

Watch: Recording (Password protected)

+
+

Lior Yariv (Weizmann Institute of Science)

+
+Details +

Date: +22nd November 2023

+

Presenter: Lior Yariv, PhD student at Weizmann Institute of Science

+

Title: MultiDiffusion: Fusing Diffusion Paths for Controlled Image Generation

+

Watch: Recording (Password protected)

+
+

Ziya Erkoç (Technical University of Munich)

+
+Details +

Date: +15th November 2023

+

Presenter: Ziya Erkoç, PhD student at the Technical University of Munich

+

Title: Generative Modeling with Neural Field Weights

+

Watch: Recording (Password protected)

+
+

Guillaume Thekkadath (National Research Council of Canada)

+
+Details +

Date: +8th November 2023

+

Presenter: Guillaume Thekkadath, Postdoctoral Fellow at National Research Council of Canada

+

Title: Intensity correlation holography: applications in single photon and remote imaging

+

Watch: Recording (Password protected)

+
+

Kenan Bektas (University of Saint Gallen)

+
+Details +

Date: +1st November 2023

+

Presenter: Kenan Bektas, Postdoctoral Researcher at the University of Saint Gallen

+

Title: Gaze-Enabled Mixed Reality for Human Augmentation in Ubiquitous Computing Environments

+

Watch: Recording (Password protected)

+
+

Koya Narumi (The University of Tokyo)

+
+Details +

Date: +25th October 2023

+

Presenter: Koya Narumi, Assistant Professor at the University of Tokyo

+

Title: Computational Origami Fabrication

+

Watch: Recording (Password protected)

+
+

Stephen Ellis (NASA)

+
+Details +

Date: +11th October 2023

+

Presenter: Stephen Ellis

+

Title: Complexity -complicated

+

Watch: N/A

+
+

Simeng Qiu (King Abdullah University of Science and Technology)

+
+Details +

Date: +4th October 2023

+

Presenter: Simeng Qiu, PhD Candidate at King Abdullah University of Science and Technology

+

Title: MoireTag: Angular Measurement and Tracking with a Passive Marker

+

Watch: Recording (Password protected)

+
+

Suyeon Choi (Stanford University)

+
+Details +

Date: +27th September 2023

+

Presenter: Suyeon Choi, PhD student at Stanford University

+

Title: Neural Holography for Next-generation Virtual and Augmented Reality Displays

+

Watch: Recording (Password protected)

+
+

Ulugbek Kamilov (Washington University in St. Louis)

+
+Details +

Date: +20th September 2023

+

Presenter: Ulugbek Kamilov, Associate Professor of Electrical & Systems Engineering and Computer Science & Engineering at Washington University in St. Louis

+

Title: Plug-and-Play Models for Large-Scale Computational Imaging

+

Watch: Recording (Password protected)

+
+

Shariq Bhat (King Abdullah University of Science and Technology)

+
+Details +

Date: +13th September 2023

+

Presenter: Shariq Bhat, PhD Student at King Abdullah University of Science and Technology

+

Title: A Journey Towards State-of-the-art Monocular Depth Estimation Using Adaptive Bins

+

Watch: Recording (Password protected)

+
+

Congli Wang (University of California, Berkeley)

+
+Details +

Date: +6th September 2023

+

Presenter: Congli Wang, Postdoctoral Researcher at University of California, Berkeley

+

Title: Computational sensing with intelligent optical instruments

+

Watch: Recording (Password protected)

+
+

Silvia Sellán (University of Toronto)

+
+Details +

Date: +14th June 2023

+

Presenter: Silvia Sellán, PhD student at the University of Toronto

+

Title: Uncertain Surface Reconstruction

+

Watch: Recording (Password protected)

+
+

Omer Shapira (NVIDIA)

+
+Details +

Date: +26th May 2023

+

Presenter: Omer Shapira, Engineer and Researcher at NVIDIA

+

Title: Cloud Computing Around the Body: Theoretical Limits and Practical Applications

+

Watch: Recording (Password protected)

+
+

Michael Fischer (University College London)

+
+Details +

Date: +17th May 2023

+

Presenter: Michael Fischer, PhD student at University College London

+

Title: Advanced Machine Learning for Rendering

+

Watch: Recording (Password protected)

+
+

Michael Proulx (Meta Reality Labs and University of Bath)

+
+Details +

Date: +3rd May 2023

+

Presenter: Michael Proulx, Reader at University of Bath and Research Scientist at Meta Reality Labs

+

Title: Visual interactions in Extended Reality

+

Watch: Recording (Password protected)

+
+

İnci Ayhan (Bogazici University)

+
+Details +

Date: +26th April 2023

+

Presenter: İnci Ayhan, Associate Professor at Bogazici University

+

Title: Cognitive Embodiment and Affordance Perception in the Virtual Reality Environment

+

Watch: Recording (Password protected)

+
+

Zerrin Yumak (Utrecht University)

+
+Details +

Date: +12th April 2023

+

Presenter: Zerrin Yumak, Assistant Professor at Utrecht University

+

Title: AI-driven Virtual Humans with Non-verbal Communication Skills

+

Watch: Recording (Password protected)

+
+

Elia Gatti (University College London)

+
+Details +

Date: +5th April 2023

+

Presenter: Elia Gatti, Assistant Professor at University College London

+

Title: AI-driven Virtual Humans with Non-verbal Communication Skills

+

Watch: Recording (Password protected)

+
+

Yuhao Zhu (University of Rochester)

+
+Details +

Date: +29th March 2023

+

Presenter: Yuhao Zhu, University of Rochester

+

Title: Rethinking Imaging-Computing Interface

+

Watch: Recording (Password protected)

+
+

Taejun Kim (KAIST)

+
+Details +

Date: +22nd March 2023

+

Presenter: Taejun Kim, PhD Student at KAIST

+

Title: Interface Control with Eye Movement

+

Watch: Recording (Password protected)

+
+

Josef Spjut (NVIDIA)

+
+Details +

Date: +15th March 2023

+

Presenter: Josef Spjut, Senior Research Scientist at NVIDIA

+

Title: Esports Rendering and Display: Psychophysical Experimentation

+

Watch: Recording (Password protected)

+
+

Ruth Rosenholtz (Massachusetts Institute of Technology)

+
+Details +

Date: +1st March 2023

+

Presenter: Ruth Rosenholtz, Principal Research Scientist at Massachusetts Institute of Technology

+

Title: Human vision at a glance

+

Watch: Recording (Password protected)

+
+

Qi Sun (NYU)

+
+Details +

Date: +21st February 2023

+

Presenter: Qi Sun, Assistant Professor at NYU

+

Title: Co-Optimizing Human-System Performance in VR/AR

+

Watch: Recording (Password protected)

+
+

Towaki Takikawa (NVIDIA)

+
+Details +

Date: +8th February 2023

+

Presenter: Towaki Takikawa, Research Scientist at NVIDIA

+

Title: Towards Volumetric Multimedia Compression and Transport with Neural Fields

+

Watch: Recording (Password protected)

+
+

2022

+

The 2022 seminar series was conducted with the help of several key people at University College London. Many of these seminars were coordinated by Kaan Akşit, who received help from Simon Julier, Oliver Kingshott, Klara Brandstätter, and Felix Thiel with the moderation and organization of several of these events.

+

Ernst Kruijff (Bonn-Rhein-Sieg University of Applied Sciences)

+
+Details +

Date: +29th November 2022

+

Presenter: Ernst Kruijff, Professor of Human-Computer Interaction at Bonn-Rhein-Sieg University of Applied Sciences

+

Title: Multi-sensory feedback for 3D User Interfaces

+

Watch: Recording (Password protected)

+
+

Aykut Erdem (Koç University)

+
+Details +

Date: +23rd November 2022

+

Presenter: Aykut Erdem, Associate Professor at Koç University.

+

Title: Disentangling Content and Motion for Text-Based Neural Video Manipulation

+

Watch: Recording (Password protected)

+
+

Gül Varol (École des Ponts ParisTech)

+
+Details +

Date: +16th November 2022

+

Presenter: Gül Varol, Assistant Professor at École des Ponts ParisTech

+

Title: Controllable 3D human motion synthesis

+

Watch: Recording (Password protected)

+
+

Ana Serrano (Universidad de Zaragoza)

+
+Details +

Date: +2nd November 2022

+

Presenter: Ana Serrano, Universidad de Zaragoza

+

Title: Material Appearance Perception and Applications

+

Watch: Recording (Password protected)

+
+

Praneeth Chakravarthula (Princeton University)

+
+Details +

Date: +27th October 2022

+

Presenter: Praneeth Chakravarthula, Research Scholar at Princeton University

+

Title: The Present Developments and Future Challenges of Holographic Near-Eye Displays

+

Watch: Recording (Password protected)

+
+

Koki Nagano (NVIDIA)

+
+Details +

Date: +12th October 2022

+

Presenter: Koki Nagano, Senior Research Scientist at NVIDIA

+

Title: Frontiers of Neural Human Synthesis

+

Watch: Recording (Password protected)

+
+

Peter Wonka (King Abdullah University of Science and Technology)

+
+Details +

Date: +28th September 2022

+

Presenter: Peter Wonka, Professor of Computer Science at King Abdullah University of Science and Technology (KAUST) and Interim Director of the Visual Computing Center (VCC)

+

Title: Recent Research Efforts for Building 3D GANs

+

Watch: Recording (Password protected)

+
+

Rob Lindeman (University of Canterbury)

+
+Details +

Date: +21st September 2022

+

Presenter: Rob Lindeman, Professor at the University of Canterbury

+

Title: Comfortable VR: Supporting Regular and Long-term Immersion

+

Watch: Recording (Password protected)

+
+

Felix Heide (Princeton University)

+
+Details +

Date: +7th September 2022

+

Presenter: Felix Heide, Assistant Professor at Princeton University and Co-Founder and Chief Technology Officer of self-driving vehicle startup Algolux

+

Title: Neural Nanophotonic Cameras

+

Watch: Recording (Password protected)

+
+

Yulia Gryaditskaya (Surrey Institute for People-Centred Artificial Intelligence)

+
+Details +

Date: +1st June 2022

+

Presenter: Yulia Gryaditskaya, Assistant Professor at CVSSP and the Surrey Institute for People-Centred Artificial Intelligence

+

Title: Amateur Sketches

+

Watch: Recording (Password protected)

+
+

Michael Bauer (NVIDIA)

+
+Details +

Date: +25th May 2022

+

Presenter: Michael Bauer, Principal Scientist at NVIDIA

+

Title: Running Unmodified NumPy Programs on Hundreds of GPUs with cuNumeric

+

Watch: Recording (Password protected)

+
+

Mark Pauly (EPFL)

+
+Details +

Date: +18th May 2022

+

Presenter: Mark Pauly, Professor of Computer Graphics at École polytechnique fédérale de Lausanne

+

Title: Computational Inverse Design of Deployable Structures

+

Watch: Recording (Password protected)

+
+

Tuanfeng Wang (Adobe)

+
+Details +

Date: +11th May 2022

+

Presenter: Tuanfeng Wang, Research Scientist at Adobe

+

Title: Synthesizing dynamic human appearance

+

Watch: Recording (Password protected)

+
+

Tim Weyrich (FAU and UCL)

+
+Details +

Date: +4th May 2022

+

Presenter: Tim Weyrich, Professor at Friedrich-Alexander-Universität Erlangen-Nürnberg and Professor of Visual Computing at University College London

+

Title: Digital Reality: Visual Computing Interacting With The Real World

+

Watch: Recording (Password protected)

+
+

Sanjeev Muralikrishnan (UCL)

+
+Details +

Date: +27th April 2022

+

Presenter: Sanjeev Muralikrishnan, PhD student at UCL

+

Title: GLASS: Geometric Latent Augmentation For Shape Spaces

+

Watch: Recording (Password protected)

+
+

Valentin Deschaintre (Adobe)

+
+Details +

Date: +20th April 2022

+

Presenter: Valentin Deschaintre, Research Scientist at Adobe

+

Title: Material Creation for Virtual Environments

+

Watch: Recording (Password protected)

+
+

Dan Archer (University College London) and Animesh Karnewar (University College London)

+
+Details +

Date: +23rd March 2022

+

Presenters:

+
    +
  • Dan Archer, PhD Student at University College London
  • +
  • Animesh Karnewar, PhD Student at University College London
  • +
+

Title:

+
    +
  • Optimizing Performance through Stress and Embodiment Levels in Virtual Reality Using Autonomic Responses
  • +
  • ReLU Fields: The Little Non-linearity That Could ...
  • +
+

Watch: Recording (Password protected)

+
+

Oya Celiktutan (King's College London)

+
+Details +

Date: +23rd March 2022

+

Presenter: Oya Celiktutan, Assistant Professor at King's College London

+

Title: Towards Building Socially Informed and Adaptive Robotic Systems

+

Watch: Recording (Password protected)

+
+

Iuri Frosio (NVIDIA)

+
+Details +

Date: +17th March 2022

+

Presenter: Iuri Frosio, Principal Research Scientist at NVIDIA

+

Title: Research & videogames @ NVIDIA – the cases of saliency estimation and cheating prevention

+

Watch: Recording (Password protected)

+
+

Avi Bar-Zeev (RealityPrime)

+
+Details +

Date: +9th March 2022

+

Presenter: Avi Bar-Zeev

+

Title: Beyond Meta - AR and the Road Ahead

+

Watch: Recording (Password protected)

+
+

Vinoba Vinayagamoorthy (British Broadcasting Corporation)

+
+Details +

Date: +2nd March 2022

+

Presenter: Vinoba Vinayagamoorthy, Researcher at British Broadcasting Corporation

+

Title: Designing for the Future: Exploring the Impact of (Immersive) Experiences on BBC Audiences

+

Watch: Recording (Password protected)

+
+

Laura Waller (University of California, Berkeley)

+
+Details +

Date: +23rd February 2022

+

Presenter: Laura Waller, Associate Professor, Department of Electrical Engineering and Computer Sciences, University of California, Berkeley

+

Title: Computational Microscopy

+

Watch: Recording (Password protected)

+
+

Doğa Doğan (Massachusetts Institute of Technology)

+
+Details +

Date: +16th February 2022

+

Presenter: Doğa Doğan, PhD Candidate at Massachusetts Institute of Technology

+

Title: Unobtrusive Machine-Readable Tags for Seamless Interactions with Real-World Objects

+

Watch: Recording (Password protected)

+
+

Anthony Steed (University College London)

+
+Details +

Date: +2nd February 2022

+

Presenter: Anthony Steed, Professor at University College London

+

Title: So you want to build a Metaverse

+

Watch: Recording (Password protected)

+
+

2021

+

The 2021 seminar series was conducted with the help of several key people at University College London. Many of these seminars were coordinated by Kaan Akşit, who received help from Klara Brandstätter, Felix Thiel, Oliver Kingshott, Tobias Ritschel, Tim Weyrich, and Anthony Steed with the moderation and organization of several of these events.

+

Sebastian Friston (University College London)

+
+Details +

Date: +24th November 2021

+

Presenter: Sebastian Friston, Research Associate at University College London

+

Title: Ubiq

+

Watch: Recording (Password protected)

+
+

Wolfgang Stürzlinger (Simon Fraser University)

+
+Details +

Date: +17th November 2021

+

Presenter: Wolfgang Stürzlinger, Professor at Simon Fraser University

+

Title: Current Challenges and Solutions for Virtual and Augmented Reality

+

Watch: Recording (Password protected)

+
+

Nels Numan (University College London) and Koray Kavaklı (Koç University)

+
+Details +

Date: +10th November 2021

+

Presenters:

+
    +
  • Koray Kavaklı, MSc student at Koç University
  • +
  • Nels Numan, PhD student at University College London
  • +
+

Title:

+
    +
  • Learned Holographic Light Transport
  • +
  • Asymmetric Collaborative Mixed Reality
  • +
+

Watch: Recording (Password protected)

+
+

David Swapp (University College London)

+
+Details +

Date: +3rd November 2021

+

Presenter: David Swapp, Senior Research Fellow at University College London

+

Title: Who are VR systems designed for?

+

Watch: Recording (Password protected)

+
+

Katharina Krösl (VRVis Zentrum für Virtual Reality und Visualisierung)

+
+Details +

Date: +20th October 2021

+

Presenter: Katharina Krösl, Researcher at VRVis Zentrum für Virtual Reality und Visualisierung

+

Title: Simulating Vision Impairments in XR

+

Watch: Recording (Password protected)

+
+

Morgan McGuire (Roblox)

+
+Details +

Date: +14th October 2021

+

Presenter: Morgan McGuire, Chief Scientist at Roblox

+

Title: Metaverse Research

+

Watch: Recording (Password protected)

+
+

Wenzel Jakob (École Polytechnique Fédérale de Lausanne)

+
+Details +

Date: +6th October 2021

+

Presenter: Wenzel Jakob, Assistant Professor at École Polytechnique Fédérale de Lausanne

+

Title: Differentiable Simulation of Light

+

Watch: Recording (Password protected)

+
+

Gordon Wetzstein (Stanford University)

+
+Details +

Date: +29th September 2021

+

Presenter: Gordon Wetzstein, Associate Professor at Stanford University

+

Title: Towards Neural Signal Processing and Imaging

+

Watch: Recording (Password protected)

+
+

Anjul Patney (NVIDIA)

+
+Details +

Date: +22nd September 2021

+

Presenter: Anjul Patney, Principal Scientist at NVIDIA

+

Title: Peripheral Perception & Pixels

+

Watch: Recording (Password protected)

+
+

Douglas Lanman (Facebook)

+
+Details +

Date: +15th September 2021

+

Presenter: Douglas Lanman, Director of Display Systems Research at Facebook Reality Labs and Affiliate Instructor at the University of Washington

+

Title: How to Pass the Visual Turing Test with AR/VR Displays

+

Watch: Recording (Password protected)

+
+

Sylvia Xueni Pan (Goldsmiths, University of London)

+
+Details +

Date: +8th September 2021

+

Presenter: Sylvia Xueni Pan, Lecturer in Graphics at Goldsmiths, University of London

+

Title: Virtual Social Interaction in VR

+

Watch: Recording (Password protected)

+
+

Duygu Ceylan (Adobe)

+
+Details +

Date: +28th July 2021

+

Presenter: Duygu Ceylan, Senior Research Scientist at Adobe

+

Title: Neural Dynamic Characters

+

Watch: Recording (Password protected)

+
+

Oliver Kingshott and Michael Fischer (University College London)

+
+Details +

Date: +21st July 2021

+

Presenters:

+
    +
  • Oliver Kingshott, MSc student at University College London
  • +
  • Michael Fischer, PhD student at University College London
  • +
+

Title:

+
    +
  • Lensless Learning
  • +
  • Learning to Overfit
  • +
+

Watch: Recording (Password protected)

+
+

Yuta Itoh (Tokyo Institute of Technology)

+
+Details +

Date: +14th July 2021

+

Presenter: Yuta Itoh, Project Associate Professor at the University of Tokyo

+

Title: Vision Augmentation: overwriting our visual world via computation

+

Watch: Recording (Password protected)

+
+

Kaan Akşit (University College London)

+
+Details +

Date: +7th July 2021

+

Presenter: Kaan Akşit, Associate Professor at University College London

+

Title: Towards remote pixelless displays

+

Watch: Recording (Password protected)

+
+

Cengiz Öztireli (University of Cambridge, Google)

+
+Details +

Date: +28th June 2021

+

Presenter: Cengiz Öztireli, Associate Professor at the University of Cambridge and Senior Researcher at Google

+

Title: 3D Digital Reality - Modeling for Perception

+

Watch: Recording (Password protected)

+
+

Paul Linton (City, University of London)

+
+Details +

Date: +23rd June 2021

+

Presenter: Paul Linton, Research Fellow at the Centre for Applied Vision Research, City, University of London

+

Title: Size and Distance Perception for Virtual Reality

+

Watch: Recording (Password protected)

+
+

Luca Morreale and Lisa Izzouzi (University College London)

+
+Details +

Date: +16th June 2021

+

Presenters:

+
    +
  • Luca Morreale, PhD student at University College London
  • +
  • Lisa Izzouzi, PhD student at University College London
  • +
+

Title:
  • Interpretable Neural Surface Maps
  • Meaningful meetups in Virtual Reality

+

Watch: Recording (Password protected)

+
+

Rafał Mantiuk (Cambridge University)

+
+Details +

Date: +9th June 2021

+

Presenter: Rafał Mantiuk, Reader in Graphics and Displays at the University of Cambridge

+

Title: Modelling the quality of high frame-rate graphics for adaptive refresh rate and resolution

+

Watch: Recording (Password protected)

+
+

Peter Shirley (NVIDIA)

+
+Details +

Date: +2nd June 2021

+

Presenter: Peter Shirley, Distinguished Research Scientist at NVIDIA

+

Title: A tour of the rapidly moving target of computer graphics

+

Watch: Recording (Password protected)

+
+

David Walton and Rafael Kuffner dos Anjos (University College London)

+
+Details +

Date: +26th May 2021

+

Presenters:

+
    +
  • David Walton, Postdoctoral researcher at University College London
  • +
  • Rafael Kuffner dos Anjos, Postdoctoral researcher at University College London
  • +
+

Title:

+
    +
  • Beyond Blur: Ventral Metamers for Foveated Rendering
  • +
  • Metameric Inpainting for Image Warping
  • +
+

Watch: Recording (Password protected)

+
+

Tobias Ritschel (University College London)

+
+Details +

Date: +19th May 2021

+

Presenter: Tobias Ritschel, Professor of Computer Graphics at University College London

+

Title: Blue noise plots

+

Watch: Not recorded

+
+

Philip Henzler and David Griffiths (University College London)

+
+Details +

Date: +12th May 2021

+

Presenters:

+
    +
  • Philip Henzler, PhD student at University College London
  • +
  • David Griffiths, PhD student at University College London
  • +
+

Title:

+
    +
  • Generative Modelling of BRDF Textures from Flash Images
  • +
  • 3D object detection without scene labels
  • +
+

Watch: Recording (Password protected)

+
+

Klara Brandstätter and Felix Thiel (University College London)

+
+Details +

Date: +5th May 2021

+

Presenters:

+
    +
  • Klara Brandstätter, PhD student at University College London
  • +
  • Felix Thiel, PhD student at University College London
  • +
+

Title:

+
    +
  • Creating Lively Interactive Populated Environments
  • +
  • You have control. I have control
  • +
+

Watch: Recording (Password protected)

+
+

Victoria Rege and Alex Titterton (Graphcore)

+
+Details +

Date: +28th April 2021

+

Presenters:

+
    +
  • Victoria Rege, Director, Alliances & Strategic Partnerships at Graphcore
  • +
  • Alex Titterton, Field Engineer at Graphcore (and former CERN Physicist)
  • +
+

Title: Next in Machine Intelligence

+

Watch: Recording (Password protected)

+
+ + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/people/chuanjun_zheng.png b/people/chuanjun_zheng.png new file mode 100644 index 00000000..948cce41 Binary files /dev/null and b/people/chuanjun_zheng.png differ diff --git a/people/david_walton.png b/people/david_walton.png new file mode 100644 index 00000000..6a8b4a7d Binary files /dev/null and b/people/david_walton.png differ diff --git a/people/debosmit_neogi.png b/people/debosmit_neogi.png new file mode 100644 index 00000000..7977d517 Binary files /dev/null and b/people/debosmit_neogi.png differ diff --git a/people/doga_yilmaz.png b/people/doga_yilmaz.png new file mode 100644 index 00000000..d237b1a5 Binary files /dev/null and b/people/doga_yilmaz.png differ diff --git a/people/douglas_lanman.png b/people/douglas_lanman.png new file mode 100644 index 00000000..0cfdc1a9 Binary files /dev/null and b/people/douglas_lanman.png differ diff --git a/people/gbemisola_akinola_alli.png b/people/gbemisola_akinola_alli.png new file mode 100644 index 00000000..2bbb9712 Binary files /dev/null and b/people/gbemisola_akinola_alli.png differ diff --git a/people/hakan_urey.png b/people/hakan_urey.png new file mode 100644 index 00000000..f59ed8aa Binary files /dev/null and b/people/hakan_urey.png differ diff --git a/people/henry_kam.png b/people/henry_kam.png new file mode 100644 index 00000000..bf608fdc Binary files /dev/null and b/people/henry_kam.png differ diff --git a/people/index.html b/people/index.html new file mode 100644 index 00000000..5a4635b8 --- /dev/null +++ b/people/index.html @@ -0,0 +1,2710 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Current members and alumni - Computational Light Laboratory at University College London + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

People

+

This page will give you a complete list of our current members. +At the end of the page, you will also find our alumni list as a separate section.

+

Current members

+

All our current members are located at 169 Euston Road, London NW1 2AE, United Kingdom.

+

Faculty

+
+ +
+

Kaan Akşit

+

Associate Professor of Computational Light

+

E-mail

+

Office: R409 +

+

Doctoral students

+
+ +
+

Yicheng Zhan

+

Ph.D. Student

+

E-mail

+

Office: R404.188 +

+
+ +
+

Ziyang Chen

+

Ph.D. Student

+

E-mail

+

Office: R404.187 +

+

Interns

+
+ +
+

Henry Kam

+

MSc Student (New York University)

+

E-mail

+

Office: Virtual +

+
+ +
+

Weijie Xie

+

MSc Student (University College London)

+

E-mail

+

Office: Virtual +

+
+ +
+

Chuanjun Zheng

+

MSc Student (Shenzhen University)

+

E-mail

+

Office: Virtual +

+
+ +
+

Koray Kavaklı

+

Ph.D. Student (Koç University)

+

E-mail

+

Office: Virtual +

+

Alumni

+

Post-Doctoral Researchers

+
    +
  • David Robert Walton, Investigation on perceptually guided display technology, 2021-2022, Next: Lecturer at Birmingham City University.
  • +
+

Master Students

+
    +
  • Doğa Yılmaz, Learned Single-Pass Multitasking Perceptual Graphics for Immersive Displays, 2024, Next: Ph.D. student at University College London.
  • +
  • Weijie Xie, Learned Method for Computer Generated Hologram, 2024, Next: Intern Researcher at University College London.
  • +
  • Pengze Li, Text to hologram, 2024, Next: -.
  • +
  • Ziyang Chen, Speckle imaging with a lensless camera, 2023, Next: Ph.D. student at University College London.
  • +
  • Jeanne Beyazian, Hologram Compression, 2022, Next: Computer Vision Developer at Glimpse Analytics.
  • +
  • Yilin Qu, Predicting Next Frames of a RGBD video, 2022, Next: Machine Learning Software Engineer at Qualcomm Incorporated.
  • +
  • Gbemisola Akinola-Alli, Differentiable Ray Tracing for Designing Optical Parts, 2022, Next: Senior Engineer at MBDA.
  • +
  • Oliver Kingshott, Learned Point-spread Functions for Lensless Imaging, 2021, Next: Ph.D. Student at University College London.
  • +
  • Koray Kavaklı, Towards Improving Visual Quality in Computer-Generated Holography, 2021, Next: Ph.D. Student at Koç University.
  • +
  • Chengkun Li, Neural Optical Beam Propagation, 2021, Next: Ph.D. student at the Chinese University of Hong Kong.
  • +
  • Yuze Yang, Learned 3D Representations: Point Cloud, Depth Maps and Holograms, 2021, Next: -.
  • +
+

Research Interns

+
    +
  • Ahmet Hamdi Güzel, Perceptual Prescription Correction, 2022-2024, Next: Ph.D. Student at University College London.
  • +
  • Yichen Zou, 3D Dataset generation, 2022, Next: Graduate Student at McGill University.
  • +
  • Nerea Sainz De La Maza, Printable camera casing design, 2022, Next: Bachelor of Science at University College London.
  • +
  • Kerem Eroğlu, Embedding data to images, 2022, Next: MEng at University College London.
  • +
  • Serhat Aksoy, Volume rendering tool, 2022, Next: Bachelor of Science at Istanbul Technical University.
  • +
  • Debosmit Neogi, Compressing RGBD data, 2022, Next: Master of Science at University at Buffalo.
  • +
  • Josh Kaizer, as a part of In2Science UK programme, 2022, Next: -.
  • +
  • Abubakar Sharif, as a part of In2Science UK programme, 2022, Next: -.
  • +
+ + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/people/jeanne_beyazian.png b/people/jeanne_beyazian.png new file mode 100644 index 00000000..478588f5 Binary files /dev/null and b/people/jeanne_beyazian.png differ diff --git a/people/josef_spjut.png b/people/josef_spjut.png new file mode 100644 index 00000000..32848345 Binary files /dev/null and b/people/josef_spjut.png differ diff --git a/people/kaan_aksit.png b/people/kaan_aksit.png new file mode 100644 index 00000000..aa3f820a Binary files /dev/null and b/people/kaan_aksit.png differ diff --git a/people/kerem_eroglu.png b/people/kerem_eroglu.png new file mode 100644 index 00000000..029e9431 Binary files /dev/null and b/people/kerem_eroglu.png differ diff --git a/people/klara_brandstatter.png b/people/klara_brandstatter.png new file mode 100644 index 00000000..a67d0278 Binary files /dev/null and b/people/klara_brandstatter.png differ diff --git a/people/koray_kavakli.png b/people/koray_kavakli.png new file mode 100644 index 00000000..cb6f12d3 Binary files /dev/null and b/people/koray_kavakli.png differ diff --git a/people/liang_shi.png b/people/liang_shi.png new file mode 100644 index 00000000..8978893f Binary files /dev/null and b/people/liang_shi.png differ diff --git a/people/master_thesis/2021/chengkun_li_neural_optical_beam_propagation.pdf b/people/master_thesis/2021/chengkun_li_neural_optical_beam_propagation.pdf new file mode 100644 index 00000000..a176a74f Binary files /dev/null and b/people/master_thesis/2021/chengkun_li_neural_optical_beam_propagation.pdf differ diff --git a/people/master_thesis/2021/yuze_yang_learned_3d_representations_point_cloud_depth_maps_and_holograms.pdf b/people/master_thesis/2021/yuze_yang_learned_3d_representations_point_cloud_depth_maps_and_holograms.pdf new file mode 100644 index 00000000..0a4d2d34 Binary files /dev/null and b/people/master_thesis/2021/yuze_yang_learned_3d_representations_point_cloud_depth_maps_and_holograms.pdf differ diff --git a/people/mustafa_doga_dogan.png b/people/mustafa_doga_dogan.png new file mode 100644 index 00000000..b679eb12 Binary files /dev/null and b/people/mustafa_doga_dogan.png differ diff --git a/people/nerea_sainz_de_la_maza_melon.png b/people/nerea_sainz_de_la_maza_melon.png new file mode 100644 index 00000000..ff164c40 Binary files /dev/null and b/people/nerea_sainz_de_la_maza_melon.png differ diff --git a/people/nick_antipa.png b/people/nick_antipa.png new file mode 100644 index 00000000..6fd33d75 Binary files /dev/null and b/people/nick_antipa.png differ diff --git a/people/oliver_kingshott.png b/people/oliver_kingshott.png new file mode 100644 index 00000000..a05c113b Binary files /dev/null and b/people/oliver_kingshott.png differ diff --git a/people/ozan_cakmakci.png b/people/ozan_cakmakci.png new file mode 100644 index 00000000..c4f2b86b Binary files /dev/null and b/people/ozan_cakmakci.png differ diff --git a/people/rafal_mantiuk.png b/people/rafal_mantiuk.png new file mode 100644 index 00000000..a66c54df Binary files /dev/null and b/people/rafal_mantiuk.png differ diff --git a/people/serhat_aksoy.png b/people/serhat_aksoy.png new file mode 100644 index 00000000..30ea70a7 Binary files /dev/null and b/people/serhat_aksoy.png differ diff --git a/people/weijie_xie.png b/people/weijie_xie.png new file mode 100644 index 00000000..2a95ddbc Binary files /dev/null and b/people/weijie_xie.png differ diff --git a/people/wojciech_matusik.png b/people/wojciech_matusik.png new file mode 100644 index 00000000..eeffb8a8 Binary files /dev/null and 
b/people/wojciech_matusik.png differ diff --git a/people/yichen_zou.png b/people/yichen_zou.png new file mode 100644 index 00000000..c74e3fc4 Binary files /dev/null and b/people/yichen_zou.png differ diff --git a/people/yicheng_zhan.png b/people/yicheng_zhan.png new file mode 100644 index 00000000..65408162 Binary files /dev/null and b/people/yicheng_zhan.png differ diff --git a/people/yilin_qu.png b/people/yilin_qu.png new file mode 100644 index 00000000..b2fd3b0a Binary files /dev/null and b/people/yilin_qu.png differ diff --git a/people/yuta_itoh.png b/people/yuta_itoh.png new file mode 100644 index 00000000..05da3d38 Binary files /dev/null and b/people/yuta_itoh.png differ diff --git a/people/ziyang_chen.png b/people/ziyang_chen.png new file mode 100644 index 00000000..97feda16 Binary files /dev/null and b/people/ziyang_chen.png differ diff --git a/publications/focal_surface_light_transport/index.html b/publications/focal_surface_light_transport/index.html new file mode 100644 index 00000000..fa1fedd7 --- /dev/null +++ b/publications/focal_surface_light_transport/index.html @@ -0,0 +1,2703 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Focal Surface Holographic Light Transport using Learned Spatially Adaptive Convolutions - Computational Light Laboratory at University College London + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Focal Surface Holographic Light Transport using Learned Spatially Adaptive Convolutions

+

People

+ + + + + + + + + + + + + + + + + +

Chuanjun Zheng1

Yicheng Zhan1

Liang Shi2

Ozan Cakmakci3

Kaan Akşit1

+

+1University College London, +2Massachusetts Institute of Technology, +3Google +

+

SIGGRAPH Asia 2024 Technical Communications

+ +

Resources

+

Manuscript + Supplementary + Code

+
+ Bibtex +
@inproceedings{zheng2024focalholography,
+  title={Focal Surface Holographic Light Transport using Learned Spatially Adaptive Convolutions},
+  author={Chuanjun Zheng and Yicheng Zhan and Liang Shi and Ozan Cakmakci and Kaan Ak{\c{s}}it},
+  booktitle = {SIGGRAPH Asia 2024 Technical Communications (SA Technical Communications '24)},
+  keywords = {Computer-Generated Holography, Light Transport, Optimization},
+  location = {Tokyo, Japan},
+  series = {SA '24},
+  month={December},
+  year={2024},
+  doi={https://doi.org/10.1145/3681758.3697989}
+}
+
+
+

Abstract

+

Computer-Generated Holography (CGH) is a set of algorithmic methods for identifying holograms that reconstruct Three-Dimensional (3D) scenes +in holographic displays. CGH algorithms decompose 3D scenes into multiple planes at different depth levels and rely on simulations +of light propagating from a source plane to a target plane. Thus, for \(n\) planes, CGH typically optimizes holograms using \(n\) plane-to-plane +light transport simulations, leading to major time and computational demands. Our work replaces multiple planes with a focal surface and introduces +a learned light transport model that can propagate a light field from a source plane to the focal surface in a single inference. Our model leverages +spatially adaptive convolution to achieve the depth-varying propagation demanded by targeted focal surfaces. The proposed model speeds up the hologram +optimization process by up to \(1.5x\), which contributes to hologram dataset generation and the training of future learned CGH models.

+

Focal Surface Holographic Light Transport

+

Simulating light propagation among multiple planes in a 3D volume is computationally +demanding, as the volume is represented with multiple planes and each plane requires +a separate light propagation calculation to reconstruct the target image. Thus, +for \(n\) planes, conventional light transport simulation methods require \(n\) plane-to-plane +simulations, leading to major time and computational demands. Our work replaces multiple +planes with a focal surface and introduces a learned light transport model that can +propagate a light field from a source plane to the focal surface in a single inference, +reducing simulation time by \(10x\).

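To make the cost argument concrete, below is a minimal NumPy sketch of conventional plane-to-plane light transport with the Angular Spectrum Method. This is our own illustrative example rather than the paper's code; the wavelength, pixel pitch, resolution, and depth values are assumptions chosen only for demonstration. The learned focal surface model described above would replace the per-plane loop with a single inference.

import numpy as np

def angular_spectrum_propagate(field, wavelength, dx, distance):
    # Conventional plane-to-plane light transport (Angular Spectrum Method).
    ny, nx = field.shape
    fx = np.fft.fftfreq(nx, d=dx)
    fy = np.fft.fftfreq(ny, d=dx)
    FX, FY = np.meshgrid(fx, fy)
    arg = 1.0 / wavelength ** 2 - FX ** 2 - FY ** 2
    kz = 2.0 * np.pi * np.sqrt(np.maximum(arg, 0.0))
    transfer = np.exp(1j * kz * distance) * (arg > 0)  # drop evanescent components
    return np.fft.ifft2(np.fft.fft2(field) * transfer)

# Illustrative settings (assumed): 515 nm source, 8 um pixel pitch, 512 x 512 hologram.
wavelength, dx = 515e-9, 8e-6
hologram = np.exp(1j * np.random.uniform(0.0, 2.0 * np.pi, (512, 512)))
depths = np.linspace(1e-3, 6e-3, 6)  # six target planes
# Conventional route: one plane-to-plane simulation per plane (n simulations for n planes).
reconstructions = [angular_spectrum_propagate(hologram, wavelength, dx, z) for z in depths]
# The focal surface model instead maps the source field to a depth-varying focal surface
# in a single inference, so the per-plane loop above collapses to one forward pass.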
+
+

Image title

+
+

Results

+

When simulating a full-color, all-in-focus 3D image across a focal surface, the conventional +Angular Spectrum Method (ASM) requires eighteen forward passes to simulate the 3D image, given six depth planes and three color primaries. +In contrast, our model simulates the three color-primary images simultaneously +onto a focal surface with a single forward pass. +At the same time, our model preserves more high-frequency content than U-Net, providing +finer details and sharper edges that are closer to the ground truth.

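A quick count behind the numbers above (our own arithmetic, following the setting described in the text):

planes, color_primaries = 6, 3
asm_forward_passes = planes * color_primaries  # 18 plane-to-plane simulations per full-color image
ours_forward_passes = 1                        # one joint inference onto the focal surface
print(asm_forward_passes, ours_forward_passes)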
+
+

Image title

+
+

We utilize our model for a 3D phase-only hologram optimization application at a + \(0~mm\) propagation distance. Optimizing holograms with six target planes using ASM + is denoted as ASM 6, while Ours 6 denotes optimizing holograms using our model with six + focal surfaces. When comparing simulation results, all holograms are reconstructed using ASM for performance assessment. +Ours 6 achieves comparable results in about \(70\%\) of the optimization time of ASM 6.

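For readers who want a feel for where the time saving comes from, here is a minimal, generic sketch of gradient-descent phase-only hologram optimization in PyTorch. It is our illustration, not the authors' implementation; forward_model is a hypothetical callable standing in for either a stack of per-plane ASM propagations (as in ASM 6) or the learned focal surface model (as in Ours 6). Because the forward model is evaluated at every iteration, replacing many plane-to-plane simulations with a single inference shortens each iteration, which is consistent with the roughly \(30\%\) reduction in optimization time reported above.

import torch

def optimize_phase_hologram(target, forward_model, steps=200, lr=0.1):
    # target: real-valued tensor holding the desired intensity on the target surface(s).
    # forward_model: hypothetical callable mapping a complex hologram field to the
    # reconstructed complex field (e.g., per-plane ASM or a learned focal surface model).
    phase = torch.zeros_like(target, requires_grad=True)
    optimizer = torch.optim.Adam([phase], lr=lr)
    for _ in range(steps):
        optimizer.zero_grad()
        field = torch.polar(torch.ones_like(phase), phase)   # unit-amplitude, phase-only hologram
        reconstruction = forward_model(field).abs() ** 2     # simulated intensity
        loss = torch.nn.functional.mse_loss(reconstruction, target)
        loss.backward()
        optimizer.step()
    return phase.detach()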
+
+

Image title

+
+

We also apply our model to 3D phase-only hologram optimization at a \(10~mm\) propagation distance.

+
+

Image title

+
+

Relevant research works

+

Here are relevant research works from the authors:

+ +

Outreach

+

We host a Slack group with more than 250 members. +This Slack group focuses on the topics of rendering, perception, displays and cameras. +The group is open to the public, and you can become a member by following this link.

+

Contact Us

+
+

Warning

+

Please reach us through email to provide your feedback and comments.

+
+ + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/publications/holobeam/index.html b/publications/holobeam/index.html new file mode 100644 index 00000000..e51e6ea4 --- /dev/null +++ b/publications/holobeam/index.html @@ -0,0 +1,2726 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + HoloBeam: Paper-Thin Near-Eye Displays - Computational Light Laboratory at University College London + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

HoloBeam: Paper-Thin Near-Eye Displays

+

People

+ + + + + + + + + + + +

Kaan Akşit1

Yuta Itoh2

+

1University College London, 2The University of Tokyo

+

IEEE VR 2023

+ +

Resources

+

Manuscript + Code

+
+ Bibtex +
@inproceedings{aksit2022holobeam,
+  title = "HoloBeam: Paper-Thin Near-Eye Displays",
+  author = "Akşit, Kaan and Itoh, Yuta",
+  booktitle ={2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)},
+  pages = {581--591},
+  year = {2023},
+}
+
+
+

Presentation

+

+ +

+ +

Abstract

+

An emerging alternative to conventional Augmented Reality (AR) glasses designs, Beaming displays promise slim AR glasses free from challenging design trade-offs, including battery-related limits or computational budget-related issues. +These beaming displays remove active components such as batteries and electronics from AR glasses and move them to a projector that projects images to a user from a distance (1-2 meters), where users wear only passive optical eyepieces. +However, earlier implementations of these displays delivered poor resolutions (7 cycles per degree) without any optical focus cues and were introduced with a bulky form-factor eyepiece (\(\sim50~mm\) thick). +This paper introduces a new milestone for beaming displays, which we call HoloBeam. +In this new design, a custom holographic projector populates a micro-volume located at some distance (1-2 meters) with multiple planes of images. +Users view magnified copies of these images from this small volume with the help of an eyepiece that is either a Holographic Optical Element (HOE) or a set of lenses. +Our HoloBeam prototypes demonstrate the thinnest AR glasses to date with a submillimeter thickness (e.g., the HOE film is only \(120~\mu m\) thick). +In addition, HoloBeam prototypes demonstrate near retinal resolutions (\(24\) cycles per degree) with a \(70\)-degree-wide field of view.

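As a rough, back-of-the-envelope check of the resolution claim (our own estimate, not a figure from the paper): resolving \(24\) cycles per degree across a \(70\) degree field of view requires, at the Nyquist limit, at least \(2 \times 24 \times 70 = 3360\) addressable pixels along the horizontal axis of the delivered image, which hints at why a holographic projector with a large space-bandwidth product is needed on the transmitter side.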
+

Results

+

As a next step for Beaming Displays, our work offers the thinnest and most lightweight near-eye display to date. +Our wearable eyepieces can be just a lens or a holographic optical element.

+
+

Image title

+
+

In order to beam images to our eyepieces, we built a phase-only holographic projector.

+
+

Image title

+
+

We also show that a cheaper alternative of this projector could be built using common spatial light modulators.

+
+

Image title

+
+

In this work, we demonstrate the first Beaming Displays that can generate multiplane images using Computer-Generated Holography. +The image below is an animation showing a focal sweep of images.

+
+

Image title

+
+

Although we showed monochrome results mostly, HoloBeam can also show full color images.

+
+

Image title

+
+

Relevant research works

+

Here are relevant research works from the authors:

+ +

Outreach

+

We host a Slack group with more than 250 members. +This Slack group focuses on the topics of rendering, perception, displays and cameras. +The group is open to the public, and you can become a member by following this link.

+

Contact Us

+
+

Warning

+

Please reach us through email to provide your feedback and comments.

+
+

Acknowledgements

+

The authors would like to thank the reviewers for their valuable feedback. +The authors also wish to thank Koray Kavaklı for fruitful discussions.

+
+ + +
+

Kaan Akşit is supported by the Royal Society's RGS\R2\212229 - Research Grants 2021 Round 2 in building the hardware prototype. Kaan Akşit is also supported by Meta Reality Labs inclusive rendering initiative 2022. +
+
+
+
+
+
+

+
+ +
+

Yuta Itoh is supported by the JST FOREST Grant Number JPMJFR206E and JSPS KAKENHI Grant Number JP20J14971, 20H05958, and 21K19788, Japan. +
+
+
+
+
+
+

+ + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/publications/index.html b/publications/index.html new file mode 100644 index 00000000..9041dc62 --- /dev/null +++ b/publications/index.html @@ -0,0 +1,3762 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + List of publications - Computational Light Laboratory at University College London + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Publications

+

2024

+
+ +
+

Focal Surface Holographic Light Transport using Learned Spatially Adaptive Convolutions

+

+

Chuanjun Zheng, +Yicheng Zhan, +Liang Shi, +Ozan Cakmakci, +and Kaan Akşit

+

Project site + Manuscript +Supplementary + Code

+
+ Bibtex +
    @inproceedings{zheng2024focalholography,
+      title={Focal Surface Holographic Light Transport using Learned Spatially Adaptive Convolutions},
+      author={Chuanjun Zheng and Yicheng Zhan and Liang Shi and Ozan Cakmakci and Kaan Ak{\c{s}}it},
+      booktitle = {SIGGRAPH Asia 2024 Technical Communications (SA Technical Communications '24)},
+      keywords = {Computer-Generated Holography, Light Transport, Optimization},
+      location = {Tokyo, Japan},
+      series = {SA '24},
+      month={December},
+      year={2024},
+      doi={https://doi.org/10.1145/3681758.3697989}
+    }
+
+
+


+
+ +
+

SpecTrack: Learned Multi-Rotation Tracking via Speckle Imaging

+

+

Ziyang Chen, +Mustafa Doğa Doğan, +Josef Spjut, +and Kaan Akşit

+

Project site + Manuscript + Poster + Code + Project video

+
+ Bibtex +
    @inproceedings{chen2024spectrack,
+      author = {Ziyang Chen and Mustafa Dogan and Josef Spjut and Kaan Ak{\c{s}}it},
+      title = {SpecTrack: Learned Multi-Rotation Tracking via Speckle Imaging},
+      booktitle = {SIGGRAPH Asia 2024 Posters (SA Posters '24)},
+      year = {2024},
+      location = {Tokyo, Japan},
+      publisher = {ACM},
+      address = {New York, NY, USA},
+      pages = {2},
+      doi = {10.1145/3681756.3697875},
+      url = {https://doi.org/10.1145/3681756.3697875},
+      month = {December 03--06}
+    }
+
+
+


+
+ +
+

All-optical image denoising using a diffractive visual processor

+

+

Çağatay Işıl, +Tianyi Gan, +Fazil Onuralp Ardic, +Koray Mentesoglu, +Jagrit Digani, +Huseyin Karaca, +Hanlong Chen, +Jingxi Li, +Deniz Mengu, +Mona Jarrahi, +Kaan Akşit, +and Aydogan Ozcan

+

Publisher site + Manuscript

+
+ Bibtex +
    @article{Işıl2024,
+      author = {I{\c{s}}{\i}l, {\c{C}}a{\u{g}}atay and Gan, Tianyi and Ardic, Fazil Onuralp and Mentesoglu, Koray and Digani, Jagrit and Karaca, Huseyin and Chen, Hanlong and Li, Jingxi and Mengu, Deniz and Jarrahi, Mona and Ak{\c{s}}it, Kaan and Ozcan, Aydogan},
+      title = {All-optical image denoising using a diffractive visual processor},
+      journal = {Light: Science {\&} Applications},
+      year = {2024},
+      month = feb,
+      day = {04},
+      volume = {13},
+      number = {1},
+      pages = {43},
+      issn = {2047-7538},
+      doi = {10.1038/s41377-024-01385-6},
+      url = {https://doi.org/10.1038/s41377-024-01385-6}
+   }
+
+
+


+
+ +
+

Autocolor: Learned Light Power Control for Multi-Color Holograms

+

+

Yicheng Zhan, +Hakan Urey, +Qi Sun, +and Kaan Akşit

+

Project site + Manuscript + Code

+
+ Bibtex +
    @article{zhan2023autocolor,
+      title = {AutoColor: Learned Light Power Control for Multi-Color Holograms},
+      author = {Zhan, Yicheng and Sun, Qi and Akşit, Kaan},
+      journal  = "arxiv",
+      year = {2023},
+      month = may,
+    }
+
+
+


+

2023

+
+ +
+

Multi-color Holograms Improve Brightness in Holographic Displays

+

+

Koray Kavaklı, +Liang Shi, +Hakan Urey, +Wojciech Matusik, +and Kaan Akşit

+

Project site + Manuscript + Code + Project video

+
+ Bibtex +
    @inproceedings{kavakli2023multicolor,
+      title={Multi-color Holograms Improve Brightness in Holographic Displays},
+      author={Kavaklı, Koray and Shi, Liang and Urey, Hakan and Matusik, Wojciech and Akşit, Kaan},
+      booktitle = {SIGGRAPH Asia 2023 Conference Papers},
+      articleno = {20},
+      numpages = {11},
+      keywords = {Brightness, Computer-generated holography, Holographic displays},
+      location = {Sydney, NSW, Australia},
+      series = {SA '23},
+      month={December},
+      year={2023},
+      doi={https://doi.org/10.1145/3610548.3618135}
+    }
+
+
+


+
+ +
+

ChromaCorrect: Prescription Correction in Virtual Reality Headsets through Perceptual Guidance

+

+

Ahmet Güzel, +Jeanne Beyazian, +Praneeth Chakravarthula, +and Kaan Akşit

+

Project site + Manuscript + Code + Project video

+
+ Bibtex +
    @ARTICLE{guzel2022prescription,
+      title    = "ChromaCorrect: Prescription Correction in Virtual Reality Headsets through Perceptual Guidance",
+      author   = "Güzel, Ahmet and Beyazian, Jeanne and Chakravarthula, Praneeth and Akşit, Kaan",
+      journal  = "Biomedical Optics Express",
+      month    =  jan,
+      year     =  2023,
+    }
+
+
+


+
+ +
+

HoloBeam: Paper-Thin Near-Eye Displays

+

+

Kaan Akşit +and Yuta Itoh

+

Project site + Manuscript + Code

+
+ Bibtex +
    @ARTICLE{aksit2022holobeam,
+      title    = "HoloBeam: Paper-Thin Near-Eye Displays",
+      author   = "Akşit, Kaan and Itoh, Yuta",
+      journal  = "IEEE VR 2023",
+      month    =  Mar,
+      year     =  2023,
+    }
+
+
+


+
+ +
+

Realistic Defocus Blur for Multiplane Computer-Generated Holography

+

+

Koray Kavaklı, +Yuta Itoh, +Hakan Urey +and Kaan Akşit

+

Project site + Manuscript + Project video + Code

+
+ Bibtex +
    @misc{kavakli2022realisticdefocus,
+      doi = {10.48550/ARXIV.2205.07030},
+      url = {https://arxiv.org/abs/2205.07030},
+      author = {Kavaklı, Koray and Itoh, Yuta and Urey, Hakan and Akşit, Kaan},
+      keywords = {Computer Vision and Pattern Recognition (cs.CV), Graphics (cs.GR), FOS: Computer and information sciences, FOS: Computer and information sciences, I.3.3},
+      title = {Realistic Defocus Blur for Multiplane Computer-Generated Holography},
+      publisher = {IEEE VR 2023},
+      month = {Mar},
+      year = {2023},
+      copyright = {Creative Commons Attribution Non Commercial No Derivatives 4.0 International}
+    }
+
+
+


+

2022

+
+ +
+

Metameric Inpainting for Image Warping

+

+

Rafael Kuffner Dos Anjos, +David R. Walton, +Kaan Akşit, +Sebastian Friston, +David Swapp, +Anthony Steed +and Tobias Ritschel

+

Publisher site + Manuscript

+
+ Bibtex +
    @ARTICLE{Kuffner_Dos_Anjos2022-hm,
+        title    = "Metameric inpainting for image warping",
+        author   = "Kuffner Dos Anjos, Rafael and Walton, David R and Akşit, Kaan and
+                    Friston, Sebastian and Swapp, David and Steed, Anthony and
+                    Ritschel, Tobias",
+        journal  = "IEEE Trans. Vis. Comput. Graph.",
+        volume   = "PP",
+        month    =  oct,
+        year     =  2022,
+    }
+
+
+


+
+ +
+

Optimizing vision and visuals: lectures on cameras, displays and perception

+

+

Koray Kavaklı, +David Robert Walton, +Nick Antipa, +Rafał Mantiuk, +Douglas Lanman +and Kaan Akşit

+

Project site + Publisher site + Manuscript + Project video + Code

+
+ Bibtex +
    @incollection{kavakli2022optimizing,
+      title = {Optimizing vision and visuals: lectures on cameras, displays and perception},
+      author = {Kavaklı, Koray and Walton, David Robert and Antipa, Nick and Mantiuk, Rafał and Lanman, Douglas and Ak{\c{s}}it, Kaan},
+      booktitle = {ACM SIGGRAPH 2022 Courses},
+      pages = {1--66},
+      year = {2022},
+      doi = {https://doi.org/10.1145/3532720.3535650},
+      video = {https://youtu.be/z_AtSgct6_I},
+    }
+
+
+


+


+
+ +
+

Unrolled Primal-Dual Networks for Lensless Cameras

+

+

Oliver Kingshott, +Nick Antipa, +Emrah Bostan +and Kaan Akşit

+

Manuscript + Publisher site + Supplementary + Code

+
+ Bibtex +
    @article{kingshott2022unrolled,
+       selected={true},
+       title={Unrolled Primal-Dual Networks for Lensless Cameras},
+       author={Kingshott, Oliver and Antipa, Nick and Bostan, Emrah and Akşit, Kaan},
+       journal={Optics Express},
+       year={2022},
+       doi={https://doi.org/10.48550/arXiv.2203.04353}
+    }
+
+
+


+
+ +
+

Metameric Varifocal Holograms

+

+

David R. Walton, +Koray Kavaklı, +Rafael Kuffner Dos Anjos, +David Swapp, +Tim Weyrich, +Hakan Urey, +Anthony Steed, +Tobias Ritschel +and Kaan Akşit

+

Project site + Manuscript + Project video + Code

+
+ Bibtex +
    @article{walton2021metameric,
+             title={Metameric Varifocal Holography},
+             author={Walton, David R and Kavakl{\i}, Koray and Anjos, Rafael Kuffner dos and Swapp, David and Weyrich, Tim and Urey, Hakan and Steed, Anthony and Ritschel, Tobias and Ak{\c{s}}it, Kaan},
+             publisher = {IEEE VR},
+             month = {March},
+             year={2022}
+            }
+
+
+


+
+ +
+

Learned holographic light transport

+

+

Invited

+

Koray Kavaklı, +Hakan Urey +and Kaan Akşit

+

Publisher site + Manuscript + Code + Dataset

+
+ Bibtex +
    @article{Kavakli:22,
+      author = {Koray Kavakl{i} and Hakan Urey and Kaan Ak\c{s}it},
+      journal = {Appl. Opt.},
+      keywords = {Holographic displays; Holographic recording; Holographic techniques; Image quality; Image reconstruction; Visible light communications},
+      number = {5},
+      pages = {B50--B55},
+      publisher = {OSA},
+      title = {Learned holographic light transport: invited},
+      volume = {61},
+      month = {Feb},
+      year = {2022},
+      url = {http://www.osapublishing.org/ao/abstract.cfm?URI=ao-61-5-B50},
+      doi = {10.1364/AO.439401},
+    }
+
+
+


+

2021

+
+ +
+

Telelife: the future of remote living

+

+

Jason Orlosky, +Misha Sra, +Kenan Bektaş, +Huaishu Peng, +Jeeeun Kim, +Nataliya Kosmyna, +Tobias Hollerer, +Anthony Steed, +Kiyoshi Kiyokawa +and Kaan Akşit

+

Publisher site + Manuscript

+
+ Bibtex +
@ARTICLE{10.3389/frvir.2021.763340,
+AUTHOR={Orlosky, Jason and Sra, Misha and Bektaş, Kenan and Peng, Huaishu and Kim, Jeeeun and Kos’myna, Nataliya and Höllerer, Tobias and Steed, Anthony and Kiyokawa, Kiyoshi and Ak\c{s}it, Kaan},   
+TITLE={Telelife: The Future of Remote Living},      
+JOURNAL={Frontiers in Virtual Reality},      
+VOLUME={2},      
+PAGES={147},     
+YEAR={2021},      
+URL={https://www.frontiersin.org/article/10.3389/frvir.2021.763340},       
+DOI={10.3389/frvir.2021.763340},      
+ISSN={2673-4192},   
+}
+
+
+


+
+ +
+

SensiCut: material-aware laser cutting using speckle sensing and deep learning

+

+

Mustafa Doga Dogan, +Steven Vidal Acevedo Colon, +Varnika Sinha, +Kaan Akşit +and Stefanie Mueller

+

Publisher site + Project site + Manuscript + Project video + Presentation recording

+
+ Bibtex +
@inproceedings{dogan2021sensicut,
+  title={SensiCut: Material-Aware Laser Cutting Using Speckle Sensing and Deep Learning},
+  author={Dogan, Mustafa Doga and Acevedo Colon, Steven Vidal and Sinha, Varnika and Ak{\c{s}}it, Kaan and Mueller, Stefanie},
+  booktitle={The 34th Annual ACM Symposium on User Interface Software and Technology},
+  pages={24--38},
+  year={2021}
+}
+
+
+


+
+ +
+

Beyond blur: ventral metamers for foveated rendering

+

+

David R. Walton, +Rafael Kuffner Dos Anjos, +Sebastian Friston, +David Swapp, +Kaan Akşit, +Anthony Steed +and Tobias Ritschel

+

Publisher site + Project site + Manuscript

+
+ Bibtex +
@article{walton2021beyond,
+    author = {David R. Walton and Rafael Kuffner Dos Anjos and Sebastian Friston and David Swapp and Kaan Akşit and Anthony Steed and Tobias Ritschel},
+    title    = {Beyond Blur: Ventral Metamers for Foveated Rendering},
+    journal = {ACM Trans. Graph. (Proc. SIGGRAPH 2021)},
+    year = {2021},
+    volume = {40},
+    number = {4},
+}
+
+
+


+
+ +
+

Beaming displays

+

+

Best paper nominee at IEEE VR 2021

+

Yuta Itoh, +Takumi Kaminokado +and Kaan Akşit

+

Publisher site + Manuscript + Project video + Presentation recording

+
+ Bibtex +
@article{itoh2021beaming,
+    author = {Yuta Itoh and Takumi Kaminokado and Kaan Ak{\c{s}}it},
+    keywords = {Near-eye displays},
+    publisher = {IEEE VR},
+    title = {Beaming Displays},
+    month = {April},
+    year = {2021}
+}
+
+
+


+

2020

+
+ +
+

Optical gaze tracking with spatially-sparse single-pixel detectors

+

+

Richard Li, +Eric Whitmire, +Michael Stengel, +Ben Boudaoud, +Jan Kautz, +David Luebke, +Shwetak Patel +and Kaan Akşit

+

Publisher site + Project site + Manuscript + Presentation recording

+
+ Bibtex +
@article{li2020opticalgaze,
+    author = {Richard Li and Eric Whitmire and Michael Stengel and Ben Boudaoud and Jan Kautz and David Luebke and Shwetak Patel and Kaan Ak{\c{s}}it},
+    keywords = {Gaze tracking, eye tracking, LEDs, photodiodes},
+    publisher = {ISMAR},
+    title = {Optical Gaze Tracking with Spatially-Sparse Single-Pixel Detectors},
+    month = {Nov},
+    year = {2020}
+}
+
+
+


+
+ +
+

Patch scanning displays: spatiotemporal enhancement for displays

+

+

Kaan Akşit

+

Publisher site + Manuscript + Project video

+
+ Bibtex +
@article{aksit2020patch,
+    author = {Kaan Ak\c{s}it},
+    journal = {Opt. Express},
+    keywords = {Digital micromirror devices; Image quality; Image reconstruction; Light sources; Optical components; Three dimensional imaging},
+    number = {2},
+    pages = {2107--2121},
+    publisher = {OSA},
+    title = {Patch scanning displays: spatiotemporal enhancement for displays},
+    volume = {28},
+    month = {Jan},
+    year = {2020},
+    url = {http://www.opticsexpress.org/abstract.cfm?URI=oe-28-2-2107}
+}
+
+
+


+

2019

+
+ +
+

Near-eye display and tracking technologies for virtual and augmented reality

+

+

George Alex Koulieris, +Kaan Akşit, +Michael Stengel, +Rafał Mantiuk, +Katerina Mania +and Christian Richardt

+

Publisher site + Manuscript + Project video

+
+ Bibtex +
@article{NearEyeDisplayAndTrackingSTAR,
+author  = {George Alex Koulieris and Kaan Ak{\c{s}}it and Michael Stengel and Rafa{\l} K. Mantiuk and Katerina Mania and Christian Richardt},
+title   = {Near-Eye Display and Tracking Technologies for Virtual and Augmented Reality},
+journal = {Computer Graphics Forum},
+year    = {2019},
+volume  = {38},
+number  = {2},
+url     = {https://richardt.name/nedtt/},
+}
+
+
+


+
+ +
+

Foveated AR: dynamically-foveated augmented reality display

+

+

Emerging Technology best in show award at SIGGRAPH 2019

+

Jonghyun Kim, +Youngmo Jeong, +Michael Stengel, +Kaan Akşit, +Rachel Albert, +Ben Boudaoud, +Trey Greer, +Joohwan Kim, +Ward Lopes, +Zander Majercik, +Peter Shirley, +Josef Spjut, +Morgan Mcguire +and David Luebke

+

Publisher site + Manuscript + Project video

+
+ Bibtex +
@article{kim2019foveated,
+  title={Foveated AR: dynamically-foveated augmented reality display},
+  author={Kim, Jonghyun and Jeong, Youngmo and Stengel, Michael and Ak{\c{s}}it, Kaan and Albert, Rachel and Boudaoud, Ben and Greer, Trey and Kim, Joohwan and Lopes, Ward and Majercik, Zander and others},
+  journal={ACM Transactions on Graphics (TOG)},
+  volume={38},
+  number={4},
+  pages={1--15},
+  year={2019},
+  publisher={ACM New York, NY, USA}
+}
+
+
+


+

2018

+
+ +
+

FocusAR: auto-focus augmented reality eyeglasses for both real and virtual

+

+

Best paper award at ISMAR 2018

+

Presented at SIGGRAPH ASIA 2018

+

Praneeth Chakravarthula, +David Dunn, +Kaan Akşit +and Henry Fuchs

+

Publisher site + Manuscript + Presentation recording + Presentation source

+
+ Bibtex +
@article{chakravarthula2018focusar,
+  title={FocusAR: Auto-focus Augmented Reality Eyeglasses for Both Real and Virtual},
+  author={Chakravarthula, Praneeth and Dunn, David and Ak{\c{s}}it, Kaan and Fuchs, Henry},
+  journal={IEEE Transactions on Visualization and Computer Graphics},
+  year={2018},
+  publisher={IEEE}
+}
+
+
+


+
+ +
+

Manufacturing application-driven foveated near-eye displays

+

+

Best paper nominee at IEEE VR 2018

+

Emerging Technology best in show award at SIGGRAPH 2018

+

Kaan Akşit, +Praneeth Chakravarthula, +Kishore Rathinavel, +Youngmo Jeong, +Rachel Albert, +Henry Fuchs +and David Luebke

+

Publisher site + Manuscript + Project video + Presentation recording + Presentation source

+
+ Bibtex +
@article{akcsit2019manufacturing,
+  title={Manufacturing application-driven foveated near-eye displays},
+  author={Ak{\c{s}}it, Kaan and Chakravarthula, Praneeth and Rathinavel, Kishore and Jeong, Youngmo and Albert, Rachel and Fuchs, Henry and Luebke, David},
+  journal={IEEE transactions on visualization and computer graphics},
+  volume={25},
+  number={5},
+  pages={1928--1939},
+  year={2019},
+  publisher={IEEE}
+}
+
+
+


+

2017

+
+ +
+

Near-Eye varifocal augmented reality display using see-through screens

+

+

Kaan Akşit, +Ward Lopes, +Jonghyun Kim, +Peter Shirley +and David Luebke

+

Publisher site + Manuscript + Video

+
+ Bibtex +
@Article{Aksit2017Varifocal,
+Title      = {Near-Eye Varifocal Augmented Reality Display using See-Through Screens},
+Author     = {K. Ak{\c{s}}it and W. Lopes and J. Kim and P. Shirley and D. Luebke},
+journal    = {ACM Trans. Graph. (SIGGRAPH)},
+issue      = {36},
+number     = {6},
+year = {2017}}
+
+
+


+
+ +
+

Wide field of view varifocal near-eye display using see-through deformable membrane mirrors

+

+

Best paper award at IEEE VR 2017

+

SIGGRAPH 2017 Emerging Technologies DCEXPO Special Prize

+

David Dunn, +Cary Tippets, +Kent Torell, +Petr Kellnhofer, +Kaan Akşit, +Piotr Didyk, +Karol Myszkowski, +David Luebke +and Henry Fuchs

+

Publisher site + Project site + Manuscript + Video

+
+ Bibtex +
@article{dunn2017wide,
+title={Wide Field Of View Varifocal Near-Eye Display Using See-Through Deformable Membrane Mirrors},
+author={Dunn, David and Tippets, Cary and Torell, Kent and Kellnhofer, Petr and Ak{\c{s}}it, Kaan and Didyk, Piotr and Myszkowski, Karol and Luebke, David and Fuchs, Henry},
+journal={IEEE Transactions on Visualization and Computer Graphics},
+volume={23},
+number={4},
+pages={1322--1331},
+year={2017},
+publisher={IEEE}
+}
+
+
+


+

2016

+
+ +
+

Gaze-sensing LEDs for head mounted displays

+

+

Kaan Akşit, +Jan Kautz +and David Luebke

+

Publisher site + Manuscript + Video

+
+ Bibtex +
@article{akcsit2020gaze,
+  title={Gaze-sensing leds for head mounted displays},
+  author={Ak{\c{s}}it, Kaan and Kautz, Jan and Luebke, David},
+  journal={arXiv preprint arXiv:2003.08499},
+  year={2020}
+}
+
+
+


+

2015

+
+ +
+

Slim near-eye display using pinhole aperture arrays

+

+

Kaan Akşit, +Jan Kautz +and David Luebke

+

Publisher site + Project site + Manuscript + Video

+
+ Bibtex +
@article{Aksit:15, 
+author = {Kaan Ak\c{s}it and Jan Kautz and David Luebke}, 
+journal = {Appl. Opt.}, 
+keywords = {Apertures; Vision - binocular and stereopsis ; Computational imaging},
+number = {11}, 
+pages = {3422--3427}, 
+publisher = {OSA},
+title = {Slim near-eye display using pinhole aperture arrays}, 
+volume = {54}, 
+month = {Apr},
+year = {2015},
+url = {http://ao.osa.org/abstract.cfm?URI=ao-54-11-3422},
+doi = {10.1364/AO.54.003422},
+abstract = {We report a new technique for building a wide-angle, lightweight, thin-form-factor, cost-effective, easy-to-manufacture near-eye head-mounted display (HMD) for virtual reality applications. Our approach adopts an aperture mask containing an array of pinholes and a screen as a source of imagery. We demonstrate proof-of-concept HMD prototypes with a binocular field of view (FOV) of 70\&amp;\#xB0;\&amp;\#xD7;45\&amp;\#xB0;, or total diagonal FOV of 83\&amp;\#xB0;. This FOV should increase with increasing display panel size. The optical angular resolution supported in our prototype can go down to 1.4\&amp;\#x2013;2.1 arcmin by adopting a display with 20\&amp;\#x2013;30\&amp;\#xA0;\&amp;\#x3BC;m pixel pitch.},
+}
+
+
+


+

2014

+
+ +
+

Head-worn mixed reality projection display application

+

+

Kaan Akşit, +Daniel Kade, +Oğuzhan Özcan +and Hakan Urey

+

Publisher site + Manuscript + Video

+
+ Bibtex +
@inproceedings{Aksit:2014:HMR:2663806.2663826,
+ author = {Ak\c{s}it, Kaan and Kade, Daniel and \"{O}zcan, O\u{g}uzhan and \"{U}rey, Hakan},
+ title = {Head-worn Mixed Reality Projection Display Application},
+ booktitle = {Proceedings of the 11th Conference on Advances in Computer Entertainment Technology},
+ series = {ACE '14},
+ year = {2014},
+ isbn = {978-1-4503-2945-3},
+ location = {Funchal, Portugal},
+ pages = {11:1--11:9},
+ articleno = {11},
+ numpages = {9},
+ url = {http://doi.acm.org/10.1145/2663806.2663826},
+ doi = {10.1145/2663806.2663826},
+ acmid = {2663826},
+ publisher = {ACM},
+ address = {New York, NY, USA},
+ keywords = {head-mounted projection display, immersive environments, laser projector, mixed reality, motion capture},
+} 
+
+
+


+
+ +
+

Super stereoscopy technique for comfortable and realistic 3D displays

+

+

Kaan Akşit, +Amir Niaki, +Erdem Ulusoy +and Hakan Urey

+

Publisher site + Manuscript

+
+ Bibtex +
@article{Aksit:14, 
+author = {Kaan Ak\c{s}it and Amir Hossein Ghanbari Niaki and Erdem Ulusoy and Hakan Urey}, 
+journal = {Opt. Lett.}, 
+keywords = {Displays; Vision - binocular and stereopsis ; Visual optics, accommodation},
+number = {24}, 
+pages = {6903--6906}, 
+publisher = {OSA},
+title = {Super stereoscopy technique for comfortable and realistic 3D displays}, 
+volume = {39}, 
+month = {Dec},
+year = {2014},
+url = {http://ol.osa.org/abstract.cfm?URI=ol-39-24-6903},
+doi = {10.1364/OL.39.006903},
+abstract = {Two well-known problems of stereoscopic displays are the accommodation-convergence conflict and the lack of natural blur for defocused objects. We present a new technique that we name Super Stereoscopy (SS3D) to provide a convenient solution to these problems. Regular stereoscopic glasses are replaced by SS3D glasses which deliver at least two parallax images per eye through pinholes equipped with light selective filters. The pinholes generate blur-free retinal images so as to enable correct accommodation, while the delivery of multiple parallax images per eye creates an approximate blur effect for defocused objects. Experiments performed with cameras and human viewers indicate that the technique works as desired. In case two, pinholes equipped with color filters per eye are used; the technique can be used on a regular stereoscopic display by only uploading a new content, without requiring any change in display hardware, driver, or frame rate. Apart from some tolerable loss in display brightness and decrease in natural spatial resolution limit of the eye because of pinholes, the technique is quite promising for comfortable and realistic 3D vision, especially enabling the display of close objects that are not possible to display and comfortably view on regular 3DTV and cinema.},
+}
+
+
+


+
+ +
+

From Sound to Sight: Using Audio Processing to enable Visible Light Communication

+

+

Stefan Schmid, +D. Schwyn, +Kaan Akşit, +Giorgio Corbellini, +Thomas Gross +and Stefan Mangold

+

Publisher site + Manuscript

+
+ Bibtex +
@INPROCEEDINGS{7063484,
+author={S. Schmid and D. Schwyn and K. Akşit and G. Corbellini and T. R. Gross and S. Mangold},
+booktitle={2014 IEEE Globecom Workshops (GC Wkshps)},
+title={From sound to sight: Using audio processing to enable visible light communication},
+year={2014},
+pages={518-523},
+keywords={audio signal processing;light emitting diodes;mobile handsets;optical communication;photodiodes;protocols;audio jack;audio processing;communication protocols;electrical signals;light signals;microphone input;mobile phones;on-board audio signal processing;passive components;peripheral device;photodiode;visible light communication;Decoding;Hardware;Lifting equipment;Light emitting diodes;Photodiodes;Protocols;Throughput},
+doi={10.1109/GLOCOMW.2014.7063484},
+ISSN={2166-0077},
+month={Dec},}
+
+
+


+
+ +
+

Connecting Networks of Toys and Smartphones with Visible Light Communication

+

+

Giorgio Corbellini, +Kaan Akşit, +Stefan Mangold, +Stefan Schmid +and Thomas R. Gross

+

Publisher site + Manuscript + Video

+
+ Bibtex +
@ARTICLE{6852086,
+author={G. Corbellini and K. Aksit and S. Schmid and S. Mangold and T. R. Gross},
+journal={IEEE Communications Magazine},
+title={Connecting networks of toys and smartphones with visible light communication},
+year={2014},
+volume={52},
+number={7},
+pages={72-78},
+keywords={light emitting diodes;optical communication;optical receivers;smart phones;LED;VLC systems;brightness;consumer electronics;illumination;light emitting diodes;light receivers;microcontrollers;public environment;residential environment;smartphones;toys;visible light communication;wireless communication interface;Cameras;Commercialization;Frequency measurement;Illumination;Light emitting diodes;Microcontrollers;Receivers;Smart phones;Transceivers},
+doi={10.1109/MCOM.2014.6852086},
+ISSN={0163-6804},
+month={July},}
+
+
+


+

2013

+
+ +
+

Dynamic exit pupil trackers for autostereoscopic displays

+

+

Kaan Akşit, +Hadi Baghsiahi, +Phil Surman, +Selim Ölçer, +Eero Willman, +David R. Selviah, +Sally Day +and Hakan Urey

+

Publisher site + Manuscript + Video

+
+ Bibtex +
@article{Aksit:13, 
+author = {Kaan Ak\c{s}it and Hadi Baghsiahi and Phil Surman and Selim Ӧl\c{c}er and Eero Willman and David R. Selviah and Sally Day and Hakan Urey}, 
+journal = {Opt. Express}, 
+keywords = {Displays; Optical systems; Optoelectronics; Laser beam shaping; Vision - binocular and stereopsis},
+number = {12}, 
+pages = {14331--14341}, 
+publisher = {OSA},
+title = {Dynamic exit pupil trackers for autostereoscopic displays}, 
+volume = {21}, 
+month = {Jun},
+year = {2013},
+url = {http://www.opticsexpress.org/abstract.cfm?URI=oe-21-12-14331},
+doi = {10.1364/OE.21.014331},
+abstract = {This paper describes the first demonstrations of two dynamic exit pupil (DEP) tracker techniques for autostereoscopic displays. The first DEP tracker forms an exit pupil pair for a single viewer in a defined space with low intraocular crosstalk using a pair of moving shutter glasses located within the optical system. A display prototype using the first DEP tracker is constructed from a pair of laser projectors, pupil-forming optics, moving shutter glasses at an intermediate pupil plane, an image relay lens, and a Gabor superlens based viewing screen. The left and right eye images are presented time-sequentially to a single viewer and seen as a 3D image without wearing glasses and allows the viewer to move within a region of 40 cm {\texttimes} 20 cm in the lateral plane, and 30 cm along the axial axis. The second DEP optics can move the exit pupil location dynamically in a much larger 3D space by using a custom spatial light modulator (SLM) forming an array of shutters. Simultaneous control of multiple exit pupils in both lateral and axial axes is demonstrated for the first time and provides a viewing volume with an axial extent of 0.6{\textminus}3 m from the screen and within a lateral viewing angle of {\textpm} 20{\textdegree} for multiple viewers. This system has acceptable crosstalk (\&lt; 5\%) between the stereo image pairs. In this novel version of the display the optical system is used as an advanced dynamic backlight for a liquid crystal display (LCD). This has advantages in terms of overall display size as there is no requirement for an intermediate image, and in image quality. This system has acceptable crosstalk (\&lt; 5\%) between the stereo image pairs.},
+}
+
+
+


+
+ +
+

Multi-view autostereoscopic projection display using rotating screen

+

+

Spotlight on Optics

+

Osman Eldes, +Kaan Akşit +and Hakan Urey

+

Publisher site + Manuscript + Video

+
+ Bibtex +
@article{Eldes:13,
+author = {Osman Eldes and Kaan Ak\c{s}it and Hakan Urey},
+journal = {Opt. Express},
+keywords = {Displays; Diffusers; Vision - binocular and stereopsis ; Autostereoscopic displays; Brightness; Fresnel lenses; Image registration; Pico projectors; Systems design},
+number = {23},
+pages = {29043--29054},
+publisher = {OSA},
+title = {Multi-view autostereoscopic projection display using rotating screen},
+volume = {21},
+month = {Nov},
+year = {2013},
+url = {http://www.osapublishing.org/oe/abstract.cfm?URI=oe-21-23-29043},
+doi = {10.1364/OE.21.029043},
+abstract = {A new technique for multi-view autostereoscopic projection display is proposed, and demonstrated. The technique uses two mobile projectors, a rotating retro-reflective diffuser screen, and a head-tracking camera. As two dynamic viewing slits are created at the viewer's position, the slits can track the position of the eyes by rotating the screen. The display allows a viewer to move approximately 700 mm along the horizontal axis, and 500 mm along the vertical axis with an average crosstalk below 5 \%. Two screen prototypes with different diffusers have been tried, and they provide luminance levels of 60 Cd/m2, and 160 Cd/m2 within the viewing field.},
+}
+
+
+


+

2012

+
+ +
+

Portable 3D Laser Projector Using Mixed Polarization Technique

+

+

Best 3D product award of International 3D Society (4th year)

+

Kaan Akşit, +Osman Eldeş, +Selvan Viswanathan, +Mark Freeman +and Hakan Urey

+

Publisher site + Manuscript + Video

+
+ Bibtex +
@ARTICLE{6297485,
+  author={Aksit, Kaan and Eldes, Osman and Viswanathan, Selvan and Freeman, Mark O. and Urey, Hakan},
+  journal={Journal of Display Technology}, 
+  title={Portable 3D Laser Projector Using Mixed Polarization Technique}, 
+  year={2012},
+  volume={8},
+  number={10},
+  pages={582-589},
+  doi={10.1109/JDT.2012.2205664}}
+
+
+


+

2010

+
+ +
+

Heart rate monitoring via remote photoplethysmography with motion artifacts reduction

+

+

Giovanni Cennini, +Jeremie Arguel, +Kaan Akşit +and Arno van Leest

+

Publisher site + Manuscript + Video

+
+ Bibtex +
@article{Cennini:10, 
+author = {Giovanni Cennini and Jeremie Arguel and Kaan Ak\c{s}it and Arno van Leest}, 
+journal = {Opt. Express}, 
+keywords = {Medical optics instrumentation; Optical devices; Optical sensing and sensors},
+number = {5}, 
+pages = {4867--4875}, 
+publisher = {OSA},
+title = {Heart rate monitoring via remote photoplethysmography with motion artifacts reduction}, 
+volume = {18}, 
+month = {Mar},
+year = {2010},
+url = {http://www.opticsexpress.org/abstract.cfm?URI=oe-18-5-4867},
+doi = {10.1364/OE.18.004867},
+abstract = {In this paper, we present a novel photoplethysmographic device that operates remotely, i.e. not in contact with the skin. The device allows for real time measurements of heart rate with motion artifact reduction from a distance of a few centimeters up to several meters. High mobility of users is achieved in assessment of vital body signs, such as heart rate.},
+}
+
+
+


+ + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/publications/media/autocolor.png b/publications/media/autocolor.png new file mode 100644 index 00000000..3327c262 Binary files /dev/null and b/publications/media/autocolor.png differ diff --git a/publications/media/beaming_displays.png b/publications/media/beaming_displays.png new file mode 100644 index 00000000..b1808476 Binary files /dev/null and b/publications/media/beaming_displays.png differ diff --git a/publications/media/beyond_blur.png b/publications/media/beyond_blur.png new file mode 100644 index 00000000..fbe18ded Binary files /dev/null and b/publications/media/beyond_blur.png differ diff --git a/publications/media/connect_toys.png b/publications/media/connect_toys.png new file mode 100644 index 00000000..5a6b9b77 Binary files /dev/null and b/publications/media/connect_toys.png differ diff --git a/publications/media/diffractive_visual_processor.png b/publications/media/diffractive_visual_processor.png new file mode 100644 index 00000000..bb987080 Binary files /dev/null and b/publications/media/diffractive_visual_processor.png differ diff --git a/publications/media/dynamic_exit_pupil.png b/publications/media/dynamic_exit_pupil.png new file mode 100644 index 00000000..798d217a Binary files /dev/null and b/publications/media/dynamic_exit_pupil.png differ diff --git a/publications/media/focal_surface_lightprop_experimental_results_castle.png b/publications/media/focal_surface_lightprop_experimental_results_castle.png new file mode 100644 index 00000000..3cc71fea Binary files /dev/null and b/publications/media/focal_surface_lightprop_experimental_results_castle.png differ diff --git a/publications/media/focal_surface_lightprop_experimental_results_leaves.png b/publications/media/focal_surface_lightprop_experimental_results_leaves.png new file mode 100644 index 00000000..33584112 Binary files /dev/null and b/publications/media/focal_surface_lightprop_experimental_results_leaves.png differ diff --git a/publications/media/focal_surface_lightprop_experimental_results_leaves_capture.png b/publications/media/focal_surface_lightprop_experimental_results_leaves_capture.png new file mode 100644 index 00000000..477b1bc1 Binary files /dev/null and b/publications/media/focal_surface_lightprop_experimental_results_leaves_capture.png differ diff --git a/publications/media/focal_surface_lightprop_experimental_results_tiger.png b/publications/media/focal_surface_lightprop_experimental_results_tiger.png new file mode 100644 index 00000000..486dffcd Binary files /dev/null and b/publications/media/focal_surface_lightprop_experimental_results_tiger.png differ diff --git a/publications/media/focal_surfaec_lightprop_proposed_vs_conv.png b/publications/media/focal_surfaec_lightprop_proposed_vs_conv.png new file mode 100644 index 00000000..386fae06 Binary files /dev/null and b/publications/media/focal_surfaec_lightprop_proposed_vs_conv.png differ diff --git a/publications/media/focal_surfaec_lightprop_web_image.png b/publications/media/focal_surfaec_lightprop_web_image.png new file mode 100644 index 00000000..49abc2c9 Binary files /dev/null and b/publications/media/focal_surfaec_lightprop_web_image.png differ diff --git a/publications/media/focusar.png b/publications/media/focusar.png new file mode 100644 index 00000000..e3c3cd99 Binary files /dev/null and b/publications/media/focusar.png differ diff --git a/publications/media/foveated_displays.png b/publications/media/foveated_displays.png new file mode 100644 index 00000000..d78a3bfd 
Binary files /dev/null and b/publications/media/foveated_displays.png differ diff --git a/publications/media/gaze_sensing_leds.png b/publications/media/gaze_sensing_leds.png new file mode 100644 index 00000000..1bdd1fad Binary files /dev/null and b/publications/media/gaze_sensing_leds.png differ diff --git a/publications/media/head_worn_projector.png b/publications/media/head_worn_projector.png new file mode 100644 index 00000000..f9728e5b Binary files /dev/null and b/publications/media/head_worn_projector.png differ diff --git a/publications/media/heart_rate.png b/publications/media/heart_rate.png new file mode 100644 index 00000000..22785f91 Binary files /dev/null and b/publications/media/heart_rate.png differ diff --git a/publications/media/holobeam.png b/publications/media/holobeam.png new file mode 100644 index 00000000..7a2a19df Binary files /dev/null and b/publications/media/holobeam.png differ diff --git a/publications/media/holobeam_amplitude_only.png b/publications/media/holobeam_amplitude_only.png new file mode 100644 index 00000000..11d203d3 Binary files /dev/null and b/publications/media/holobeam_amplitude_only.png differ diff --git a/publications/media/holobeam_focal_sweep.gif b/publications/media/holobeam_focal_sweep.gif new file mode 100644 index 00000000..46fecd24 Binary files /dev/null and b/publications/media/holobeam_focal_sweep.gif differ diff --git a/publications/media/holobeam_full_color.png b/publications/media/holobeam_full_color.png new file mode 100644 index 00000000..904b8b56 Binary files /dev/null and b/publications/media/holobeam_full_color.png differ diff --git a/publications/media/holobeam_hires.png b/publications/media/holobeam_hires.png new file mode 100644 index 00000000..6367a252 Binary files /dev/null and b/publications/media/holobeam_hires.png differ diff --git a/publications/media/holobeam_phase_only.png b/publications/media/holobeam_phase_only.png new file mode 100644 index 00000000..69248284 Binary files /dev/null and b/publications/media/holobeam_phase_only.png differ diff --git a/publications/media/holohdr_visual_artifacts.png b/publications/media/holohdr_visual_artifacts.png new file mode 100644 index 00000000..28f6ec3f Binary files /dev/null and b/publications/media/holohdr_visual_artifacts.png differ diff --git a/publications/media/learned_light.gif b/publications/media/learned_light.gif new file mode 100644 index 00000000..1775eea6 Binary files /dev/null and b/publications/media/learned_light.gif differ diff --git a/publications/media/learned_prescription.png b/publications/media/learned_prescription.png new file mode 100644 index 00000000..e921750f Binary files /dev/null and b/publications/media/learned_prescription.png differ diff --git a/publications/media/manufacturing_displays.png b/publications/media/manufacturing_displays.png new file mode 100644 index 00000000..4423a828 Binary files /dev/null and b/publications/media/manufacturing_displays.png differ diff --git a/publications/media/metameric.png b/publications/media/metameric.png new file mode 100644 index 00000000..67a867dd Binary files /dev/null and b/publications/media/metameric.png differ diff --git a/publications/media/metameric_inpainting.png b/publications/media/metameric_inpainting.png new file mode 100644 index 00000000..7670edf8 Binary files /dev/null and b/publications/media/metameric_inpainting.png differ diff --git a/publications/media/multicolor.png b/publications/media/multicolor.png new file mode 100644 index 00000000..d954665f Binary files /dev/null and 
b/publications/media/multicolor.png differ diff --git a/publications/media/multicolor_experimental_results_fruit_lady.png b/publications/media/multicolor_experimental_results_fruit_lady.png new file mode 100644 index 00000000..0b62483a Binary files /dev/null and b/publications/media/multicolor_experimental_results_fruit_lady.png differ diff --git a/publications/media/multicolor_experimental_results_holographic_glasses.png b/publications/media/multicolor_experimental_results_holographic_glasses.png new file mode 100644 index 00000000..9c8fe04c Binary files /dev/null and b/publications/media/multicolor_experimental_results_holographic_glasses.png differ diff --git a/publications/media/multicolor_teaser.png b/publications/media/multicolor_teaser.png new file mode 100644 index 00000000..17770c92 Binary files /dev/null and b/publications/media/multicolor_teaser.png differ diff --git a/publications/media/multicolor_three_dimensional_multi_text.png b/publications/media/multicolor_three_dimensional_multi_text.png new file mode 100644 index 00000000..d3cd586a Binary files /dev/null and b/publications/media/multicolor_three_dimensional_multi_text.png differ diff --git a/publications/media/multicolor_visual_artifacts.png b/publications/media/multicolor_visual_artifacts.png new file mode 100644 index 00000000..28f6ec3f Binary files /dev/null and b/publications/media/multicolor_visual_artifacts.png differ diff --git a/publications/media/next_displays.png b/publications/media/next_displays.png new file mode 100644 index 00000000..eff573ee Binary files /dev/null and b/publications/media/next_displays.png differ diff --git a/publications/media/optimizing_vision_and_visuals.png b/publications/media/optimizing_vision_and_visuals.png new file mode 100644 index 00000000..291901a0 Binary files /dev/null and b/publications/media/optimizing_vision_and_visuals.png differ diff --git a/publications/media/patch_scan.png b/publications/media/patch_scan.png new file mode 100644 index 00000000..fe3c5e0a Binary files /dev/null and b/publications/media/patch_scan.png differ diff --git a/publications/media/pinhole.png b/publications/media/pinhole.png new file mode 100644 index 00000000..8b82b1f6 Binary files /dev/null and b/publications/media/pinhole.png differ diff --git a/publications/media/portable_3d.png b/publications/media/portable_3d.png new file mode 100644 index 00000000..19dc63fc Binary files /dev/null and b/publications/media/portable_3d.png differ diff --git a/publications/media/realistic_defocus_ar_prototype.png b/publications/media/realistic_defocus_ar_prototype.png new file mode 100644 index 00000000..ff106a88 Binary files /dev/null and b/publications/media/realistic_defocus_ar_prototype.png differ diff --git a/publications/media/realistic_defocus_ar_results.png b/publications/media/realistic_defocus_ar_results.png new file mode 100644 index 00000000..a7e8ecbc Binary files /dev/null and b/publications/media/realistic_defocus_ar_results.png differ diff --git a/publications/media/realistic_defocus_cgh.png b/publications/media/realistic_defocus_cgh.png new file mode 100644 index 00000000..9f57a84d Binary files /dev/null and b/publications/media/realistic_defocus_cgh.png differ diff --git a/publications/media/realistic_defocus_focus_stack.gif b/publications/media/realistic_defocus_focus_stack.gif new file mode 100644 index 00000000..59815d3f Binary files /dev/null and b/publications/media/realistic_defocus_focus_stack.gif differ diff --git a/publications/media/realistic_defocus_hardware.png 
b/publications/media/realistic_defocus_hardware.png new file mode 100644 index 00000000..252151e6 Binary files /dev/null and b/publications/media/realistic_defocus_hardware.png differ diff --git a/publications/media/realistic_defocus_teaser.png b/publications/media/realistic_defocus_teaser.png new file mode 100644 index 00000000..25d996d8 Binary files /dev/null and b/publications/media/realistic_defocus_teaser.png differ diff --git a/publications/media/rotating_3d.png b/publications/media/rotating_3d.png new file mode 100644 index 00000000..170ffb4c Binary files /dev/null and b/publications/media/rotating_3d.png differ diff --git a/publications/media/see_through_hoe.png b/publications/media/see_through_hoe.png new file mode 100644 index 00000000..5fbd6fce Binary files /dev/null and b/publications/media/see_through_hoe.png differ diff --git a/publications/media/sensicut.png b/publications/media/sensicut.png new file mode 100644 index 00000000..d745e7cc Binary files /dev/null and b/publications/media/sensicut.png differ diff --git a/publications/media/single_pixel_gaze.png b/publications/media/single_pixel_gaze.png new file mode 100644 index 00000000..9dad5b64 Binary files /dev/null and b/publications/media/single_pixel_gaze.png differ diff --git a/publications/media/sound_to_light.png b/publications/media/sound_to_light.png new file mode 100644 index 00000000..7cfb7ee0 Binary files /dev/null and b/publications/media/sound_to_light.png differ diff --git a/publications/media/spec_track_different_attributes.png b/publications/media/spec_track_different_attributes.png new file mode 100644 index 00000000..3acd843d Binary files /dev/null and b/publications/media/spec_track_different_attributes.png differ diff --git a/publications/media/spec_track_lightpath.png b/publications/media/spec_track_lightpath.png new file mode 100644 index 00000000..551ca312 Binary files /dev/null and b/publications/media/spec_track_lightpath.png differ diff --git a/publications/media/spec_track_nn.png b/publications/media/spec_track_nn.png new file mode 100644 index 00000000..2446a4d1 Binary files /dev/null and b/publications/media/spec_track_nn.png differ diff --git a/publications/media/spec_track_overlapping.png b/publications/media/spec_track_overlapping.png new file mode 100644 index 00000000..9efed6f7 Binary files /dev/null and b/publications/media/spec_track_overlapping.png differ diff --git a/publications/media/spec_track_testbed.png b/publications/media/spec_track_testbed.png new file mode 100644 index 00000000..1ff13124 Binary files /dev/null and b/publications/media/spec_track_testbed.png differ diff --git a/publications/media/super_stereoscopy.png b/publications/media/super_stereoscopy.png new file mode 100644 index 00000000..f88805c8 Binary files /dev/null and b/publications/media/super_stereoscopy.png differ diff --git a/publications/media/telelife.png b/publications/media/telelife.png new file mode 100644 index 00000000..db50dd60 Binary files /dev/null and b/publications/media/telelife.png differ diff --git a/publications/media/unrolled_primal_dual.png b/publications/media/unrolled_primal_dual.png new file mode 100644 index 00000000..9557ef07 Binary files /dev/null and b/publications/media/unrolled_primal_dual.png differ diff --git a/publications/media/varifocal_membrane.png b/publications/media/varifocal_membrane.png new file mode 100644 index 00000000..830c9c40 Binary files /dev/null and b/publications/media/varifocal_membrane.png differ diff --git a/publications/multi_color/index.html 
b/publications/multi_color/index.html new file mode 100644 index 00000000..afe76154 --- /dev/null +++ b/publications/multi_color/index.html @@ -0,0 +1,2797 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Multi-color Holograms Improve Brightness in Holographic Displays - Computational Light Laboratory at University College London + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Multi-color Holograms Improve Brightness in Holographic Displays

+

People

+ + + + + + + + + + + + + + + + + +
/      /      /      /      /     

Koray Kavaklı1

Liang Shi2

Hakan Urey1

Wojciech Matusik2

Kaan Akşit3

+

+1Koç University, +2Massachusetts Institute of Technology, +3University College London +

+

SIGGRAPH Asia 2023

+ +

Resources

+

Manuscript + Supplementary + Code + Project video

+
+ Bibtex +
@inproceedings{kavakli2023multicolor,
+  title={Multi-color Holograms Improve Brightness in Holographic Displays},
+  author={Kavaklı, Koray and Shi, Liang and Urey, Hakan and Matusik, Wojciech and Akşit, Kaan},
+  booktitle = {SIGGRAPH Asia 2023 Conference Papers},
+  articleno = {20},
+  numpages = {11},
+  keywords = {Brightness, Computer-generated holography, Holographic displays},
+  location = {Sydney, NSW, Australia},
+  series = {SA '23},
+  month={December},
+  year={2023},
+  doi={https://doi.org/10.1145/3610548.3618135}
+}
+
+
+

Video

+ + +

Presentation

+ + +

Abstract

+

Holographic displays generate Three-Dimensional (3D) images by displaying single-color holograms time-sequentially, each lit by a single-color light source. +However, representing each color one by one limits brightness in holographic displays. +This paper introduces a new driving scheme for realizing brighter images in holographic displays. +Unlike the conventional driving scheme, our method utilizes three light sources to illuminate each displayed hologram simultaneously at various intensity levels. +In this way, our method reconstructs a multiplanar three-dimensional target scene using consecutive multi-color holograms and persistence of vision. +We co-optimize multi-color holograms and required intensity levels from each light source using a gradient descent-based optimizer with a combination of application-specific loss terms. +We experimentally demonstrate that our method can increase the intensity levels in holographic displays up to three times, reaching a broader range and unlocking new potentials for perceptual realism in holographic displays.

+

Results

+

Conventional holographic displays use a single Spatial Light Modulator (SLM) and reconstruct full-color images by time-sequentially displaying single-color holograms, each dedicated to a color channel. +When holographic displays reconstruct scenes with brightness levels beyond the peak intensity of their corresponding color channels, the result is often darker images than the intended levels, along with visual distortions or color mismatches (see the conventional case in the figure below). +In such cases, the dynamic range of the target is typically limited to the peak intensity of the light source, which is often not enough to deliver the desired visual experience.
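The gap between the two driving schemes comes down to how long each light source can contribute within a frame. The following back-of-the-envelope sketch (our illustration, not the paper's code) makes that budget explicit under the assumption of three subframes per frame.

```python
# A minimal, illustrative comparison of the per-channel brightness budget.
# Conventional: each color source is on only during its dedicated subframe.
# Multi-color: every source may illuminate all subframes at optimized levels,
# which is where the "up to three times" intensity figure comes from.
subframes = 3  # one hologram displayed per subframe within a single frame

conventional_on_time = {'red': 1, 'green': 1, 'blue': 1}  # subframes per frame
multicolor_on_time = {'red': subframes, 'green': subframes, 'blue': subframes}

for channel in ('red', 'green', 'blue'):
    gain = multicolor_on_time[channel] / conventional_on_time[channel]
    print(f'{channel}: up to x{gain:.1f} integrated intensity per frame')
```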

+
+

Image title

+
+

Without altering hardware, we argue that holographic displays could dedicate extra time to each color channel to improve their perceived brightness levels, as demonstrated in the figure below. +Our work aims to improve holographic displays' dynamic range by utilizing color primaries and holograms more effectively, albeit more aggressively. +For this purpose, we introduce a new Computer-Generated Holography (CGH) driving scheme. +In this scheme, multi-color holograms simultaneously operate over multiple wavelengths of light and provide 3D multiplanar images. +We calculate multi-color holograms using a Gradient Descent (GD) based solver guided by a combination of application-specific loss functions. +In the meantime, we co-optimize the brightness levels required to illuminate each multi-color hologram. +We experimentally verify our findings using a holographic display prototype by showing reconstructions of brighter scenes with a broader dynamic range in an artifact-free and color-accurate manner.
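To make the co-optimization idea concrete, here is a heavily simplified PyTorch sketch of jointly optimizing three phase-only holograms and a matrix of per-hologram, per-channel light source powers with gradient descent. It is not the released implementation (see the Code link above), and `propagate` is a toy stand-in for a proper wavelength-dependent light transport model.

```python
import torch

def propagate(phase):
    # Toy light-transport placeholder: far-field intensity of a phase-only field.
    field = torch.exp(torch.complex(torch.zeros_like(phase), phase))
    return torch.fft.fftshift(torch.fft.fft2(field)).abs() ** 2

target = torch.rand(3, 256, 256)                          # RGB target scene (toy data)
holograms = torch.zeros(3, 256, 256, requires_grad=True)  # three multi-color holograms
powers = torch.full((3, 3), 0.5, requires_grad=True)      # light power per hologram and channel

optimizer = torch.optim.Adam([holograms, powers], lr=0.1)
for step in range(200):
    optimizer.zero_grad()
    # Every color channel integrates contributions from all three holograms.
    reconstruction = torch.stack([
        sum(powers[h, c] * propagate(holograms[h]) for h in range(3))
        for c in range(3)
    ])
    loss = torch.nn.functional.mse_loss(reconstruction / reconstruction.amax(), target)
    loss.backward()
    optimizer.step()
```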

+
+

Image title

+
+

The figure below shows photographs from our holographic display for the conventional scheme and ours (more sample results are available in our supplementary). +For such a scene, our method can safely support up to \(\times1.8\) peak brightness without causing significant image distortions or artifacts. +On the other hand, the conventional hologram fails to support peak brightness higher than \(\times1.0\). +Beyond \(\times1.8\) peak brightness levels, images are typically heavily dominated by noise in the conventional case.

+
+

Image title

+
+

In contrast, our method only slightly loses color integrity or generates noise comparable to the conventional case at \(\times1.2\) peak brightness.

+
+

Image title

+
+

Our method can also support three-dimensional multiplanar images.

+
+

Image title

+
+

Relevant research works

+

Here are relevant research works from the authors:

+ + +

Here are links related to our project such as videos, articles or podcasts:

+ +

Outreach

+

We host a Slack group with more than 250 members. +This Slack group focuses on the topics of rendering, perception, displays, and cameras. +The group is open to the public, and you can become a member by following this link.

+

Contact Us

+
+

Warning

+

Please reach us through email to provide your feedback and comments.

+
+

Acknowledgements

+
+ + +
+

Kaan Akşit is supported by the Royal Society's RGS\R2\212229 - Research Grants 2021 Round 2 in building the hardware prototype. Kaan Akşit is also supported by Meta Reality Labs inclusive rendering initiative 2022. Liang Shi is supported by Meta Research PhD fellowship (2021-2023). +
+
+
+
+
+
+

+
+ + +
+

Hakan Urey is supported by the European Innovation Council’s HORIZON-EIC-2021-TRANSITION-CHALLENGES program Grant Number 101057672 and Tübitak’s 2247-A National Lead Researchers Program, Project Number 120C145. +
+
+
+
+
+
+

+ + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/publications/realistic_defocus_cgh/index.html b/publications/realistic_defocus_cgh/index.html new file mode 100644 index 00000000..c79810ac --- /dev/null +++ b/publications/realistic_defocus_cgh/index.html @@ -0,0 +1,2740 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Realistic Defocus for Multiplane Computer-Generated Holography - Computational Light Laboratory at University College London + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Realistic Defocus Blur for Multiplane Computer-Generated Holography

+

People

+ + + + + + + + + + + + + + + +
/      /      /      /     

Koray Kavaklı1

Yuta Itoh2

Hakan Ürey1

Kaan Akşit3

+

1Koç University, 2The University of Tokyo 3University College London

+

IEEE VR 2023

+ +

Resources

+

Manuscript + Project video + Code

+
+ Bibtex +
@misc{kavakli2022realisticdefocus,
+  doi = {10.48550/ARXIV.2205.07030},
+  url = {https://arxiv.org/abs/2205.07030},
+  author = {Kavaklı, Koray and Itoh, Yuta and Urey, Hakan and Akşit, Kaan},
+  keywords = {Computer Vision and Pattern Recognition (cs.CV), Graphics (cs.GR), FOS: Computer and information sciences, FOS: Computer and information sciences, I.3.3},
+  title = {Realistic Defocus Blur for Multiplane Computer-Generated Holography},
+  publisher = {arXiv},
+  year = {2022},
+  copyright = {Creative Commons Attribution Non Commercial No Derivatives 4.0 International}
+}
+
+
+

Presentation

+

+ +

+ +

Video

+

+ +

+ +

Abstract

+

This paper introduces a new multiplane CGH computation method to reconstruct artefact-free high-quality holograms with natural-looking defocus blur. +Our method introduces a new targeting scheme and a new loss function. +While the targeting scheme accounts for defocused parts of the scene at each depth plane, the new loss function analyzes focused and defocused parts separately in reconstructed images. +Our method supports phase-only CGH calculations using various iterative (e.g., Gerchberg-Saxton, Gradient Descent) and non-iterative (e.g., Double Phase) CGH techniques. +We achieve our best image quality using a modified gradient descent-based optimization recipe where we introduce a constraint inspired by the double phase method. +We validate our method experimentally using our proof-of-concept holographic display, comparing various algorithms, including multi-depth scenes with sparse and dense contents.
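As a rough illustration of treating focused and defocused regions separately, the hedged sketch below weights reconstruction errors with a per-plane focus mask. It is our simplification for exposition, not the released loss (see the Code link under Resources), and `focus_mask` is assumed to be 1 where the content is in focus at the given depth plane.

```python
import torch

def split_focus_loss(reconstruction, target, focus_mask, defocus_weight=0.5):
    """Penalize errors in focused and defocused regions with separate terms."""
    squared_error = (reconstruction - target) ** 2
    focused = (squared_error * focus_mask).sum() / focus_mask.sum().clamp(min=1.0)
    defocused_mask = 1.0 - focus_mask
    defocused = (squared_error * defocused_mask).sum() / defocused_mask.sum().clamp(min=1.0)
    return focused + defocus_weight * defocused

# Toy usage: random tensors stand in for a reconstructed plane, its target, and its mask.
reconstruction = torch.rand(1, 1, 128, 128)
target = torch.rand(1, 1, 128, 128)
focus_mask = (torch.rand(1, 1, 128, 128) > 0.5).float()
loss = split_focus_loss(reconstruction, target, focus_mask)
```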

+

Results

+

In this work, we demonstrate a new rendering pipeline for multiplane Computer-Generated Holography that can provide near-accurate defocus blur.

+
+

Image title

+
+

Our results suggest that our work can help alleviate unintended artifacts found in existing rendering pipelines for Computer-Generated Holography.

+
+

Image title

+
+

We capture these results using our in-house built holographic display prototype.

+
+

Image title

+
+

Our technique is suitable for Augmented Reality applications (e.g., near-eye displays, heads-up displays). +Here we provide photographs of virtual images generated by our computer-generated holography pipeline overlaid on an actual scene. +Note that each image is focused at a different depth level.

+
+

Image title

+
+

Here we show a photograph of our holographic display prototype with Augmented Reality support.

+
+

Image title

+
+

Relevant works from our group

+

Here are relevant research works from our group:

+ +

Contact

+

If you have any queries, questions, suggestions, or comments, please contact us via kaanaksit@kaanaksit.com.

+

Acknowledgements

+

We also thank +Erdem Ulusoy and Güneş Aydındoğan for discussions in the early phases of the project; +Tim Weyrich and Makoto Yamada for dedicating GPU resources in various experimentation phases; +and David Walton for his feedback on the manuscript.

+
+ +
+

Yuta Itoh is supported by the JST FOREST Program Grant Number JPMJPR17J2 and JSPS KAKENHI Grant Number JP20H05958 and JP21K19788. +
+
+
+
+
+
+

+
+ +
+

Hakan Urey is supported by the European Innovation Council's HORIZON-EIC-2021-TRANSITION-CHALLENGES program Grant Number 101057672. +
+
+
+
+
+
+

+
+ +
+

Kaan Akşit is supported by the Royal Society's RGS\R2\212229 - Research Grants 2021 Round 2 in building the hardware prototype. +
+
+
+
+
+
+

+ + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/publications/spec_track/index.html b/publications/spec_track/index.html new file mode 100644 index 00000000..a9c5012c --- /dev/null +++ b/publications/spec_track/index.html @@ -0,0 +1,2821 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SpecTrack: Learned Multi-Rotation Tracaking via Speckle Imaging - Computational Light Laboratory at University College London + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

SpecTrack: Learned Multi-Rotation Tracking via Speckle Imaging

+

People

+ + + + + + + + + + + + + + + +
/         /      /      /     

Ziyang Chen1

Doğa Doğan2

Josef Spjut3

Kaan Akşit1

+

+1University College London, +2Adobe Research, +3NVIDIA +

+

SIGGRAPH Asia 2024 Poster

+ +

Resources

+

Manuscript + Poster + Supplementary + Code

+ +
+ Bibtex +
@inproceedings{chen2024spectrack,
+  author = {Ziyang Chen and Mustafa Dogan and Josef Spjut and Kaan Ak{\c{s}}it},
+  title = {SpecTrack: Learned Multi-Rotation Tracking via Speckle Imaging},
+  booktitle = {SIGGRAPH Asia 2024 Posters (SA Posters '24)},
+  year = {2024},
+  location = {Tokyo, Japan},
+  publisher = {ACM},
+  address = {New York, NY, USA},
+  pages = {2},
+  doi = {10.1145/3681756.3697875},
+  url = {https://doi.org/10.1145/3681756.3697875},
+  month = {December 03--06}
+}
+
+
+

Video

+ + + + + +

Abstract

+

Precision pose detection is increasingly demanded in fields such as personal fabrication, Virtual Reality (VR), and robotics due to its critical role in ensuring accurate positioning information. +However, conventional vision-based systems used in these applications often struggle with achieving high precision and accuracy, particularly when dealing with complex environments or fast-moving objects. +To address these limitations, we investigate Laser Speckle Imaging (LSI), an emerging optical tracking method that offers promising potential for improving pose estimation accuracy. +Specifically, our proposed LSI-based tracking method leverages the captures from a lensless camera and a retro-reflector marker with a coded aperture to achieve multi-axis rotational pose estimation with high precision. Our extensive trials using our in-house built testbed have shown that SpecTrack achieves an accuracy of \(0.31^\circ\) (std=\(0.43^\circ\)), +significantly outperforming state-of-the-art approaches and improving accuracy by up to \(200\%\).

+

Proposed Method

+

We aim to remotely obtain multiple absolute rotation angles from a coded retroreflective marker by utilizing the overlapping patterns generated by the multi-wavelength laser. +The laser beam from the source (\(S\)) hits an arbitrary point (\(P\)) and diffracts at slightly different angles due to the different wavelengths (\(\lambda_0\) and \(\lambda_1\)). +This phenomenon creates a correlation between the surface rotation angle and the captured speckle image.
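To give a feel for how small that angular difference is, the snippet below evaluates the standard grating equation \(\sin\theta = m\lambda/d\) for two nearby wavelengths. The pitch and wavelengths are assumed values chosen for illustration; this is not the paper's analytical model.

```python
import numpy as np

d = 10e-6              # assumed diffracting feature pitch in metres
lambda_0 = 850e-9      # assumed dominant wavelength in metres
delta_lambda = 0.5e-9  # assumed small wavelength difference
m = 1                  # first diffraction order

theta_0 = np.degrees(np.arcsin(m * lambda_0 / d))
theta_1 = np.degrees(np.arcsin(m * (lambda_0 + delta_lambda) / d))
print(f'angular separation between the two wavelengths: '
      f'{abs(theta_1 - theta_0) * 3600:.1f} arcseconds')
```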

+

The first image below shows the structure of the proposed sensor, which contains a bare sensor, a laser source, and a beam splitter (\(10~mm \times 10~mm\)). +The beam splitter is placed in front of the bare imaging sensor to ensure that most of the light reflected from the marker covers a large area of the sensor. +Additionally, this co-axial optical layout eliminates the light source's lateral offsets, simplifying the speckle behavior under rotations.

+
+

Image title

+
+

The image below shows that the captured speckle image forms overlapping patterns when the surface rotates \(10^\circ\) about the y-axis.

+
+

Image title

+
+

Applying the Fast Fourier Transform (FFT) to obtain the magnitudes of the speckle images captured at various poses (y-axis rotations, z-axis rotations, and z-axis displacements) or from the coded surface reveals interpretable patterns (see the figure and the short sketch below):

+
+

Image title

+
+
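The FFT-magnitude computation referred to above can be sketched in a few lines; here random data stands in for a captured \(640\times360\) monochrome frame, and the log scaling is simply a common visualization choice rather than anything specific to the paper.

```python
import numpy as np

speckle_frame = np.random.rand(360, 640).astype(np.float32)  # stand-in capture (height x width)
spectrum = np.fft.fftshift(np.fft.fft2(speckle_frame))       # centre the zero-frequency component
magnitude = np.log1p(np.abs(spectrum))                        # log scale for easier inspection
```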

We employ a shallow neural network to handle the non-linearities of the underlying physics and estimate the absolute rotation angles from the speckle patterns.

+
+

Image title

+
+

Firstly, we preprocess the captured monochrome speckle frames \(I_{speckle}\) (\(640\times360\)~px) by transforming them into the frequency domain \(\mathcal{F}(I_{speckle})\) using the FFT. +Then the frames are center-cropped and concatenated into a tensor \([\mathcal{F}(I_{\text{speckle}, i})]_{i=1}^5\) with a shape of \((5,320,180)\). +From our practical experience, this concatenated frame tensor provides more robust results when the marker is in motion because it incorporates temporal information. +After that, we feed the samples into three convolutional blocks, each comprising a 2D convolution layer, batch normalization, a ReLU activation function, and max pooling. +After the convolutions, the sample is flattened and fed into a Multi-Layer Perceptron (MLP) containing six linear layers, each followed by batch normalization and a ReLU activation function. +The final layer of the MLP outputs the rotation angles \(\theta_y\) and \(\theta_z\) and the depth \(d_z\).
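A hedged PyTorch sketch of a model with the shape described above follows; the layer widths and kernel sizes are our assumptions for illustration, not the authors' exact architecture (see the Code link under Resources for that).

```python
import torch
import torch.nn as nn

def conv_block(in_ch, out_ch):
    # 2D convolution, batch normalization, ReLU, and max pooling, as described above.
    return nn.Sequential(
        nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
        nn.BatchNorm2d(out_ch),
        nn.ReLU(),
        nn.MaxPool2d(2),
    )

class SpeckleRegressor(nn.Module):
    def __init__(self):
        super().__init__()
        self.features = nn.Sequential(
            conv_block(5, 16), conv_block(16, 32), conv_block(32, 64)
        )
        # Six linear layers in total; widths are assumptions.
        widths = [64 * 40 * 22, 512, 256, 128, 64, 32]
        layers = []
        for w_in, w_out in zip(widths[:-1], widths[1:]):
            layers += [nn.Linear(w_in, w_out), nn.BatchNorm1d(w_out), nn.ReLU()]
        layers.append(nn.Linear(widths[-1], 3))  # outputs theta_y, theta_z, d_z
        self.mlp = nn.Sequential(*layers)

    def forward(self, x):                 # x: (batch, 5, 320, 180) FFT-magnitude tensor
        x = self.features(x)
        return self.mlp(x.flatten(1))

model = SpeckleRegressor()
out = model(torch.rand(2, 5, 320, 180))  # -> shape (2, 3)
```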

+

Since capturing samples in all six Degrees of Freedom simultaneously is physically difficult, we focus on capturing speckle images as the marker rotates about the z-axis and y-axis. +We add controlled closed-loop motors to a rotary stage to automatically capture the speckle images while the marker is rotated about various axes, as shown below. +During the data collection, we control the motors to rotate the marker from \(0^\circ\) to \(40^\circ\) on the y-axis and from \(0^\circ\) to \(90^\circ\) on the z-axis. +Besides the rotations, we repeat the experiment at different depths from \(16~cm\) to \(28~cm\).

+
+

Image title

+
+

Conclusions

+

Baseline

+

We compare our work with the state-of-the-art from Gibson et al. +However, we lack direct access to accurate measurements, such as the wavelengths emitted by the off-the-shelf laser diode. +We therefore employed a gradient descent-based optimization with a captured training set to estimate the unknown variables: the dominant wavelength \(\lambda_0\), the wavelength difference \(\Delta \lambda\), where \(\Delta \lambda = \lambda_0 - \lambda_1 \ll \lambda_0\), and the light source position \(S\) in 3D space. +Following this, we tested the analytical model proposed by the authors with the test set that contains the speckle images captured while the marker rotates from \(0^\circ\) to \(40^\circ\) on the y-axis. +The baseline result indicates a Mean Absolute Error (MAE) of \(0.60^\circ\) (\(std=0.35^\circ\)) on our testbed.
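The parameter-recovery step can be pictured with the hedged sketch below: the unknown physical quantities are declared as differentiable parameters and fitted to measurements with gradient descent. `analytical_model` is a toy placeholder rather than the baseline's actual forward model, and the data is synthetic.

```python
import torch

def analytical_model(angles_deg, lambda_0_nm, delta_lambda_nm, source_pos_m):
    # Toy placeholder mapping rotation angles to a measured quantity.
    return angles_deg * (delta_lambda_nm / lambda_0_nm) + source_pos_m.norm()

# Synthetic "captured" training set generated with known ground-truth values.
angles = torch.linspace(0.0, 40.0, steps=100)
measurements = analytical_model(angles, torch.tensor(850.0),
                                torch.tensor(0.5), torch.tensor([0.0, 0.0, 0.2]))

# Unknowns, initialized away from the ground truth.
lambda_0 = torch.tensor(800.0, requires_grad=True)
delta_lambda = torch.tensor(1.0, requires_grad=True)
source_pos = torch.tensor([0.1, 0.1, 0.1], requires_grad=True)

optimizer = torch.optim.Adam([lambda_0, delta_lambda, source_pos], lr=1e-2)
for _ in range(500):
    optimizer.zero_grad()
    prediction = analytical_model(angles, lambda_0, delta_lambda, source_pos)
    loss = torch.nn.functional.mse_loss(prediction, measurements)
    loss.backward()
    optimizer.step()
```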

+

SpecTrack achieved a lower MAE and std: \(\mathbf{0.31^\circ}\) and \(\mathbf{0.44^\circ}\), respectively. +At the same time, the model can estimate the z-axis rotation with an MAE of \(\mathbf{0.52^\circ}\) (\(std=\mathbf{0.36^\circ}\)). +Furthermore, the model adapts to varying depths, showing an accuracy of \(0.15~cm\).

+

Future work

+

Testing and optimizing the system in real-world environments, considering varying lighting, distances, and object motions, is crucial for successful operation in various applications including VR, AR, and robotics.

+

Relevant research works

+

Here are relevant research works from the authors:

+ + + +

Outreach

+

We host a Slack group with more than 250 members. +This Slack group focuses on the topics of rendering, perception, displays, and cameras. +The group is open to the public, and you can become a member by following this link.

+

Contact Us

+
+

Warning

+

Please reach us through email to provide your feedback and comments.

+
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/references.bib b/references.bib new file mode 100644 index 00000000..ade3b644 --- /dev/null +++ b/references.bib @@ -0,0 +1,1182 @@ +--- +--- + +@string{aps = {American Physical Society,}} + + +@article{cennini2010heart, + abbr={Optics Express}, + bibtex_show={true}, + title="{Heart rate monitoring via remote photoplethysmography with motion artifacts reduction}", + author={Cennini, Giovanni and Arguel, Jeremie and Akşit, Kaan and van Leest, Arno}, + journal={Optics Express}, + month=Mar, + volume={18}, + number={5}, + pages={4867--4875}, + year={2010}, + publisher={Optical Society of America}, + preview={heart_rate.jpg}, + video={https://youtu.be/5X1clsjqQnw}, + selected={true}, + doi={https://doi.org/10.1364/OE.18.004867} +} + + +@inproceedings{baghsiahi201148, + abbr={SID}, + bibtex_show={true}, + title="{48.4: Beam Forming for a Laser Based Auto-stereoscopic Multi-Viewer Display}", + author={Baghsiahi, H. and Selviah, D.R. and Willman, E. and Fern{\'a}ndez, A. and Day, S.E. and Akşit, Kaan and Ölçer, S. and Mostafazadeh, A. and Erden, E. and Kishore, V.C. and others}, + year={2011}, + organization={SID}, + doi={https://doi.org/10.1889/1.3621421} +} + + +@inproceedings{aksit2011light, + abbr={IEEE 3DTV}, + bibtex_show={true}, + title="{Light engine and optics for HELIUM3D auto-stereoscopic laser scanning display}", + author={Akşit, Kaan and Ölçer, S. and Erden, E. and Kishore, VC and Urey, H. and Willman, E. and Baghsiahi, H. and Day, S.E. and Selviah, D.R. and +An{\'\i}bal Fern{\'a}ndez, F. and others}, + booktitle={3DTV Conference: The True Vision-Capture, Transmission and Display of 3D Video (3DTV-CON), Turkey, 2011}, + pages={1--4}, + year={2011}, + organization={IEEE}, + doi={https://doi.org/10.1109/3DTV.2011.5877226} +} + + +@inproceedings{urey2012novel, + abbr={Optica Con.}, + bibtex_show={true}, + title={Novel 3D displays using micro-optics and MEMS}, + author={Urey, H. and Akşit, Kaan and Eldes, O.}, + booktitle={International Conference on Fibre Optics and Photonics}, + year={2012}, + organization={Optical Society of America}, + doi={https://doi.org/10.1364/PHOTONICS.2012.M2A.2} +} + + +@article{aksit2012portable, + abbr={JoDT}, + bibtex_show={true}, + title="{Portable 3D Laser Projector using Mixed Polarization Technique}", + author={Akşit, Kaan and Eldes, O. and Viswanathen, S. and Freeman, M. 
and Urey, H.}, + journal={Journal of Display Technology}, + volume={8}, + month=Sep, + pages={582--589}, + year={2012}, + publisher={Optical Society of America}, + award={Best 3D product award of International 3D Society (4th year)}, + video={https://youtu.be/mZXOTRDEyg0}, + preview={portable_3d.png}, + selected={true}, + doi={https://doi.org/10.1109/JDT.2012.2205664}, + pdf={AksitEtAl_JoDT2012_Portable_3d_laser_projector_using_mixed_polarization_technique.pdf}, +} + + +@inproceedings{akcsit2012multiple, + abbr={IMID}, + bibtex_show={true}, + title={Multiple body tracking for interactive mobile projectors}, + author={Akşit, Kaan and Eldes, O and Urey, Hakan}, + booktitle={IMID2012 conference, 2012, SID/KIDS}, + year={2012}, +} + + +@inproceedings{urey2013mems, + abbr={IEEE MEMS}, + bibtex_show={true}, + title={MEMS scanners and emerging 3D and interactive augmented reality display applications}, + author={Urey, H and Holmstrom, S and Baran, U and Akşit, Kaan and Hedili, MK and Eldes, O}, + booktitle={2013 Transducers \& Eurosensors XXVII: The 17th International Conference on Solid-State Sensors, Actuators and Microsystems (TRANSDUCERS \& EUROSENSORS XXVII)}, + pages={2485--2488}, + year={2013}, + organization={IEEE}, + doi={https://doi.org/10.1109/Transducers.2013.6627310} +} + + +@article{akcsit2013dynamic, + abbr={Optics Express}, + bibtex_show={true}, + title={Dynamic exit pupil trackers for autostereoscopic displays}, + author={Akşit, Kaan and Baghsiahi, Hadi and Surman, Phil and Ӧl{\c{c}}er, Selim and Willman, Eero and Selviah, David R and Day, Sally and Urey, Hakan}, + journal={Optics express}, + month=Jun, + volume={21}, + number={12}, + pages={14331--14341}, + year={2013}, + publisher={Optica Publishing Group}, + video={https://youtu.be/Oh1xDgdbvYU}, + selected={true}, + pdf={AksitEtAl_OpticsExpress2013_Dynamic_exit_pupil_trackers_for_autostereoscopic_displays.pdf}, + preview={dynamic_exit_pupil.png}, + doi={https://doi.org/10.1364/OE.21.014331} +} + + +@inproceedings{aksit2013paper, + abbr={SID}, + bibtex_show={true}, + title={Paper No 15.1: Augmented Reality and 3D Displays Using Pico-Projectors}, + author={Akşit, Kaan and Eldes, Osman and Hedili, M Kivanc and Urey, Hakan}, + booktitle={SID Symposium Digest of Technical Papers}, + volume={44}, + pages={243--246}, + year={2013}, + organization={Wiley Online Library}, + doi={https://doi.org/10.1002/sdtp.74} +} + + +@inproceedings{surman2013paper, + abbr={SID}, + bibtex_show={true}, + title={Paper No 15.2: Head-Tracked Retroreflecting 3D Display}, + author={Surman, Phil and Day, Sally and Boby, Bonny and Chen, Hao and Urey, Hakan and Akşit, Kaan}, + booktitle={SID Symposium Digest of Technical Papers}, + volume={44}, + pages={247--250}, + year={2013}, + organization={Wiley Online Library}, + doi={https://doi.org/10.1002/sdtp.73} +} + + +@inproceedings{eldes2013paper, + abbr={SID}, + bibtex_show={true}, + title={Paper No 17.4: Auto-Stereoscopic Projection Display Using Rotating Screen}, + author={Eldes, Osman and Akşit, Kaan and Urey, Hakan}, + booktitle={SID Symposium Digest of Technical Papers}, + volume={44}, + pages={275--277}, + year={2013}, + organization={Wiley Online Library}, + doi={https://doi.org/10.1002/sdtp.79} +} + + +@inproceedings{surman2013single, + abbr={IEEE 3DTV}, + bibtex_show={true}, + title={Single and multi-user head tracked glasses-free 3D displays}, + author={Surman, Phil and Day, Sally and Akşit, Kaan and Urey, Hakan and Benjamin, Joshua and Jain, Kuber and Chen, Hao}, + booktitle={2013 3DTV Vision Beyond 
Depth (3DTV-CON)}, + pages={1--4}, + year={2013}, + organization={IEEE}, + doi={https://doi.org/10.1109/3DTV.2013.6676654} +} + + +@article{eldes2013multi, + abbr={Optics Express}, + bibtex_show={true}, + title={Multi-view autostereoscopic projection display using rotating screen}, + author={Eldes, Osman and Akşit, Kaan and Urey, Hakan}, + journal={Optics Express}, + volume={21}, + month=Nov, + number={23}, + pages={29043--29054}, + year={2013}, + publisher={Optica Publishing Group}, + preview={rotating_3d.png}, + award={Spotlight on Optics at Optica}, + selected={true}, + video={http://youtu.be/853-4knJ2Nc}, + pdf={EldesEtAl_OpticsExpress2013_Multi_view_autosteroscopic_projection_display_using_rotating_screen.pdf}, + doi={https://doi.org/10.1364/OE.21.029043} +} + + +@inproceedings{schmid2014sound, + abbr={IEEE Globecom}, + bibtex_show={true}, + title={From sound to sight: Using audio processing to enable visible light communication}, + author={Schmid, Stefan and Schwyn, Daniel and Akşit, Kaan and Corbellini, Giorgio and Gross, Thomas R and Mangold, Stefan}, + booktitle={2014 IEEE Globecom Workshops (GC Wkshps)}, + pages={518--523}, + year={2014}, + organization={IEEE}, + pdf={SchmidEtAl_WoOWC2014_From_sound_to_sight_using_audio_processing_to_enable_visible_light_communication.pdf}, + preview={sound_to_light.png}, + selected={true}, + doi={https://doi.org/10.1109/GLOCOMW.2014.7063484} +} + + +@inproceedings{akcsit201456, + abbr={SID}, + bibtex_show={true}, + title={56.6 L: Late-News Paper: Modular Multi-Projection Multi-View Autostereoscopic Display using MEMS Laser Projectors}, + author={Akşit, Kaan and Ölçer, Selim and Urey, Hakan}, + booktitle={SID Symposium Digest of Technical Papers}, + volume={45}, + number={1}, + pages={828--831}, + year={2014}, + doi={https://doi.org/10.1002/j.2168-0159.2014.tb00218.x}, + organization={Wiley Online Library} +} + + +@inproceedings{akcsit2014p, + abbr={SID}, + bibtex_show={true}, + title={P-187L: Late-News Poster: Improved 3D with Super Stereoscopy Technique}, + author={Akşit, Kaan and Niaki, Amir Hossein Ghanbari and Urey, Hakan}, + booktitle={SID Symposium Digest of Technical Papers}, + volume={45}, + month=Dec, + number={1}, + pages={1067--1069}, + year={2014}, + doi={https://doi.org/10.1002/j.2168-0159.2014.tb00277.x}, + organization={Wiley Online Library} +} + + +@article{akcsit2014next, + abbr={PhD thesis}, + bibtex_show={true}, + title={Next Generation 3D Display Applications using Laser Scanning Pico Projectors}, + author={Akşit, Kaan}, + journal={Available at SSRN 3698405}, + year={2014}, + doi={https://dx.doi.org/10.2139/ssrn.3698405} +} + + +@inproceedings{akcsit2014super3d, + abbr={IEEE 3DTV}, + bibtex_show={true}, + title={Super stereoscopy 3D glasses for more realistic 3D vision}, + author={Akşit, Kaan and Niaki, Amir Hossein Ghanbari and Eldes, Osman and Urey, Hakan}, + booktitle={2014 3DTV-Conference: The True Vision-Capture, Transmission and Display of 3D Video (3DTV-CON)}, + pages={1--3}, + year={2014}, + organization={IEEE} +} + + +@article{corbellini2014connecting, + abbr={IEEE Comm}, + bibtex_show={true}, + title={Connecting networks of toys and smartphones with visible light communication}, + author={Corbellini, Giorgio and Akşit, Kaan and Schmid, Stefan and Mangold, Stefan and Gross, Thomas R}, + journal={IEEE communications magazine}, + volume={52}, + number={7}, + pages={72--78}, + month=Jul, + year={2014}, + publisher={IEEE}, + video={https://youtu.be/10lv_FwlqMo}, + selected={true}, + preview={connect_toys.png}, + 
pdf={CorbelliniEtAl_IEEECommMag2014_Connecting_networks_of_toys_and_smartphones_with_visible_light_communication.pdf}, + doi={https://doi.org/10.1109/MCOM.2014.6852086} +} + + +@inproceedings{akcsit2014head, + abbr={ACM CACET}, + bibtex_show={true}, + title={Head-worn mixed reality projection display application}, + author={Akşit, Kaan and Kade, Daniel and {\"O}zcan, O{\u{g}}uzhan and Urey, Hakan}, + booktitle={Proceedings of the 11th Conference on Advances in Computer Entertainment Technology}, + pages={1--9}, + month=Nov, + year={2014}, + preview={head_worn_projector.png}, + pdf={AksitEtAl_ACE2014_Head_worn_mixed_reality_projection_display_application.pdf}, + selected={true}, + video={https://youtu.be/7F3Z-4UZWUc}, + doi={https://doi.org/10.1145/2663806.2663826} +} + + +@article{akcsit2014super, + abbr={Optics Letters}, + bibtex_show={true}, + title={Super stereoscopy technique for comfortable and realistic 3D displays}, + author={Akşit, Kaan and Niaki, Amir Hossein Ghanbari and Ulusoy, Erdem and Urey, Hakan}, + journal={Optics letters}, + volume={39}, + number={24}, + pages={6903--6906}, + year={2014}, + publisher={Optica Publishing Group}, + selected={true}, + preview={super_stereoscopy.png}, + pdf={AksitEtAl_OpticsLetters2014_Super_stereoscopy_technique_for_comfortable_and_realistic_3d_displays.pdf}, + doi={https://doi.org/10.1364/OL.39.006903} +} + + +@article{surman2015head, + abbr={SID}, + bibtex_show={true}, + title={Head tracked retroreflecting 3D display}, + author={Surman, Phil and Day, Sally and Liu, Xianzi and Benjamin, Joshua and Urey, Hakan and Akşit, Kaan}, + journal={Journal of the Society for Information Display}, + volume={23}, + number={2}, + pages={56--68}, + year={2015}, + publisher={Wiley Online Library}, + doi={https://doi.org/10.1002/jsid.295} +} + + +@article{akcsit2015slim, + abbr={Applied Optics}, + bibtex_show={true}, + title={Slim near-eye display using pinhole aperture arrays}, + author={Akşit, Kaan and Kautz, Jan and Luebke, David}, + journal={Applied optics}, + volume={54}, + number={11}, + pages={3422--3427}, + year={2015}, + publisher={Optica Publishing Group}, + video={https://youtu.be/UYGa6n_0aUs}, + selected={true}, + pdf={AksitEtAl_AppliedOptics2015_Slim_near_eye_display_using_pinhole_aperture_arrays.pdf}, + preview={pinhole.png}, + doi={https://doi.org/10.1364/AO.54.003422} +} + + +@article{kade2015head, + bibtex_show={true}, + title={Head-mounted mixed reality projection display for games production and entertainment}, + author={Kade, Daniel and Akşit, Kaan and Urey, Hakan and {\"O}zcan, O{\u{g}}uzhan}, + journal={Personal and Ubiquitous Computing}, + volume={19}, + number={3}, + pages={509--521}, + year={2015}, + publisher={Springer}, + doi={https://doi.org/10.1007/s00779-015-0847-y} +} + + +@article{dunn2017wide, + abbr={IEEE VR}, + bibtex_show={true}, + title={Wide field of view varifocal near-eye display using see-through deformable membrane mirrors}, + author={Dunn, David and Tippets, Cary and Torell, Kent and Kellnhofer, Petr and Akşit, Kaan and Didyk, Piotr and Myszkowski, Karol and Luebke, David and Fuchs, Henry}, + journal={IEEE transactions on visualization and computer graphics}, + volume={23}, + number={4}, + pages={1322--1331}, + year={2017}, + publisher={IEEE}, + pdf={DunnEtAl_IEEEVR2017_Wide_Field_of_View_Varifocal_Near_Eye_Display_Using_See_Through_Deformable_Membrane_Mirrors.pdf}, + doi={https://doi.org/10.1109/TVCG.2017.2657058}, + video={https://www.youtube.com/watch?v=aRZrtZfVKv0}, + award={Best Paper Award}, + selected={true}, 
+ preview={varifocal_membrane.png}, +} + + +@inproceedings{aksit2017computational, + abbr={Optica 3DIAD}, + bibtex_show={true}, + title={Computational Displays for Virtual Reality and Augmented Reality Applications}, + author={Akşit, Kaan}, + booktitle={3D Image Acquisition and Display: Technology, Perception and Applications}, + pages={DTu4F--1}, + year={2017}, + organization={Optica Publishing Group}, + doi={https://doi.org/10.1364/3D.2017.DTu4F.1} +} + + +@incollection{akcsit2017varifocal, + abbr={SIGGRAPH}, + bibtex_show={true}, + title={Varifocal virtuality: a novel optical layout for near-eye display}, + author={Akşit, Kaan and Lopes, Ward and Kim, Jonghyun and Spjut, Josef and Patney, Anjul and Shirley, Peter and Luebke, David and Cholewiak, Steven A and Srinivasan, Pratul and Ng, Ren and others}, + booktitle={ACM SIGGRAPH 2017 Emerging Technologies}, + pages={1--2}, + year={2017}, + doi={https://doi.org/10.1145/3084822.3084829} +} + + +@incollection{dunn2017membrane, + abbr={SIGGRAPH}, + bibtex_show={true}, + title={Membrane AR: varifocal, wide field of view augmented reality display from deformable membranes}, + author={Dunn, David and Tippets, Cary and Torell, Kent and Fuchs, Henry and Kellnhofer, Petr and Myszkowski, Karol and Didyk, Piotr and Akşit, Kaan and Luebke, David}, + booktitle={ACM SIGGRAPH 2017 Emerging Technologies}, + pages={1--2}, + year={2017}, + award={Emerging Technologies DCEXPO Special Prize}, + selected={true}, + preview={varifocal_membrane.png}, + pdf={DunnEtAl_SIGGRAPH2017_Membrane_ar_varifocal_wide_field_of_view_augmented_reality_display_From_deformable_membranes.pdf}, + doi={https://doi.org/10.1145/3084822.3084846} +} + + +@article{akcsit2017near, + abbr={SIGGRAPH}, + bibtex_show={true}, + title={Near-eye varifocal augmented reality display using see-through screens}, + author={Akşit, Kaan and Lopes, Ward and Kim, Jonghyun and Shirley, Peter and Luebke, David}, + journal={ACM Transactions on Graphics (TOG)}, + volume={36}, + number={6}, + pages={1--13}, + year={2017}, + publisher={ACM New York, NY, USA}, + video={https://youtu.be/dN-8X0lUig4}, + selected={true}, + slides={https://docs.google.com/presentation/d/1MPwPV6HG_o83jRunhNnCrNh0o9KMjIGMvg66tXKzl4s/edit?usp=sharing}, + pdf={AksitEtAl_SiggraphAsia2017_Near_eye_varifocal_augmented_reality_display_using_see_through_screens.pdf}, + preview={see_through_hoe.png}, + doi={https://doi.org/10.1145/3130800.3130892} +} + +@inproceedings{koulieris2018cutting, + abbr={IEEE VR}, + bibtex_show={true}, + title={Cutting-edge VR/AR display technologies (gaze-, accommodation-, motion-aware and HDR-enabled)}, + author={Koulieris, George-Alex and Akşit, Kaan and Richardt, Christian and Mantiuk, Rafa{\l} and Mania, Katerina}, + booktitle={IEEE VR 2018-25th IEEE Conference on Virtual Reality and 3D User Interfaces}, + year={2018}, + web={https://vrdisplays.github.io/ieeevr2018/}, +} + + +@incollection{koulieris2018cutting_sigasia, + title={Cutting-edge VR/AR display technologies (gaze-, accommodation-, motion-aware and HDR-enabled)}, + author={Koulieris, George-Alex and Ak{\c{s}}it, Kaan and Richardt, Christian and Mantiuk, Rafa{\l}}, + booktitle={SIGGRAPH Asia 2018 Courses}, + pages={1--341}, + year={2018}, + doi={https://doi.org/10.1145/3277644.3277771} +} + + + +@inproceedings{dunn201810, + abbr={SID}, + bibtex_show={true}, + title={10-1: Towards Varifocal Augmented Reality Displays using Deformable Beamsplitter Membranes}, + author={Dunn, David and Chakravarthula, Praneeth and Dong, Qian and Akşit, Kaan and Fuchs, 
Henry}, + booktitle={SID Symposium Digest of Technical Papers}, + volume={49}, + number={1}, + pages={92--95}, + year={2018}, + organization={Wiley Online Library}, + doi={https://doi.org/10.1002/sdtp.12490}, +} + +@misc{cakmak2018electro, + abbr={US Patent}, + bibtex_show={true}, + title={Electro-stimulation Device}, + author={Cakmak, Yusuf Ozgur and Urey, Hakan and Ölçer, Selim and Akşit, Kaan}, + year={2018}, + month=dec # "~2", + publisher={Google Patents}, + note={US Patent 9855426B2} + +} + +@incollection{rathinavel2018steerable, + abbr={SIGGRAPH}, + bibtex_show={true}, + title={Steerable application-adaptive near eye displays}, + author={Rathinavel, Kishore and Chakravarthula, Praneeth and Akşit, Kaan and Spjut, Josef and Boudaoud, Ben and Whitted, Turner and Luebke, David and Fuchs, Henry}, + booktitle={ACM SIGGRAPH 2018 Emerging Technologies}, + pages={1--2}, + year={2018}, + selected={true}, + preview={manufacturing_application_driven.png}, + award={Emerging Technology Best in Show Award}, + doi={https://doi.org/10.1145/3214907.3214911} +} + + +@article{chakravarthula2018focusar, + abbr={IEEE ISMAR}, + bibtex_show={true}, + title={Focusar: Auto-focus augmented reality eyeglasses for both real world and virtual imagery}, + author={Chakravarthula, Praneeth and Dunn, David and Akşit, Kaan and Fuchs, Henry}, + journal={IEEE transactions on visualization and computer graphics}, + volume={24}, + number={11}, + pages={2906--2916}, + year={2018}, + publisher={IEEE}, + pdf={ChakravarthulaEtAl_IEEEISMAR18_Focusar_auto_focus_augmented_reality_glasses_for_both_real_world_and_virtual_imagery.pdf}, + award={Best paper award}, + selected={true}, + preview={focusar.png}, + video={https://youtu.be/6rC_XGXk3CY}, + doi={https://doi.org/10.1109/TVCG.2018.2868532} +} + + +@misc{kim2018holographic, + abbr={US Patent}, + bibtex_show={true}, + title={Holographic reflective slim virtual/augmented reality display system and method}, + author={Kim, Jonghyun and Akşit, Kaan and Lopes, Ward and Luebke, David Patrick}, + year={2018}, + month=dec # "~11", + publisher={Google Patents}, + note={US Patent 10,151,924} + +} + + +@article{akcsit2019manufacturing, + abbr={IEEE VR}, + bibtex_show={true}, + title={Manufacturing application-driven foveated near-eye displays}, + author={Akşit, Kaan and Chakravarthula, Praneeth and Rathinavel, Kishore and Jeong, Youngmo and Albert, Rachel and Fuchs, Henry and Luebke, David}, + journal={IEEE transactions on visualization and computer graphics}, + volume={25}, + number={5}, + pages={1928--1939}, + year={2019}, + publisher={IEEE}, + pdf={AksitEtAl_IEEEVR2019_manufacturing_application_driven_foveated_near_eye_displays.pdf}, + preview={manufacturing_application_driven.png}, + selected={true}, + presentation={https://www.youtube.com/watch?v=jYWjqPeMSw8}, + slides={https://docs.google.com/presentation/d/1vbKkoeCTMRO0J47ufjzRgJUSF-T9hKxBnv2b1yVMt0k/edit?usp=sharing}, + award={Best paper nominee}, + doi={https://doi.org/10.1109/TVCG.2019.2898781} +} + + +@inproceedings{krzanich2019retrotracker, + abbr={IEEE VR}, + bibtex_show={true}, + title={RetroTracker: Upgrading Existing Virtual Reality Tracking Systems}, + author={Krzanich, Kylee M and Whitmire, Eric and Stengel, Michael and Kass, Michael and Akşit, Kaan and Luebke, David}, + booktitle={2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)}, + pages={1034--1035}, + year={2019}, + organization={IEEE}, + doi={https://doi.org/10.1109/VR.2019.8798220} +} + + +@inproceedings{koulieris2019near, + abbr={Eurographics}, 
+ bibtex_show={true}, + title={Near-eye display and tracking technologies for virtual and augmented reality}, + author={Koulieris, George Alex and Akşit, Kaan and Stengel, Michael and Mantiuk, Rafa{\l} K and Mania, Katerina and Richardt, Christian}, + booktitle={Computer Graphics Forum}, + volume={38}, + number={2}, + pages={493--519}, + year={2019}, + organization={Wiley Online Library}, + selected={true}, + preview={egstar.png}, + html={https://richardt.name/publications/near-eye-display-and-tracking-technologies/}, + pdf={NearEyeDisplayAndTracking-KoulierisEtAl-CGF2019-STAR.pdf}, + doi={https://doi.org/10.1111/cgf.13654} +} + + +@misc{aksit2019catadioptric, + abbr={US Patent}, + bibtex_show={true}, + title={Catadioptric on-axis virtual/augmented reality glasses system and method}, + author={Akşit, Kaan and Luebke, David Patrick}, + year={2019}, + month=jun # "~11", + publisher={Google Patents}, + note={US Patent 10,317,678} +} + + +@article{kim2019foveated, + abbr={SIGGRAPH}, + bibtex_show={true}, + title={Foveated AR: dynamically-foveated augmented reality display.}, + author={Kim, Jonghyun and Jeong, Youngmo and Stengel, Michael and Akşit, Kaan and Albert, Rachel A and Boudaoud, Ben and Greer, Trey and Kim, Joohwan and Lopes, Ward and Majercik, Zander and others}, + journal={ACM Trans. Graph.}, + volume={38}, + number={4}, + pages={99--1}, + year={2019}, + preview={foveated_displays.png}, + pdf={KimEtAl_Siggraph2019_Foveated_ar_dynamically_foveated_augmented_reality_display.pdf}, + video={https://youtu.be/IknBUoRGUkM}, + selected={true}, + doi={https://doi.org/10.1145/3306346.3322987} +} + + +@incollection{kim2019matching, + abbr={SIGGRAPH}, + bibtex_show={true}, + title={Matching prescription \& visual acuity: Towards ar for humans}, + author={Kim, Jonghyun and Stengel, Michael and Wu, Jui-Yi and Boudaoud, Ben and Spjut, Josef and Akşit, Kaan and Albert, Rachel and Greer, Trey and Jeong, Youngmo and Lopes, Ward and others}, + booktitle={ACM SIGGRAPH 2019 Emerging Technologies}, + pages={1--2}, + year={2019}, + preview={foveated_displays.png}, + selected={true}, + award={Emerging technologies best in show award}, + doi={https://doi.org/10.1145/3305367.3327978} +} + +@misc{kim2019holographic, + abbr={US patent}, + bibtex_show={true}, + title={Holographic reflective slim virtual/augmented reality display system and method}, + author={Kim, Jonghyun and Akşit, Kaan and Lopes, Ward and Luebke, David Patrick}, + year={2019}, + month=sep # "~3", + publisher={Google Patents}, + note={US Patent 10,401,623} +} + + +@misc{lopes2019system, + abbr={US patent}, + bibtex_show={true}, + title={System and method for foveated image generation using an optical combiner}, + author={Lopes, Ward and Akşit, Kaan}, + year={2019}, + month=nov # "~19", + publisher={Google Patents}, + note={US Patent 10,481,684} +} + + +@article{akcsit2020patch, + abbr={Optics Express}, + bibtex_show={true}, + title={Patch scanning displays: spatiotemporal enhancement for displays}, + author={Akşit, Kaan}, + journal={Optics express}, + volume={28}, + number={2}, + pages={2107--2121}, + year={2020}, + publisher={Optical Society of America}, + preview={patch_scan.png}, + selected={true}, + video={https://youtu.be/c3okd_gIlrg}, + pdf={AksitEtAl_OpticsExpress2020_Patch_scanning_displays_spatiotemporal_enhancement_for_displays.pdf}, + doi={https://doi.org/10.1364/OE.380858} +} + + +@article{spjut2020toward, + abbr={IEEE VR}, + bibtex_show={true}, + selected={true}, + title={Toward standardized classification of foveated displays}, + 
author={Spjut, Josef and Boudaoud, Ben and Kim, Jonghyun and Greer, Trey and Albert, Rachel and Stengel, Michael and Akşit, Kaan and Luebke, David}, + journal={IEEE transactions on visualization and computer graphics}, + volume={26}, + number={5}, + pages={2126--2134}, + year={2020}, + publisher={IEEE}, + preview={fovea.png}, + arxiv={1905.06229}, + pdf={SpjutEtAl_IEEEVR2020_Toward_standardized_classification_of_foveated_displays.pdf}, + doi={https://doi.org/10.1109/TVCG.2020.2973053} +} + + +@article{akcsit2020gaze, + abbr={arxiv}, + bibtex_show={true}, + title={Gaze-sensing leds for head mounted displays}, + author={Akşit, Kaan and Kautz, Jan and Luebke, David}, + journal={arXiv preprint arXiv:2003.08499}, + year={2020}, + arxiv={2003.08499}, + doi={https://doi.org/10.48550/arXiv.2003.08499} +} + + +@misc{mcguire2020computational, + abbr={US Patent}, + bibtex_show={true}, + title={Computational blur for varifocal displays}, + author={McGuire, Morgan and Akşit, Kaan and Shirley, Pete and Luebke, David}, + year={2020}, + month=jun # "~30", + publisher={Google Patents}, + note={US Patent 10,699,383} +} + + +@inproceedings{li2020optical, + abbr={IEEE ISMAR}, + bibtex_show={true}, + selected={true}, + title={Optical gaze tracking with spatially-sparse single-pixel detectors}, + author={Li, Richard and Whitmire, Eric and Stengel, Michael and Boudaoud, Ben and Kautz, Jan and Luebke, David and Patel, Shwetak and Akşit, Kaan}, + booktitle={2020 IEEE international symposium on mixed and augmented reality (ISMAR)}, + pages={117--126}, + year={2020}, + organization={IEEE}, + html={http://lichard49.github.io/nextgaze.html}, + slides={https://docs.google.com/presentation/d/1G3LeAFX_PLvFB_ZaByiGPMdHHPMbBXMIazifKAjMMkE/edit?usp=sharing}, + video={https://youtu.be/7_hMGwTGdhg}, + preview={single_pixel_gaze.png}, + doi={https://doi.org/10.1109/ISMAR50242.2020.00033} +} + +@misc{whitmire2020gaze, + abbr={US Patent}, + bibtex_show={true}, + title={Gaze tracking system for use in head mounted displays}, + author={Whitmire, Eric and Akşit, Kaan and Stengel, Michael and Kautz, Jan and Luebke, David and Boudaoud, Ben}, + year={2020}, + month=nov # "~17", + publisher={Google Patents}, + note={US Patent 10,838,492} +} + + +@article{walton2021beyond, + abbr={SIGGRAPH}, + bibtex_show={true}, + selected={true}, + title={Beyond blur: Real-time ventral metamers for foveated rendering}, + author={Walton, David R and Dos Anjos, Rafael Kuffner and Friston, Sebastian and Swapp, David and Akşit, Kaan and Steed, Anthony and Ritschel, Tobias}, + journal={ACM Transactions on Graphics}, + month=Aug, + volume={40}, + number={4}, + pages={1--14}, + year={2021}, + publisher={Association for Computing Machinery (ACM)}, + html={https://vr-unity-viewer.cs.ucl.ac.uk/}, + pdf={WaltonEtAl_SIGGRAPH2021_Beyond_blur_real_time_ventral_metamers_for_foveated_rendering.pdf}, + preview={beyond_blur.png}, + doi={https://doi.org/10.1145/3450626.3459943} +} + + +@inproceedings{dogan2021sensicut, + abbr={ACM UIST}, + bibtex_show={true}, + selected={true}, + title={Sensicut: Material-aware laser cutting using speckle sensing and deep learning}, + author={Dogan, Mustafa Doga and Acevedo Colon, Steven Vidal and Sinha, Varnika and Akşit, Kaan and Mueller, Stefanie}, + booktitle={The 34th Annual ACM Symposium on User Interface Software and Technology}, + pages={24--38}, + year={2021}, + html={https://hcie.csail.mit.edu/research/sensicut/sensicut.html}, + video={https://youtu.be/BdvSAJaukI8}, + presentation={https://youtu.be/fxD5GEMQ8kk}, + 
pdf={DoganEtAl_UIST2021_Sensicut_material_aware_laser_cutting_using_speckle_sensing_and_deep_learning.pdf}, + dataset={https://www.kaggle.com/dogadgn/sensicut-speckle-patterns}, + preview={sensicut.png}, + doi={https://doi.org/10.1145/3472749.3474733} +} + + +@misc{aksit2021method, + abbr={US Patent}, + bibtex_show={true}, + title={Method and apparatus for spatiotemporal enhancement of patch scanning displays}, + author={Akşit, Kaan}, + year={2021}, + month=aug # "~24", + publisher={Google Patents}, + note={US Patent 11,100,830} +} + + +@article{itoh2021beaming, + abbr={IEEE VR}, + bibtex_show={true}, + selected={true}, + title={Beaming displays}, + author={Itoh, Yuta and Kaminokado, Takumi and Ak{\c{s}}it, Kaan}, + journal={IEEE Transactions on Visualization and Computer Graphics}, + volume={27}, + number={5}, + pages={2659--2668}, + month={Mar}, + year={2021}, + publisher={IEEE}, + preview={beaming_displays.png}, + pdf={ItohEtAl_IEEEVR2021_Beaming_displays.pdf}, + video={https://youtu.be/TKl1l3b-LDs}, + presentation={https://www.youtube.com/watch?v=j0nY4_cauZY}, + award={Best paper nominee}, + doi={https://doi.org/10.1109/TVCG.2021.3067764} +} + + +@misc{whitmire2021driver, + abbr={US Patent}, + bibtex_show={true}, + title={Driver gaze tracking system for use in vehicles}, + author={Whitmire, Eric and Akşit, Kaan and Stengel, Michael and Kautz, Jan and Luebke, David and Boudaoud, Ben}, + year={2021}, + month=mar # "~25", + publisher={Google Patents}, + note={US Patent App. 16/578,077} +} + + +@ARTICLE{10.3389/frvir.2021.763340, + abbr={Frontiers in Virtual Reality}, + bibtex_show={true}, + selected={true}, + AUTHOR={Orlosky, Jason and Sra, Misha and Bektaş, Kenan and Peng, Huaishu and Kim, Jeeeun and Kos’myna, Nataliya and Höllerer, Tobias and Steed, Anthony and Kiyokawa, Kiyoshi and Akşit, Kaan}, + TITLE={Telelife: The Future of Remote Living}, + JOURNAL={Frontiers in Virtual Reality}, + VOLUME={2}, + YEAR={2021}, + URL={https://www.frontiersin.org/articles/10.3389/frvir.2021.763340}, + ISSN={2673-4192}, + pdf={OrloskyEtAl_FrontiersInVirtualReality2021_Telelife_the_future_of_remote_living.pdf}, + preview={telelife.png}, + doi={https://doi.org/10.3389/frvir.2021.763340} +} + + +@inproceedings{akcsit2021towards, + abbr={Optica}, + bibtex_show={true}, + title={Towards Remote Pixelless Displays}, + author={Akşit, Kaan}, + booktitle={Digital Holography and Three-Dimensional Imaging}, + pages={DW4B--1}, + year={2021}, + organization={Optical Society of America}, + doi={https://doi.org/10.1364/DH.2021.DW4B.1} +} + + +@misc{aksit2021methodpatch, + abbr={US Patent}, + bibtex_show={true}, + title={Method and apparatus for spatiotemporal enhancement of patch scanning displays}, + author={Akşit, Kaan}, + year={2021}, + month=aug # "~24", + publisher={Google Patents}, + note={US Patent 11,100,830} +} + + + +@article{kavakli2022optimizing, + title={Optimizing vision and visuals: lectures on cameras, displays and perception}, + author={Kavaklı, Koray and Walton, David Robert and Antipa, Nick and Mantiuk, Rafał and Lanman, Douglas and Akşit, Kaan}, + journal={ACM SIGGRAPH 2022}, + booktitle={ACM SIGGRAPH 2022 Courses}, + pages={1--66}, + month={Aug}, + year={2022}, + bibtex_show={true}, + selected={true}, + html={https://complightlab.com/teaching/siggraph2022_optimizing_vision_and_visuals/}, + doi={https://doi.org/10.1145/3532720.3535650}, + code={https://github.com/complight/cameras-displays-perception-course}, + preview={optimizing_vision_and_visuals.png}, + video={https://youtu.be/z_AtSgct6_I}, + 
pdf={https://github.com/complight/cameras-displays-perception-course/blostermain/latex/course.pdf} +} + + +@inproceedings{walton2022metameric, + abbr={IEEE VR}, + bibtex_show={true}, + selected={true}, + title={Metameric Varifocal Holograms}, + author={Walton, David R and Kavakl{\i}, Koray and Dos Anjos, Rafael Kuffner and Swapp, David and Weyrich, Tim and Urey, Hakan and Steed, Anthony and Ritschel, Tobias and Akşit, Kaan}, + booktitle={2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)}, + pages={746--755}, + month={Mar}, + year={2022}, + organization={IEEE}, + preview={metameric.png}, + code={https://github.com/complight/metameric_holography}, + video={https://vimeo.com/623474853}, + arxiv={2110.01981}, + pdf={WaltonEtAl_IEEEVR2022_Metameric_varifocal_holograms.pdf}, + doi={https://doi.org/10.1109/VR51125.2022.00096} +} + + + +@article{kingshott2022unrolled, + abbr={Optics Express}, + bibtex_show={true}, + selected={true}, + title={Unrolled Primal-Dual Networks for Lensless Cameras}, + author={Kingshott, Oliver and Antipa, Nick and Bostan, Emrah and Akşit, Kaan}, + journal={Optics Express}, + year={2022}, + month={Dec}, + arxiv={2203.04353}, + preview={unrolled_primal_dual.png}, + code={https://github.com/oliland/lensless-primal-dual}, + pdf={KingshottEtAl_OpticsExpress2022_Unrolled_primal_dual_networks_for_lensless_cameras.pdf}, + doi={https://doi.org/10.1364/OE.475521}, + supp={KingshottEtAl_OpticsExpress2022_Unrolled_primal_dual_networks_for_lensless_cameras_Supplementarty.pdf} +} + + + +@article{kavakli2022learned, + abbr={Applied Optics}, + bibtex_show={true}, + selected={true}, + title={Learned holographic light transport}, + author={Kavakl{\i}, Koray and Urey, Hakan and Akşit, Kaan}, + journal={Applied Optics}, + volume={61}, + number={5}, + month={Feb}, + pages={B50--B55}, + year={2022}, + publisher={Optical Society of America}, + code={https://github.com/complight/realistic_holography}, + dataset={https://rdr.ucl.ac.uk/articles/dataset/Phase-only_holograms_and_captured_photographs/15087867}, + preview={learned_light.gif}, + award={Invited}, + pdf={KavakliEtAl_AppliedOptics2022_Learned_holographic_light_transport.pdf}, + doi={https://doi.org/10.1364/AO.439401} +} + + +@inproceedings{aksit2022beaming, + abbr={SPIE PW}, + bibtex_show={true}, + title={Beaming displays: towards displayless augmented reality near-eye displays}, + author={Akşit, Kaan and Itoh, Yuta and Kaminokado, Takumi}, + booktitle={AI and Optical Data Sciences III}, + volume={12019}, + pages={34--37}, + month={Jan}, + year={2022}, + organization={SPIE}, + doi={https://doi.org/10.1117/12.2610285} +} + + +@inproceedings{aksit2022perceptually, + abbr={SPIE PW}, + bibtex_show={true}, + title={Perceptually guided computer-generated holography}, + author={Akşit, Kaan and Kavaklı, Koray and Walton, David and Steed, Anthony and Urey, Hakan and Dos Anjos, Rafael Kuffner and Friston, Sebastian and Weyrich, Tim and Ritschel, Tobias}, + booktitle={Advances in Display Technologies XII}, + volume={12024}, + pages={11--14}, + year={2022}, + month={Jan}, + organization={SPIE}, + doi={https://doi.org/10.1117/12.2610251} +} + + +@inproceedings{bektacs2022telelife, + abbr={SIGCHI}, + bibtex_show={true}, + title={Telelife: A vision of remote living in 2035}, + author={Bekta{\c{s}}, Kenan and Kim, Jeeeun and Peng, Huaishu and Kiyokawa, Kiyoshi and Steed, Anthony and H{\"o}llerer, Tobias and Kos’myna, Nataliya and Sra, Misha and Orlosky, Jason and Akşit, Kaan}, + booktitle={Extended Abstracts of the 2022 CHI Conference 
on Human Factors in Computing Systems, CHI}, + volume={22}, + month={Apr}, + year={2022}, + doi={https://doi.org/10.1145/3491101.3516505} +} + + +@inproceedings{aksit2022perceptually2, + abbr={LIM}, + bibtex_show={true}, + title={Perceptually guided Computer-Generated Holography}, + author={Akşit, Kaan}, + booktitle={London Imaging Meeting 2022}, + month=aug, + year={2022}, + pdf={AksitKaan_LIM2022_perceptually_guided_computer_generated_holography.pdf} +} + + +@inproceedings{kavakli2022introduction, + abbr={Optica}, + bibtex_show={true}, + title={Introduction to Odak: a Differentiable Toolkit for Optical Sciences, Vision Sciences and Computer Graphics}, + author={Kavakl{\i}, Koray and Akşit, Kaan}, + booktitle={Frontiers in Optics}, + pages={FTu1A--1}, + year={2022}, + month={Oct}, + organization={Optica Publishing Group} +} + + +@ARTICLE{Kuffner_Dos_Anjos2022-hm, + abbr = {IEEE TVCG}, + bibtex_show={true}, + preview = {metameric_inpainting.png}, + title = "Metameric inpainting for image warping", + author = "Dos Anjos, Rafael Kuffner and Walton, David R and Akşit, Kaan and + Friston, Sebastian and Swapp, David and Steed, Anthony and + Ritschel, Tobias", + journal = "IEEE Trans. Vis. Comput. Graph.", + volume = "PP", + month = oct, + year = 2022, + language = "en", + pdf = {KuffnerEtAl_IEEETVCG2022_Metameric_inpainting_for_image_warping.pdf}, + doi = {https://doi.org/10.1109/tvcg.2022.3216712} +} + + +@inproceedings{kavakli2023mitigating, + abbr={SPIE PW}, + bibtex_show={true}, + title={Mitigating edge fringe effects in multiplane holography}, + author={Kavaklı, Koray and Akşit, Kaan and Itoh, Yuta and Urey, Hakan}, + booktitle={Optical Architectures for Displays and Sensing in Augmented, Virtual, and Mixed Reality (AR, VR, MR) IV}, + volume={12449}, + pages={124491R}, + year={2023}, + month=Feb, + organization={SPIE} +} + + +@inproceedings{akcsit2023flexible, + abbr={SPIE PW}, + bibtex_show={true}, + title={Flexible modeling of next-generation displays using a differentiable toolkit}, + author={Akşit, Kaan and Kavaklı, Koray}, + booktitle={Practical Holography XXXVII: Displays, Materials, and Applications}, + volume={12445}, + pages={131--132}, + year={2023}, + month=Feb, + organization={SPIE} +} + + +@article{zhan2023autocolor, + abbr={arxiv}, + bibtex_show={true}, + selected={true}, + title={AutoColor: Learned Light Power Control for Multi-Color Holograms}, + author={Zhan, Yicheng and Kavakl{\i}, Koray and Urey, Hakan and Sun, Qi and Akşit, Kaan}, + journal={arxiv}, + year={2023}, + month=May, + arxiv={2305.01611}, + html={https://complightlab.com/autocolor_}, + pdf={ZhanEtAl_OpticsLetters2023_Autocolor_learned_light_power_control_for_multi_color_holograms.pdf}, + code={https://github.com/complight/autocolor}, + preview={autocolor.png}, + doi={https://doi.org/10.48550/arXiv.2305.01611} +} + + +@article{kavakli2023holohdr, + abbr={arxiv}, + bibtex_show={true}, + selected={true}, + title={HoloHDR: Multi-color Holograms improve Dynamic Range}, + author={Kavakl{\i}, Koray and Shi, Liang and Urey, Hakan and Matusik, Wojciech and Akşit, Kaan}, + journal={arxiv}, + year={2023}, + month=Mar, + arxiv={2301.09950}, + html={https://complightlab.com/publications/holohdr}, + pdf={KavakliEtAl_OpticsExpress2023_HoloHDR_multi_color_holograms_improve_dynamic_range.pdf}, + code={https://github.com/complight/holohdr}, + preview={holohdr.png}, + doi={https://doi.org/10.48550/arXiv.2301.09950} +} + + +@ARTICLE{guzel2022prescription, + abbr = {Biomedical Optics Express}, + selected = {true}, + bibtex_show = 
{true}, + preview = {learned_prescription.png}, + title = "ChromaCorrect: Prescription Correction in Virtual Reality Headsets through Perceptual Guidance", + author = "Güzel, Ahmet and Beyazian, Jeanne and Chakravarthula, Praneeth and Akşit, Kaan", + journal = "Biomedical Optics Express", + volume = "2166-2180", + month = Apr, + year = 2023, + language = "en", + html = {http://complightlab.com/ChromaCorrect/}, + code = {https://github.com/complight/ChromaCorrect}, + video = {https://www.youtube.com/watch?v=fjexa7ga-tQ}, + pdf = {GüzelEtAl_BiomedicalOpticsExpress2023_Chromacorrect_prescription_correction_in_virtual_reality_headsets_through_perceptual_guidance.pdf}, + doi = {https://doi.org/10.1364/BOE.485776}, + arxiv = {2212.04264} +} + + +@ARTICLE{aksit2023holobeam, + abbr = {IEEE VR}, + selected = {true}, + bibtex_show = {true}, + preview = {holobeam.png}, + title = "HoloBeam: Paper-Thin Near-Eye Displays", + author = "Akşit, Kaan and Itoh, Yuta", + journal = "IEEE VR 2023", + volume = "PP", + month = Mar, + year = 2023, + language = "en", + html = {https://complightlab.com/publications/holobeam/}, + code = {https://github.com/complight/multiholo}, + pdf = {AkşitEtAl_IEEEVR2023_HoloBeam_Paper_thin_near_eye_displays.pdf}, + presentation = {https://youtu.be/dDs0rAXX2yk}, + arxiv = {2212.05057} +} + + +@article{kavakli2023realistic, + abbr={IEEE VR}, + bibtex_show={true}, + selected={true}, + title={Realistic Defocus Blur for Multiplane Computer-Generated Holography}, + author={Kavakl{\i}, Koray and Itoh, Yuta and Urey, Hakan and Akşit, Kaan}, + journal={IEEE VR 2023}, + year={2023}, + month=Mar, + arxiv={2205.07030}, + html={https://complightlab.com/publications/realistic_defocus_cgh/}, + pdf={KavakliEtAl_IEEEVR2023_Realistic_defocus_blur_for_multiplane_computer_generated_holography.pdf}, + code={https://github.com/complight/realistic_defocus}, + preview={realistic_defocus_cgh.png}, + doi={https://doi.org/10.48550/arXiv.2205.07030}, + presentation = {https://youtu.be/Y5CQvtoOggU}, + video={https://youtu.be/5tG8SaJGpUc}, +} + + diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 00000000..95d5c3db --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Welcome","text":""},{"location":"#introduction","title":"Introduction","text":"Who are we?

Led by Kaan Ak\u015fit, the Computational light laboratory conducts research and development in light-related sciences, including computer-generated holography, computer graphics, computational imaging, computational displays, and visual perception. We share our scientific output in the form of published articles and papers. Our primary software toolkit for tackling our research problems is public and open-source: we host it as Odak on GitHub. We translate our scientific output into lecture modules, and we create useful documentation for our research and development. To read more about our recent activities, please visit our recent timeline. Our research focus in terms of applications is depicted in the conceptual figure below.
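For newcomers curious about the toolkit, here is a minimal, purely illustrative sketch of a first experiment with Odak, assuming it has been installed from PyPI with pip install odak. The odak.learn.wave module and its wavenumber and propagate_beam helpers are recalled from Odak's public documentation and may differ between versions, so treat the exact names, arguments, and the 'Bandlimited Angular Spectrum' option as assumptions and consult the official documentation.

```python
# Minimal sketch, assuming `pip install odak` and a recent Odak release.
# The helper names below (wavenumber, propagate_beam) and their argument order
# are recalled from Odak's documentation and may differ between versions.
import torch
from odak.learn.wave import wavenumber, propagate_beam  # assumed import path

wavelength = 515e-9   # green light [m]
pixel_pitch = 8e-6    # spatial light modulator pixel pitch [m]
distance = 5e-3       # propagation distance [m]

# A small square aperture as the input complex field.
field = torch.zeros(500, 500, dtype=torch.complex64)
field[240:260, 240:260] = 1.

k = wavenumber(wavelength)  # 2 * pi / wavelength
reconstruction = propagate_beam(
    field,
    k,
    distance,
    pixel_pitch,
    wavelength,
    propagation_type='Bandlimited Angular Spectrum'  # assumed option name
)
print(reconstruction.abs().max())
```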

Interested in registering for our seminars and discussion group?

Our laboratory organizes weekly seminars, and hosts a research hub in the form of a public Slack group.

Subscribe to our mailing list and slack group

Where are we located?

The computational light laboratory is part of the Computer Science Department at University College London, and it is located in room G06, 169 Euston Road, London NW1 2AE, United Kingdom.

Interested in joining our research group?

If you are interested in joining our group as an intern, an undergraduate student, a master's student, a Ph.D. student, a postdoctoral researcher or a visiting researcher, please do not hesitate to reach out to Kaan Ak\u015fit.

"},{"location":"#research-highlights","title":"Research Highlights","text":"

Most downloaded paper award from Nature's Light: Science & Applications.

Multi-color holograms improve brightness in holographic displays (SIGGRAPH ASIA 2023)

Ahmet G\u00fczel received the best poster award at the UKRI AI CDT conference.

HoloBeam: Paper-Thin Near-Eye Displays (IEEE VR 2023)

Realistic Defocus Blur for Multiplane Computer-Generated Holography (IEEE VR 2023)

ChromaCorrect: Perceptual Prescription Correction in Virtual Reality (Biomedical Optics Express)

Optimizing vision and visuals (SIGGRAPH 2022)

Unrolled Primal-Dual Networks for Lensless Imaging (Optics Express)

Metameric Varifocal Holograms (IEEE VR 2022)

Learned Holographic Light Transport (Applied Optics)

Telelife: the future of remote living (Frontiers in VR)

SensiCut: material-aware laser cutting using speckle sensing and deep learning (UIST 2021)

Beaming Displays (IEEE VR 2021)

"},{"location":"documentation/","title":"Documentation","text":"

This page provides links to a list of documents. These documents vary in their topics, from workplace guidance to technical digests.

"},{"location":"documentation/#for-candidates","title":"For candidates","text":"Documents Description How to become a doctoral student in our laboratory This documentation describes the steps involved in becoming a doctoral student at our laboratory."},{"location":"documentation/#for-newcomers","title":"For newcomers","text":"Documents Description Establishing yourself as a member This documentation is designed to help newcomers to establish themselves as a member of the Computational light laboratory. Explore the completed projects This link will get you to the list of completed projects. It can be a good resource to inspire your next and help you get aligned with the rest of the team. Our codebases This link will get you to the list of code bases that we have compiled and published online. These codes will serve you a lot to get started with your developments. Our toolkit This link will get you to the repository where we host our toolkit, Odak. Remember that we rely on Odak in our research and it grows with the contributions of users and collaborators. Logo of our team This documentation will describe to you the meaning of our team's logo, and it is also a place to download the source file of our logo."},{"location":"documentation/#resources","title":"Resources","text":"Documents Description Learn Light, Computation and Computational Light This documentation will link you to our course notes. Learn more about raytracing and geometric optics This documentation will link you to a specific section in our course notes. Learn more about computer-generated holography This documentation will link you to a specific section in our course notes. Explore our printable designs Often times, we design and print our equipment for our laboratory experiments. This link will navigate you to a catalog of items that we designed for our experiments. Tips and tricks for using our 3D printer Often times, we design and print our equipment for our laboratory experiments. This link will navigate you to a catalog of items that we designed for our experiments."},{"location":"documentation/3d_printing/","title":"3D Printing at CompLightLab","text":"

The Computational Light Laboratory has a Snapmaker 2.0 3D Printer. This page will provide some brief instructions on using the printer, and some notes on using specific types of filament.

"},{"location":"documentation/3d_printing/#software","title":"Software","text":"

Snapmaker provides its own slicing software, Snapmaker Luban, to slice 3D models and produce .gcode files compatible with the printer. A typical workflow involves:

  • Switch to 3D printing mode (click the cube icon on the left of the window).
  • Add your model using the blue Add button at the top left. Further models can be added by pressing Add again.
  • Rotate, scale and arrange your model(s) for optimal printing.
  • Select your filament material (check the roll at the top of the printer) at the top right.
  • Change settings as necessary (supports, adhesion, etc.). Click Customize under Printing Settings, and then the black + symbol to change the settings (see notes below).
  • Click Generate G-Code to slice the model.
  • Check that the preview of your sliced model looks OK.
  • Click Export G-Code to File and export it onto a USB stick with an appropriate filename.
  • Insert the USB stick into the port on the right side of the printer.
  • Select your model in the printer interface, and begin the print.

Note that supports are disabled by default, but depending on your model, switching them on may be necessary. Also, by default a skirt will be printed; we recommend disabling this setting, as skirts can be challenging to remove from the build plate (under Adhesion, set the type to None).

"},{"location":"documentation/3d_printing/#filaments","title":"Filaments","text":"

Profiles are provided for PLA and ABS filaments. We also have PETG filament in the lab - this can be useful as it has greater strength and temperature resistance than PLA. However, it requires different temperature settings for printing.

I recommend adding a PETG profile by clicking the black + symbol under Material, and setting the temperatures as appropriate. The following settings performed well with the Snapmaker official PETG filament:

As a quick safety note, these settings heat up the build plate substantially more than when printing PLA. Please take extra care when removing prints, and allow the build plate to cool to a safe temperature before removing it. The temperature is displayed on the printer's screen as it cools.

"},{"location":"documentation/become_phd_student/","title":"How do you become a doctoral student at our laboratory?","text":"

I wrote this documentation to shed light on how you can become a doctoral student at our laboratory. Please continue reading the steps listed below to get a better idea of the process.

"},{"location":"documentation/become_phd_student/#0-do-you-know-us","title":"(0) Do you know us?","text":"

This one is a crucial question. In any case, whenever you apply to something, you have to understand what you are applying for. We describe ourselves in many ways within this website. As a starting point, take a tour of this website. Try to get a sense of our research activities, where we publish, and what kind of output we generate for the public. Once you have an image in your mind of our laboratory, ask yourself if you are interested in similar lines of research.

Info

If the answer is yes, continue reading the next step.

Warning

If the answer is no, you can be in many other cool places, and please pay attention to finding your research home. Applying for a doctoral study is about you! Giving the right amount of care is highly important.

"},{"location":"documentation/become_phd_student/#1-are-you-unique","title":"(1) Are you unique?","text":"

Being exceptional may mean many things. Most importantly, being exceptional here refers to being rare. You may think in a unique way, or you may care about specific topics more than others do. We value that kind of uniqueness. To highlight such uniqueness in your character, you need evidence from your previous experiences that can help us understand you better. This evidence can include all or some of the following items:

  • You are an exceptional person in your current programme (academic or industrial), and the highest-ranked person you work with is ready to support your case by providing a reference letter.
  • You maintain public software- or hardware-related projects that are part of your previous research, and these repositories excite other people.
  • You have helped organize events in your research field or participated in exciting venues where you demonstrated your work to the public and got other people excited.
  • You publish at highly reputable, top-tier conferences or journals relevant to our laboratory's work. Here are some examples of these conferences and journals: ACM SIGGRAPH, ACM SIGCHI, ACM UIST, IEEE VR, IEEE ISMAR, Optica's Optica and Optics Express, Nature Photonics, Nature's Light: Science & Applications, or alike.
  • (Optional -- not a must) You have an award or funding to support your doctoral studies in the future.

Note that this list is not official advice from our university's doctoral admissions process; you should check the requirements of our doctoral program before applying.

Info

If the answer is yes, continue reading the next step.

Warning

If the answer is no, you can be in many other cool places, and please pay attention to finding your research home. Applying for a doctoral study is about you! Giving the right amount of care is highly important.

"},{"location":"documentation/become_phd_student/#2-do-you-meet-the-requirements-of-our-doctoral-program","title":"(2) Do you meet the requirements of our doctoral program?","text":"

This bit is pretty significant. Please check that you are eligible to apply to our doctoral program. Remember, you can always ask people who are in charge of our doctoral applications system, and you will find their email for enquiries on the application website.

Info

If the answer is yes, continue reading the next step.

Warning

If the answer is no, you can be in many other cool places, and please pay attention to finding your research home. Applying for a doctoral study is about you! Giving the right amount of care is highly important.

"},{"location":"documentation/become_phd_student/#3-are-you-ready-to-reach-to-us","title":"(3) Are you ready to reach to us?","text":"

Given you are at this step, it is now time for you to decide if you want to reach us. Please carefully revise your resume such that a third person can easily find your achievements and links to the evidence of your previous successes. I often talk to close collaborators, friends and family, even at this stage when I prepare submissions (e.g., grants, awards, papers, etc.). Find a trustworthy person to review your resume before sending it our way. Once you have your resume, prepare an email that is as short as possible. Do not write a pages-long email; remember, you want to catch our attention. Spend time finding out how to describe yourself in the fewest words, and what you should highlight about yourself in your email (e.g., a recent standout paper, funding you already hold for doctoral training, your unique and disruptive line of research). Make sure to state if you have funding at hand.

Info

If the answer is yes, add your resume to your email and send it to kaanaksit@kaanaksit.com.

Warning

If the answer is no, you can be in many other cool places, and please pay attention to finding your research home. Applying for a doctoral study is about you! Giving the right amount of care is highly important.

"},{"location":"documentation/become_phd_student/#4-do-you-have-your-application-in-the-system","title":"(4) Do you have your application in the system?","text":"

Use our doctoral program's website for your application. After your submission, if all goes well, you will be interviewed by academics at our university. You can find a copy of our interview form. Note that interview questions are not limited to this specific form.

Info

If the answer is yes, continue reading the next step.

Warning

If the answer is no, you can be in many other cool places, and please pay attention to finding your research home. Applying for a doctoral study is about you! Giving the right amount of care is highly important.

"},{"location":"documentation/become_phd_student/#5-do-you-have-an-offer-from-our-university","title":"(5) Do you have an offer from our university?","text":"

Our admission and offer process can take a long time. Make sure to keep in touch with us. Once you get your offer from the university, let us know immediately.

Info

If the answer is yes, continue reading the next step.

Warning

If the answer is no, you can be in many other cool places, and please pay attention to finding your research home. Applying for a doctoral study is about you! Giving the right amount of care is highly important.

"},{"location":"documentation/become_phd_student/#6-do-you-or-do-we-have-the-proper-funding-for-your-studies","title":"(6) Do you or do we have the proper funding for your studies?","text":"

You may have no funds at hand, or our laboratory is not able to fund you at this time. However, please make sure to contact us. Depending on your profile, we may direct you to sources that can help you obtain your funding. Or even better, you or we may have the proper funding for your studies.

Info

If the answer is yes, you will be admitted to our laboratory and university. We look forward to collaborating with you and conducting exciting research with you in the future.

Warning

If the answer is no, you can be in many other cool places, and please pay attention to finding your research home. Applying for a doctoral study is about you! Giving the right amount of care is highly important.

"},{"location":"documentation/getting_started/","title":"Welcome aboard!","text":"

Quote

Welcome aboard!

The computational light laboratory conducts research and development in light-related sciences, including computer-generated holography, computer graphics, computational imaging, computational displays and visual perception.

Our core mission is to show our societies that there can be better services, experiences, and goods that serve the benefits of humanity by using light. We are here to invent the next in light-based techniques and unlock the mystery of light.

We build our own tools to perform our work and invent new methods to improve the state of the art. Most importantly, we document our steps so that others can follow. Finally, we release our work to the public on our GitHub organization. We have multiple social media outlets to promote our work, including our Twitter account, our LinkedIn account, our YouTube account and our webpage. We don't shy away from going public and participating in public demonstrations with our prototypes.

I wholeheartedly welcome every member at every stage to the Computational light laboratory. We can improve the state of the world, and I need your help in doing that!

Kaan Ak\u015fit

"},{"location":"documentation/getting_started/#getting-aboard","title":"Getting aboard!","text":"

In the rest of this documentation, you will find a checklist that will help you establish yourself as a member of the Computational light laboratory. There is also an additional subsection that provides a list of suggestions to help you establish a collaborative work ethic. Note that this and the other documents that you will find on this website are always subject to change. In fact, as a member, please do not hesitate to suggest improvements and be the change by opening a pull request in the source repository.

"},{"location":"documentation/getting_started/#checklist","title":"Checklist","text":"
  • Are you fully registered for the graduate programme? Is all the administrative work done? Relevant contact: cs.phdadmissions@ucl.ac.uk.
  • Do you know when you will receive your first paycheck? Relevant contact: cs.phdadmissions@ucl.ac.uk.
  • Do you have a UCL identity card? Relevant contact: securitysystems@ucl.ac.uk.
  • Do you know which building is our office building? Reach out to Kaan or any other member and ask, kaanaksit@kaanaksit.com.
  • Can you get into the building where our office is using your UCL identity card? Relevant contact: facilities@cs.ucl.ac.uk.
  • Do you have a desk and a chair reserved for you in the office? Relevant contact: facilities@cs.ucl.ac.uk.
  • Do you know where Kaan's office is? Reach out to Kaan or any other member and ask, kaanaksit@kaanaksit.com.
  • Do you know where our laboratory space is? Reach out to Kaan or any other member and ask, kaanaksit@kaanaksit.com.
  • Do you have a computer to conduct your research? Reach out to Kaan and ask, kaanaksit@kaanaksit.com. For queries such as where to get a display mount or a particular cable, try reaching out to facilities@cs.ucl.ac.uk.
  • Do you have access to remote computational resources that you may be needing in your work? Reach out to Kaan and ask, kaanaksit@kaanaksit.com.
  • Do you have access to hardware resources that you may be needing in your work? Reach out to Kaan and ask, kaanaksit@kaanaksit.com.
  • Make sure to meet other members. Send emails! They are listed on this website. Ask about their experiences and thoughts. Explore what they are conducting in their research.
  • Make sure to discuss with Kaan Ak\u015fit to see how you can contribute to Odak in the near future.
  • Do you know what you will be focusing on? Do you know what projects are carried out in the team? Are you up-to-date with what the team has achieved recently?
  • Are you listed as a member in the GitHub organization? In that organization which team do you belong to? Reach out to Kaan and ask, kaanaksit@kaanaksit.com.
  • Do you have a weekly 1:1 meeting arranged with Kaan? Reach out to Kaan and ask, kaanaksit@kaanaksit.com.
  • Are you receiving calendar invitations for weekly group meetings? Reach out to Kaan or any other team member and ask, kaanaksit@kaanaksit.com.
  • Do you have a research plan? What are your goals? How will your research impact the society in the near and far future? Tinker deeply in a structured manner. Agree with Kaan and your other supervisors. Reach out to them and initiate conversations about your roadmap towards your degree.
  • Do you know where you can find the term dates and closures? Visit this website and this website for more.
  • Do you know where you can book the meeting room 402 at 169 Euston Road? Visit CMIS GO system for booking purposes.
"},{"location":"documentation/getting_started/#suggestions","title":"Suggestions","text":"

These are some suggestions to help you establish yourself as a collaborative member of the group.

  • Use email software that lets you schedule messages. Please do not send emails to people you don't know well outside 8 am to 6 pm (unless they are in a different time zone).

  • Life brings many challenges, and not all days are sunny. Even if communication degrades over time, keep the kindness. Control yourself. Never say anything that you will regret! (Life is not war)

  • We are all collaborators. The best things happen when people collaborate. Being a Swiss Army knife is good, but there isn't a leader in history who led no one. No human on this planet can exist entirely by themselves.

  • Avoid unnecessary communication, leave others room to organize themselves.

  • If you are angry, stand up and walk. Take a break, be somewhere else for some time. When it is time, and you are calm, come back.

  • Smile, stand up, walk, be kind and love yourself, and respect yourself.

  • Know yourself!

  • Spend time to understand things if you want to be an expert in the topic. Do not worry about how much it takes, but worry if you don't understand.

  • You will be exposed to noise most of the time in your communications. Improve your filters to extract useful information.

  • Do it now if you can. Tomorrow will arrive with new tasks.

  • The work is not complete until it is complete. Don't be handwavy. Ensure that you provide a working solution (not an \"it can work easily in the next step\" solution).

  • Research impact means the beneficial application of expertise, knowledge, analysis or discovery. It can also be described as an effect on change or benefit to the economy, society, culture, public policy or services, health, the environment or quality of life beyond academia.

  • Build it. They will come.

"},{"location":"lectures/","title":"A secret page","text":"

Aha! You found a secret page.

Pssst, if Kaan told you this, click this link!

"},{"location":"outreach/","title":"Outreach","text":""},{"location":"outreach/#research-hub","title":"Research Hub","text":"

Info

We started a public Slack group dedicated to scientists researching computer graphics, human visual perception, computational photography and computational displays. We aim to build a single hub for everyone and provide all members with a gateway to:

  • meet others in the field,
  • find collaborators worldwide,
  • introduce open-source tools for research,
  • announce and plan events in major conferences (e.g., SIGGRAPH, CVPR, IEEE VR, SPIE PW),
  • advertise opportunities for others (e.g., internships, jobs, initiatives, grants),
  • promote their most recent research,
  • find subjects for their experiments,

But most of all, the primary goal is to stay connected to sustain a healthy research field. To join our Slack channel and contribute to future conversations, please use the link provided below:

Subscribe to our Slack

Please do not hesitate to share the invitation link with other people in your field. If you encounter any issue with the link, please do not hesitate to reach us at kaanaksit@kaanaksit.com.

"},{"location":"outreach/#seminars","title":"Seminars","text":"

We organize a seminar series named High-Beams. The High-Beams seminar series is an exclusive event where we host experts from industry and academia. Overall, the seminars are a blend of internal and external presenters.

Question

If you are wondering how to get an invitation to the next seminar series, please do not hesitate to email Kaan Ak\u015fit or subscribe to our mailing list (open to the public).

Subscribe to our mailing list

"},{"location":"outreach/#2024","title":"2024","text":"

These seminars are organized by Kaan Ak\u015fit.

"},{"location":"outreach/#manu-gopakumar-stanford-university","title":"Manu Gopakumar (Stanford University)","text":"Details

Date: 16th October 2024

Presenter: Manu Gopakumar, Ph.D. Candidate, Stanford University

Title: Full-color 3D holographic augmented reality displays with metasurface waveguides

Watch: Recording (Password protected)

"},{"location":"outreach/#guosheng-hu-university-of-bristol","title":"Guosheng Hu (University of Bristol)","text":"Details

Date: 10th October 2024

Presenter: Guosheng Hu, Senior Lecturer, University of Bristol

Title: Reduce AI\u2019s Carbon Footprint

Watch: Recording (Password protected)

"},{"location":"outreach/#binglun-wang-university-college-london","title":"Binglun Wang (University College London)","text":"Details

Date: 2nd October 2024

Presenter: Binglun Wang, Ph.D. candidate at University College London

Title: 3D Editings using Diffusion Models

Watch: Recording (Password protected)

"},{"location":"outreach/#henry-fuchs-university-of-north-carolina-at-chapel-hill","title":"Henry Fuchs (University of North Carolina at Chapel Hill)","text":"Details

Date: 20th June 2024

Presenter: Henry Fuchs, Professor at the University of North Carolina at Chapel Hill

Title: Everyday Augmented Reality Glasses: Past Predictions, Present Problems, Future Possibilities

Watch: Not recorded

"},{"location":"outreach/#zian-wang-university-of-toronto-and-nvidia","title":"Zian Wang (University of Toronto and NVIDIA)","text":"Details

Date: 24th April 2024

Presenter: Zian Wang, PhD student at the University of Toronto

Title: Hybrid Rendering: Bridging Volumetric and Surface Representations for Efficient 3D Content Modeling

Watch: Recording (Password protected)

"},{"location":"outreach/#litu-rout-the-university-of-texas-austin","title":"Litu Rout (The University of Texas Austin)","text":"Details

Date: 10th April 2024

Presenter: Litu Rout, PhD student at the University of Texas, Austin

Title: On Solving Inverse Problems using Latent Diffusion

Watch: Recording (Password protected)

"},{"location":"outreach/#yingsi-qin-carnegie-mellon-university","title":"Yingsi Qin (Carnegie Mellon University)","text":"Details

Date: 3rd April 2024

Presenter: Yingsi Qin, PhD Candidate at Carnegie Mellon University

Title: Split-Lohmann Multifocal Displays

Watch: Recording (Password protected)

"},{"location":"outreach/#seung-hwan-baek-postech","title":"Seung-Hwan Baek (Postech)","text":"Details

Date: 20th March 2024

Presenter: Seung-Hwan Baek, Assistant Professor at POSTECH

Title: High-dimensional Visual Computing

Watch: Recording (Password protected)

"},{"location":"outreach/#divya-kothandaraman-university-of-maryland-college-park","title":"Divya Kothandaraman (University of Maryland College Park)","text":"Details

Date: 13th March 2024

Presenter: Divya Kothandaraman, PhD student at the University of Maryland College Park

Title: Text Controlled Aerial-View Synthesis from a Single Image using Diffusion Models

Watch: Recording (Password protected)

"},{"location":"outreach/#cheng-zheng-massachusetts-institute-of-technology","title":"Cheng Zheng (Massachusetts Institute of Technology)","text":"Details

Date: 6th March 2024

Presenter: Cheng Zheng, PhD student at Massachusetts Institute of Technology

Title: Neural Lithography: Close the Design to Manufacturing Gap in Computational Optics

Watch: Recording (Password protected)

"},{"location":"outreach/#taimoor-tariq-universita-della-svizzera-italiana","title":"Taimoor Tariq (Universit\u00e0 della Svizzera Italiana)","text":"Details

Date: 28th February 2024

Presenter: Taimoor Tariq, PhD student at Universit\u00e0 della Svizzera Italiana

Title: Its all in the Eyes: Towards Perceptually Optimized Real-Time VR

Watch: Recording (Password protected)

"},{"location":"outreach/#mose-sakashita-cornell-university","title":"Mose Sakashita (Cornell University)","text":"Details

Date: 21st February 2024

Presenter: Mose Sakashita, PhD student at Cornell University

Title: Enhancing Remote Design Collaboration through Motion-Controlled Telepresence Robots

Watch: Recording (Password protected)

"},{"location":"outreach/#ruoshi-liu-columbia-university","title":"Ruoshi Liu (Columbia University)","text":"Details

Date: 14th February 2024

Presenter: Ruoshi Liu, PhD student at Columbia University

Title: Neural Network Inversion for Imaging, Vision, Robotics, and Beyond

Watch: Recording (Password protected)

"},{"location":"outreach/#madalina-nicolae-saarland-university-and-polytechnic-institute-of-paris","title":"Madalina Nicolae (Saarland University and Polytechnic Institute of Paris)","text":"Details

Date: 7th February 2024

Presenter: Madalina Nicolae, PhD student at Saarland University and Polytechnic Institute of Paris

Title: Towards Digital Biofabrication and Sustainable Innovation

Watch: Recording (Password protected)

"},{"location":"outreach/#2023","title":"2023","text":"

These seminars are organized by Kaan Ak\u015fit. Simon Julier invited Stephen Ellis and moderated the session.

"},{"location":"outreach/#daiseku-iwai-osaka-university","title":"Daiseku Iwai (Osaka University)","text":"Details

Date: 29th November 2023

Presenter: Daisuke Iwai, Associate Professor at Osaka University

Title: Computational displays in projection mapping

Watch: Recording (Password protected)

"},{"location":"outreach/#lior-yariv-weizmann-institute-of-science","title":"Lior Yariv (Weizmann Institute of Science)","text":"Details

Date: 22nd November 2023

Presenter: Lior Yariv, PhD student at Weizmann Institute of Science

Title: MultiDiffusion: Fusing Diffusion Paths for Controlled Image Generation

Watch: Recording (Password protected)

"},{"location":"outreach/#ziya-erkoc-technical-university-of-munich","title":"Ziya Erko\u00e7 (Technical University of Munich)","text":"Details

Date: 15th November 2023

Presenter: Ziya Erko\u00e7, PhD student at the Technical University of Munich

Title: Generative Modeling with Neural Field Weights

Watch: Recording (Password protected)

"},{"location":"outreach/#guillaume-thekkadath-national-research-council-of-canada","title":"Guillaume Thekkadath (National Research Council of Canada)","text":"Details

Date: 8th November 2023

Presenter: Guillaume Thekkadath, Postdoctoral Fellow at National Research Council of Canada

Title: Intensity correlation holography: applications in single photon and remote imaging

Watch: Recording (Password protected)

"},{"location":"outreach/#kenan-bektas-university-of-saint-gallen","title":"Kenan Bektas (University of Saint Gallen)","text":"Details

Date: 1st November 2023

Presenter: Kenan Bektas, Postdoctoral Researcher at the University of Saint Gallen

Title: Gaze-Enabled Mixed Reality for Human Augmentation in Ubiquitous Computing Environments

Watch: Recording (Password protected)

"},{"location":"outreach/#koya-narumi-the-university-of-tokyo","title":"Koya Narumi (The University of Tokyo)","text":"Details

Date: 25th October 2023

Presenter: Koya Narumi, Assistant Professor at the University of Tokyo

Title: Computational Origami Fabrication

Watch: Recording (Password protected)

"},{"location":"outreach/#stephen-ellis-nasa","title":"Stephen Ellis (NASA)","text":"Details

Date: 11th October 2023

Presenter: Stephen Ellis

Title: Complexity -complicated

Watch: N/A

"},{"location":"outreach/#simeng-qiu-king-abdullah-university-of-science-and-technology","title":"Simeng Qiu (King Abdullah University of Science and Technology)","text":"Details

Date: 4th October 2023

Presenter: Simeng Qiu, PhD Candidate at King Abdullah University of Science and Technology

Title: MoireTag: Angular Measurement and Tracking with a Passive Marker

Watch: Recording (Password protected)

"},{"location":"outreach/#suyeon-choi-stanford-university","title":"Suyeon Choi (Stanford University)","text":"Details

Date: 27th September 2023

Presenter: Suyeon Choi, PhD student at Stanford University

Title: Neural Holography for Next-generation Virtual and Augmented Reality Displays

Watch: Recording (Password protected)

"},{"location":"outreach/#ulugbek-kamilov-university-of-washington-in-saint-louis","title":"Ulugbek Kamilov (University of Washington in Saint Louis)","text":"Details

Date: 20th September 2023

Presenter: Ulugbek Kamilov, Associate Professor of Electrical & Systems Engineering and Computer Science & Engineering at Washington University in St. Louis

Title: Plug-and-Play Models for Large-Scale Computational Imaging

Watch: Recording (Password protected)

"},{"location":"outreach/#shariq-bhat-king-abdullah-university-of-science-and-technology","title":"Shariq Bhat (King Abdullah University of Science and Technology)","text":"Details

Date: 13th September 2023

Presenter: Shariq Bhat, PhD Student at King Abdullah University of Science and Technology

Title: A Journey Towards State-of-the-art Monocular Depth Estimation Using Adaptive Bins

Watch: Recording (Password protected)

"},{"location":"outreach/#congli-wang-university-of-california-berkeley","title":"Congli Wang (University of California, Berkeley)","text":"Details

Date: 6th September 2023

Presenter: Congli Wang, Postdoctoral Researcher at University of California, Berkeley

Title: Computational sensing with intelligent optical instruments

Watch: Recording (Password protected)

"},{"location":"outreach/#silvia-sellan-university-of-toronto","title":"Silvia Sell\u00e1n (University of Toronto)","text":"Details

Date: 14th June 2023

Presenter: Silvia Sell\u00e1n, PhD student at the University of Toronto

Title: Uncertain Surface Reconstruction

Watch: Recording (Password protected)

"},{"location":"outreach/#omer-shapira-nvidia","title":"Omer Shapira (NVIDIA)","text":"Details

Date: 26th May 2023

Presenter: Omer Shapira, Engineer and Researcher at NVIDIA

Title: Cloud Computing Around the Body: Theoretical Limits and Practical Applications

Watch: Recording (Password protected)

"},{"location":"outreach/#michael-fischer-university-college-london","title":"Michael Fischer (University College London)","text":"Details

Date: 17th May 2023

Presenter: Michael Fischer, PhD student at University College London

Title: Advanced Machine Learning for Rendering

Watch: Recording (Password protected)

"},{"location":"outreach/#michael-proulx-meta-reality-labs-and-university-of-bath","title":"Michael Proulx (Meta Reality Labs and University of Bath)","text":"Details

Date: 3rd May 2023

Presenter: Michael Proulx, Reader at University of Bath and Research Scientist at Meta Reality Labs

Title: Visual interactions in Extended Reality

Watch: Recording (Password protected)

"},{"location":"outreach/#inci-ayhan-bogazici-university","title":"\u0130nci Ayhan (Bogazici University)","text":"Details

Date: 26th April 2023

Presenter: \u0130nci Ayhan, Associate Professor at Bogazici University

Title: Cognitive Embodiment and Affordance Perception in the Virtual Reality Environment

Watch: Recording (Password protected)

"},{"location":"outreach/#zerrin-yumak-utrecht-university","title":"Zerrin Yumak (Utrecht University)","text":"Details

Date: 12th April 2023

Presenter: Zerrin Yumak, Assistant Professor at Utrecht University

Title: AI-driven Virtual Humans with Non-verbal Communication Skills

Watch: Recording (Password protected)

"},{"location":"outreach/#elia-gatti-university-college-london","title":"Elia Gatti (University College London)","text":"Details

Date: 5th April 2023

Presenter: Elia Gatti, Assistant Professor at University College London

Title: AI-driven Virtual Humans with Non-verbal Communication Skills

Watch: Recording (Password protected)

"},{"location":"outreach/#yuhao-zhu-university-of-rochester","title":"Yuhao Zhu (University of Rochester)","text":"Details

Date: 29th March 2023

Presenter: Yuhao Zhu, University of Rochester

Title: Rethinking Imaging-Computing Interface

Watch: Recording (Password protected)

"},{"location":"outreach/#taejun-kim-kaist","title":"Taejun Kim (KAIST)","text":"Details

Date: 22nd March 2023

Presenter: Taejun Kim, PhD Student at KAIST

Title: Interface Control with Eye Movement

Watch: Recording (Password protected)

"},{"location":"outreach/#josef-spjut-nvidia","title":"Josef Spjut (NVIDIA)","text":"Details

Date: 15th March 2023

Presenter: Josef Spjut, Senior Research Scientist at NVIDIA

Title: Esports Rendering and Display: Psychophysical Experimentation

Watch: Recording (Password protected)

"},{"location":"outreach/#ruth-rosenholtz-massachusetts-institute-of-technology","title":"Ruth Rosenholtz (Massachusetts Institute of Technology)","text":"Details

Date: 1st March 2023

Presenter: Ruth Rosenholtz, Principal Research Scientist at Massachusetts Institute of Technology

Title: Human vision at a glance

Watch: Recording (Password protected)

"},{"location":"outreach/#qi-sun-nyu","title":"Qi Sun (NYU)","text":"Details

Date: 21st February 2023

Presenter: Qi Sun, Assistant Professor at NYU

Title: Co-Optimizing Human-System Performance in VR/AR

Watch: Recording (Password protected)

"},{"location":"outreach/#towaki-takikawa-nvidia","title":"Towaki Takikawa (NVIDIA)","text":"Details

Date: 8th February 2023

Presenter: Towaki Takikawa, Research Scientist at NVIDIA

Title: Towards Volumetric Multimedia Compression and Transport with Neural Fields

Watch: Recording (Password protected)

"},{"location":"outreach/#2022","title":"2022","text":"

The seminar series of 2022 is conducted with the help of several key people at University College London. Many of these seminars are coordinated by Kaan Ak\u015fit. Kaan has received help from Simon Julier, Oliver Kingshott, Klara Brandst\u00e4tter, and Felix Thiel for the moderation and organization of several of these events.

"},{"location":"outreach/#ernst-kruijff-bonn-rhein-sieg-university-of-applied-sciences","title":"Ernst Kruijff (Bonn-Rhein-Sieg University of Applied Sciences)","text":"Details

Date: 29th November 2022

Presenter: Ernst Kruijff, Professor of Human-Computer Interaction at Bonn-Rhein-Sieg University

Title: Multi-sensory feedback for 3D User Interfaces

Watch: Recording (Password protected)

"},{"location":"outreach/#aykut-erdem-koc-university","title":"Aykut Erdem (Ko\u00e7 University)","text":"Details

Date: 23rd November 2022

Presenter: Aykut Erdem, Associate Professor at Ko\u00e7 University.

Title: Disentangling Content and Motion for Text-Based Neural Video Manipulation

Watch: Recording (Password protected)

"},{"location":"outreach/#gul-varol-ecole-des-ponts-paristech","title":"G\u00fcl Varol (\u00c9cole des Ponts ParisTech)","text":"Details

Date: 16th November 2022

Presenter: G\u00fcl Varol, Assistant Professor at \u00c9cole des Ponts ParisTech

Title: Controllable 3D human motion synthesis

Watch: Recording (Password protected)

"},{"location":"outreach/#ana-serrano-universidad-de-zaragoza","title":"Ana Serrano (Universidad de Zaragoza)","text":"Details

Date: 2nd November 2022

Presenter: Ana Serrano, Universidad de Zaragoza

Title: Material Appearance Perception and Applications

Watch: Recording (Password protected)

"},{"location":"outreach/#praneeth-chakravarthula-princenton-university","title":"Praneeth Chakravarthula (Princenton University)","text":"Details

Date: 27th October 2022

Presenter: Praneeth Chakravarthula, Research Scholar at Princeton University

Title: The Present Developments and Future Challenges of Holographic Near-Eye Displays

Watch: Recording (Password protected)

"},{"location":"outreach/#koki-nagano-nvidia","title":"Koki Nagano (NVIDIA)","text":"Details

Date: 12th October 2022

Presenter: Koki Nagano, Senior Research Scientist at NVIDIA

Title: Frontiers of Neural Human Synthesis

Watch: Recording (Password protected)

"},{"location":"outreach/#peter-wonka-king-abdullah-university-of-science-and-technology","title":"Peter Wonka (King Abdullah University of Science and Technology)","text":"Details

Date: 28th September 2022

Presenter: Peter Wonka, Professor of Computer Science at King Abdullah University of Science and Technology (KAUST) and Interim Director of the Visual Computing Center (VCC)

Title: Recent Research Efforts for Building 3D GANs

Watch: Recording (Password protected)

"},{"location":"outreach/#rob-lindeman-university-of-canterbury","title":"Rob Lindeman (University of Canterbury)","text":"Details

Date: 21st September 2022

Presenter: Rob Lindeman, Professor at the University of Canterbury

Title: Comfortable VR: Supporting Regular and Long-term Immersion

Watch: Recording (Password protected)

"},{"location":"outreach/#felix-heide-princenton-university","title":"Felix Heide (Princenton University)","text":"Details

Date: 7th September 2022

Presenter: Felix Heide, Assistant Professor at Princeton University and Co-Founder and Chief Technology Officer of self-driving vehicle startup Algolux

Title: Neural Nanophotonic Cameras

Watch: Recording (Password protected)

"},{"location":"outreach/#yulia-gryaditskaya-surrey-institute-for-people-centred-artifical-intelligence","title":"Yulia Gryaditskaya (Surrey Institute for People-Centred Artifical Intelligence)","text":"Details

Date: 1st June 2022

Presenter: Yulia Gryaditskaya, Assistant Professor at CVSSP and the Surrey Institute for People-Centred Artificial Intelligence

Title: Amateur Sketches

Watch: Recording (Password protected)

"},{"location":"outreach/#michael-bauer-nvidia","title":"Michael Bauer (NVIDIA)","text":"Details

Date: 25th May 2022

Presenter: Michael Bauer, Principal Scientist at NVIDIA

Title: Running Unmodified NumPy Programs on Hundreds of GPUs with cuNumeric

Watch: Recording (Password protected)

"},{"location":"outreach/#mark-pauly-epfl","title":"Mark Pauly (EPFL)","text":"Details

Date: 18th May 2022

Presenter: Mark Pauly, Professor of Computer Graphics at \u00c9cole polytechnique f\u00e9d\u00e9rale de Lausanne

Title: Computational Inverse Design of Deployable Structures

Watch: Recording (Password protected)

"},{"location":"outreach/#tuanfeng-wang-adobe","title":"Tuanfeng Wang (Adobe)","text":"Details

Date: 11th May 2022

Presenter: Tuanfeng Wang, Research Scientist at Adobe

Title: Synthesizing dynamic human appearance

Watch: Recording (Password protected)

"},{"location":"outreach/#tim-weyrich-fau-and-ucl","title":"Tim Weyrich (FAU and UCL)","text":"Details

Date: 4th May 2022

Presenter: Tim Weyrich, Professor at Friedrich-Alexander-Universit\u00e4t Erlangen-N\u00fcrnberg and Professor of Visual Computing at University College London

Title: Digital Reality: Visual Computing Interacting With The Real World

Watch: Recording (Password protected)

"},{"location":"outreach/#sanjeev-muralikrishnan-ucl","title":"Sanjeev Muralikrishnan (UCL)","text":"Details

Date: 27th April 2022

Presenter: Sanjeev Muralikrishnan, PhD student at UCL

Title: GLASS: Geometric Latent Augmentation For Shape Spaces

Watch: Recording (Password protected)

"},{"location":"outreach/#valentin-deschaintre-adobe","title":"Valentin Deschaintre (Adobe)","text":"Details

Date: 20th April 2022

Presenter: Valentin Deschaintre, Research Scientist at Adobe

Title: Material Creation for Virtual Environments

Watch: Recording (Password protected)

"},{"location":"outreach/#dan-archer-university-college-london-and-animesh-karnewar-university-college-london","title":"Dan Archer (University College London) and Animesh Karnewar (University College London)","text":"Details

Date: 23rd March 2022

Presenters:

  • Dan Archer, PhD Student at University College London
  • Animesh Karnewar, PhD student at University College London

Title:

  • Optimizing Performance through Stress and Embodiment Levels in Virtual Reality Using Autonomic Responses
  • ReLU Fields: The Little Non-linearity That Could ...

Watch: Recording (Password protected)

"},{"location":"outreach/#oya-celiktutan-kings-college-london","title":"Oya Celiktutan (King's College London)","text":"Details

Date: 23rd March 2022

Presenter: Oya Celiktutan, Assistant Professor at King's College London

Title: Towards Building Socially Informed and Adaptive Robotic Systems

Watch: Recording (Password protected)

"},{"location":"outreach/#iuri-frosio-nvidia","title":"Iuri Frosio (NVIDIA)","text":"Details

Date: 17th March 2022

Presenter: Iuri Frosio, Principal Research Scientist at NVIDIA

Title: Research & videogames @ NVIDIA \u2013 the cases of saliency estimation and cheating prevention

Watch: Recording (Password protected)

"},{"location":"outreach/#avi-bar-zeev-realityprime","title":"Avi Bar-Zeev (RealityPrime)","text":"Details

Date: 9th March 2022

Presenter: Avi Bar-Zeev

Title: Beyond Meta - AR and the Road Ahead

Watch: Recording (Password protected)

"},{"location":"outreach/#vinoba-vinayagamoorthy-british-broadcasting-corporation","title":"Vinoba Vinayagamoorthy (British Broadcasting Corporation)","text":"Details

Date: 2nd March 2022

Presenter: Vinoba Vinayagamoorthy, Researcher at British Broadcasting Corporation

Title: Designing for the Future: Exploring the Impact of (Immersive) Experiences on BBC Audiences

Watch: Recording (Password protected)

"},{"location":"outreach/#lauria-waller-university-of-california-berkeley","title":"Lauria Waller (University of California, Berkeley)","text":"Details

Date: 23rd February 2022

Presenter: Laura Waller, Associate Professor, Department of Electrical Engineering and Computer Sciences, University of California, Berkeley

Title: Computational Microscopy

Watch: Recording (Password protected)

"},{"location":"outreach/#doga-dogan-massachusetts-institute-of-technology","title":"Do\u011fa Do\u011fan (Massachusetts Institute of Technology)","text":"Details

Date: 16th February 2022

Presenter: Do\u011fa Do\u011fan, PhD Candidate at Massachusetts Institute of Technology

Title: Unobtrusive Machine-Readable Tags for Seamless Interactions with Real-World Objects

Watch: Recording (Password protected)

"},{"location":"outreach/#anthony-steed-university-college-london","title":"Anthony Steed (University College London)","text":"Details

Date: 2nd February 2022

Presenter: Anthony Steed, Professor at University College London

Title: So you want to build a Metaverse

Watch: Recording (Password protected)

"},{"location":"outreach/#2021","title":"2021","text":"

The seminar series of 2021 is conducted with the help of several key people at University College London. Many of these seminars are coordinated by Kaan Ak\u015fit. Kaan has received help from Klara Brandst\u00e4tter, Felix Thiel, Oliver Kingshott, Tobias Ritschel, Tim Weyrich and Anthony Steed for moderation and organization of several of these events.

"},{"location":"outreach/#sebastian-friston-university-college-london","title":"Sebastian Friston (University College London)","text":"Details

Date: 24th November 2021

Presenter: Sebastian Friston, Research Associate at University College London

Title: Ubiq

Watch: Recording (Password protected)

"},{"location":"outreach/#wolfgang-sturzlinger-simon-fraser-university","title":"Wolfgang St\u00fcrzlinger (Simon Fraser University)","text":"Details

Date: 17th November 2021

Presenter: Wolfgang St\u00fcrzlinger, Professor at Simon Fraser University

Title: Current Challenges and Solutions for Virtual and Augmented Reality

Watch: Recording (Password protected)

"},{"location":"outreach/#nels-numan-university-college-london-and-koray-kavakl-koc-university","title":"Nels Numan (University College London) and Koray Kavakl\u0131 (Ko\u00e7 University)","text":"Details

Date: 10th November 2021

Presenters:

  • Koray Kavakl\u0131, MSc student at Ko\u00e7 University
  • Nels Numan, PhD student at University College London

Title:

  • Learned Holographic Light Transport
  • Asymmetric Collaborative Mixed Reality

Watch: Recording (Password protected)

"},{"location":"outreach/#david-swapp-university-college-london","title":"David Swapp (University College London)","text":"Details

Date: 3rd November 2021

Presenters: David Swapp, Senior Research Fellow at University College London

Title: Who are VR systems designed for?

Watch: Recording (Password protected)

"},{"location":"outreach/#katharina-krosl-vrvis-zentrum-fur-virtual-reality-and-visualisierung","title":"Katharina Kr\u00f6sl (VRVis Zentrum f\u00fcr Virtual Reality and Visualisierung)","text":"Details

Date: 20th October 2021

Presenters: Katharina Kr\u00f6sl, Researcher at VRVis Zentrum f\u00fcr Virtual Reality und Visualisierung

Title: Simulating Vision Impairments in XR

Watch: Recording (Password protected)

"},{"location":"outreach/#morgan-mcguire-roblox","title":"Morgan Mcguire (Roblox)","text":"Details

Date: 14th October 2021

Presenters: Morgan McGuire, Chief Scientist at Roblox

Title: Metaverse Research

Watch: Recording (Password protected)

"},{"location":"outreach/#wenzel-jakob-ecole-polytechnique-federale-de-lausanne","title":"Wenzel Jakob (\u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne)","text":"Details

Date: 6th October 2021

Presenters: Wenzel Jakob, Assistant Professor at \u00c9cole Polytechnique F\u00e9d\u00e9rale de Lausanne

Title: Differentiable Simulation of Light

Watch: Recording (Password protected)

"},{"location":"outreach/#gordon-wetzstein-stanford-university","title":"Gordon Wetzstein (Stanford University)","text":"Details

Date: 29th September 2021

Presenters: Gordon Wetzstein, Associate Professor at Stanford University

Title: Towards Neural Signal Processing and Imaging

Watch: Recording (Password protected)

"},{"location":"outreach/#anjul-patney-nvidia","title":"Anjul Patney (NVIDIA)","text":"Details

Date: 22nd September 2021

Presenters: Anjul Patney, Principal Scientist at NVIDIA

Title: Peripheral Perception & Pixels

Watch: Recording (Password protected)

"},{"location":"outreach/#douglas-lanman-facebook","title":"Douglas Lanman (Facebook)","text":"Details

Date: 15th September 2021

Presenters: Douglas Lanman, Director of Display Systems Research at Facebook Reality Labs, Affiliate Instructor at University of Washington

Title: How to Pass the Visual Turing Test with AR/VR Displays

Watch: Recording (Password protected)

"},{"location":"outreach/#sylvia-xueni-pan-gold-smiths-university-of-london","title":"Sylvia Xueni Pan (Gold Smiths, University of London)","text":"Details

Date: 8th September 2021

Presenters: Sylvia Xueni Pan, Lecturer in Graphics, Goldsmiths, University of London

Title: Virtual Social Interaction in VR

Watch: Recording (Password protected)

"},{"location":"outreach/#duygu-ceylan-adobe","title":"Duygu Ceylan (Adobe)","text":"Details

Date: 28th July 2021

Presenters: Duygu Ceylan, Senior Research Scientist, Adobe

Title: Neural Dynamic Characters

Watch: Recording (Password protected)

"},{"location":"outreach/#oliver-kingshott-and-michael-fischer-university-college-london","title":"Oliver Kingshott and Michael Fischer (University College London)","text":"Details

Date: 21st July 2021

Presenters:

  • Oliver Kingshott, MSc student at University College London
  • Michael Fischer, PhD student at University College London

Title:

  • Lensless Learning
  • Learning to Overfit

Watch: Recording (Password protected)

"},{"location":"outreach/#yuta-itoh-tokyo-institute-of-technology","title":"Yuta Itoh (Tokyo Institute of Technology)","text":"Details

Date: 14th July 2021

Presenters: Yuta Itoh, Project Associate Professor at the University of Tokyo

Title: Vision Augmentation: overwriting our visual world via computation

Watch: Recording (Password protected)

"},{"location":"outreach/#kaan-aksit-university-college-london","title":"Kaan Ak\u015fit (University College London)","text":"Details

Date: 7th July 2021

Presenters: Kaan Ak\u015fit, Associate Professor at University College London

Title: Towards remote pixelless displays

Watch: Recording (Password protected)

"},{"location":"outreach/#cengiz-oztireli-university-of-cambridge-google","title":"Cengiz \u00d6ztireli (University of Cambridge, Google)","text":"Details

Date: 28th June 2021

Presenters: Cengiz \u00d6ztireli, Associate Professor at University of Cambridge, Senior Researcher at Google

Title: 3D Digital Reality - Modeling for Perception

Watch: Recording (Password protected)

"},{"location":"outreach/#paul-linton-city-university-of-london","title":"Paul Linton (City, University of London)","text":"Details

Date: 23rd June 2021

Presenters: Paul Linton, Research Fellow, Centre for Applied Vision Research, City, University of London

Title: Size and Distance Perception for Virtual Reality

Watch: Recording (Password protected)

"},{"location":"outreach/#luca-morreale-and-lisa-izzouzi-university-college-london","title":"Luca Morreale and Lisa Izzouzi (University College London)","text":"Details

Date: 16th June 2021

Presenters:

  • Luca Morreale, PhD student at University College London
  • Lisa Izzouzi, PhD student at University College London

Title:

  • Interpretable Neural Surface Maps
  • Meaningful meetups in Virtual Reality

Watch: Recording (Password protected)

"},{"location":"outreach/#rafa-mantiuk-cambridge-university","title":"Rafa\u0142 Mantiuk (Cambridge University)","text":"Details

Date: 9th June 2021

Presenter: Rafa\u0142 Mantiuk, Reader in Graphics and Displays at the University of Cambridge

Title: Modelling the quality of high frame-rate graphics for adaptive refresh rate and resolution

Watch: Recording (Password protected)

"},{"location":"outreach/#peter-shirley-nvidia","title":"Peter Shirley (NVIDIA)","text":"Details

Date: 2nd June 2021

Presenter: Peter Shirley, Distinguished Research Scientist at NVIDIA

Title: A tour of the rapidly moving target of computer graphics

Watch: Recording (Password protected)

"},{"location":"outreach/#david-walton-and-rafel-kuffner-dos-anjos-university-college-london","title":"David Walton and Rafel Kuffner dos Anjos (University College London)","text":"Details

Date: 26th May 2021

Presenters:

  • David Walton, Postdoctoral researcher at University College London
  • Rafael Kuffner dos Anjos, Postdoctoral researcher at University College London

Title:

  • Beyond Blur: Ventral Metamers for Foveated Rendering
  • Metameric Inpainting for Image Warping

Watch: Recording (Password protected)

"},{"location":"outreach/#tobias-ritschel-university-college-london","title":"Tobias Ritschel (University College London)","text":"Details

Date: 19th May 2021

Presenters: Tobias Ritschel, Professor of Computer Graphics at University College London

Title: Blue noise plots

Watch: Not recorded

"},{"location":"outreach/#philip-henzler-and-david-griffiths-university-college-london","title":"Philip Henzler and David Griffiths (University College London)","text":"Details

Date: 12th May 2021

Presenters:

  • Philip Henzler, PhD student at University College London
  • David Griffiths, PhD student at University College London

Title:

  • Generative Modelling of BRDF Textures from Flash Images
  • 3D object detection without scene labels

Watch: Recording (Password protected)

"},{"location":"outreach/#klara-brandstatter-and-felix-thiel-university-college-london","title":"Klara Brandst\u00e4tter and Felix Thiel (University College London)","text":"Details

Date: 5th May 2021

Presenters:

  • Klara Brandst\u00e4tter, PhD student at University College London
  • Felix Thiel, PhD student at University College London

Title:

  • Creating Lively Interactive Populated Environments
  • You have control. I have control

Watch: Recording (Password protected)

"},{"location":"outreach/#victoria-rege-and-alex-titterton-graphcore","title":"Victoria Rege and Alex Titterton (Graphcore)","text":"Details

Date: 28th April 2021

Presenters:

  • Victoria Rege, Director, Alliances & Strategic Partnerships at Graphcore
  • Alex Titterton, Field Engineer at Graphcore (and former CERN Physicist)

Title: Next in Machine Intelligence

Watch: Recording (Password protected)

"},{"location":"people/","title":"People","text":"

This page gives you a complete list of our current members. At the end of the page, you will also find our alumni list as a separate section.

"},{"location":"people/#current-members","title":"Current members","text":"

All our current members are located in 169 Euston Road, London NW1 2AE, United Kingdom.

"},{"location":"people/#faculty","title":"Faculty","text":"

Kaan Ak\u015fit

Associate Professor of Computational Light

E-mail

Office: R409

"},{"location":"people/#doctoral-students","title":"Doctoral students","text":"

Yicheng Zhan

Ph.D. Student

E-mail

Office: R404.188

Ziyang Chen

Ph.D. Student

E-mail

Office: R404.187

"},{"location":"people/#interns","title":"Interns","text":"

Henry Kam

MSc Student (New York University)

E-mail

Office: Virtual

Weijie Xie

MSc Student (University College London)

E-mail

Office: Virtual

Chuanjun Zheng

MSc Student (Shenzhen University)

E-mail

Office: Virtual

Koray Kavakl\u0131

Ph.D. Student (Ko\u00e7 University)

E-mail

Office: Virtual

"},{"location":"people/#alumni","title":"Alumni","text":""},{"location":"people/#post-doctoral-researchers","title":"Post-Doctoral Researchers","text":"
  • David Robert Walton, Investigation on perceptually guided display technology, 2021-2022, Next: Lecturer at Birmingham City University.
"},{"location":"people/#master-students","title":"Master Students","text":"
  • Do\u011fa Y\u0131lmaz, Learned Single-Pass Multitasking Perceptual Graphics for Immersive Displays, 2024, Next: Ph.D. student at University College London.
  • Weijie Xie, Learned Method for Computer Generated Hologram, 2024, Next: Intern Researcher at University College London.
  • Pengze Li, Text to hologram, 2024, Next: -.
  • Ziyang Chen, Speckle imaging with a lensless camera, 2023, Next: Ph.D. student at University College London.
  • Jeanne Beyazian, Hologram Compression, 2022, Next: Computer Vision Developer at Glimpse Analytics.
  • Yilin Qu, Predicting Next Frames of a RGBD video, 2022, Next: Machine Learning Software Engineer at Qualcomm Incorporated.
  • Gbemisola Akinola-Alli, Differentiable Ray Tracing for Designing Optical Parts, 2022, Next: Senior Engineer at MBDA.
  • Oliver Kingshott, Learned Point-spread Functions for Lensless Imaging, 2021, Next: Ph.D. Student at University College London.
  • Koray Kavakl\u0131, Towards Improving Visual Quality in Computer-Generated Holography, 2021, Next: Ph.D. Student at Ko\u00e7 University.
  • Chengkun Li, Neural Optical Beam Propagation, 2021, Next: Ph.D. student at the Chinese University of Hong Kong.
  • Yuze Yang, Learned 3D Representations: Point Cloud, Depth Maps and Holograms, 2021, Next: -.
"},{"location":"people/#research-interns","title":"Research Interns","text":"
  • Ahmet Hamdi G\u00fczel, Perceptual Prescription Correction, 2022-2024, Next: Ph.D. Student at University College London.
  • Yichen Zou, 3D Dataset generation, 2022, Next: Graduate Student at McGill University.
  • Nerea Sainz De La Maza, Printable camera casing design, 2022, Next: Bachelor of Science at University College London.
  • Kerem Ero\u011flu, Embedding data to images, 2022, Next: MEng at University College London.
  • Serhat Aksoy, Volume rendering tool, 2022, Next: Bachelor of Science at Istanbul Technical University.
  • Debosmit Neogi, Compressing RGBD data, 2022, Next: Master of Science at University at Buffalo.
  • Josh Kaizer, as a part of In2Science UK programme, 2022, Next: -.
  • Abubakar Sharif, as a part of In2Science UK programme, 2022, Next: -.
"},{"location":"publications/","title":"Publications","text":""},{"location":"publications/#2024","title":"2024","text":"

Focal Surface Holographic Light Transport using Learned Spatially Adaptive Convolutions

Chuanjun Zheng, Yicheng Zhan, Liang Shi, Ozan Cakmakci, and Kaan Ak\u015fit

Project site Manuscript Supplementary Code

Bibtex
    @inproceedings{zheng2024focalholography,\n      title={Focal Surface Holographic Light Transport using Learned Spatially Adaptive Convolutions},\n      author={Chuanjun Zheng, Yicheng Zhan, Liang Shi, Ozan Cakmakci, and Kaan Ak{\\c{s}}it},\n      booktitle = {SIGGRAPH Asia 2024 Technical Communications (SA Technical Communications '24)},\n      keywords = {Computer-Generated Holography, Light Transport, Optimization},\n      location = {Tokyo, Japan},\n      series = {SA '24},\n      month={December},\n      year={2024},\n      doi={https://doi.org/10.1145/3681758.3697989}\n    }\n

SpecTrack: Learned Multi-Rotation Tracking via Speckle Imaging

Ziyang Chen, Mustafa Do\u011fa Do\u011fan, Josef Spjut, and Kaan Ak\u015fit

Project site Manuscript Poster Code Project video

Bibtex
    @inproceedings{chen2024spectrack,\n      author = {Ziyang Chen and Mustafa Dogan and Josef Spjut and Kaan Ak{\\c{s}}it},\n      title = {SpecTrack: Learned Multi-Rotation Tracking via Speckle Imaging},\n      booktitle = {SIGGRAPH Asia 2024 Posters (SA Posters '24)},\n      year = {2024},\n      location = {Tokyo, Japan},\n      publisher = {ACM},\n      address = {New York, NY, USA},\n      pages = {2},\n      doi = {10.1145/3681756.3697875},\n      url = {https://doi.org/10.1145/3681756.3697875},\n      month = {December 03--06}\n    }\n

All-optical image denoising using a diffractive visual processor

\u00c7a\u011fatay I\u015f\u0131l, Tianyi Gan, Fazil Onuralp Ardic, Koray Mentesoglu, Jagrit Digani, Huseyin Karaca, Hanlong Chen, Jingxi Li, Deniz Mengu, Mona Jarrahi, Kaan Ak\u015fit, and Aydogan Ozcan

Publisher site Manuscript

Bibtex
    @article{I\u015f\u0131l2024,\n      author = {I{\\c{s}}{\\i}l, {\\c{C}}a{\\u{g}}atay and Gan, Tianyi and Ardic, Fazil Onuralp and Mentesoglu, Koray and Digani, Jagrit and Karaca, Huseyin and Chen, Hanlong and Li, Jingxi and Mengu, Deniz and Jarrahi, Mona and Ak{\\c{s}}it, Kaan and Ozcan, Aydogan},\n      title = {All-optical image denoising using a diffractive visual processor},\n      journal = {Light: Science {\\&} Applications},\n      year = {2024},\n      month = feb,\n      day = {04},\n      volume = {13},\n      number = {1},\n      pages = {43},\n      issn = {2047-7538},\n      doi = {10.1038/s41377-024-01385-6},\n      url = {https://doi.org/10.1038/s41377-024-01385-6}\n   }\n

Autocolor: Learned Light Power Control for Multi-Color Holograms

Yicheng Zhan, Hakan Urey, Qi Sun, and Kaan Ak\u015fit

Project site Manuscript Code

Bibtex
    @article{zhan2023autocolor,\n      title = {AutoColor: Learned Light Power Control for Multi-Color Holograms},\n      author = {Zhan, Yicheng and Sun, Qi and Ak\u015fit, Kaan},\n      journal  = \"arxiv\",\n      year = {2023},\n      month = may,\n    }\n

"},{"location":"publications/#2023","title":"2023","text":"

Multi-color Holograms Improve Brightness in Holographic Displays

Koray Kavakl\u0131, Liang Shi, Hakan Urey, Wojciech Matusik, and Kaan Ak\u015fit

Project site Manuscript Code Project video

Bibtex
    @inproceedings{kavakli2023multicolor,\n      title={Multi-color Holograms Improve Brightness in Holographic Displays},\n      author={Kavakl\u0131, Koray and Shi, Liang and Urey, Hakan and Matusik, Wojciech and Ak\u015fit, Kaan},\n      booktitle = {SIGGRAPH Asia 2023 Conference Papers},\n      articleno = {20},\n      numpages = {11},\n      keywords = {Brightness, Computer-generated holography, Holographic displays},\n      location = {Sydney, NSW, Australia},\n      series = {SA '23},\n      month={December},\n      year={2023},\n      doi={https://doi.org/10.1145/3610548.3618135}\n    }\n

ChromaCorrect: Prescription Correction in Virtual Reality Headsets through Perceptual Guidance

Ahmet G\u00fczel, Jeanne Beyazian, Praneeth Chakravarthula, and Kaan Ak\u015fit

Project site Manuscript Code Project video

Bibtex
    @ARTICLE{guzel2022prescription,\n      title    = \"ChromaCorrect: Prescription Correction in Virtual Reality Headsets through Perceptual Guidance\",\n      author   = \"G\u00fczel, Ahmet and Beyazian, Jeanne and Chakravarthula, Praneeth and Ak\u015fit, Kaan\",\n      journal  = \"Biomedical Optics Express\",\n      month    =  jan,\n      year     =  2023,\n    }\n

HoloBeam: Paper-Thin Near-Eye Displays

Kaan Ak\u015fit and Yuta Itoh

Project site Manuscript Code

Bibtex
    @ARTICLE{aksit2022holobeam,\n      title    = \"HoloBeam: Paper-Thin Near-Eye Displays\",\n      author   = \"Ak\u015fit, Kaan and Itoh, Yuta\",\n      journal  = \"IEEE VR 2023\",\n      month    =  Mar,\n      year     =  2023,\n    }\n

Realistic Defocus Blur for Multiplane Computer-Generated Holography

Koray Kavakl\u0131, Yuta Itoh, Hakan Urey and Kaan Ak\u015fit

Project site Manuscript Project video Code

Bibtex
    @misc{kavakli2022realisticdefocus,\n      doi = {10.48550/ARXIV.2205.07030},\n      url = {https://arxiv.org/abs/2205.07030},\n      author = {Kavakl\u0131, Koray and Itoh, Yuta and Urey, Hakan and Ak\u015fit, Kaan},\n      keywords = {Computer Vision and Pattern Recognition (cs.CV), Graphics (cs.GR), FOS: Computer and information sciences, FOS: Computer and information sciences, I.3.3},\n      title = {Realistic Defocus Blur for Multiplane Computer-Generated Holography},\n      publisher = {IEEE VR 2023},\n      month = {Mar},\n      year = {2023},\n      copyright = {Creative Commons Attribution Non Commercial No Derivatives 4.0 International}\n    }\n

"},{"location":"publications/#2022","title":"2022","text":"

Metameric Inpainting for Image Warping

Rafael Kuffner Dos Anjos, David R. Walton, Kaan Ak\u015fit, Sebastian Friston, David Swapp, Anthony Steed and Tobias Ritschel

Publisher site Manuscript

Bibtex
    @ARTICLE{Kuffner_Dos_Anjos2022-hm,\n        title    = \"Metameric inpainting for image warping\",\n        author   = \"Kuffner Dos Anjos, Rafael and Walton, David R and Ak\u015fit, Kaan and\n                    Friston, Sebastian and Swapp, David and Steed, Anthony and\n                    Ritschel, Tobias\",\n        journal  = \"IEEE Trans. Vis. Comput. Graph.\",\n        volume   = \"PP\",\n        month    =  oct,\n        year     =  2022,\n    }\n

Optimizing vision and visuals: lectures on cameras, displays and perception

Koray Kavakl\u0131, David Robert Walton, Nick Antipa, Rafa\u0142 Mantiuk, Douglas Lanman and Kaan Ak\u015fit

Project site Publisher site Manuscript Project video Code

Bibtex
    @incollection{kavakli2022optimizing,\n      title = {Optimizing vision and visuals: lectures on cameras, displays and perception},\n      author = {Kavakl\u0131, Koray and Walton, David Robert and Antipa, Nick and Mantiuk, Rafa\u0142 and Lanman, Douglas and Ak{\\c{s}}it, Kaan},\n      booktitle = {ACM SIGGRAPH 2022 Courses},\n      pages = {1--66},\n      year = {2022},\n      doi = {https://doi.org/10.1145/3532720.3535650},\n      video = {https://youtu.be/z_AtSgct6_I},\n    }\n

Unrolled Primal-Dual Networks for Lensless Cameras

Oliver Kingshott, Nick Antipa, Emrah Bostan and Kaan Ak\u015fit

Manuscript Publisher site Supplementary Code

Bibtex
    @article{kingshott2022unrolled,\n       selected={true},\n       title={Unrolled Primal-Dual Networks for Lensless Cameras},\n       author={Kingshott, Oliver and Antipa, Nick and Bostan, Emrah and Ak\u015fit, Kaan},\n       journal={Optics Express},\n       year={2022},\n       doi={https://doi.org/10.48550/arXiv.2203.04353}\n    }\n

Metameric Varifocal Holograms

David R. Walton, Koray Kavakl\u0131, Rafael Kuffner Dos Anjos, David Swapp, Tim Weyrich, Hakan Urey, Anthony Steed, Tobias Ritschel and Kaan Ak\u015fit

Project site Manuscript Project video Code

Bibtex
    @article{walton2021metameric,\n             title={Metameric Varifocal Holography},\n             author={Walton, David R and Kavakl{\\i}, Koray and Anjos, Rafael Kuffner dos and Swapp, David and Weyrich, Tim and Urey, Hakan and Steed, Anthony and Ritschel, Tobias and Ak{\\c{s}}it, Kaan},\n             publisher = {IEEE VR},\n             month = {March},\n             year={2022}\n            }\n

Learned holographic light transport

Invited

Koray Kavakl\u0131, Hakan Urey and Kaan Ak\u015fit

Publisher site Manuscript Code Dataset

Bibtex
    @article{Kavakli:22,\n      author = {Koray Kavakl{i} and Hakan Urey and Kaan Ak\\c{s}it},\n      journal = {Appl. Opt.},\n      keywords = {Holographic displays; Holographic recording; Holographic techniques; Image quality; Image reconstruction; Visible light communications},\n      number = {5},\n      pages = {B50--B55},\n      publisher = {OSA},\n      title = {Learned holographic light transport: invited},\n      volume = {61},\n      month = {Feb},\n      year = {2022},\n      url = {http://www.osapublishing.org/ao/abstract.cfm?URI=ao-61-5-B50},\n      doi = {10.1364/AO.439401},\n    }\n

"},{"location":"publications/#2021","title":"2021","text":"

Telelife: the future of remote living

Jason Orlosky, Misha Sra, Kenan Bekta\u015f, Huaishu Peng, Jeeeun Kim, Nataliya Kosmyna, Tobias Hollerer, Anthony Steed, Kiyoshi Kiyokawa and Kaan Ak\u015fit

Publisher site Manuscript

Bibtex
@ARTICLE{10.3389/frvir.2021.763340,\nAUTHOR={Orlosky, Jason and Sra, Misha and Bekta\u015f, Kenan and Peng, Huaishu and Kim, Jeeeun and Kos\u2019myna, Nataliya and H\u00f6llerer, Tobias and Steed, Anthony and Kiyokawa, Kiyoshi and Ak\\c{s}it, Kaan},   \nTITLE={Telelife: The Future of Remote Living},      \nJOURNAL={Frontiers in Virtual Reality},      \nVOLUME={2},      \nPAGES={147},     \nYEAR={2021},      \nURL={https://www.frontiersin.org/article/10.3389/frvir.2021.763340},       \nDOI={10.3389/frvir.2021.763340},      \nISSN={2673-4192},   \n}\n

SensiCut: material-aware laser cutting using speckle sensing and deep learning

Mustafa Doga Dogan, Steven Vidal Acevedo Colon, Varnika Sinha, Kaan Ak\u015fit and Stefanie Mueller

Publisher site Project site Manuscript Project video Presentation recording

Bibtex
@inproceedings{dogan2021sensicut,\n  title={SensiCut: Material-Aware Laser Cutting Using Speckle Sensing and Deep Learning},\n  author={Dogan, Mustafa Doga and Acevedo Colon, Steven Vidal and Sinha, Varnika and Ak{\\c{s}}it, Kaan and Mueller, Stefanie},\n  booktitle={The 34th Annual ACM Symposium on User Interface Software and Technology},\n  pages={24--38},\n  year={2021}\n}\n

Beyond blur: ventral metamers for foveated rendering

David R. Walton, Rafael Kuffner Dos Anjos, Sebastian Friston, David Swapp, Kaan Ak\u015fit, Anthony Steed and Tobias Ritschel

Publisher site Project site Manuscript

Bibtex
@article{walton2021beyond,\n    author = {David R. Walton and Rafael Kuffner Dos Anjos and Sebastian Friston and David Swapp and Kaan Ak\u015fit and Anthony Steed and Tobias Ritschel},\n    title    = {Beyond Blur: Ventral Metamers for Foveated Rendering},\n    journal = {ACM Trans. Graph. (Proc. SIGGRAPH 2021)},\n    year = {2021},\n    volume = {40},\n    number = {4},\n}\n

Beaming displays

Best paper nominee at IEEE VR 2021

Yuta Itoh, Takumi Kaminokado and Kaan Ak\u015fit

Publisher site Manuscript Project video Presentation recording

Bibtex
@article{itoh2021beaming,\n    author = {Yuta Itoh, Takumi Kaminokado, and Kaan Ak{s}it},\n    keywords = {Near-eye displays},\n    publisher = {IEEE VR},\n    title = {Beaming Displays},\n    month = {April},\n    year = {2021}\n}\n

"},{"location":"publications/#2020","title":"2020","text":"

Optical gaze tracking with spatially-sparse single-pixel detectors

Richard Li, Eric Whitmire, Michael Stengel, Ben Boudaoud, Jan Kautz, David Luebke, Shwetak Patel and Kaan Ak\u015fit

Publisher site Project site Manuscript Presentation recording

Bibtex
@article{li2020opticalgaze,\n    author = {Richard Li, Eric Whitmire, Michael Stengel, Ben Boudaoud, Jan Kautz, David Luebke, Shwetak Patel, and Kaan Ak{s}it},\n    keywords = {Gaze tracking, eye tracking, LEDs, photodiodes},\n    publisher = {ISMAR},\n    title = {Optical Gaze Tracking with Spatially-Sparse Single-Pixel Detectors},\n    month = {Nov},\n    year = {2020}\n}\n

Patch scanning displays: spatiotemporal enhancement for displays

Kaan Ak\u015fit

Publisher site Manuscript Project video

Bibtex
@article{aksit2020patch,\n    author = {Kaan Ak\\c{s}it},\n    journal = {Opt. Express},\n    keywords = {Digital micromirror devices; Image quality; Image reconstruction; Light sources; Optical components; Three dimensional imaging},\n    number = {2},\n    pages = {2107--2121},\n    publisher = {OSA},\n    title = {Patch scanning displays: spatiotemporal enhancement for displays},\n    volume = {28},\n    month = {Jan},\n    year = {2020},\n    url = {http://www.opticsexpress.org/abstract.cfm?URI=oe-28-2-2107}\n}\n

"},{"location":"publications/#2019","title":"2019","text":"

Near-eye display and tracking technologies for virtual and augmented reality

George Alex Koulieris, Kaan Ak\u015fit, Michael Stengel, Rafa\u0142 Mantiuk, Katerina Mania and Christian Richardt

Publisher site Manuscript Project video

Bibtex
@article{NearEyeDisplayAndTrackingSTAR,\nauthor  = {George Alex Koulieris and Kaan Ak{\\c{s}}it and Michael Stengel and Rafa{\\l} K. Mantiuk and Katerina Mania and Christian Richardt},\ntitle   = {Near-Eye Display and Tracking Technologies for Virtual and Augmented Reality},\njournal = {Computer Graphics Forum},\nyear    = {2019},\nvolume  = {38},\nnumber  = {2},\nurl     = {https://richardt.name/nedtt/},\n}\n

Foveated AR: dynamically-foveated augmented reality display

Emerging Technology best in show award at SIGGRAPH 2019

Jonghyun Kim, Youngmo Jeong, Michael Stengel, Kaan Ak\u015fit, Rachel Albert, Ben Boudaoud, Trey Greer, Joohwan Kim, Ward Lopes, Zander Majercik, Peter Shirley, Josef Spjut, Morgan McGuire and David Luebke

Publisher site Manuscript Project video

Bibtex
@article{kim2019foveated,\n  title={Foveated AR: dynamically-foveated augmented reality display},\n  author={Kim, Jonghyun and Jeong, Youngmo and Stengel, Michael and Ak{\\c{s}}it, Kaan and Albert, Rachel and Boudaoud, Ben and Greer, Trey and Kim, Joohwan and Lopes, Ward and Majercik, Zander and others},\n  journal={ACM Transactions on Graphics (TOG)},\n  volume={38},\n  number={4},\n  pages={1--15},\n  year={2019},\n  publisher={ACM New York, NY, USA}\n}\n

"},{"location":"publications/#2018","title":"2018","text":"

FocusAR: auto-focus augmented reality eyeglasses for both real and virtual

Best paper award at ISMAR 2018

Presented at SIGGRAPH ASIA 2018

Praneeth Chakravarthula, David Dunn, Kaan Ak\u015fit and Henry Fuchs

Publisher site Manuscript Presentation recording Presentation source

Bibtex
@article{chakravarthula2018focusar,\n  title={focusar: auto-focus augmented reality eyeglasses for both real and virtual},\n  author={chakravarthula, praneeth and dunn, david and ak{\\c{s}}it, kaan and fuchs, henry},\n  journal={ieee transactions on visualization and computer graphics},\n  year={2018},\n  publisher={ieee}\n}\n

Manufacturing application-driven foveated near-eye displays

Best paper nominee at IEEE VR 2018

Emerging Technology best in show award at SIGGRAPH 2018

Kaan Ak\u015fit, Praneeth Chakravarthula, Kishore Rathinavel, Youngmo Jeong, Rachel Albert, Henry Fuchs and David Luebke

Publisher site Manuscript Project video Presentation recording Presentation source

Bibtex
@article{akcsit2019manufacturing,\n  title={Manufacturing application-driven foveated near-eye displays},\n  author={Ak{\\c{s}}it, Kaan and Chakravarthula, Praneeth and Rathinavel, Kishore and Jeong, Youngmo and Albert, Rachel and Fuchs, Henry and Luebke, David},\n  journal={IEEE transactions on visualization and computer graphics},\n  volume={25},\n  number={5},\n  pages={1928--1939},\n  year={2019},\n  publisher={IEEE}\n}\n

"},{"location":"publications/#2017","title":"2017","text":"

Near-Eye varifocal augmented reality display using see-through screens

Kaan Ak\u015fit, Ward Lopes, Jonghyun Kim, Peter Shirley and David Luebke

Publisher site Manuscript Video

Bibtex
@Article{Aksit2017Varifocal,\nTitle      = {Near-Eye Varifocal Augmented Reality Display using See-Through Screens},\nAuthor     = {K. Ak{\\c{s}}it and W. Lopes and J. Kim and P. Shirley and D. Luebke},\njournal    = {ACM Trans. Graph. (SIGGRAPH)},\nissue      = {36},\nnumber     = {6},\nyear = {2017}}\n

Wide field of view varifocal near-eye display using see-through deformable membrane mirrors

Best paper award at IEEE VR 2017

SIGGRAPH 2017 Emerging Technologies DCEXPO Special Prize

David Dunn, Cary Tippets, Kent Torell, Petr Kellnhofer, Kaan Ak\u015fit, Piotr Didyk, Karol Myszkowski, David Luebke and Henry Fuchs

Publisher site Project site Manuscript Video

Bibtex
@article{dunn2017wide,\ntitle={Wide Field Of View Varifocal Near-Eye Display Using See-Through Deformable Membrane Mirrors},\nauthor={Dunn, David and Tippets, Cary and Torell, Kent and Kellnhofer, Petr and Ak{\\c{s}}it, Kaan and Didyk, Piotr and Myszkowski, Karol and Luebke, David and Fuchs, Henry},\njournal={IEEE Transactions on Visualization and Computer Graphics},\nvolume={23},\nnumber={4},\npages={1322--1331},\nyear={2017},\npublisher={IEEE}\n}}\n

"},{"location":"publications/#2016","title":"2016","text":"

Gaze-sensing LEDs for head mounted displays

Kaan Ak\u015fit, Jan Kautz and David Luebke

Publisher site Manuscript Video

Bibtex
@article{akcsit2020gaze,\n  title={Gaze-sensing leds for head mounted displays},\n  author={Ak{\\c{s}}it, Kaan and Kautz, Jan and Luebke, David},\n  journal={arXiv preprint arXiv:2003.08499},\n  year={2020}\n}\n

"},{"location":"publications/#2015","title":"2015","text":"

Slim near-eye display using pinhole aperture arrays

Kaan Ak\u015fit, Jan Kautz and David Luebke

Publisher site Project site Manuscript Video

Bibtex
@article{Aksit:15, \nauthor = {Kaan Ak\\c{s}it and Jan Kautz and David Luebke}, \njournal = {Appl. Opt.}, \nkeywords = {Apertures; Vision - binocular and stereopsis ; Computational imaging},\nnumber = {11}, \npages = {3422--3427}, \npublisher = {OSA},\ntitle = {Slim near-eye display using pinhole aperture arrays}, \nvolume = {54}, \nmonth = {Apr},\nyear = {2015},\nurl = {http://ao.osa.org/abstract.cfm?URI=ao-54-11-3422},\ndoi = {10.1364/AO.54.003422},\nabstract = {We report a new technique for building a wide-angle, lightweight, thin-form-factor, cost-effective, easy-to-manufacture near-eye head-mounted display (HMD) for virtual reality applications. Our approach adopts an aperture mask containing an array of pinholes and a screen as a source of imagery. We demonstrate proof-of-concept HMD prototypes with a binocular field of view (FOV) of 70\\&amp;\\#xB0;\\&amp;\\#xD7;45\\&amp;\\#xB0;, or total diagonal FOV of 83\\&amp;\\#xB0;. This FOV should increase with increasing display panel size. The optical angular resolution supported in our prototype can go down to 1.4\\&amp;\\#x2013;2.1 arcmin by adopting a display with 20\\&amp;\\#x2013;30\\&amp;\\#xA0;\\&amp;\\#x3BC;m pixel pitch.},\n}\n

"},{"location":"publications/#2014","title":"2014","text":"

Head-worn mixed reality projection display application

Kaan Ak\u015fit, Daniel Kade, O\u011fuzhan \u00d6zcan and Hakan Urey

Publisher site Manuscript Video

Bibtex
@inproceedings{Aksit:2014:HMR:2663806.2663826,\n author = {Ak\\c{s}it, Kaan and Kade, Daniel and \\\"{O}zcan, O\\u{g}uzhan and \\\"{U}rey, Hakan},\n title = {Head-worn Mixed Reality Projection Display Application},\n booktitle = {Proceedings of the 11th Conference on Advances in Computer Entertainment Technology},\n series = {ACE '14},\n year = {2014},\n isbn = {978-1-4503-2945-3},\n location = {Funchal, Portugal},\n pages = {11:1--11:9},\n articleno = {11},\n numpages = {9},\n url = {http://doi.acm.org/10.1145/2663806.2663826},\n doi = {10.1145/2663806.2663826},\n acmid = {2663826},\n publisher = {ACM},\n address = {New York, NY, USA},\n keywords = {head-mounted projection display, immersive environments, laser projector, mixed reality, motion capture},\n} \n

Super stereoscopy technique for comfortable and realistic 3D displays

Kaan Ak\u015fit, Amir Niaki, Erdem Ulusoy and Hakan Urey

Publisher site Manuscript

Bibtex
@article{Aksit:14, \nauthor = {Kaan Ak\\c{s}it and Amir Hossein Ghanbari Niaki and Erdem Ulusoy and Hakan Urey}, \njournal = {Opt. Lett.}, \nkeywords = {Displays; Vision - binocular and stereopsis ; Visual optics, accommodation},\nnumber = {24}, \npages = {6903--6906}, \npublisher = {OSA},\ntitle = {Super stereoscopy technique for comfortable and realistic 3D displays}, \nvolume = {39}, \nmonth = {Dec},\nyear = {2014},\nurl = {http://ol.osa.org/abstract.cfm?URI=ol-39-24-6903},\ndoi = {10.1364/OL.39.006903},\nabstract = {Two well-known problems of stereoscopic displays are the accommodation-convergence conflict and the lack of natural blur for defocused objects. We present a new technique that we name Super Stereoscopy (SS3D) to provide a convenient solution to these problems. Regular stereoscopic glasses are replaced by SS3D glasses which deliver at least two parallax images per eye through pinholes equipped with light selective filters. The pinholes generate blur-free retinal images so as to enable correct accommodation, while the delivery of multiple parallax images per eye creates an approximate blur effect for defocused objects. Experiments performed with cameras and human viewers indicate that the technique works as desired. In case two, pinholes equipped with color filters per eye are used; the technique can be used on a regular stereoscopic display by only uploading a new content, without requiring any change in display hardware, driver, or frame rate. Apart from some tolerable loss in display brightness and decrease in natural spatial resolution limit of the eye because of pinholes, the technique is quite promising for comfortable and realistic 3D vision, especially enabling the display of close objects that are not possible to display and comfortably view on regular 3DTV and cinema.},\n}\n

From Sound to Sight: Using Audio Processing to enable Visible Light Communication

Stefan Schmid, D. Schwyn, Kaan Ak\u015fit, Giorgio Corbellini, Thomas Gross and Stefan Mangold

Publisher site Manuscript

Bibtex
@INPROCEEDINGS{7063484,\nauthor={S. Schmid and D. Schwyn and K. Ak\u015fit and G. Corbellini and T. R. Gross and S. Mangold},\nbooktitle={2014 IEEE Globecom Workshops (GC Wkshps)},\ntitle={From sound to sight: Using audio processing to enable visible light communication},\nyear={2014},\npages={518-523},\nkeywords={audio signal processing;light emitting diodes;mobile handsets;optical communication;photodiodes;protocols;audio jack;audio processing;communication protocols;electrical signals;light signals;microphone input;mobile phones;on-board audio signal processing;passive components;peripheral device;photodiode;visible light communication;Decoding;Hardware;Lifting equipment;Light emitting diodes;Photodiodes;Protocols;Throughput},\ndoi={10.1109/GLOCOMW.2014.7063484},\nISSN={2166-0077},\nmonth={Dec},}\n

Connecting Networks of Toys and Smartphones with Visible Light Communication

Giorgio Corbellini, Kaan Ak\u015fit, Stefan Mangold, Stefan Schmid and Thomas R. Gross

Publisher site Manuscript Video

Bibtex
@ARTICLE{6852086,\nauthor={G. Corbellini and K. Aksit and S. Schmid and S. Mangold and T. R. Gross},\njournal={IEEE Communications Magazine},\ntitle={Connecting networks of toys and smartphones with visible light communication},\nyear={2014},\nvolume={52},\nnumber={7},\npages={72-78},\nkeywords={light emitting diodes;optical communication;optical receivers;smart phones;LED;VLC systems;brightness;consumer electronics;illumination;light emitting diodes;light receivers;microcontrollers;public environment;residential environment;smartphones;toys;visible light communication;wireless communication interface;Cameras;Commercialization;Frequency measurement;Illumination;Light emitting diodes;Microcontrollers;Receivers;Smart phones;Transceivers},\ndoi={10.1109/MCOM.2014.6852086},\nISSN={0163-6804},\nmonth={July},}\n

"},{"location":"publications/#2013","title":"2013","text":"

Dynamic exit pupil trackers for autostereoscopic displays

Kaan Ak\u015fit, Hadi Baghsiahi, P. Surman, Selim \u00d6l\u00e7er, E. Willman, David R. Selviah, Sally Day and Hakan Urey

Publisher site Manuscript Video

Bibtex
@article{Aksit:13, \nauthor = {Kaan Ak\\c{s}it and Hadi Baghsiahi and Phil Surman and Selim \u04e6l\\c{c}er and Eero Willman and David R. Selviah and Sally Day and Hakan Urey}, \njournal = {Opt. Express}, \nkeywords = {Displays; Optical systems; Optoelectronics; Laser beam shaping; Vision - binocular and stereopsis},\nnumber = {12}, \npages = {14331--14341}, \npublisher = {OSA},\ntitle = {Dynamic exit pupil trackers for autostereoscopic displays}, \nvolume = {21}, \nmonth = {Jun},\nyear = {2013},\nurl = {http://www.opticsexpress.org/abstract.cfm?URI=oe-21-12-14331},\ndoi = {10.1364/OE.21.014331},\nabstract = {This paper describes the first demonstrations of two dynamic exit pupil (DEP) tracker techniques for autostereoscopic displays. The first DEP tracker forms an exit pupil pair for a single viewer in a defined space with low intraocular crosstalk using a pair of moving shutter glasses located within the optical system. A display prototype using the first DEP tracker is constructed from a pair of laser projectors, pupil-forming optics, moving shutter glasses at an intermediate pupil plane, an image relay lens, and a Gabor superlens based viewing screen. The left and right eye images are presented time-sequentially to a single viewer and seen as a 3D image without wearing glasses and allows the viewer to move within a region of 40 cm {\\texttimes} 20 cm in the lateral plane, and 30 cm along the axial axis. The second DEP optics can move the exit pupil location dynamically in a much larger 3D space by using a custom spatial light modulator (SLM) forming an array of shutters. Simultaneous control of multiple exit pupils in both lateral and axial axes is demonstrated for the first time and provides a viewing volume with an axial extent of 0.6{\\textminus}3 m from the screen and within a lateral viewing angle of {\\textpm} 20{\\textdegree} for multiple viewers. This system has acceptable crosstalk (\\&lt; 5\\%) between the stereo image pairs. In this novel version of the display the optical system is used as an advanced dynamic backlight for a liquid crystal display (LCD). This has advantages in terms of overall display size as there is no requirement for an intermediate image, and in image quality. This system has acceptable crosstalk (\\&lt; 5\\%) between the stereo image pairs.},\n}\n

Multi-view autostereoscopic projection display using rotating screen

Spotlight on Optics

Osman Eldes, Kaan Ak\u015fit and Hakan Urey

Publisher site Manuscript Video

Bibtex
@article{Eldes:13,\nauthor = {Osman Eldes and Kaan Ak\\c{s}it and Hakan Urey},\njournal = {Opt. Express},\nkeywords = {Displays; Diffusers; Vision - binocular and stereopsis ; Autostereoscopic displays; Brightness; Fresnel lenses; Image registration; Pico projectors; Systems design},\nnumber = {23},\npages = {29043--29054},\npublisher = {OSA},\ntitle = {Multi-view autostereoscopic projection display using rotating screen},\nvolume = {21},\nmonth = {Nov},\nyear = {2013},\nurl = {http://www.osapublishing.org/oe/abstract.cfm?URI=oe-21-23-29043},\ndoi = {10.1364/OE.21.029043},\nabstract = {A new technique for multi-view autostereoscopic projection display is proposed, and demonstrated. The technique uses two mobile projectors, a rotating retro-reflective diffuser screen, and a head-tracking camera. As two dynamic viewing slits are created at the viewer's position, the slits can track the position of the eyes by rotating the screen. The display allows a viewer to move approximately 700 mm along the horizontal axis, and 500 mm along the vertical axis with an average crosstalk below 5 \\%. Two screen prototypes with different diffusers have been tried, and they provide luminance levels of 60 Cd/m2, and 160 Cd/m2 within the viewing field.},\n}\n

"},{"location":"publications/#2012","title":"2012","text":"

Portable 3D Laser Projector Using Mixed Polarization Technique

Best 3D product award of International 3D Society (4th year)

Kaan Ak\u015fit, Osman Elde\u015f, Selvan Viswanathan, Mark Freeman and Hakan Urey

Publisher site Manuscript Video

Bibtex
@ARTICLE{6297485,\n  author={Aksit, Kaan and Eldes, Osman and Viswanathan, Selvan and Freeman, Mark O. and Urey, Hakan},\n  journal={Journal of Display Technology}, \n  title={Portable 3D Laser Projector Using Mixed Polarization Technique}, \n  year={2012},\n  volume={8},\n  number={10},\n  pages={582-589},\n  doi={10.1109/JDT.2012.2205664}}\n

"},{"location":"publications/#2010","title":"2010","text":"

Heart rate monitoring via remote photoplethysmography with motion artifacts reduction

Giovanni Cennini, Jeremie Arguel, Kaan Ak\u015fit and Arno van Leest

Publisher site Manuscript Video

Bibtex
@article{Cennini:10, \nauthor = {Giovanni Cennini and Jeremie Arguel and Kaan Ak\\c{s}it and Arno van Leest}, \njournal = {Opt. Express}, \nkeywords = {Medical optics instrumentation; Optical devices; Optical sensing and sensors},\nnumber = {5}, \npages = {4867--4875}, \npublisher = {OSA},\ntitle = {Heart rate monitoring via remote photoplethysmography with motion artifacts reduction}, \nvolume = {18}, \nmonth = {Mar},\nyear = {2010},\nurl = {http://www.opticsexpress.org/abstract.cfm?URI=oe-18-5-4867},\ndoi = {10.1364/OE.18.004867},\nabstract = {In this paper, we present a novel photoplethysmographic device that operates remotely, i.e. not in contact with the skin. The device allows for real time measurements of heart rate with motion artifact reduction from a distance of a few centimeters up to several meters. High mobility of users is achieved in assessment of vital body signs, such as heart rate.},\n}\n

"},{"location":"publications/focal_surface_light_transport/","title":"Focal Surface Holographic Light Transport using Learned Spatially Adaptive Convolutions","text":""},{"location":"publications/focal_surface_light_transport/#people","title":"People","text":"

Chuanjun Zheng1

Yicheng Zhan1

Liang Shi2

Ozan Cakmakci3

Kaan Ak\u015fit1

1University College London, 2Massachusetts Institute of Technology, 3Google

SIGGRAPH Asia 2024 Technical Communications

"},{"location":"publications/focal_surface_light_transport/#resources","title":"Resources","text":"

Manuscript Supplementary Code

Bibtex
@inproceedings{zheng2024focalholography,\n  title={Focal Surface Holographic Light Transport using Learned Spatially Adaptive Convolutions},\n  author={Chuanjun Zheng, Yicheng Zhan, Liang Shi, Ozan Cakmakci, and Kaan Ak{\\c{s}}it},\n  booktitle = {SIGGRAPH Asia 2024 Technical Communications (SA Technical Communications '24)},\n  keywords = {Computer-Generated Holography, Light Transport, Optimization},\n  location = {Tokyo, Japan},\n  series = {SA '24},\n  month={December},\n  year={2024},\n  doi={https://doi.org/10.1145/3681758.3697989}\n}\n
"},{"location":"publications/focal_surface_light_transport/#abstract","title":"Abstract","text":"

Computer-Generated Holography (CGH) is a set of algorithmic methods for identifying holograms that reconstruct Three-Dimensional (3D) scenes in holographic displays. CGH algorithms decompose 3D scenes into multiple planes at different depth levels and rely on simulating light propagation from a source plane to a targeted plane. Thus, for \(n\) planes, CGH typically optimizes holograms using \(n\) plane-to-plane light transport simulations, leading to major time and computational demands. Our work replaces multiple planes with a focal surface and introduces a learned light transport model that could propagate a light field from a source plane to the focal surface in a single inference. Our model leverages spatially adaptive convolution to achieve the depth-varying propagation demanded by targeted focal surfaces. The proposed model speeds up the hologram optimization process by up to \(1.5x\), which contributes to hologram dataset generation and the training of future learned CGH models.

"},{"location":"publications/focal_surface_light_transport/#focal-surface-holographic-light-transport","title":"Focal Surface Holographic Light Transport","text":"

Simulating light propagation among multiple planes in a 3D volume is computationally demanding, as a 3D volume is represented with multiple planes and each plane requires a separate calculation of light propagation to reconstruct the target image. Thus, for \\(n\\) planes, conventional light transport simulation methods require \\(n\\) plane-to-plane simulations, leading to major time and computational demands. Our work replaces multiple planes with a focal surface and introduces a learned light transport model that could propagate a light field from a source plane to the focal surface in a single inference, reducing simulation time by \\(10x\\).
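To make the cost of conventional multiplane transport concrete, below is a minimal NumPy sketch of plane-to-plane propagation with the Angular Spectrum Method (ASM); the wavelength, pixel pitch, plane depths, and random hologram are illustrative placeholders rather than values from our paper.

```python
import numpy as np

def angular_spectrum(field, wavelength, pixel_pitch, distance):
    """Propagate a complex field over `distance` using the Angular Spectrum Method."""
    ny, nx = field.shape
    fx = np.fft.fftfreq(nx, d=pixel_pitch)
    fy = np.fft.fftfreq(ny, d=pixel_pitch)
    FX, FY = np.meshgrid(fx, fy)
    # Free-space transfer function; evanescent components are suppressed.
    argument = np.maximum(1.0 - (wavelength * FX) ** 2 - (wavelength * FY) ** 2, 0.0)
    kernel = np.exp(1j * 2.0 * np.pi / wavelength * distance * np.sqrt(argument))
    return np.fft.ifft2(np.fft.fft2(field) * kernel)

# Illustrative values: 532 nm source, 8 um pixel pitch, a random phase-only hologram.
wavelength, pixel_pitch = 532e-9, 8e-6
hologram = np.exp(1j * 2.0 * np.pi * np.random.rand(1024, 1024))

# Conventional multiplane transport: one plane-to-plane simulation per target plane (n = 6 here).
depths = np.linspace(-3e-3, 3e-3, 6)
planes = [angular_spectrum(hologram, wavelength, pixel_pitch, z) for z in depths]
# A focal-surface model replaces this loop with a single learned inference.
```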

"},{"location":"publications/focal_surface_light_transport/#results","title":"Results","text":"

When simulating a full-color, all-in-focus 3D image across a focal surface, the conventional Angular Spectrum Method (ASM) requires eighteen forward passes to simulate the 3D image with six depth planes, given that there are three color primaries. In contrast, our model simulates the three color-primary images simultaneously onto a focal surface with a single forward pass. Meanwhile, our model preserves more high-frequency content than U-Net, providing finer details and sharper edges, closer to the ground truth.

We utilize our model for a 3D phase-only hologram optimization application under \(0 mm\) propagation distance. Optimizing holograms with six target planes using ASM is denoted as ASM 6, while Ours 6 represents optimizing holograms using our model with six focal surfaces. When comparing the simulation results, all holograms are reconstructed using ASM for performance assessment. Ours 6 achieves comparable results in about \(70\%\) of the optimization time of ASM 6.
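As a rough illustration of what "ASM 6" entails per iteration, here is a hedged PyTorch sketch of gradient descent-based phase-only hologram optimization against six target planes; the propagation model, random targets, and hyperparameters are placeholders, not our implementation.

```python
import math
import torch

def asm_propagate(field, wavelength, pixel_pitch, distance):
    # Angular Spectrum Method transfer function, as in the NumPy sketch above.
    ny, nx = field.shape[-2:]
    fy, fx = torch.fft.fftfreq(ny, d=pixel_pitch), torch.fft.fftfreq(nx, d=pixel_pitch)
    FY, FX = torch.meshgrid(fy, fx, indexing="ij")
    argument = torch.clamp(1.0 - (wavelength * FX) ** 2 - (wavelength * FY) ** 2, min=0.0)
    phase = (2.0 * math.pi / wavelength) * distance * torch.sqrt(argument)
    return torch.fft.ifft2(torch.fft.fft2(field) * torch.exp(1j * phase))

wavelength, pixel_pitch = 532e-9, 8e-6
depths = torch.linspace(-3e-3, 3e-3, 6)            # six target planes, as in "ASM 6"
targets = torch.rand(6, 512, 512)                  # placeholder target intensities

hologram_phase = torch.zeros(512, 512, requires_grad=True)
optimizer = torch.optim.Adam([hologram_phase], lr=0.1)

for step in range(200):
    optimizer.zero_grad()
    field = torch.exp(1j * hologram_phase)
    # "ASM 6": six propagation simulations per iteration; a learned focal-surface
    # model would replace this inner loop with a single forward pass.
    loss = sum(torch.mean((asm_propagate(field, wavelength, pixel_pitch, z).abs() ** 2 - t) ** 2)
               for z, t in zip(depths, targets))
    loss.backward()
    optimizer.step()
```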

We also apply our model to a 3D phase-only hologram optimization application under \(10 mm\) propagation distance.

"},{"location":"publications/focal_surface_light_transport/#relevant-research-works","title":"Relevant research works","text":"

Here are relevant research works from the authors:

  • Multi-color Holograms Improve Brightness in Holographic Displays
  • HoloBeam: Paper-Thin Near-Eye Displays
  • Realistic Defocus for Multiplane Computer-Generated Holography
  • Optimizing Vision and Visuals: Lectures on Cameras, Displays, and Perception
  • Learned Holographic Light Transport
  • Metameric Varifocal Holograms
  • Odak
"},{"location":"publications/focal_surface_light_transport/#outreach","title":"Outreach","text":"

We host a Slack group with more than 250 members. This Slack group focuses on the topics of rendering, perception, displays and cameras. The group is open to the public, and you can become a member by following this link.

"},{"location":"publications/focal_surface_light_transport/#contact-us","title":"Contact Us","text":"

Warning

Please reach us through email to provide your feedback and comments.

"},{"location":"publications/holobeam/","title":"HoloBeam: Paper-Thin Near-Eye Displays","text":""},{"location":"publications/holobeam/#people","title":"People","text":"

Kaan Ak\u015fit1

Yuta Itoh2

1University College London, 2The University of Tokyo

IEEE VR 2023

"},{"location":"publications/holobeam/#resources","title":"Resources","text":"

Manuscript Code

Bibtex
@inproceedings{aksit2022holobeam,\n  title = \"HoloBeam: Paper-Thin Near-Eye Displays\",\n  author = \"Ak\u015fit, Kaan and Itoh, Yuta\",\n  booktitle ={2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)},\n  pages = {581--591},\n  year = {2023},\n}\n
"},{"location":"publications/holobeam/#presentation","title":"Presentation","text":""},{"location":"publications/holobeam/#abstract","title":"Abstract","text":"

An emerging alternative to conventional Augmented Reality (AR) glasses designs, Beaming displays promise slim AR glasses free from challenging design trade-offs, including battery-related limits or computational budget-related issues. These beaming displays remove active components such as batteries and electronics from AR glasses and move them to a projector that projects images to a user from a distance (1-2 meters), where users wear only passive optical eyepieces. However, earlier implementations of these displays delivered poor resolutions (7 cycles per degree) without any optical focus cues and were introduced with a bulky form-factor eyepiece (\(\sim50~mm\) thick). This paper introduces a new milestone for beaming displays, which we call HoloBeam. In this new design, a custom holographic projector populates a micro-volume located at some distance (1-2 meters) with multiple planes of images. Users view magnified copies of these images from this small volume with the help of an eyepiece that is either a Holographic Optical Element (HOE) or a set of lenses. Our HoloBeam prototypes demonstrate the thinnest AR glasses to date with a submillimeter thickness (e.g., the HOE film is only \(120~\mu m\) thick). In addition, HoloBeam prototypes demonstrate near retinal resolutions (\(24\) cycles per degree) with a \(70\) degrees wide field of view.

"},{"location":"publications/holobeam/#results","title":"Results","text":"

As a next step in Beaming Displays, our work offers the thinnest and lightest near-eye display to date. Our wearable eyepieces could be just a lens or a holographic optical element.

In order to beam images to our eyepieces, we built a phase-only holographic projector.

We also show that a cheaper alternative to this projector could be built using common spatial light modulators.

In this work, we demonstrate the first Beaming Displays that can generate multiplane images using Computer-Generated Holography. The image below is a moving animation showing a focal sweep of images.

Although we mostly showed monochrome results, HoloBeam can also show full-color images.

"},{"location":"publications/holobeam/#relevant-research-works","title":"Relevant research works","text":"

Here are relevant research works from the authors:

  • Beaming Displays
  • Realistic Defocus for Multiplane Computer-Generated Holography
  • Optimizing Vision and Visuals: Lectures on Cameras, Displays, and Perception
  • Learned Holographic Light Transport
  • Metameric Varifocal Holograms
  • Odak
"},{"location":"publications/holobeam/#outreach","title":"Outreach","text":"

We host a Slack group with more than 250 members. This Slack group focuses on the topics of rendering, perception, displays and cameras. The group is open to the public, and you can become a member by following this link.

"},{"location":"publications/holobeam/#contact-us","title":"Contact Us","text":"

Warning

Please reach us through email to provide your feedback and comments.

"},{"location":"publications/holobeam/#acknowledgements","title":"Acknowledgements","text":"

The authors would like to thank reviewers for their valuable feedback. The authors wish to thank Koray Kavakl\u0131 for fruitful discussions.

Kaan Ak\u015fit is supported by the Royal Society's RGS\\R2\\212229 - Research Grants 2021 Round 2 in building the hardware prototype. Kaan Ak\u015fit is also supported by Meta Reality Labs inclusive rendering initiative 2022.

Yuta Itoh is supported by the JST FOREST Grant Number JPMJFR206E and JSPS KAKENHI Grant Number JP20J14971, 20H05958, and 21K19788, Japan.

"},{"location":"publications/multi_color/","title":"Multi-color Holograms Improve Brightness in Holographic Displays","text":""},{"location":"publications/multi_color/#people","title":"People","text":"

Koray Kavakl\u01311

Liang Shi2

Hakan Urey1

Wojciech Matusik2

Kaan Ak\u015fit3

1Ko\u00e7 University, 2Massachusetts Institute of Technology, 3University College London

SIGGRAPH Asia 2023

"},{"location":"publications/multi_color/#resources","title":"Resources","text":"

Manuscript Supplementary Code Project video

Bibtex
@inproceedings{kavakli2023multicolor,\n  title={Multi-color Holograms Improve Brightness in Holographic Displays},\n  author={Kavakl\u0131, Koray and Shi, Liang and Urey, Hakan and Matusik, Wojciech and Ak\u015fit, Kaan},\n  booktitle = {SIGGRAPH Asia 2023 Conference Papers},\n  articleno = {20},\n  numpages = {11},\n  keywords = {Brightness, Computer-generated holography, Holographic displays},\n  location = {Sydney, NSW, Australia},\n  series = {SA '23},\n  month={December},\n  year={2023},\n  doi={https://doi.org/10.1145/3610548.3618135}\n}\n
"},{"location":"publications/multi_color/#video","title":"Video","text":""},{"location":"publications/multi_color/#presentation","title":"Presentation","text":""},{"location":"publications/multi_color/#abstract","title":"Abstract","text":"

Holographic displays generate Three-Dimensional (3D) images by displaying single-color holograms time-sequentially, each lit by a single-color light source. However, representing each color one by one limits brightness in holographic displays. This paper introduces a new driving scheme for realizing brighter images in holographic displays. Unlike the conventional driving scheme, our method utilizes three light sources to illuminate each displayed hologram simultaneously at various intensity levels. In this way, our method reconstructs a multiplanar three-dimensional target scene using consecutive multi-color holograms and persistence of vision. We co-optimize multi-color holograms and required intensity levels from each light source using a gradient descent-based optimizer with a combination of application-specific loss terms. We experimentally demonstrate that our method can increase the intensity levels in holographic displays up to three times, reaching a broader range and unlocking new potentials for perceptual realism in holographic displays.

"},{"location":"publications/multi_color/#results","title":"Results","text":"

Conventional holographic displays use a single Spatial Light Modulator (SLM) and reconstruct full-color images by time-sequentially displaying single-color holograms, each dedicated to a color channel. When holographic displays reconstruct scenes with brightness levels beyond the peak intensity of their corresponding color channels, the result is often darker images than the intended levels, along with visual distortions or color mismatches (see the conventional case in the figure below). In such cases, the dynamic range of the target is typically limited to the peak intensity of the light source, which is often not enough to deliver the desired visual experience.

Without altering hardware, we argue that holographic displays could dedicate extra time to each color channel to improve their perceived brightness levels, as demonstrated in the figure below. Our work aims to improve holographic displays' dynamic range by more effectively but aggressively utilizing color primaries and holograms. For this purpose, we introduce a new Computer-Generated Holography (CGH) driving scheme. In this scheme, multi-color holograms simultaneously operate over multiple wavelengths of light and provide 3D multiplanar images. We calculate multi-color holograms using a Gradient Descent (GD) based solver guided by a combination of application-specific loss functions. In the meantime, we co-optimize the brightness levels required to illuminate each multi-color hologram. We experimentally verify our findings using a holographic display prototype by showing reconstructions of brighter scenes with a broader dynamic range in an artifact-free and color-accurate manner.
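The following PyTorch sketch illustrates the co-optimization idea at a very high level: hologram phases and per-hologram, per-channel light-source powers are optimized jointly against a bright target. The propagation here is a plain FFT stand-in rather than a wavelength-dependent physical model, and all sizes, names, and loss terms are illustrative rather than our actual implementation.

```python
import torch

torch.manual_seed(0)
target = 1.8 * torch.rand(3, 256, 256)              # target scene brighter than x1.0 peak (placeholder)

num_holograms = 3                                    # consecutive multi-color holograms
phases = torch.zeros(num_holograms, 256, 256, requires_grad=True)
powers = torch.full((num_holograms, 3), 0.5, requires_grad=True)  # per-hologram RGB intensity levels

optimizer = torch.optim.Adam([phases, powers], lr=0.05)

def propagate(phase):
    # Placeholder free-space propagation (a single FFT); a physically accurate pipeline
    # would use a wavelength-dependent model such as the Angular Spectrum Method.
    return torch.fft.fft2(torch.exp(1j * phase), norm="ortho")

for step in range(300):
    optimizer.zero_grad()
    reconstruction = torch.zeros_like(target)
    for h in range(num_holograms):
        image = propagate(phases[h]).abs() ** 2      # intensity produced by hologram h
        # Each hologram is lit by all three sources simultaneously at learned power levels.
        reconstruction = reconstruction + torch.clamp(powers[h], 0.0, 1.0)[:, None, None] * image
    loss = torch.mean((reconstruction - target) ** 2)  # application-specific losses would be added here
    loss.backward()
    optimizer.step()
```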

The figure below shows photographs from our holographic display for the conventional and our schemes (more sample results are in our supplementary). For such a scene, our method can safely support up to \(\times1.8\) peak brightness without causing significant image distortions or artifacts. On the other hand, the conventional hologram fails to support peak brightness higher than \(\times1.0\). Beyond \(\times1.8\) peak brightness levels, images are typically heavily dominated by noise in the conventional case.

In contrast, our method loses color integrity slightly or generates noise comparable to the conventional scheme at \(\times1.2\) peak brightness.

Our method can also support three-dimensional multiplanar images.

"},{"location":"publications/multi_color/#relevant-research-works","title":"Relevant research works","text":"

Here are relevant research works from the authors:

  • HoloBeam: Paper-Thin Near-Eye Displays
  • Realistic Defocus for Multiplane Computer-Generated Holography
  • Optimizing Vision and Visuals: Lectures on Cameras, Displays, and Perception
  • Learned Holographic Light Transport
  • Metameric Varifocal Holograms
  • Odak
"},{"location":"publications/multi_color/#external-other-links","title":"External Other Links","text":"

Here are links related to our project such as videos, articles or podcasts:

  • ACM SIGGRAPH Asia 2023, Technical Papers Fast Forward (Preview the presentations on 13 Dec, Day 2)
"},{"location":"publications/multi_color/#outreach","title":"Outreach","text":"

We host a Slack group with more than 250 members. This Slack group focuses on the topics of rendering, perception, displays and cameras. The group is open to the public, and you can become a member by following this link.

"},{"location":"publications/multi_color/#contact-us","title":"Contact Us","text":"

Warning

Please reach us through email to provide your feedback and comments.

"},{"location":"publications/multi_color/#acknowledgements","title":"Acknowledgements","text":"

Kaan Ak\u015fit is supported by the Royal Society's RGS\\R2\\212229 - Research Grants 2021 Round 2 in building the hardware prototype. Kaan Ak\u015fit is also supported by Meta Reality Labs inclusive rendering initiative 2022. Liang Shi is supported by Meta Research PhD fellowship (2021-2023).

Hakan Urey is supported by the European Innovation Council\u2019s HORIZON-EIC-2021-TRANSITION-CHALLENGES program Grant Number 101057672 and T\u00fcbitak\u2019s 2247-A National Lead Researchers Program, Project Number 120C145.

"},{"location":"publications/realistic_defocus_cgh/","title":"Realistic Defocus Blur for Multiplane Computer-Generated Holography","text":""},{"location":"publications/realistic_defocus_cgh/#people","title":"People","text":"

Koray Kavakl\u01311

Yuta Itoh2

Hakan \u00dcrey1

Kaan Ak\u015fit3

1Ko\u00e7 University, 2The University of Tokyo 3University College London

IEEE VR 2023

"},{"location":"publications/realistic_defocus_cgh/#resources","title":"Resources","text":"

Manuscript Project video Code

Bibtex
@misc{kavakli2022realisticdefocus,\n  doi = {10.48550/ARXIV.2205.07030},\n  url = {https://arxiv.org/abs/2205.07030},\n  author = {Kavakl\u0131, Koray and Itoh, Yuta and Urey, Hakan and Ak\u015fit, Kaan},\n  keywords = {Computer Vision and Pattern Recognition (cs.CV), Graphics (cs.GR), FOS: Computer and information sciences, FOS: Computer and information sciences, I.3.3},\n  title = {Realistic Defocus Blur for Multiplane Computer-Generated Holography},\n  publisher = {arXiv},\n  year = {2022},\n  copyright = {Creative Commons Attribution Non Commercial No Derivatives 4.0 International}\n}\n
"},{"location":"publications/realistic_defocus_cgh/#presentation","title":"Presentation","text":""},{"location":"publications/realistic_defocus_cgh/#video","title":"Video","text":""},{"location":"publications/realistic_defocus_cgh/#abstract","title":"Abstract","text":"

This paper introduces a new multiplane CGH computation method to reconstruct artefact-free high-quality holograms with natural-looking defocus blur. Our method introduces a new targeting scheme and a new loss function. While the targeting scheme accounts for defocused parts of the scene at each depth plane, the new loss function analyzes focused and defocused parts separately in reconstructed images. Our method supports phase-only CGH calculations using various iterative (e.g., Gerchberg-Saxton, Gradient Descent) and non-iterative (e.g., Double Phase) CGH techniques. We achieve our best image quality using a modified gradient descent-based optimization recipe where we introduce a constraint inspired by the double phase method. We validate our method experimentally using our proof-of-concept holographic display, comparing various algorithms, including multi-depth scenes with sparse and dense contents.
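To make the loss design more concrete, here is a minimal sketch in PyTorch, assuming a per-plane binary mask that marks which pixels are in focus at each depth plane; the function, weights, and random stand-in data are illustrative and do not reproduce our exact formulation.

```python
import torch

def multiplane_loss(reconstructions, targets, focus_masks, defocus_weight=0.5):
    """Score focused and defocused regions of each depth plane separately.

    reconstructions, targets: (planes, H, W) intensity images.
    focus_masks: (planes, H, W) binary masks, 1 where the scene is in focus at that plane.
    """
    focused = torch.mean(focus_masks * (reconstructions - targets) ** 2)
    defocused = torch.mean((1.0 - focus_masks) * (reconstructions - targets) ** 2)
    return focused + defocus_weight * defocused

# Illustrative usage with random stand-ins for reconstructed planes, targets, and masks.
planes, height, width = 6, 256, 256
reconstructions = torch.rand(planes, height, width, requires_grad=True)
targets = torch.rand(planes, height, width)
focus_masks = (torch.rand(planes, height, width) > 0.5).float()
loss = multiplane_loss(reconstructions, targets, focus_masks)
loss.backward()
```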

"},{"location":"publications/realistic_defocus_cgh/#results","title":"Results","text":"

In this work, we demonstrate a new rendering pipeline for multiplane Computer-Generated Holography that can provide near-accurate defocus blur.

Our results suggest that our work can help alleviate unintended artifacts found in existing rendering pipelines for Computer-Generated Holography.

We capture these results using our in-house-built holographic display prototype.

Our technique is suitable for Augmented Reality applications (e.g., near-eye displays, heads-up displays). Here we provide photographs of virtual images generated by our computer-generated holography pipeline overlaid on an actual scene. Note that each image is focused at a different depth level.

Here we show a photograph of our holographic display prototype with Augmented Reality support.

"},{"location":"publications/realistic_defocus_cgh/#relevant-works-from-our-group","title":"Relevant works from our group","text":"

Here are relevant research works from our group:

  • Odak
  • Metameric Varifocal Holograms
  • Learned Holographic Light Transport
  • HoloBeam: Paper-Thin Near-Eye Displays
"},{"location":"publications/realistic_defocus_cgh/#contact","title":"Contact","text":"

If you have any queries, questions, suggestions or comments, contact us via kaanaksit@kaanaksit.com.

"},{"location":"publications/realistic_defocus_cgh/#acknowledgements","title":"Acknowledgements","text":"

We thank Erdem Ulusoy and G\u00fcne\u015f Ayd\u0131ndo\u011fan for discussions in the early phases of the project; Tim Weyrich and Makoto Yamada for dedicating GPU resources in various experimentation phases; and David Walton for his feedback on the manuscript.

Yuta Itoh is supported by the JST FOREST Program Grant Number JPMJPR17J2 and JSPS KAKENHI Grant Number JP20H05958 and JP21K19788.

Hakan Urey is supported by the European Innovation Council's HORIZON-EIC-2021-TRANSITION-CHALLENGES program Grant Number 101057672.

Kaan Ak\u015fit is supported by the Royal Society's RGS\\R2\\212229 - Research Grants 2021 Round 2 in building the hardware prototype.

"},{"location":"publications/spec_track/","title":"SpecTrack: Learned Multi-Rotation Tracking via Speckle Imaging","text":""},{"location":"publications/spec_track/#people","title":"People","text":"

Ziyang Chen1

Do\u011fa Do\u011fan2

Josef Spjut3

Kaan Ak\u015fit1

1University College London, 2Adobe Research, 3NVIDIA

SIGGRAPH Asia 2024 Poster

"},{"location":"publications/spec_track/#resources","title":"Resources","text":"

Manuscript Poster Supplementary Code

Bibtex
@inproceedings{chen2024spectrack,\n  author = {Ziyang Chen and Mustafa Dogan and Josef Spjut and Kaan Ak{\\c{s}}it},\n  title = {SpecTrack: Learned Multi-Rotation Tracking via Speckle Imaging},\n  booktitle = {SIGGRAPH Asia 2024 Posters (SA Posters '24)},\n  year = {2024},\n  location = {Tokyo, Japan},\n  publisher = {ACM},\n  address = {New York, NY, USA},\n  pages = {2},\n  doi = {10.1145/3681756.3697875},\n  url = {https://doi.org/10.1145/3681756.3697875},\n  month = {December 03--06}\n}\n
"},{"location":"publications/spec_track/#video","title":"Video","text":""},{"location":"publications/spec_track/#abstract","title":"Abstract","text":"

Precision pose detection is increasingly demanded in fields such as personal fabrication, Virtual Reality (VR), and robotics due to its critical role in ensuring accurate positioning information. However, conventional vision-based systems used in these fields often struggle with achieving high precision and accuracy, particularly when dealing with complex environments or fast-moving objects. To address these limitations, we investigate Laser Speckle Imaging (LSI), an emerging optical tracking method that offers promising potential for improving pose estimation accuracy. Specifically, our proposed LSI-based tracking method, SpecTrack, leverages the captures from a lensless camera and a retro-reflector marker with a coded aperture to achieve multi-axis rotational pose estimation with high precision. Our extensive trials using our in-house built testbed have shown that SpecTrack achieves an accuracy of \(0.31^\circ\) (std=\(0.43^\circ\)), significantly outperforming state-of-the-art approaches and improving accuracy by up to \(200\%\).

"},{"location":"publications/spec_track/#proposed-method","title":"Proposed Method","text":"

We aim to remotely obtain multiple absolute rotation angles from a coded retroreflective marker by utilizing the overlapping patterns generated by the multi-wavelength laser. The laser beam from the source (\(S\)) hits an arbitrary point (\(P\)) and diffracts at slightly different angles due to the different wavelengths (\(\lambda_0\) and \(\lambda_1\)). This phenomenon creates a correlation between the surface rotation angle and the captured speckle image.

The first image below shows the structure of the proposed sensor, which contains a bare sensor, a laser source and a beam splitter (\(10~mm \times 10~mm\)). The beam splitter is placed in front of the bare imaging sensor to ensure that most of the light reflected from the marker covers a large area of the sensor. Additionally, this co-axial optical layout eliminates the light source's lateral offsets, simplifying the speckle behavior under rotations.

We can tell from the image below that the captured image forms overlapping patterns when the surface rotates \(10^\circ\) about the y-axis.

Applying the Fast Fourier Transform (FFT) to obtain the magnitudes of the speckle images captured at various poses (y-axis rotations, z-axis rotations, and z-axis displacements) of the coded surface reveals interpretable patterns:

We employ a shallow neural network to handle the non-linearities of the physical system and estimate the absolute rotation angles from the speckle patterns.

Firstly, we preprocess the captured monochrome speckle frames \(I_{speckle}\) (\(640\times360\)~px) by transforming them into the frequency domain \(\mathcal{F}(I_{speckle})\) using the FFT. Then the frames are centrally cropped and concatenated into a tensor \([\mathcal{F}(I_{\text{speckle}, i})]_{i=1}^5\) with a shape of \((5,320,180)\). In our practical experience, this concatenated frame tensor provides more robust results when the marker is in motion because it incorporates temporal information. After that, we feed the samples into three convolutional blocks, each comprising a 2D convolution layer, batch normalization, a ReLU activation function, and max pooling. After the convolutions, the sample is flattened and fed into a Multi-Layer Perceptron (MLP) containing six linear layers, each followed by batch normalization and a ReLU activation function. The final layer of the MLP outputs the rotation angles \(\theta_y\) and \(\theta_z\) and the arbitrary depth \(d_z\).
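
A minimal PyTorch sketch of the pipeline described above is given below; the class name SpeckleNet, the channel counts and the MLP widths are our own assumptions made only for illustration, not the published implementation.

import torch
import torch.nn as nn

class SpeckleNet(nn.Module):
    # Illustrative sketch: FFT magnitudes of five stacked frames -> three convolutional blocks -> six-layer MLP.
    def __init__(self):
        super().__init__()
        def block(c_in, c_out):  # 2D convolution, batch normalization, ReLU, max pooling
            return nn.Sequential(nn.Conv2d(c_in, c_out, 3, padding=1), nn.BatchNorm2d(c_out), nn.ReLU(), nn.MaxPool2d(2))
        self.features = nn.Sequential(block(5, 16), block(16, 32), block(32, 64))
        def linear(f_in, f_out):  # linear layer, batch normalization, ReLU
            return nn.Sequential(nn.Linear(f_in, f_out), nn.BatchNorm1d(f_out), nn.ReLU())
        # After three 2x poolings the (320, 180) crop becomes (40, 22), giving 64 * 40 * 22 = 56320 features.
        self.mlp = nn.Sequential(linear(56320, 512), linear(512, 256), linear(256, 128),
                                 linear(128, 64), linear(64, 32), nn.Linear(32, 3))

    def forward(self, frames):
        # frames: (batch, 5, 640, 360) stack of monochrome speckle frames
        spectra = torch.fft.fftshift(torch.fft.fft2(frames), dim=(-2, -1)).abs()
        spectra = spectra[..., 320 - 160:320 + 160, 180 - 90:180 + 90]  # central crop to (320, 180)
        return self.mlp(self.features(spectra).flatten(1))  # (batch, 3): theta_y, theta_z, d_z

# Example: SpeckleNet()(torch.rand(2, 5, 640, 360)) returns a (2, 3) tensor of pose estimates.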

Since capturing samples in all six degrees of freedom simultaneously is physically difficult, we focus on capturing the speckle images as the marker rotates about the z-axis and y-axis. We add closed-loop motors to a rotary stage to automatically capture the speckle images when the marker is rotated about the various axes, as shown below. During the data collection, we control the motors to rotate the marker from \(0^\circ\) to \(40^\circ\) on the y-axis and from \(0^\circ\) to \(90^\circ\) on the z-axis. Besides the rotations, we repeat the experiment at different depths from \(16~cm\) to \(28~cm\).

"},{"location":"publications/spec_track/#conclusions","title":"Conclusions","text":""},{"location":"publications/spec_track/#baseline","title":"Baseline","text":"

We compare our work with the state of the art from Gibson et al. However, we lack direct access to accurate measurements, such as the wavelengths emitted by the off-the-shelf laser diode. We therefore employed a gradient descent-based optimization with a captured training set to estimate the unknown variables: the dominant wavelength \(\lambda_0\), the wavelength difference \(\Delta \lambda\), where \(\Delta \lambda = \lambda_0 - \lambda_1 \ll \lambda_0\), and the light source position \(S\) in 3D space. Following this, we tested the analytical model proposed by the authors with the test set that contains the speckle images captured when the marker rotates from \(0^\circ\) to \(40^\circ\) about the y-axis. The baseline achieves a Mean Absolute Error (MAE) of \(0.60^\circ\) (\(std=0.35^\circ\)) on our testbed.
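
Since the closed-form model of Gibson et al. is not reproduced on this page, the snippet below is only a sketch of how such unknown quantities could be fitted with gradient descent; analytical_model is an assumed placeholder for that closed-form model, and the initial guesses, learning rate and loss are arbitrary choices.

import torch

def fit_unknowns(analytical_model, train_angles, train_observations, steps=2000, lr=1e-3):
    # Fit the dominant wavelength, wavelength difference and source position by gradient descent.
    wavelength = torch.tensor(650e-9, requires_grad=True)       # dominant wavelength, initial guess
    delta_wavelength = torch.tensor(1e-12, requires_grad=True)  # wavelength difference, much smaller than the wavelength
    source_position = torch.zeros(3, requires_grad=True)        # light source position in 3D
    optimizer = torch.optim.Adam([wavelength, delta_wavelength, source_position], lr=lr)
    for _ in range(steps):
        optimizer.zero_grad()
        prediction = analytical_model(train_angles, wavelength, delta_wavelength, source_position)
        loss = torch.nn.functional.mse_loss(prediction, train_observations)
        loss.backward()
        optimizer.step()
    return wavelength.detach(), delta_wavelength.detach(), source_position.detach()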

SpecTrack achieved a lower MAE and std: \(\mathbf{0.31^\circ}\) and \(\mathbf{0.44^\circ}\), respectively. At the same time, the model can estimate the z-axis rotation with an MAE of \(\mathbf{0.52^\circ}\) (\(std=\mathbf{0.36^\circ}\)). Furthermore, the model adapts to varying depths, showing an accuracy of \(0.15~cm\).

"},{"location":"publications/spec_track/#future-work","title":"Future work","text":"

Testing and optimizing the system in real-world environments, considering varying lighting, distances, and object motions, is crucial for successful operation in various applications including VR, AR, and robotics.

"},{"location":"publications/spec_track/#relevant-research-works","title":"Relevant research works","text":"

Here are relevant research works from the authors:

  • Optimizing Vision and Visuals: Lectures on Cameras, Displays, and Perception
  • Unrolled Primal-Dual Networks for Lensless Camera
  • Odak
"},{"location":"publications/spec_track/#outreach","title":"Outreach","text":"

We host a Slack group with more than 250 members. This Slack group focuses on the topics of rendering, perception, displays and cameras. The group is open to the public, and you can become a member by following this link.

"},{"location":"publications/spec_track/#contact-us","title":"Contact Us","text":"

Warning

Please reach us through email to provide your feedback and comments.

"},{"location":"teaching/","title":"Courses","text":"

The computational light laboratory offers lectures on various topics, including computational optics, computational displays, perceptual graphics and computational fabrication. For the complete list of lectures currently offered, please follow the menu on the right-hand side of this text. The lectures that we have offered so far are as follows:

Term Instructor(s) Course Content - Kaan Ak\u015fit - Computational Light Winter 2023 Kaan Ak\u015fit COMP0088 Introduction to Machine Learning Summer 2022 Kaan Ak\u015fit SIGGRAPH 2022 Optimizing Vision and Visuals: Lectures on Cameras, Displays and Perception Spring 2022 Kaan Ak\u015fit COMP0160 Lecture 2: Visual perception in perceptual graphics and computational displays Spring 2022 Kaan Ak\u015fit COMP0160 Lecture 7: Integrating Sensory Information in Computational Displays"},{"location":"teaching/comp0160_coursework_1/","title":"Course work I: COMP0160 Perception and Interfaces","text":"

Release date: 24th January 2022

Due date: 7th February 2022

Instructor: Kaan Ak\u015fit

"},{"location":"teaching/comp0160_coursework_1/#background","title":"Background","text":"

COMP0160: Perception and Interfaces Course offers students a gateway to get familiar with various aspects of perception and interfaces. This document explains the task that students must complete to deliver successful work for this assignment. Specifically, this documentation is for the first coursework in the perception and interfaces course. The topic of the first coursework is the human visual system. As the students compile their coursework, they will build a strong understanding of how the human visual system behaves under different eye prescriptions (e.g., myopia, astigmatism). Having a detailed understanding of the given topic can help students gain insights towards solving problems in domains such as computational displays, perceptual graphics and computational imaging. The software tools used in this course are publicly available. They are used across the forefront of various industries and academia (e.g., from data science to computational approaches in physics, biology or chemistry).

"},{"location":"teaching/comp0160_coursework_1/#requirements","title":"Requirements","text":"

This assignment assumes that you have an understanding of programming with the Python language and that you are familiar with the Torch library, which provides access to linear algebra calculations. Within this assignment, you will be asked to deliver your solution in a Jupyter Notebook format. The students are expected to be familiar with working in Jupyter Notebooks and to know how to save notebooks so that they can deliver their work for evaluation in the required form. We typically use the Matplotlib library for plotting purposes while using Jupyter Notebooks.

In our production machines, we use the Python distribution 3.9.7, Torch distribution 1.9.0, Matplotlib distribution 3.3.4 and Jupyter Notebook distribution 6.2.0. To compile the assignment successfully, make sure these libraries are properly installed on your computer. Given that you are going to compile your work with Torch, you can choose to run your code on either CPU or GPU by selecting the proper device in your code. However, at the time you deliver your code, please make sure that your code runs on CPU. As a practical observation, you can also get a sense of the speed difference between these two devices and report it within your Jupyter Notebook (optional). We typically run these on a Linux operating system. However, it is not a requirement for students to use the same operating system, as these components also run on your favourite operating system (e.g., Windows, Android, Mac OS or alike). In your coursework, make sure to add docstring-type documentation for every function in your code and make sure to comment between lines to explain your steps within a function.
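
As a small, optional illustration of the device selection mentioned above (not a required part of the coursework), the device can be chosen once and reused when creating tensors; remember to leave the CPU device active in the notebook you deliver.

import torch

# Pick the compute device; fall back to CPU when no GPU is available.
# For the delivered notebook, keep the device fixed to torch.device('cpu').
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
image = torch.rand(1080, 1920, device=device)  # tensors can then be created directly on the chosen device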

Before starting with the tasks, we encourage students to attend the second lecture of the perception and interfaces course, namely Lecture 2: Visual Perception in Perceptual Graphics and Computational Displays (recording available on Moodle).

"},{"location":"teaching/comp0160_coursework_1/#some-useful-links-for-beginners","title":"Some useful links for beginners:","text":"
  • Absolute beginners guide for Python and Jupyter Notebook
  • Simple algebraic operations in Torch

Special note from your instructor: We designed this homework to help you be better prepared for what comes next in your life. If you do not have the right background to use the tools proposed in this coursework, or if you are a confused absolute beginner, please do not hesitate to reach out to us through Moodle. We are here to support you. Please carefully frame your questions as you approach us for support (e.g., what you want to ask and what you expect) so that we can support you at our best.

"},{"location":"teaching/comp0160_coursework_1/#problem-description","title":"Problem description","text":"

Each and every one of us has a unique visual system. At the heart of our visual system lies our eyes. Our eyes can be simplified as an optical instrument that images a three-dimensional scene onto our retinas, a sensor-like cellular structure. In this assignment, your task is to develop a user interface in a Jupyter Notebook that simulates how our vision is affected by various kinds of eye prescriptions. Simply put: how would you perceive a scene if you had a certain prescription? We expect this simulator to run as a live view, enabling the user to choose different eye prescriptions based on Zernike Polynomials. Before conducting any work, we suggest you go through the listed references below:

  • Zernike Polynomials,
  • Watson, Andrew B. \"Computing human optical point spread functions.\" Journal of Vision 15.2 (2015): 26-26,
  • Chakravarthula, Praneeth, et al. \"Gaze-contingent retinal speckle suppression for perceptually-matched foveated holographic displays.\" IEEE Transactions on Visualization and Computer Graphics 27.11 (2021): 4194-4203, -- observe Figure 4 here for sample point-spread functions.
  • Animation examples with Matplotlib and Widgets with Matplotlib,

In addition, you can get a sense of the importance of prescription in next-generation display technologies by going through the survey paper below (actual industrial applications of what you learn in this coursework):

  • Koulieris, George Alex, et al. \"Near\u2010eye display and tracking technologies for virtual and augmented reality.\" Computer Graphics Forum. Vol. 38. No. 2. 2019.

These references can help you to find the required technical details for your subtasks.

"},{"location":"teaching/comp0160_coursework_1/#zernike-polynomial-generator-10-points","title":"Zernike Polynomial generator (10 points)","text":"

The first task is to derive a Pythonic class that can generate Zernike Polynomials on demand. These polynomials can help you represent the point-spread functions of people with a prescription. A point-spread function is best described as the system response of your eye to a given scene. In the way you will use them, point-spread functions can be described as kernels that let you describe your eye as a linear transform, that is, a system represented by a single convolution. Once you have fully compiled the zernike_polynomial_generator class in your Notebook, please proceed with the next task:

class zernike_polynomial_generator():\n\n    def __init__(self):\n          ...\n

Our expectation from you, in this case, is to have multiple functions in your zernike_polynomial_generator class that spit out various point-spread functions; read more from here (7.5 points). A person may have an eye prescription composed of various point-spread functions. To support such a case, make sure to add a function in your class that outputs a weighted sum of chosen point-spread functions (2.5 points).
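
As a hedged illustration only (not a complete solution and not a prescribed design), a single Zernike mode, the defocus term \(Z_2^0 = \sqrt{3}(2\rho^2 - 1)\), could be evaluated on a unit-disk grid as below; how you wrap such modes into your class and convert them into point-spread functions is left to you.

import torch

def zernike_defocus(resolution=256):
    # Evaluate the defocus mode Z_2^0 = sqrt(3) (2 rho^2 - 1) on a unit disk, zero outside the disk.
    coords = torch.linspace(-1.0, 1.0, resolution)
    xs = coords.view(1, -1)  # x coordinates as a row
    ys = coords.view(-1, 1)  # y coordinates as a column
    rho = torch.sqrt(xs ** 2 + ys ** 2)  # radial coordinate via broadcasting
    z = (3.0 ** 0.5) * (2.0 * rho ** 2 - 1.0)
    return torch.where(rho <= 1.0, z, torch.zeros_like(z))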

"},{"location":"teaching/comp0160_coursework_1/#forward-model-10-points","title":"Forward model (10 points)","text":"

We will work under the assumption that our eyes respond to every point in a given scene in the same way (i.e., stationary kernels, not spatially varying kernels). You have to have a function that can load images from a given path.

def load_image(filename):\n     \"\"\"\n    Function to load an image.\n\n    Parameters\n    ------------\n    filename            : str\n                                Filename of the image.\n\n    Returns\n    --------\n    image               : torch.tensor\n                               Loaded image.\n     \"\"\"\n     ....\n    return image\n

Please do not hesitate to use images from Creative Commons for your experiments, and please make sure that these images are clean, meaning ethically good to work with. Please also make sure to work with images that have a 1920x1080x3 resolution, and please reduce each image to a single-channel image by taking the average across the colour axis (1920x1080 - black and white). Make sure to provide the image that you use together with your Jupyter Notebook in a compressed file format (ZIP).
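
One possible sketch of such a loader is given below; the use of PIL and NumPy, and folding the single-channel reduction into load_image, are assumptions of ours rather than requirements.

import numpy as np
import torch
from PIL import Image

def load_image(filename):
    # Load an RGB image and reduce it to a single-channel torch tensor by averaging the colour axis.
    image = np.asarray(Image.open(filename).convert('RGB'), dtype=np.float32) / 255.0
    image = torch.from_numpy(image)  # shape: (height, width, 3)
    return image.mean(dim=-1)        # shape: (height, width), black and white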

You will use this specific image load definition to load images, and you will process these images with your forward model function. Here, the forward model corresponds to convolving the loaded image with a point-spread function formed from a combination of Zernike polynomials, simulating various kinds of eye prescriptions. In its simplest form, your forward model should look like the one below:

def forward(image, psf):\n     \"\"\"\n    Forward model, convolving the given image with a given point-spread function.\n\n    Parameters\n    ------------\n    image              : torch.tensor\n                               Image as a torch tensor (MxN).\n    psf                   : torch.tensor\n                              Point-spread function as a torch tensor (MxN).\n\n    Returns\n    --------\n    result               : torch.tensor\n                               Abberated image.\n     \"\"\"\n     ....\n     return result\n

You will receive points for loading images properly (3 points). The remaining points will be dedicated to the forward model definition (7 points).

Hint for the forward model: torch.nn.Conv2d (you do not necessarily have to use it, but it can help).
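
For illustration only, a minimal forward model could also be written with torch.nn.functional.conv2d, a close functional relative of the hinted torch.nn.Conv2d; the sketch below assumes an odd-sized, non-negative point-spread function and normalizes it so that image brightness is preserved.

import torch

def forward(image, psf):
    # Convolve an (M x N) image with an (m x n) point-spread function, keeping the image size.
    kernel = (psf / psf.sum()).unsqueeze(0).unsqueeze(0)  # (1, 1, m, n), assumes a non-negative PSF
    x = image.unsqueeze(0).unsqueeze(0)                   # (1, 1, M, N)
    pad = (kernel.shape[-2] // 2, kernel.shape[-1] // 2)  # 'same' padding for odd kernel sizes
    result = torch.nn.functional.conv2d(x, kernel, padding=pad)
    return result.squeeze(0).squeeze(0)                   # back to (M, N)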

"},{"location":"teaching/comp0160_coursework_1/#visualizer-10-points","title":"Visualizer (10 points)","text":"

The last bit we want you to add to the Jupyter Notebook is related to the way you will visualize the outcome of your forward model. We want your code to be as interactive as you can make it on your given computer hardware. Make sure to visualize the images of your forward model using Matplotlib. Make sure to provide buttons and controls for your users to choose different combinations of Zernike polynomials to formulate a point-spread function, and make sure to visualize the point-spread functions that you have generated. Note that we will rely heavily on your visualizer to assess the outcome of your code; please pay attention to make sure that you have provided all the controls (either as variables to manipulate, or as buttons or sliders), and that they are easy for a user to work with. Note that you are allowed to use other libraries beyond Matplotlib, such as Pyplot, and if you want to develop a user interface outside the boundaries of a Jupyter Notebook, that is also fine. But if you do that, please make sure that you communicate the change clearly and that we are able to run your code.
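
A minimal sketch of one possible interactive visualizer is given below; it uses a Matplotlib slider to blend two point-spread functions and assumes the forward definition discussed above together with a hypothetical list psfs holding two precomputed point-spread functions.

import matplotlib.pyplot as plt
from matplotlib.widgets import Slider

def show_interactive(image, psfs):
    # Blend two point-spread functions with a slider and show the aberrated image next to the PSF.
    fig, (ax_img, ax_psf) = plt.subplots(1, 2)
    plt.subplots_adjust(bottom=0.2)
    slider_ax = plt.axes([0.2, 0.05, 0.6, 0.03])  # area reserved for the slider
    weight = Slider(slider_ax, 'weight', 0.0, 1.0, valinit=0.5)

    def update(_):
        psf = weight.val * psfs[0] + (1.0 - weight.val) * psfs[1]
        ax_psf.imshow(psf, cmap='gray')
        ax_img.imshow(forward(image, psf), cmap='gray')  # forward as defined in the previous task
        fig.canvas.draw_idle()

    weight.on_changed(update)
    update(None)
    plt.show()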

If you can plot the outcome of the forward model, this plotting can guarantee you half of the points you can receive (5 points). The remaining points can be received as you introduce more sophistication to your visualizer, as explained above (5 points).

"},{"location":"teaching/comp0160_coursework_1/#problem-and-potential-solutions-15-points","title":"Problem and potential solutions (15 points)","text":"

We want you to add a text section to your notebook, where you identify an unsolved or partially solved scientific problem related to eye prescription and visuals (displays, graphics or any other form). The source of this problem can be the existing literature; please make sure to survey it using your favourite search engines, both academic and non-academic (e.g., Brave, Google Scholar, etc.). The problem can also be based on your own practical observations, as long as you describe it clearly. You should also provide potential solutions to the problem that you have found in the literature, together with your own predictions towards new solutions in the future. The text should be no more than 500 words and no fewer than 250 words. Note that the length of your text is not an indicator of success; the most powerful writing often happens in shorter forms.

You will receive half of the points from your problem description (7.5 points). The remaining half will be from your proposed solution (7.5 points).

"},{"location":"teaching/comp0160_coursework_1/#contacting-us","title":"Contacting Us","text":"

The preferred way of communication is through University College London's online lecture system, Moodle. Please use the Moodle forum for your questions related to the coursework.

"},{"location":"teaching/comp0160_perception_and_interfaces/","title":"COMP0160: Perception and Interfaces","text":""},{"location":"teaching/comp0160_perception_and_interfaces/#summary","title":"Summary","text":"

COMP0160: Perception and Interfaces course offers students a gateway to get familiar with various aspects of perception and interfaces. Greater detail on the course and its broad description can be found on the course website.

Computational light laboratory contributes to COMP0160: Perception and Interfaces by providing two lectures introducing the human visual system, its relation with graphics and displays, and sensing modalities in emerging devices (e.g., near-eye displays for virtual reality and augmented reality). Each of these lectures is two hours long. In addition, we support these lectures with laboratory assignments for the students, which are vital for completing the course.

"},{"location":"teaching/comp0160_perception_and_interfaces/#timetable","title":"Timetable","text":"

The timetable provided below shows the parts of COMP0160 that are provided by the computational light laboratory.

Date Instructor(s) Content 14 January 2022 - 27th March 2022 Kaan Ak\u015fit Practical 17th January 2022 - 23rd January 2022 Kaan Ak\u015fit Visual Perception in graphics and displays 28th February 2022 - 6th March 2022 Kaan Ak\u015fit Integrating Sensory Information in Computational Displays"},{"location":"teaching/comp0160_perception_and_interfaces/#parts","title":"Parts","text":""},{"location":"teaching/comp0160_perception_and_interfaces/#practical","title":"Practical","text":"

12:00 noon to 1:00 pm, Fridays, 14th January 2022 - 27th March 2022

Chandler House G15

Description (Public)

"},{"location":"teaching/comp0160_perception_and_interfaces/#coursework","title":"Coursework","text":"

First coursework

"},{"location":"teaching/comp0160_perception_and_interfaces/#lecture-2-visual-perception-in-perceptual-graphics-and-computational-displays","title":"Lecture 2: Visual perception in perceptual graphics and computational displays","text":"

Winter 2022

Online

Recording (Password protected)

Slides (Invitation required)

This lecture focuses on human visual perception and its applications in computer graphics and computational display domains.

Details

Summary: The students will learn about human visual perception in this course. They will primarily learn about the eye and its structure. The information about the eye explained throughout the lecture will be linked to designing computational displays and perceptual graphics with real cases from the recent literature. Towards the end of this lecture, students will have enough information to build a simplified optical model of a human eye. They will be encouraged to build an eye model using this simplified optical simulation of the human eye.

References:

  • Panero, Julius, and Martin Zelnik. Human dimension & interior space: a source book of design reference standards. Watson-Guptill, 1979.

  • Bekerman, Inessa, Paul Gottlieb, and Michael Vaiman. \"Variations in eyeball diameters of the healthy adults.\" Journal of ophthalmology 2014 (2014).

  • Roberts, Bethany R., and Juliet L. Osborne. \"Testing the efficacy of a thermal camera as a search tool for locating wild bumble bee nests.\" Journal of Apicultural Research 58.4 (2019): 494-500.

  • Park, George E., and Russell Smith Park. \"Further evidence of change in position of the eyeball during fixation.\" Archives of Ophthalmology 23.6 (1940): 1216-1230.

  • Koulieris, George Alex, et al. \"Near\u2010eye display and tracking technologies for virtual and augmented reality.\" Computer Graphics Forum. Vol. 38. No. 2. 2019.

  • Cakmakci, Ozan, and Jannick Rolland. \"Head-worn displays: a review.\" Journal of display technology 2.3 (2006): 199-216.

  • De Groot, S. G., and J. W. Gebhard. \"Pupil size as determined by adapting luminance.\" JOSA 42.7 (1952): 492-495.

  • Hunt, Robert William Gainer. \"Light and dark adaptation and the perception of color.\" JOSA 42.3 (1952): 190-199.

  • Han, S. H., et al. \"The Change of Pupil Cycle Time after Occlusion Therapy in Amblyopia.\" Journal of the Korean Ophthalmological Society 38.2 (1997): 290-295.

  • Fine, I., et al. \"Optical properties of the sclera.\" Physics in Medicine & Biology 30.6 (1985): 565.

  • Zoulinakis, Georgios, et al. \"Accommodation in human eye models: a comparison between the optical designs of Navarro, Arizona and Liou-Brennan.\" International journal of ophthalmology 10.1 (2017): 43.

  • Herndon, Leon W., Jennifer S. Weizer, and Sandra S. Stinnett. \"Central corneal thickness as a risk factor for advanced glaucoma damage.\" Archives of ophthalmology 122.1 (2004): 17-21.

  • Glasser, Adrian, and Melanie CW Campbell. \"Presbyopia and the optical changes in the human crystalline lens with age.\" Vision research 38.2 (1998): 209-229.

  • Bharadwaj, Shrikant R., and Clifton M. Schor. \"Acceleration characteristics of human ocular accommodation.\" Vision Research 45.1 (2005): 17-28.

  • Campbell, F. W., and G. Westheimer. \"Dynamics of accommodation responses of the human eye.\" The Journal of physiology 151.2 (1960): 285-295.

  • Heron, Gordon, W. N. Charman, and C. Schor. \"Dynamics of the accommodation response to abrupt changes in target vergence as a function of age.\" Vision research 41.4 (2001): 507-519.

  • Phillips, Stephen, Douglas Shirachi, and Lawrence Stark. \"Analysis of accommodative response times using histogram information.\" Optometry and Vision Science 49.5 (1972): 389-401.

  • Deering, Michael F. \"A photon accurate model of the human eye.\" ACM Transactions on Graphics (TOG) 24.3 (2005): 649-658.

  • Ratnam, Kavitha, et al. \"Relationship between foveal cone structure and clinical measures of visual function in patients with inherited retinal degenerations.\" Investigative ophthalmology & visual science 54.8 (2013): 5836-5847.

  • Kim, Jonghyun, et al. \"Foveated AR: dynamically-foveated augmented reality display.\" ACM Transactions on Graphics (TOG) 38.4 (2019): 1-15.

"},{"location":"teaching/comp0160_perception_and_interfaces/#lecture-7-integrating-sensory-information-in-computational-displays","title":"Lecture 7: Integrating Sensory Information in Computational Displays","text":"

Winter 2022

Online

Recording (Password protected)

Slides (Invitation required)

This lecture focuses on integrating various kinds of sensory information to the next generation displays.

Details

Summary: In this course, students will learn about sensors and their integration into modern display systems such as Virtual and Augmented Reality near-eye displays and three-dimensional displays. In the first half, a review of various kinds of sensors that could capture vital signs from a user, such as heart rate and gaze orientation, will be provided. The second half will cover applications that use captured sensory information. These applications will be sampled from actual products on the market and research prototypes at the forefront of science.

  • Cennini, G., Arguel, J., Ak\u015fit, K., & van Leest, A. (2010). Heart rate monitoring via remote photoplethysmography with motion artifacts reduction. Optics express, 18(5), 4867-4875.

  • Li, Richard, Eric Whitmire, Michael Stengel, Ben Boudaoud, Jan Kautz, David Luebke, Shwetak Patel, and Kaan Ak\u015fit. \"Optical gaze tracking with spatially-sparse single-pixel detectors.\" In 2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR), pp. 117-126. IEEE, 2020.

  • Angelopoulos, Anastasios N., Julien NP Martel, Amit P. Kohli, Jorg Conradt, and Gordon Wetzstein. \"Event-Based Near-Eye Gaze Tracking Beyond 10,000 Hz.\" IEEE transactions on visualization and computer graphics 27, no. 5 (2021): 2577-2586.

  • Wei, Shih-En, Jason Saragih, Tomas Simon, Adam W. Harley, Stephen Lombardi, Michal Perdoch, Alexander Hypes, Dawei Wang, Hernan Badino, and Yaser Sheikh. \"Vr facial animation via multiview image translation.\" ACM Transactions on Graphics (TOG) 38, no. 4 (2019): 1-16.

  • Yaldiz, Mustafa B., Andreas Meuleman, Hyeonjoong Jang, Hyunho Ha, and Min H. Kim. \"DeepFormableTag: end-to-end generation and recognition of deformable fiducial markers.\" ACM Transactions on Graphics (TOG) 40, no. 4 (2021): 1-14.

  • Glauser, O., Wu, S., Panozzo, D., Hilliges, O., & Sorkine-Hornung, O. (2019). Interactive hand pose estimation using a stretch-sensing soft glove. ACM Transactions on Graphics (TOG), 38(4), 1-15.

  • Glauser, O., Panozzo, D., Hilliges, O., & Sorkine-Hornung, O. (2019). Deformation capture via soft and stretchable sensor arrays. ACM Transactions on Graphics (TOG), 38(2), 1-16.

  • HP Reverb G2 VR Headset

  • MediaPipe Iris: Real-time Iris Tracking and Depth Estimation

  • Brelyon: a window to a whole new world

  • Tobii's eye and head tracking for professional esports

"},{"location":"teaching/comp0160_perception_and_interfaces/#team","title":"Team","text":"

Kaan Ak\u015fit

Instructor

E-mail

"},{"location":"teaching/comp0160_perception_and_interfaces/#contact-us","title":"Contact Us","text":"

Warning

The preferred way of communication is through University College London's online lecture system, Moodle. Please do not reach us through email unless what you want to achieve, establish or ask is not possible through the online lecture system.

"},{"location":"teaching/comp0160_practical/","title":"Welcome","text":"

Start date: 12:00 pm, 14th January 2022,

Duration: This description is valid for the first four weeks of the practical sessions.

Welcome to the practical session for COMP0160: Perception and Interfaces. This course is designed to offer you, the students, a gateway to get familiar with various aspects of perception and interfaces.

In your lectures, you will learn about the human visual system and how humans perceive light from their surroundings. Specifically, the course \"Visual Perception in graphics and displays\", held between 17th January and 23rd January 2022, will teach you how eyes gaze and perceive their surroundings.

In these practical sessions, we provide the opportunity for you to get familiarized with standard tools used in computer graphics. These tools will help you translate what you learn from the courses into an actual outcome. The rest of this document will describe your task in these practical sessions. Remember that the task description provided here contains information on the session's aims, objectives, and goals. However, the description does not necessarily contain all the steps required to conduct your work. This is because everybody learns things in a slightly different way. We deliberately create that environment by not strictly telling you which steps to take, so that you can explore.

Please make sure to bring your personal laptop with you if you are physically attending.

Once again welcome to COMP0160 practical sessions!

Kaan Ak\u015fit

"},{"location":"teaching/comp0160_practical/#background-15-minutes","title":"Background (15 minutes)","text":"

Imagine you wake up ten years from now. You have long since graduated from your programme, and you have found yourself a job in industry. Guess what your job is about! It is about humans and their perceptions.

The company that you are working at builds digital humans and avatars to represent people or their artificial intelligence-powered bots. These bots offer innovative assistance services, such as representing you when you are unavailable (multi-conversation) or offering you services that are carried out by human workers today (e.g., call centres). These efforts are part of their upcoming revolution in virtual environments, more popularly known nowadays as the Metaverse.

In this hypothetical future, your manager calls you from your virtual reality device while you are having your first coffee of the day. She assigns you as the lead for upgrading the eye model that they use in their avatars and bots. They want to make sure that the eyes rendered on these avatars look realistic and physically plausible.

You now have these flashbacks, remembering that you learned in some class in the past called \"Visual Perception in Graphics and Displays\" how eyes rotate, shift and view.

In this exercise, please put that information to work: we ask you to build an eye model that rotates by following the constraints taught in that class. In short, your task is to build a 3D model of the eye that follows physical constraints while gazing (e.g., eyeballs cannot rotate 180 degrees; today's cutting-edge PC games make such easy mistakes).

"},{"location":"teaching/comp0160_practical/#tools-45-minutes","title":"Tools (45 minutes)","text":"

In this practical, you will be using Blender 3D, an open-source tool for modelling and rendering that represents the state of the art in computer graphics.

Please make sure to download Blender 3D to your production machine and familiarise yourself with the software controls. You can find many tutorials online by typing Blender tutorial in your favourite search engine (e.g., Google) or cloud video platform (e.g., YouTube).

"},{"location":"teaching/comp0160_practical/#create-a-mesh-of-your-face-1-hour","title":"Create a mesh of your face (1 hour)","text":"

Your first target in this practical is to figure out how to create a mesh of your face from a photograph. We will use the face mesh you created to place the two eyeballs that we aim to add. Once again, there are multiple tutorials online (e.g., sample tutorial), which you can easily search for. You are free to choose your own way of creating a mesh of your face. You can also rely on methods that offer one-click, add-on based solutions.

Once the face mesh is created, please remove the triangles where your eye should be and make sure to texture your face mesh using your photograph.

"},{"location":"teaching/comp0160_practical/#adding-the-eyeballs-45-minutes","title":"Adding the eyeballs (45 minutes)","text":"

Our next step is to add the eyeballs to your face mesh. Following the geometry that you have learned from the lecture, you will be creating an eyeball that is faithful to the physiology of the eye. These eyeballs will later be placed on the face mesh that you generated. There are again multiple tutorials online that you can rely on to generate a realistic-looking eyeball (e.g., sample tutorial). Once you are complete with this task, make sure to place these eyeballs on your face mesh.

"},{"location":"teaching/comp0160_practical/#realistic-gaze-15-minutes","title":"Realistic gaze (15 minutes)","text":"

Now that you have generated a face mesh and two eyeballs for your face mesh, please explore the constraint options in Blender 3D. Our aim here is to add a target (e.g., a box) at some distance in front of the face mesh, and we want our newly generated digital avatar to gaze at the target. You have to identify a means to rotate the eyeballs such that they always gaze at the box. But remember, you also learned in the lecture how much and how far an eyeball can rotate. We want to be able to add that as a constraint as well. So once the target moves, the eyeballs should rotate, but they should not turn beyond their rotation ability.
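
If you prefer scripting this setup instead of using Blender's graphical interface, a rough bpy sketch is given below; the object names, tracking axes and rotation limits are placeholders that you should replace with your own objects and with the values taught in the lecture.

import math
import bpy

# Assumed object names; adjust them to match your own scene.
eyeball = bpy.data.objects['eyeball_left']
target = bpy.data.objects['gaze_target']

# Make the eyeball always point towards the target.
track = eyeball.constraints.new(type='TRACK_TO')
track.target = target
track.track_axis = 'TRACK_NEGATIVE_Z'  # depends on how your eyeball mesh is oriented
track.up_axis = 'UP_Y'

# Clamp the rotation to plausible bounds (placeholder values; the limited axes also depend on your orientation).
limit = eyeball.constraints.new(type='LIMIT_ROTATION')
limit.owner_space = 'LOCAL'
limit.use_limit_x, limit.min_x, limit.max_x = True, math.radians(-35.0), math.radians(35.0)
limit.use_limit_z, limit.min_z, limit.max_z = True, math.radians(-45.0), math.radians(45.0)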

"},{"location":"teaching/comp0160_practical/#changing-the-iris-size-with-light-levels-1-hour","title":"Changing the iris size with light levels (1 hour)","text":"

This last task is entirely optional. You will seek a means to change the iris diameter with the light level in the environment of your digital avatar. As a first step, make sure to set the target you have created to be a light source. Since our digital avatar is always gazing at it, this target's light level will determine the size of the iris. You learned in your lecture how large or small an iris can be. We want you to manipulate the eyeball mesh procedurally with the light level of your target.
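
A possible scripted starting point for this optional task is sketched below; the object names and the mapping from light energy to iris scale are arbitrary placeholders, and you may prefer to achieve the same effect with drivers or shape keys instead.

import bpy

light = bpy.data.objects['gaze_target']  # assumed to be the light-emitting target from the previous step
iris = bpy.data.objects['iris_left']     # assumed iris mesh of the left eyeball

def update_iris(scene, depsgraph=None):
    # Shrink the iris as the target light gets brighter (an illustrative, arbitrary mapping).
    energy = light.data.energy                         # light power from Blender's light settings
    scale = max(0.2, min(1.0, 1.0 - energy / 1000.0))  # clamp to an arbitrary, plausible range
    iris.scale = (scale, scale, iris.scale[2])

# Re-evaluate on every frame change, e.g., when the light's energy is animated over time.
bpy.app.handlers.frame_change_post.append(update_iris)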

"},{"location":"teaching/comp0160_practical/#conclusion","title":"Conclusion","text":"

You may have multiple questions during the practical. Do not hesitate to reach out to us during the practical or via Moodle. Once you have completed the practical and trust that you have generated an excellent digital avatar, please let us know, as this can count as an investment towards improving your final mark for this course. If you provide your consent (since this contains your personal data -- your face), you can also share your Blender file with us by uploading it to a cloud service (Microsoft OneDrive provided by the university) and sharing the link of the file with us through an email. In your email, please make sure to state under what licence you are providing us the Blender file (e.g., MIT License, Creative Commons), and please make sure to state whether you allow us to use this data in scientific research in the future. Note that this exercise is also beneficial for your final assignment, and it can potentially help you achieve your task in your final project.

"},{"location":"teaching/siggraph2022_optimizing_vision_and_visuals/","title":"Optimizing Vision and Visuals: Lectures on Cameras, Displays and Perception","text":""},{"location":"teaching/siggraph2022_optimizing_vision_and_visuals/#people","title":"People","text":"

Koray Kavakl\u01311

David Walton2

Nick Antipa3

Rafa\u0142 Mantiuk4

Douglas Lanman5,6

Kaan Ak\u015fit2

1Ko\u00e7 University, 2University College London, 3University of California San Diego, 4University of Cambridge, 5Meta Reality Labs, 6University of Washington

SIGGRAPH 2022

"},{"location":"teaching/siggraph2022_optimizing_vision_and_visuals/#resources","title":"Resources","text":"

Lecture recording Code Foreword

Bibtex
@inproceedings{10.1145/3532720.3535650,\n author = {Kavakli, Koray and Walton, David Robert and Antipa, Nick and Mantiuk, Rafa\\l{} and Lanman, Douglas and Ak\\c{s}it, Kaan},\n title = {Optimizing Vision and Visuals: Lectures on Cameras, Displays and Perception},\n year = {2022},\n isbn = {9781450393621},\n publisher = {Association for Computing Machinery},\n address = {New York, NY, USA},\n url = {https://doi.org/10.1145/3532720.3535650},\n doi = {10.1145/3532720.3535650},\n booktitle = {ACM SIGGRAPH 2022 Courses},\n articleno = {17},\n numpages = {66},\n location = {Vancouver, British Columbia, Canada},\n series = {SIGGRAPH '22}\n}\n
"},{"location":"teaching/siggraph2022_optimizing_vision_and_visuals/#presentation","title":"Presentation","text":""},{"location":"teaching/siggraph2022_optimizing_vision_and_visuals/#abstract","title":"Abstract","text":"

The evolution of the internet is underway, where immersive virtual 3D environments (commonly known as metaverse or telelife) will replace flat 2D interfaces. Crucial ingredients in this transformation are next-generation displays and cameras representing genuinely 3D visuals while meeting the human visual system's perceptual requirements.

This course will provide a fast-paced introduction to optimization methods for next-generation interfaces geared towards immersive virtual 3D environments. Firstly, we will introduce lensless cameras for high dimensional compressive sensing (e.g., single exposure capture to a video or one-shot 3D). Our audience will learn to process images from a lensless camera at the end. Secondly, we introduce holographic displays as a potential candidate for next-generation displays. By the end of this course, you will learn to create your 3D images that can be viewed using a standard holographic display. Lastly, we will introduce perceptual guidance that could be an integral part of the optimization routines of displays and cameras. Our audience will gather experience in integrating perception to display and camera optimizations.

This course targets a wide range of audiences, from domain experts to newcomers. To do so, examples from this course will be based on our in-house toolkit to be replicable for future use. The course material will provide example codes and a broad survey with crucial information on cameras, displays and perception.

"},{"location":"teaching/siggraph2022_optimizing_vision_and_visuals/#relevant-research-works","title":"Relevant research works","text":"

Here are relevant research works from the authors:

  • Odak
  • Metameric Varifocal Holograms
  • Learned Holographic Light Transport
  • Unrolled Primal-Dual Networks for Lensless Cameras
"},{"location":"teaching/siggraph2022_optimizing_vision_and_visuals/#outreach","title":"Outreach","text":"

We host a Slack group with more than 250 members. This Slack group focuses on the topics of rendering, perception, displays and cameras. The group is open to the public, and you can become a member by following this link.

"},{"location":"teaching/siggraph2022_optimizing_vision_and_visuals/#contact-us","title":"Contact Us","text":"

Warning

Please reach us through email or through GitHub issues to ask your questions or to provide your feedback and comments.

"},{"location":"teaching/siggraph2022_optimizing_vision_and_visuals/#acknowledgements","title":"Acknowledgements","text":"

The authors would like to thank reviewers for their valuable feedback.

Kaan Ak\u015fit is supported by the Royal Society's RGS\\R2\\212229 - Research Grants 2021 Round 2 in building the hardware prototype used in generating the course material. Kaan Ak\u015fit is also supported by Meta Reality Labs inclusive rendering initiative 2022.

"},{"location":"timeline/","title":"Timeline","text":""},{"location":"timeline/#2024","title":"2024","text":""},{"location":"timeline/#april","title":"April","text":""},{"location":"timeline/#22-april-2024","title":"22 April 2024","text":"

Our paper, All-optical image denoising using a diffractive visual processor, is recognized as the most downloaded paper in March 2024 by Nature's Light and Science Applications. This work is a collaboration between \u00c7a\u011fatay I\u015f\u0131l, Tianyi Gan, Fazil Onuralp, Koray Mentesoglu, Jagrit Digani, Huseyin Karaca, Hanlong Chen, Jingxi Li, Deniz Mengu, Mona Jarrahi, Kaan Ak\u015fit, and Ozcan Aydogan.

Our paper, Multi-color Holograms Improve Brightness in Holographic Displays, is awarded with Graphics Replicability Stamp Initiative's replicability stamp. This work is a collaboration between Koray Kavakl\u0131, Liang Shi, Hakan Urey, Wojciech Matusik, and Kaan Ak\u015fit.

"},{"location":"timeline/#january","title":"January","text":""},{"location":"timeline/#29-january-2024","title":"29 January 2024","text":"

Our paper, AutoColor: Learned Light Power Control for Multi-Color Holograms, is presented at SPIE AR|VR|MR 2024. This work is a collaboration between Yicheng Zhan, Koray Kavakl\u0131, Hakan Urey, Qi Sun, and Kaan Ak\u015fit.

"},{"location":"timeline/#2023","title":"2023","text":""},{"location":"timeline/#december","title":"December","text":""},{"location":"timeline/#13-december-2023","title":"13 December 2023","text":"

Our paper, Multi-color Holograms Improve Brightness in Holographic Displays, is presented at SIGGRAPH Asia 2023. This work is a collaboration between Koray Kavakl\u0131, Liang Shi, Hakan Urey, Wojciech Matusik, and Kaan Ak\u015fit.

"},{"location":"timeline/#october","title":"October","text":""},{"location":"timeline/#30-october-2023","title":"30 October 2023","text":"

We are pleased to announce an achievement at the UKRI AI CDT Conference 2023 in Bristol, United Kingdom. Ahmet G\u00fczel showcased our research project, ChromaCorrect, among a diverse array of over 50 posters at the event. We are honored to have been awarded First Prize for Best Poster, and we thank the Foundational Artificial Intelligence Center at University College London.

"},{"location":"timeline/#12-13-october-2023","title":"12-13 October 2023","text":"

Kaan helped organize Optical Waveguides: A key to Socially Acceptable Augmented Reality Glasses? as an Optica Incubator. Kaan also gave an invited talk titled Role of Differentiable Models in Computational Display Research at the same incubator event.

"},{"location":"timeline/#11-october-2023","title":"11 October 2023","text":"

Kaan attended and presented at Meta's Academic Forum 2023 upon Meta Reality Labs' invitation. Kaan's talk was titled Unlocking Next-Generation Display Technologies with Holography.

"},{"location":"timeline/#9-10-october-2023","title":"9-10 October 2023","text":"

Kaan helped organize the Virtual Reality and Augmented Vision theme at Optica's Frontiers in Optics. Kaan also gave an invited talk on his group's work, Headsetless Holographic Virtual Reality Displays, in the same theme.

"},{"location":"timeline/#august","title":"August","text":""},{"location":"timeline/#16-august-2023","title":"16 August 2023","text":"

We are grateful to Optica for inviting our Ahmet Hamdi G\u00fczel to present his work at the Vision and Color summer data blast webinar. Did you miss it? The recording is now online.

"},{"location":"timeline/#june","title":"June","text":""},{"location":"timeline/#28-june-2023","title":"28 June 2023","text":"

Kaan presented an invited talk titled Could holographic displays be the key to achieving realism? at Stanford University.

"},{"location":"timeline/#1-june-2023","title":"1 June 2023","text":"

In her latest article, \"The Promise of Holographic Displays,\" Sandrine Ceurstemont gathered perspectives on the promise of holographic displays and provided some space for ours.

"},{"location":"timeline/#april_1","title":"April","text":""},{"location":"timeline/#21-april-2023","title":"21 April 2023","text":"

Our paper, ChromaCorrect: Prescription Correction in Virtual Reality Headsets through Perceptual Guidance, is published at Optica's Biomedical Optics Express. This work is a result of a collaboration with Ahmet H. G\u00fczel, Jeanne Beyazian, Praneeth Chakravarthula, and Kaan Ak\u015fit.

"},{"location":"timeline/#march","title":"March","text":""},{"location":"timeline/#28-march-2023","title":"28 March 2023","text":"

Our paper, HoloBeam: Paper-Thin Near-Eye Displays, is presented at IEEE VR 2023. This work is a collaboration between Yuta Itoh, and Kaan Ak\u015fit.

Our paper, Realistic Defocus Blur for Multiplane Computer-Generated Holography, is presented at IEEE VR 2023. This work is a collaboration between Koray Kavakl\u0131, Yuta Itoh, Hakan \u00dcrey, and Kaan Ak\u015fit.

"},{"location":"timeline/#15-march-2023","title":"15 March 2023","text":"

Kaan presented an invited talk titled Could holographic displays be the key to achieving realism in displays?. We are thankful to Huawei, United Kingdom for their kind hospitality.

"},{"location":"timeline/#february","title":"February","text":""},{"location":"timeline/#6-february-2023","title":"6 February 2023","text":"

Kaan presented an invited talk titled Could holographic displays be the key to achieving realism?. We are thankful to University of Rochester's institute of optics for their kind hospitality.

"},{"location":"timeline/#january_1","title":"January","text":""},{"location":"timeline/#4-january-2023","title":"4 January 2023","text":"

We are thankful for T\u00dcB\u0130TAK's 2224-A support, which enabled our valuable member and PhD student, Koray Kavakl\u0131, to present his work at SPIE's Photonics West 2023. This fund covers a significant portion of his attendance at SPIE's Photonics West.

"},{"location":"timeline/#3-january-2023","title":"3 January 2023","text":"

We are thankful to Oracle for offering to let us rely on their cloud infrastructure for our computational needs. We had to decline their award, as we had recently purchased new computational resources.

"},{"location":"timeline/#2022","title":"2022","text":""},{"location":"timeline/#november","title":"November","text":""},{"location":"timeline/#18-november-2022","title":"18 November 2022","text":"

Our paper, Unrolled Primal-Dual Networks for Lensless Cameras, is published at Optica's Optics Express. This work is a result of a collaboration between Oliver Kingshott, Nick Antipa, Emrah Bostan, and Kaan Ak\u015fit.

"},{"location":"timeline/#october_1","title":"October","text":""},{"location":"timeline/#25-october-2022","title":"25 October 2022","text":"

Our paper, Metameric Inpainting for Image Warping, is published at IEEE's Transaction on Visualization and Computer Graphics. This work is a collaboration between Rafael Kuffner dos Anjos, David Robert Walton, Sebastian Friston, David Swapp, Anthony Steed, Tobias Ritschel, and Kaan Ak\u015fit.

"},{"location":"timeline/#19-october-2022","title":"19 October 2022","text":"

In collaboration with Meta Reality Laboratory's Douglas Lanman, we helped organise a successful augmented reality and virtual reality theme at Optica's Frontiers in Optics 2022. Kaan Ak\u015fit presented a talk titled Realistic Image Reconstruction with Multiplane Computer-Generated Holography, while Koray Kavakl\u0131 presented a talk titled Introduction to Odak: a Differentiable Toolkit for Optical Sciences, Vision Sciences and Computer Graphics.

"},{"location":"timeline/#august_1","title":"August","text":""},{"location":"timeline/#3-august-2022","title":"3 August 2022","text":"

Our course, Optimizing Vision and Visuals: Lectures on Cameras, Displays and Perception, is available online as part of SIGGRAPH 2022. This work is a collaboration between Koray Kavakl\u0131, David Walton, Nick Antipa, Rafa\u0142 Mantiuk, Douglas Lanman and Kaan Ak\u015fit.

"},{"location":"timeline/#june_1","title":"June","text":""},{"location":"timeline/#23-june-2022","title":"23 June 2022","text":"

We are grateful to the Meta Reality Labs for supporting our research through the inclusive rendering initiative 2022. Their award will enable us to investigate inclusive graphics pipelines in terms of human visual perception. Their award is worth 75000 USD.

"},{"location":"timeline/#may","title":"May","text":""},{"location":"timeline/#5-may-2022","title":"5 May 2022","text":"

Our panel, Telelife: A Vision of Remote Living in 2035, is presented at CHI 2022. This work is a collaboration between Kenan Bekta\u015f, Jeeeun Kim, Kiyoshi Kiyokawa, Anthony Steed, Tobias H\u00f6llerer, Nataliya Kosmyna, Misha Sra, Jason Orlosky, and Kaan Ak\u015fit.

"},{"location":"timeline/#march_1","title":"March","text":""},{"location":"timeline/#14-march-2022","title":"14 March 2022","text":"

We introduce our work, Metameric Varifocal Holograms, at IEEE VR 2022. This work is a collaboration between David R. Walton, Koray Kavakli, Rafael Kuffner dos Anjos, David Swapp, Tim Weyrich, Hakan Urey, Anthony Steed, Tobias Ritschel and Kaan Ak\u015fit. David Walton presented the work at the conference.

Kaan Ak\u015fit served on the program committee for journal papers, and also as a member of the technical achievement and lifetime achievement awards committee.

"},{"location":"timeline/#11-march-2022","title":"11 March 2022","text":"

Kaan Ak\u015fit, together with Jannick Rolland and Babak Amirsolaimani, is acting as a guest editor for the Journal of Optical Microsystems from SPIE in a special issue targeting optics research in augmented, virtual and mixed reality. Here is a link for the call flyer, and to submit your work, please follow this link.

"},{"location":"timeline/#february_1","title":"February","text":""},{"location":"timeline/#23-february-2022","title":"23 February 2022","text":"

Kaan Ak\u015fit serves on the program committee for EGSR 2022, which will take place as a hybrid conference, both virtual and physically located in Prague, Czech Republic.

"},{"location":"timeline/#january_2","title":"January","text":""},{"location":"timeline/#24-january-2022","title":"24 January 2022","text":"

We presented two invited talks at SPIE's Photonics West. Our first talk is on Perceptually guided computer-generated holography, and our second talk is on Beaming Displays: Towards Displayless Augmented Reality Near-eye Displays.

"},{"location":"timeline/#17-january-2022","title":"17 January 2022","text":"

We thank the next byte podcast for covering our collaboration with MIT on the SensiCut project. They did a great job explaining our work in their podcast. You can reach the podcast using this link.

"},{"location":"timeline/#2021","title":"2021","text":""},{"location":"timeline/#november_1","title":"November","text":""},{"location":"timeline/#29-november-2021","title":"29 November 2021","text":"

Our vision, \"Telelife: The Future of Remote Living\", is published at Frontiers in Virtual Reality. We share our vision for the future, specifically in the year 2035.

"},{"location":"timeline/#12-november-2021","title":"12 November 2021","text":"

Our invited work, \"Learned Holographic Light Transport\", is published at Optica's Applied Optics. We show that light transport can be made more accurate by learning hardware dependent kernels.

"},{"location":"timeline/#8-november-2021","title":"8 November 2021","text":"

We are grateful to the Royal Society for awarding us with their research grants 2021 round two scheme. Their award will enable us to invent new holographic light transport models in the future. This award is worth 14994.65 GBP. The title of our submission is Learned models for Computer-Generated Holography.

"},{"location":"timeline/#november_2","title":"November","text":""},{"location":"timeline/#1-2-november-2021","title":"1-2 November 2021","text":"

In collaboration with Meta Reality Laboratory's Douglas Lanman, we helped organise a successful augmented reality and virtual reality theme at Optica's Frontiers in Optics 2021. Kaan Ak\u015fit presented his work on holographic beaming displays proposal at the same event.

"},{"location":"timeline/#february_2","title":"February","text":""},{"location":"timeline/#18-february-2021","title":"18 February 2021","text":"

We appear on UCL news for receiving UCL-Osaka university strategic partnership fund.

"},{"location":"timeline/#january_3","title":"January","text":""},{"location":"timeline/#4-january-2021","title":"4 January 2021","text":"

Kaan Ak\u015fit joined University College London's computer science department as an Associate Professor. He is now part of the Virtual Reality and Computer Graphics group, and he leads the Computational light laboratory.

"},{"location":"timeline/#2020","title":"2020","text":""},{"location":"timeline/#november_3","title":"November","text":""},{"location":"timeline/#17-november-2020","title":"17 November 2020","text":"

Kaan Ak\u015fit and Jason Orlosky have been granted UCL-Osaka University Strategic Partner Funds. This award is worth 10000 GBP. The title of our submission is Development of a joint Telelife technology seminar using virtual reality.

"},{"location":"timeline/#august_2","title":"August","text":""},{"location":"timeline/#1-august-2020","title":"1 August 2020","text":"

Kaan Ak\u015fit has left his Senior Research Scientist position at NVIDIA in the US and accepted an Associate Professor position in University College London's computer science department.

"}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 00000000..7c2aeb32 --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,87 @@ + + + + https://complightlab.com/ + 2024-10-22 + + + https://complightlab.com/documentation/ + 2024-10-22 + + + https://complightlab.com/documentation/3d_printing/ + 2024-10-22 + + + https://complightlab.com/documentation/become_phd_student/ + 2024-10-22 + + + https://complightlab.com/documentation/getting_started/ + 2024-10-22 + + + https://complightlab.com/documentation/logo/ + 2024-10-22 + + + https://complightlab.com/lectures/ + 2024-10-22 + + + https://complightlab.com/outreach/ + 2024-10-22 + + + https://complightlab.com/people/ + 2024-10-22 + + + https://complightlab.com/publications/ + 2024-10-22 + + + https://complightlab.com/publications/focal_surface_light_transport/ + 2024-10-22 + + + https://complightlab.com/publications/holobeam/ + 2024-10-22 + + + https://complightlab.com/publications/multi_color/ + 2024-10-22 + + + https://complightlab.com/publications/realistic_defocus_cgh/ + 2024-10-22 + + + https://complightlab.com/publications/spec_track/ + 2024-10-22 + + + https://complightlab.com/teaching/ + 2024-10-22 + + + https://complightlab.com/teaching/comp0160_coursework_1/ + 2024-10-22 + + + https://complightlab.com/teaching/comp0160_perception_and_interfaces/ + 2024-10-22 + + + https://complightlab.com/teaching/comp0160_practical/ + 2024-10-22 + + + https://complightlab.com/teaching/siggraph2022_optimizing_vision_and_visuals/ + 2024-10-22 + + + https://complightlab.com/timeline/ + 2024-10-22 + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 00000000..cf6f20e5 Binary files /dev/null and b/sitemap.xml.gz differ diff --git a/stylesheets/card.css b/stylesheets/card.css new file mode 100644 index 00000000..7329a8b3 --- /dev/null +++ b/stylesheets/card.css @@ -0,0 +1,80 @@ +.cards-list { + z-index: 0; + width: 100%; + display: flex; + justify-content: space-around; + flex-wrap: wrap; +} + +.card { + margin: 30px auto; + width: 260px; + height: 260px; + border-radius: 40px; +box-shadow: 5px 5px 30px 7px rgba(0,0,0,0.25), -5px -5px 30px 7px rgba(0,0,0,0.22); + cursor: pointer; + transition: 0.4s; +} + +.card .card_image { + width: inherit; + height: inherit; + border-radius: 40px; +} + +.card .card_image img { + width: inherit; + height: inherit; + border-radius: 40px; + object-fit: cover; +} + +.card .card_title { + text-align: center; + border-radius: 0px 0px 40px 40px; + font-weight: bold; + font-size: 14px; + margin-top: -80px; + height: 40px; +} + +.card:hover { + transform: scale(0.9, 0.9); + box-shadow: 5px 5px 30px 15px rgba(0,0,0,0.25), + -5px -5px 30px 15px rgba(0,0,0,0.22); +} + +.title-white { + color: white; +} + +.title-black { + color: black; +} + +.title-colorless { +} + +@media all and (max-width: 500px) { + .card-list { + /* On small screens, we are no longer using row direction but column */ + flex-direction: column; + } +} + + +/* +.card { + margin: 30px auto; + width: 300px; + height: 300px; + border-radius: 40px; + background-image: url('https://i.redd.it/b3esnz5ra34y.jpg'); + background-size: cover; + background-repeat: no-repeat; + background-position: center; + background-repeat: no-repeat; +box-shadow: 5px 5px 30px 7px rgba(0,0,0,0.25), -5px -5px 30px 7px rgba(0,0,0,0.22); + transition: 0.4s; +} +*/ diff --git a/stylesheets/extra.css b/stylesheets/extra.css new file mode 100644 index 00000000..2391d71b --- /dev/null +++ 
b/stylesheets/extra.css @@ -0,0 +1,16 @@ +[data-md-color-scheme="youtube"] { + --md-primary-fg-color: #a61600; + --md-primary-fg-color--light: #a61600; + --md-primary-fg-color--dark: #a61600; +} + +[data-md-color-scheme="slate"] { + --md-hue: 210; + --md-primary-fg-color: #a61600; + +} + +.md-grid { + max-width: 1440px; +} + diff --git a/teaching/comp0160_coursework_1/index.html b/teaching/comp0160_coursework_1/index.html new file mode 100644 index 00000000..5f5bcd22 --- /dev/null +++ b/teaching/comp0160_coursework_1/index.html @@ -0,0 +1,2771 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Coursework 1 - Computational Light Laboratory at University College London + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Coursework I: COMP0160 Perception and Interfaces

+

Release date: 24th January 2022

+

Due date: 7th February 2022

+

Instructor: Kaan Akşit

+

Background

+

COMP0160: Perception and Interfaces is a course that offers students a gateway to get familiar with various aspects of perception and interfaces. This document explains the task that students have to conduct to deliver successful work for this assignment. Specifically, it describes the first coursework in the perception and interfaces course. The topic of the first coursework is related to the human visual system. As the students compile their coursework, they will gain a strong understanding of how the human visual system works under different eye prescriptions (e.g., myopia, astigmatism). A detailed understanding of the given topic can help students gain insights towards solving problems in domains such as computational displays, perceptual graphics and computational imaging. The software tools used in this course are publicly available. They are shared across the forefront of various industries and academia (e.g., data science to computational approaches in physics, biology or chemistry).

+

Requirements

+

This assignment assumes that you have an understanding of programming with the Python language and that you are familiar with the Torch library, which provides access to linear algebra calculations. Within this assignment, you will be asked to deliver your solution in a Jupyter Notebook format. Students are expected to be familiar with working in Jupyter Notebooks and to know how to save notebooks so that they can deliver their work for evaluation in the required form. We typically use the Matplotlib library for plotting purposes while using Jupyter Notebooks.

+

On our production machines, we use Python 3.9.7, Torch 1.9.0, Matplotlib 3.3.4 and Jupyter Notebook 6.2.0. To successfully compile the assignment, make sure to have these libraries installed on your computer properly. Given that you are going to compile your work with Torch, you can choose to run your code on either CPU or GPU by selecting the proper device in your code. However, at the time you deliver your code, please make sure that your code runs on CPU. As a practical observation, you can also get a sense of the speed differences between those two devices and report them within your Jupyter Notebook (optional). We typically run these on a Linux operating system. However, it is not a requirement for students to use the same operating system, as these components also run on your favourite operating system (e.g., Windows, Android, Mac OS or alike). In your coursework, make sure to add docstring-style documentation for every function in your code and make sure to comment between lines to explain your steps within a function.

+

Before starting with the tasks, we encourage students to attend the second lecture of the perception and interfaces course, namely Lecture 2: Visual Perception in Perceptual Graphics and Computational Displays (recording available on Moodle).

+ + +

Special note from your instructor: We designed this homework to prepare you better for what comes next in your life. If you do not have the right background to use the tools proposed in this coursework, or if you are a confused absolute beginner, please do not hesitate to reach out to us through Moodle. We are here to support you. Please carefully frame your questions as you approach us for support (e.g., what you want to ask and what you expect) so that we can support you at our best.

+

Problem description

+

Each and every one of us has a unique visual system. At the heart of our visual system lie our eyes. Our eyes can be simplified as an optical instrument that images a three-dimensional scene onto our retinas, a sensor-like cellular structure. In this assignment, your task is to develop a user interface in a Jupyter Notebook that simulates how our vision is affected by various kinds of eye prescriptions. Simply put: how would you perceive a scene if you had a certain prescription? We expect you to have this simulator in a live view, enabling the user to choose different eye prescriptions based on Zernike polynomials. Before conducting any work, we suggest you go through the listed references below:

+ +

In addition, you can get a sense of the importance of prescriptions in next-generation display technologies by going through the survey paper below (actual industrial applications of what you learn in this coursework):

+ +

These references can help you to find the required technical details for your subtasks.

+

Zernike Polynomial generator (10 points)

+

The first task is to write a Pythonic class that can generate Zernike polynomials on demand. These polynomials can help you represent the point-spread functions of people with a prescription. A point-spread function can be best described as the system response of your eye to a given scene. In the way you will use point-spread functions, they can be described as kernels that help you describe your eye as a linear transform, or a system that is represented with a single convolution. Once you have fully compiled the zernike_polynomial_generator class in your Notebook, please proceed with the next task:

+
class zernike_polynomial_generator():
+
+    def __init__(self):
+          ...
+
+

Our expectation from you, in this case, is to have multiple functions in your zernike_polynomial_generator class that produce various point-spread functions; read more from here (7.5 points). A person may have an eye prescription composed of various point-spread functions. To support such a case, make sure to add a function to your class that outputs a weighted sum of chosen point-spread functions (2.5 points).
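For illustration only, below is a minimal sketch (not the official solution and not a complete prescription model) of how Zernike polynomials and a weighted sum of them could be evaluated on a grid with Torch; the grid size, index convention and the step from a polynomial to an actual point-spread function are assumptions for you to revisit.

import math
import torch


class zernike_polynomial_generator():

    def __init__(self, size=256):
        # Cache a unit-disk grid in polar coordinates.
        coordinates = torch.linspace(-1., 1., size)
        self.x = coordinates.unsqueeze(0).repeat(size, 1)
        self.y = coordinates.unsqueeze(1).repeat(1, size)
        self.rho = torch.sqrt(self.x ** 2 + self.y ** 2)
        self.theta = torch.atan2(self.y, self.x)

    def polynomial(self, n, m):
        # Evaluate the Zernike polynomial Z_n^m over the unit disk.
        # Assumes n - |m| is even (a valid Zernike index pair).
        m_abs = abs(m)
        radial = torch.zeros_like(self.rho)
        for k in range((n - m_abs) // 2 + 1):
            coefficient = (-1) ** k * math.factorial(n - k) / (
                math.factorial(k)
                * math.factorial((n + m_abs) // 2 - k)
                * math.factorial((n - m_abs) // 2 - k)
            )
            radial = radial + coefficient * self.rho ** (n - 2 * k)
        angular = torch.cos(m * self.theta) if m >= 0 else torch.sin(m_abs * self.theta)
        result = radial * angular
        result[self.rho > 1.] = 0.  # restrict to the unit disk
        return result

    def weighted_sum(self, indices, weights):
        # Combine several polynomials, e.g. indices=[(2, 0), (2, 2)], weights=[0.7, 0.3].
        result = torch.zeros_like(self.rho)
        for (n, m), weight in zip(indices, weights):
            result = result + weight * self.polynomial(n, m)
        return result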

+

Forward model (10 points)

+

We will work with the assumption that our eyes respond to every point in a given scene in the same way (e.g., stationary kernels, not spatially varying kernels). You have to have a function that is able to load images from a given path.

+
def load_image(filename):
+     """
+    Function to load an image.
+
+    Parameters
+    ------------
+    filename            : str
+                                Filename of the image.
+
+    Returns
+    --------
+    image               : torch.tensor
+                               Loaded image.
+     """
+     ....
+    return image
+
+

Please do not hesitate to use images from Creative Commons for your experiments, and please make sure that these images are clean, meaning ethically good to work with. Please also make sure to work with images that have a 1920x1080x3 resolution, and please reduce each image to a single-channel image by taking the average across the colour channels (1920x1080, black and white). Make sure to provide the image that you use together with your Jupyter Notebook in a compressed file format (ZIP).
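As one possible starting point (a minimal sketch under the assumption that Matplotlib can decode your chosen image format, not the required solution), the loader below reads an image and averages the colour channels into a single-channel torch tensor:

import matplotlib.pyplot as plt
import torch


def load_image(filename):
    # Read the image into a numpy array and convert it to a float torch tensor.
    image = torch.from_numpy(plt.imread(filename).copy()).float()
    if image.ndim == 3:
        # Average across the colour channels to obtain a single-channel image.
        image = image.mean(dim=2)
    return image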

+

You will be using this specific image load definition to load images, and you will process these images with your forward model function. +Here forward model corresponds to convolving the loaded image with a combination of Zernike polynomials to simulate various kinds of eye prescriptions. +In the simplest form, your forward model should look like below:

+
def forward(image, psf):
+     """
+    Forward model, convolving the given image with a given point-spread function.
+
+    Parameters
+    ------------
+    image              : torch.tensor
+                               Image as a torch tensor (MxN).
+    psf                   : torch.tensor
+                              Point-spread function as a torch tensor (MxN).
+
+    Returns
+    --------
+    result               : torch.tensor
+                               Aberrated image.
+     """
+     ....
+     return result
+
+

Loading images properly will earn you 3 points (3 points). The remaining 7 points are dedicated to the forward model definition (7 points).

+

Hint for the forward model: torch.nn.Conv2d (you do not necessarily have to use it, but it can help).
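To make that hint concrete, here is a minimal sketch that uses torch.nn.functional.conv2d rather than torch.nn.Conv2d; it assumes both inputs are two-dimensional tensors and that the point-spread function has odd width and height so the output keeps the input size:

import torch


def forward(image, psf):
    # Add the batch and channel dimensions that conv2d expects (1x1xMxN).
    image = image.unsqueeze(0).unsqueeze(0)
    psf = psf.unsqueeze(0).unsqueeze(0)
    # Pad by half the kernel size so the aberrated image keeps the input size.
    padding = (psf.shape[-2] // 2, psf.shape[-1] // 2)
    # Note: conv2d computes a cross-correlation; flip the psf beforehand if you need a strict convolution.
    result = torch.nn.functional.conv2d(image, psf, padding=padding)
    return result.squeeze(0).squeeze(0)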

+

Visualizer (10 points)

+

The last bit we want you to add to the Jupyter Notebook relates to the way you will visualize the outcome of your forward model. We want your code to be as interactive as you can make it on your given computer hardware. Make sure to visualize the images of your forward model using Matplotlib. Make sure to provide buttons and controls for your users to choose different combinations of Zernike polynomials to formulate a point-spread function, and make sure to visualize the point-spread functions that you have generated. Note that we will rely heavily on your visualizer to assess the outcome of your code; please pay attention to make sure that you have provided all the controls (either as variables to manipulate, or buttons, or sliders), and that they are easy for a user to work with. Note that you are allowed to use libraries other than Matplotlib, such as Pyplot, or if you want to develop a user interface outside of the boundaries of a Jupyter Notebook, that is also fine. But if you do that, please make sure that you have communicated the change clearly and that we are able to run your code.
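As a self-contained sketch of such interactivity (assuming ipywidgets is available in your Jupyter environment), the snippet below drives a simple Gaussian blur with a slider; the Gaussian kernel and the random image are placeholders for your Zernike-based point-spread functions and your loaded image:

import matplotlib.pyplot as plt
import torch
from ipywidgets import FloatSlider, interact


def gaussian_psf(sigma, size=31):
    # A placeholder point-spread function; replace with your Zernike-based kernels.
    coordinates = torch.linspace(-(size // 2), size // 2, size)
    x = coordinates.unsqueeze(0).repeat(size, 1)
    y = coordinates.unsqueeze(1).repeat(1, size)
    psf = torch.exp(-(x ** 2 + y ** 2) / (2. * sigma ** 2))
    return psf / psf.sum()


image = torch.rand(256, 256)  # replace with your loaded image


def show(sigma=2.0):
    psf = gaussian_psf(sigma)
    result = torch.nn.functional.conv2d(
        image[None, None], psf[None, None], padding=psf.shape[-1] // 2
    )[0, 0]
    figure, axes = plt.subplots(1, 2, figsize=(10, 4))
    axes[0].imshow(psf, cmap='gray')
    axes[0].set_title('Point-spread function')
    axes[1].imshow(result, cmap='gray')
    axes[1].set_title('Simulated perception')
    plt.show()


interact(show, sigma=FloatSlider(min=0.5, max=10., step=0.5, value=2.))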

+

If you can plot the outcome of the forward model, this plotting can guarantee you half of the points you can receive (5 points). +The remaining points can be received as you introduce more sophistication to your visualizer, as explained above (5 points).

+

Problem and potential solutions (15 points)

+

We want you to add a text section to your notebook, where you describe an unsolved or partially solved scientific problem related to eye prescriptions and visuals (displays, graphics or any other form). The source of this problem can be the existing literature, and please make sure to survey using your favourite search engines, both academic and non-academic (e.g., Brave, Google Scholar, etc.). The problem can also rely on your practical observations, as long as you describe it clearly. You should also provide potential solutions to the problem that you have found in the literature, together with your own predictions towards new solutions in the future. The text cannot be more than 500 words and no less than 250 words. Note that the length of your text is not an indicator of success, and the most powerful writing happens in shorter forms.

+

You will receive half of the points from your problem description (7.5 points). +The remaining half will be from your proposed solution (7.5 points).

+

Contacting Us

+

The preferred way of communication is through University College London's online lecture system, Moodle. Please use the Moodle forum for your questions related to the coursework.

+ + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/teaching/comp0160_perception_and_interfaces/index.html b/teaching/comp0160_perception_and_interfaces/index.html new file mode 100644 index 00000000..955e6265 --- /dev/null +++ b/teaching/comp0160_perception_and_interfaces/index.html @@ -0,0 +1,2811 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Description - Computational Light Laboratory at University College London + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

COMP0160: Perception and Interfaces

+

Summary

+

The COMP0160: Perception and Interfaces course offers students a gateway to get familiar with various aspects of perception and interfaces. Greater detail on the course and its broad description can be found on the course website.

+

The Computational Light Laboratory contributes to COMP0160: Perception and Interfaces by providing two lectures introducing the human visual system, its relation to graphics and displays, and sensing modalities in emerging devices (e.g., near-eye displays for virtual reality and augmented reality). Each of these lectures is two hours long. In addition, we support these lectures with laboratory assignments for the students, which are vital for completing the course.

+

Timetable

+

The timetable provided below shows the parts of COMP0160 that are provided by the Computational Light Laboratory.

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Date | Instructor(s) | Content
14 January 2022 - 27th March 2022 | Kaan Akşit | Practical
17th January 2022 - 23rd January 2022 | Kaan Akşit | Visual Perception in graphics and displays
28th February 2022 - 6th March 2022 | Kaan Akşit | Integrating Sensory Information in Computational Displays
+

Parts

+

Practical

+

12:00 noon to 1:00 pm, Fridays, 14th January 2022 - 27th March 2022

+

Chandler House G15

+

Description (Public)

+

Coursework

+

First coursework

+

Lecture 2: Visual perception in perceptual graphics and computational displays

+

Winter 2022

+

Online

+

Recording (Password protected)

+

Slides (Invitation required)

+

This lecture focuses on human visual perception and its applications in computer graphics and computational display domains.

+
+Details +

Summary: +The students will learn about human visual perception in this course. +They will primarily learn about the eye and its structure. +The information about the eye explained throughout the lecture will be linked to designing computational displays and perceptual graphics with real cases from the recent literature. +Towards the end of this lecture, students will have enough information to build a simplified optical model of a human eye. +They will be encouraged to build an eye model using this simplified optical simulation of the human eye.

+

References:

+
    +
  • +

    Panero, Julius, and Martin Zelnik. Human dimension & interior space: a source book of design reference standards. Watson-Guptill, 1979.

    +
  • +
  • +

    Bekerman, Inessa, Paul Gottlieb, and Michael Vaiman. "Variations in eyeball diameters of the healthy adults." Journal of ophthalmology 2014 (2014).

    +
  • +
  • +

    Roberts, Bethany R., and Juliet L. Osborne. "Testing the efficacy of a thermal camera as a search tool for locating wild bumble bee nests." Journal of Apicultural Research 58.4 (2019): 494-500.

    +
  • +
  • +

    Park, George E., and Russell Smith Park. "Further evidence of change in position of the eyeball during fixation." Archives of Ophthalmology 23.6 (1940): 1216-1230.

    +
  • +
  • +

    Koulieris, George Alex, et al. "Near‐eye display and tracking technologies for virtual and augmented reality." Computer Graphics Forum. Vol. 38. No. 2. 2019.

    +
  • +
  • +

    Cakmakci, Ozan, and Jannick Rolland. "Head-worn displays: a review." Journal of display technology 2.3 (2006): 199-216.

    +
  • +
  • +

    De Groot, S. G., and J. W. Gebhard. "Pupil size as determined by adapting luminance." JOSA 42.7 (1952): 492-495.

    +
  • +
  • +

    Hunt, Robert William Gainer. "Light and dark adaptation and the perception of color." JOSA 42.3 (1952): 190-199.

    +
  • +
  • +

    Han, S. H., et al. "The Change of Pupil Cycle Time after Occlusion Therapy in Amblyopia." Journal of the Korean Ophthalmological Society 38.2 (1997): 290-295.

    +
  • +
  • +

    Fine, I., et al. "Optical properties of the sclera." Physics in Medicine & Biology 30.6 (1985): 565.

    +
  • +
  • +

    Zoulinakis, Georgios, et al. "Accommodation in human eye models: a comparison between the optical designs of Navarro, Arizona and Liou-Brennan." International journal of ophthalmology 10.1 (2017): 43.

    +
  • +
  • +

    Herndon, Leon W., Jennifer S. Weizer, and Sandra S. Stinnett. "Central corneal thickness as a risk factor for advanced glaucoma damage." Archives of ophthalmology 122.1 (2004): 17-21.

    +
  • +
  • +

    Glasser, Adrian, and Melanie CW Campbell. "Presbyopia and the optical changes in the human crystalline lens with age." Vision research 38.2 (1998): 209-229.

    +
  • +
  • +

    Bharadwaj, Shrikant R., and Clifton M. Schor. "Acceleration characteristics of human ocular accommodation." Vision Research 45.1 (2005): 17-28.

    +
  • +
  • +

    Campbell, F. W., and G. Westheimer. "Dynamics of accommodation responses of the human eye." The Journal of physiology 151.2 (1960): 285-295.

    +
  • +
  • +

    Heron, Gordon, W. N. Charman, and C. Schor. "Dynamics of the accommodation response to abrupt changes in target vergence as a function of age." Vision research 41.4 (2001): 507-519.

    +
  • +
  • +

    Phillips, Stephen, Douglas Shirachi, and Lawrence Stark. "Analysis of accommodative response times using histogram information." Optometry and Vision Science 49.5 (1972): 389-401.

    +
  • +
  • +

    Deering, Michael F. "A photon accurate model of the human eye." ACM Transactions on Graphics (TOG) 24.3 (2005): 649-658.

    +
  • +
  • +

    Ratnam, Kavitha, et al. "Relationship between foveal cone structure and clinical measures of visual function in patients with inherited retinal degenerations." Investigative ophthalmology & visual science 54.8 (2013): 5836-5847.

    +
  • +
  • +

    Kim, Jonghyun, et al. "Foveated AR: dynamically-foveated augmented reality display." ACM Transactions on Graphics (TOG) 38.4 (2019): 1-15.

    +
  • +
+
+

Lecture 7: Integrating Sensory Information in Computational Displays

+

Winter 2022

+

Online

+

Recording (Password protected)

+

Slides (Invitation required)

+

This lecture focuses on integrating various kinds of sensory information to the next generation displays.

+
+Details +

Summary: +In this course, students will learn about sensors and their integration into modern display systems such as Virtual and Augmented Reality near-eye displays and three-dimensional displays. +In the first half, a review of various kinds of sensors that could capture vital signs from a user, such as heart rate and gaze orientation, will be provided. +The second half will cover applications that use captured sensory information. These applications will be sampled from actual products on the market and research prototypes at the forefront of science.

+
    +
  • +

    Cennini, G., Arguel, J., Akşit, K., & van Leest, A. (2010). Heart rate monitoring via remote photoplethysmography with motion artifacts reduction. Optics express, 18(5), 4867-4875.

    +
  • +
  • +

    Li, Richard, Eric Whitmire, Michael Stengel, Ben Boudaoud, Jan Kautz, David Luebke, Shwetak Patel, and Kaan Akşit. "Optical gaze tracking with spatially-sparse single-pixel detectors." In 2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR), pp. 117-126. IEEE, 2020.

    +
  • +
  • +

    Angelopoulos, Anastasios N., Julien NP Martel, Amit P. Kohli, Jorg Conradt, and Gordon Wetzstein. "Event-Based Near-Eye Gaze Tracking Beyond 10,000 Hz." IEEE transactions on visualization and computer graphics 27, no. 5 (2021): 2577-2586.

    +
  • +
  • +

    Wei, Shih-En, Jason Saragih, Tomas Simon, Adam W. Harley, Stephen Lombardi, Michal Perdoch, Alexander Hypes, Dawei Wang, Hernan Badino, and Yaser Sheikh. "Vr facial animation via multiview image translation." ACM Transactions on Graphics (TOG) 38, no. 4 (2019): 1-16.

    +
  • +
  • +

    Yaldiz, Mustafa B., Andreas Meuleman, Hyeonjoong Jang, Hyunho Ha, and Min H. Kim. "DeepFormableTag: end-to-end generation and recognition of deformable fiducial markers." ACM Transactions on Graphics (TOG) 40, no. 4 (2021): 1-14.

    +
  • +
  • +

    Glauser, O., Wu, S., Panozzo, D., Hilliges, O., & Sorkine-Hornung, O. (2019). Interactive hand pose estimation using a stretch-sensing soft glove. ACM Transactions on Graphics (TOG), 38(4), 1-15.

    +
  • +
  • +

    Glauser, O., Panozzo, D., Hilliges, O., & Sorkine-Hornung, O. (2019). Deformation capture via soft and stretchable sensor arrays. ACM Transactions on Graphics (TOG), 38(2), 1-16.

    +
  • +
  • +

    HP Reverb G2 VR Headset

    +
  • +
  • +

    MediaPipe Iris: Real-time Iris Tracking and Depth Estimation

    +
  • +
  • +

    Brelyon: a window to a whole new world

    +
  • +
  • +

    Tobii's eye and head tracking for professional esports

    +
  • +
+
+

Team

+
+ +
+

Kaan Akşit

+

Instructor

+

E-mail +

+

Contact Us

+
+

Warning

+

The preferred way of communication is through University College London's online lecture system, Moodle. Please do not reach us through email unless what you want to achieve, establish or ask is not possible through the online lecture system.

+
+ + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/teaching/comp0160_practical/index.html b/teaching/comp0160_practical/index.html new file mode 100644 index 00000000..f50591b7 --- /dev/null +++ b/teaching/comp0160_practical/index.html @@ -0,0 +1,2490 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Practical - Computational Light Laboratory at University College London + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Welcome

+

Start date: 12:00 pm, 14th January 2022

+

Duration: This description is valid for the first four weeks of the practical sessions.

+

Welcome to the practical session for COMP0160: Perception and Interfaces. This course is designed to offer you, the students, a gateway to get familiar with various aspects of perception and interfaces.

+

In your lectures, you will learn about the human visual system and how humans perceive light from their surroundings. +Specifically, the course "Visual Perception in graphics and displays", held between 17th January and 23rd January 2022, will teach you how eyes gaze and perceive their surroundings.

+

In these practical sessions, we provide the opportunity for you to get familiar with standard tools used in computer graphics. These tools will help you translate what you learn in the lectures into an actual outcome. The rest of this document describes your task in these practical sessions. Remember that the task description provided here contains information on the session's aims, objectives and goals. But the description does not necessarily contain all the steps required to conduct your work. This is because everybody learns things in a slightly different way. We deliberately create that environment for you to elaborate, by not strictly telling you which steps to take.

+

Please make sure to bring your personal laptop with you if you are physically attending.

+

Once again welcome to COMP0160 practical sessions!

+

Kaan Akşit

+

Background (15 minutes)

+

Imagine you woke up ten years from now. You graduated from your programme long ago, and you have found yourself a job in industry. Guess what your job is about! It is about humans and their perception.

+

The company that you work at builds digital humans and avatars to represent people or their artificial intelligence-powered bots. These bots offer innovative assistance services, such as representing you when you are unavailable (multi-conversation) or offering services that are carried out by human workers today (e.g., call centres). These efforts are part of their upcoming revolution in virtual environments, more popularly known nowadays as the Metaverse.

+

In this hypothetical future, your manager calls you on your virtual reality device while you are having your first coffee of the day. She assigns you as the lead for upgrading the eye model that they use in their avatars and bots. They want to make sure that the eyes rendered on these avatars look realistic and physically plausible.

+

You now have these flashbacks, remembering that you learned in some class in the past called "Visual Perception in Graphics and Displays" how eyes rotate, shift and view.

+

In this exercise, please put that information to work: we ask you to build an eye model that rotates by following the constraints taught in that class. In short, your task is to build a 3D model of the eye that follows physical constraints while gazing (e.g., eyeballs cannot rotate 180 degrees; even today's cutting-edge PC games make such easy mistakes).

+

Tools (45 minutes)

+

In this practical, you will be using Blender 3D, an open-source tool for modelling and rendering that represents the state of the art in computer graphics.

+

Please make sure to download Blender 3D to your production machine and familiarise yourself with the software controls. +You can find many tutorials online by typing Blender tutorial in your favourite search engine (e.g., Google) or cloud video platform (e.g., YouTube).

+

Create a mesh of your face (1 hour)

+

Your first target in this practical is to figure out how to create a mesh of your face from a photograph. We will use the face mesh you created to place the two eyeballs that we aim to add. Once again, there are multiple tutorials online (e.g., sample tutorial), which you can easily search for. You are free to choose your own way of creating a mesh of your face. You can also rely on methods that offer one-click, add-on based solutions.

+

Once the face mesh is created, please remove the triangles where your eye should be and make sure to texture your face mesh using your photograph.

+

Adding the eyeballs (45 minutes)

+

Our next step is to add the eyeballs to your face mesh. +Following the geometry that you have learned from the lecture, you will be creating an eyeball that is faithful to the physiology of the eye. +These eyeballs will later be placed on the face mesh that you generated. +There are again multiple tutorials online that you can rely on to generate a realistic-looking eyeball (e.g., sample tutorial). +Once you are complete with this task, make sure to place these eyeballs on your face mesh.

+

Realistic gaze (15 minutes)

+

Now that you have generated a face mesh and two eyeballs for it, please explore the constraint options in Blender 3D. Our aim here is to add a target (e.g., a box) at some distance in front of the face mesh, and we want our newly generated digital avatar to gaze at the target. You have to identify means to rotate the eyeballs such that they always gaze at the box. But remember, you also learned in the lecture how much and how far an eyeball can rotate. We want to be able to add that as a constraint as well. So once the target moves, the eyeballs should rotate, but they should not turn beyond their rotation ability.
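If you prefer scripting over clicking through the interface, a Track To constraint combined with a Limit Rotation constraint is one way to achieve this; below is a minimal Blender Python (bpy) sketch, where the object names and rotation limits are illustrative assumptions rather than values from the lecture:

import math

import bpy

eyeball = bpy.data.objects['Eyeball.L']  # assumed object name
target = bpy.data.objects['Target']      # assumed object name

# Make the eyeball always gaze at the target.
track = eyeball.constraints.new(type='TRACK_TO')
track.target = target
track.track_axis = 'TRACK_NEGATIVE_Z'
track.up_axis = 'UP_Y'

# Clamp the resulting rotation to a plausible range (illustrative limits, not lecture values).
limit = eyeball.constraints.new(type='LIMIT_ROTATION')
limit.use_limit_x = True
limit.min_x = math.radians(-35.)
limit.max_x = math.radians(35.)
limit.use_limit_z = True
limit.min_z = math.radians(-45.)
limit.max_z = math.radians(45.)
limit.owner_space = 'LOCAL'

You could equally set these constraints interactively in the constraint panel; the script is only a convenience for applying the same limits to both eyeballs.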

+

Changing the iris size with light levels (1 hour)

+

This last task is entirely optional. You will seek means to change the iris diameter with the light levels in the environment of your digital avatar. As a first step, make sure to set the target you have created to be a light source. Since our digital avatar is always gazing at it, this target's light level will determine the size of the iris. You learned in your lecture how large or small an iris can be. We want you to manipulate the eyeball mesh procedurally with the light level of your target.
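One possible starting point is sketched below; it assumes your scene contains a light named 'Target' and an iris mesh named 'Iris.L', and the mapping from light energy to iris opening is a rough placeholder rather than the adaptation data from the lecture:

import bpy

# Read the light level (in Blender's power units) of the assumed 'Target' light.
light_energy = bpy.data.lights['Target'].energy

# Map the light level to an iris opening between roughly 2 mm (bright) and 8 mm (dark).
iris_diameter = max(2., 8. - 1.5 * light_energy)

# Scale the assumed iris mesh in its local plane to match the computed diameter.
scale = iris_diameter / 8.
bpy.data.objects['Iris.L'].scale = (scale, scale, 1.)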

+

Conclusion

+

You may have multiple questions during the practical. Do not hesitate to reach out to us during the practical sessions or via Moodle. Once you have completed the practical and trust that you have generated an excellent digital avatar, please let us know, as this can count as an investment towards improving your final mark for this course. If you provide your consent (since this contains your personal data -- your face), you can also share your Blender file with us by uploading it to a cloud service (Microsoft OneDrive provided by the university) and sharing the link of the file with us through an email. In your email, please make sure to state under what licence you are providing us the Blender file (e.g., MIT License, Creative Commons), and please make sure to state if you allow us to use this data in scientific research in the future. Note that this exercise is also beneficial for your final assignment, and it can potentially help you achieve your task in your final project.

+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/teaching/index.html b/teaching/index.html new file mode 100644 index 00000000..9478eaac --- /dev/null +++ b/teaching/index.html @@ -0,0 +1,2457 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Courses - Computational Light Laboratory at University College London + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ + + +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Courses

+ +

The Computational Light Laboratory offers lectures on various topics, including computational optics, computational displays, perceptual graphics and computational fabrication. For the full list of lectures currently offered, please follow the menu on the right-hand side of this text. The lectures that we have offered so far are as follows:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Term | Instructor(s) | Course | Content
- | Kaan Akşit | - | Computational Light
Winter 2023 | Kaan Akşit | COMP0088 | Introduction to Machine Learning
Summer 2022 | Kaan Akşit | SIGGRAPH 2022 | Optimizing Vision and Visuals: Lectures on Cameras, Displays and Perception
Spring 2022 | Kaan Akşit | COMP0160 | Lecture 2: Visual perception in perceptual graphics and computational displays
Spring 2022 | Kaan Akşit | COMP0160 | Lecture 7: Integrating Sensory Information in Computational Displays
+ + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/teaching/media/optimizing_vision_and_visuals.png b/teaching/media/optimizing_vision_and_visuals.png new file mode 100644 index 00000000..559cf4e1 Binary files /dev/null and b/teaching/media/optimizing_vision_and_visuals.png differ diff --git a/teaching/siggraph2022_optimizing_vision_and_visuals/index.html b/teaching/siggraph2022_optimizing_vision_and_visuals/index.html new file mode 100644 index 00000000..e4baa70b --- /dev/null +++ b/teaching/siggraph2022_optimizing_vision_and_visuals/index.html @@ -0,0 +1,2810 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Optimizing Vision and Visuals: Lectures on Cameras, Displays and Perception - Computational Light Laboratory at University College London + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Optimizing Vision and Visuals: Lectures on Cameras, Displays and Perception

+

People

+ + + + + + + + + + + + + + + + + + + +

Koray Kavaklı1

David Walton2

Nick Antipa3

Rafał Mantiuk4

Douglas Lanman5,6

Kaan Akşit2

+

1Koç University, 2University College London, 3University of California San Diego, 4University of Cambridge, 5Meta Reality Labs, 6University of Washington

+

SIGGRAPH 2022

+ +

Resources

+

Lecture recording + Code + Foreword

+
+ Bibtex +
@inproceedings{10.1145/3532720.3535650,
+ author = {Kavakli, Koray and Walton, David Robert and Antipa, Nick and Mantiuk, Rafa\l{} and Lanman, Douglas and Ak\c{s}it, Kaan},
+ title = {Optimizing Vision and Visuals: Lectures on Cameras, Displays and Perception},
+ year = {2022},
+ isbn = {9781450393621},
+ publisher = {Association for Computing Machinery},
+ address = {New York, NY, USA},
+ url = {https://doi.org/10.1145/3532720.3535650},
+ doi = {10.1145/3532720.3535650},
+ booktitle = {ACM SIGGRAPH 2022 Courses},
+ articleno = {17},
+ numpages = {66},
+ location = {Vancouver, British Columbia, Canada},
+ series = {SIGGRAPH '22}
+}
+
+
+

Presentation

+

+ +

+ +

Abstract

+

The evolution of the internet is underway, where immersive virtual 3D environments (commonly known as metaverse or telelife) will replace flat 2D interfaces. +Crucial ingredients in this transformation are next-generation displays and cameras representing genuinely 3D visuals while meeting the human visual system's perceptual requirements.

+

This course will provide a fast-paced introduction to optimization methods for next-generation interfaces geared towards immersive virtual 3D environments. Firstly, we will introduce lensless cameras for high-dimensional compressive sensing (e.g., single-exposure capture of a video or one-shot 3D). By the end, our audience will learn to process images from a lensless camera. Secondly, we will introduce holographic displays as a potential candidate for next-generation displays. By the end of this course, you will learn to create your own 3D images that can be viewed using a standard holographic display. Lastly, we will introduce perceptual guidance that could be an integral part of the optimization routines of displays and cameras. Our audience will gather experience in integrating perception into display and camera optimizations.

+

This course targets a wide range of audiences, from domain experts to newcomers. +To do so, examples from this course will be based on our in-house toolkit to be replicable for future use. +The course material will provide example codes and a broad survey with crucial information on cameras, displays and perception.

+

Relevant research works

+

Here are relevant research works from the authors:

+ +

Outreach

+

We host a Slack group with more than 250 members. This Slack group focuses on the topics of rendering, perception, displays and cameras. The group is open to the public, and you can become a member by following this link.

+

Contact Us

+
+

Warning

+

Please reach us through email or through GitHub issues to ask your questions or to provide your feedback and comments.

+
+

Acknowledgements

+

The authors would like to thank reviewers for their valuable feedback.

+
+ + +
+

Kaan Akşit is supported by the Royal Society's RGS\R2\212229 - Research Grants 2021 Round 2 in building the hardware prototype used in generating the course material. Kaan Akşit is also supported by Meta Reality Labs inclusive rendering initiative 2022. +
+
+
+
+
+
+

+ + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/timeline/index.html b/timeline/index.html new file mode 100644 index 00000000..10571f2c --- /dev/null +++ b/timeline/index.html @@ -0,0 +1,4346 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Recent news - Computational Light Laboratory at University College London + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Timeline

+

2024

+

April

+

22 April 2024

+
+Nature Light Science and Applications +
+

Our paper, All-optical image denoising using a diffractive visual processor, is recognized as the most downloaded paper of March 2024 by Nature's Light: Science & Applications. This work is a collaboration between Çağatay Işıl, Tianyi Gan, Fazil Onuralp, Koray Mentesoglu, Jagrit Digani, Huseyin Karaca, Hanlong Chen, Jingxi Li, Deniz Mengu, Mona Jarrahi, Kaan Akşit, and Aydogan Ozcan.
+

+
+Graphics Replicability Stamp Initiative +
+

Our paper, Multi-color Holograms Improve Brightness in Holographic Displays, is awarded the Graphics Replicability Stamp Initiative's replicability stamp. This work is a collaboration between Koray Kavaklı, Liang Shi, Hakan Urey, Wojciech Matusik, and Kaan Akşit.
+

+

January

+

29 January 2024

+
+SPIE Photonics West 2024 +
+

Our paper, AutoColor: Learned Light Power Control for Multi-Color Holograms, is presented at SPIE AR|VR|MR 2024. +This work is a collaboration between +Yicheng Zhan, +Koray Kavaklı, +Hakan Urey, +Qi Sun, +and Kaan Akşit. +
+

+

2023

+

December

+

13 December 2023

+
+SIGGRAPH Asia 2023 +
+

Our paper, +Multi-color Holograms Improve Brightness in Holographic Displays, +is presented at +SIGGRAPH Asia 2023. +This work is a collaboration between +Koray Kavaklı, +Liang Shi, +Hakan Urey, +Wojciech Matusik, +and Kaan Akşit. +
+

+

October

+

30 October 2023

+
+Ahmet Guzel's Poster Award +
+

We are pleased to announce an achievement at the UKRI AI CDT Conference 2023 in Bristol, United Kingdom. Ahmet Güzel showcased our research project, ChromaCorrect, among a diverse array of over 50 posters at the event. We are honored to have been awarded First Prize for Best Poster, and we thank the Foundational Artificial Intelligence Center at University College London.
+

+

12-13 October 2023

+
+ +
+

Kaan helped organise Optical Waveguides: A Key to Socially Acceptable Augmented Reality Glasses? as an Optica Incubator. Kaan also gave an invited talk titled Role of Differentiable Models in Computational Display Research at the same incubator event.
+

+

11 October 2023

+
+ +
+

Kaan attended and presented at Meta's Academic Forum 2023 upon Meta Reality Labs' invitation. Kaan's talk was titled Unlocking Next-Generation Display Technologies with Holography.
+

+

9-10 October 2023

+
+ +
+

Kaan helped organise the Virtual Reality and Augmented Vision theme at Optica's Frontiers in Optics. Kaan also gave an invited talk on his group's work, Headsetless Holographic Virtual Reality Displays, in the same theme.
+

+

August

+

16 August 2023

+
+ +
+

We are grateful to Optica for inviting our Ahmet Hamdi Güzel to present his work at the Vision and Color summer data blast webinar. Did you miss it? The recording is now online.
+

+

June

+

28 June 2023

+
+ +
+

Kaan presented an invited talk titled Could holographic displays be the key to achieving realism? at Stanford University. +
+

+

1 June 2023

+
+ +
+

In her latest article, "The Promise of Holographic Displays," Sandrine Ceurstemont gathered perspectives on the promise of holographic displays and provided some space for ours. +
+

+

April

+

21 April 2023

+
+ +
+

Our paper, +ChromaCorrect: Prescription Correction in Virtual Reality Headsets through Perceptual Guidance, +is published at Optica's Biomedical Optics Express. +This work is a result of a collaboration with +Ahmet H. Güzel, +Jeanne Beyazian, +Praneeth Chakravarthula, +and Kaan Akşit. +
+

+

March

+

28 March 2023

+
+ +
+

Our paper, +HoloBeam: Paper-Thin Near-Eye Displays, +is presented at +IEEE VR 2023. +This work is a collaboration between +Yuta Itoh, +and Kaan Akşit. +
+
+

+
+ +
+

Our paper, +Realistic Defocus Blur for Multiplane Computer-Generated Holography, +is presented at +IEEE VR 2023. +This work is a collaboration between +Koray Kavaklı, +Yuta Itoh, +Hakan Ürey, +and Kaan Akşit. +
+
+

+

15 March 2023

+
+ +
+

Kaan presented an invited talk titled Could holographic displays be the key to achieving realism in displays?. +We are thankful to Huawei, United Kingdom for their kind hospitality. +
+

+

February

+

6 February 2023

+
+ +
+

Kaan presented an invited talk titled Could holographic displays be the key to achieving realism?. +We are thankful to University of Rochester's institute of optics for their kind hospitality. +
+

+

January

+

4 January 2023

+
+ +
+

We are thankful for TÜBİTAK's 2224-A support for our valuable member and PhD student, Koray Kavaklı, in presenting his work at SPIE's Photonics West 2023. This fund covers a significant portion of his attendance at SPIE's Photonics West.
+

+

3 January 2023

+
+ +
+

We are thankful to Oracle for offering us cloud infrastructure support for our computational needs. We had to decline their award, as we purchased new computational resources most recently.
+

+

2022

+

November

+

18 November 2022

+
+ +
+

Our paper, +Unrolled Primal-Dual Networks for Lensless Cameras, +is published at +Optica's Optics Express. +This work is a result of a collaboration between +Oliver Kingshott, +Nick Antipa, +Emrah Bostan, +and Kaan Akşit. +
+

+

October

+

25 October 2022

+
+ +
+

Our paper, +Metameric Inpainting for Image Warping, +is published at IEEE's Transaction on Visualization and Computer Graphics. +This work is a collaboration between +Rafael Kuffner dos Anjos, +David Robert Walton, +Sebastian Friston, +David Swapp, +Anthony Steed, +Tobias Ritschel, +and Kaan Akşit. +
+

+

19 October 2022

+
+ +
+

In collaboration with Meta Reality Laboratory's Douglas Lanman, we helped organise a successful augmented reality and virtual reality theme at Optica's Frontiers in Optics 2022. +Kaan Akşit presented a talk titled Realistic Image Reconstruction with Multiplane Computer-Generated Holography, while Koray Kavaklı presented a talk titled Introduction to Odak: a Differentiable Toolkit for Optical Sciences, Vision Sciences and Computer Graphics. +
+

+

August

+

3 August 2022

+
+ +
+

Our course, Optimizing Vision and Visuals: Lectures on Cameras, Displays and Perception is available and online in SIGGRAPH 2022. +This work is a collaboration between Koray Kavaklı, David Walton, Nick Antipa, Rafał Mantiuk, Douglas Lanman and Kaan Akşit. +
+

+

June

+

23 June 2022

+
+ +
+

We are grateful to Meta Reality Labs for supporting our research through the inclusive rendering initiative 2022. Their award will enable us to investigate inclusive graphics pipelines in terms of human visual perception. The award is worth 75,000 USD.
+

+

May

+

5 May 2022

+
+ +
+

Our panel, Telelife: A Vision of Remote Living in 2035, is presented at CHI 2022. +This work is a collaboration between +Kenan Bektaş, +Jeeeun Kim, +Kiyoshi Kiyokawa, +Anthony Steed, +Tobias Höllerer, +Nataliya Kosmyna, +Misha Sra, +Jason Orlosky, +and Kaan Akşit. +
+

+

March

+

14 March 2022

+
+ +
+

We introduce our work, Metameric Varifocal Holograms, at IEEE VR 2022. +This work is a collaboration between David R. Walton, Koray Kavakli, Rafael Kuffner dos Anjos, David Swapp, Tim Weyrich, Hakan Urey, Anthony Steed, Tobias Ritschel and Kaan Akşit. +David Walton presented the work at the conference. +
+

+
+ +
+

Kaan Akşit served on the program committee for journal papers, and also as a member of the technical achievement and lifetime achievement awards committee.
+

+

11 March 2022

+
+ +
+

Kaan Akşit, together with Jannick Rolland and Babak Amirsolaimani, is acting as a guest editor for SPIE's Journal of Optical Microsystems in a special issue targeting optics research in augmented, virtual and mixed reality. Here is a link for the call flyer, and to submit your work, please follow this link.
+

+

February

+

23 February 2022

+
+ +
+

Kaan Akşit serves on the program committee for EGSR 2022, which will take place as a hybrid conference, both virtual and physically located in Prague, Czech Republic.
+

+

January

+

24 January 2022

+
+ +
+

We presented two invited talks at SPIE's Photonics West. Our first talk is on Perceptually guided computer-generated holography, and our second talk is on Beaming Displays: Towards Displayless Augmented Reality Near-eye Displays. +
+

+

17 January 2022

+
+ +
+

We thank The Next Byte podcast for covering our collaboration with MIT on the SensiCut project. They did a great job explaining our work in their podcast. You can reach the podcast using this link.
+

+

2021

+

November

+

29 November 2021

+
+ +
+

Our vision, "Telelife: The Future of Remote Living", is published at Frontiers in Virtual Reality. We share our vision for the future, specifically in the year 2035. +
+

+

12 November 2021

+
+ +
+

Our invited work, "Learned Holographic Light Transport", is published at Optica's Applied Optics. We show that light transport can be made more accurate by learning hardware dependent kernels. +
+

+

8 November 2021

+
+ +
+

We are grateful to the Royal Society for awarding us with their research grants 2021 round two scheme. Their award will enable us to invent new holographic light transport models in the future. This award is worth 14,994.65 GBP. The title of our submission is Learned models for Computer-Generated Holography.
+

+


+

1-2 November 2021

+
+ +
+

In collaboration with Meta Reality Laboratory's Douglas Lanman, we helped organise a successful augmented reality and virtual reality theme at Optica's Frontiers in Optics 2021. Kaan Akşit presented his work on holographic beaming displays proposal at the same event. +
+

+

February

+

18 February 2021

+
+ +
+

We appear on UCL news for receiving UCL-Osaka university strategic partnership fund. +
+
+

+

January

+

4 January 2021

+
+ +
+

Kaan Akşit joined University College London's computer science department as an Associate Professor. He is now part of the Virtual Reality and Computer Graphics group, and he leads the Computational light laboratory. +
+

+

2020

+

November

+

17 November 2020

+
+ +
+

Kaan Akşit and Jason Orlosky have been granted UCL-Osaka University Strategic Partner Funds. This award is worth 10,000 GBP. The title of our submission is Development of a joint Telelife technology seminar using virtual reality.
+

+

August

+

1 August 2020

+
+ +
+

Kaan Akşit has left his Senior Research Scientist position at NVIDIA in the US and accepted an offer to join University College London's computer science department as an Associate Professor.
+

+ + + + + + + + + + + + + +
+
+ + + + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + \ No newline at end of file
+Interested in joining our research group? +

If you are interested in joining our group as an intern, an undergraduate student, a master's student, a PhD student, a postdoctoral researcher or a visiting researcher, please do not hesitate to reach out to Kaan Akşit.

+