@inproceedings{Navarro2022,
  author        = {Navarro, Diego and Garro, Valeria and Sundstedt, Veronica},
  title         = {Electrodermal Activity Evaluation of Player Experience in Virtual Reality Games: A Phasic Component Analysis},
  booktitle     = {Proceedings of the International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications},
  volume        = {2},
  year          = {2022},
  publisher     = {Science and Technology Publications, Lda},
  doi           = {10.5220/0011006100003124},
  issn          = {2184-4321},
  keywords      = {Electrodermography,Phasic Component,Player Experience,Psychophysiology,Virtual Reality},
  abstract      = {Electrodermal activity (EDA) is considered to be an effective metric for measuring changes in the arousal level of people. In this paper, the phasic component of EDA data from players is analyzed in relation to their reported experience from a standardized questionnaire, when interacting with a couple of virtual reality games that featured two different input devices: the HTC Vive and Leap Motion controllers. Initial results show that there are no significant differences in the phasic component data, despite having significant differences in their respective player experience. Furthermore, no linear correlations are found between the phasic component data and the evaluated experience variables, with the only exception of negative affect which features a weak positive correlation. In conclusion, the phasic component of EDA data has here shown a limited correlation with player experience and should be further explored in combination with other psychophysiological signals.},
  internal-note = {pages field removed: original value 2184-4321 duplicated the ISSN and was not a page range},
}

@misc{neurochat,
  title = {Коммуникационная система НейроЧат - официальный сайт},
  url   = {https://neurochat.pro/},
}

@misc{naukaprotez,
  title = {Кибер-протез с новой системой очувствления испытали в России | Новости науки},
  url   = {https://xn--80aa3ak5a.xn--p1ai/news/zavershilsya-4-y-etap-issledovaniy-po-ochuvstvleniyu-protezov-i-kupirovaniyu-fantomnykh-boley/},
}

@misc{Neuralin90,
  author = {{РИА Новости}},
  title  = {Neuralink впервые вживит человеку зрительный имплант, заявил Маск - РИА Новости, 31.03.2025},
  url    = {https://ria.ru/20250331/neuralink-2008320527.html},
}

@article{bciChen,
  author  = {Chen, Shugeng and Chen, Mingyi and Wang, Xu and Liu, Xiuyun and Liu, Bing and Ming, Dong},
  title   = {Brain–computer interfaces in 2023–2024},
  journal = {Brain-X},
  volume  = {3},
  month   = mar,
  year    = {2025},
  doi     = {10.1002/brx2.70024},
}

@misc{BrainDat14,
  title = {Brain Data Measurement \& Wireless EEG Solutions | Emotiv},
  url   = {https://www.emotiv.com},
}

@article{Gomathy2024,
  author  = {Gomathy, A. and Shriguhan, P. and Sibi, A. P. and Kumar, Ranjith and Rakesh, E. K.},
  title   = {An Advanced Human-Machine Interface Utilizing Eye Tracking For Enhanced Written Communication Among Locked-In Syndrome Patients By Using {Haar} Cascade Algorithm},
  volume  = {21},
  number  = {S7},
  pages   = {1598--1606},
  year    = {2024},
  issn    = {1741-8992},
  url     = {https://www.migrationletters.com},
}

@article{Liao2022,
  author    = {Liao, Hua and Zhang, Changbo and Zhao, Wendi and Dong, Weihua},
  title     = {Toward Gaze-Based Map Interactions: Determining the Dwell Time and Buffer Size for the Gaze-Based Selection of Map Features},
  journal   = {ISPRS International Journal of Geo-Information},
  volume    = {11},
  number    = {2},
  month     = feb,
  year      = {2022},
  publisher = {MDPI},
  doi       = {10.3390/ijgi11020127},
  issn      = {2220-9964},
  keywords  = {Buffer size,Dwell time,Eye tracking,Gaze-based map interaction,Human–computer interaction,Select operation},
}

@misc{Eyetrack44,
  title = {Eye tracking — a catalyst for innovation in AR, VR, and MR - Tobii},
  url   = {https://www.tobii.com/products/integration/xr-headsets},
}

@article{egaze,
  author  = {Li, Nealson and Chang, Muya and Raychowdhury, Arijit},
  title   = {{E-Gaze}: Gaze Estimation with Event Camera},
  journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  volume  = {PP},
  month   = jan,
  year    = {2024},
  doi     = {10.1109/TPAMI.2024.3359606},
}

@article{Xie2025,
  author    = {Xie, Jiacheng and Chen, Rongfeng and Liu, Ziming and Zhou, Jiahao and Hou, Juan and Zhou, Zengxiang},
  title     = {{GMM-HMM}-Based Eye Movement Classification for Efficient and Intuitive Dynamic Human–Computer Interaction Systems},
  journal   = {Journal of Eye Movement Research},
  volume    = {18},
  number    = {4},
  pages     = {28},
  month     = jul,
  year      = {2025},
  publisher = {MDPI AG},
  doi       = {10.3390/jemr18040028},
}

@article{Chhimpa2025,
  title         = {A Comprehensive Framework for Eye Tracking: Methods, Tools, Applications, and Cross-Platform Evaluation},
  journal       = {Journal of Eye Movement Research},
  volume        = {18},
  number        = {5},
  month         = oct,
  year          = {2025},
  publisher     = {International Group for Eye Movement Research},
  doi           = {10.3390/jemr18050047},
  issn          = {1995-8692},
  keywords      = {electroencephalography,electrooculography,eye tracking,eye tracking performance parameters,eye-tracking tools,human–computer interaction,scleral coil,video oculography},
  internal-note = {TODO: author field missing in original record — look up via DOI},
}

@misc{DriverMo12,
  author = {Lyrheden, Fanny},
  title  = {Driver Monitoring 2.0: How Euro NCAP is Raising the Bar in 2026 - Smart Eye},
  url    = {https://www.smarteye.se/blog/driver-monitoring-euro-ncap-2026/},
  month  = apr,
  year   = {2025},
}

@misc{Neurable92,
  title = {Neurable | The Mind. Unlocked. | Work Smarter, Not Longer},
  url   = {https://www.neurable.com/},
}

@misc{Kernel70,
  title = {Kernel},
  url   = {https://www.kernel.com/},
}

@misc{MNE,
  title = {MNE — MNE 1.11.0 documentation},
  url   = {https://mne.tools/stable/index.html},
  month = jan,
  year  = {2026},
}

@misc{Blackroc20,
  title = {Blackrock Neurotech | Empowered by Thought},
  url   = {https://blackrockneurotech.com/},
}

@misc{Loreta,
  title = {LORETA; documentation link},
  url   = {https://www.mitsar-eeg.ru/download/manuals/Loreta_UM_RUS.pdf},
}

@misc{cloi,
  title = {LG CLOI SERVEBOT (новость с официального сайта)},
  url   = {https://www.lg.com/kz/about-lg/press-and-media/lg-predstavlyaet-servisnogo-robota-cloi-servebot/},
}

@misc{stentrode,
  title = {The Technology | Synchron},
  url   = {https://synchron.com/technology},
}

@misc{sphericalins,
  title = {Top 10 Brain-Computer Interface Companies in 2025},
  url   = {https://www.sphericalinsights.com/blogs/top-10-companies-leading-the-brain-computer-interface-market-in-2025-key-players-statistics-future-trends-2024-2035},
}

@misc{neurotrend,
  title    = {Крупнейшая российская нейромаркетинговая компания - Нейротренд},
  url      = {https://neurotrend.ru/},
  abstract = {Нейротренд - научно обоснованный бизнес, открывающий для своих клиентов новые возможности применения нейротехнологий в маркетинге.},
}

@misc{skoltech-n,
  title = {Центр инженерных систем и наук |},
  url   = {https://www.skoltech.ru/center/engineering},
}

@misc{hse-neuro,
  title = {О нас — Центр нейроэкономики и когнитивных исследований — Национальный исследовательский университет «Высшая школа экономики»},
  url   = {https://www.hse.ru/cdm-centre/about},
}

@misc{sfedu,
  title = {Научно-исследовательский технологический Центр нейротехнологий},
  url   = {https://sfedu.ru/www/stat_pages22.show?p=ELS/inf/D\&x=ELS/10484},
}

@misc{msu-bmi,
  title = {Лаборатория нейрокомпьютерных интерфейсов},
  url   = {https://human.bio.msu.ru/lab_neurophysiology.html},
}

@misc{sensomed,
  title         = {Резидент «Сколково» зарегистрировал «умную перчатку» для},
  url           = {https://sk.ru/news/rezident-skolkovo-zaregistriroval-umnuyu-perchatku-dlya-reabilitacii-posle-insulta/},
  internal-note = {title appears truncated in original record — verify against the page},
}

@misc{bitronics,
  title = {Набор-конструктор «Юный нейромоделист» BiTronics Lab},
  url   = {https://bitronicslab.com/neuromodelist},
}

@misc{istok,
  title = {Слуховые системы костной проводимости | ГК «Исток-Аудио»},
  url   = {https://www.istok-audio.com/implantiruemye-slukhovye-sistemy/},
}

@misc{neurobotics,
  title = {Нейроботикс - Главная},
  url   = {https://neurobotics.ru/},
}

@misc{nti2035,
  title = {Нейронет --- nti2035.ru},
  url   = {https://nti2035.ru/markets/mneuronet},
}

@article{wu2018human,
  author    = {Wu, Yu and Jiang, Dai and Liu, Xiao and Bayford, Richard and Demosthenous, Andreas},
  title     = {A human--machine interface using electrical impedance tomography for hand prosthesis control},
  journal   = {IEEE Transactions on Biomedical Circuits and Systems},
  volume    = {12},
  number    = {6},
  pages     = {1322--1333},
  year      = {2018},
  publisher = {IEEE},
}

@article{Edelman2025,
  author    = {Edelman, Bradley J. and Zhang, Shuailei and Schalk, Gerwin and Brunner, Peter and Müller-Putz, Gernot and Guan, Cuntai and He, Bin},
  title     = {Non-Invasive Brain-Computer Interfaces: State of the Art and Trends},
  journal   = {IEEE Reviews in Biomedical Engineering},
  volume    = {18},
  pages     = {26--49},
  year      = {2025},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  doi       = {10.1109/RBME.2024.3449790},
  issn      = {1941-1189},
  pmid      = {39186407},
  keywords  = {BCI,brain-computer interface,deep learning,electroencephalography,manifold classification,motor imagery,motor-related cortical potentials,neural decoding,neurotechnology,robotic arm,transfer learning},
  abstract  = {Brain-computer interface (BCI) is a rapidly evolving technology that has the potential to widely influence research, clinical and recreational use. Non-invasive BCI approaches are particularly common as they can impact a large number of participants safely and at a relatively low cost. Where traditional non-invasive BCIs were used for simple computer cursor tasks, it is now increasingly common for these systems to control robotic devices for complex tasks that may be useful in daily life. In this review, we provide an overview of the general BCI framework as well as the various methods that can be used to record neural activity, extract signals of interest, and decode brain states. In this context, we summarize the current state-of-the-art of non-invasive BCI research, focusing on trends in both the application of BCIs for controlling external devices and algorithm development to optimize their use. We also discuss various open-source BCI toolboxes and software, and describe their impact on the field at large.},
}

@article{Li2021,
  author    = {Li, Wei and Shi, Ping and Yu, Hongliu},
  title     = {Gesture Recognition Using Surface Electromyography and Deep Learning for Prostheses Hand: State-of-the-Art, Challenges, and Future},
  journal   = {Frontiers in Neuroscience},
  volume    = {15},
  month     = apr,
  year      = {2021},
  publisher = {Frontiers Media S.A.},
  doi       = {10.3389/fnins.2021.621885},
  issn      = {1662-453X},
  keywords  = {convolutional neural network,deep learning,hand gesture recognition,pattern recognition,prosthesis hand,recurrent neural network,surface electromyography},
  abstract  = {Amputation of the upper limb brings heavy burden to amputees, reduces their quality of life, and limits their performance in activities of daily life. The realization of natural control for prosthetic hands is crucial to improving the quality of life of amputees. Surface electromyography (sEMG) signal is one of the most widely used biological signals for the prediction of upper limb motor intention, which is an essential element of the control systems of prosthetic hands. The conversion of sEMG signals into effective control signals often requires a lot of computational power and complex process. Existing commercial prosthetic hands can only provide natural control for very few active degrees of freedom. Deep learning (DL) has performed surprisingly well in the development of intelligent systems in recent years. The significant improvement of hardware equipment and the continuous emergence of large data sets of sEMG have also boosted the DL research in sEMG signal processing. DL can effectively improve the accuracy of sEMG pattern recognition and reduce the influence of interference factors. This paper analyzes the applicability and efficiency of DL in sEMG-based gesture recognition and reviews the key techniques of DL-based sEMG pattern recognition for the prosthetic hand, including signal acquisition, signal preprocessing, feature extraction, classification of patterns, post-processing, and performance evaluation. Finally, the current challenges and future prospects in clinical application of these techniques are outlined and discussed.},
}

@article{asd,
  author        = {Vojtech, Jennifer M. and Cler, Gabriel J. and Stepp, Cara E.},
  title         = {Prediction of optimal facial electromyographic sensor configurations for human-machine interface control},
  journal       = {IEEE Transactions on Neural Systems and Rehabilitation Engineering},
  keywords      = {electromyography,feature extraction,human-machine interfaces,myoelectric control},
  abstract      = {Surface electromyography (sEMG) is a promising computer access method for individuals with motor impairments. However, optimal sensor placement is a tedious task requiring trial-and-error by an expert, particularly when recording from facial musculature likely to be spared in individuals with neurological impairments. We sought to reduce sEMG sensor configuration complexity by using quantitative signal features extracted from a short calibration task to predict human-machine interface (HMI) performance. A cursor control system allowed individuals to activate specific sEMG-targeted muscles to control an onscreen cursor and navigate a target selection task. The task was repeated for a range of sensor configurations to elicit a range of signal qualities. Signal features were extracted from the calibration of each configuration and examined via a principle component factor analysis in order to predict HMI performance during subsequent tasks. Feature components most influenced by energy and complexity of the EMG signal and muscle activity between sensors were significantly predictive of HMI performance. However, configuration order had a greater effect on performance than the configurations, suggesting that non-experts can place sEMG sensors in the vicinity of usable muscle sites for computer access and healthy individuals will learn to efficiently control the HMI system.},
  internal-note = {original doi (10.1109/TNSRE) was truncated and has been removed; journal inferred from that DOI prefix — TODO: recover full DOI, year, volume, and pages},
}

@misc{wiki26,
  title         = {OpenVibe - Wikipedia},
  internal-note = {TODO: add url and urldate for this web reference},
}

@misc{bci2000,
  title = {bci2000.org},
  url   = {https://www.bci2000.org/},
}

@article{Saha2021,
  author    = {Saha, Simanto and Mamun, Khondaker A. and Ahmed, Khawza and Mostafa, Raqibul and Naik, Ganesh R. and Darvishi, Sam and Khandoker, Ahsan H. and Baumert, Mathias},
  title     = {Progress in Brain Computer Interface: Challenges and Opportunities},
  journal   = {Frontiers in Systems Neuroscience},
  volume    = {15},
  month     = feb,
  year      = {2021},
  publisher = {Frontiers Media S.A.},
  doi       = {10.3389/fnsys.2021.578875},
  issn      = {1662-5137},
  keywords  = {brain computer interface,cognitive rehabilitation,electrical/hemodynamic brain signals,hybrid/multimodal BCI,neuroimaging techniques,neurosensors},
  abstract  = {Brain computer interfaces (BCI) provide a direct communication link between the brain and a computer or other external devices. They offer an extended degree of freedom either by strengthening or by substituting human peripheral working capacity and have potential applications in various fields such as rehabilitation, affective computing, robotics, gaming, and neuroscience. Significant research efforts on a global scale have delivered common platforms for technology standardization and help tackle highly complex and non-linear brain dynamics and related feature extraction and classification challenges. Time-variant psycho-neurophysiological fluctuations and their impact on brain signals impose another challenge for BCI researchers to transform the technology from laboratory experiments to plug-and-play daily life. This review summarizes state-of-the-art progress in the BCI field over the last decades and highlights critical challenges.},
}

@article{Matht2018,
  author    = {Mathôt, Sebastiaan},
  title     = {Pupillometry: Psychology, physiology, and function},
  journal   = {Journal of Cognition},
  volume    = {1},
  number    = {1},
  year      = {2018},
  publisher = {Ubiquity Press},
  doi       = {10.5334/joc.18},
  issn      = {2514-4820},
  pmid      = {31517190},
  keywords  = {Eye movements,Orienting response,Psychosensory pupil response,Pupil light response,Pupil near response,Pupillometry},
  abstract  = {Pupils respond to three distinct kinds of stimuli: they constrict in response to brightness (the pupil light response), constrict in response to near fixation (the pupil near response), and dilate in response to increases in arousal and mental effort, either triggered by an external stimulus or spontaneously. In this review, I describe these three pupil responses, how they are related to high-level cognition, and the neural pathways that control them. I also discuss the functional relevance of pupil responses, that is, how pupil responses help us to better see the world. Although pupil responses likely serve many functions, not all of which are fully understood, one important function is to optimize vision either for acuity (small pupils see sharper) and depth of field (small pupils see sharply at a wider range of distances), or for sensitivity (large pupils are better able to detect faint stimuli); that is, pupils change their size to optimize vision for a particular situation. In many ways, pupil responses are similar to other eye movements, such as saccades and smooth pursuit: like these other eye movements, pupil responses have properties of both reflexive and voluntary action, and are part of active visual exploration.},
}

@article{Qian2024,
  author    = {Qian, Kun and Arichi, Tomoki and Edwards, A. David and Hajnal, Joseph V.},
  title     = {Instant interaction driven adaptive gaze control interface},
  journal   = {Scientific Reports},
  volume    = {14},
  number    = {1},
  month     = dec,
  year      = {2024},
  publisher = {Nature Research},
  doi       = {10.1038/s41598-024-62365-9},
  issn      = {2045-2322},
  pmid      = {38778122},
  abstract  = {Gaze estimation is long been recognised as having potential as the basis for human-computer interaction (HCI) systems, but usability and robustness of performance remain challenging. This work focuses on systems in which there is a live video stream showing enough of the subjects face to track eye movements and some means to infer gaze location from detected eye features. Currently, systems generally require some form of calibration or set-up procedure at the start of each user session. Here we explore some simple strategies for enabling gaze based HCI to operate immediately and robustly without any explicit set-up tasks. We explore different choices of coordinate origin for combining extracted features from multiple subjects and the replacement of subject specific calibration by system initiation based on prior models. Results show that referencing all extracted features to local coordinate origins determined by subject start position enables robust immediate operation. Combining this approach with an adaptive gaze estimation model using an interactive user interface enables continuous operation with the 75th percentile gaze errors of 0.7∘, and maximum gaze errors of 1.7∘ during prospective testing. There constitute state-of-the-art results and have the potential to enable a new generation of reliable gaze based HCI systems.},
}

@article{Kaifosh2025,
  author    = {Kaifosh, Patrick and Reardon, Thomas R. and Zlobinsky, Danny and Zhou, Lei},
  title     = {A generic non-invasive neuromotor interface for human-computer interaction},
  journal   = {Nature},
  volume    = {645},
  number    = {8081},
  pages     = {702--711},
  month     = sep,
  year      = {2025},
  publisher = {Nature Research},
  doi       = {10.1038/s41586-025-09255-w},
  issn      = {1476-4687},
  pmid      = {40702190},
  abstract  = {Since the advent of computing, humans have sought computer input technologies that are expressive, intuitive and universal. While diverse modalities have been developed, including keyboards, mice and touchscreens, they require interaction with a device that can be limiting, especially in on-the-go scenarios. Gesture-based systems use cameras or inertial sensors to avoid an intermediary device, but tend to perform well only for unobscured movements. By contrast, brain–computer or neuromotor interfaces that directly interface with the body’s electrical signalling have been imagined to solve the interface problem1, but high-bandwidth communication has been demonstrated only using invasive interfaces with bespoke decoders designed for single individuals2, 3–4. Here, we describe the development of a generic non-invasive neuromotor interface that enables computer input decoded from surface electromyography (sEMG). We developed a highly sensitive, easily donned sEMG wristband and a scalable infrastructure for collecting training data from thousands of consenting participants. Together, these data enabled us to develop generic sEMG decoding models that generalize across people. Test users demonstrate a closed-loop median performance of gesture decoding of 0.66 target acquisitions per second in a continuous navigation task, 0.88 gesture detections per second in a discrete-gesture task and handwriting at 20.9 words per minute. We demonstrate that the decoding performance of handwriting models can be further improved by 16\% by personalizing sEMG decoding models. To our knowledge, this is the first high-bandwidth neuromotor interface with performant out-of-the-box generalization across people.},
}

@article{Li2025,
  author        = {Li, Junwei and Wu, Kunlin and Xiao, Jingcheng and Chen, Tianyu and Yang, Xudong and Pan, Jie and Chen, Yu and Wang, Yifan},
  title         = {Flexible multichannel muscle impedance sensors for collaborative human-machine interfaces},
  journal       = {Science Advances},
  volume        = {11},
  pages         = {3359},
  year          = {2025},
  abstract      = {The demand for advanced human-machine interfaces (HMIs) highlights the need for accurate measurement of muscle contraction states. Traditional methods, such as electromyography, cannot measure passive muscle contraction states, while optical and ultrasonic techniques suffer from motion artifacts due to their rigid transducers. To overcome these limitations, we developed a flexible multichannel electrical impedance sensor (FMEIS) for non-invasive detection of skeletal muscle contractions. By applying an imperceptible current, the FMEIS can target multiple deep muscles by capturing electric-field ripples generated by their contractions. With an ultrathin profile (~220 micrometers), a low elastic modulus (212.8 kilopascals) closely matching human skin, and engineered adhesive sensor surfaces, the FMEIS conforms nicely to human skin with minimized motion artifacts. The FMEIS achieved high accuracy in both hand gesture recognition and muscle force prediction using machine learning models. With demonstrated performance across multiple HMI applications, including human-robot collaboration, exoskeleton control, and virtual surgery, FMEIS shows great potential for future real-time collaborative HMI systems.},
  internal-note = {was @techReport with PDF-extraction-garbled title and journal "Sci. Adv"; generic url https://www.science.org removed — TODO: add DOI and verify article number (pages)},
}