% BibTeX bibliography database.
% NOTE(review): viewer/export metadata lines ("210 lines / 23 KiB / BibTeX",
% duplicated twice) removed from the top of the file; they were extraction
% artifacts, not bibliography content.
@inproceedings{Navarro2022,
  author        = {Navarro, Diego and Garro, Valeria and Sundstedt, Veronica},
  title         = {Electrodermal Activity Evaluation of Player Experience in Virtual Reality Games: A Phasic Component Analysis},
  booktitle     = {Proceedings of the International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications},
  volume        = {2},
  publisher     = {Science and Technology Publications, Lda},
  year          = {2022},
  doi           = {10.5220/0011006100003124},
  issn          = {2184-4321},
  keywords      = {Electrodermography,Phasic Component,Player Experience,Psychophysiology,Virtual Reality},
  abstract      = {Electrodermal activity (EDA) is considered to be an effective metric for measuring changes in the arousal level of people. In this paper, the phasic component of EDA data from players is analyzed in relation to their reported experience from a standardized questionnaire, when interacting with a couple of virtual reality games that featured two different input devices: the HTC Vive and Leap Motion controllers. Initial results show that there are no significant differences in the phasic component data, despite having significant differences in their respective player experience. Furthermore, no linear correlations are found between the phasic component data and the evaluated experience variables, with the only exception of negative affect which features a weak positive correlation. In conclusion, the phasic component of EDA data has here shown a limited correlation with player experience and should be further explored in combination with other psychophysiological signals.},
  internal-note = {review: pages field removed -- original value 2184-4321 duplicated the ISSN and cannot be a page range; verify the real pages against the publisher record},
}

@misc{neurochat,
  title         = {Коммуникационная система НейроЧат - официальный сайт},
  url           = {https://neurochat.pro/},
  internal-note = {review: empty author/month/year fields removed; add an access date (note or urldate) when known},
}

@misc{naukaprotez,
  title         = {Кибер-протез с новой системой очувствления испытали в России | Новости науки},
  url           = {https://xn--80aa3ak5a.xn--p1ai/news/zavershilsya-4-y-etap-issledovaniy-po-ochuvstvleniyu-protezov-i-kupirovaniyu-fantomnykh-boley/},
  internal-note = {review: moved URL from howpublished/\url{} into the url field; empty author/year fields removed},
}

@misc{Neuralin90,
  author        = {{РИА Новости}},
  title         = {Neuralink впервые вживит человеку зрительный имплант, заявил Маск - РИА Новости, 31.03.2025},
  url           = {https://ria.ru/20250331/neuralink-2008320527.html},
  month         = mar,
  year          = {2025},
  note          = {[Online; accessed 2026-01-29]},
  internal-note = {review: corporate author double-braced; month/year taken from the date encoded in the URL (20250331) and in the title},
}

@article{bciChen,
  author  = {Chen, Shugeng and Chen, Mingyi and Wang, Xu and Liu, Xiuyun and Liu, Bing and Ming, Dong},
  title   = {Brain--computer interfaces in 2023--2024},
  journal = {Brain-X},
  volume  = {3},
  month   = mar,
  year    = {2025},
  doi     = {10.1002/brx2.70024},
}

@misc{BrainDat14,
  title         = {Brain Data Measurement \& Wireless EEG Solutions | Emotiv},
  url           = {https://www.emotiv.com},
  internal-note = {review: empty author/month/year fields removed; add an access date when known},
}

@misc{Neurable92,
  title         = {Neurable | The Mind. Unlocked. | Work Smarter, Not Longer},
  url           = {https://www.neurable.com/},
  internal-note = {review: empty author/month/year fields removed; add an access date when known},
}

@misc{Kernel70,
  title         = {Kernel},
  url           = {https://www.kernel.com/},
  internal-note = {review: empty author/month/year fields removed; add an access date when known},
}

@misc{Blackroc20,
  title         = {Blackrock Neurotech | Empowered by Thought},
  url           = {https://blackrockneurotech.com/},
  internal-note = {review: empty author/month/year fields removed; add an access date when known},
}

@misc{stentrode,
  title         = {The Technology | Synchron},
  url           = {https://synchron.com/technology},
  internal-note = {review: empty author/month/year fields removed; add an access date when known},
}

@misc{sphericalins,
  title         = {Top 10 Brain-Computer Interface Companies in 2025},
  url           = {https://www.sphericalinsights.com/blogs/top-10-companies-leading-the-brain-computer-interface-market-in-2025-key-players-statistics-future-trends-2024-2035},
  internal-note = {review: empty author/month/year fields removed; publication year probably 2025 per title -- confirm before adding},
}

@misc{neurotrend,
  title    = {Крупнейшая российская нейромаркетинговая компания - Нейротренд},
  url      = {https://neurotrend.ru/},
  abstract = {Нейротренд - научно обоснованный бизнес, открывающий для своих клиентов новые возможности применения нейротехнологий в маркетинге.},
}

@misc{nti2035,
  title         = {Нейронет --- nti2035.ru},
  url           = {https://nti2035.ru/markets/mneuronet},
  internal-note = {review: moved URL from howpublished/\url{} into the url field; empty author/year fields removed},
}

@article{Edelman2025,
  author    = {Edelman, Bradley J. and Zhang, Shuailei and Schalk, Gerwin and Brunner, Peter and Müller-Putz, Gernot and Guan, Cuntai and He, Bin},
  title     = {Non-Invasive Brain-Computer Interfaces: State of the Art and Trends},
  journal   = {IEEE Reviews in Biomedical Engineering},
  volume    = {18},
  pages     = {26--49},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  year      = {2025},
  doi       = {10.1109/RBME.2024.3449790},
  issn      = {1941-1189},
  pmid      = {39186407},
  keywords  = {BCI,brain-computer interface,deep learning,electroencephalography,manifold classification,motor imagery,motor-related cortical potentials,neural decoding,neurotechnology,robotic arm,transfer learning},
  abstract  = {Brain-computer interface (BCI) is a rapidly evolving technology that has the potential to widely influence research, clinical and recreational use. Non-invasive BCI approaches are particularly common as they can impact a large number of participants safely and at a relatively low cost. Where traditional non-invasive BCIs were used for simple computer cursor tasks, it is now increasingly common for these systems to control robotic devices for complex tasks that may be useful in daily life. In this review, we provide an overview of the general BCI framework as well as the various methods that can be used to record neural activity, extract signals of interest, and decode brain states. In this context, we summarize the current state-of-the-art of non-invasive BCI research, focusing on trends in both the application of BCIs for controlling external devices and algorithm development to optimize their use. We also discuss various open-source BCI toolboxes and software, and describe their impact on the field at large.},
}

@article{Li2021,
  author        = {Li, Wei and Shi, Ping and Yu, Hongliu},
  title         = {Gesture Recognition Using Surface Electromyography and Deep Learning for Prostheses Hand: State-of-the-Art, Challenges, and Future},
  journal       = {Frontiers in Neuroscience},
  volume        = {15},
  pages         = {621885},
  publisher     = {Frontiers Media S.A.},
  month         = apr,
  year          = {2021},
  doi           = {10.3389/fnins.2021.621885},
  issn          = {1662-453X},
  keywords      = {convolutional neural network,deep learning,hand gesture recognition,pattern recognition,prosthesis hand,recurrent neural network,surface electromyography},
  abstract      = {Amputation of the upper limb brings heavy burden to amputees, reduces their quality of life, and limits their performance in activities of daily life. The realization of natural control for prosthetic hands is crucial to improving the quality of life of amputees. Surface electromyography (sEMG) signal is one of the most widely used biological signals for the prediction of upper limb motor intention, which is an essential element of the control systems of prosthetic hands. The conversion of sEMG signals into effective control signals often requires a lot of computational power and complex process. Existing commercial prosthetic hands can only provide natural control for very few active degrees of freedom. Deep learning (DL) has performed surprisingly well in the development of intelligent systems in recent years. The significant improvement of hardware equipment and the continuous emergence of large data sets of sEMG have also boosted the DL research in sEMG signal processing. DL can effectively improve the accuracy of sEMG pattern recognition and reduce the influence of interference factors. This paper analyzes the applicability and efficiency of DL in sEMG-based gesture recognition and reviews the key techniques of DL-based sEMG pattern recognition for the prosthetic hand, including signal acquisition, signal preprocessing, feature extraction, classification of patterns, post-processing, and performance evaluation. Finally, the current challenges and future prospects in clinical application of these techniques are outlined and discussed.},
  internal-note = {review: entry type changed from @misc -- this is a journal article (journal, volume, doi present); article number 621885 inferred from the DOI -- confirm},
}

@article{asd,
  author        = {Vojtech, Jennifer M. and Cler, Gabriel J. and Stepp, Cara E.},
  title         = {Prediction of optimal facial electromyographic sensor configurations for human-machine interface control},
  journal       = {IEEE Transactions on Neural Systems and Rehabilitation Engineering},
  doi           = {10.1109/TNSRE},
  keywords      = {electromyography,feature extraction,human-machine interfaces,myoelectric control},
  abstract      = {Surface electromyography (sEMG) is a promising computer access method for individuals with motor impairments. However, optimal sensor placement is a tedious task requiring trial-and-error by an expert, particularly when recording from facial musculature likely to be spared in individuals with neurological impairments. We sought to reduce sEMG sensor configuration complexity by using quantitative signal features extracted from a short calibration task to predict human-machine interface (HMI) performance. A cursor control system allowed individuals to activate specific sEMG-targeted muscles to control an onscreen cursor and navigate a target selection task. The task was repeated for a range of sensor configurations to elicit a range of signal qualities. Signal features were extracted from the calibration of each configuration and examined via a principle component factor analysis in order to predict HMI performance during subsequent tasks. Feature components most influenced by energy and complexity of the EMG signal and muscle activity between sensors were significantly predictive of HMI performance. However, configuration order had a greater effect on performance than the configurations, suggesting that non-experts can place sEMG sensors in the vicinity of usable muscle sites for computer access and healthy individuals will learn to efficiently control the HMI system.},
  internal-note = {review: DOI 10.1109/TNSRE is truncated and year/volume/pages are missing -- complete from IEEE Xplore; journal inferred from the DOI prefix (TNSRE) -- verify; "Index Terms" OCR junk removed from keywords; consider renaming the placeholder key "asd"},
}

@misc{wiki26,
  title         = {{OpenViBE} - {Wikipedia} --- en.wikipedia.org},
  url           = {https://en.wikipedia.org/wiki/OpenViBE},
  internal-note = {review: brace groups ({O}pen{V}ibe) were embedded inside the \url{} argument and corrupted the link -- URL rewritten without braces; single-letter brace protection in the title replaced by whole-word braces; spelling normalized to OpenViBE (the Wikipedia article title) -- verify; empty author/year removed},
}

@misc{bci2000,
  title         = {bci2000.org},
  url           = {https://www.bci2000.org/},
  internal-note = {review: moved URL from howpublished/\url{} into the url field; empty author/year fields removed},
}

@article{Saha2021,
  author        = {Saha, Simanto and Mamun, Khondaker A. and Ahmed, Khawza and Mostafa, Raqibul and Naik, Ganesh R. and Darvishi, Sam and Khandoker, Ahsan H. and Baumert, Mathias},
  title         = {Progress in Brain Computer Interface: Challenges and Opportunities},
  journal       = {Frontiers in Systems Neuroscience},
  volume        = {15},
  pages         = {578875},
  publisher     = {Frontiers Media S.A.},
  month         = feb,
  year          = {2021},
  doi           = {10.3389/fnsys.2021.578875},
  issn          = {1662-5137},
  keywords      = {brain computer interface,cognitive rehabilitation,electrical/hemodynamic brain signals,hybrid/multimodal BCI,neuroimaging techniques,neurosensors},
  abstract      = {Brain computer interfaces (BCI) provide a direct communication link between the brain and a computer or other external devices. They offer an extended degree of freedom either by strengthening or by substituting human peripheral working capacity and have potential applications in various fields such as rehabilitation, affective computing, robotics, gaming, and neuroscience. Significant research efforts on a global scale have delivered common platforms for technology standardization and help tackle highly complex and non-linear brain dynamics and related feature extraction and classification challenges. Time-variant psycho-neurophysiological fluctuations and their impact on brain signals impose another challenge for BCI researchers to transform the technology from laboratory experiments to plug-and-play daily life. This review summarizes state-of-the-art progress in the BCI field over the last decades and highlights critical challenges.},
  internal-note = {review: entry type changed from @misc -- this is a journal article; article number 578875 inferred from the DOI -- confirm},
}

@article{Matht2018,
  author        = {Mathôt, Sebastiaan},
  title         = {Pupillometry: Psychology, physiology, and function},
  journal       = {Journal of Cognition},
  volume        = {1},
  number        = {1},
  publisher     = {Ubiquity Press},
  year          = {2018},
  doi           = {10.5334/joc.18},
  issn          = {2514-4820},
  pmid          = {31517190},
  keywords      = {Eye movements,Orienting response,Psychosensory pupil response,Pupil light response,Pupil near response,Pupillometry},
  abstract      = {Pupils respond to three distinct kinds of stimuli: they constrict in response to brightness (the pupil light response), constrict in response to near fixation (the pupil near response), and dilate in response to increases in arousal and mental effort, either triggered by an external stimulus or spontaneously. In this review, I describe these three pupil responses, how they are related to high-level cognition, and the neural pathways that control them. I also discuss the functional relevance of pupil responses, that is, how pupil responses help us to better see the world. Although pupil responses likely serve many functions, not all of which are fully understood, one important function is to optimize vision either for acuity (small pupils see sharper) and depth of field (small pupils see sharply at a wider range of distances), or for sensitivity (large pupils are better able to detect faint stimuli); that is, pupils change their size to optimize vision for a particular situation. In many ways, pupil responses are similar to other eye movements, such as saccades and smooth pursuit: like these other eye movements, pupil responses have properties of both reflexive and voluntary action, and are part of active visual exploration.},
  internal-note = {review: entry type changed from @misc -- this is a journal article; non-standard "issue" field renamed to "number"},
}

@article{Qian2024,
  author    = {Qian, Kun and Arichi, Tomoki and Edwards, A. David and Hajnal, Joseph V.},
  title     = {Instant interaction driven adaptive gaze control interface},
  journal   = {Scientific Reports},
  volume    = {14},
  number    = {1},
  publisher = {Nature Research},
  month     = dec,
  year      = {2024},
  doi       = {10.1038/s41598-024-62365-9},
  issn      = {2045-2322},
  pmid      = {38778122},
  abstract  = {Gaze estimation is long been recognised as having potential as the basis for human-computer interaction (HCI) systems, but usability and robustness of performance remain challenging. This work focuses on systems in which there is a live video stream showing enough of the subjects face to track eye movements and some means to infer gaze location from detected eye features. Currently, systems generally require some form of calibration or set-up procedure at the start of each user session. Here we explore some simple strategies for enabling gaze based HCI to operate immediately and robustly without any explicit set-up tasks. We explore different choices of coordinate origin for combining extracted features from multiple subjects and the replacement of subject specific calibration by system initiation based on prior models. Results show that referencing all extracted features to local coordinate origins determined by subject start position enables robust immediate operation. Combining this approach with an adaptive gaze estimation model using an interactive user interface enables continuous operation with the 75th percentile gaze errors of 0.7∘, and maximum gaze errors of 1.7∘ during prospective testing. There constitute state-of-the-art results and have the potential to enable a new generation of reliable gaze based HCI systems.},
}

@article{Kaifosh2025,
  author        = {Patrick Kaifosh and Thomas R. Reardon and Danny Zlobinsky and Lei Zhou and Joseph Zhong and Kevin Zheng and Jiesi Zhao and Mingrui Zhang and Michael Zawadzki and Shuibenyang Yuan and Akshay Yembarwar and Chris Ye and Yuguan Xing and Steve Williams and Christopher Wiebe and Ezri White and Daniel Z. Wetmore and Ron J. Weiss and Claire L. Warriner and Richard Warren and Christy Warden and Zhuo Wang and Renxiong Wang and Megan Wang and Peter Walkington and Kyle Urquhart and Migmar Tsering and Julia Trabulsi and Blizelle Tormis and Calvin Tong and Anoushka Tiwari and Aman Tiwari and Tiberiu Tesileanu and Tugce Tasci and Aliqyan Tapia and Jesslyn Tannady and Vikram Tank and Ziyi Tang and David Sussillo and Ananya Suri and Swetha Suresh and Emanuel Strauss and Connor Stout and Fabio Stefanini and Adrian Spurr and Morgan Springer and Ivy Jiyoung Song and Seyyid Emre Sofuoglu and Jeff Smith and Viswanath Sivakumar and Gagandip Singh and Jeremy Simpson and Noor Siddiqi and Yasmin Siahpoosh and Olivia Shiah and Kevin Shi and Stephie Shen and Vandita Sharma and Philip Shamash and Anish Shah and Ami Shah and Nurettin Dorukhan Sergin and Mike Seltzer and Jeffrey Seely and David Schwab and Collin Schlager and Aichatou Savane and Roxanna Salim and Amir Sadoughi and James Sacra and Abby Russo and Sam Russell and Alexander Rudnicki and Ran Rubin and Likhon Roy and Ian Roth and Andrew Rosenkranz and Sebi Rolotti and Lain Warawao Nemo Mora y. Rivera and Jason Reisman and Jonathan Reid and Devin Reardon and Tanvi Ranjan and Julian Ramos Rojas and Jose Ramirez Fuentes and Sunaina Rajani and Shanil Puri and Brandon Pool and Eftychios Pnevmatikakis and Natalie Plotkin and Stephen M. Plaza and Adrien Piérard and Zavion Perez and Matias Perez and Diogo Peixoto and Sean Parker and Julia Paredes and Tejaswy Pailla and Adam M. Packer and Emmanuella Owolabi and Timothy M. Otchy and Chris Osborn and Garrick Orchard and Shay Ohayon and Luke O’Connor and Keven Nguyen and Jimson Ngeo and Romario Nelson and Ajay Nathan and Norris Nakagaki and Krunal Naik and Conner Muth and Suman Mulumudi and Anthony Moschella and Dano Morrison and Brock Morrell and Yonathan Morin and Sean Moore and Ricardo Pio Monti and Ilya Milyavskiy and David Miller and Josh Merel and Jorge Aurelio Menendez and Ashley McHugh and Stephen McAnearney and Nicolas Yvan Masse and Mirek Martincik and Najja Marshall and Jesse Marshall and Michael Mandel and Niru Maheswaranathan and Attila Maczak and Francisco Luongo and Roddy Louie and Kati London and Saar Lively and Yuxuan Liu and Yingru Liu and Jonathan Liao and Tiffanie Li and Juheui Amy Lee and Jin Hyung Lee and Jennifer Lee and Claire Lee and Andrzej Lawn and Christina Shabu Koshy and T. W. Koh and Steven Kober and Ta Chu Kao and Ajay Kamdar and Nishant Kakar and Nirag Kadakia and Na Young Jun and James Jaeyoon Jun and Brooke Juarez and Xiaodong Jiang and Connie Jiang and Vinay Jayaram and Shaker Islam and Hazel Hulet and Yue Hui and Rongqing Huang and Vivian Hsiung and Alex Hsieh and Stepan Hruda and Helen Hou and Lillian Hong and Kirak Hong and Daniel N. Hill and Carl Hewitt and Stav Hertz and Carlos Xavier Hernández and Katarina Hadjer and Austin Ha and Sinem Guven Kaya and Chetan Gupta and Ning Guo and Bryn Gunther and Lauren Grosberg and Alexandre Gramfort and Greg Gimler and Danny Giebisch and Vickram Gidwani and Sayan Ghosh and Minha Ghani and Nathalie Therese Helene Gayraud and Jonateal Garcia and Sida Gao and Jonathan Gamutan and Rupesh Gajurel and Jean Christophe Gagnon-Audet and Jianing Fu and Dustin Freeman and Emanuele Formento and Jake Fine and Randy Feliz and Ali Farshchian and Nariman Farsad and Mina Fahmi and Phina Enemuo and Zaina Edelson and Alan Du and Scott Draves and Bob DiMaiolo and Bruno De Araujo and Thomas J. Davidson and Nathan Danielson and William F. Cusack and Raven Cunningham and Mark Cramer and Jota Costa and James Connors and Jeremiah Chung and Won Chun and John Choi and Rohan Chitnis and Rudi Chiarito and Joshua Chester and Daniel Chenet and Chia Jung Chang and Adam J. Calhoun and Matt Butler and Joe Bravate and Maclyn Brandwein and Mario Bräcklein and Matthew Bracaglia and Don Bosley and Anu Bolarinwa and Luke Boegner and Sean Bittner and Joe Bienkowski and Rohin Bhasin and Adam Berenzweig and Wilman Pimentel Beltran and Philip Bard and Alexandre Barachant and Nicholas Baker and Christopher Baker and Francisco Ayerbe and Lana Awad and Mridu Atray and Rahul Arora and Sacha Arnoud and Chris Anderson and Brian D. Allen},
  title         = {A generic non-invasive neuromotor interface for human-computer interaction},
  journal       = {Nature},
  volume        = {645},
  number        = {8081},
  pages         = {702--711},
  publisher     = {Nature Research},
  month         = sep,
  year          = {2025},
  doi           = {10.1038/s41586-025-09255-w},
  issn          = {1476-4687},
  pmid          = {40702190},
  abstract      = {Since the advent of computing, humans have sought computer input technologies that are expressive, intuitive and universal. While diverse modalities have been developed, including keyboards, mice and touchscreens, they require interaction with a device that can be limiting, especially in on-the-go scenarios. Gesture-based systems use cameras or inertial sensors to avoid an intermediary device, but tend to perform well only for unobscured movements. By contrast, brain–computer or neuromotor interfaces that directly interface with the body’s electrical signalling have been imagined to solve the interface problem1, but high-bandwidth communication has been demonstrated only using invasive interfaces with bespoke decoders designed for single individuals2, 3–4. Here, we describe the development of a generic non-invasive neuromotor interface that enables computer input decoded from surface electromyography (sEMG). We developed a highly sensitive, easily donned sEMG wristband and a scalable infrastructure for collecting training data from thousands of consenting participants. Together, these data enabled us to develop generic sEMG decoding models that generalize across people. Test users demonstrate a closed-loop median performance of gesture decoding of 0.66 target acquisitions per second in a continuous navigation task, 0.88 gesture detections per second in a discrete-gesture task and handwriting at 20.9 words per minute. We demonstrate that the decoding performance of handwriting models can be further improved by 16% by personalizing sEMG decoding models. To our knowledge, this is the first high-bandwidth neuromotor interface with performant out-of-the-box generalization across people.},
  internal-note = {review: author names broken across lines ("Timothy M. / Otchy", "William F. / Cusack") rejoined; non-standard "issue" field renamed to "number"; author list left in First Last order -- verify no names were lost against the publisher record},
}

@article{Li2025,
  author        = {Li, Junwei and Wu, Kunlin and Xiao, Jingcheng and Chen, Tianyu and Yang, Xudong and Pan, Jie and Chen, Yu and Wang, Yifan},
  title         = {Flexible multichannel muscle impedance sensors for collaborative human-machine interfaces},
  journal       = {Science Advances},
  volume        = {11},
  pages         = {3359},
  url           = {https://www.science.org},
  year          = {2025},
  abstract      = {The demand for advanced human-machine interfaces (HMIs) highlights the need for accurate measurement of muscle contraction states. Traditional methods, such as electromyography, cannot measure passive muscle contraction states, while optical and ultrasonic techniques suffer from motion artifacts due to their rigid transducers. To overcome these limitations, we developed a flexible multichannel electrical impedance sensor (FMEIS) for non-invasive detection of skeletal muscle contractions. By applying an imperceptible current, the FMEIS can target multiple deep muscles by capturing electric-field ripples generated by their contractions. With an ultrathin profile (~220 micrometers), a low elastic modulus (212.8 kilopascals) closely matching human skin, and engineered adhesive sensor surfaces, the FMEIS conforms nicely to human skin with minimized motion artifacts. The FMEIS achieved high accuracy in both hand gesture recognition and muscle force prediction using machine learning models. With demonstrated performance across multiple HMI applications, including human-robot collaboration, exoskeleton control, and virtual surgery, FMEIS shows great potential for future real-time collaborative HMI systems.},
  internal-note = {review: entry type changed from @techReport (no institution; this is a Science Advances journal article); stray "E N G I N E E R I N G" section-header OCR junk removed from the title; journal name expanded from "Sci. Adv"; pages value 3359 looks like a truncated eID (likely eadw-prefixed) and the DOI is missing -- verify against the publisher record},
}
