#ifndef RUBY_ATOMIC_H                              /*-*-C++-*-vi:se ft=cpp:*/
#define RUBY_ATOMIC_H
/**
 * @file
 * @author     Ruby developers <ruby-core@ruby-lang.org>
 * @copyright  This file is a part of the programming language Ruby.
 *             Permission is hereby granted, to either redistribute and/or
 *             modify this file, provided that the conditions mentioned in the
 *             file COPYING are met.  Consult the file for details.
 * @warning    Symbols prefixed with either `RBIMPL` or `rbimpl` are
 *             implementation details.  Don't take them as canon.  They could
 *             rapidly appear then vanish.  The name (path) of this header file
 *             is also an implementation detail.  Do not expect it to persist
 *             at the place it is now.  Developers are free to move it anywhere
 *             anytime at will.
 * @note       To ruby-core: remember that this header can be possibly
 *             recursively included from extension libraries written in C++.
 *             Do not expect for instance `__VA_ARGS__` is always available.
 *             We assume C99 for ruby itself but we don't assume languages of
 *             extension libraries.  They could be written in C++98.
 * @brief      Atomic operations
 *
 * Basically, if we could assume either C11 or C++11, these macros are just
 * redundant.  Sadly we cannot.  We have to do them ourselves.
 */
#include "ruby/internal/config.h"

#ifdef STDC_HEADERS
# include <stddef.h>            /* size_t */
#endif

#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>         /* ssize_t */
#endif

#if RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
# pragma intrinsic(_InterlockedOr)
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
# include <atomic.h>
#endif

#include "ruby/assert.h"
#include "ruby/backward/2/limits.h"
#include "ruby/internal/attr/artificial.h"
#include "ruby/internal/attr/noalias.h"
#include "ruby/internal/attr/nonnull.h"
#include "ruby/internal/compiler_since.h"
#include "ruby/internal/cast.h"
#include "ruby/internal/value.h"
#include "ruby/internal/static_assert.h"
#include "ruby/internal/stdbool.h"

/*
 * Asserts that your environment supports more than one atomic type.  These
 * days systems tend to have such property (C11 was a standard of decades ago,
 * right?) but we still support older ones.
 */
#if defined(__DOXYGEN__) || defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
# define RUBY_ATOMIC_GENERIC_MACRO 1
#endif

/**
 * Type that is eligible for atomic operations.  Depending on your host
 * platform you might have more than one such type, but we choose one of them
 * anyways.
 */
#if defined(__DOXYGEN__)
using rb_atomic_t = std::atomic<unsigned>;
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
typedef unsigned int rb_atomic_t;
#elif defined(HAVE_GCC_SYNC_BUILTINS)
typedef unsigned int rb_atomic_t;
#elif defined(_WIN32)
# include <winsock2.h>          // to prevent macro redefinitions
# include <windows.h>           // for `LONG` and `Interlocked` functions
typedef LONG rb_atomic_t;
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
typedef unsigned int rb_atomic_t;
#else
# error No atomic operation found
#endif

/**
 * Atomically replaces the value pointed by `var` with the result of addition
 * of `val` to the old value of `var`.
 *
 * @param   var  A variable of ::rb_atomic_t.
 * @param   val  Value to add.
 * @return  What was stored in `var` before the addition.
 * @post    `var` holds `var + val`.
 */
#define RUBY_ATOMIC_FETCH_ADD(var, val) rbimpl_atomic_fetch_add(&(var), (val))
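
/*
 * Illustrative sketch (not part of this header's API): a shared event
 * counter built on #RUBY_ATOMIC_FETCH_ADD.  The names `event_count` and
 * `count_event` below are hypothetical.
 *
 *     static rb_atomic_t event_count;
 *
 *     static rb_atomic_t
 *     count_event(void)
 *     {
 *         // Returns the counter's value *before* this increment.
 *         return RUBY_ATOMIC_FETCH_ADD(event_count, 1);
 *     }
 */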

/**
 * Atomically replaces the value pointed by `var` with the result of
 * subtraction of `val` to the old value of `var`.
 *
 * @param   var  A variable of ::rb_atomic_t.
 * @param   val  Value to subtract.
 * @return  What was stored in `var` before the subtraction.
 * @post    `var` holds `var - val`.
 */
#define RUBY_ATOMIC_FETCH_SUB(var, val) rbimpl_atomic_fetch_sub(&(var), (val))

/**
 * Atomically replaces the value pointed by `var` with the result of
 * bitwise OR between `val` and the old value of `var`.
 *
 * @param   var  A variable of ::rb_atomic_t.
 * @param   val  Value to mix.
 * @return  void
 * @post    `var` holds `var | val`.
 * @note    For portability, this macro can return void.
 */
#define RUBY_ATOMIC_OR(var, val) rbimpl_atomic_or(&(var), (val))

/**
 * Atomically replaces the value pointed by `var` with `val`.  This is just an
 * assignment, but you can additionally know the previous value.
 *
 * @param   var  A variable of ::rb_atomic_t.
 * @param   val  Value to set.
 * @return  What was stored in `var` before the assignment.
 * @post    `var` holds `val`.
 */
#define RUBY_ATOMIC_EXCHANGE(var, val) rbimpl_atomic_exchange(&(var), (val))

/**
 * Atomic compare-and-swap.  This stores `newval` to `var` if and only if the
 * assignment changes the value of `var` from `oldval` to `newval`.  You can
 * detect whether the assignment happened or not using the return value.
 *
 * @param   var     A variable of ::rb_atomic_t.
 * @param   oldval  Expected value of `var` before the assignment.
 * @param   newval  What you want to store at `var`.
 * @retval  oldval     Successful assignment (`var` is now `newval`).
 * @retval  otherwise  Something else is at `var`; not updated.
 */
#define RUBY_ATOMIC_CAS(var, oldval, newval) \
    rbimpl_atomic_cas(&(var), (oldval), (newval))

/**
 * Atomic load.  This loads `var` with an atomic intrinsic and returns
 * its value.
 *
 * @param   var  A variable of ::rb_atomic_t.
 * @return  What was stored in `var`.
 */
#define RUBY_ATOMIC_LOAD(var) rbimpl_atomic_load(&(var))

/**
 * Identical to #RUBY_ATOMIC_EXCHANGE, except for the return type.
 *
 * @param   var  A variable of ::rb_atomic_t.
 * @param   val  Value to set.
 * @return  void
 * @post    `var` holds `val`.
 */
#define RUBY_ATOMIC_SET(var, val) rbimpl_atomic_set(&(var), (val))

/**
 * Identical to #RUBY_ATOMIC_FETCH_ADD, except for the return type.
 *
 * @param   var  A variable of ::rb_atomic_t.
 * @param   val  Value to add.
 * @return  void
 * @post    `var` holds `var + val`.
 */
#define RUBY_ATOMIC_ADD(var, val) rbimpl_atomic_add(&(var), (val))

/**
 * Identical to #RUBY_ATOMIC_FETCH_SUB, except for the return type.
 *
 * @param   var  A variable of ::rb_atomic_t.
 * @param   val  Value to subtract.
 * @return  void
 * @post    `var` holds `var - val`.
 */
#define RUBY_ATOMIC_SUB(var, val) rbimpl_atomic_sub(&(var), (val))

/**
 * Atomically increments the value pointed by `var`.
 *
 * @param   var  A variable of ::rb_atomic_t.
 * @return  void
 * @post    `var` holds `var + 1`.
 */
#define RUBY_ATOMIC_INC(var) rbimpl_atomic_inc(&(var))

/**
 * Atomically decrements the value pointed by `var`.
 *
 * @param   var  A variable of ::rb_atomic_t.
 * @return  void
 * @post    `var` holds `var - 1`.
 */
#define RUBY_ATOMIC_DEC(var) rbimpl_atomic_dec(&(var))

/**
 * Identical to #RUBY_ATOMIC_INC, except it expects its argument is `size_t`.
 * There are cases where ::rb_atomic_t is 32bit while `size_t` is 64bit.  This
 * should be used for size related operations to support such platforms.
 *
 * @param   var  A variable of `size_t`.
 * @return  void
 * @post    `var` holds `var + 1`.
 */
#define RUBY_ATOMIC_SIZE_INC(var) rbimpl_atomic_size_inc(&(var))
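
/*
 * Illustrative sketch: a compare-and-swap retry loop that atomically stores
 * the maximum of the current value and `val`, using #RUBY_ATOMIC_LOAD and
 * #RUBY_ATOMIC_CAS.  `watermark` and `update_watermark` are hypothetical
 * names.  #RUBY_ATOMIC_CAS returns the previous value, so the swap succeeded
 * iff that return value equals the expected old value.
 *
 *     static rb_atomic_t watermark;
 *
 *     static void
 *     update_watermark(rb_atomic_t val)
 *     {
 *         rb_atomic_t old = RUBY_ATOMIC_LOAD(watermark);
 *         while (old < val) {
 *             rb_atomic_t prev = RUBY_ATOMIC_CAS(watermark, old, val);
 *             if (prev == old) break;  // we won the race
 *             old = prev;              // lost; retry against the new value
 *         }
 *     }
 */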

/**
 * Identical to #RUBY_ATOMIC_DEC, except it expects its argument is `size_t`.
 * There are cases where ::rb_atomic_t is 32bit while `size_t` is 64bit.  This
 * should be used for size related operations to support such platforms.
 *
 * @param   var  A variable of `size_t`.
 * @return  void
 * @post    `var` holds `var - 1`.
 */
#define RUBY_ATOMIC_SIZE_DEC(var) rbimpl_atomic_size_dec(&(var))

/**
 * Identical to #RUBY_ATOMIC_EXCHANGE, except it expects its arguments are
 * `size_t`.  There are cases where ::rb_atomic_t is 32bit while `size_t` is
 * 64bit.  This should be used for size related operations to support such
 * platforms.
 *
 * @param   var  A variable of `size_t`.
 * @param   val  Value to set.
 * @return  What was stored in `var` before the assignment.
 * @post    `var` holds `val`.
 */
#define RUBY_ATOMIC_SIZE_EXCHANGE(var, val) \
    rbimpl_atomic_size_exchange(&(var), (val))

/**
 * Identical to #RUBY_ATOMIC_CAS, except it expects its arguments are `size_t`.
 * There are cases where ::rb_atomic_t is 32bit while `size_t` is 64bit.  This
 * should be used for size related operations to support such platforms.
 *
 * @param   var     A variable of `size_t`.
 * @param   oldval  Expected value of `var` before the assignment.
 * @param   newval  What you want to store at `var`.
 * @retval  oldval     Successful assignment (`var` is now `newval`).
 * @retval  otherwise  Something else is at `var`; not updated.
 */
#define RUBY_ATOMIC_SIZE_CAS(var, oldval, newval) \
    rbimpl_atomic_size_cas(&(var), (oldval), (newval))

/**
 * Identical to #RUBY_ATOMIC_ADD, except it expects its arguments are `size_t`.
 * There are cases where ::rb_atomic_t is 32bit while `size_t` is 64bit.  This
 * should be used for size related operations to support such platforms.
 *
 * @param   var  A variable of `size_t`.
 * @param   val  Value to add.
 * @return  void
 * @post    `var` holds `var + val`.
 */
#define RUBY_ATOMIC_SIZE_ADD(var, val) rbimpl_atomic_size_add(&(var), (val))

/**
 * Identical to #RUBY_ATOMIC_SUB, except it expects its arguments are `size_t`.
 * There are cases where ::rb_atomic_t is 32bit while `size_t` is 64bit.  This
 * should be used for size related operations to support such platforms.
 *
 * @param   var  A variable of `size_t`.
 * @param   val  Value to subtract.
 * @return  void
 * @post    `var` holds `var - val`.
 */
#define RUBY_ATOMIC_SIZE_SUB(var, val) rbimpl_atomic_size_sub(&(var), (val))

/**
 * Identical to #RUBY_ATOMIC_EXCHANGE, except it expects its arguments are
 * `void*`.  There are cases where ::rb_atomic_t is 32bit while `void*` is
 * 64bit.  This should be used for pointer related operations to support such
 * platforms.
 *
 * @param   var  A variable of `void *`.
 * @param   val  Value to set.
 * @return  What was stored in `var` before the assignment.
 * @post    `var` holds `val`.
 *
 * @internal
 *
 * :FIXME: this `(void*)` cast is evil!  However `void*` is incompatible with
 * some pointers, most notably function pointers.
 */
#define RUBY_ATOMIC_PTR_EXCHANGE(var, val) \
    RBIMPL_CAST(rbimpl_atomic_ptr_exchange((void **)&(var), (void *)val))

/**
 * Identical to #RUBY_ATOMIC_LOAD, except it expects its arguments are `void*`.
 * There are cases where ::rb_atomic_t is 32bit while `void*` is 64bit.  This
 * should be used for pointer related operations to support such platforms.
 *
 * @param   var  A variable of `void*`.
 * @return  The value of `var` (without tearing).
 */
#define RUBY_ATOMIC_PTR_LOAD(var) \
    RBIMPL_CAST(rbimpl_atomic_ptr_load((void **)&var))
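
/*
 * Illustrative sketch: tracking a byte count that can exceed 32 bits, which
 * is the reason the `size_t` flavour exists at all.  The names `heap_bytes`,
 * `account_alloc` and `account_free` are hypothetical.
 *
 *     static size_t heap_bytes;
 *
 *     static void
 *     account_alloc(size_t n)
 *     {
 *         RUBY_ATOMIC_SIZE_ADD(heap_bytes, n);  // atomic even where size_t is 64bit
 *     }
 *
 *     static void
 *     account_free(size_t n)
 *     {
 *         RUBY_ATOMIC_SIZE_SUB(heap_bytes, n);
 *     }
 */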

/**
 * Identical to #RUBY_ATOMIC_CAS, except it expects its arguments are `void*`.
 * There are cases where ::rb_atomic_t is 32bit while `void*` is 64bit.  This
 * should be used for pointer related operations to support such platforms.
 *
 * @param   var     A variable of `void*`.
 * @param   oldval  Expected value of `var` before the assignment.
 * @param   newval  What you want to store at `var`.
 * @retval  oldval     Successful assignment (`var` is now `newval`).
 * @retval  otherwise  Something else is at `var`; not updated.
 */
#define RUBY_ATOMIC_PTR_CAS(var, oldval, newval) \
    RBIMPL_CAST(rbimpl_atomic_ptr_cas((void **)&(var), (void *)(oldval), (void *)(newval)))

/**
 * Identical to #RUBY_ATOMIC_EXCHANGE, except it expects its arguments are
 * ::VALUE.  There are cases where ::rb_atomic_t is 32bit while ::VALUE is
 * 64bit.  This should be used for ::VALUE related operations to support such
 * platforms.
 *
 * @param   var  A variable of ::VALUE.
 * @param   val  Value to set.
 * @return  What was stored in `var` before the assignment.
 * @post    `var` holds `val`.
 */
#define RUBY_ATOMIC_VALUE_EXCHANGE(var, val) \
    rbimpl_atomic_value_exchange(&(var), (val))

/**
 * Identical to #RUBY_ATOMIC_CAS, except it expects its arguments are ::VALUE.
 * There are cases where ::rb_atomic_t is 32bit while ::VALUE is 64bit.  This
 * should be used for ::VALUE related operations to support such platforms.
 *
 * @param   var     A variable of ::VALUE.
 * @param   oldval  Expected value of `var` before the assignment.
 * @param   newval  What you want to store at `var`.
 * @retval  oldval     Successful assignment (`var` is now `newval`).
 * @retval  otherwise  Something else is at `var`; not updated.
 */
#define RUBY_ATOMIC_VALUE_CAS(var, oldval, newval) \
    rbimpl_atomic_value_cas(&(var), (oldval), (newval))
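
/*
 * Illustrative sketch: race-tolerant one-time initialization of a pointer
 * slot via #RUBY_ATOMIC_PTR_CAS.  `cache`, `make_cache` and `free_cache` are
 * hypothetical.  Because the CAS returns the previous value, a NULL return
 * means our pointer got installed; anything else means another thread won.
 *
 *     static void *cache;
 *
 *     static void *
 *     get_cache(void)
 *     {
 *         void *p = RUBY_ATOMIC_PTR_LOAD(cache);
 *         if (!p) {
 *             void *fresh = make_cache();
 *             p = RUBY_ATOMIC_PTR_CAS(cache, NULL, fresh);
 *             if (p == NULL) p = fresh;         // we installed it
 *             else           free_cache(fresh); // somebody beat us; discard ours
 *         }
 *         return p;
 *     }
 */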

/** @cond INTERNAL_MACRO */
RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline rb_atomic_t
rbimpl_atomic_fetch_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_fetch_and_add(ptr, val);
#elif defined(_WIN32)
    return InterlockedExchangeAdd(ptr, val);
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    /*
     * `atomic_add_int_nv` takes its second argument as `int`!  Meanwhile our
     * `rb_atomic_t` is unsigned.  We cannot pass `val` as-is.  We have to
     * manually check integer overflow.
     */
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    return atomic_add_int_nv(ptr, val) - val;
#else
# error Unsupported platform.
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
rbimpl_atomic_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    /*
     * GCC on amd64 is smart enough to detect this `__atomic_add_fetch`'s
     * return value is not used, then compiles it into single `LOCK ADD`
     * instruction.
     */
    __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_add_and_fetch(ptr, val);
#elif defined(_WIN32)
    /*
     * `InterlockedExchangeAdd` is `LOCK XADD`.  It seems there also is
     * `_InterlockedAdd` intrinsic in ARM Windows but not for x86?  Sticking to
     * `InterlockedExchangeAdd` for better portability.
     */
    InterlockedExchangeAdd(ptr, val);
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    /* Ditto for `atomic_add_int_nv`. */
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    atomic_add_int(ptr, val);
#else
# error Unsupported platform.
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
rbimpl_atomic_size_add(volatile size_t *ptr, size_t val)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_add_and_fetch(ptr, val);
#elif defined(_WIN64)
    /* Ditto for `InterlockedExchangeAdd`. */
    InterlockedExchangeAdd64(ptr, val);
#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    /* Ditto for `atomic_add_int_nv`. */
    RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
    atomic_add_long(ptr, val);
#else
    RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    rbimpl_atomic_add(tmp, val);
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
rbimpl_atomic_inc(volatile rb_atomic_t *ptr)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_add(ptr, 1);
#elif defined(_WIN32)
    InterlockedIncrement(ptr);
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    atomic_inc_uint(ptr);
#else
    rbimpl_atomic_add(ptr, 1);
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
rbimpl_atomic_size_inc(volatile size_t *ptr)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_size_add(ptr, 1);
#elif defined(_WIN64)
    InterlockedIncrement64(ptr);
#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    atomic_inc_ulong(ptr);
#else
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    rbimpl_atomic_size_add(ptr, 1);
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline rb_atomic_t
rbimpl_atomic_fetch_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_fetch_sub(ptr, val, __ATOMIC_SEQ_CST);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_fetch_and_sub(ptr, val);
#elif defined(_WIN32)
    /* rb_atomic_t is signed here!  Safe to do `-val`. */
    return InterlockedExchangeAdd(ptr, -val);
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    /* Ditto for `rbimpl_atomic_fetch_add`. */
    const signed neg = -1;
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    return atomic_add_int_nv(ptr, neg * val) + val;
#else
# error Unsupported platform.
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
rbimpl_atomic_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_sub_and_fetch(ptr, val);
#elif defined(_WIN32)
    InterlockedExchangeAdd(ptr, -val);
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    const signed neg = -1;
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    atomic_add_int(ptr, neg * val);
#else
# error Unsupported platform.
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
rbimpl_atomic_size_sub(volatile size_t *ptr, size_t val)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_sub_and_fetch(ptr, val);
#elif defined(_WIN64)
    const ssize_t neg = -1;
    InterlockedExchangeAdd64(ptr, neg * val);
#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    const signed neg = -1;
    RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
    atomic_add_long(ptr, neg * val);
#else
    RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    rbimpl_atomic_sub(tmp, val);
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
rbimpl_atomic_dec(volatile rb_atomic_t *ptr)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_sub(ptr, 1);
#elif defined(_WIN32)
    InterlockedDecrement(ptr);
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    atomic_dec_uint(ptr);
#else
    rbimpl_atomic_sub(ptr, 1);
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
rbimpl_atomic_size_dec(volatile size_t *ptr)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_size_sub(ptr, 1);
#elif defined(_WIN64)
    InterlockedDecrement64(ptr);
#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    atomic_dec_ulong(ptr);
#else
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    rbimpl_atomic_size_sub(ptr, 1);
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
rbimpl_atomic_or(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_or_fetch(ptr, val, __ATOMIC_SEQ_CST);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_or_and_fetch(ptr, val);
#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
    _InterlockedOr(ptr, val);
#elif defined(_WIN32) && defined(__GNUC__)
    /* This was for old MinGW.  Maybe not needed any longer? */
    __asm__(
        "lock\n\t"
        "orl\t%1, %0"
        : "=m"(ptr)
        : "Ir"(val));
#elif defined(_WIN32) && defined(_M_IX86)
    __asm mov eax, ptr;
    __asm mov ecx, val;
    __asm lock or [eax], ecx;
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    atomic_or_uint(ptr, val);
#else
# error Unsupported platform.
#endif
}

/* Nobody uses this but for theoretical backwards compatibility... */
#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
static inline rb_atomic_t
rb_w32_atomic_or(volatile rb_atomic_t *var, rb_atomic_t val)
{
    return rbimpl_atomic_or(var, val);
}
#endif

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline rb_atomic_t
rbimpl_atomic_exchange(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_lock_test_and_set(ptr, val);
#elif defined(_WIN32)
    return InterlockedExchange(ptr, val);
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    return atomic_swap_uint(ptr, val);
#else
# error Unsupported platform.
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline size_t
rbimpl_atomic_size_exchange(volatile size_t *ptr, size_t val)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_lock_test_and_set(ptr, val);
#elif defined(_WIN64)
    return InterlockedExchange64(ptr, val);
#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    return atomic_swap_ulong(ptr, val);
#else
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    const rb_atomic_t ret = rbimpl_atomic_exchange(tmp, val);
    return RBIMPL_CAST((size_t)ret);
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void *
rbimpl_atomic_ptr_exchange(void *volatile *ptr, const void *val)
{
#if 0
#elif defined(InterlockedExchangePointer)
    /* const_cast */
    PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
    PVOID pval = RBIMPL_CAST((PVOID)val);
    return InterlockedExchangePointer(pptr, pval);
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    return atomic_swap_ptr(ptr, RBIMPL_CAST((void *)val));
#else
    RBIMPL_STATIC_ASSERT(sizeof_voidp, sizeof *ptr == sizeof(size_t));

    const size_t sval = RBIMPL_CAST((size_t)val);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_exchange(sptr, sval);
    return RBIMPL_CAST((void *)sret);
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline VALUE
rbimpl_atomic_value_exchange(volatile VALUE *ptr, VALUE val)
{
    RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));

    const size_t sval = RBIMPL_CAST((size_t)val);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_exchange(sptr, sval);
    return RBIMPL_CAST((VALUE)sret);
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline rb_atomic_t
rbimpl_atomic_load(volatile rb_atomic_t *ptr)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
#else
    return rbimpl_atomic_fetch_add(ptr, 0);
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void
rbimpl_atomic_set(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST);
#else
    /* Maybe std::atomic<rb_atomic_t>::store can be faster? */
    rbimpl_atomic_exchange(ptr, val);
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline rb_atomic_t
rbimpl_atomic_cas(volatile rb_atomic_t *ptr, rb_atomic_t oldval, rb_atomic_t newval)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_compare_exchange_n(
        ptr, &oldval, newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return oldval;
#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_val_compare_and_swap(ptr, oldval, newval);
#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
    return InterlockedCompareExchange(ptr, newval, oldval);
#elif defined(_WIN32)
    PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
    PVOID pold = RBIMPL_CAST((PVOID)oldval);
    PVOID pnew = RBIMPL_CAST((PVOID)newval);
    PVOID pret = InterlockedCompareExchange(pptr, pnew, pold);
    return RBIMPL_CAST((rb_atomic_t)pret);
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    return atomic_cas_uint(ptr, oldval, newval);
#else
# error Unsupported platform.
#endif
}

/* Nobody uses this but for theoretical backwards compatibility... */
#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
static inline rb_atomic_t
rb_w32_atomic_cas(volatile rb_atomic_t *var, rb_atomic_t oldval, rb_atomic_t newval)
{
    return rbimpl_atomic_cas(var, oldval, newval);
}
#endif

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline size_t
rbimpl_atomic_size_cas(volatile size_t *ptr, size_t oldval, size_t newval)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_compare_exchange_n(
        ptr, &oldval, newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return oldval;
#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_val_compare_and_swap(ptr, oldval, newval);
#elif defined(_WIN64)
    return InterlockedCompareExchange64(ptr, newval, oldval);
#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    return atomic_cas_ulong(ptr, oldval, newval);
#else
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    return rbimpl_atomic_cas(tmp, oldval, newval);
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void *
rbimpl_atomic_ptr_cas(void **ptr, const void *oldval, const void *newval)
{
#if 0
#elif defined(InterlockedExchangePointer)
    /* ... Can we say that InterlockedCompareExchangePtr surely exists when
     * InterlockedExchangePointer is defined?  Seems so but...? */
    PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
    PVOID pold = RBIMPL_CAST((PVOID)oldval);
    PVOID pnew = RBIMPL_CAST((PVOID)newval);
    return InterlockedCompareExchangePointer(pptr, pnew, pold);
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    void *pold = RBIMPL_CAST((void *)oldval);
    void *pnew = RBIMPL_CAST((void *)newval);
    return atomic_cas_ptr(ptr, pold, pnew);
#else
    RBIMPL_STATIC_ASSERT(sizeof_voidp, sizeof *ptr == sizeof(size_t));

    const size_t snew = RBIMPL_CAST((size_t)newval);
    const size_t sold = RBIMPL_CAST((size_t)oldval);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew);
    return RBIMPL_CAST((void *)sret);
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline void *
rbimpl_atomic_ptr_load(void **ptr)
{
#if 0
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
#else
    void *val = *ptr;
    return rbimpl_atomic_ptr_cas(ptr, val, val);
#endif
}

RBIMPL_ATTR_ARTIFICIAL()
RBIMPL_ATTR_NOALIAS()
RBIMPL_ATTR_NONNULL((1))
static inline VALUE
rbimpl_atomic_value_cas(volatile VALUE *ptr, VALUE oldval, VALUE newval)
{
    RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));

    const size_t snew = RBIMPL_CAST((size_t)newval);
    const size_t sold = RBIMPL_CAST((size_t)oldval);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew);
    return RBIMPL_CAST((VALUE)sret);
}
/** @endcond */
#endif /* RUBY_ATOMIC_H */