author    Mon P Wang <wangmp@apple.com>    2008-07-30 04:36:53 +0000
committer Mon P Wang <wangmp@apple.com>    2008-07-30 04:36:53 +0000
commit    e3b3a7241c01f26613694e53b26b01abf764ddfc (patch)
tree      37ed9c9fbdaaa1209eb34379b2a0a06941b1c735 /docs
parent    1fbffe0cef92ee284f44cfef3ce0db7ec7d86f18 (diff)
Added support for overloading intrinsics (atomics) based on pointers
to different address spaces. This alters the naming scheme for those
intrinsics, e.g., atomic.load.add.i32 => atomic.load.add.i32.p0i32

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@54195 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'docs')
-rw-r--r--  docs/LangRef.html  147
1 file changed, 75 insertions, 72 deletions
diff --git a/docs/LangRef.html b/docs/LangRef.html
index 1ee7f2972e..06b886897f 100644
--- a/docs/LangRef.html
+++ b/docs/LangRef.html
@@ -5788,14 +5788,15 @@ i1 &lt;device&gt; )
<div class="doc_text">
<h5>Syntax:</h5>
<p>
- This is an overloaded intrinsic. You can use <tt>llvm.atomic.cmp.swap</tt> on any
- integer bit width. Not all targets support all bit widths however.</p>
+ This is an overloaded intrinsic. You can use <tt>llvm.atomic.cmp.swap</tt> on
+ any integer bit width and for different address spaces. Not all targets
+ support all bit widths however.</p>
<pre>
-declare i8 @llvm.atomic.cmp.swap.i8( i8* &lt;ptr&gt;, i8 &lt;cmp&gt;, i8 &lt;val&gt; )
-declare i16 @llvm.atomic.cmp.swap.i16( i16* &lt;ptr&gt;, i16 &lt;cmp&gt;, i16 &lt;val&gt; )
-declare i32 @llvm.atomic.cmp.swap.i32( i32* &lt;ptr&gt;, i32 &lt;cmp&gt;, i32 &lt;val&gt; )
-declare i64 @llvm.atomic.cmp.swap.i64( i64* &lt;ptr&gt;, i64 &lt;cmp&gt;, i64 &lt;val&gt; )
+declare i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;cmp&gt;, i8 &lt;val&gt; )
+declare i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;cmp&gt;, i16 &lt;val&gt; )
+declare i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;cmp&gt;, i32 &lt;val&gt; )
+declare i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;cmp&gt;, i64 &lt;val&gt; )
</pre>
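+<p>
+  The new suffix encodes the address space of the pointer operand.  As an
+  illustrative sketch (which address spaces exist is target-specific; address
+  space 1 is assumed here), the <tt>i32</tt> variant for a pointer in address
+  space 1 would be:</p>
+<pre>
+declare i32 @llvm.atomic.cmp.swap.i32.p1i32( i32 addrspace(1)* &lt;ptr&gt;, i32 &lt;cmp&gt;, i32 &lt;val&gt; )
+</pre>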
<h5>Overview:</h5>
@@ -5827,13 +5828,13 @@ declare i64 @llvm.atomic.cmp.swap.i64( i64* &lt;ptr&gt;, i64 &lt;cmp&gt;, i64 &lt;val&gt; )
store i32 4, %ptr
%val1 = add i32 4, 4
-%result1 = call i32 @llvm.atomic.cmp.swap.i32( i32* %ptr, i32 4, %val1 )
+%result1 = call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %ptr, i32 4, i32 %val1 )
<i>; yields {i32}:result1 = 4</i>
%stored1 = icmp eq i32 %result1, 4 <i>; yields {i1}:stored1 = true</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 8</i>
%val2 = add i32 1, 1
-%result2 = call i32 @llvm.atomic.cmp.swap.i32( i32* %ptr, i32 5, %val2 )
+%result2 = call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %ptr, i32 5, i32 %val2 )
<i>; yields {i32}:result2 = 8</i>
%stored2 = icmp eq i32 %result2, 5 <i>; yields {i1}:stored2 = false</i>
@@ -5852,10 +5853,10 @@ declare i64 @llvm.atomic.cmp.swap.i64( i64* &lt;ptr&gt;, i64 &lt;cmp&gt;, i64 &lt;val&gt; )
- This is an overloaded intrinsic. You can use <tt>llvm.atomic.swap</tt> on any
- integer bit width. Not all targets support all bit widths however.</p>
+ This is an overloaded intrinsic. You can use <tt>llvm.atomic.swap</tt> on
+ any integer bit width and for different address spaces. Not all targets
+ support all bit widths however.</p>
<pre>
-declare i8 @llvm.atomic.swap.i8( i8* &lt;ptr&gt;, i8 &lt;val&gt; )
-declare i16 @llvm.atomic.swap.i16( i16* &lt;ptr&gt;, i16 &lt;val&gt; )
-declare i32 @llvm.atomic.swap.i32( i32* &lt;ptr&gt;, i32 &lt;val&gt; )
-declare i64 @llvm.atomic.swap.i64( i64* &lt;ptr&gt;, i64 &lt;val&gt; )
+declare i8 @llvm.atomic.swap.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;val&gt; )
+declare i16 @llvm.atomic.swap.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;val&gt; )
+declare i32 @llvm.atomic.swap.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;val&gt; )
+declare i64 @llvm.atomic.swap.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;val&gt; )
</pre>
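+<p>
+  The same scheme extends to other address spaces; assuming a target that
+  defines an address space 1, for example:</p>
+<pre>
+declare i16 @llvm.atomic.swap.i16.p1i16( i16 addrspace(1)* &lt;ptr&gt;, i16 &lt;val&gt; )
+</pre>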
<h5>Overview:</h5>
@@ -5886,13 +5887,13 @@ declare i64 @llvm.atomic.swap.i64( i64* &lt;ptr&gt;, i64 &lt;val&gt; )
store i32 4, %ptr
%val1 = add i32 4, 4
-%result1 = call i32 @llvm.atomic.swap.i32( i32* %ptr, i32 %val1 )
+%result1 = call i32 @llvm.atomic.swap.i32.p0i32( i32* %ptr, i32 %val1 )
<i>; yields {i32}:result1 = 4</i>
%stored1 = icmp eq i32 %result1, 4 <i>; yields {i1}:stored1 = true</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 8</i>
%val2 = add i32 1, 1
-%result2 = call i32 @llvm.atomic.swap.i32( i32* %ptr, i32 %val2 )
+%result2 = call i32 @llvm.atomic.swap.i32.p0i32( i32* %ptr, i32 %val2 )
<i>; yields {i32}:result2 = 8</i>
%stored2 = icmp eq i32 %result2, 8 <i>; yields {i1}:stored2 = true</i>
@@ -5911,10 +5912,10 @@ declare i64 @llvm.atomic.swap.i64( i64* &lt;ptr&gt;, i64 &lt;val&gt; )
- This is an overloaded intrinsic. You can use <tt>llvm.atomic.load.add</tt> on any
- integer bit width. Not all targets support all bit widths however.</p>
+ This is an overloaded intrinsic. You can use <tt>llvm.atomic.load.add</tt> on
+ any integer bit width and for different address spaces. Not all targets
+ support all bit widths however.</p>
<pre>
-declare i8 @llvm.atomic.load.add.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
-declare i16 @llvm.atomic.load.add.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
-declare i32 @llvm.atomic.load.add.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
-declare i64 @llvm.atomic.load.add.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+declare i8 @llvm.atomic.load.add.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.add.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.add.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.add.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
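+<p>
+  A variant for another address space changes only the suffix and the pointer
+  type; a sketch assuming an address space 1:</p>
+<pre>
+declare i32 @llvm.atomic.load.add.i32.p1i32( i32 addrspace(1)* &lt;ptr&gt;, i32 &lt;delta&gt; )
+</pre>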
<h5>Overview:</h5>
@@ -5941,11 +5942,11 @@ declare i64 @llvm.atomic.load.add.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
<pre>
%ptr = malloc i32
store i32 4, %ptr
-%result1 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 4 )
+%result1 = call i32 @llvm.atomic.load.add.i32.p0i32( i32* %ptr, i32 4 )
<i>; yields {i32}:result1 = 4</i>
-%result2 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 2 )
+%result2 = call i32 @llvm.atomic.load.add.i32.p0i32( i32* %ptr, i32 2 )
<i>; yields {i32}:result2 = 8</i>
-%result3 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 5 )
+%result3 = call i32 @llvm.atomic.load.add.i32.p0i32( i32* %ptr, i32 5 )
<i>; yields {i32}:result3 = 10</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 15</i>
</pre>
@@ -5960,12 +5961,13 @@ declare i64 @llvm.atomic.load.add.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
<h5>Syntax:</h5>
<p>
This is an overloaded intrinsic. You can use <tt>llvm.atomic.load.sub</tt> on
- any integer bit width. Not all targets support all bit widths however.</p>
+ any integer bit width and for different address spaces. Not all targets
+ support all bit widths however.</p>
<pre>
-declare i8 @llvm.atomic.load.sub.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
-declare i16 @llvm.atomic.load.sub.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
-declare i32 @llvm.atomic.load.sub.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
-declare i64 @llvm.atomic.load.sub.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+declare i8 @llvm.atomic.load.sub.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.sub.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.sub.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.sub.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
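+<p>
+  A minimal sketch of a call through a pointer in a non-default address space
+  (address space 1 and the global <tt>@gv</tt> are assumed for illustration):</p>
+<pre>
+@gv = addrspace(1) global i32 8
+%old = call i32 @llvm.atomic.load.sub.i32.p1i32( i32 addrspace(1)* @gv, i32 2 )
+<i>; yields {i32}:old = 8; @gv now holds 6</i>
+</pre>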
<h5>Overview:</h5>
@@ -5992,11 +5994,11 @@ declare i64 @llvm.atomic.load.sub.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
<pre>
%ptr = malloc i32
store i32 8, %ptr
-%result1 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 4 )
+%result1 = call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %ptr, i32 4 )
<i>; yields {i32}:result1 = 8</i>
-%result2 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 2 )
+%result2 = call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %ptr, i32 2 )
<i>; yields {i32}:result2 = 4</i>
-%result3 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 5 )
+%result3 = call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %ptr, i32 5 )
<i>; yields {i32}:result3 = 2</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = -3</i>
</pre>
@@ -6015,37 +6017,37 @@ declare i64 @llvm.atomic.load.sub.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
<p>
- These are overloaded intrinsics. You can use <tt>llvm.atomic.load_and</tt>,
- <tt>llvm.atomic.load_nand</tt>, <tt>llvm.atomic.load_or</tt>, and
- <tt>llvm.atomic.load_xor</tt> on any integer bit width. Not all targets
- support all bit widths however.</p>
+ These are overloaded intrinsics. You can use <tt>llvm.atomic.load.and</tt>,
+ <tt>llvm.atomic.load.nand</tt>, <tt>llvm.atomic.load.or</tt>, and
+ <tt>llvm.atomic.load.xor</tt> on any integer bit width and for different
+ address spaces. Not all targets support all bit widths however.</p>
<pre>
-declare i8 @llvm.atomic.load.and.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
-declare i16 @llvm.atomic.load.and.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
-declare i32 @llvm.atomic.load.and.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
-declare i64 @llvm.atomic.load.and.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+declare i8 @llvm.atomic.load.and.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.and.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.and.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.and.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
<pre>
-declare i8 @llvm.atomic.load.or.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
-declare i16 @llvm.atomic.load.or.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
-declare i32 @llvm.atomic.load.or.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
-declare i64 @llvm.atomic.load.or.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+declare i8 @llvm.atomic.load.or.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.or.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.or.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.or.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
<pre>
-declare i8 @llvm.atomic.load.nand.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
-declare i16 @llvm.atomic.load.nand.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
-declare i32 @llvm.atomic.load.nand.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
-declare i64 @llvm.atomic.load.nand.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+declare i8 @llvm.atomic.load.nand.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.nand.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.nand.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.nand.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
<pre>
-declare i8 @llvm.atomic.load.xor.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
-declare i16 @llvm.atomic.load.xor.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
-declare i32 @llvm.atomic.load.xor.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
-declare i64 @llvm.atomic.load.xor.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+declare i8 @llvm.atomic.load.xor.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.xor.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.xor.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.xor.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
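+<p>
+  Each of these follows the same mangling; e.g., a sketch for
+  <tt>llvm.atomic.load.nand</tt> on an <tt>i8</tt> pointer in an assumed
+  address space 1:</p>
+<pre>
+declare i8 @llvm.atomic.load.nand.i8.p1i8( i8 addrspace(1)* &lt;ptr&gt;, i8 &lt;delta&gt; )
+</pre>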
<h5>Overview:</h5>
@@ -6074,13 +6076,13 @@ declare i64 @llvm.atomic.load.xor.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
<pre>
%ptr = malloc i32
store i32 0x0F0F, %ptr
-%result0 = call i32 @llvm.atomic.load.nand.i32( i32* %ptr, i32 0xFF )
+%result0 = call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %ptr, i32 0xFF )
<i>; yields {i32}:result0 = 0x0F0F</i>
-%result1 = call i32 @llvm.atomic.load.and.i32( i32* %ptr, i32 0xFF )
+%result1 = call i32 @llvm.atomic.load.and.i32.p0i32( i32* %ptr, i32 0xFF )
<i>; yields {i32}:result1 = 0xFFFFFFF0</i>
-%result2 = call i32 @llvm.atomic.load.or.i32( i32* %ptr, i32 0F )
+%result2 = call i32 @llvm.atomic.load.or.i32.p0i32( i32* %ptr, i32 0xF )
<i>; yields {i32}:result2 = 0xF0</i>
-%result3 = call i32 @llvm.atomic.load.xor.i32( i32* %ptr, i32 0F )
+%result3 = call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %ptr, i32 0xF )
<i>; yields {i32}:result3 = 0xFF</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 0xF0</i>
</pre>
@@ -6100,37 +6102,38 @@ declare i64 @llvm.atomic.load.xor.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
<p>
- These are overloaded intrinsics. You can use <tt>llvm.atomic.load_max</tt>,
- <tt>llvm.atomic.load_min</tt>, <tt>llvm.atomic.load_umax</tt>, and
- <tt>llvm.atomic.load_umin</tt> on any integer bit width. Not all targets
- support all bit widths however.</p>
+ These are overloaded intrinsics. You can use <tt>llvm.atomic.load.max</tt>,
+ <tt>llvm.atomic.load.min</tt>, <tt>llvm.atomic.load.umax</tt>, and
+ <tt>llvm.atomic.load.umin</tt> on any integer bit width and for different
+ address spaces. Not all targets support all bit widths however.</p>
<pre>
-declare i8 @llvm.atomic.load.max.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
-declare i16 @llvm.atomic.load.max.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
-declare i32 @llvm.atomic.load.max.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
-declare i64 @llvm.atomic.load.max.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+declare i8 @llvm.atomic.load.max.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.max.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.max.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.max.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
<pre>
-declare i8 @llvm.atomic.load.min.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
-declare i16 @llvm.atomic.load.min.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
-declare i32 @llvm.atomic.load.min.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
-declare i64 @llvm.atomic.load.min.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+declare i8 @llvm.atomic.load.min.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.min.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.min.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.min.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
<pre>
-declare i8 @llvm.atomic.load.umax.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
-declare i16 @llvm.atomic.load.umax.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
-declare i32 @llvm.atomic.load.umax.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
-declare i64 @llvm.atomic.load.umax.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+declare i8 @llvm.atomic.load.umax.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.umax.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.umax.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.umax.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
<pre>
-declare i8 @llvm.atomic.load.umin.i8.( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
-declare i16 @llvm.atomic.load.umin.i16.( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
-declare i32 @llvm.atomic.load.umin.i32.( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
-declare i64 @llvm.atomic.load.umin.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+declare i8 @llvm.atomic.load.umin.i8.p0i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.load.umin.i16.p0i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.load.umin.i32.p0i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.load.umin.i64.p0i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
</pre>
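+<p>
+  Likewise for this family; a sketch for an assumed address space 1:</p>
+<pre>
+declare i32 @llvm.atomic.load.umax.i32.p1i32( i32 addrspace(1)* &lt;ptr&gt;, i32 &lt;delta&gt; )
+</pre>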
<h5>Overview:</h5>
@@ -6159,13 +6162,13 @@ declare i64 @llvm.atomic.load.umin.i64.( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
<pre>
%ptr = malloc i32
store i32 7, %ptr
-%result0 = call i32 @llvm.atomic.load.min.i32( i32* %ptr, i32 -2 )
+%result0 = call i32 @llvm.atomic.load.min.i32.p0i32( i32* %ptr, i32 -2 )
<i>; yields {i32}:result0 = 7</i>
-%result1 = call i32 @llvm.atomic.load.max.i32( i32* %ptr, i32 8 )
+%result1 = call i32 @llvm.atomic.load.max.i32.p0i32( i32* %ptr, i32 8 )
<i>; yields {i32}:result1 = -2</i>
-%result2 = call i32 @llvm.atomic.load.umin.i32( i32* %ptr, i32 10 )
+%result2 = call i32 @llvm.atomic.load.umin.i32.p0i32( i32* %ptr, i32 10 )
<i>; yields {i32}:result2 = 8</i>
-%result3 = call i32 @llvm.atomic.load.umax.i32( i32* %ptr, i32 30 )
+%result3 = call i32 @llvm.atomic.load.umax.i32.p0i32( i32* %ptr, i32 30 )
<i>; yields {i32}:result3 = 8</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 30</i>
</pre>