SIMD Vector Example in LLVM


It is useful to see the vector instructions "in action" in LLVM's human-readable form (a ".ll" file) before implementing the Cmm -> LLVM backend (in the ./compiler/llvmGen section of the code). Writing LLVM IR sits somewhere between authoring Java bytecode and writing assembly language directly. The process is:

  • Generate or create a human-readable file (a ".ll" file), for example "add_floats.ll"
  • Assemble this file to bitcode with the LLVM assembler: llvm-as add_floats.ll. This generates a ".bc" file, in this case add_floats.bc. The bitcode is a binary format and is not human-readable.
  • Once the bitcode is available, there are a few options (a complete command sequence is sketched after this list):
    • Generate native machine code: llc add_floats.bc emits native assembly in a ".s" file (add_floats.s)
    • Run the bitcode on the JIT compiler: lli add_floats.bc runs the instructions and produces the result
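
For concreteness, a minimal end-to-end session might look like this (a sketch: it assumes the IR shown below is saved as add_floats.ll, and uses gcc for the final assemble-and-link step, though any system assembler/linker will do):

llvm-as add_floats.ll              # produces add_floats.bc (bitcode)
llc add_floats.bc                  # produces add_floats.s (native assembly)
gcc add_floats.s -o add_floats     # assemble and link a native executable
./add_floats                       # run natively
lli add_floats.bc                  # or run the bitcode on the JIT instead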

To demonstrate the vector instructions, we can start with a basic C program (purely for illustration; LLVM IR is imperative rather than functional, so starting from an imperative language maps onto it naturally):

#include <stdio.h>

int main()
{
   float x[4], y[4], z[4];
   x[0] = 1.0;
   x[1] = 2.0;
   x[2] = 3.0;
   x[3] = 4.0;
   y[0] = 10.0;
   y[1] = 20.0;
   y[2] = 30.0;
   y[3] = 40.0;

   z[0] = x[0] + y[0]; 
   z[1] = x[1] + y[1]; 
   z[2] = x[2] + y[2]; 
   z[3] = x[3] + y[3];
   printf("%f %f %f %f\n", z[0], z[1], z[2], z[3]);
   return 0;
}

Compiling and running this in C is straightforward and left to the reader; a typical invocation is sketched below.
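
Assuming the source is saved as add_floats.c (a name chosen here to match the .ll file), the usual commands are:

gcc add_floats.c -o add_floats
./add_floats       # prints: 11.000000 22.000000 33.000000 44.000000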

This converts easily to LLVM's human-readable format (use the online demo page, or clang -S -emit-llvm add_floats.c, if you'd like):

; ModuleID = '/tmp/webcompile/_21191_0.bc'
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"

@.str = private unnamed_addr constant [13 x i8] c"%f %f %f %f\0A\00"

define i32 @main() nounwind {
  %1 = alloca i32, align 4
  %x = alloca [4 x float], align 16
  %y = alloca [4 x float], align 16
  %z = alloca [4 x float], align 16
  store i32 0, i32* %1
  %2 = getelementptr inbounds [4 x float]* %x, i32 0, i64 0
  store float 1.000000e+00, float* %2
  %3 = getelementptr inbounds [4 x float]* %x, i32 0, i64 1
  store float 2.000000e+00, float* %3
  %4 = getelementptr inbounds [4 x float]* %x, i32 0, i64 2
  store float 3.000000e+00, float* %4
  %5 = getelementptr inbounds [4 x float]* %x, i32 0, i64 3
  store float 4.000000e+00, float* %5
  %6 = getelementptr inbounds [4 x float]* %y, i32 0, i64 0
  store float 1.000000e+01, float* %6
  %7 = getelementptr inbounds [4 x float]* %y, i32 0, i64 1
  store float 2.000000e+01, float* %7
  %8 = getelementptr inbounds [4 x float]* %y, i32 0, i64 2
  store float 3.000000e+01, float* %8
  %9 = getelementptr inbounds [4 x float]* %y, i32 0, i64 3
  store float 4.000000e+01, float* %9
  %10 = getelementptr inbounds [4 x float]* %x, i32 0, i64 0
  %11 = load float* %10
  %12 = getelementptr inbounds [4 x float]* %y, i32 0, i64 0
  %13 = load float* %12
  %14 = fadd float %11, %13
  %15 = getelementptr inbounds [4 x float]* %z, i32 0, i64 0
  store float %14, float* %15
  %16 = getelementptr inbounds [4 x float]* %x, i32 0, i64 1
  %17 = load float* %16
  %18 = getelementptr inbounds [4 x float]* %y, i32 0, i64 1
  %19 = load float* %18
  %20 = fadd float %17, %19
  %21 = getelementptr inbounds [4 x float]* %z, i32 0, i64 1
  store float %20, float* %21
  %22 = getelementptr inbounds [4 x float]* %x, i32 0, i64 2
  %23 = load float* %22
  %24 = getelementptr inbounds [4 x float]* %y, i32 0, i64 2
  %25 = load float* %24
  %26 = fadd float %23, %25
  %27 = getelementptr inbounds [4 x float]* %z, i32 0, i64 2
  store float %26, float* %27
  %28 = getelementptr inbounds [4 x float]* %x, i32 0, i64 3
  %29 = load float* %28
  %30 = getelementptr inbounds [4 x float]* %y, i32 0, i64 3
  %31 = load float* %30
  %32 = fadd float %29, %31
  %33 = getelementptr inbounds [4 x float]* %z, i32 0, i64 3
  store float %32, float* %33
  %34 = getelementptr inbounds [4 x float]* %z, i32 0, i64 0
  %35 = load float* %34
  %36 = fpext float %35 to double
  %37 = getelementptr inbounds [4 x float]* %z, i32 0, i64 1
  %38 = load float* %37
  %39 = fpext float %38 to double
  %40 = getelementptr inbounds [4 x float]* %z, i32 0, i64 2
  %41 = load float* %40
  %42 = fpext float %41 to double
  %43 = getelementptr inbounds [4 x float]* %z, i32 0, i64 3
  %44 = load float* %43
  %45 = fpext float %44 to double
  %46 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([13 x i8]* @.str, i32 0, i32 0), double %36, double %39, double %42, double %45)
  %47 = load i32* %1
  ret i32 %47
}

declare i32 @printf(i8*, ...)

This is easy enough to run using the JIT compiler: lli add_floats.ll

[root@pg155-n19 pgms]# lli add_floats.ll 
11.000000 22.000000 33.000000 44.000000
[root@pg155-n19 pgms]# 

The core of the instructions can be replaced with vector operations. (Obviously, optimizing this program would leave very little code and vectorization is unnecessary here, but this is an exercise.)

Here is the .ll code rewritten with vectorization:

; ModuleID = '/tmp/webcompile/_21191_0.bc'
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"

@.str = private unnamed_addr constant [13 x i8] c"%f %f %f %f\0A\00"

define i32 @main() nounwind {
  %1 = alloca i32, align 4

  ; allocate three vectors
  %x = alloca <4 x float>, align 16
  %y = alloca <4 x float>, align 16
  %z = alloca <4 x float>, align 16

  store i32 0, i32* %1

  ; store initial values to the x and y vectors
  %2 = getelementptr inbounds <4 x float>* %x, i32 0, i64 0
  store float 1.000000e+00, float* %2
  %3 = getelementptr inbounds <4 x float>* %x, i32 0, i64 1
  store float 2.000000e+00, float* %3
  %4 = getelementptr inbounds <4 x float>* %x, i32 0, i64 2
  store float 3.000000e+00, float* %4
  %5 = getelementptr inbounds <4 x float>* %x, i32 0, i64 3
  store float 4.000000e+00, float* %5
  %6 = getelementptr inbounds <4 x float>* %y, i32 0, i64 0
  store float 1.000000e+01, float* %6
  %7 = getelementptr inbounds <4 x float>* %y, i32 0, i64 1
  store float 2.000000e+01, float* %7
  %8 = getelementptr inbounds <4 x float>* %y, i32 0, i64 2
  store float 3.000000e+01, float* %8
  %9 = getelementptr inbounds <4 x float>* %y, i32 0, i64 3
  store float 4.000000e+01, float* %9

  ; load the vectors
  %xs = load <4 x float>* %x
  %ys = load <4 x float>* %y

  ; add the vectors
  %zs = fadd <4 x float> %xs, %ys

  ; store the result vector back to z
  store <4 x float> %zs, <4 x float>* %z

  ; get the elements out of the vector for printing
  %10 = getelementptr inbounds <4 x float>* %z, i32 0, i64 0
  %11 = load float* %10
  %12 = fpext float %11 to double
  %13 = getelementptr inbounds <4 x float>* %z, i32 0, i64 1
  %14 = load float* %13
  %15 = fpext float %14 to double
  %16 = getelementptr inbounds <4 x float>* %z, i32 0, i64 2
  %17 = load float* %16
  %18 = fpext float %17 to double
  %19 = getelementptr inbounds <4 x float>* %z, i32 0, i64 3
  %20 = load float* %19
  %21 = fpext float %20 to double

  ; print the components of z that were extracted above
  %22 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([13 x i8]* @.str, i32 0, i32 0), double %12, double %15, double %18, double %21)

  ; return
  %23 = load i32* %1
  ret i32 %23
}

declare i32 @printf(i8*, ...)

Rerunning the program above yields the same results as the original, non-vectorized LLVM program.

[root@pg155-n19 pgms]# lli add_floats_vec.ll 
11.000000 22.000000 33.000000 44.000000
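
As an aside, the listing above pulls the individual lanes out of %z with getelementptr and scalar loads purely to feed printf. LLVM also provides dedicated instructions, extractelement and insertelement, for moving scalars in and out of vector values. A sketch of the same printf setup using extractelement (register names here are illustrative, not from the original listing):

  ; load the result vector once, then extract each lane
  %zv = load <4 x float>* %z
  %e0 = extractelement <4 x float> %zv, i32 0
  %d0 = fpext float %e0 to double
  %e1 = extractelement <4 x float> %zv, i32 1
  %d1 = fpext float %e1 to double
  %e2 = extractelement <4 x float> %zv, i32 2
  %d2 = fpext float %e2 to double
  %e3 = extractelement <4 x float> %zv, i32 3
  %d3 = fpext float %e3 to double

Compiling either vectorized version with llc and inspecting the ".s" file should show a packed SSE instruction such as addps where the vector fadd appears.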

Let's take a look at a program with a substantially larger array of floats to add. Again, optimizing this would yield very little code (only the first four results are ever used, and in Haskell lazy evaluation would reduce the program to essentially a no-op). Nonetheless, the addition loop should speed up by almost a factor of 4 when vectorized. Here is the C code that guides the imperative implementation:

#include <stdio.h>

int main()
{
   int sz = 40000;
   float x[sz], y[sz], z[sz];
   int i;
   for (i = 0; i < sz; i++) {
      x[i] = i;
      y[i] = i + sz;
   }

   for (i = 0; i < sz; i += 4) {
     z[i] = x[i] + y[i];
     z[i+1] = x[i+1] + y[i+1];
     z[i+2] = x[i+2] + y[i+2];
     z[i+3] = x[i+3] + y[i+3];
   }

   printf("%f %f %f %f\n", z[0], z[1], z[2], z[3]);
   return 0;
}

The resulting non-optimized LLVM code is as follows:

; ModuleID = '/tmp/webcompile/_718_0.bc'
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"

@.str = private unnamed_addr constant [13 x i8] c"%f %f %f %f\0A\00"

define i32 @main() nounwind {
  %1 = alloca i32, align 4
  %sz = alloca i32, align 4
  %2 = alloca i8*
  %i = alloca i32, align 4
  store i32 0, i32* %1
  store i32 40000, i32* %sz, align 4
  %3 = call i8* @llvm.stacksave()
  store i8* %3, i8** %2
  %4 = load i32* %sz, align 4
  %5 = zext i32 %4 to i64
  %6 = mul i64 4, %5
  %7 = alloca i8, i64 %6, align 16
  %8 = bitcast i8* %7 to float*
  %9 = load i32* %sz, align 4
  %10 = zext i32 %9 to i64
  %11 = mul i64 4, %10
  %12 = alloca i8, i64 %11, align 16
  %13 = bitcast i8* %12 to float*
  %14 = load i32* %sz, align 4
  %15 = zext i32 %14 to i64
  %16 = mul i64 4, %15
  %17 = alloca i8, i64 %16, align 16
  %18 = bitcast i8* %17 to float*
  store i32 0, i32* %i, align 4
  br label %19

; <label>:19                                      ; preds = %36, %0
  %20 = load i32* %i, align 4
  %21 = load i32* %sz, align 4
  %22 = icmp slt i32 %20, %21
  br i1 %22, label %23, label %39

; <label>:23                                      ; preds = %19
  %24 = load i32* %i, align 4
  %25 = sitofp i32 %24 to float
  %26 = load i32* %i, align 4
  %27 = sext i32 %26 to i64
  %28 = getelementptr inbounds float* %8, i64 %27
  store float %25, float* %28
  %29 = load i32* %i, align 4
  %30 = load i32* %sz, align 4
  %31 = add nsw i32 %29, %30
  %32 = sitofp i32 %31 to float
  %33 = load i32* %i, align 4
  %34 = sext i32 %33 to i64
  %35 = getelementptr inbounds float* %13, i64 %34
  store float %32, float* %35
  br label %36

; <label>:36                                      ; preds = %23
  %37 = load i32* %i, align 4
  %38 = add nsw i32 %37, 1
  store i32 %38, i32* %i, align 4
  br label %19

; <label>:39                                      ; preds = %19
  store i32 0, i32* %i, align 4
  br label %40

; <label>:40                                      ; preds = %102, %39
  %41 = load i32* %i, align 4
  %42 = load i32* %sz, align 4
  %43 = icmp slt i32 %41, %42
  br i1 %43, label %44, label %105

; <label>:44                                      ; preds = %40
  %45 = load i32* %i, align 4
  %46 = sext i32 %45 to i64
  %47 = getelementptr inbounds float* %8, i64 %46
  %48 = load float* %47
  %49 = load i32* %i, align 4
  %50 = sext i32 %49 to i64
  %51 = getelementptr inbounds float* %13, i64 %50
  %52 = load float* %51
  %53 = fadd float %48, %52
  %54 = load i32* %i, align 4
  %55 = sext i32 %54 to i64
  %56 = getelementptr inbounds float* %18, i64 %55
  store float %53, float* %56
  %57 = load i32* %i, align 4
  %58 = add nsw i32 %57, 1
  %59 = sext i32 %58 to i64
  %60 = getelementptr inbounds float* %8, i64 %59
  %61 = load float* %60
  %62 = load i32* %i, align 4
  %63 = add nsw i32 %62, 1
  %64 = sext i32 %63 to i64
  %65 = getelementptr inbounds float* %13, i64 %64
  %66 = load float* %65
  %67 = fadd float %61, %66
  %68 = load i32* %i, align 4
  %69 = add nsw i32 %68, 1
  %70 = sext i32 %69 to i64
  %71 = getelementptr inbounds float* %18, i64 %70
  store float %67, float* %71
  %72 = load i32* %i, align 4
  %73 = add nsw i32 %72, 2
  %74 = sext i32 %73 to i64
  %75 = getelementptr inbounds float* %8, i64 %74
  %76 = load float* %75
  %77 = load i32* %i, align 4
  %78 = add nsw i32 %77, 2
  %79 = sext i32 %78 to i64
  %80 = getelementptr inbounds float* %13, i64 %79
  %81 = load float* %80
  %82 = fadd float %76, %81
  %83 = load i32* %i, align 4
  %84 = add nsw i32 %83, 2
  %85 = sext i32 %84 to i64
  %86 = getelementptr inbounds float* %18, i64 %85
  store float %82, float* %86
  %87 = load i32* %i, align 4
  %88 = add nsw i32 %87, 3
  %89 = sext i32 %88 to i64
  %90 = getelementptr inbounds float* %8, i64 %89
  %91 = load float* %90
  %92 = load i32* %i, align 4
  %93 = add nsw i32 %92, 3
  %94 = sext i32 %93 to i64
  %95 = getelementptr inbounds float* %13, i64 %94
  %96 = load float* %95
  %97 = fadd float %91, %96
  %98 = load i32* %i, align 4
  %99 = add nsw i32 %98, 3
  %100 = sext i32 %99 to i64
  %101 = getelementptr inbounds float* %18, i64 %100
  store float %97, float* %101
  br label %102

; <label>:102                                     ; preds = %44
  %103 = load i32* %i, align 4
  %104 = add nsw i32 %103, 4
  store i32 %104, i32* %i, align 4
  br label %40

; <label>:105                                     ; preds = %40
  %106 = getelementptr inbounds float* %18, i64 0
  %107 = load float* %106
  %108 = fpext float %107 to double
  %109 = getelementptr inbounds float* %18, i64 1
  %110 = load float* %109
  %111 = fpext float %110 to double
  %112 = getelementptr inbounds float* %18, i64 2
  %113 = load float* %112
  %114 = fpext float %113 to double
  %115 = getelementptr inbounds float* %18, i64 3
  %116 = load float* %115
  %117 = fpext float %116 to double
  %118 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([13 x i8]* @.str, i32 0, i32 0), double %108, double %111, double %114, double %117)
  %119 = load i8** %2
  call void @llvm.stackrestore(i8* %119)
  %120 = load i32* %1
  ret i32 %120
}

declare i8* @llvm.stacksave() nounwind

declare i32 @printf(i8*, ...)

declare void @llvm.stackrestore(i8*) nounwind

Instead of hand-optimizing the entire sequence, the exercise merely converts the types to vectors and alters the loop body starting at "label 44" to use a single vector addition rather than the current sequence of four scalar adds; a sketch of the transformation follows.
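
A rough sketch of the replacement block (an assumption about the intended result, not code from the original page: it reuses %8, %13, and %18 as the x, y, and z base pointers, and uses named registers for readability; splicing it into the file above would also require renumbering the later unnamed values and labels):

; <label>:44                                      ; preds = %40
  ; compute &x[i], &y[i], &z[i], view them as <4 x float>*,
  ; and replace the four scalar adds with one vector add
  %i.val  = load i32* %i, align 4
  %i.ext  = sext i32 %i.val to i64
  %x.elt  = getelementptr inbounds float* %8, i64 %i.ext
  %x.vec  = bitcast float* %x.elt to <4 x float>*
  %y.elt  = getelementptr inbounds float* %13, i64 %i.ext
  %y.vec  = bitcast float* %y.elt to <4 x float>*
  %z.elt  = getelementptr inbounds float* %18, i64 %i.ext
  %z.vec  = bitcast float* %z.elt to <4 x float>*
  %xs = load <4 x float>* %x.vec, align 16
  %ys = load <4 x float>* %y.vec, align 16
  %zs = fadd <4 x float> %xs, %ys
  store <4 x float> %zs, <4 x float>* %z.vec, align 16
  br label %102

The 16-byte alignment is safe because the three buffers are allocated with align 16 and the loop index advances four floats (16 bytes) per iteration.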

Finally, timing the execution of the scalar and vectorized bitcode shows the effect of vectorization.
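
One simple way, assuming the two programs are saved as add_large.ll and add_large_vec.ll (hypothetical names), is to time the JIT runs directly:

time lli add_large.ll        # scalar version
time lli add_large_vec.ll    # vectorized version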