日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問 生活随笔!

生活随笔

當前位置: 首頁 > 人文社科 > 生活经验 >内容正文

生活经验

Relay外部库使用

發布時間:2023/11/28 生活经验 64 豆豆
生活随笔 收集整理的這篇文章主要介紹了 Relay外部库使用 小編覺得挺不錯的,現在分享給大家,幫大家做個參考.

Relay外部庫使用
本文介紹如何將cuDNN或cuBLAS等外部庫與Relay一起使用。
Relay內部使用TVM生成目標特定的代碼。例如,使用cuda后端,TVM為用戶提供的網絡中的所有層生成cuda內核。有時將各種供應商開發的外部庫合并到Relay中也很有幫助。幸運的是,TVM具有透明地調用這些庫的機制。對于Relay用戶,要做的只是適當地設置目標字符串。
在可以使用Relay的外部庫之前,TVM必須與要使用的庫一起構建。例如,要使用cuDNN,需要啟用cmake / config.cmake中的USE_CUDNN選項,并在必要時指定cuDNN include和庫目錄。
首先,導入Relay和TVM。
import tvm
from tvm import te
import numpy as np
from tvm.contrib import graph_runtime as runtime
from tvm import relay
from tvm.relay import testing
import tvm.testing
創建一個簡單的網絡
創建一個非常簡單的網絡進行演示。由卷積,批處理歸一化和ReLU激活組成。
# Network configuration: one conv2d -> batch_norm -> ReLU stack for the demo.
out_channels = 16
batch_size = 1

# Input placeholder: NCHW layout, 3-channel 224x224 float32 image.
# NOTE: the scraped original used full-width curly quotes (“data”), which is
# invalid Python syntax; restored to ASCII quotes here.
data = relay.var("data", relay.TensorType((batch_size, 3, 224, 224), "float32"))
weight = relay.var("weight")
bn_gamma = relay.var("bn_gamma")
bn_beta = relay.var("bn_beta")
bn_mmean = relay.var("bn_mean")
bn_mvar = relay.var("bn_var")

# 3x3 convolution; padding (1, 1) preserves the 224x224 spatial size.
simple_net = relay.nn.conv2d(
    data=data, weight=weight, kernel_size=(3, 3), channels=out_channels, padding=(1, 1)
)
# batch_norm returns a tuple (out, moving_mean, moving_var); [0] takes the
# normalized output.
simple_net = relay.nn.batch_norm(simple_net, bn_gamma, bn_beta, bn_mmean, bn_mvar)[0]
simple_net = relay.nn.relu(simple_net)
# Close over every free variable (data, weight, bn params) to form a Function.
simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net)

data_shape = (batch_size, 3, 224, 224)
# create_workload initializes random parameter values and returns
# (IRModule, params dict).
net, params = testing.create_workload(simple_net)
使用cuda后端構建并運行
像往常一樣,使用cuda后端構建并運行此網絡。通過將日志記錄級別設置為DEBUG,Relay圖編譯的結果將作為偽代碼轉儲。
import logging

# DEBUG level makes Relay dump the fused TVM IR during compilation.
logging.basicConfig(level=logging.DEBUG)  # to dump TVM IR after fusion

# NOTE: curly quotes from the scrape restored to ASCII quotes throughout.
target = "cuda"
lib = relay.build_module.build(net, target, params=params)

ctx = tvm.context(target, 0)
data = np.random.uniform(-1, 1, size=data_shape).astype("float32")
# The build result is a factory module: index the "default" executor and
# instantiate it on the device context. (The scrape rendered
# lib["default"](ctx) as lib"default" because markdown ate the [...](...) span.)
module = runtime.GraphModule(lib["default"](ctx))
module.set_input("data", data)
module.run()
out_shape = (batch_size, out_channels, 224, 224)
out = module.get_output(0, tvm.nd.empty(out_shape))
out_cuda = out.asnumpy()
生成的偽代碼應如下所示。注意如何將偏差添加,批處理規范化和ReLU激活融合到卷積內核中。TVM根據此表示生成單個融合內核。
produce tensor {
// attr [iter_var(blockIdx.z, , blockIdx.z)] thread_extent = 1
// attr [compute] storage_scope = “local”
allocate compute[float32 * 32]
// attr [pad_temp.shared] storage_scope = “shared”
allocate pad_temp.shared[float32 * 180]
// attr [placeholder.shared] storage_scope = “shared”
allocate placeholder.shared[float32 * 144]
// attr [iter_var(blockIdx.y, , blockIdx.y)] thread_extent = 28
// attr [iter_var(blockIdx.x, , blockIdx.x)] thread_extent = 14
// attr [iter_var(threadIdx.z, , threadIdx.z)] thread_extent = 4
// attr [iter_var(threadIdx.y, , threadIdx.y)] thread_extent = 1
// attr [iter_var(threadIdx.x, , threadIdx.x)] thread_extent = 16
produce compute {
compute[0] = 0.000000f
compute[1] = 0.000000f
compute[2] = 0.000000f
compute[3] = 0.000000f
compute[4] = 0.000000f
compute[5] = 0.000000f
compute[6] = 0.000000f
compute[7] = 0.000000f
compute[8] = 0.000000f
compute[9] = 0.000000f
compute[10] = 0.000000f
compute[11] = 0.000000f
compute[12] = 0.000000f
compute[13] = 0.000000f
compute[14] = 0.000000f
compute[15] = 0.000000f
compute[16] = 0.000000f
compute[17] = 0.000000f
compute[18] = 0.000000f
compute[19] = 0.000000f
compute[20] = 0.000000f
compute[21] = 0.000000f
compute[22] = 0.000000f
compute[23] = 0.000000f
compute[24] = 0.000000f
compute[25] = 0.000000f
compute[26] = 0.000000f
compute[27] = 0.000000f
compute[28] = 0.000000f
compute[29] = 0.000000f
compute[30] = 0.000000f
compute[31] = 0.000000f
for (rc.outer, 0, 3) {
produce pad_temp.shared {
// attr [iter_var(threadIdx.z, , threadIdx.z)] thread_extent = 4
// attr [iter_var(threadIdx.y, , threadIdx.y)] thread_extent = 1
// attr [iter_var(threadIdx.x, , threadIdx.x)] thread_extent = 16
if (likely(((threadIdx.z15) < (60 - threadIdx.x)))) {
if (likely((threadIdx.x < 15))) {
pad_temp.shared[(((((threadIdx.z
15) + threadIdx.x)/60)180) + ((((((threadIdx.z15) + threadIdx.x)/6) % 10)18) + ((((threadIdx.z3) + threadIdx.x)3) % 18)))] = tvm_if_then_else((((((1 - ((((threadIdx.z15) + threadIdx.x)/6) % 10)) <= (blockIdx.y8)) && ((blockIdx.y8) < (225 - ((((threadIdx.z15) + threadIdx.x)/6) % 10)))) && ((1 - ((((threadIdx.z3) + threadIdx.x)3) % 18)) <= (blockIdx.x16))) && ((blockIdx.x16) < (225 - ((((threadIdx.z3) + threadIdx.x)3) % 18)))), placeholder[((((((((blockIdx.y112) + blockIdx.x) + (rc.outer3136)) + ((((threadIdx.z15) + threadIdx.x)/60)9408))16) + ((((threadIdx.z3) + threadIdx.x)3) % 18)) + (((((threadIdx.z15) + threadIdx.x)/6) % 10)224)) + -225)], 0.000000f)
pad_temp.shared[(((((((threadIdx.z
15) + threadIdx.x)3) + 1)/180)180) + ((((((((threadIdx.z15) + threadIdx.x)3) + 1)/18) % 10)18) + (((((threadIdx.z3) + threadIdx.x)3) + 1) % 18)))] = tvm_if_then_else((((((1 - ((((((threadIdx.z15) + threadIdx.x)3) + 1)/18) % 10)) <= (blockIdx.y8)) && ((blockIdx.y8) < (225 - ((((((threadIdx.z15) + threadIdx.x)3) + 1)/18) % 10)))) && ((1 - (((((threadIdx.z3) + threadIdx.x)3) + 1) % 18)) <= (blockIdx.x16))) && ((blockIdx.x
16) < (225 - (((((threadIdx.z3) + threadIdx.x)3) + 1) % 18)))), placeholder[((((((((blockIdx.y112) + blockIdx.x) + (rc.outer3136)) + ((((((threadIdx.z15) + threadIdx.x)3) + 1)/180)9408))16) + (((((threadIdx.z3) + threadIdx.x)3) + 1) % 18)) + (((((((threadIdx.z15) + threadIdx.x)3) + 1)/18) % 10)224)) + -225)], 0.000000f)
pad_temp.shared[(((((((threadIdx.z
15) + threadIdx.x)3) + 2)/180)180) + ((((((((threadIdx.z15) + threadIdx.x)3) + 2)/18) % 10)18) + (((((threadIdx.z3) + threadIdx.x)3) + 2) % 18)))] = tvm_if_then_else((((((1 - ((((((threadIdx.z15) + threadIdx.x)3) + 2)/18) % 10)) <= (blockIdx.y8)) && ((blockIdx.y8) < (225 - ((((((threadIdx.z15) + threadIdx.x)3) + 2)/18) % 10)))) && ((1 - (((((threadIdx.z3) + threadIdx.x)3) + 2) % 18)) <= (blockIdx.x16))) && ((blockIdx.x
16) < (225 - (((((threadIdx.z
3) + threadIdx.x)3) + 2) % 18)))), placeholder[((((((((blockIdx.y112) + blockIdx.x) + (rc.outer
3136)) + ((((((threadIdx.z
15) + threadIdx.x)3) + 2)/180)9408))16) + (((((threadIdx.z3) + threadIdx.x)3) + 2) % 18)) + (((((((threadIdx.z15) + threadIdx.x)3) + 2)/18) % 10)224)) + -225)], 0.000000f)
}
}
}
produce placeholder.shared {
// attr [iter_var(threadIdx.z, , threadIdx.z)] thread_extent = 4
// attr [iter_var(threadIdx.y, , threadIdx.y)] thread_extent = 1
// attr [iter_var(threadIdx.x, , threadIdx.x)] thread_extent = 16
if (likely(((threadIdx.z
4) < (16 - (threadIdx.x/3))))) {
if (likely(((threadIdx.z
12) < (48 - threadIdx.x)))) {
if (likely((threadIdx.x < 12))) {
placeholder.shared[(((((threadIdx.z
4) + (threadIdx.x/3))3) + (threadIdx.x % 3))3)] = placeholder[(((((rc.outer + (threadIdx.z12)) + ((threadIdx.x/3)3))3) + (threadIdx.x % 3))3)]
placeholder.shared[((((((threadIdx.z
4) + (threadIdx.x/3))3) + (threadIdx.x % 3))3) + 1)] = placeholder[((((((rc.outer + (threadIdx.z12)) + ((threadIdx.x/3)3))3) + (threadIdx.x % 3))3) + 1)]
placeholder.shared[((((((threadIdx.z
4) + (threadIdx.x/3))3) + (threadIdx.x % 3))3) + 2)] = placeholder[((((((rc.outer + (threadIdx.z12)) + ((threadIdx.x/3)3))3) + (threadIdx.x % 3))3) + 2)]
}
}
}
}
compute[0] = (compute[0] + (pad_temp.shared[threadIdx.x]*placeholder.shared[(threadIdx.z*36)]))
compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 18)]*placeholder.shared[(threadIdx.z*36)]))
compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[(threadIdx.z*36)]))
compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[(threadIdx.z*36)]))
compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[(threadIdx.z*36)]))
compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[(threadIdx.z*36)]))
compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[(threadIdx.z*36)]))
compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[(threadIdx.z*36)]))
compute[8] = (compute[8] + (pad_temp.shared[threadIdx.x]placeholder.shared[((threadIdx.z36) + 9)]))
compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 18)]placeholder.shared[((threadIdx.z36) + 9)]))
compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 36)]placeholder.shared[((threadIdx.z36) + 9)]))
compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 54)]placeholder.shared[((threadIdx.z36) + 9)]))
compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 72)]placeholder.shared[((threadIdx.z36) + 9)]))
compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 90)]placeholder.shared[((threadIdx.z36) + 9)]))
compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 108)]placeholder.shared[((threadIdx.z36) + 9)]))
compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 126)]placeholder.shared[((threadIdx.z36) + 9)]))
compute[16] = (compute[16] + (pad_temp.shared[threadIdx.x]placeholder.shared[((threadIdx.z36) + 18)]))
compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 18)]placeholder.shared[((threadIdx.z36) + 18)]))
compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 36)]placeholder.shared[((threadIdx.z36) + 18)]))
compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 54)]placeholder.shared[((threadIdx.z36) + 18)]))
compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 72)]placeholder.shared[((threadIdx.z36) + 18)]))
compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 90)]placeholder.shared[((threadIdx.z36) + 18)]))
compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 108)]placeholder.shared[((threadIdx.z36) + 18)]))
compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 126)]placeholder.shared[((threadIdx.z36) + 18)]))
compute[24] = (compute[24] + (pad_temp.shared[threadIdx.x]placeholder.shared[((threadIdx.z36) + 27)]))
compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 18)]placeholder.shared[((threadIdx.z36) + 27)]))
compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 36)]placeholder.shared[((threadIdx.z36) + 27)]))
compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 54)]placeholder.shared[((threadIdx.z36) + 27)]))
compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 72)]placeholder.shared[((threadIdx.z36) + 27)]))
compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 90)]placeholder.shared[((threadIdx.z36) + 27)]))
compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 108)]placeholder.shared[((threadIdx.z36) + 27)]))
compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 126)]placeholder.shared[((threadIdx.z36) + 27)]))
compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 1)]placeholder.shared[((threadIdx.z36) + 1)]))
compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 19)]placeholder.shared[((threadIdx.z36) + 1)]))
compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 37)]placeholder.shared[((threadIdx.z36) + 1)]))
compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 55)]placeholder.shared[((threadIdx.z36) + 1)]))
compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 73)]placeholder.shared[((threadIdx.z36) + 1)]))
compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 91)]placeholder.shared[((threadIdx.z36) + 1)]))
compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 109)]placeholder.shared[((threadIdx.z36) + 1)]))
compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 127)]placeholder.shared[((threadIdx.z36) + 1)]))
compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 1)]placeholder.shared[((threadIdx.z36) + 10)]))
compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 19)]placeholder.shared[((threadIdx.z36) + 10)]))
compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 37)]placeholder.shared[((threadIdx.z36) + 10)]))
compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 55)]placeholder.shared[((threadIdx.z36) + 10)]))
compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 73)]placeholder.shared[((threadIdx.z36) + 10)]))
compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 91)]placeholder.shared[((threadIdx.z36) + 10)]))
compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 109)]placeholder.shared[((threadIdx.z36) + 10)]))
compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 127)]placeholder.shared[((threadIdx.z36) + 10)]))
compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 1)]placeholder.shared[((threadIdx.z36) + 19)]))
compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 19)]placeholder.shared[((threadIdx.z36) + 19)]))
compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 37)]placeholder.shared[((threadIdx.z36) + 19)]))
compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 55)]placeholder.shared[((threadIdx.z36) + 19)]))
compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 73)]placeholder.shared[((threadIdx.z36) + 19)]))
compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 91)]placeholder.shared[((threadIdx.z36) + 19)]))
compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 109)]placeholder.shared[((threadIdx.z36) + 19)]))
compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 127)]placeholder.shared[((threadIdx.z36) + 19)]))
compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 1)]placeholder.shared[((threadIdx.z36) + 28)]))
compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 19)]placeholder.shared[((threadIdx.z36) + 28)]))
compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 37)]placeholder.shared[((threadIdx.z36) + 28)]))
compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 55)]placeholder.shared[((threadIdx.z36) + 28)]))
compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 73)]placeholder.shared[((threadIdx.z36) + 28)]))
compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 91)]placeholder.shared[((threadIdx.z36) + 28)]))
compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 109)]placeholder.shared[((threadIdx.z36) + 28)]))
compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 127)]placeholder.shared[((threadIdx.z36) + 28)]))
compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 2)]placeholder.shared[((threadIdx.z36) + 2)]))
compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 20)]placeholder.shared[((threadIdx.z36) + 2)]))
compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 38)]placeholder.shared[((threadIdx.z36) + 2)]))
compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 56)]placeholder.shared[((threadIdx.z36) + 2)]))
compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 74)]placeholder.shared[((threadIdx.z36) + 2)]))
compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 92)]placeholder.shared[((threadIdx.z36) + 2)]))
compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 110)]placeholder.shared[((threadIdx.z36) + 2)]))
compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 128)]placeholder.shared[((threadIdx.z36) + 2)]))
compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 2)]placeholder.shared[((threadIdx.z36) + 11)]))
compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 20)]placeholder.shared[((threadIdx.z36) + 11)]))
compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 38)]placeholder.shared[((threadIdx.z36) + 11)]))
compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 56)]placeholder.shared[((threadIdx.z36) + 11)]))
compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 74)]placeholder.shared[((threadIdx.z36) + 11)]))
compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 92)]placeholder.shared[((threadIdx.z36) + 11)]))
compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 110)]placeholder.shared[((threadIdx.z36) + 11)]))
compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 128)]placeholder.shared[((threadIdx.z36) + 11)]))
compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 2)]placeholder.shared[((threadIdx.z36) + 20)]))
compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 20)]placeholder.shared[((threadIdx.z36) + 20)]))
compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 38)]placeholder.shared[((threadIdx.z36) + 20)]))
compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 56)]placeholder.shared[((threadIdx.z36) + 20)]))
compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 74)]placeholder.shared[((threadIdx.z36) + 20)]))
compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 92)]placeholder.shared[((threadIdx.z36) + 20)]))
compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 110)]placeholder.shared[((threadIdx.z36) + 20)]))
compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 128)]placeholder.shared[((threadIdx.z36) + 20)]))
compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 2)]placeholder.shared[((threadIdx.z36) + 29)]))
compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 20)]placeholder.shared[((threadIdx.z36) + 29)]))
compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 38)]placeholder.shared[((threadIdx.z36) + 29)]))
compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 56)]placeholder.shared[((threadIdx.z36) + 29)]))
compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 74)]placeholder.shared[((threadIdx.z36) + 29)]))
compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 92)]placeholder.shared[((threadIdx.z36) + 29)]))
compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 110)]placeholder.shared[((threadIdx.z36) + 29)]))
compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 128)]placeholder.shared[((threadIdx.z36) + 29)]))
compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 18)]placeholder.shared[((threadIdx.z36) + 3)]))
compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 36)]placeholder.shared[((threadIdx.z36) + 3)]))
compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 54)]placeholder.shared[((threadIdx.z36) + 3)]))
compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 72)]placeholder.shared[((threadIdx.z36) + 3)]))
compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 90)]placeholder.shared[((threadIdx.z36) + 3)]))
compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 108)]placeholder.shared[((threadIdx.z36) + 3)]))
compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 126)]placeholder.shared[((threadIdx.z36) + 3)]))
compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 144)]placeholder.shared[((threadIdx.z36) + 3)]))
compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 18)]placeholder.shared[((threadIdx.z36) + 12)]))
compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 36)]placeholder.shared[((threadIdx.z36) + 12)]))
compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 54)]placeholder.shared[((threadIdx.z36) + 12)]))
compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 72)]placeholder.shared[((threadIdx.z36) + 12)]))
compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 90)]placeholder.shared[((threadIdx.z36) + 12)]))
compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 108)]placeholder.shared[((threadIdx.z36) + 12)]))
compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 126)]placeholder.shared[((threadIdx.z36) + 12)]))
compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 144)]placeholder.shared[((threadIdx.z36) + 12)]))
compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 18)]placeholder.shared[((threadIdx.z36) + 21)]))
compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 36)]placeholder.shared[((threadIdx.z36) + 21)]))
compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 54)]placeholder.shared[((threadIdx.z36) + 21)]))
compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 72)]placeholder.shared[((threadIdx.z36) + 21)]))
compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 90)]placeholder.shared[((threadIdx.z36) + 21)]))
compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 108)]placeholder.shared[((threadIdx.z36) + 21)]))
compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 126)]placeholder.shared[((threadIdx.z36) + 21)]))
compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 144)]placeholder.shared[((threadIdx.z36) + 21)]))
compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 18)]placeholder.shared[((threadIdx.z36) + 30)]))
compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 36)]placeholder.shared[((threadIdx.z36) + 30)]))
compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 54)]placeholder.shared[((threadIdx.z36) + 30)]))
compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 72)]placeholder.shared[((threadIdx.z36) + 30)]))
compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 90)]placeholder.shared[((threadIdx.z36) + 30)]))
compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 108)]placeholder.shared[((threadIdx.z36) + 30)]))
compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 126)]placeholder.shared[((threadIdx.z36) + 30)]))
compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 144)]placeholder.shared[((threadIdx.z36) + 30)]))
compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 19)]placeholder.shared[((threadIdx.z36) + 4)]))
compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 37)]placeholder.shared[((threadIdx.z36) + 4)]))
compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 55)]placeholder.shared[((threadIdx.z36) + 4)]))
compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 73)]placeholder.shared[((threadIdx.z36) + 4)]))
compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 91)]placeholder.shared[((threadIdx.z36) + 4)]))
compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 109)]placeholder.shared[((threadIdx.z36) + 4)]))
compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 127)]placeholder.shared[((threadIdx.z36) + 4)]))
compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 145)]placeholder.shared[((threadIdx.z36) + 4)]))
compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 19)]placeholder.shared[((threadIdx.z36) + 13)]))
compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 37)]placeholder.shared[((threadIdx.z36) + 13)]))
compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 55)]placeholder.shared[((threadIdx.z36) + 13)]))
compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 73)]placeholder.shared[((threadIdx.z36) + 13)]))
compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 91)]placeholder.shared[((threadIdx.z36) + 13)]))
compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 109)]placeholder.shared[((threadIdx.z36) + 13)]))
compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 127)]placeholder.shared[((threadIdx.z36) + 13)]))
compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 145)]placeholder.shared[((threadIdx.z36) + 13)]))
compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 19)]placeholder.shared[((threadIdx.z36) + 22)]))
compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 37)]placeholder.shared[((threadIdx.z36) + 22)]))
compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 55)]placeholder.shared[((threadIdx.z36) + 22)]))
compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 73)]placeholder.shared[((threadIdx.z36) + 22)]))
compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 91)]placeholder.shared[((threadIdx.z36) + 22)]))
compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 109)]placeholder.shared[((threadIdx.z36) + 22)]))
compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 127)]placeholder.shared[((threadIdx.z36) + 22)]))
compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 145)]placeholder.shared[((threadIdx.z36) + 22)]))
compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 19)]placeholder.shared[((threadIdx.z36) + 31)]))
compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 37)]placeholder.shared[((threadIdx.z36) + 31)]))
compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 55)]placeholder.shared[((threadIdx.z36) + 31)]))
compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 73)]placeholder.shared[((threadIdx.z36) + 31)]))
compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 91)]placeholder.shared[((threadIdx.z36) + 31)]))
compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 109)]placeholder.shared[((threadIdx.z36) + 31)]))
compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 127)]placeholder.shared[((threadIdx.z36) + 31)]))
compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 145)]placeholder.shared[((threadIdx.z36) + 31)]))
compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 20)]placeholder.shared[((threadIdx.z36) + 5)]))
compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 38)]placeholder.shared[((threadIdx.z36) + 5)]))
compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 56)]placeholder.shared[((threadIdx.z36) + 5)]))
compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 74)]placeholder.shared[((threadIdx.z36) + 5)]))
compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 92)]placeholder.shared[((threadIdx.z36) + 5)]))
compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 110)]placeholder.shared[((threadIdx.z36) + 5)]))
compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 128)]placeholder.shared[((threadIdx.z36) + 5)]))
compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 146)]placeholder.shared[((threadIdx.z36) + 5)]))
compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 20)]placeholder.shared[((threadIdx.z36) + 14)]))
compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 38)]placeholder.shared[((threadIdx.z36) + 14)]))
compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 56)]placeholder.shared[((threadIdx.z36) + 14)]))
compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 74)]placeholder.shared[((threadIdx.z36) + 14)]))
compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 92)]placeholder.shared[((threadIdx.z36) + 14)]))
compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 110)]placeholder.shared[((threadIdx.z36) + 14)]))
compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 128)]placeholder.shared[((threadIdx.z36) + 14)]))
compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 146)]placeholder.shared[((threadIdx.z36) + 14)]))
compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 20)]placeholder.shared[((threadIdx.z36) + 23)]))
compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 38)]placeholder.shared[((threadIdx.z36) + 23)]))
compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 56)]placeholder.shared[((threadIdx.z36) + 23)]))
compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 74)]placeholder.shared[((threadIdx.z36) + 23)]))
compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 92)]placeholder.shared[((threadIdx.z36) + 23)]))
compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 110)]placeholder.shared[((threadIdx.z36) + 23)]))
compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 128)]placeholder.shared[((threadIdx.z36) + 23)]))
compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 146)]placeholder.shared[((threadIdx.z36) + 23)]))
compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 20)]placeholder.shared[((threadIdx.z36) + 32)]))
compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 38)]placeholder.shared[((threadIdx.z36) + 32)]))
compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 56)]placeholder.shared[((threadIdx.z36) + 32)]))
compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 74)]placeholder.shared[((threadIdx.z36) + 32)]))
compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 92)]placeholder.shared[((threadIdx.z36) + 32)]))
compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 110)]placeholder.shared[((threadIdx.z36) + 32)]))
compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 128)]placeholder.shared[((threadIdx.z36) + 32)]))
compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 146)]placeholder.shared[((threadIdx.z36) + 32)]))
compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 36)]placeholder.shared[((threadIdx.z36) + 6)]))
compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 54)]placeholder.shared[((threadIdx.z36) + 6)]))
compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 72)]placeholder.shared[((threadIdx.z36) + 6)]))
compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 90)]placeholder.shared[((threadIdx.z36) + 6)]))
compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 108)]placeholder.shared[((threadIdx.z36) + 6)]))
compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 126)]placeholder.shared[((threadIdx.z36) + 6)]))
compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 144)]placeholder.shared[((threadIdx.z36) + 6)]))
compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 162)]placeholder.shared[((threadIdx.z36) + 6)]))
compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 36)]placeholder.shared[((threadIdx.z36) + 15)]))
compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 54)]placeholder.shared[((threadIdx.z36) + 15)]))
compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 72)]placeholder.shared[((threadIdx.z36) + 15)]))
compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 90)]placeholder.shared[((threadIdx.z36) + 15)]))
compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 108)]placeholder.shared[((threadIdx.z36) + 15)]))
compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 126)]placeholder.shared[((threadIdx.z36) + 15)]))
compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 144)]placeholder.shared[((threadIdx.z36) + 15)]))
compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 162)]placeholder.shared[((threadIdx.z36) + 15)]))
compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 36)]placeholder.shared[((threadIdx.z36) + 24)]))
compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 54)]placeholder.shared[((threadIdx.z36) + 24)]))
compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 72)]placeholder.shared[((threadIdx.z36) + 24)]))
compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 90)]placeholder.shared[((threadIdx.z36) + 24)]))
compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 108)]placeholder.shared[((threadIdx.z36) + 24)]))
compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 126)]placeholder.shared[((threadIdx.z36) + 24)]))
compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 144)]placeholder.shared[((threadIdx.z36) + 24)]))
compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 162)]placeholder.shared[((threadIdx.z36) + 24)]))
compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 36)]placeholder.shared[((threadIdx.z36) + 33)]))
compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 54)]placeholder.shared[((threadIdx.z36) + 33)]))
compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 72)]placeholder.shared[((threadIdx.z36) + 33)]))
compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 90)]placeholder.shared[((threadIdx.z36) + 33)]))
compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 108)]placeholder.shared[((threadIdx.z36) + 33)]))
compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 126)]placeholder.shared[((threadIdx.z36) + 33)]))
compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 144)]placeholder.shared[((threadIdx.z36) + 33)]))
compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 162)]placeholder.shared[((threadIdx.z36) + 33)]))
compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 37)]placeholder.shared[((threadIdx.z36) + 7)]))
compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 55)]placeholder.shared[((threadIdx.z36) + 7)]))
compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 73)]placeholder.shared[((threadIdx.z36) + 7)]))
compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 91)]placeholder.shared[((threadIdx.z36) + 7)]))
compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 109)]placeholder.shared[((threadIdx.z36) + 7)]))
compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 127)]placeholder.shared[((threadIdx.z36) + 7)]))
compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 145)]placeholder.shared[((threadIdx.z36) + 7)]))
compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 163)]placeholder.shared[((threadIdx.z36) + 7)]))
compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 37)]placeholder.shared[((threadIdx.z36) + 16)]))
compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 55)]placeholder.shared[((threadIdx.z36) + 16)]))
compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 73)]placeholder.shared[((threadIdx.z36) + 16)]))
compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 91)]placeholder.shared[((threadIdx.z36) + 16)]))
compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 109)]placeholder.shared[((threadIdx.z36) + 16)]))
compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 127)]placeholder.shared[((threadIdx.z36) + 16)]))
compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 145)]placeholder.shared[((threadIdx.z36) + 16)]))
compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 163)]placeholder.shared[((threadIdx.z36) + 16)]))
compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 37)]placeholder.shared[((threadIdx.z36) + 25)]))
compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 55)]placeholder.shared[((threadIdx.z36) + 25)]))
compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 73)]placeholder.shared[((threadIdx.z36) + 25)]))
compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 91)]placeholder.shared[((threadIdx.z36) + 25)]))
compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 109)]placeholder.shared[((threadIdx.z36) + 25)]))
compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 127)]placeholder.shared[((threadIdx.z36) + 25)]))
compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 145)]placeholder.shared[((threadIdx.z36) + 25)]))
compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 163)]placeholder.shared[((threadIdx.z36) + 25)]))
compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 37)]placeholder.shared[((threadIdx.z36) + 34)]))
compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 55)]placeholder.shared[((threadIdx.z36) + 34)]))
compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 73)]placeholder.shared[((threadIdx.z36) + 34)]))
compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 91)]placeholder.shared[((threadIdx.z36) + 34)]))
compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 109)]placeholder.shared[((threadIdx.z36) + 34)]))
compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 127)]placeholder.shared[((threadIdx.z36) + 34)]))
compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 145)]placeholder.shared[((threadIdx.z36) + 34)]))
compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 163)]placeholder.shared[((threadIdx.z36) + 34)]))
compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 38)]placeholder.shared[((threadIdx.z36) + 8)]))
compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 56)]placeholder.shared[((threadIdx.z36) + 8)]))
compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 74)]placeholder.shared[((threadIdx.z36) + 8)]))
compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 92)]placeholder.shared[((threadIdx.z36) + 8)]))
compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 110)]placeholder.shared[((threadIdx.z36) + 8)]))
compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 128)]placeholder.shared[((threadIdx.z36) + 8)]))
compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 146)]placeholder.shared[((threadIdx.z36) + 8)]))
compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 164)]placeholder.shared[((threadIdx.z36) + 8)]))
compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 38)]placeholder.shared[((threadIdx.z36) + 17)]))
compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 56)]placeholder.shared[((threadIdx.z36) + 17)]))
compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 74)]placeholder.shared[((threadIdx.z36) + 17)]))
compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 92)]placeholder.shared[((threadIdx.z36) + 17)]))
compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 110)]placeholder.shared[((threadIdx.z36) + 17)]))
compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 128)]placeholder.shared[((threadIdx.z36) + 17)]))
compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 146)]placeholder.shared[((threadIdx.z36) + 17)]))
compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 164)]placeholder.shared[((threadIdx.z36) + 17)]))
compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 38)]placeholder.shared[((threadIdx.z36) + 26)]))
compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 56)]placeholder.shared[((threadIdx.z36) + 26)]))
compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 74)]placeholder.shared[((threadIdx.z36) + 26)]))
compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 92)]placeholder.shared[((threadIdx.z36) + 26)]))
compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 110)]placeholder.shared[((threadIdx.z36) + 26)]))
compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 128)]placeholder.shared[((threadIdx.z36) + 26)]))
compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 146)]placeholder.shared[((threadIdx.z36) + 26)]))
compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 164)]placeholder.shared[((threadIdx.z36) + 26)]))
compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 38)]placeholder.shared[((threadIdx.z36) + 35)]))
compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 56)]placeholder.shared[((threadIdx.z36) + 35)]))
compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 74)]placeholder.shared[((threadIdx.z36) + 35)]))
compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 92)]placeholder.shared[((threadIdx.z36) + 35)]))
compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 110)]placeholder.shared[((threadIdx.z36) + 35)]))
compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 128)]placeholder.shared[((threadIdx.z36) + 35)]))
compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 146)]placeholder.shared[((threadIdx.z36) + 35)]))
compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 164)]placeholder.shared[((threadIdx.z36) + 35)]))
}
}
tensor[(((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z
12544))16) + threadIdx.x)] = max(((compute[0]placeholder[(threadIdx.z4)]) + placeholder[(threadIdx.z4)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z
12544))16) + threadIdx.x) + 224)] = max(((compute[1]placeholder[(threadIdx.z4)]) + placeholder[(threadIdx.z4)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z
12544))16) + threadIdx.x) + 448)] = max(((compute[2]placeholder[(threadIdx.z4)]) + placeholder[(threadIdx.z4)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z
12544))16) + threadIdx.x) + 672)] = max(((compute[3]placeholder[(threadIdx.z4)]) + placeholder[(threadIdx.z4)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z
12544))16) + threadIdx.x) + 896)] = max(((compute[4]placeholder[(threadIdx.z4)]) + placeholder[(threadIdx.z4)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 1120)] = max(((compute[5]placeholder[(threadIdx.z4)]) + placeholder[(threadIdx.z4)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 1344)] = max(((compute[6]placeholder[(threadIdx.z4)]) + placeholder[(threadIdx.z4)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 1568)] = max(((compute[7]placeholder[(threadIdx.z4)]) + placeholder[(threadIdx.z4)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 50176)] = max(((compute[8]placeholder[((threadIdx.z4) + 1)]) + placeholder[((threadIdx.z4) + 1)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 50400)] = max(((compute[9]placeholder[((threadIdx.z4) + 1)]) + placeholder[((threadIdx.z4) + 1)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 50624)] = max(((compute[10]placeholder[((threadIdx.z4) + 1)]) + placeholder[((threadIdx.z4) + 1)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 50848)] = max(((compute[11]placeholder[((threadIdx.z4) + 1)]) + placeholder[((threadIdx.z4) + 1)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 51072)] = max(((compute[12]placeholder[((threadIdx.z4) + 1)]) + placeholder[((threadIdx.z4) + 1)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 51296)] = max(((compute[13]placeholder[((threadIdx.z4) + 1)]) + placeholder[((threadIdx.z4) + 1)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 51520)] = max(((compute[14]placeholder[((threadIdx.z4) + 1)]) + placeholder[((threadIdx.z4) + 1)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 51744)] = max(((compute[15]placeholder[((threadIdx.z4) + 1)]) + placeholder[((threadIdx.z4) + 1)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 100352)] = max(((compute[16]placeholder[((threadIdx.z4) + 2)]) + placeholder[((threadIdx.z4) + 2)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 100576)] = max(((compute[17]placeholder[((threadIdx.z4) + 2)]) + placeholder[((threadIdx.z4) + 2)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 100800)] = max(((compute[18]placeholder[((threadIdx.z4) + 2)]) + placeholder[((threadIdx.z4) + 2)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 101024)] = max(((compute[19]placeholder[((threadIdx.z4) + 2)]) + placeholder[((threadIdx.z4) + 2)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 101248)] = max(((compute[20]placeholder[((threadIdx.z4) + 2)]) + placeholder[((threadIdx.z4) + 2)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 101472)] = max(((compute[21]placeholder[((threadIdx.z4) + 2)]) + placeholder[((threadIdx.z4) + 2)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 101696)] = max(((compute[22]placeholder[((threadIdx.z4) + 2)]) + placeholder[((threadIdx.z4) + 2)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 101920)] = max(((compute[23]placeholder[((threadIdx.z4) + 2)]) + placeholder[((threadIdx.z4) + 2)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 150528)] = max(((compute[24]placeholder[((threadIdx.z4) + 3)]) + placeholder[((threadIdx.z4) + 3)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 150752)] = max(((compute[25]placeholder[((threadIdx.z4) + 3)]) + placeholder[((threadIdx.z4) + 3)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 150976)] = max(((compute[26]placeholder[((threadIdx.z4) + 3)]) + placeholder[((threadIdx.z4) + 3)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 151200)] = max(((compute[27]placeholder[((threadIdx.z4) + 3)]) + placeholder[((threadIdx.z4) + 3)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 151424)] = max(((compute[28]placeholder[((threadIdx.z4) + 3)]) + placeholder[((threadIdx.z4) + 3)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 151648)] = max(((compute[29]placeholder[((threadIdx.z4) + 3)]) + placeholder[((threadIdx.z4) + 3)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z12544))16) + threadIdx.x) + 151872)] = max(((compute[30]placeholder[((threadIdx.z4) + 3)]) + placeholder[((threadIdx.z4) + 3)]), 0.000000f)
tensor[((((((blockIdx.y
112) + blockIdx.x) + (threadIdx.z*12544))16) + threadIdx.x) + 152096)] = max(((compute[31]placeholder[((threadIdx.z4) + 3)]) + placeholder[((threadIdx.z4) + 3)]), 0.000000f)
}
將cuDNN用于卷積層
可以使用cuDNN來替換卷積內核:只需將選項 "-libs=cudnn" 附加到目標字符串。
# Offload the convolution to cuDNN by appending "-libs=cudnn" to the target
# string; Relay then emits a packed call into the cuDNN library instead of
# generating its own CUDA kernel for conv2d.
net, params = testing.create_workload(simple_net)
target = "cuda -libs=cudnn"  # use cudnn for convolution
lib = relay.build_module.build(net, target, params=params)

# Run the compiled module on the GPU with the same random input so the
# result can be compared against the pure-TVM (non-cuDNN) output above.
ctx = tvm.context(target, 0)
data = np.random.uniform(-1, 1, size=data_shape).astype("float32")
module = runtime.GraphModule(lib["default"](ctx))
module.set_input("data", data)
module.run()
out_shape = (batch_size, out_channels, 224, 224)
out = module.get_output(0, tvm.nd.empty(out_shape))
out_cudnn = out.asnumpy()
如果使用cuDNN,則Relay無法將卷積與其后的圖層融合在一起。層融合發生在TVM內部表示(IR)級別。Relay將外部庫視為黑匣子,無法與TVM IR融合。
下面的偽代碼顯示cuDNN卷積+偏差加+批處理范數+ ReLU分為兩個計算階段,一個階段用于cuDNN調用,另一個階段用于其余操作。
// attr [y] storage_scope = "global"
allocate y[float32 * 802816]
produce y {
// attr [0] extern_scope = 0
tvm_call_packed("tvm.contrib.cudnn.conv2d.forward", 1, 0, 1, 1, 1, 1, 1, 1, 1, tvm_stack_make_array(placeholder, tvm_stack_make_shape(1, 3, 224, 224), 0, 4, 0.000000f, 0), tvm_stack_make_array(placeholder, tvm_stack_make_shape(16, 3, 3, 3), 0, 4, 0.000000f, 0), tvm_stack_make_array(y, tvm_stack_make_shape(1, 16, 224, 224), 0, 4, 0.000000f, 0))
}
produce tensor {
// attr [iter_var(blockIdx.x, , blockIdx.x)] thread_extent = 256
// attr [iter_var(threadIdx.x, , threadIdx.x)] thread_extent = 512
for (ax0.ax1.fused.ax2.fused.ax3.fused.outer, 0, 7) {
if (likely(((blockIdx.x*512) < ((802816 - (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072)) - threadIdx.x)))) {
tensor[(((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/802816)*802816) + (((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/224) % 224)*224) + ((((blockIdx.x*64) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*32)) % 224))) + ((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/50176) % 16)*50176))] = max(((y[(((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/802816)*802816) + (((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/224) % 224)*224) + ((((blockIdx.x*64) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*32)) % 224))) + ((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/50176) % 16)*50176))]*placeholder[(((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/50176) % 16)]) + placeholder[(((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/50176) % 16)]), 0.000000f)
}
}
}
驗證結果
可以檢查兩次運行的結果是否匹配。
tvm.testing.assert_allclose(out_cuda, out_cudnn, rtol=1e-5)
結論
本文介紹了cuDNN與Relay的用法。也支持cuBLAS。如果啟用了cuBLAS,將在完全連接的層(relay.dense)中使用。要使用cuBLAS,將目標字符串設置為 "cuda -libs=cublas"。可以將cuDNN和cuBLAS與 "cuda -libs=cudnn,cublas" 一起使用。
對于ROCm后端,支持MIOpen和rocBLAS。可以通過目標 "rocm -libs=miopen,rocblas" 啟用。
能夠使用外部庫是很棒的,需要牢記一些注意事項。
首先,使用外部庫,可能會限制對TVM和Relay的使用。例如,MIOpen目前僅支持NCHW布局和fp32數據類型,不能在TVM中使用其他布局或數據類型。
其次,更重要的是,外部庫限制了在圖形編譯過程中算子融合的可能性,如上所述。TVM和Relay旨在通過聯合算子級別和圖形級別優化來在各種硬件上實現最佳性能。應該繼續為TVM和Relay開發更好的優化方法,在必要時使用外部庫作為回退到現有實現的一種好方法。

總結

以上是生活随笔為你收集整理的Relay外部库使用的全部內容,希望文章能夠幫你解決所遇到的問題。

如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。