NN-512

Back

Index

Files

Top || Input graph file

Config Prefix=Example20 Platform=AVX512Float32 L1DataCachePerThread=32KiB L2CachePerThreadExL1=960KiB L3CachePerThreadExL1L2=1408KiB
Input ToTensor=in Channels=1317 Height=405 Width=259
Pooling FromTensor=in ToTensor=out Kind=Avg3x3Stride2 PaddingH=1 PaddingW=2
Output FromTensor=out

Top || Output Example20.h file

#pragma once

// NN-512 (https://NN-512.com)
//
// Copyright (C) 2019 [
// 37ef ced3 3727 60b4
// 3c29 f9c6 dc30 d518
// f4f3 4106 6964 cab4
// a06f c1a3 83fd 090e
// ]
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <pthread.h>
#include <stddef.h>

#ifdef __cplusplus
extern "C" { /**/
#endif

// All weights, biases, and other trained parameters are passed into
// the initialization code through the Params struct that is declared
// just below this comment. The corresponding struct definition can be
// found near the end of this header file.
//
// Each field of the Params struct is an array of float that holds a
// parameter tensor in NCHW format with no padding. The struct fields
// are ordered by name, lexically bytewise. If you concatenate all the
// trained parameter tensors to a file in this same format and order
// you can load the struct as follows (error checking omitted here):
//
// size_t size = sizeof(Example20Params);
// Example20Params* to = malloc(size);
// FILE* from = fopen("ParamsFile", "r");
// fread(to, size, 1, from);
// fclose(from);
//
// Be careful to match endianness (and floating point format).

// Forward declaration; the struct definition (and the note on its field
// ordering) is near the end of this header.
typedef struct Example20Params Example20Params;

// The Net contains weights, biases, and other trained parameters in a
// form that enables efficient inference. It is created from the input
// parameter struct without modifying that struct. The input parameter
// struct is no longer needed once the Net has been created. Threads
// that are used to create the Net are temporary (in particular, those
// threads are not used for inference).
//
// Example20Params* params = malloc(sizeof(Example20Params));
//
// ... Load params (read from a file, perhaps) ...
//
// Example20Net* net; // For example, 4 threads:
// char* err = Example20NetCreate(&net, params, 4);
// free(params);
//
// if (err) { // Nonzero err indicates failure; net is unmodified.
// printf("%s\n", err); // Explain the failure, add a newline.
// free(err); // Free the error string to avoid a memory leak.
// exit(1); // Exit, or propagate the failure some other way.
// }
//
// ... Perform all inference that depends on net ...
//
// Example20NetDestroy(net);
//
// The Net can be shared and reused without restriction because it is
// never modified (not even temporarily) after being created. The Net
// should be destroyed (to free memory) once all dependent inference
// is complete.

typedef struct Example20Net Example20Net;

// Build a Net from params using the given number of temporary threads.
// Returns 0 on success (storing the new Net through the first argument)
// or a malloc'd error string on failure (caller prints and frees it).
char* Example20NetCreate(
Example20Net**,
Example20Params*,
ptrdiff_t threads
);

// Free the Net's memory once all dependent inference is complete.
void Example20NetDestroy(Example20Net*);

// An Engine performs inference. It contains inference threads, scratch
// memory, and a pointer to the Net. Any number of Engines can share the
// same Net (and perform inference in parallel) because the Net is never
// modified. For best performance the number of inference threads should
// not exceed the number of CPU cores.
//
// Example20Net* net;
//
// ... Create net ...
//
// Example20Engine* engine; // For example, 4 inference threads:
// char* err = Example20EngineCreate(&engine, net, 4);
//
// if (err) { // Nonzero err means failure; engine is unmodified.
// printf("%s\n", err); // Explain the failure, add a newline.
// free(err); // Free the error string to avoid a memory leak.
//
// ... Destroy net ...
//
// exit(1); // Exit, or propagate the failure some other way.
// }
//
// ... Use the POSIX threads API to adjust engine's threads ...
// ... Use engine to perform inference (dependent on net) ...
//
// Example20EngineDestroy(engine); // Terminate threads, free memory.
//
// ... Destroy net ...
//
// The POSIX threads API can be used to adjust an Engine's threads. If
// an Engine has N threads, those threads are indexed 0, 1, 2, ..., N-1
// and a pthread_t identifier is associated with each index. To set the
// CPU affinity mask for the first inference thread, for example:
//
// pthread_t thread; // The first thread has index 0:
// char* err = Example20EnginePthreadT(engine, 0, &thread);
//
// assert(!err); // Can only fail if the thread index is invalid.
//
// pthread_setaffinity_np(thread, ...); // Details omitted.
//
// The inference function reads floats from (one or more) input tensors
// and writes floats to (one or more) output tensors. All the input and
// output tensors are owned (allocated and freed) by the caller and are
// in CHW format, 32-bit floating point, fully packed (in other words,
// C has the largest pitch, W has the smallest pitch, and there is no
// padding anywhere).
//
// float* inData = malloc(sizeof(float)*1317*405*259);
// float* outData = malloc(sizeof(float)*1317*203*131);
//
// for (...) { // Reuse the input and output tensors.
//
// ... Write the input floats ...
//
// Example20EngineInference( // This function cannot fail.
// engine, // Pass an Engine as the first argument.
// inData, // The tensor arguments are sorted by name.
// outData
// );
//
// ... Read the output floats ...
//
// }
//
// free(inData);
// free(outData);
//
// The tensor parameters of the inference function are ordered by name,
// lexically bytewise. In other words, the function parameters have been
// sorted by name using Go's "<" string comparison operator (a bytewise
// lexical string sort).

typedef struct Example20Engine Example20Engine;

// Create an Engine with the given number of inference threads, sharing
// the (never-modified) Net. Returns 0 on success or a malloc'd error
// string on failure (caller prints and frees it).
char* Example20EngineCreate(
Example20Engine**,
Example20Net*,
ptrdiff_t threads
);

// Store the pthread_t of inference thread threadIdx (0-based) through
// to. Fails (returns a malloc'd error string) only if threadIdx is out
// of range.
char* Example20EnginePthreadT(
Example20Engine*,
ptrdiff_t threadIdx,
pthread_t* to
);

// Run one inference pass: reads inData (1317x405x259 CHW floats) and
// writes outData (1317x203x131 CHW floats). Cannot fail.
void Example20EngineInference(
Example20Engine*,
float* inData,
float* outData
);

// Terminate the Engine's threads and free its memory.
void Example20EngineDestroy(Example20Engine*);

// The fields of the following struct have been sorted by name using
// Go's "<" string comparison operator (bytewise lexical string sort).
// Tensor dimensions are NxCxHxW where N is the outermost/slowest and
// W is the innermost/fastest. There is no padding anywhere.

// Empty: this graph (Input -> Avg3x3Stride2 pooling -> Output) has no
// trained parameters, so there are no fields.
struct Example20Params {
} __attribute__((packed));

#ifdef __cplusplus
/**/ }
#endif

// End of file.

Top || Output Example20.c file

// To build an object file:
// gcc -c -w -std=c99 -pthread -Ofast -mavx512f Example20.c

// NN-512 (https://NN-512.com)
//
// Copyright (C) 2019 [
// 37ef ced3 3727 60b4
// 3c29 f9c6 dc30 d518
// f4f3 4106 6964 cab4
// a06f c1a3 83fd 090e
// ]
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <errno.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <immintrin.h>

#include "Example20.h"

// Build a heap-allocated "Example20: line N: <detail>" error message.
// The caller owns (and must free) the returned string. The detail part
// is bounded by vsnprintf so the fixed buffer cannot overflow.
// NOTE(review): the malloc result is not checked here (matches the
// original generated code); on OOM the sprintf below would fault.
static char* Example20Errmsg1(ptrdiff_t lineNum1, char* format1, ...) {
enum { errBytes1 = 277 };
char* text1 = malloc(errBytes1);
int used1 = sprintf(text1, "Example20: line %td: ", lineNum1);
va_list args1;
va_start(args1, format1);
vsnprintf(text1+used1, errBytes1-used1, format1, args1);
va_end(args1);
return text1;
}

typedef struct Example20ThreaderTask1 Example20ThreaderTask1;
typedef void (*Example20ThreaderCallee1)(Example20ThreaderTask1*, int64_t*);
typedef struct Example20ThreaderHub1 Example20ThreaderHub1;
typedef struct Example20ThreaderNode1 Example20ThreaderNode1;
typedef struct Example20ThreaderUnwind1 Example20ThreaderUnwind1;
typedef struct Example20ThreaderTeam1 Example20ThreaderTeam1;

// A unit of parallel work: callee1 is invoked once per point of an
// nd1-dimensional lattice whose per-dimension extents are hull1 (at
// most 4 dimensions); any1 is an opaque payload for the callee.
struct Example20ThreaderTask1 {
Example20ThreaderCallee1 callee1;
void* any1;
ptrdiff_t nd1;
int64_t hull1[4];
};

// Shared coordination state for a team. mut1 guards all fields; cond1
// is signaled when pending1 (workers still running the current task)
// drops to zero. status1 is a flexible-array bitmask of nodes that may
// still have unstolen work; offset1/mask1 remember the scan position.
struct Example20ThreaderHub1 {
pthread_mutex_t mut1;
pthread_cond_t cond1;
ptrdiff_t pending1;
ptrdiff_t offset1;
long mask1;
long status1[];
};

// Per-worker state. mut2 guards np1 (points left to visit; -1 requests
// thread exit), pt1 (the next lattice coordinate), and task1; cond2
// wakes the worker. The 64-byte alignment presumably keeps each node on
// its own cache line(s), since a node is mostly touched by one thread.
struct Example20ThreaderNode1 {
pthread_mutex_t mut2;
int64_t np1;
int64_t pt1[4];
Example20ThreaderTask1* task1;
pthread_cond_t cond2;
Example20ThreaderTeam1* team1;
pthread_t thr1;
} __attribute__((aligned(64)));

// Rollback bookkeeping for partially-failed construction: how many
// threads were created (join1), how many node conds/mutexes were
// initialized, whether the hub cond/mutex were initialized, and the
// raw (unaligned) malloc'd blocks to free.
struct Example20ThreaderUnwind1 {
ptrdiff_t join1;
ptrdiff_t nodeConds1;
ptrdiff_t nodeMuts1;
ptrdiff_t hubCond1;
ptrdiff_t hubMut1;
void* nodes1;
void* hub1;
};

// A thread team: nt1 worker count plus cache-line-aligned pointers into
// the raw allocations recorded in unwind1.
struct Example20ThreaderTeam1 {
ptrdiff_t nt1;
Example20ThreaderHub1* hub2;
Example20ThreaderNode1* nodes2;
Example20ThreaderUnwind1 unwind1;
};

// Advance pt2 to the next point of the nd2-dimensional lattice bounded
// by hull2, odometer style: dimension 0 is the fastest digit; a digit
// that reaches its extent wraps to zero and carries into the next one.
// Wraps to the all-zero point after the last lattice point.
static void Example20ThreaderInc1(
ptrdiff_t nd2,
int64_t*restrict hull2,
int64_t*restrict pt2
) {
ptrdiff_t axis1 = 0;
while (axis1 < nd2) {
int64_t next1 = pt2[axis1]+1;
if (next1 != hull2[axis1]) {
pt2[axis1] = next1;
return;
}
pt2[axis1++] = 0;
}
}

// Decompose the linear index val1 into mixed-radix digits pt3 over the
// nd3 radixes in hull3 (dimension 0 is the least significant digit).
// Digits beyond the point where val1 is exhausted are zeroed.
static void Example20ThreaderPut1(
ptrdiff_t nd3,
int64_t*restrict hull3,
int64_t*restrict pt3,
int64_t val1
) {
ptrdiff_t axis2 = 0;
while (axis2 < nd3 && val1) {
int64_t radix1 = hull3[axis2];
pt3[axis2++] = val1%radix1;
val1 /= radix1;
}
while (axis2 < nd3) {
pt3[axis2++] = 0;
}
}

// Mixed-radix addition: pt4 += plus1 + carry2 over the nd4 radixes in
// hull4 (dimension 0 least significant). A final carry out of the most
// significant digit is discarded, so the result wraps within the hull.
static void Example20ThreaderAdd1(
ptrdiff_t nd4,
int64_t*restrict hull4,
int64_t*restrict pt4,
int64_t*restrict plus1,
int64_t carry2
) {
for (ptrdiff_t axis3 = 0; axis3 < nd4; ++axis3) {
int64_t radix2 = hull4[axis3];
int64_t total1 = pt4[axis3]+plus1[axis3]+carry2;
carry2 = total1 >= radix2;
pt4[axis3] = carry2 ? total1-radix2 : total1;
}
}

// Worker-thread entry point (one per Node). A worker sleeps until a
// task is posted to its Node, drains its own share of the task's
// iteration space, then steals remaining work from other Nodes via the
// hub's status bitmask, and finally reports completion to the hub.
static void* Example20ThreaderMain1(void* arg1) {
Example20ThreaderNode1* node1 = arg1;
Example20ThreaderTeam1* team2 = node1->team1;
ptrdiff_t nt2 = team2->nt1;
Example20ThreaderHub1* hub3 = team2->hub2;
Example20ThreaderNode1* nodes3 = team2->nodes2;
size_t role1 = node1-nodes3;
for (; __builtin_expect(pthread_mutex_lock(&node1->mut2), 0); );
for (; ; ) {
Example20ThreaderTask1* task2 = node1->task1;
if (!task2) {
// No task posted yet: wait (cond2 is signaled by Do1/Destroy1).
for (; __builtin_expect(pthread_cond_wait(&node1->cond2, &node1->mut2), 0); );
continue;
}
int64_t np2 = node1->np1;
if (np2 < 0) {
// np1 == -1 is the shutdown request set by Destroy1.
for (; __builtin_expect(pthread_mutex_unlock(&node1->mut2), 0); );
return 0;
}
node1->task1 = 0;
Example20ThreaderCallee1 callee2 = task2->callee1;
ptrdiff_t nd5 = task2->nd1;
int64_t pt5[4];
// Drain this node's own share: np1 counts remaining points and pt1
// is the next coordinate; the node mutex is dropped around each call
// into the callee so other workers can steal from this node.
for (; np2; np2 = node1->np1) {
memcpy(pt5, node1->pt1, sizeof(pt5));
node1->np1 = np2-1;
Example20ThreaderInc1(nd5, task2->hull1, node1->pt1);
for (; __builtin_expect(pthread_mutex_unlock(&node1->mut2), 0); );
callee2(task2, pt5);
for (; __builtin_expect(pthread_mutex_lock(&node1->mut2), 0); );
}
for (; __builtin_expect(pthread_mutex_unlock(&node1->mut2), 0); );
// Own share exhausted: clear this node's busy bit in the hub mask.
for (; __builtin_expect(pthread_mutex_lock(&hub3->mut1), 0); );
hub3->status1[role1/(sizeof(long)*8)] &= ~((long)1<<role1%(sizeof(long)*8));
ptrdiff_t offset2 = hub3->offset1;
long mask2 = hub3->mask1;
ptrdiff_t wrapped1 = 0;
// Scan the status bitmask for nodes that may still have work to steal.
for (; ; ) {
long hand1 = hub3->status1[offset2]&mask2;
if (!hand1) {
++offset2;
mask2 = -1;
continue;
}
ptrdiff_t target1 = offset2*(sizeof(long)*8)+__builtin_ctzl(hand1);
if (target1 == nt2) {
// Bit nt2, just past the last node, is set by Do1 and never
// cleared: it terminates the scan. Exit only after one full
// wrap confirms that no node bit remains set.
if (wrapped1) break;
offset2 = 0;
mask2 = -1;
wrapped1 = 1;
continue;
}
hand1 &= -hand1;
hub3->offset1 = offset2;
hub3->mask1 = mask2-hand1;
for (; __builtin_expect(pthread_mutex_unlock(&hub3->mut1), 0); );
// Steal: drain the victim node's remaining points the same way.
Example20ThreaderNode1* node2 = nodes3+target1;
for (; __builtin_expect(pthread_mutex_lock(&node2->mut2), 0); );
for (np2 = node2->np1; np2; np2 = node2->np1) {
memcpy(pt5, node2->pt1, sizeof(pt5));
node2->np1 = np2-1;
Example20ThreaderInc1(nd5, task2->hull1, node2->pt1);
for (; __builtin_expect(pthread_mutex_unlock(&node2->mut2), 0); );
callee2(task2, pt5);
for (; __builtin_expect(pthread_mutex_lock(&node2->mut2), 0); );
}
for (; __builtin_expect(pthread_mutex_unlock(&node2->mut2), 0); );
for (; __builtin_expect(pthread_mutex_lock(&hub3->mut1), 0); );
hub3->status1[offset2] &= ~hand1;
offset2 = hub3->offset1;
mask2 = hub3->mask1;
wrapped1 = 0;
}
// The last finisher (pending1 hits zero) wakes Do1 waiting on cond1.
ptrdiff_t pending2 = --hub3->pending1;
for (; __builtin_expect(pthread_mutex_unlock(&hub3->mut1), 0); );
if (!pending2) for (; __builtin_expect(pthread_cond_signal(&hub3->cond1), 0); );
for (; __builtin_expect(pthread_mutex_lock(&node1->mut2), 0); );
}
}

// Tear down a team. Safe on a partially-constructed team: the unwind1
// counters record exactly how many node mutexes/conds/threads and which
// hub primitives were successfully created. A null team3 is a no-op.
static void Example20ThreaderDestroy1(Example20ThreaderTeam1* team3) {
if (!team3) return;
Example20ThreaderNode1* nodes4 = team3->nodes2;
Example20ThreaderNode1* stop1 = nodes4+team3->unwind1.join1;
for (Example20ThreaderNode1* node3 = nodes4; node3 != stop1; ++node3) {
// np1 = -1 tells Example20ThreaderMain1 to exit; the nonzero dummy
// task pointer gets the worker past its "no task" wait loop.
for (; __builtin_expect(pthread_mutex_lock(&node3->mut2), 0); );
node3->np1 = -1;
node3->task1 = (Example20ThreaderTask1*)1;
for (; __builtin_expect(pthread_mutex_unlock(&node3->mut2), 0); );
for (; __builtin_expect(pthread_cond_signal(&node3->cond2), 0); );
}
for (Example20ThreaderNode1* node3 = nodes4; node3 != stop1; ++node3) {
for (; __builtin_expect(pthread_join(node3->thr1, 0), 0); );
}
// Destroy only the conds/mutexes that were actually initialized.
stop1 = nodes4+team3->unwind1.nodeConds1;
for (Example20ThreaderNode1* node3 = nodes4; node3 != stop1; ++node3) {
for (; __builtin_expect(pthread_cond_destroy(&node3->cond2), 0); );
}
stop1 = nodes4+team3->unwind1.nodeMuts1;
for (Example20ThreaderNode1* node3 = nodes4; node3 != stop1; ++node3) {
for (; __builtin_expect(pthread_mutex_destroy(&node3->mut2), 0); );
}
Example20ThreaderHub1* hub4 = team3->hub2;
if (team3->unwind1.hubCond1) {
for (; __builtin_expect(pthread_cond_destroy(&hub4->cond1), 0); );
}
if (team3->unwind1.hubMut1) {
for (; __builtin_expect(pthread_mutex_destroy(&hub4->mut1), 0); );
}
// Free the raw (unaligned) allocations, then the team itself.
free(team3->unwind1.nodes1);
free(team3->unwind1.hub1);
free(team3);
}

// Initialize per-node mutexes/conds and spawn a worker thread for each
// of the nt7 nodes. On any failure, unwind1 records exactly how many
// mutexes, conds, and threads were successfully created so that
// Destroy1 can roll back, and a malloc'd error string is returned.
static char* Example20ThreaderCreate1Up4(Example20ThreaderTeam1* team8, ptrdiff_t nt7) {
Example20ThreaderNode1* nodes5 = team8->nodes2;
for (Example20ThreaderNode1* node4 = nodes5; node4 != nodes5+nt7; ++node4) {
int err2 = pthread_mutex_init(&node4->mut2, 0);
if (__builtin_expect(err2, 0)) {
char* msg2 = Example20Errmsg1(__LINE__, "errno %d", err2);
// node4-nodes5 of everything exist; this node's mutex failed.
team8->unwind1.nodeMuts1 = node4-nodes5;
team8->unwind1.nodeConds1 = node4-nodes5;
team8->unwind1.join1 = node4-nodes5;
return msg2;
}
node4->task1 = 0;
int err3 = pthread_cond_init(&node4->cond2, 0);
if (__builtin_expect(err3, 0)) {
char* msg3 = Example20Errmsg1(__LINE__, "errno %d", err3);
// This node's mutex exists (+1) but its cond does not.
team8->unwind1.nodeMuts1 = node4-nodes5+1;
team8->unwind1.nodeConds1 = node4-nodes5;
team8->unwind1.join1 = node4-nodes5;
return msg3;
}
node4->team1 = team8;
int err4 = pthread_create(&node4->thr1, 0, Example20ThreaderMain1, node4);
if (__builtin_expect(err4, 0)) {
char* msg4 = Example20Errmsg1(__LINE__, "errno %d", err4);
// This node's mutex and cond exist; its thread was not started.
team8->unwind1.nodeMuts1 = node4-nodes5+1;
team8->unwind1.nodeConds1 = node4-nodes5+1;
team8->unwind1.join1 = node4-nodes5;
return msg4;
}
}
team8->unwind1.nodeMuts1 = nt7;
team8->unwind1.nodeConds1 = nt7;
team8->unwind1.join1 = nt7;
return 0;
}

// Initialize the hub's mutex and condition variable, recording each
// successful step in the unwind log, then continue with node setup.
// Returns 0 on success or a malloc'd error string.
static char* Example20ThreaderCreate1Up3(Example20ThreaderTeam1* team7, ptrdiff_t nt6) {
Example20ThreaderHub1* theHub1 = team7->hub2;
int rc1 = pthread_mutex_init(&theHub1->mut1, 0);
if (__builtin_expect(rc1, 0)) return Example20Errmsg1(__LINE__, "errno %d", rc1);
team7->unwind1.hubMut1 = 1;
int rc2 = pthread_cond_init(&theHub1->cond1, 0);
if (__builtin_expect(rc2, 0)) return Example20Errmsg1(__LINE__, "errno %d", rc2);
team7->unwind1.hubCond1 = 1;
return Example20ThreaderCreate1Up4(team7, nt6);
}

// Allocate the node array (over-allocated by 63 bytes so the node base
// can be rounded up to a 64-byte boundary), guarding against size_t
// overflow in the byte count. Returns 0 or a malloc'd error string.
static char* Example20ThreaderCreate1Up2(Example20ThreaderTeam1* team6, ptrdiff_t nt5) {
size_t nodeBytes1 = nt5*sizeof(Example20ThreaderNode1);
int overflow1 = nodeBytes1/sizeof(Example20ThreaderNode1) != (size_t)nt5;
if (__builtin_expect(overflow1, 0)) {
return Example20Errmsg1(__LINE__, "too many threads");
}
void* rawNodes1 = malloc(nodeBytes1+63);
if (__builtin_expect(!rawNodes1, 0)) {
return Example20Errmsg1(__LINE__, "errno %d", errno);
}
team6->unwind1.nodes1 = rawNodes1;
team6->nodes2 = (void*)(((size_t)rawNodes1+63)&-64);
return Example20ThreaderCreate1Up3(team6, nt5);
}

// Record the thread count and allocate the hub: the struct itself plus
// enough whole long words to hold one status bit per node (and one
// extra sentinel bit), over-allocated by 63 bytes so the hub base can
// be rounded up to a 64-byte boundary. Returns 0 or an error string.
static char* Example20ThreaderCreate1Up1(Example20ThreaderTeam1* team5, ptrdiff_t nt4) {
team5->nt1 = nt4;
size_t hubBytes1 = sizeof(Example20ThreaderHub1);
hubBytes1 += sizeof(long)*((size_t)nt4/(sizeof(long)*8)+1);
hubBytes1 = (hubBytes1+63)&-64;
void* rawHub1 = malloc(hubBytes1+63);
if (__builtin_expect(!rawHub1, 0)) {
return Example20Errmsg1(__LINE__, "errno %d", errno);
}
team5->unwind1.hub1 = rawHub1;
team5->hub2 = (void*)(((size_t)rawHub1+63)&-64);
return Example20ThreaderCreate1Up2(team5, nt4);
}

// Create a team of nt3 worker threads. On success stores the new team
// through team4 and returns 0; on failure returns a malloc'd error
// string after rolling back whatever was partially constructed.
static char* Example20ThreaderCreate1(Example20ThreaderTeam1** team4, ptrdiff_t nt3) {
if (__builtin_expect(nt3 < 1, 0)) {
return Example20Errmsg1(__LINE__, "too few threads");
}
// calloc zeroes the unwind log, so Destroy1 is safe at any stage.
Example20ThreaderTeam1* fresh1 = calloc(1, sizeof(Example20ThreaderTeam1));
if (__builtin_expect(!fresh1, 0)) {
return Example20Errmsg1(__LINE__, "errno %d", errno);
}
char* err1 = Example20ThreaderCreate1Up1(fresh1, nt3);
if (__builtin_expect(!!err1, 0)) {
Example20ThreaderDestroy1(fresh1);
return err1;
}
*team4 = fresh1;
return 0;
}

// Store the pthread_t of worker idx1 (0-based) through thr2. Returns 0
// on success, or a malloc'd error string if idx1 is out of range.
static char* Example20ThreaderPthreadT1(
pthread_t* thr2,
Example20ThreaderTeam1* team9,
ptrdiff_t idx1
) {
if (__builtin_expect(idx1 >= 0 && idx1 < team9->nt1, 1)) {
*thr2 = team9->nodes2[idx1].thr1;
return 0;
}
return Example20Errmsg1(__LINE__, "bad thread idx");
}

// Run one task to completion on the team (blocks until done). The
// task's hull1[0..nd1) lattice (tot1 points) is split as evenly as
// possible: every node gets tot1/nt8 points and the first tot1%nt8
// nodes get one extra. The split is only a starting point; workers
// steal from each other afterwards (see Example20ThreaderMain1).
static void Example20ThreaderDo1(Example20ThreaderTeam1* team10, Example20ThreaderTask1* task3) {
ptrdiff_t nd6 = task3->nd1;
if (nd6 < 1) return;
int64_t tot1 = task3->hull1[0];
for (ptrdiff_t i4 = 1; i4 < nd6; tot1 *= task3->hull1[i4++]);
ptrdiff_t nt8 = team10->nt1;
int64_t each1 = tot1/nt8;
ptrdiff_t more1 = tot1%nt8;
// plus2 is each1 expressed as a mixed-radix number in the hull; it is
// used to step pt6 (the next node's starting coordinate) additively.
int64_t plus2[4];
Example20ThreaderPut1(nd6, task3->hull1, plus2, each1);
int64_t pt6[4] = {0};
Example20ThreaderHub1* hub6 = team10->hub2;
for (; __builtin_expect(pthread_mutex_lock(&hub6->mut1), 0); );
Example20ThreaderNode1* node5 = team10->nodes2;
for (ptrdiff_t i4 = 0; ; ++node5) {
for (; __builtin_expect(pthread_mutex_lock(&node5->mut2), 0); );
int64_t carry3 = i4 < more1;
node5->np1 = each1+carry3;
memcpy(node5->pt1, pt6, sizeof(pt6));
node5->task1 = task3;
for (; __builtin_expect(pthread_mutex_unlock(&node5->mut2), 0); );
for (; __builtin_expect(pthread_cond_signal(&node5->cond2), 0); );
if (++i4 == nt8) break;
Example20ThreaderAdd1(nd6, task3->hull1, pt6, plus2, carry3);
}
// Set every status word to all-ones: every node is marked busy, and
// the never-cleared bit at index nt8 terminates the workers' scan.
hub6->offset1 = 0;
hub6->mask1 = -1;
for (ptrdiff_t i4 = (size_t)nt8/(sizeof(long)*8); i4 >= 0; ) {
hub6->status1[i4--] = -1;
}
// Block until the last worker decrements pending1 to zero (it then
// signals cond1; see the end of Example20ThreaderMain1's task loop).
for (hub6->pending1 = nt8; hub6->pending1; ) {
for (; __builtin_expect(pthread_cond_wait(&hub6->cond1, &hub6->mut1), 0); );
}
for (; __builtin_expect(pthread_mutex_unlock(&hub6->mut1), 0); );
}

// Vectorized single-precision e^x. Computes 2^k * e^f where
// k = round(x*log2(e)) and f = x - k*ln(2) is the reduced argument
// (ln(2) is split into two constants, Cody-Waite style, for extra
// precision). e^f is approximated by a degree-4 polynomial in Horner
// form, and the 2^k scaling is applied by adding k (shifted into the
// float exponent field) to the bit pattern of the polynomial result.
// The input is clamped first so the final result neither overflows
// nor underflows in float32.
static __m512 Example20Exp1(__m512 x1) {
x1 = _mm512_max_ps(x1, _mm512_set1_ps(-8.733654e+01f));
x1 = _mm512_min_ps(x1, _mm512_set1_ps(8.872284e+01f));
__m512 t1 = _mm512_mul_ps(x1, _mm512_set1_ps(1.442695e+00f));
__m512 r1 = _mm512_roundscale_ps(t1, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m512 f1 = _mm512_fmadd_ps(r1, _mm512_set1_ps(-6.9314575e-01f), x1);
f1 = _mm512_fmadd_ps(r1, _mm512_set1_ps(-1.4286068e-06f), f1);
__m512 g1 = _mm512_set1_ps(4.194439e-02f);
g1 = _mm512_fmadd_ps(g1, f1, _mm512_set1_ps(1.6800667e-01f));
g1 = _mm512_fmadd_ps(g1, f1, _mm512_set1_ps(4.9999994e-01f));
g1 = _mm512_fmadd_ps(g1, f1, _mm512_set1_ps(9.999569e-01f));
g1 = _mm512_fmadd_ps(g1, f1, _mm512_set1_ps(9.9999964e-01f));
// cvtps_epi32 rounds to nearest, matching r1 above.
__m512i y1 = _mm512_slli_epi32(_mm512_cvtps_epi32(t1), 23);
return _mm512_castsi512_ps(_mm512_add_epi32(y1, _mm512_castps_si512(g1)));
}

// Vectorized single-precision 1/sqrt(x): the ~14-bit rsqrt14 hardware
// estimate refined by one Newton-Raphson step, y * (3 - x*y*y) / 2.
static __m512 Example20Rsqrt1(__m512 x2) {
__m512 y2 = _mm512_rsqrt14_ps(x2);
__m512 z1 = _mm512_mul_ps(x2, y2);
__m512 a1 = _mm512_mul_ps(y2, _mm512_set1_ps(5e-01f));
__m512 b1 = _mm512_fnmadd_ps(y2, z1, _mm512_set1_ps(3e+00f));
return _mm512_mul_ps(a1, b1);
}

static void Example20Thrpl1Callee1(Example20ThreaderTask1* task4, int64_t* pt7) {
char** tensors2 = task4->any1;
ptrdiff_t b2 = pt7[0];
ptrdiff_t e1 = pt7[1];
ptrdiff_t c1 = pt7[2];
char*restrict ptr1 = tensors2[0]-(ptrdiff_t)1036+(ptrdiff_t)31080*b2+(ptrdiff_t)1036*e1+(ptrdiff_t)419580*c1;
char*restrict ptr2 = tensors2[1]+(ptrdiff_t)7860*b2+(ptrdiff_t)518*e1+(ptrdiff_t)106372*c1;
if (b2 < 12) {
if (!b2) {
for (ptrdiff_t i5 = 0; i5 < 1; ++i5) {
__m512 in1 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1036+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*0+(ptrdiff_t)128*0);
__m512 in2 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1100+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*0+(ptrdiff_t)128*0);
__m512 dat1 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)2072+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*0+(ptrdiff_t)128*0);
__m512 dat2 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)2136+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*0+(ptrdiff_t)128*0);
in1 = _mm512_add_ps(in1, dat1);
in2 = _mm512_add_ps(in2, dat2);
__m512i pm1 = _mm512_set_epi32(28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0, 30);
__m512i pm2 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pm3 = _mm512_set_epi32(29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1, 31);
__m512 out1 = _mm512_permutex2var_ps(in1, pm1, in2);
__m512 pack1 = _mm512_permutex2var_ps(in1, pm2, in2);
__m512 pack2 = _mm512_permutex2var_ps(in1, pm3, in2);
out1 = _mm512_mask_mov_ps(out1, 1, pack1);
out1 = _mm512_mask_add_ps(out1, 65534, out1, pack1);
out1 = _mm512_mask_add_ps(out1, 65534, out1, pack2);
__m512 rcp1 = _mm512_set_ps(1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 5e-01f);
out1 = _mm512_mul_ps(out1, rcp1);
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)106372*i5+(ptrdiff_t)524*0+(ptrdiff_t)64*0, 65535, out1);
for (ptrdiff_t k1 = 1; k1 < 8; ++k1) {
__m512 in3 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1036+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*0+(ptrdiff_t)128*k1);
__m512 in4 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1100+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*0+(ptrdiff_t)128*k1);
__m512 dat3 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)2072+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*0+(ptrdiff_t)128*k1);
__m512 dat4 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)2136+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*0+(ptrdiff_t)128*k1);
in3 = _mm512_add_ps(in3, dat3);
in4 = _mm512_add_ps(in4, dat4);
__m512 blend1 = _mm512_mask_mov_ps(in4, 49152, in2);
__m512 out2 = _mm512_permutex2var_ps(in3, pm1, blend1);
__m512 pack3 = _mm512_permutex2var_ps(in3, pm2, in4);
__m512 pack4 = _mm512_permutex2var_ps(in3, pm3, blend1);
out2 = _mm512_mask_add_ps(out2, 65535, out2, pack3);
out2 = _mm512_mask_add_ps(out2, 65535, out2, pack4);
in2 = in4;
out2 = _mm512_mul_ps(out2, _mm512_set1_ps(1.6666667e-01f));
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)106372*i5+(ptrdiff_t)524*0+(ptrdiff_t)64*k1, 65535, out2);
}
__m512 in5 = _mm512_maskz_loadu_ps(7, ptr1+(ptrdiff_t)1036+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*0+(ptrdiff_t)128*8);
__m512 dat5 = _mm512_maskz_loadu_ps(7, ptr1+(ptrdiff_t)2072+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*0+(ptrdiff_t)128*8);
in5 = _mm512_add_ps(in5, dat5);
__m512 out3 = _mm512_permutex2var_ps(in5, pm1, in2);
__m512 pack5 = _mm512_shuffle_ps(in5, in5, 8);
__m512 pack6 = _mm512_permutex2var_ps(in5, pm3, in2);
out3 = _mm512_mask_add_ps(out3, 3, out3, pack5);
out3 = _mm512_mask_add_ps(out3, 3, out3, pack6);
__m512 rcp2 = _mm512_set_ps(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5e-01f, 1.6666667e-01f, 1.6666667e-01f);
out3 = _mm512_mul_ps(out3, rcp2);
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)106372*i5+(ptrdiff_t)524*0+(ptrdiff_t)64*8, 7, out3);
for (ptrdiff_t j1 = 1; j1 < 15; ++j1) {
__m512 in6 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*j1+(ptrdiff_t)128*0);
__m512 in7 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)64+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*j1+(ptrdiff_t)128*0);
__m512 dat6 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1036+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*j1+(ptrdiff_t)128*0);
__m512 dat8 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1100+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*j1+(ptrdiff_t)128*0);
__m512 dat7 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)2072+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*j1+(ptrdiff_t)128*0);
__m512 dat9 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)2136+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*j1+(ptrdiff_t)128*0);
in6 = _mm512_add_ps(in6, dat6);
in7 = _mm512_add_ps(in7, dat8);
in6 = _mm512_add_ps(in6, dat7);
in7 = _mm512_add_ps(in7, dat9);
__m512i pm4 = _mm512_set_epi32(28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0, 30);
__m512i pm5 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pm6 = _mm512_set_epi32(29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1, 31);
__m512 out4 = _mm512_permutex2var_ps(in6, pm4, in7);
__m512 pack7 = _mm512_permutex2var_ps(in6, pm5, in7);
__m512 pack8 = _mm512_permutex2var_ps(in6, pm6, in7);
out4 = _mm512_mask_mov_ps(out4, 1, pack7);
out4 = _mm512_mask_add_ps(out4, 65534, out4, pack7);
out4 = _mm512_mask_add_ps(out4, 65534, out4, pack8);
__m512 rcp3 = _mm512_set_ps(1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 3.3333334e-01f);
out4 = _mm512_mul_ps(out4, rcp3);
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)106372*i5+(ptrdiff_t)524*j1+(ptrdiff_t)64*0, 65535, out4);
for (ptrdiff_t k2 = 1; k2 < 8; ++k2) {
__m512 in8 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*j1+(ptrdiff_t)128*k2);
__m512 in9 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)64+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*j1+(ptrdiff_t)128*k2);
__m512 dat10 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1036+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*j1+(ptrdiff_t)128*k2);
__m512 dat12 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1100+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*j1+(ptrdiff_t)128*k2);
__m512 dat11 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)2072+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*j1+(ptrdiff_t)128*k2);
__m512 dat13 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)2136+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*j1+(ptrdiff_t)128*k2);
in8 = _mm512_add_ps(in8, dat10);
in9 = _mm512_add_ps(in9, dat12);
in8 = _mm512_add_ps(in8, dat11);
in9 = _mm512_add_ps(in9, dat13);
__m512 blend2 = _mm512_mask_mov_ps(in9, 49152, in7);
__m512 out5 = _mm512_permutex2var_ps(in8, pm4, blend2);
__m512 pack9 = _mm512_permutex2var_ps(in8, pm5, in9);
__m512 pack10 = _mm512_permutex2var_ps(in8, pm6, blend2);
out5 = _mm512_mask_add_ps(out5, 65535, out5, pack9);
out5 = _mm512_mask_add_ps(out5, 65535, out5, pack10);
in7 = in9;
out5 = _mm512_mul_ps(out5, _mm512_set1_ps(1.1111111e-01f));
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)106372*i5+(ptrdiff_t)524*j1+(ptrdiff_t)64*k2, 65535, out5);
}
__m512 in10 = _mm512_maskz_loadu_ps(7, ptr1+(ptrdiff_t)0+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*j1+(ptrdiff_t)128*8);
__m512 dat14 = _mm512_maskz_loadu_ps(7, ptr1+(ptrdiff_t)1036+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*j1+(ptrdiff_t)128*8);
__m512 dat15 = _mm512_maskz_loadu_ps(7, ptr1+(ptrdiff_t)2072+(ptrdiff_t)419580*i5+(ptrdiff_t)2072*j1+(ptrdiff_t)128*8);
in10 = _mm512_add_ps(in10, dat14);
in10 = _mm512_add_ps(in10, dat15);
__m512 out6 = _mm512_permutex2var_ps(in10, pm4, in7);
__m512 pack11 = _mm512_shuffle_ps(in10, in10, 8);
__m512 pack12 = _mm512_permutex2var_ps(in10, pm6, in7);
out6 = _mm512_mask_add_ps(out6, 3, out6, pack11);
out6 = _mm512_mask_add_ps(out6, 3, out6, pack12);
__m512 rcp4 = _mm512_set_ps(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3.3333334e-01f, 1.1111111e-01f, 1.1111111e-01f);
out6 = _mm512_mul_ps(out6, rcp4);
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)106372*i5+(ptrdiff_t)524*j1+(ptrdiff_t)64*8, 7, out6);
}
}
return;
}
for (ptrdiff_t i6 = 0; i6 < 1; ++i6) {
for (ptrdiff_t j2 = 0; j2 < 15; ++j2) {
__m512 in11 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)419580*i6+(ptrdiff_t)2072*j2+(ptrdiff_t)128*0);
__m512 in12 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)64+(ptrdiff_t)419580*i6+(ptrdiff_t)2072*j2+(ptrdiff_t)128*0);
__m512 dat16 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1036+(ptrdiff_t)419580*i6+(ptrdiff_t)2072*j2+(ptrdiff_t)128*0);
__m512 dat18 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1100+(ptrdiff_t)419580*i6+(ptrdiff_t)2072*j2+(ptrdiff_t)128*0);
__m512 dat17 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)2072+(ptrdiff_t)419580*i6+(ptrdiff_t)2072*j2+(ptrdiff_t)128*0);
__m512 dat19 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)2136+(ptrdiff_t)419580*i6+(ptrdiff_t)2072*j2+(ptrdiff_t)128*0);
in11 = _mm512_add_ps(in11, dat16);
in12 = _mm512_add_ps(in12, dat18);
in11 = _mm512_add_ps(in11, dat17);
in12 = _mm512_add_ps(in12, dat19);
__m512i pm7 = _mm512_set_epi32(28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0, 30);
__m512i pm8 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pm9 = _mm512_set_epi32(29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1, 31);
__m512 out7 = _mm512_permutex2var_ps(in11, pm7, in12);
__m512 pack13 = _mm512_permutex2var_ps(in11, pm8, in12);
__m512 pack14 = _mm512_permutex2var_ps(in11, pm9, in12);
out7 = _mm512_mask_mov_ps(out7, 1, pack13);
out7 = _mm512_mask_add_ps(out7, 65534, out7, pack13);
out7 = _mm512_mask_add_ps(out7, 65534, out7, pack14);
__m512 rcp5 = _mm512_set_ps(1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 3.3333334e-01f);
out7 = _mm512_mul_ps(out7, rcp5);
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)106372*i6+(ptrdiff_t)524*j2+(ptrdiff_t)64*0, 65535, out7);
for (ptrdiff_t k3 = 1; k3 < 8; ++k3) {
__m512 in13 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)419580*i6+(ptrdiff_t)2072*j2+(ptrdiff_t)128*k3);
__m512 in14 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)64+(ptrdiff_t)419580*i6+(ptrdiff_t)2072*j2+(ptrdiff_t)128*k3);
__m512 dat20 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1036+(ptrdiff_t)419580*i6+(ptrdiff_t)2072*j2+(ptrdiff_t)128*k3);
__m512 dat22 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1100+(ptrdiff_t)419580*i6+(ptrdiff_t)2072*j2+(ptrdiff_t)128*k3);
__m512 dat21 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)2072+(ptrdiff_t)419580*i6+(ptrdiff_t)2072*j2+(ptrdiff_t)128*k3);
__m512 dat23 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)2136+(ptrdiff_t)419580*i6+(ptrdiff_t)2072*j2+(ptrdiff_t)128*k3);
in13 = _mm512_add_ps(in13, dat20);
in14 = _mm512_add_ps(in14, dat22);
in13 = _mm512_add_ps(in13, dat21);
in14 = _mm512_add_ps(in14, dat23);
__m512 blend3 = _mm512_mask_mov_ps(in14, 49152, in12);
__m512 out8 = _mm512_permutex2var_ps(in13, pm7, blend3);
__m512 pack15 = _mm512_permutex2var_ps(in13, pm8, in14);
__m512 pack16 = _mm512_permutex2var_ps(in13, pm9, blend3);
out8 = _mm512_mask_add_ps(out8, 65535, out8, pack15);
out8 = _mm512_mask_add_ps(out8, 65535, out8, pack16);
in12 = in14;
out8 = _mm512_mul_ps(out8, _mm512_set1_ps(1.1111111e-01f));
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)106372*i6+(ptrdiff_t)524*j2+(ptrdiff_t)64*k3, 65535, out8);
}
__m512 in15 = _mm512_maskz_loadu_ps(7, ptr1+(ptrdiff_t)0+(ptrdiff_t)419580*i6+(ptrdiff_t)2072*j2+(ptrdiff_t)128*8);
__m512 dat24 = _mm512_maskz_loadu_ps(7, ptr1+(ptrdiff_t)1036+(ptrdiff_t)419580*i6+(ptrdiff_t)2072*j2+(ptrdiff_t)128*8);
__m512 dat25 = _mm512_maskz_loadu_ps(7, ptr1+(ptrdiff_t)2072+(ptrdiff_t)419580*i6+(ptrdiff_t)2072*j2+(ptrdiff_t)128*8);
in15 = _mm512_add_ps(in15, dat24);
in15 = _mm512_add_ps(in15, dat25);
__m512 out9 = _mm512_permutex2var_ps(in15, pm7, in12);
__m512 pack17 = _mm512_shuffle_ps(in15, in15, 8);
__m512 pack18 = _mm512_permutex2var_ps(in15, pm9, in12);
out9 = _mm512_mask_add_ps(out9, 3, out9, pack17);
out9 = _mm512_mask_add_ps(out9, 3, out9, pack18);
__m512 rcp6 = _mm512_set_ps(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3.3333334e-01f, 1.1111111e-01f, 1.1111111e-01f);
out9 = _mm512_mul_ps(out9, rcp6);
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)106372*i6+(ptrdiff_t)524*j2+(ptrdiff_t)64*8, 7, out9);
}
}
return;
}
for (ptrdiff_t i7 = 0; i7 < 1; ++i7) {
for (ptrdiff_t j3 = 0; j3 < 22; ++j3) {
__m512 in16 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*j3+(ptrdiff_t)128*0);
__m512 in17 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)64+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*j3+(ptrdiff_t)128*0);
__m512 dat26 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1036+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*j3+(ptrdiff_t)128*0);
__m512 dat28 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1100+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*j3+(ptrdiff_t)128*0);
__m512 dat27 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)2072+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*j3+(ptrdiff_t)128*0);
__m512 dat29 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)2136+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*j3+(ptrdiff_t)128*0);
in16 = _mm512_add_ps(in16, dat26);
in17 = _mm512_add_ps(in17, dat28);
in16 = _mm512_add_ps(in16, dat27);
in17 = _mm512_add_ps(in17, dat29);
__m512i pm10 = _mm512_set_epi32(28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0, 30);
__m512i pm11 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pm12 = _mm512_set_epi32(29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1, 31);
__m512 out10 = _mm512_permutex2var_ps(in16, pm10, in17);
__m512 pack19 = _mm512_permutex2var_ps(in16, pm11, in17);
__m512 pack20 = _mm512_permutex2var_ps(in16, pm12, in17);
out10 = _mm512_mask_mov_ps(out10, 1, pack19);
out10 = _mm512_mask_add_ps(out10, 65534, out10, pack19);
out10 = _mm512_mask_add_ps(out10, 65534, out10, pack20);
__m512 rcp7 = _mm512_set_ps(1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 1.1111111e-01f, 3.3333334e-01f);
out10 = _mm512_mul_ps(out10, rcp7);
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)106372*i7+(ptrdiff_t)524*j3+(ptrdiff_t)64*0, 65535, out10);
for (ptrdiff_t k4 = 1; k4 < 8; ++k4) {
__m512 in18 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*j3+(ptrdiff_t)128*k4);
__m512 in19 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)64+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*j3+(ptrdiff_t)128*k4);
__m512 dat30 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1036+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*j3+(ptrdiff_t)128*k4);
__m512 dat32 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1100+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*j3+(ptrdiff_t)128*k4);
__m512 dat31 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)2072+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*j3+(ptrdiff_t)128*k4);
__m512 dat33 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)2136+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*j3+(ptrdiff_t)128*k4);
in18 = _mm512_add_ps(in18, dat30);
in19 = _mm512_add_ps(in19, dat32);
in18 = _mm512_add_ps(in18, dat31);
in19 = _mm512_add_ps(in19, dat33);
__m512 blend4 = _mm512_mask_mov_ps(in19, 49152, in17);
__m512 out11 = _mm512_permutex2var_ps(in18, pm10, blend4);
__m512 pack21 = _mm512_permutex2var_ps(in18, pm11, in19);
__m512 pack22 = _mm512_permutex2var_ps(in18, pm12, blend4);
out11 = _mm512_mask_add_ps(out11, 65535, out11, pack21);
out11 = _mm512_mask_add_ps(out11, 65535, out11, pack22);
in17 = in19;
out11 = _mm512_mul_ps(out11, _mm512_set1_ps(1.1111111e-01f));
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)106372*i7+(ptrdiff_t)524*j3+(ptrdiff_t)64*k4, 65535, out11);
}
__m512 in20 = _mm512_maskz_loadu_ps(7, ptr1+(ptrdiff_t)0+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*j3+(ptrdiff_t)128*8);
__m512 dat34 = _mm512_maskz_loadu_ps(7, ptr1+(ptrdiff_t)1036+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*j3+(ptrdiff_t)128*8);
__m512 dat35 = _mm512_maskz_loadu_ps(7, ptr1+(ptrdiff_t)2072+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*j3+(ptrdiff_t)128*8);
in20 = _mm512_add_ps(in20, dat34);
in20 = _mm512_add_ps(in20, dat35);
__m512 out12 = _mm512_permutex2var_ps(in20, pm10, in17);
__m512 pack23 = _mm512_shuffle_ps(in20, in20, 8);
__m512 pack24 = _mm512_permutex2var_ps(in20, pm12, in17);
out12 = _mm512_mask_add_ps(out12, 3, out12, pack23);
out12 = _mm512_mask_add_ps(out12, 3, out12, pack24);
__m512 rcp8 = _mm512_set_ps(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3.3333334e-01f, 1.1111111e-01f, 1.1111111e-01f);
out12 = _mm512_mul_ps(out12, rcp8);
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)106372*i7+(ptrdiff_t)524*j3+(ptrdiff_t)64*8, 7, out12);
}
__m512 in21 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*22+(ptrdiff_t)128*0);
__m512 in22 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)64+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*22+(ptrdiff_t)128*0);
__m512 dat36 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1036+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*22+(ptrdiff_t)128*0);
__m512 dat37 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1100+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*22+(ptrdiff_t)128*0);
in21 = _mm512_add_ps(in21, dat36);
in22 = _mm512_add_ps(in22, dat37);
__m512i pm13 = _mm512_set_epi32(28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0, 30);
__m512i pm14 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pm15 = _mm512_set_epi32(29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1, 31);
__m512 out13 = _mm512_permutex2var_ps(in21, pm13, in22);
__m512 pack25 = _mm512_permutex2var_ps(in21, pm14, in22);
__m512 pack26 = _mm512_permutex2var_ps(in21, pm15, in22);
out13 = _mm512_mask_mov_ps(out13, 1, pack25);
out13 = _mm512_mask_add_ps(out13, 65534, out13, pack25);
out13 = _mm512_mask_add_ps(out13, 65534, out13, pack26);
__m512 rcp9 = _mm512_set_ps(1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 1.6666667e-01f, 5e-01f);
out13 = _mm512_mul_ps(out13, rcp9);
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)106372*i7+(ptrdiff_t)524*22+(ptrdiff_t)64*0, 65535, out13);
for (ptrdiff_t k5 = 1; k5 < 8; ++k5) {
__m512 in23 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*22+(ptrdiff_t)128*k5);
__m512 in24 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)64+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*22+(ptrdiff_t)128*k5);
__m512 dat38 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1036+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*22+(ptrdiff_t)128*k5);
__m512 dat39 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)1100+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*22+(ptrdiff_t)128*k5);
in23 = _mm512_add_ps(in23, dat38);
in24 = _mm512_add_ps(in24, dat39);
__m512 blend5 = _mm512_mask_mov_ps(in24, 49152, in22);
__m512 out14 = _mm512_permutex2var_ps(in23, pm13, blend5);
__m512 pack27 = _mm512_permutex2var_ps(in23, pm14, in24);
__m512 pack28 = _mm512_permutex2var_ps(in23, pm15, blend5);
out14 = _mm512_mask_add_ps(out14, 65535, out14, pack27);
out14 = _mm512_mask_add_ps(out14, 65535, out14, pack28);
in22 = in24;
out14 = _mm512_mul_ps(out14, _mm512_set1_ps(1.6666667e-01f));
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)106372*i7+(ptrdiff_t)524*22+(ptrdiff_t)64*k5, 65535, out14);
}
__m512 in25 = _mm512_maskz_loadu_ps(7, ptr1+(ptrdiff_t)0+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*22+(ptrdiff_t)128*8);
__m512 dat40 = _mm512_maskz_loadu_ps(7, ptr1+(ptrdiff_t)1036+(ptrdiff_t)419580*i7+(ptrdiff_t)2072*22+(ptrdiff_t)128*8);
in25 = _mm512_add_ps(in25, dat40);
__m512 out15 = _mm512_permutex2var_ps(in25, pm13, in22);
__m512 pack29 = _mm512_shuffle_ps(in25, in25, 8);
__m512 pack30 = _mm512_permutex2var_ps(in25, pm15, in22);
out15 = _mm512_mask_add_ps(out15, 3, out15, pack29);
out15 = _mm512_mask_add_ps(out15, 3, out15, pack30);
__m512 rcp10 = _mm512_set_ps(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5e-01f, 1.6666667e-01f, 1.6666667e-01f);
out15 = _mm512_mul_ps(out15, rcp10);
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)106372*i7+(ptrdiff_t)524*22+(ptrdiff_t)64*8, 7, out15);
}
}

// Launch the pooling pass on the threader team: a 3-dimensional task
// hull of 13 x 1 x 1317 (width tiles x 1 x channels), dispatched to
// Example20Thrpl1Callee1 with the in/out tensor pointer array.
static void Example20Thrpl1(Example20ThreaderTeam1* team13, char** tensors1) {
Example20ThreaderTask1 task = {
.callee1 = Example20Thrpl1Callee1,
.any1 = tensors1,
.nd1 = 3,
.hull1 = {13, 1, 1317}
};
Example20ThreaderDo1(team13, &task);
}

// Trained-parameter storage for the generated graph. This particular
// graph allocates no parameter buffer (Example20NetCreate leaves both
// pointers NULL), but the fields are kept for the generator's uniform
// net layout.
struct Example20Net {
char* alloc1; // raw heap block (owner; released by Example20NetDestroy)
char* align1; // alloc1 rounded up to a 64-byte boundary (or NULL)
};

// Tear down a net built by Example20NetCreate: release the parameter
// buffer first, then the net object itself.
void Example20NetDestroy(Example20Net* net2) {
char* params = net2->alloc1;
free(params); // free(NULL) is a no-op, so an empty net is fine
free(net2);
}

// Allocate an Example20Net. This graph carries no trained parameters,
// so no parameter buffer is allocated and params1/threads1 are unused.
// Requires AVX512F support on the host CPU.
// Returns NULL on success (with *net1 set), or an error message string
// produced by Example20Errmsg1 on failure.
char* Example20NetCreate(
Example20Net** net1,
Example20Params* params1,
ptrdiff_t threads1
) {
(void)params1;
(void)threads1;
if (__builtin_expect(!__builtin_cpu_supports("avx512f"), 0)) {
return Example20Errmsg1(__LINE__, "CPU does not support AVX512F");
}
// Idiomatic sizeof *ptr keeps the allocation tied to the variable type.
Example20Net* net5 = malloc(sizeof *net5);
if (__builtin_expect(!net5, 0)) {
return Example20Errmsg1(__LINE__, "errno %d", errno);
}
net5->alloc1 = 0;
net5->align1 = 0;
*net1 = net5;
return 0;
}

// Per-inference-context state: the (shared, read-only) net, the thread
// team that executes the pooling pass, and a 64-byte-aligned scratch
// pointer derived from a small heap block.
struct Example20Engine {
Example20Net* net3; // not owned; caller destroys separately
Example20ThreaderTeam1* team11; // owned; destroyed in Example20EngineDestroy
char* alloc2; // raw heap block backing align2 (owner)
char* align2; // alloc2 rounded up to a 64-byte boundary
};

// Fetch the pthread handle of threader-team member idx2 into *to1,
// forwarding the result (NULL or an error string) from
// Example20ThreaderPthreadT1.
char* Example20EnginePthreadT(
Example20Engine* eng2,
ptrdiff_t idx2,
pthread_t* to1
) {
Example20ThreaderTeam1* team = eng2->team11;
return Example20ThreaderPthreadT1(to1, team, idx2);
}

// Tear down an engine: stop and destroy the thread team first (so no
// worker can touch engine memory afterwards), then release the scratch
// block and the engine object. The owning net is NOT destroyed here.
void Example20EngineDestroy(Example20Engine* eng3) {
Example20ThreaderTeam1* team = eng3->team11;
char* scratch = eng3->alloc2;
Example20ThreaderDestroy1(team);
free(scratch);
free(eng3);
}

// Build an inference engine bound to net4, with a threader team of
// threads2 workers. A 63-byte heap block is allocated solely to derive
// a 64-byte-aligned base pointer (NOTE(review): apparently this graph
// needs no aligned scratch bytes beyond the pointer itself — the block
// is exactly alignment-slack sized).
// Returns NULL on success (with *eng4 set), or an error message string.
char* Example20EngineCreate(
Example20Engine** eng4,
Example20Net* net4,
ptrdiff_t threads2
) {
Example20Engine* eng5 = malloc(sizeof *eng5);
if (__builtin_expect(!eng5, 0)) {
return Example20Errmsg1(__LINE__, "errno %d", errno);
}
char* alloc3 = malloc(63);
if (__builtin_expect(!alloc3, 0)) {
char* msg5 = Example20Errmsg1(__LINE__, "errno %d", errno);
free(eng5);
return msg5;
}
eng5->alloc2 = alloc3;
// Round up to the next 64-byte boundary; the unsigned mask ~(size_t)63
// is equivalent to the generated &-64 but avoids the signed literal.
eng5->align2 = (void*)(((size_t)alloc3+63)&~(size_t)63);
char* err7 = Example20ThreaderCreate1(&eng5->team11, threads2);
if (__builtin_expect(!!err7, 0)) {
free(eng5);
free(alloc3);
return err7;
}
eng5->net3 = net4;
*eng4 = eng5;
return 0;
}

// Run one inference: hand the input and output tensor base pointers to
// the pooling pass, executed by the engine's thread team.
void Example20EngineInference(
Example20Engine* eng1,
float* inData,
float* outData
) {
char* tensors3[2];
tensors3[0] = (char*)inData;
tensors3[1] = (char*)outData;
Example20Thrpl1(eng1->team11, tensors3);
}

// End of file.

Top