NN-512

Back

Index

Files

Top || Input graph file

Config Prefix=Example31 Platform=AVX512Float32 L1DataCachePerThread=32KiB L2CachePerThreadExL1=960KiB L3CachePerThreadExL1L2=1408KiB
Input ToTensor=in1 Channels=38 Height=6 Width=21
Input ToTensor=in2 Channels=38 Height=6 Width=21
Input ToTensor=in3 Channels=38 Height=3 Width=11
BatchNorm FromTensor=in1 ToTensor=bn1 Epsilon=0.00001
Activation FromTensor=bn1 ToTensor=act1 Kind=ReLU Param=0
Add FromTensor1=act1 FromTensor2=in2 ToTensor=add1
BatchNorm FromTensor=add1 ToTensor=bn2 Epsilon=0.00001
Pooling FromTensor=bn2 ToTensor=pool Kind=Max3x3Stride2 PaddingH=1 PaddingW=1
BatchNorm FromTensor=pool ToTensor=bn3 Epsilon=0.00001
Activation FromTensor=bn3 ToTensor=act2 Kind=ReLU Param=0
Add FromTensor1=act2 FromTensor2=in3 ToTensor=add2
BatchNorm FromTensor=add2 ToTensor=bn4 Epsilon=0.00001
Output FromTensor=bn4

Top || Output Example31.h file

#pragma once

// NN-512 (https://NN-512.com)
//
// Copyright (C) 2019 [
// 37ef ced3 3727 60b4
// 3c29 f9c6 dc30 d518
// f4f3 4106 6964 cab4
// a06f c1a3 83fd 090e
// ]
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <pthread.h>
#include <stddef.h>

#ifdef __cplusplus
extern "C" { /**/
#endif

// All weights, biases, and other trained parameters are passed into
// the initialization code through the Params struct that is declared
// just below this comment. The corresponding struct definition can be
// found near the end of this header file.
//
// Each field of the Params struct is an array of float that holds a
// parameter tensor in NCHW format with no padding. The struct fields
// are ordered by name, lexically bytewise. If you concatenate all the
// trained parameter tensors to a file in this same format and order
// you can load the struct as follows (error checking omitted here):
//
// size_t size = sizeof(Example31Params);
// Example31Params* to = malloc(size);
// FILE* from = fopen("ParamsFile", "r");
// fread(to, size, 1, from);
// fclose(from);
//
// Be careful to match endianness (and floating point format).

typedef struct Example31Params Example31Params;

// The Net contains weights, biases, and other trained parameters in a
// form that enables efficient inference. It is created from the input
// parameter struct without modifying that struct. The input parameter
// struct is no longer needed once the Net has been created. Threads
// that are used to create the Net are temporary (in particular, those
// threads are not used for inference).
//
// Example31Params* params = malloc(sizeof(Example31Params));
//
// ... Load params (read from a file, perhaps) ...
//
// Example31Net* net; // For example, 4 threads:
// char* err = Example31NetCreate(&net, params, 4);
// free(params);
//
// if (err) { // Nonzero err indicates failure; net is unmodified.
// printf("%s\n", err); // Explain the failure, add a newline.
// free(err); // Free the error string to avoid a memory leak.
// exit(1); // Exit, or propagate the failure some other way.
// }
//
// ... Perform all inference that depends on net ...
//
// Example31NetDestroy(net);
//
// The Net can be shared and reused without restriction because it is
// never modified (not even temporarily) after being created. The Net
// should be destroyed (to free memory) once all dependent inference
// is complete.

typedef struct Example31Net Example31Net;

char* Example31NetCreate(
Example31Net**,
Example31Params*,
ptrdiff_t threads
);

void Example31NetDestroy(Example31Net*);

// An Engine performs inference. It contains inference threads, scratch
// memory, and a pointer to the Net. Any number of Engines can share the
// same Net (and perform inference in parallel) because the Net is never
// modified. For best performance the number of inference threads should
// not exceed the number of CPU cores.
//
// Example31Net* net;
//
// ... Create net ...
//
// Example31Engine* engine; // For example, 4 inference threads:
// char* err = Example31EngineCreate(&engine, net, 4);
//
// if (err) { // Nonzero err means failure; engine is unmodified.
// printf("%s\n", err); // Explain the failure, add a newline.
// free(err); // Free the error string to avoid a memory leak.
//
// ... Destroy net ...
//
// exit(1); // Exit, or propagate the failure some other way.
// }
//
// ... Use the POSIX threads API to adjust engine's threads ...
// ... Use engine to perform inference (dependent on net) ...
//
// Example31EngineDestroy(engine); // Terminate threads, free memory.
//
// ... Destroy net ...
//
// The POSIX threads API can be used to adjust an Engine's threads. If
// an Engine has N threads, those threads are indexed 0, 1, 2, ..., N-1
// and a pthread_t identifier is associated with each index. To set the
// CPU affinity mask for the first inference thread, for example:
//
// pthread_t thread; // The first thread has index 0:
// char* err = Example31EnginePthreadT(engine, 0, &thread);
//
// assert(!err); // Can only fail if the thread index is invalid.
//
// pthread_setaffinity_np(thread, ...); // Details omitted.
//
// The inference function reads floats from (one or more) input tensors
// and writes floats to (one or more) output tensors. All the input and
// output tensors are owned (allocated and freed) by the caller and are
// in CHW format, 32-bit floating point, fully packed (in other words,
// C has the largest pitch, W has the smallest pitch, and there is no
// padding anywhere).
//
// float* bn4Data = malloc(sizeof(float)*38*3*11);
// float* in1Data = malloc(sizeof(float)*38*6*21);
// float* in2Data = malloc(sizeof(float)*38*6*21);
// float* in3Data = malloc(sizeof(float)*38*3*11);
//
// for (...) { // Reuse the input and output tensors.
//
// ... Write the input floats ...
//
// Example31EngineInference( // This function cannot fail.
// engine, // Pass an Engine as the first argument.
// bn4Data, // The tensor arguments are sorted by name.
// in1Data,
// in2Data,
// in3Data
// );
//
// ... Read the output floats ...
//
// }
//
// free(bn4Data);
// free(in1Data);
// free(in2Data);
// free(in3Data);
//
// The tensor parameters of the inference function are ordered by name,
// lexically bytewise. In other words, the function parameters have been
// sorted by name using Go's "<" string comparison operator (a bytewise
// lexical string sort).

typedef struct Example31Engine Example31Engine;

char* Example31EngineCreate(
Example31Engine**,
Example31Net*,
ptrdiff_t threads
);

char* Example31EnginePthreadT(
Example31Engine*,
ptrdiff_t threadIdx,
pthread_t* to
);

void Example31EngineInference(
Example31Engine*,
float* bn4Data,
float* in1Data,
float* in2Data,
float* in3Data
);

void Example31EngineDestroy(Example31Engine*);

// The fields of the following struct have been sorted by name using
// Go's "<" string comparison operator (bytewise lexical string sort).
// Tensor dimensions are NxCxHxW where N is the outermost/slowest and
// W is the innermost/fastest. There is no padding anywhere.

// Per-channel trained parameters for the four BatchNorm ops in the
// input graph (bn1..bn4). Each op contributes four arrays of 38 floats
// (one float per channel; the graph declares Channels=38 throughout).
struct Example31Params {
float bn1Means[38]; // 1x38x1x1
float bn1Scales[38]; // 1x38x1x1
float bn1Shifts[38]; // 1x38x1x1
float bn1Variances[38]; // 1x38x1x1
float bn2Means[38]; // 1x38x1x1
float bn2Scales[38]; // 1x38x1x1
float bn2Shifts[38]; // 1x38x1x1
float bn2Variances[38]; // 1x38x1x1
float bn3Means[38]; // 1x38x1x1
float bn3Scales[38]; // 1x38x1x1
float bn3Shifts[38]; // 1x38x1x1
float bn3Variances[38]; // 1x38x1x1
float bn4Means[38]; // 1x38x1x1
float bn4Scales[38]; // 1x38x1x1
float bn4Shifts[38]; // 1x38x1x1
float bn4Variances[38]; // 1x38x1x1
} __attribute__((packed));

#ifdef __cplusplus
/**/ }
#endif

// End of file.

Top || Output Example31.c file

// To build an object file:
// gcc -c -w -std=c99 -pthread -Ofast -mavx512f Example31.c

// NN-512 (https://NN-512.com)
//
// Copyright (C) 2019 [
// 37ef ced3 3727 60b4
// 3c29 f9c6 dc30 d518
// f4f3 4106 6964 cab4
// a06f c1a3 83fd 090e
// ]
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <errno.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <immintrin.h>

#include "Example31.h"

// Build a heap-allocated error message of the form
// "Example31: line <N>: <formatted detail>". The caller owns the
// returned buffer and frees it.
// NOTE(review): malloc's result is used unchecked; on OOM this would
// crash in sprintf. Returning NULL instead would read as "no error"
// to callers, so the generated convention is left as-is.
static char* Example31Errmsg1(ptrdiff_t lineNum1, char* format1, ...) {
enum { msgBytes1 = 277 }; // fixed upper bound for any generated message
char* msg1 = malloc(msgBytes1);
int used1 = sprintf(msg1, "Example31: line %td: ", lineNum1);
va_list args1;
va_start(args1, format1);
vsnprintf(msg1+used1, msgBytes1-used1, format1, args1);
va_end(args1);
return msg1;
}

typedef struct Example31ThreaderTask1 Example31ThreaderTask1;
typedef void (*Example31ThreaderCallee1)(Example31ThreaderTask1*, int64_t*);
typedef struct Example31ThreaderHub1 Example31ThreaderHub1;
typedef struct Example31ThreaderNode1 Example31ThreaderNode1;
typedef struct Example31ThreaderUnwind1 Example31ThreaderUnwind1;
typedef struct Example31ThreaderTeam1 Example31ThreaderTeam1;

// A task describes one parallel operation: callee1 is invoked once per
// point of an nd1-dimensional iteration space whose per-dimension
// extents are given by hull1 (at most 4 dimensions).
struct Example31ThreaderTask1 {
Example31ThreaderCallee1 callee1; // called with (task, coordinate) per point
void* any1; // opaque payload interpreted by callee1
ptrdiff_t nd1; // number of dimensions of hull1 actually used
int64_t hull1[4]; // extent of each dimension of the iteration space
};

// Shared state used by the worker threads to steal leftover work from
// one another and to report completion back to the dispatching caller.
struct Example31ThreaderHub1 {
pthread_mutex_t mut1; // guards every field below
pthread_cond_t cond1; // signaled when pending1 reaches zero
ptrdiff_t pending1; // workers that have not yet finished the current task
ptrdiff_t offset1; // status1 word index where the shared steal scan resumes
long mask1; // bits of status1[offset1] not yet examined by the scan
long status1[]; // flexible bitset: one bit per node that may still hold work
};

// Per-worker state; 64-byte aligned (cache-line sized, presumably to
// limit false sharing between adjacent nodes in the array).
struct Example31ThreaderNode1 {
pthread_mutex_t mut2; // guards np1, pt1, task1
int64_t np1; // points left in this node's slice; negative requests thread exit
int64_t pt1[4]; // next coordinate of this node's slice
Example31ThreaderTask1* task1; // current task; null while idle
pthread_cond_t cond2; // signaled when task1 is posted (or on shutdown)
Example31ThreaderTeam1* team1; // owning team, set once during creation
pthread_t thr1; // the worker thread servicing this node
} __attribute__((aligned(64)));

// Records how far team construction progressed so that
// Example31ThreaderDestroy1 releases exactly what was created,
// even after a mid-construction failure.
struct Example31ThreaderUnwind1 {
ptrdiff_t join1; // number of worker threads successfully started (to join)
ptrdiff_t nodeConds1; // number of node condition variables initialized
ptrdiff_t nodeMuts1; // number of node mutexes initialized
ptrdiff_t hubCond1; // nonzero once the hub condition variable exists
ptrdiff_t hubMut1; // nonzero once the hub mutex exists
void* nodes1; // raw (unaligned) allocation backing the node array
void* hub1; // raw (unaligned) allocation backing the hub
};

// A team is a fixed set of worker threads plus the hub they share.
struct Example31ThreaderTeam1 {
ptrdiff_t nt1; // number of worker threads (and nodes)
Example31ThreaderHub1* hub2; // 64-byte-aligned view of unwind1.hub1
Example31ThreaderNode1* nodes2; // 64-byte-aligned view of unwind1.nodes1
Example31ThreaderUnwind1 unwind1; // teardown bookkeeping
};

// Mixed-radix increment: advance the nd2-dimensional coordinate pt2 by
// one step within hull2, with dimension 0 as the least significant.
// A dimension that reaches its extent wraps to zero and carries into
// the next dimension; incrementing the last coordinate wraps to all
// zeros.
static void Example31ThreaderInc1(
ptrdiff_t nd2,
int64_t*restrict hull2,
int64_t*restrict pt2
) {
ptrdiff_t axis1 = 0;
while (axis1 < nd2) {
int64_t next1 = pt2[axis1]+1;
if (next1 == hull2[axis1]) {
pt2[axis1] = 0; // wrapped; carry into the next dimension
++axis1;
} else {
pt2[axis1] = next1;
return; // no carry; done
}
}
}

// Decompose the linear index val1 into mixed-radix coordinates pt3,
// dimension 0 least significant, with per-dimension radix hull3.
// Dimensions beyond the highest nonzero digit are set to zero.
static void Example31ThreaderPut1(
ptrdiff_t nd3,
int64_t*restrict hull3,
int64_t*restrict pt3,
int64_t val1
) {
ptrdiff_t axis2 = 0;
while (axis2 < nd3 && val1) {
int64_t radix1 = hull3[axis2];
pt3[axis2] = val1%radix1;
val1 /= radix1;
++axis2;
}
while (axis2 < nd3) {
pt3[axis2++] = 0;
}
}

// Mixed-radix addition: pt4 += plus1 + carry2 under the per-dimension
// radices in hull4 (dimension 0 least significant). Assumes each input
// digit is already below its radix, so each per-dimension sum is less
// than twice the radix and a single conditional subtraction suffices.
// Overflow past the most significant dimension is discarded.
static void Example31ThreaderAdd1(
ptrdiff_t nd4,
int64_t*restrict hull4,
int64_t*restrict pt4,
int64_t*restrict plus1,
int64_t carry2
) {
for (ptrdiff_t axis3 = 0; axis3 < nd4; ++axis3) {
int64_t radix2 = hull4[axis3];
int64_t digit1 = pt4[axis3]+plus1[axis3]+carry2;
carry2 = digit1 >= radix2;
pt4[axis3] = carry2 ? digit1-radix2 : digit1;
}
}

// Worker thread body. Each worker first drains its own node's slice of
// the task's iteration space, then scans the hub's status bitset to
// steal leftover points from other nodes, and finally decrements the
// hub's pending count, waking the dispatcher when it hits zero.
static void* Example31ThreaderMain1(void* arg1) {
Example31ThreaderNode1* node1 = arg1;
Example31ThreaderTeam1* team2 = node1->team1;
ptrdiff_t nt2 = team2->nt1;
Example31ThreaderHub1* hub3 = team2->hub2;
Example31ThreaderNode1* nodes3 = team2->nodes2;
size_t role1 = node1-nodes3; // this worker's index within the team
for (; __builtin_expect(pthread_mutex_lock(&node1->mut2), 0); );
for (; ; ) {
Example31ThreaderTask1* task2 = node1->task1;
if (!task2) {
// Idle: wait for ThreaderDo1 (or Destroy1) to post a task.
for (; __builtin_expect(pthread_cond_wait(&node1->cond2, &node1->mut2), 0); );
continue;
}
int64_t np2 = node1->np1;
if (np2 < 0) {
// Negative point count is the shutdown signal from Destroy1.
for (; __builtin_expect(pthread_mutex_unlock(&node1->mut2), 0); );
return 0;
}
node1->task1 = 0;
Example31ThreaderCallee1 callee2 = task2->callee1;
ptrdiff_t nd5 = task2->nd1;
int64_t pt5[4];
// Drain this node's own slice: np1 counts remaining points and pt1
// is the next coordinate. The callee runs with the node unlocked so
// other workers can steal from it concurrently.
for (; np2; np2 = node1->np1) {
memcpy(pt5, node1->pt1, sizeof(pt5));
node1->np1 = np2-1;
Example31ThreaderInc1(nd5, task2->hull1, node1->pt1);
for (; __builtin_expect(pthread_mutex_unlock(&node1->mut2), 0); );
callee2(task2, pt5);
for (; __builtin_expect(pthread_mutex_lock(&node1->mut2), 0); );
}
for (; __builtin_expect(pthread_mutex_unlock(&node1->mut2), 0); );
// Own slice done: clear this worker's status bit, then scan for
// victims that may still hold unfinished points.
for (; __builtin_expect(pthread_mutex_lock(&hub3->mut1), 0); );
hub3->status1[role1/(sizeof(long)*8)] &= ~((long)1<<role1%(sizeof(long)*8));
ptrdiff_t offset2 = hub3->offset1;
long mask2 = hub3->mask1;
ptrdiff_t wrapped1 = 0;
for (; ; ) {
long hand1 = hub3->status1[offset2]&mask2;
if (!hand1) {
// No candidate bits left in this word; move to the next word.
++offset2;
mask2 = -1;
continue;
}
ptrdiff_t target1 = offset2*(sizeof(long)*8)+__builtin_ctzl(hand1);
if (target1 == nt2) {
// Hit the sentinel bit just past the last node. A second hit
// after a full wrap means no node has stealable work left.
if (wrapped1) break;
offset2 = 0;
mask2 = -1;
wrapped1 = 1;
continue;
}
hand1 &= -hand1; // isolate the victim's (lowest set) bit
// Publish the scan position so concurrent scanners skip this bit.
hub3->offset1 = offset2;
hub3->mask1 = mask2-hand1;
for (; __builtin_expect(pthread_mutex_unlock(&hub3->mut1), 0); );
Example31ThreaderNode1* node2 = nodes3+target1;
for (; __builtin_expect(pthread_mutex_lock(&node2->mut2), 0); );
// Drain the victim node exactly like the own-slice loop above.
for (np2 = node2->np1; np2; np2 = node2->np1) {
memcpy(pt5, node2->pt1, sizeof(pt5));
node2->np1 = np2-1;
Example31ThreaderInc1(nd5, task2->hull1, node2->pt1);
for (; __builtin_expect(pthread_mutex_unlock(&node2->mut2), 0); );
callee2(task2, pt5);
for (; __builtin_expect(pthread_mutex_lock(&node2->mut2), 0); );
}
for (; __builtin_expect(pthread_mutex_unlock(&node2->mut2), 0); );
for (; __builtin_expect(pthread_mutex_lock(&hub3->mut1), 0); );
hub3->status1[offset2] &= ~hand1; // victim fully drained
offset2 = hub3->offset1;
mask2 = hub3->mask1;
wrapped1 = 0;
}
ptrdiff_t pending2 = --hub3->pending1;
for (; __builtin_expect(pthread_mutex_unlock(&hub3->mut1), 0); );
// Last worker to finish wakes the dispatcher blocked in ThreaderDo1.
if (!pending2) for (; __builtin_expect(pthread_cond_signal(&hub3->cond1), 0); );
for (; __builtin_expect(pthread_mutex_lock(&node1->mut2), 0); );
}
}

// Tear down a team. Safe to call on a partially constructed team: the
// unwind counters record how many threads were started and how many
// mutexes/condition variables were initialized, and only those are
// joined/destroyed. A null team3 is a no-op.
static void Example31ThreaderDestroy1(Example31ThreaderTeam1* team3) {
if (!team3) return;
Example31ThreaderNode1* nodes4 = team3->nodes2;
Example31ThreaderNode1* stop1 = nodes4+team3->unwind1.join1;
// Request shutdown of every started worker: np1 < 0 tells
// ThreaderMain1 to exit, and the dummy non-null task pointer wakes
// it from its idle condition wait.
for (Example31ThreaderNode1* node3 = nodes4; node3 != stop1; ++node3) {
for (; __builtin_expect(pthread_mutex_lock(&node3->mut2), 0); );
node3->np1 = -1;
node3->task1 = (Example31ThreaderTask1*)1;
for (; __builtin_expect(pthread_mutex_unlock(&node3->mut2), 0); );
for (; __builtin_expect(pthread_cond_signal(&node3->cond2), 0); );
}
// Wait for all started workers to terminate.
for (Example31ThreaderNode1* node3 = nodes4; node3 != stop1; ++node3) {
for (; __builtin_expect(pthread_join(node3->thr1, 0), 0); );
}
// Destroy only the condition variables that were initialized.
stop1 = nodes4+team3->unwind1.nodeConds1;
for (Example31ThreaderNode1* node3 = nodes4; node3 != stop1; ++node3) {
for (; __builtin_expect(pthread_cond_destroy(&node3->cond2), 0); );
}
// Likewise only the node mutexes that were initialized.
stop1 = nodes4+team3->unwind1.nodeMuts1;
for (Example31ThreaderNode1* node3 = nodes4; node3 != stop1; ++node3) {
for (; __builtin_expect(pthread_mutex_destroy(&node3->mut2), 0); );
}
Example31ThreaderHub1* hub4 = team3->hub2;
if (team3->unwind1.hubCond1) {
for (; __builtin_expect(pthread_cond_destroy(&hub4->cond1), 0); );
}
if (team3->unwind1.hubMut1) {
for (; __builtin_expect(pthread_mutex_destroy(&hub4->mut1), 0); );
}
// Free the raw (unaligned) allocations, then the team itself.
free(team3->unwind1.nodes1);
free(team3->unwind1.hub1);
free(team3);
}

// Construction stage 4: initialize each node's mutex and condition
// variable and start its worker thread. On any failure the unwind
// counters are set to exactly how many of each resource were created
// so Example31ThreaderDestroy1 can release them, and a malloc'd error
// message is returned (null on success).
static char* Example31ThreaderCreate1Up4(Example31ThreaderTeam1* team8, ptrdiff_t nt7) {
Example31ThreaderNode1* nodes5 = team8->nodes2;
for (Example31ThreaderNode1* node4 = nodes5; node4 != nodes5+nt7; ++node4) {
int err2 = pthread_mutex_init(&node4->mut2, 0);
if (__builtin_expect(err2, 0)) {
char* msg2 = Example31Errmsg1(__LINE__, "errno %d", err2);
// This node's mutex failed; only the nodes before it are live.
team8->unwind1.nodeMuts1 = node4-nodes5;
team8->unwind1.nodeConds1 = node4-nodes5;
team8->unwind1.join1 = node4-nodes5;
return msg2;
}
node4->task1 = 0; // node starts idle
int err3 = pthread_cond_init(&node4->cond2, 0);
if (__builtin_expect(err3, 0)) {
char* msg3 = Example31Errmsg1(__LINE__, "errno %d", err3);
// This node's mutex succeeded (+1) but its cond var did not.
team8->unwind1.nodeMuts1 = node4-nodes5+1;
team8->unwind1.nodeConds1 = node4-nodes5;
team8->unwind1.join1 = node4-nodes5;
return msg3;
}
node4->team1 = team8;
int err4 = pthread_create(&node4->thr1, 0, Example31ThreaderMain1, node4);
if (__builtin_expect(err4, 0)) {
char* msg4 = Example31Errmsg1(__LINE__, "errno %d", err4);
// Mutex and cond var succeeded; the worker thread did not start.
team8->unwind1.nodeMuts1 = node4-nodes5+1;
team8->unwind1.nodeConds1 = node4-nodes5+1;
team8->unwind1.join1 = node4-nodes5;
return msg4;
}
}
// Full success: every node has a mutex, a cond var, and a thread.
team8->unwind1.nodeMuts1 = nt7;
team8->unwind1.nodeConds1 = nt7;
team8->unwind1.join1 = nt7;
return 0;
}

// Construction stage 3: initialize the hub's mutex and condition
// variable, marking each success in the unwind flags before moving on
// to the per-node stage. Returns a malloc'd error message on failure.
static char* Example31ThreaderCreate1Up3(Example31ThreaderTeam1* team7, ptrdiff_t nt6) {
Example31ThreaderHub1* hub5 = team7->hub2;
int err5 = pthread_mutex_init(&hub5->mut1, 0);
if (__builtin_expect(err5, 0)) {
return Example31Errmsg1(__LINE__, "errno %d", err5);
}
team7->unwind1.hubMut1 = 1; // hub mutex now needs destruction on unwind
int err6 = pthread_cond_init(&hub5->cond1, 0);
if (__builtin_expect(err6, 0)) {
return Example31Errmsg1(__LINE__, "errno %d", err6);
}
team7->unwind1.hubCond1 = 1; // hub cond var now needs destruction on unwind
return Example31ThreaderCreate1Up4(team7, nt6);
}

// Construction stage 2: allocate the per-thread node array, padded by
// 63 bytes so nodes2 can be rounded up to a 64-byte boundary (the node
// struct is declared 64-byte aligned). Returns a malloc'd error
// message on failure.
static char* Example31ThreaderCreate1Up2(Example31ThreaderTeam1* team6, ptrdiff_t nt5) {
size_t size2 = nt5*sizeof(Example31ThreaderNode1);
// Detect overflow of nt5*sizeof(node) by dividing back.
if (__builtin_expect(size2/sizeof(Example31ThreaderNode1) != (size_t)nt5, 0)) {
return Example31Errmsg1(__LINE__, "too many threads");
}
void* addr3 = malloc(size2+63);
if (__builtin_expect(!addr3, 0)) {
return Example31Errmsg1(__LINE__, "errno %d", errno);
}
team6->unwind1.nodes1 = addr3; // raw pointer, freed during unwind
team6->nodes2 = (void*)(((size_t)addr3+63)&-64); // 64-byte-aligned view
return Example31ThreaderCreate1Up3(team6, nt5);
}

// Construction stage 1: record the thread count and allocate the hub.
// The flexible status1 bitset gets one bit per thread plus an extra
// long word, so the sentinel bit at index nt4 always exists. As with
// the nodes, the allocation is padded for 64-byte alignment.
static char* Example31ThreaderCreate1Up1(Example31ThreaderTeam1* team5, ptrdiff_t nt4) {
team5->nt1 = nt4;
size_t size1 = sizeof(Example31ThreaderHub1);
size1 += sizeof(long)*((size_t)nt4/(sizeof(long)*8)+1); // status1 words
size1 = (size1+63)&-64; // round the hub footprint up to a multiple of 64
void* addr2 = malloc(size1+63);
if (__builtin_expect(!addr2, 0)) {
return Example31Errmsg1(__LINE__, "errno %d", errno);
}
team5->unwind1.hub1 = addr2; // raw pointer, freed during unwind
team5->hub2 = (void*)(((size_t)addr2+63)&-64); // 64-byte-aligned view
return Example31ThreaderCreate1Up2(team5, nt4);
}

// Public entry for team creation. Validates the thread count, zero-
// allocates the team struct (so all unwind counters start at zero),
// and delegates to the staged Up1..Up4 helpers. On failure everything
// constructed so far is destroyed and a malloc'd message is returned
// (caller frees); on success *team4 receives the team and the return
// value is null.
static char* Example31ThreaderCreate1(Example31ThreaderTeam1** team4, ptrdiff_t nt3) {
if (__builtin_expect(nt3 < 1, 0)) {
return Example31Errmsg1(__LINE__, "too few threads");
}
void* addr1 = calloc(1, sizeof(Example31ThreaderTeam1));
if (__builtin_expect(!addr1, 0)) {
return Example31Errmsg1(__LINE__, "errno %d", errno);
}
char* err1 = Example31ThreaderCreate1Up1(addr1, nt3);
if (__builtin_expect(!!err1, 0)) {
Example31ThreaderDestroy1(addr1); // unwind the partial construction
} else {
*team4 = addr1;
}
return err1;
}

// Look up the pthread_t of the worker at idx1 (0 <= idx1 < nt1) so the
// caller can apply POSIX thread operations to it (e.g. CPU affinity).
// Returns a malloc'd error message only when idx1 is out of range.
static char* Example31ThreaderPthreadT1(
pthread_t* thr2,
Example31ThreaderTeam1* team9,
ptrdiff_t idx1
) {
if (__builtin_expect(idx1 < 0 || idx1 >= team9->nt1, 0)) {
return Example31Errmsg1(__LINE__, "bad thread idx");
}
*thr2 = team9->nodes2[idx1].thr1;
return 0;
}

// Execute task3 across the team and block until it completes. The
// iteration space (the product of hull1[0..nd1)) is split into nt1
// contiguous slices: every node receives each1 points and the first
// more1 nodes take one extra. Workers then steal from one another via
// the hub once their own slice is drained.
static void Example31ThreaderDo1(Example31ThreaderTeam1* team10, Example31ThreaderTask1* task3) {
ptrdiff_t nd6 = task3->nd1;
if (nd6 < 1) return; // empty iteration space: nothing to run
int64_t tot1 = task3->hull1[0];
for (ptrdiff_t i4 = 1; i4 < nd6; tot1 *= task3->hull1[i4++]);
ptrdiff_t nt8 = team10->nt1;
int64_t each1 = tot1/nt8;
ptrdiff_t more1 = tot1%nt8;
int64_t plus2[4];
// plus2 is each1 expressed in mixed radix: the coordinate stride
// between the start points of consecutive slices.
Example31ThreaderPut1(nd6, task3->hull1, plus2, each1);
int64_t pt6[4] = {0};
Example31ThreaderHub1* hub6 = team10->hub2;
for (; __builtin_expect(pthread_mutex_lock(&hub6->mut1), 0); );
Example31ThreaderNode1* node5 = team10->nodes2;
for (ptrdiff_t i4 = 0; ; ++node5) {
for (; __builtin_expect(pthread_mutex_lock(&node5->mut2), 0); );
int64_t carry3 = i4 < more1; // first more1 nodes get one extra point
node5->np1 = each1+carry3;
memcpy(node5->pt1, pt6, sizeof(pt6));
node5->task1 = task3;
for (; __builtin_expect(pthread_mutex_unlock(&node5->mut2), 0); );
for (; __builtin_expect(pthread_cond_signal(&node5->cond2), 0); );
if (++i4 == nt8) break;
// Advance pt6 to the start coordinate of the next node's slice.
Example31ThreaderAdd1(nd6, task3->hull1, pt6, plus2, carry3);
}
// Arm the steal scan: set every status bit (including the sentinel
// bit at index nt8), then wait for all workers to report completion.
hub6->offset1 = 0;
hub6->mask1 = -1;
for (ptrdiff_t i4 = (size_t)nt8/(sizeof(long)*8); i4 >= 0; ) {
hub6->status1[i4--] = -1;
}
for (hub6->pending1 = nt8; hub6->pending1; ) {
for (; __builtin_expect(pthread_cond_wait(&hub6->cond1, &hub6->mut1), 0); );
}
for (; __builtin_expect(pthread_mutex_unlock(&hub6->mut1), 0); );
}

// Vectorized single-precision exp(x) for AVX-512. Clamps x into the
// range where the result is representable, splits x = r*ln2 + f with
// r = round(x*log2(e)) (ln2 subtracted in two parts for accuracy),
// approximates exp(f) with a degree-4 polynomial g, then multiplies
// by 2^r by adding r directly into the float exponent bits.
static __m512 Example31Exp1(__m512 x1) {
x1 = _mm512_max_ps(x1, _mm512_set1_ps(-8.733654e+01f)); // lower clamp (underflow bound)
x1 = _mm512_min_ps(x1, _mm512_set1_ps(8.872284e+01f)); // upper clamp (overflow bound)
__m512 t1 = _mm512_mul_ps(x1, _mm512_set1_ps(1.442695e+00f)); // x * log2(e)
__m512 r1 = _mm512_roundscale_ps(t1, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m512 f1 = _mm512_fmadd_ps(r1, _mm512_set1_ps(-6.9314575e-01f), x1); // x - r*ln2 (high part)
f1 = _mm512_fmadd_ps(r1, _mm512_set1_ps(-1.4286068e-06f), f1); // subtract ln2 low part
__m512 g1 = _mm512_set1_ps(4.194439e-02f); // Horner evaluation of exp(f) polynomial
g1 = _mm512_fmadd_ps(g1, f1, _mm512_set1_ps(1.6800667e-01f));
g1 = _mm512_fmadd_ps(g1, f1, _mm512_set1_ps(4.9999994e-01f));
g1 = _mm512_fmadd_ps(g1, f1, _mm512_set1_ps(9.999569e-01f));
g1 = _mm512_fmadd_ps(g1, f1, _mm512_set1_ps(9.9999964e-01f));
__m512i y1 = _mm512_slli_epi32(_mm512_cvtps_epi32(t1), 23); // r shifted to the exponent field
return _mm512_castsi512_ps(_mm512_add_epi32(y1, _mm512_castps_si512(g1))); // g * 2^r
}

// Vectorized single-precision reciprocal square root: the hardware
// rsqrt14 estimate refined with one Newton-Raphson step,
// y' = 0.5*y*(3 - x*y*y).
static __m512 Example31Rsqrt1(__m512 x2) {
__m512 y2 = _mm512_rsqrt14_ps(x2); // ~14-bit estimate of 1/sqrt(x)
__m512 z1 = _mm512_mul_ps(x2, y2); // x*y
__m512 a1 = _mm512_mul_ps(y2, _mm512_set1_ps(5e-01f)); // y/2
__m512 b1 = _mm512_fnmadd_ps(y2, z1, _mm512_set1_ps(3e+00f)); // 3 - x*y*y
return _mm512_mul_ps(a1, b1);
}

// Fold one BatchNorm op's four per-channel parameter arrays into an
// interleaved table of (multiplier, addend) pairs in mas1 so inference
// can apply y = x*mul + add per channel, where
// mul = scale/sqrt(variance + 1e-5) and add = shift - mean*mul.
// There are 38 channels, processed as 16+16+6 lanes: the 6-channel
// tail uses load mask 63 and a 12-float (mask 4095) store, since each
// channel emits two floats.
static void Example31BnSimplify1(
float*restrict means1,
float*restrict variances1,
float*restrict scales1,
float*restrict shifts1,
char*restrict mas1
) {
__m512 eps1 = _mm512_set1_ps(1e-05f); // matches Epsilon=0.00001 in the graph
// xlo1/xhi1 interleave a vector of muls with a vector of adds into
// (mul, add) pairs (low and high halves respectively).
__m512i xlo1 = _mm512_set_epi32(23, 7, 22, 6, 21, 5, 20, 4, 19, 3, 18, 2, 17, 1, 16, 0);
__m512i xhi1 = _mm512_set_epi32(31, 15, 30, 14, 29, 13, 28, 12, 27, 11, 26, 10, 25, 9, 24, 8);
__m512 va1 = _mm512_loadu_ps(variances1+(ptrdiff_t)16*0);
__m512 va2 = _mm512_loadu_ps(variances1+(ptrdiff_t)16*1);
__m512 va3 = _mm512_maskz_loadu_ps(63, variances1+(ptrdiff_t)16*2);
__m512 rcp1 = Example31Rsqrt1(_mm512_add_ps(eps1, va1)); // 1/sqrt(var + eps)
__m512 rcp2 = Example31Rsqrt1(_mm512_add_ps(eps1, va2));
__m512 rcp3 = Example31Rsqrt1(_mm512_add_ps(eps1, va3));
__m512 sc1 = _mm512_loadu_ps(scales1+(ptrdiff_t)16*0);
__m512 sc2 = _mm512_loadu_ps(scales1+(ptrdiff_t)16*1);
__m512 sc3 = _mm512_maskz_loadu_ps(63, scales1+(ptrdiff_t)16*2);
__m512 mul1 = _mm512_mul_ps(rcp1, sc1); // mul = scale/sqrt(var + eps)
__m512 mul2 = _mm512_mul_ps(rcp2, sc2);
__m512 mul3 = _mm512_mul_ps(rcp3, sc3);
__m512 me1 = _mm512_loadu_ps(means1+(ptrdiff_t)16*0);
__m512 me2 = _mm512_loadu_ps(means1+(ptrdiff_t)16*1);
__m512 me3 = _mm512_maskz_loadu_ps(63, means1+(ptrdiff_t)16*2);
__m512 sh1 = _mm512_loadu_ps(shifts1+(ptrdiff_t)16*0);
__m512 sh2 = _mm512_loadu_ps(shifts1+(ptrdiff_t)16*1);
__m512 sh3 = _mm512_maskz_loadu_ps(63, shifts1+(ptrdiff_t)16*2);
__m512 add1 = _mm512_fnmadd_ps(me1, mul1, sh1); // add = shift - mean*mul
__m512 add2 = _mm512_fnmadd_ps(me2, mul2, sh2);
__m512 add3 = _mm512_fnmadd_ps(me3, mul3, sh3);
__m512 lo1 = _mm512_permutex2var_ps(mul1, xlo1, add1); // interleave (mul, add) pairs
__m512 lo2 = _mm512_permutex2var_ps(mul2, xlo1, add2);
__m512 lo3 = _mm512_permutex2var_ps(mul3, xlo1, add3);
__m512 hi1 = _mm512_permutex2var_ps(mul1, xhi1, add1);
__m512 hi2 = _mm512_permutex2var_ps(mul2, xhi1, add2);
_mm512_storeu_ps(mas1+(ptrdiff_t)64*0, lo1);
_mm512_storeu_ps(mas1+(ptrdiff_t)64*1, hi1);
_mm512_storeu_ps(mas1+(ptrdiff_t)64*2, lo2);
_mm512_storeu_ps(mas1+(ptrdiff_t)64*3, hi2);
_mm512_mask_storeu_ps(mas1+(ptrdiff_t)64*4, 4095, lo3); // 6-channel tail: 12 floats
}

// One task slice of the fused elementwise/pooling pipeline:
//   bn1 -> ReLU -> +in2 -> bn2 -> Max3x3Stride2 (PaddingH/W = 1) ->
//   bn3 -> ReLU -> +in3 -> bn4 -> output.
// tensors2[] layout (set up by Example31EngineInference):
//   [0] in1 data   [1] bn1 (mul,add) pairs   [2] in2 data
//   [3] bn2 pairs  [4] bn3 pairs             [5] in3 data
//   [6] bn4 pairs  [7] output (bn4)
// Geometry: pre-pool tensors are 6x21 per channel (84-byte rows,
// 504 bytes per channel); pooled tensors are 3x11 (44-byte rows,
// 132 bytes per channel). The task hull is 1x1x2: c1 selects either
// the first 22 channels (c1 == 0) or the remaining 16 (c1 == 1); the
// two branches below are identical except for the channel count.
static void Example31Thrpl1Callee1(Example31ThreaderTask1* task4, int64_t* pt7) {
char** tensors2 = task4->any1;
ptrdiff_t b2 = pt7[0];
ptrdiff_t e1 = pt7[1];
ptrdiff_t c1 = pt7[2];
// Input-side pointers are biased by -84 (one row) so that the pooling
// window for output row j starts at byte offset 168*j (input row 2j-1).
// BN-pair pointers advance 8*22 bytes per slice (22 channels, 8 bytes
// per (mul,add) pair).
char*restrict ptr1 = tensors2[0]-(ptrdiff_t)84+(ptrdiff_t)504*b2+(ptrdiff_t)84*e1+(ptrdiff_t)11088*c1;
char*restrict ptr2 = tensors2[1]+(ptrdiff_t)8*22*c1;
char*restrict ptr3 = tensors2[2]-(ptrdiff_t)84+(ptrdiff_t)504*b2+(ptrdiff_t)84*e1+(ptrdiff_t)11088*c1;
char*restrict ptr4 = tensors2[3]+(ptrdiff_t)8*22*c1;
char*restrict ptr5 = tensors2[4]+(ptrdiff_t)8*22*c1;
char*restrict ptr6 = tensors2[5]+(ptrdiff_t)132*b2+(ptrdiff_t)42*e1+(ptrdiff_t)2904*c1;
char*restrict ptr7 = tensors2[6]+(ptrdiff_t)8*22*c1;
char*restrict ptr8 = tensors2[7]+(ptrdiff_t)132*b2+(ptrdiff_t)42*e1+(ptrdiff_t)2904*c1;
if (c1 < 1) {
// First slice: 22 channels.
for (ptrdiff_t i5 = 0; i5 < 22; ++i5) {
// Per-channel folded BatchNorm (multiplier, addend) pairs for
// bn1, bn2, bn3, bn4 — broadcast to all lanes.
__m512 bnMul1 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*i5)[0]);
__m512 bnAdd1 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*i5)[1]);
__m512 bnMul2 = _mm512_set1_ps(((float*)ptr4+(ptrdiff_t)2*i5)[0]);
__m512 bnAdd2 = _mm512_set1_ps(((float*)ptr4+(ptrdiff_t)2*i5)[1]);
__m512 bnMul3 = _mm512_set1_ps(((float*)ptr5+(ptrdiff_t)2*i5)[0]);
__m512 bnAdd3 = _mm512_set1_ps(((float*)ptr5+(ptrdiff_t)2*i5)[1]);
__m512 bnMul4 = _mm512_set1_ps(((float*)ptr7+(ptrdiff_t)2*i5)[0]);
__m512 bnAdd4 = _mm512_set1_ps(((float*)ptr7+(ptrdiff_t)2*i5)[1]);
// Output row 0: the top padding row does not exist, so only input
// rows 0 and 1 are read (offsets +84 and +168 from the -84-biased
// base). Each row is 21 floats: a 16-lane load plus a 5-lane load
// (mask 31) at +64 bytes.
__m512 in1 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)84+(ptrdiff_t)504*i5+(ptrdiff_t)168*0+(ptrdiff_t)128*0);
__m512 in2 = _mm512_maskz_loadu_ps(31, ptr1+(ptrdiff_t)148+(ptrdiff_t)504*i5+(ptrdiff_t)168*0+(ptrdiff_t)128*0);
__m512 dat2 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)168+(ptrdiff_t)504*i5+(ptrdiff_t)168*0+(ptrdiff_t)128*0);
__m512 dat5 = _mm512_maskz_loadu_ps(31, ptr1+(ptrdiff_t)232+(ptrdiff_t)504*i5+(ptrdiff_t)168*0+(ptrdiff_t)128*0);
__m512 dat1 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)84+(ptrdiff_t)504*i5+(ptrdiff_t)168*0+(ptrdiff_t)128*0);
__m512 dat4 = _mm512_maskz_loadu_ps(31, ptr3+(ptrdiff_t)148+(ptrdiff_t)504*i5+(ptrdiff_t)168*0+(ptrdiff_t)128*0);
__m512 dat3 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)168+(ptrdiff_t)504*i5+(ptrdiff_t)168*0+(ptrdiff_t)128*0);
__m512 dat6 = _mm512_maskz_loadu_ps(31, ptr3+(ptrdiff_t)232+(ptrdiff_t)504*i5+(ptrdiff_t)168*0+(ptrdiff_t)128*0);
// in3 row for this output row: 11 floats (mask 2047).
__m512 dat7 = _mm512_maskz_loadu_ps(2047, ptr6+(ptrdiff_t)132*i5+(ptrdiff_t)44*0+(ptrdiff_t)64*0);
// bn1, ReLU, +in2, bn2 applied to both contributing input rows.
in1 = _mm512_fmadd_ps(in1, bnMul1, bnAdd1);
in2 = _mm512_fmadd_ps(in2, bnMul1, bnAdd1);
dat2 = _mm512_fmadd_ps(dat2, bnMul1, bnAdd1);
dat5 = _mm512_fmadd_ps(dat5, bnMul1, bnAdd1);
in1 = _mm512_max_ps(_mm512_setzero_ps(), in1);
in2 = _mm512_max_ps(_mm512_setzero_ps(), in2);
dat2 = _mm512_max_ps(_mm512_setzero_ps(), dat2);
dat5 = _mm512_max_ps(_mm512_setzero_ps(), dat5);
in1 = _mm512_add_ps(in1, dat1);
in2 = _mm512_add_ps(in2, dat4);
dat2 = _mm512_add_ps(dat2, dat3);
dat5 = _mm512_add_ps(dat5, dat6);
in1 = _mm512_fmadd_ps(in1, bnMul2, bnAdd2);
in2 = _mm512_fmadd_ps(in2, bnMul2, bnAdd2);
dat2 = _mm512_fmadd_ps(dat2, bnMul2, bnAdd2);
dat5 = _mm512_fmadd_ps(dat5, bnMul2, bnAdd2);
// Vertical max of the two rows.
in1 = _mm512_max_ps(in1, dat2);
in2 = _mm512_max_ps(in2, dat5);
// Horizontal 3-wide max over stride-2 columns. For output column k
// the window is input columns 2k-1, 2k, 2k+1: pm1 gathers column 2k,
// pm2 column 2k+1 (valid for k <= 9, mask 1023), pm3 column 2k-1
// (valid for k >= 1, mask 2046; lane 0 is the left padding column).
__m512i pm1 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pm2 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512i pm3 = _mm512_set_epi32(29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1, 31);
__m512 out1 = _mm512_permutex2var_ps(in1, pm1, in2);
__m512 pack1 = _mm512_permutex2var_ps(in1, pm2, in2);
__m512 pack2 = _mm512_permutex2var_ps(in1, pm3, in2);
out1 = _mm512_mask_max_ps(out1, 1023, out1, pack1);
out1 = _mm512_mask_max_ps(out1, 2046, out1, pack2);
// Post-pool tail: bn3, ReLU, +in3, bn4, then store 11 floats.
out1 = _mm512_fmadd_ps(out1, bnMul3, bnAdd3);
out1 = _mm512_max_ps(_mm512_setzero_ps(), out1);
out1 = _mm512_add_ps(out1, dat7);
out1 = _mm512_fmadd_ps(out1, bnMul4, bnAdd4);
_mm512_mask_storeu_ps(ptr8+(ptrdiff_t)132*i5+(ptrdiff_t)44*0+(ptrdiff_t)64*0, 2047, out1);
// Output rows 1 and 2: all three input rows 2*j1-1 .. 2*j1+1 exist
// (offsets +0, +84, +168 from the biased base at 168*j1).
for (ptrdiff_t j1 = 1; j1 < 3; ++j1) {
__m512 in3 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)504*i5+(ptrdiff_t)168*j1+(ptrdiff_t)128*0);
__m512 in4 = _mm512_maskz_loadu_ps(31, ptr1+(ptrdiff_t)64+(ptrdiff_t)504*i5+(ptrdiff_t)168*j1+(ptrdiff_t)128*0);
__m512 dat9 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)84+(ptrdiff_t)504*i5+(ptrdiff_t)168*j1+(ptrdiff_t)128*0);
__m512 dat14 = _mm512_maskz_loadu_ps(31, ptr1+(ptrdiff_t)148+(ptrdiff_t)504*i5+(ptrdiff_t)168*j1+(ptrdiff_t)128*0);
__m512 dat11 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)168+(ptrdiff_t)504*i5+(ptrdiff_t)168*j1+(ptrdiff_t)128*0);
__m512 dat16 = _mm512_maskz_loadu_ps(31, ptr1+(ptrdiff_t)232+(ptrdiff_t)504*i5+(ptrdiff_t)168*j1+(ptrdiff_t)128*0);
__m512 dat8 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)0+(ptrdiff_t)504*i5+(ptrdiff_t)168*j1+(ptrdiff_t)128*0);
__m512 dat13 = _mm512_maskz_loadu_ps(31, ptr3+(ptrdiff_t)64+(ptrdiff_t)504*i5+(ptrdiff_t)168*j1+(ptrdiff_t)128*0);
__m512 dat10 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)84+(ptrdiff_t)504*i5+(ptrdiff_t)168*j1+(ptrdiff_t)128*0);
__m512 dat15 = _mm512_maskz_loadu_ps(31, ptr3+(ptrdiff_t)148+(ptrdiff_t)504*i5+(ptrdiff_t)168*j1+(ptrdiff_t)128*0);
__m512 dat12 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)168+(ptrdiff_t)504*i5+(ptrdiff_t)168*j1+(ptrdiff_t)128*0);
__m512 dat17 = _mm512_maskz_loadu_ps(31, ptr3+(ptrdiff_t)232+(ptrdiff_t)504*i5+(ptrdiff_t)168*j1+(ptrdiff_t)128*0);
__m512 dat18 = _mm512_maskz_loadu_ps(2047, ptr6+(ptrdiff_t)132*i5+(ptrdiff_t)44*j1+(ptrdiff_t)64*0);
in3 = _mm512_fmadd_ps(in3, bnMul1, bnAdd1);
in4 = _mm512_fmadd_ps(in4, bnMul1, bnAdd1);
dat9 = _mm512_fmadd_ps(dat9, bnMul1, bnAdd1);
dat14 = _mm512_fmadd_ps(dat14, bnMul1, bnAdd1);
dat11 = _mm512_fmadd_ps(dat11, bnMul1, bnAdd1);
dat16 = _mm512_fmadd_ps(dat16, bnMul1, bnAdd1);
in3 = _mm512_max_ps(_mm512_setzero_ps(), in3);
in4 = _mm512_max_ps(_mm512_setzero_ps(), in4);
dat9 = _mm512_max_ps(_mm512_setzero_ps(), dat9);
dat14 = _mm512_max_ps(_mm512_setzero_ps(), dat14);
dat11 = _mm512_max_ps(_mm512_setzero_ps(), dat11);
dat16 = _mm512_max_ps(_mm512_setzero_ps(), dat16);
in3 = _mm512_add_ps(in3, dat8);
in4 = _mm512_add_ps(in4, dat13);
dat9 = _mm512_add_ps(dat9, dat10);
dat14 = _mm512_add_ps(dat14, dat15);
dat11 = _mm512_add_ps(dat11, dat12);
dat16 = _mm512_add_ps(dat16, dat17);
in3 = _mm512_fmadd_ps(in3, bnMul2, bnAdd2);
in4 = _mm512_fmadd_ps(in4, bnMul2, bnAdd2);
dat9 = _mm512_fmadd_ps(dat9, bnMul2, bnAdd2);
dat14 = _mm512_fmadd_ps(dat14, bnMul2, bnAdd2);
dat11 = _mm512_fmadd_ps(dat11, bnMul2, bnAdd2);
dat16 = _mm512_fmadd_ps(dat16, bnMul2, bnAdd2);
// Vertical max of the three rows.
in3 = _mm512_max_ps(in3, dat9);
in4 = _mm512_max_ps(in4, dat14);
in3 = _mm512_max_ps(in3, dat11);
in4 = _mm512_max_ps(in4, dat16);
// Horizontal pooling, same scheme as output row 0 above.
__m512i pm4 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pm5 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512i pm6 = _mm512_set_epi32(29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1, 31);
__m512 out2 = _mm512_permutex2var_ps(in3, pm4, in4);
__m512 pack3 = _mm512_permutex2var_ps(in3, pm5, in4);
__m512 pack4 = _mm512_permutex2var_ps(in3, pm6, in4);
out2 = _mm512_mask_max_ps(out2, 1023, out2, pack3);
out2 = _mm512_mask_max_ps(out2, 2046, out2, pack4);
out2 = _mm512_fmadd_ps(out2, bnMul3, bnAdd3);
out2 = _mm512_max_ps(_mm512_setzero_ps(), out2);
out2 = _mm512_add_ps(out2, dat18);
out2 = _mm512_fmadd_ps(out2, bnMul4, bnAdd4);
_mm512_mask_storeu_ps(ptr8+(ptrdiff_t)132*i5+(ptrdiff_t)44*j1+(ptrdiff_t)64*0, 2047, out2);
}
}
return;
}
// Second slice (c1 == 1): the remaining 16 channels; identical
// structure to the 22-channel branch above.
for (ptrdiff_t i6 = 0; i6 < 16; ++i6) {
__m512 bnMul5 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*i6)[0]);
__m512 bnAdd5 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*i6)[1]);
__m512 bnMul6 = _mm512_set1_ps(((float*)ptr4+(ptrdiff_t)2*i6)[0]);
__m512 bnAdd6 = _mm512_set1_ps(((float*)ptr4+(ptrdiff_t)2*i6)[1]);
__m512 bnMul7 = _mm512_set1_ps(((float*)ptr5+(ptrdiff_t)2*i6)[0]);
__m512 bnAdd7 = _mm512_set1_ps(((float*)ptr5+(ptrdiff_t)2*i6)[1]);
__m512 bnMul8 = _mm512_set1_ps(((float*)ptr7+(ptrdiff_t)2*i6)[0]);
__m512 bnAdd8 = _mm512_set1_ps(((float*)ptr7+(ptrdiff_t)2*i6)[1]);
// Output row 0 (top pad row absent).
__m512 in5 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)84+(ptrdiff_t)504*i6+(ptrdiff_t)168*0+(ptrdiff_t)128*0);
__m512 in6 = _mm512_maskz_loadu_ps(31, ptr1+(ptrdiff_t)148+(ptrdiff_t)504*i6+(ptrdiff_t)168*0+(ptrdiff_t)128*0);
__m512 dat20 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)168+(ptrdiff_t)504*i6+(ptrdiff_t)168*0+(ptrdiff_t)128*0);
__m512 dat23 = _mm512_maskz_loadu_ps(31, ptr1+(ptrdiff_t)232+(ptrdiff_t)504*i6+(ptrdiff_t)168*0+(ptrdiff_t)128*0);
__m512 dat19 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)84+(ptrdiff_t)504*i6+(ptrdiff_t)168*0+(ptrdiff_t)128*0);
__m512 dat22 = _mm512_maskz_loadu_ps(31, ptr3+(ptrdiff_t)148+(ptrdiff_t)504*i6+(ptrdiff_t)168*0+(ptrdiff_t)128*0);
__m512 dat21 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)168+(ptrdiff_t)504*i6+(ptrdiff_t)168*0+(ptrdiff_t)128*0);
__m512 dat24 = _mm512_maskz_loadu_ps(31, ptr3+(ptrdiff_t)232+(ptrdiff_t)504*i6+(ptrdiff_t)168*0+(ptrdiff_t)128*0);
__m512 dat25 = _mm512_maskz_loadu_ps(2047, ptr6+(ptrdiff_t)132*i6+(ptrdiff_t)44*0+(ptrdiff_t)64*0);
in5 = _mm512_fmadd_ps(in5, bnMul5, bnAdd5);
in6 = _mm512_fmadd_ps(in6, bnMul5, bnAdd5);
dat20 = _mm512_fmadd_ps(dat20, bnMul5, bnAdd5);
dat23 = _mm512_fmadd_ps(dat23, bnMul5, bnAdd5);
in5 = _mm512_max_ps(_mm512_setzero_ps(), in5);
in6 = _mm512_max_ps(_mm512_setzero_ps(), in6);
dat20 = _mm512_max_ps(_mm512_setzero_ps(), dat20);
dat23 = _mm512_max_ps(_mm512_setzero_ps(), dat23);
in5 = _mm512_add_ps(in5, dat19);
in6 = _mm512_add_ps(in6, dat22);
dat20 = _mm512_add_ps(dat20, dat21);
dat23 = _mm512_add_ps(dat23, dat24);
in5 = _mm512_fmadd_ps(in5, bnMul6, bnAdd6);
in6 = _mm512_fmadd_ps(in6, bnMul6, bnAdd6);
dat20 = _mm512_fmadd_ps(dat20, bnMul6, bnAdd6);
dat23 = _mm512_fmadd_ps(dat23, bnMul6, bnAdd6);
in5 = _mm512_max_ps(in5, dat20);
in6 = _mm512_max_ps(in6, dat23);
__m512i pm7 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pm8 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512i pm9 = _mm512_set_epi32(29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1, 31);
__m512 out3 = _mm512_permutex2var_ps(in5, pm7, in6);
__m512 pack5 = _mm512_permutex2var_ps(in5, pm8, in6);
__m512 pack6 = _mm512_permutex2var_ps(in5, pm9, in6);
out3 = _mm512_mask_max_ps(out3, 1023, out3, pack5);
out3 = _mm512_mask_max_ps(out3, 2046, out3, pack6);
out3 = _mm512_fmadd_ps(out3, bnMul7, bnAdd7);
out3 = _mm512_max_ps(_mm512_setzero_ps(), out3);
out3 = _mm512_add_ps(out3, dat25);
out3 = _mm512_fmadd_ps(out3, bnMul8, bnAdd8);
_mm512_mask_storeu_ps(ptr8+(ptrdiff_t)132*i6+(ptrdiff_t)44*0+(ptrdiff_t)64*0, 2047, out3);
// Output rows 1 and 2.
for (ptrdiff_t j2 = 1; j2 < 3; ++j2) {
__m512 in7 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)504*i6+(ptrdiff_t)168*j2+(ptrdiff_t)128*0);
__m512 in8 = _mm512_maskz_loadu_ps(31, ptr1+(ptrdiff_t)64+(ptrdiff_t)504*i6+(ptrdiff_t)168*j2+(ptrdiff_t)128*0);
__m512 dat27 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)84+(ptrdiff_t)504*i6+(ptrdiff_t)168*j2+(ptrdiff_t)128*0);
__m512 dat32 = _mm512_maskz_loadu_ps(31, ptr1+(ptrdiff_t)148+(ptrdiff_t)504*i6+(ptrdiff_t)168*j2+(ptrdiff_t)128*0);
__m512 dat29 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)168+(ptrdiff_t)504*i6+(ptrdiff_t)168*j2+(ptrdiff_t)128*0);
__m512 dat34 = _mm512_maskz_loadu_ps(31, ptr1+(ptrdiff_t)232+(ptrdiff_t)504*i6+(ptrdiff_t)168*j2+(ptrdiff_t)128*0);
__m512 dat26 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)0+(ptrdiff_t)504*i6+(ptrdiff_t)168*j2+(ptrdiff_t)128*0);
__m512 dat31 = _mm512_maskz_loadu_ps(31, ptr3+(ptrdiff_t)64+(ptrdiff_t)504*i6+(ptrdiff_t)168*j2+(ptrdiff_t)128*0);
__m512 dat28 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)84+(ptrdiff_t)504*i6+(ptrdiff_t)168*j2+(ptrdiff_t)128*0);
__m512 dat33 = _mm512_maskz_loadu_ps(31, ptr3+(ptrdiff_t)148+(ptrdiff_t)504*i6+(ptrdiff_t)168*j2+(ptrdiff_t)128*0);
__m512 dat30 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)168+(ptrdiff_t)504*i6+(ptrdiff_t)168*j2+(ptrdiff_t)128*0);
__m512 dat35 = _mm512_maskz_loadu_ps(31, ptr3+(ptrdiff_t)232+(ptrdiff_t)504*i6+(ptrdiff_t)168*j2+(ptrdiff_t)128*0);
__m512 dat36 = _mm512_maskz_loadu_ps(2047, ptr6+(ptrdiff_t)132*i6+(ptrdiff_t)44*j2+(ptrdiff_t)64*0);
in7 = _mm512_fmadd_ps(in7, bnMul5, bnAdd5);
in8 = _mm512_fmadd_ps(in8, bnMul5, bnAdd5);
dat27 = _mm512_fmadd_ps(dat27, bnMul5, bnAdd5);
dat32 = _mm512_fmadd_ps(dat32, bnMul5, bnAdd5);
dat29 = _mm512_fmadd_ps(dat29, bnMul5, bnAdd5);
dat34 = _mm512_fmadd_ps(dat34, bnMul5, bnAdd5);
in7 = _mm512_max_ps(_mm512_setzero_ps(), in7);
in8 = _mm512_max_ps(_mm512_setzero_ps(), in8);
dat27 = _mm512_max_ps(_mm512_setzero_ps(), dat27);
dat32 = _mm512_max_ps(_mm512_setzero_ps(), dat32);
dat29 = _mm512_max_ps(_mm512_setzero_ps(), dat29);
dat34 = _mm512_max_ps(_mm512_setzero_ps(), dat34);
in7 = _mm512_add_ps(in7, dat26);
in8 = _mm512_add_ps(in8, dat31);
dat27 = _mm512_add_ps(dat27, dat28);
dat32 = _mm512_add_ps(dat32, dat33);
dat29 = _mm512_add_ps(dat29, dat30);
dat34 = _mm512_add_ps(dat34, dat35);
in7 = _mm512_fmadd_ps(in7, bnMul6, bnAdd6);
in8 = _mm512_fmadd_ps(in8, bnMul6, bnAdd6);
dat27 = _mm512_fmadd_ps(dat27, bnMul6, bnAdd6);
dat32 = _mm512_fmadd_ps(dat32, bnMul6, bnAdd6);
dat29 = _mm512_fmadd_ps(dat29, bnMul6, bnAdd6);
dat34 = _mm512_fmadd_ps(dat34, bnMul6, bnAdd6);
in7 = _mm512_max_ps(in7, dat27);
in8 = _mm512_max_ps(in8, dat32);
in7 = _mm512_max_ps(in7, dat29);
in8 = _mm512_max_ps(in8, dat34);
__m512i pm10 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pm11 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512i pm12 = _mm512_set_epi32(29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1, 31);
__m512 out4 = _mm512_permutex2var_ps(in7, pm10, in8);
__m512 pack7 = _mm512_permutex2var_ps(in7, pm11, in8);
__m512 pack8 = _mm512_permutex2var_ps(in7, pm12, in8);
out4 = _mm512_mask_max_ps(out4, 1023, out4, pack7);
out4 = _mm512_mask_max_ps(out4, 2046, out4, pack8);
out4 = _mm512_fmadd_ps(out4, bnMul7, bnAdd7);
out4 = _mm512_max_ps(_mm512_setzero_ps(), out4);
out4 = _mm512_add_ps(out4, dat36);
out4 = _mm512_fmadd_ps(out4, bnMul8, bnAdd8);
_mm512_mask_storeu_ps(ptr8+(ptrdiff_t)132*i6+(ptrdiff_t)44*j2+(ptrdiff_t)64*0, 2047, out4);
}
}
}

// Run the fused pipeline on the threader team. The hull is 1x1x2:
// two tasks in total, one per channel slice handled by the callee.
static void Example31Thrpl1(Example31ThreaderTeam1* team13, char** tensors1) {
Example31ThreaderTask1 task;
task.nd1 = 3;
task.hull1[0] = 1;
task.hull1[1] = 1;
task.hull1[2] = 2;
task.any1 = tensors1;
task.callee1 = Example31Thrpl1Callee1;
Example31ThreaderDo1(team13, &task);
}

// Network state: the buffer holding the folded BatchNorm tables
// produced at Example31NetCreate time.
struct Example31Net {
char* alloc1; // raw malloc'd block; freed by Example31NetDestroy
char* align1; // alloc1 rounded up to the next 64-byte boundary
};

// Release a network created by Example31NetCreate: both the parameter
// buffer and the struct itself.
void Example31NetDestroy(Example31Net* net2) {
char* params = net2->alloc1;
free(net2);
free(params);
}

// Build the network state: verify AVX512F support, then fold the four
// BatchNorms' parameters into interleaved (multiplier, addend) tables.
// Returns 0 on success; on failure returns an error message from
// Example31Errmsg1 (presumably heap-allocated and owned by the caller —
// TODO confirm against Example31Errmsg1's definition) and leaves *net1
// unwritten.
char* Example31NetCreate(
Example31Net** net1,
Example31Params* params1,
ptrdiff_t threads1
) {
// Thread count is not needed for parameter folding.
(void)threads1;
if (__builtin_expect(!__builtin_cpu_supports("avx512f"), 0)) {
return Example31Errmsg1(__LINE__, "CPU does not support AVX512F");
}
// 1327 = 1264 bytes of folded BN tables (four tables at offsets
// 0/320/640/960; the last table's final masked store ends at
// 960+304 = 1264) + 63 bytes of slack for 64-byte alignment.
char* alloc3 = malloc(1327);
if (__builtin_expect(!alloc3, 0)) {
return Example31Errmsg1(__LINE__, "errno %d", errno);
}
char* align3 = (void*)(((size_t)alloc3+63)&-64);
{
Example31BnSimplify1(
params1->bn1Means,
params1->bn1Variances,
params1->bn1Scales,
params1->bn1Shifts,
align3+0
);
Example31BnSimplify1(
params1->bn2Means,
params1->bn2Variances,
params1->bn2Scales,
params1->bn2Shifts,
align3+320
);
Example31BnSimplify1(
params1->bn3Means,
params1->bn3Variances,
params1->bn3Scales,
params1->bn3Shifts,
align3+640
);
Example31BnSimplify1(
params1->bn4Means,
params1->bn4Variances,
params1->bn4Scales,
params1->bn4Shifts,
align3+960
);
}
Example31Net* net5 = malloc(sizeof(Example31Net));
if (__builtin_expect(!net5, 0)) {
// Capture the message before freeing, in case errno matters; the
// table buffer must not leak on this path.
char* msg6 = Example31Errmsg1(__LINE__, "errno %d", errno);
free(alloc3);
return msg6;
}
net5->alloc1 = alloc3;
net5->align1 = align3;
*net1 = net5;
return 0;
}

// Inference engine: a network plus a thread team and a 64-byte-aligned
// scratch area.
struct Example31Engine {
Example31Net* net3; // borrowed; not freed by Example31EngineDestroy
Example31ThreaderTeam1* team11; // worker threads; destroyed with the engine
char* alloc2; // raw scratch allocation
char* align2; // alloc2 rounded up to a 64-byte boundary
};

// Fetch the pthread_t of team member idx2 into *to1 (e.g. so the caller
// can set affinity). Forwards to the threader; returns its error
// message, or 0 on success.
char* Example31EnginePthreadT(
Example31Engine* eng2,
ptrdiff_t idx2,
pthread_t* to1
) {
char* err = Example31ThreaderPthreadT1(to1, eng2->team11, idx2);
return err;
}

// Tear down an engine: stop and destroy the thread team, then release
// the scratch buffer and the engine struct. The network is not owned
// by the engine and is left untouched.
void Example31EngineDestroy(Example31Engine* eng3) {
char* scratch = eng3->alloc2;
Example31ThreaderDestroy1(eng3->team11);
free(eng3);
free(scratch);
}

// Create an inference engine bound to net4, with its own thread team of
// threads2 threads and a 64-byte-aligned scratch area. On success
// writes *eng4 and returns 0; on failure frees everything acquired so
// far and returns an error message (from Example31Errmsg1 or the
// threader — presumably caller-owned; TODO confirm).
char* Example31EngineCreate(
Example31Engine** eng4,
Example31Net* net4,
ptrdiff_t threads2
) {
Example31Engine* engine = malloc(sizeof(Example31Engine));
if (__builtin_expect(!engine, 0)) {
return Example31Errmsg1(__LINE__, "errno %d", errno);
}
// 63 bytes is enough to carve one 64-byte-aligned address out of.
char* scratch = malloc(63);
if (__builtin_expect(!scratch, 0)) {
char* msg = Example31Errmsg1(__LINE__, "errno %d", errno);
free(engine);
return msg;
}
engine->alloc2 = scratch;
engine->align2 = (void*)(((size_t)scratch+63)&-64);
char* err = Example31ThreaderCreate1(&engine->team11, threads2);
if (__builtin_expect(!!err, 0)) {
free(engine);
free(scratch);
return err;
}
engine->net3 = net4;
*eng4 = engine;
return 0;
}

// Run one inference: consumes in1/in2/in3 (each 38x6x21 for in1/in2,
// 38x3x11 for in3, per the graph config) and writes the bn4 output
// (38x3x11) to bn4Data.
void Example31EngineInference(
Example31Engine* eng1,
float* bn4Data,
float* in1Data,
float* in2Data,
float* in3Data
) {
char* netAlign1 = eng1->net3->align1;
Example31ThreaderTeam1* team12 = eng1->team11;
{
// Tensor ordering is load-bearing: indices 0..7 must match the
// tensors2[] accesses in Example31Thrpl1Callee1, and the netAlign1
// offsets (0/320/640/960) must match the bn1..bn4 table offsets
// written by Example31NetCreate.
char* tensors3[] = {
(char*)in1Data,
netAlign1+0,
(char*)in2Data,
netAlign1+320,
netAlign1+640,
(char*)in3Data,
netAlign1+960,
(char*)bn4Data
};
Example31Thrpl1(team12, tensors3);
}
}

// End of file.

Top