NN-512

Input graph file

Config Prefix=Example34 Platform=AVX512Float32 L1DataCachePerThread=32KiB L2CachePerThreadExL1=960KiB L3CachePerThreadExL1L2=1408KiB
Input ToTensor=in1 Channels=90 Height=16 Width=5
Input ToTensor=in2 Channels=90 Height=16 Width=5
Input ToTensor=in3 Channels=94 Height=1 Width=1
BatchNorm FromTensor=in1 ToTensor=bn1 Epsilon=0.00001
Activation FromTensor=bn1 ToTensor=act1 Kind=ReLU Param=0.25
Add FromTensor1=act1 FromTensor2=in2 ToTensor=add1
BatchNorm FromTensor=add1 ToTensor=bn2 Epsilon=0.00001
FullyConnected FromTensor=bn2 ToTensor=fc ToChannels=94
BatchNorm FromTensor=fc ToTensor=bn3 Epsilon=0.00001
Activation FromTensor=bn3 ToTensor=act2 Kind=ReLU Param=0
Add FromTensor1=act2 FromTensor2=in3 ToTensor=add2
BatchNorm FromTensor=add2 ToTensor=bn4 Epsilon=0.00001
Output FromTensor=bn4

Output Example34.h file

#pragma once

// NN-512 (https://NN-512.com)
//
// Copyright (C) 2019 [
// 37ef ced3 3727 60b4
// 3c29 f9c6 dc30 d518
// f4f3 4106 6964 cab4
// a06f c1a3 83fd 090e
// ]
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <pthread.h>
#include <stddef.h>

#ifdef __cplusplus
extern "C" { /**/
#endif

// All weights, biases, and other trained parameters are passed into
// the initialization code through the Params struct that is declared
// just below this comment. The corresponding struct definition can be
// found near the end of this header file.
//
// Each field of the Params struct is an array of float that holds a
// parameter tensor in NCHW format with no padding. The struct fields
// are ordered by name, lexically bytewise. If you concatenate all the
// trained parameter tensors to a file in this same format and order,
// you can load the struct as follows (error checking omitted here):
//
// size_t size = sizeof(Example34Params);
// Example34Params* to = malloc(size);
// FILE* from = fopen("ParamsFile", "r");
// fread(to, size, 1, from);
// fclose(from);
//
// Be careful to match endianness (and floating point format).

typedef struct Example34Params Example34Params;
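
// For reference, here is the same load with the error checking
// filled in (a sketch only, not generated output; the file name
// Example34.params is merely an illustration):
//
// size_t size = sizeof(Example34Params);
// Example34Params* to = malloc(size);
// if (!to) { perror("malloc"); exit(1); }
// FILE* from = fopen("Example34.params", "r");
// if (!from) { perror("fopen"); exit(1); }
// if (fread(to, size, 1, from) != 1) { ... Handle the short read ... }
// fclose(from);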

// The Net contains weights, biases, and other trained parameters in a
// form that enables efficient inference. It is created from the input
// parameter struct without modifying that struct. The input parameter
// struct is no longer needed once the Net has been created. Threads
// that are used to create the Net are temporary (in particular, those
// threads are not used for inference).
//
// Example34Params* params = malloc(sizeof(Example34Params));
//
// ... Load params (read from a file, perhaps) ...
//
// Example34Net* net; // For example, 4 threads:
// char* err = Example34NetCreate(&net, params, 4);
// free(params);
//
// if (err) { // Nonzero err indicates failure; net is unmodified.
// printf("%s\n", err); // Explain the failure, add a newline.
// free(err); // Free the error string to avoid a memory leak.
// exit(1); // Exit, or propagate the failure some other way.
// }
//
// ... Perform all inference that depends on net ...
//
// Example34NetDestroy(net);
//
// The Net can be shared and reused without restriction because it is
// never modified (not even temporarily) after being created. The Net
// should be destroyed (to free memory) once all dependent inference
// is complete.

typedef struct Example34Net Example34Net;

char* Example34NetCreate(
Example34Net**,
Example34Params*,
ptrdiff_t threads
);

void Example34NetDestroy(Example34Net*);

// An Engine performs inference. It contains inference threads, scratch
// memory, and a pointer to the Net. Any number of Engines can share the
// same Net (and perform inference in parallel) because the Net is never
// modified. For best performance the number of inference threads should
// not exceed the number of CPU cores.
//
// Example34Net* net;
//
// ... Create net ...
//
// Example34Engine* engine; // For example, 4 inference threads:
// char* err = Example34EngineCreate(&engine, net, 4);
//
// if (err) { // Nonzero err means failure; engine is unmodified.
// printf("%s\n", err); // Explain the failure, add a newline.
// free(err); // Free the error string to avoid a memory leak.
//
// ... Destroy net ...
//
// exit(1); // Exit, or propagate the failure some other way.
// }
//
// ... Use the POSIX threads API to adjust engine's threads ...
// ... Use engine to perform inference (dependent on net) ...
//
// Example34EngineDestroy(engine); // Terminate threads, free memory.
//
// ... Destroy net ...
//
// The POSIX threads API can be used to adjust an Engine's threads. If
// an Engine has N threads, those threads are indexed 0, 1, 2, ..., N-1
// and a pthread_t identifier is associated with each index. To set the
// CPU affinity mask for the first inference thread, for example:
//
// pthread_t thread; // The first thread has index 0:
// char* err = Example34EnginePthreadT(engine, 0, &thread);
//
// assert(!err); // Can only fail if the thread index is invalid.
//
// pthread_setaffinity_np(thread, ...); // Details omitted.
//
// The inference function reads floats from (one or more) input tensors
// and writes floats to (one or more) output tensors. All the input and
// output tensors are owned (allocated and freed) by the caller and are
// in CHW format, 32-bit floating point, fully packed (in other words,
// C has the largest pitch, W has the smallest pitch, and there is no
// padding anywhere).
//
// float* bn4Data = malloc(sizeof(float)*94*1*1);
// float* in1Data = malloc(sizeof(float)*90*16*5);
// float* in2Data = malloc(sizeof(float)*90*16*5);
// float* in3Data = malloc(sizeof(float)*94*1*1);
//
// for (...) { // Reuse the input and output tensors.
//
// ... Write the input floats ...
//
// Example34EngineInference( // This function cannot fail.
// engine, // Pass an Engine as the first argument.
// bn4Data, // The tensor arguments are sorted by name.
// in1Data,
// in2Data,
// in3Data
// );
//
// ... Read the output floats ...
//
// }
//
// free(bn4Data);
// free(in1Data);
// free(in2Data);
// free(in3Data);
//
// The tensor parameters of the inference function are ordered by name,
// lexically bytewise. In other words, the function parameters have been
// sorted by name using Go's "<" string comparison operator (a bytewise
// lexical string sort).

typedef struct Example34Engine Example34Engine;

char* Example34EngineCreate(
Example34Engine**,
Example34Net*,
ptrdiff_t threads
);

char* Example34EnginePthreadT(
Example34Engine*,
ptrdiff_t threadIdx,
pthread_t* to
);

void Example34EngineInference(
Example34Engine*,
float* bn4Data,
float* in1Data,
float* in2Data,
float* in3Data
);

void Example34EngineDestroy(Example34Engine*);
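
// As a worked example of the affinity adjustment (a sketch only; the
// thread index and CPU number are arbitrary choices, and on glibc
// cpu_set_t requires _GNU_SOURCE and <sched.h>):
//
// cpu_set_t set;
// CPU_ZERO(&set);
// CPU_SET(0, &set); // Pin to CPU 0.
//
// pthread_t thread; // The first thread has index 0:
// char* err = Example34EnginePthreadT(engine, 0, &thread);
// assert(!err);
// pthread_setaffinity_np(thread, sizeof(cpu_set_t), &set);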

// The fields of the following struct have been sorted by name using
// Go's "<" string comparison operator (bytewise lexical string sort).
// Tensor dimensions are NxCxHxW where N is the outermost/slowest and
// W is the innermost/fastest. There is no padding anywhere.

struct Example34Params {
float bn1Means[90]; // 1x90x1x1
float bn1Scales[90]; // 1x90x1x1
float bn1Shifts[90]; // 1x90x1x1
float bn1Variances[90]; // 1x90x1x1
float bn2Means[90]; // 1x90x1x1
float bn2Scales[90]; // 1x90x1x1
float bn2Shifts[90]; // 1x90x1x1
float bn2Variances[90]; // 1x90x1x1
float bn3Means[94]; // 1x94x1x1
float bn3Scales[94]; // 1x94x1x1
float bn3Shifts[94]; // 1x94x1x1
float bn3Variances[94]; // 1x94x1x1
float bn4Means[94]; // 1x94x1x1
float bn4Scales[94]; // 1x94x1x1
float bn4Shifts[94]; // 1x94x1x1
float bn4Variances[94]; // 1x94x1x1
float fcBiases[94]; // 1x94x1x1
float fcWeights[676800]; // 94x90x16x5
} __attribute__((packed));
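
// As a quick consistency check (plain arithmetic, not generated
// output): the struct holds 8*90 + 8*94 + 94 + 94*90*16*5 floats,
// that is 720 + 752 + 94 + 676800 = 678366 floats, so a matching
// params file is 678366*sizeof(float) = 2713464 bytes long.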

#ifdef __cplusplus
/**/ }
#endif

// End of file.

Output Example34.c file

// To build an object file:
// gcc -c -w -std=c99 -pthread -Ofast -mavx512f Example34.c
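//
// To link the resulting object file into a program (a sketch; main.c
// stands in for whatever code calls the API in Example34.h):
// gcc main.c Example34.o -pthread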

// NN-512 (https://NN-512.com)
//
// Copyright (C) 2019 [
// 37ef ced3 3727 60b4
// 3c29 f9c6 dc30 d518
// f4f3 4106 6964 cab4
// a06f c1a3 83fd 090e
// ]
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <errno.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <immintrin.h>

#include "Example34.h"

static char* Example34Errmsg1(ptrdiff_t lineNum1, char* format1, ...) {
char* msg1 = malloc(277);
int step1 = sprintf(msg1, "Example34: line %td: ", lineNum1);
va_list ap1;
va_start(ap1, format1);
vsnprintf(msg1+step1, 277-step1, format1, ap1);
va_end(ap1);
return msg1;
}
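
// The threading code below is a self-contained work-distribution
// pool (this note is an editorial addition, not generated output).
// A task is a callee plus an iteration hull of up to 4 dimensions;
// Example34ThreaderDo1 splits the hull's total iteration count
// evenly across the team's nodes, and a node that finishes early
// scans the hub's status bits and drains the remaining iterations
// of still-busy nodes before reporting itself done.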

typedef struct Example34ThreaderTask1 Example34ThreaderTask1;
typedef void (*Example34ThreaderCallee1)(Example34ThreaderTask1*, int64_t*);
typedef struct Example34ThreaderHub1 Example34ThreaderHub1;
typedef struct Example34ThreaderNode1 Example34ThreaderNode1;
typedef struct Example34ThreaderUnwind1 Example34ThreaderUnwind1;
typedef struct Example34ThreaderTeam1 Example34ThreaderTeam1;

struct Example34ThreaderTask1 {
Example34ThreaderCallee1 callee1;
void* any1;
ptrdiff_t nd1;
int64_t hull1[4];
};

struct Example34ThreaderHub1 {
pthread_mutex_t mut1;
pthread_cond_t cond1;
ptrdiff_t pending1;
ptrdiff_t offset1;
long mask1;
long status1[];
};

struct Example34ThreaderNode1 {
pthread_mutex_t mut2;
int64_t np1;
int64_t pt1[4];
Example34ThreaderTask1* task1;
pthread_cond_t cond2;
Example34ThreaderTeam1* team1;
pthread_t thr1;
} __attribute__((aligned(64)));

struct Example34ThreaderUnwind1 {
ptrdiff_t join1;
ptrdiff_t nodeConds1;
ptrdiff_t nodeMuts1;
ptrdiff_t hubCond1;
ptrdiff_t hubMut1;
void* nodes1;
void* hub1;
};

struct Example34ThreaderTeam1 {
ptrdiff_t nt1;
Example34ThreaderHub1* hub2;
Example34ThreaderNode1* nodes2;
Example34ThreaderUnwind1 unwind1;
};

static void Example34ThreaderInc1(
ptrdiff_t nd2,
int64_t*restrict hull2,
int64_t*restrict pt2
) {
for (ptrdiff_t i1 = 0; i1 < nd2; ++i1) {
int64_t elem1 = pt2[i1];
if (++elem1 == hull2[i1]) {
pt2[i1] = 0;
} else {
pt2[i1] = elem1;
break;
}
}
}

static void Example34ThreaderPut1(
ptrdiff_t nd3,
int64_t*restrict hull3,
int64_t*restrict pt3,
int64_t val1
) {
ptrdiff_t i2 = 0;
for (; i2 < nd3 && val1; ) {
int64_t wrap1 = hull3[i2];
int64_t carry1 = val1/wrap1;
pt3[i2++] = val1-carry1*wrap1;
val1 = carry1;
}
for (; i2 < nd3; pt3[i2++] = 0);
}

static void Example34ThreaderAdd1(
ptrdiff_t nd4,
int64_t*restrict hull4,
int64_t*restrict pt4,
int64_t*restrict plus1,
int64_t carry2
) {
for (ptrdiff_t i3 = 0; i3 < nd4; ++i3) {
int64_t wrap2 = hull4[i3];
int64_t sum1 = pt4[i3]+plus1[i3]+carry2;
if (sum1 < wrap2) {
pt4[i3] = sum1;
carry2 = 0;
} else {
pt4[i3] = sum1-wrap2;
carry2 = 1;
}
}
}

static void* Example34ThreaderMain1(void* arg1) {
Example34ThreaderNode1* node1 = arg1;
Example34ThreaderTeam1* team2 = node1->team1;
ptrdiff_t nt2 = team2->nt1;
Example34ThreaderHub1* hub3 = team2->hub2;
Example34ThreaderNode1* nodes3 = team2->nodes2;
size_t role1 = node1-nodes3;
for (; __builtin_expect(pthread_mutex_lock(&node1->mut2), 0); );
for (; ; ) {
Example34ThreaderTask1* task2 = node1->task1;
if (!task2) {
for (; __builtin_expect(pthread_cond_wait(&node1->cond2, &node1->mut2), 0); );
continue;
}
int64_t np2 = node1->np1;
if (np2 < 0) {
for (; __builtin_expect(pthread_mutex_unlock(&node1->mut2), 0); );
return 0;
}
node1->task1 = 0;
Example34ThreaderCallee1 callee2 = task2->callee1;
ptrdiff_t nd5 = task2->nd1;
int64_t pt5[4];
for (; np2; np2 = node1->np1) {
memcpy(pt5, node1->pt1, sizeof(pt5));
node1->np1 = np2-1;
Example34ThreaderInc1(nd5, task2->hull1, node1->pt1);
for (; __builtin_expect(pthread_mutex_unlock(&node1->mut2), 0); );
callee2(task2, pt5);
for (; __builtin_expect(pthread_mutex_lock(&node1->mut2), 0); );
}
for (; __builtin_expect(pthread_mutex_unlock(&node1->mut2), 0); );
for (; __builtin_expect(pthread_mutex_lock(&hub3->mut1), 0); );
hub3->status1[role1/(sizeof(long)*8)] &= ~((long)1<<role1%(sizeof(long)*8));
ptrdiff_t offset2 = hub3->offset1;
long mask2 = hub3->mask1;
ptrdiff_t wrapped1 = 0;
for (; ; ) {
long hand1 = hub3->status1[offset2]&mask2;
if (!hand1) {
++offset2;
mask2 = -1;
continue;
}
ptrdiff_t target1 = offset2*(sizeof(long)*8)+__builtin_ctzl(hand1);
if (target1 == nt2) {
if (wrapped1) break;
offset2 = 0;
mask2 = -1;
wrapped1 = 1;
continue;
}
hand1 &= -hand1;
hub3->offset1 = offset2;
hub3->mask1 = mask2-hand1;
for (; __builtin_expect(pthread_mutex_unlock(&hub3->mut1), 0); );
Example34ThreaderNode1* node2 = nodes3+target1;
for (; __builtin_expect(pthread_mutex_lock(&node2->mut2), 0); );
for (np2 = node2->np1; np2; np2 = node2->np1) {
memcpy(pt5, node2->pt1, sizeof(pt5));
node2->np1 = np2-1;
Example34ThreaderInc1(nd5, task2->hull1, node2->pt1);
for (; __builtin_expect(pthread_mutex_unlock(&node2->mut2), 0); );
callee2(task2, pt5);
for (; __builtin_expect(pthread_mutex_lock(&node2->mut2), 0); );
}
for (; __builtin_expect(pthread_mutex_unlock(&node2->mut2), 0); );
for (; __builtin_expect(pthread_mutex_lock(&hub3->mut1), 0); );
hub3->status1[offset2] &= ~hand1;
offset2 = hub3->offset1;
mask2 = hub3->mask1;
wrapped1 = 0;
}
ptrdiff_t pending2 = --hub3->pending1;
for (; __builtin_expect(pthread_mutex_unlock(&hub3->mut1), 0); );
if (!pending2) for (; __builtin_expect(pthread_cond_signal(&hub3->cond1), 0); );
for (; __builtin_expect(pthread_mutex_lock(&node1->mut2), 0); );
}
}

static void Example34ThreaderDestroy1(Example34ThreaderTeam1* team3) {
if (!team3) return;
Example34ThreaderNode1* nodes4 = team3->nodes2;
Example34ThreaderNode1* stop1 = nodes4+team3->unwind1.join1;
for (Example34ThreaderNode1* node3 = nodes4; node3 != stop1; ++node3) {
for (; __builtin_expect(pthread_mutex_lock(&node3->mut2), 0); );
node3->np1 = -1;
node3->task1 = (Example34ThreaderTask1*)1;
for (; __builtin_expect(pthread_mutex_unlock(&node3->mut2), 0); );
for (; __builtin_expect(pthread_cond_signal(&node3->cond2), 0); );
}
for (Example34ThreaderNode1* node3 = nodes4; node3 != stop1; ++node3) {
for (; __builtin_expect(pthread_join(node3->thr1, 0), 0); );
}
stop1 = nodes4+team3->unwind1.nodeConds1;
for (Example34ThreaderNode1* node3 = nodes4; node3 != stop1; ++node3) {
for (; __builtin_expect(pthread_cond_destroy(&node3->cond2), 0); );
}
stop1 = nodes4+team3->unwind1.nodeMuts1;
for (Example34ThreaderNode1* node3 = nodes4; node3 != stop1; ++node3) {
for (; __builtin_expect(pthread_mutex_destroy(&node3->mut2), 0); );
}
Example34ThreaderHub1* hub4 = team3->hub2;
if (team3->unwind1.hubCond1) {
for (; __builtin_expect(pthread_cond_destroy(&hub4->cond1), 0); );
}
if (team3->unwind1.hubMut1) {
for (; __builtin_expect(pthread_mutex_destroy(&hub4->mut1), 0); );
}
free(team3->unwind1.nodes1);
free(team3->unwind1.hub1);
free(team3);
}

static char* Example34ThreaderCreate1Up4(Example34ThreaderTeam1* team8, ptrdiff_t nt7) {
Example34ThreaderNode1* nodes5 = team8->nodes2;
for (Example34ThreaderNode1* node4 = nodes5; node4 != nodes5+nt7; ++node4) {
int err2 = pthread_mutex_init(&node4->mut2, 0);
if (__builtin_expect(err2, 0)) {
char* msg2 = Example34Errmsg1(__LINE__, "errno %d", err2);
team8->unwind1.nodeMuts1 = node4-nodes5;
team8->unwind1.nodeConds1 = node4-nodes5;
team8->unwind1.join1 = node4-nodes5;
return msg2;
}
node4->task1 = 0;
int err3 = pthread_cond_init(&node4->cond2, 0);
if (__builtin_expect(err3, 0)) {
char* msg3 = Example34Errmsg1(__LINE__, "errno %d", err3);
team8->unwind1.nodeMuts1 = node4-nodes5+1;
team8->unwind1.nodeConds1 = node4-nodes5;
team8->unwind1.join1 = node4-nodes5;
return msg3;
}
node4->team1 = team8;
int err4 = pthread_create(&node4->thr1, 0, Example34ThreaderMain1, node4);
if (__builtin_expect(err4, 0)) {
char* msg4 = Example34Errmsg1(__LINE__, "errno %d", err4);
team8->unwind1.nodeMuts1 = node4-nodes5+1;
team8->unwind1.nodeConds1 = node4-nodes5+1;
team8->unwind1.join1 = node4-nodes5;
return msg4;
}
}
team8->unwind1.nodeMuts1 = nt7;
team8->unwind1.nodeConds1 = nt7;
team8->unwind1.join1 = nt7;
return 0;
}

static char* Example34ThreaderCreate1Up3(Example34ThreaderTeam1* team7, ptrdiff_t nt6) {
Example34ThreaderHub1* hub5 = team7->hub2;
int err5 = pthread_mutex_init(&hub5->mut1, 0);
if (__builtin_expect(err5, 0)) {
return Example34Errmsg1(__LINE__, "errno %d", err5);
}
team7->unwind1.hubMut1 = 1;
int err6 = pthread_cond_init(&hub5->cond1, 0);
if (__builtin_expect(err6, 0)) {
return Example34Errmsg1(__LINE__, "errno %d", err6);
}
team7->unwind1.hubCond1 = 1;
return Example34ThreaderCreate1Up4(team7, nt6);
}

static char* Example34ThreaderCreate1Up2(Example34ThreaderTeam1* team6, ptrdiff_t nt5) {
size_t size2 = nt5*sizeof(Example34ThreaderNode1);
if (__builtin_expect(size2/sizeof(Example34ThreaderNode1) != (size_t)nt5, 0)) {
return Example34Errmsg1(__LINE__, "too many threads");
}
void* addr3 = malloc(size2+63);
if (__builtin_expect(!addr3, 0)) {
return Example34Errmsg1(__LINE__, "errno %d", errno);
}
team6->unwind1.nodes1 = addr3;
team6->nodes2 = (void*)(((size_t)addr3+63)&-64);
return Example34ThreaderCreate1Up3(team6, nt5);
}

static char* Example34ThreaderCreate1Up1(Example34ThreaderTeam1* team5, ptrdiff_t nt4) {
team5->nt1 = nt4;
size_t size1 = sizeof(Example34ThreaderHub1);
size1 += sizeof(long)*((size_t)nt4/(sizeof(long)*8)+1);
size1 = (size1+63)&-64;
void* addr2 = malloc(size1+63);
if (__builtin_expect(!addr2, 0)) {
return Example34Errmsg1(__LINE__, "errno %d", errno);
}
team5->unwind1.hub1 = addr2;
team5->hub2 = (void*)(((size_t)addr2+63)&-64);
return Example34ThreaderCreate1Up2(team5, nt4);
}

static char* Example34ThreaderCreate1(Example34ThreaderTeam1** team4, ptrdiff_t nt3) {
if (__builtin_expect(nt3 < 1, 0)) {
return Example34Errmsg1(__LINE__, "too few threads");
}
void* addr1 = calloc(1, sizeof(Example34ThreaderTeam1));
if (__builtin_expect(!addr1, 0)) {
return Example34Errmsg1(__LINE__, "errno %d", errno);
}
char* err1 = Example34ThreaderCreate1Up1(addr1, nt3);
if (__builtin_expect(!!err1, 0)) {
Example34ThreaderDestroy1(addr1);
} else {
*team4 = addr1;
}
return err1;
}

static char* Example34ThreaderPthreadT1(
pthread_t* thr2,
Example34ThreaderTeam1* team9,
ptrdiff_t idx1
) {
if (__builtin_expect(idx1 < 0 || idx1 >= team9->nt1, 0)) {
return Example34Errmsg1(__LINE__, "bad thread idx");
}
*thr2 = team9->nodes2[idx1].thr1;
return 0;
}

static void Example34ThreaderDo1(Example34ThreaderTeam1* team10, Example34ThreaderTask1* task3) {
ptrdiff_t nd6 = task3->nd1;
if (nd6 < 1) return;
int64_t tot1 = task3->hull1[0];
for (ptrdiff_t i4 = 1; i4 < nd6; tot1 *= task3->hull1[i4++]);
ptrdiff_t nt8 = team10->nt1;
int64_t each1 = tot1/nt8;
ptrdiff_t more1 = tot1%nt8;
int64_t plus2[4];
Example34ThreaderPut1(nd6, task3->hull1, plus2, each1);
int64_t pt6[4] = {0};
Example34ThreaderHub1* hub6 = team10->hub2;
for (; __builtin_expect(pthread_mutex_lock(&hub6->mut1), 0); );
Example34ThreaderNode1* node5 = team10->nodes2;
for (ptrdiff_t i4 = 0; ; ++node5) {
for (; __builtin_expect(pthread_mutex_lock(&node5->mut2), 0); );
int64_t carry3 = i4 < more1;
node5->np1 = each1+carry3;
memcpy(node5->pt1, pt6, sizeof(pt6));
node5->task1 = task3;
for (; __builtin_expect(pthread_mutex_unlock(&node5->mut2), 0); );
for (; __builtin_expect(pthread_cond_signal(&node5->cond2), 0); );
if (++i4 == nt8) break;
Example34ThreaderAdd1(nd6, task3->hull1, pt6, plus2, carry3);
}
hub6->offset1 = 0;
hub6->mask1 = -1;
for (ptrdiff_t i4 = (size_t)nt8/(sizeof(long)*8); i4 >= 0; ) {
hub6->status1[i4--] = -1;
}
for (hub6->pending1 = nt8; hub6->pending1; ) {
for (; __builtin_expect(pthread_cond_wait(&hub6->cond1, &hub6->mut1), 0); );
}
for (; __builtin_expect(pthread_mutex_unlock(&hub6->mut1), 0); );
}
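
// Vectorized exp(x): round x*log2(e) to an integer r, compute the
// remainder f = x - r*ln(2), approximate exp(f) with a degree-4
// polynomial, and scale by 2^r through the float exponent bits.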

static __m512 Example34Exp1(__m512 x1) {
x1 = _mm512_max_ps(x1, _mm512_set1_ps(-8.733654e+01f));
x1 = _mm512_min_ps(x1, _mm512_set1_ps(8.872284e+01f));
__m512 t1 = _mm512_mul_ps(x1, _mm512_set1_ps(1.442695e+00f));
__m512 r1 = _mm512_roundscale_ps(t1, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m512 f1 = _mm512_fmadd_ps(r1, _mm512_set1_ps(-6.9314575e-01f), x1);
f1 = _mm512_fmadd_ps(r1, _mm512_set1_ps(-1.4286068e-06f), f1);
__m512 g1 = _mm512_set1_ps(4.194439e-02f);
g1 = _mm512_fmadd_ps(g1, f1, _mm512_set1_ps(1.6800667e-01f));
g1 = _mm512_fmadd_ps(g1, f1, _mm512_set1_ps(4.9999994e-01f));
g1 = _mm512_fmadd_ps(g1, f1, _mm512_set1_ps(9.999569e-01f));
g1 = _mm512_fmadd_ps(g1, f1, _mm512_set1_ps(9.9999964e-01f));
__m512i y1 = _mm512_slli_epi32(_mm512_cvtps_epi32(t1), 23);
return _mm512_castsi512_ps(_mm512_add_epi32(y1, _mm512_castps_si512(g1)));
}
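
// Reciprocal square root: one Newton-Raphson step, y*(3 - x*y*y)/2,
// refining the 14-bit estimate from _mm512_rsqrt14_ps.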

static __m512 Example34Rsqrt1(__m512 x2) {
__m512 y2 = _mm512_rsqrt14_ps(x2);
__m512 z1 = _mm512_mul_ps(x2, y2);
__m512 a1 = _mm512_mul_ps(y2, _mm512_set1_ps(5e-01f));
__m512 b1 = _mm512_fnmadd_ps(y2, z1, _mm512_set1_ps(3e+00f));
return _mm512_mul_ps(a1, b1);
}
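
// The BnSimplify routines fold a BatchNorm's four parameter arrays
// into one interleaved array of per-channel (multiply, add) pairs,
// with multiply = scale/sqrt(variance+epsilon) and add = shift -
// mean*multiply, so that inference applies each BatchNorm as a
// single fused multiply-add. Example34BnSimplify1 covers the
// 90-channel case and Example34BnSimplify2 the 94-channel case.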

static void Example34BnSimplify1(
float*restrict means1,
float*restrict variances1,
float*restrict scales1,
float*restrict shifts1,
char*restrict mas1
) {
__m512 eps1 = _mm512_set1_ps(1e-05f);
__m512i xlo1 = _mm512_set_epi32(23, 7, 22, 6, 21, 5, 20, 4, 19, 3, 18, 2, 17, 1, 16, 0);
__m512i xhi1 = _mm512_set_epi32(31, 15, 30, 14, 29, 13, 28, 12, 27, 11, 26, 10, 25, 9, 24, 8);
for (ptrdiff_t i5 = 0; i5 < 1; ++i5) {
__m512 va1 = _mm512_loadu_ps(variances1+(ptrdiff_t)16*0+(ptrdiff_t)80*i5);
__m512 va2 = _mm512_loadu_ps(variances1+(ptrdiff_t)16*1+(ptrdiff_t)80*i5);
__m512 va3 = _mm512_loadu_ps(variances1+(ptrdiff_t)16*2+(ptrdiff_t)80*i5);
__m512 va4 = _mm512_loadu_ps(variances1+(ptrdiff_t)16*3+(ptrdiff_t)80*i5);
__m512 va5 = _mm512_loadu_ps(variances1+(ptrdiff_t)16*4+(ptrdiff_t)80*i5);
__m512 rcp1 = Example34Rsqrt1(_mm512_add_ps(eps1, va1));
__m512 rcp2 = Example34Rsqrt1(_mm512_add_ps(eps1, va2));
__m512 rcp3 = Example34Rsqrt1(_mm512_add_ps(eps1, va3));
__m512 rcp4 = Example34Rsqrt1(_mm512_add_ps(eps1, va4));
__m512 rcp5 = Example34Rsqrt1(_mm512_add_ps(eps1, va5));
__m512 sc1 = _mm512_loadu_ps(scales1+(ptrdiff_t)16*0+(ptrdiff_t)80*i5);
__m512 sc2 = _mm512_loadu_ps(scales1+(ptrdiff_t)16*1+(ptrdiff_t)80*i5);
__m512 sc3 = _mm512_loadu_ps(scales1+(ptrdiff_t)16*2+(ptrdiff_t)80*i5);
__m512 sc4 = _mm512_loadu_ps(scales1+(ptrdiff_t)16*3+(ptrdiff_t)80*i5);
__m512 sc5 = _mm512_loadu_ps(scales1+(ptrdiff_t)16*4+(ptrdiff_t)80*i5);
__m512 mul1 = _mm512_mul_ps(rcp1, sc1);
__m512 mul2 = _mm512_mul_ps(rcp2, sc2);
__m512 mul3 = _mm512_mul_ps(rcp3, sc3);
__m512 mul4 = _mm512_mul_ps(rcp4, sc4);
__m512 mul5 = _mm512_mul_ps(rcp5, sc5);
__m512 me1 = _mm512_loadu_ps(means1+(ptrdiff_t)16*0+(ptrdiff_t)80*i5);
__m512 me2 = _mm512_loadu_ps(means1+(ptrdiff_t)16*1+(ptrdiff_t)80*i5);
__m512 me3 = _mm512_loadu_ps(means1+(ptrdiff_t)16*2+(ptrdiff_t)80*i5);
__m512 me4 = _mm512_loadu_ps(means1+(ptrdiff_t)16*3+(ptrdiff_t)80*i5);
__m512 me5 = _mm512_loadu_ps(means1+(ptrdiff_t)16*4+(ptrdiff_t)80*i5);
__m512 sh1 = _mm512_loadu_ps(shifts1+(ptrdiff_t)16*0+(ptrdiff_t)80*i5);
__m512 sh2 = _mm512_loadu_ps(shifts1+(ptrdiff_t)16*1+(ptrdiff_t)80*i5);
__m512 sh3 = _mm512_loadu_ps(shifts1+(ptrdiff_t)16*2+(ptrdiff_t)80*i5);
__m512 sh4 = _mm512_loadu_ps(shifts1+(ptrdiff_t)16*3+(ptrdiff_t)80*i5);
__m512 sh5 = _mm512_loadu_ps(shifts1+(ptrdiff_t)16*4+(ptrdiff_t)80*i5);
__m512 add1 = _mm512_fnmadd_ps(me1, mul1, sh1);
__m512 add2 = _mm512_fnmadd_ps(me2, mul2, sh2);
__m512 add3 = _mm512_fnmadd_ps(me3, mul3, sh3);
__m512 add4 = _mm512_fnmadd_ps(me4, mul4, sh4);
__m512 add5 = _mm512_fnmadd_ps(me5, mul5, sh5);
__m512 lo1 = _mm512_permutex2var_ps(mul1, xlo1, add1);
__m512 lo2 = _mm512_permutex2var_ps(mul2, xlo1, add2);
__m512 lo3 = _mm512_permutex2var_ps(mul3, xlo1, add3);
__m512 lo4 = _mm512_permutex2var_ps(mul4, xlo1, add4);
__m512 lo5 = _mm512_permutex2var_ps(mul5, xlo1, add5);
__m512 hi1 = _mm512_permutex2var_ps(mul1, xhi1, add1);
__m512 hi2 = _mm512_permutex2var_ps(mul2, xhi1, add2);
__m512 hi3 = _mm512_permutex2var_ps(mul3, xhi1, add3);
__m512 hi4 = _mm512_permutex2var_ps(mul4, xhi1, add4);
__m512 hi5 = _mm512_permutex2var_ps(mul5, xhi1, add5);
_mm512_storeu_ps(mas1+(ptrdiff_t)64*0+(ptrdiff_t)640*i5, lo1);
_mm512_storeu_ps(mas1+(ptrdiff_t)64*1+(ptrdiff_t)640*i5, hi1);
_mm512_storeu_ps(mas1+(ptrdiff_t)64*2+(ptrdiff_t)640*i5, lo2);
_mm512_storeu_ps(mas1+(ptrdiff_t)64*3+(ptrdiff_t)640*i5, hi2);
_mm512_storeu_ps(mas1+(ptrdiff_t)64*4+(ptrdiff_t)640*i5, lo3);
_mm512_storeu_ps(mas1+(ptrdiff_t)64*5+(ptrdiff_t)640*i5, hi3);
_mm512_storeu_ps(mas1+(ptrdiff_t)64*6+(ptrdiff_t)640*i5, lo4);
_mm512_storeu_ps(mas1+(ptrdiff_t)64*7+(ptrdiff_t)640*i5, hi4);
_mm512_storeu_ps(mas1+(ptrdiff_t)64*8+(ptrdiff_t)640*i5, lo5);
_mm512_storeu_ps(mas1+(ptrdiff_t)64*9+(ptrdiff_t)640*i5, hi5);
}
__m512 va6 = _mm512_maskz_loadu_ps(1023, variances1+(ptrdiff_t)16*0+(ptrdiff_t)80*1);
__m512 rcp6 = Example34Rsqrt1(_mm512_add_ps(eps1, va6));
__m512 sc6 = _mm512_maskz_loadu_ps(1023, scales1+(ptrdiff_t)16*0+(ptrdiff_t)80*1);
__m512 mul6 = _mm512_mul_ps(rcp6, sc6);
__m512 me6 = _mm512_maskz_loadu_ps(1023, means1+(ptrdiff_t)16*0+(ptrdiff_t)80*1);
__m512 sh6 = _mm512_maskz_loadu_ps(1023, shifts1+(ptrdiff_t)16*0+(ptrdiff_t)80*1);
__m512 add6 = _mm512_fnmadd_ps(me6, mul6, sh6);
__m512 lo6 = _mm512_permutex2var_ps(mul6, xlo1, add6);
__m512 hi6 = _mm512_permutex2var_ps(mul6, xhi1, add6);
_mm512_storeu_ps(mas1+(ptrdiff_t)64*0+(ptrdiff_t)640*1, lo6);
_mm512_mask_storeu_ps(mas1+(ptrdiff_t)64*1+(ptrdiff_t)640*1, 15, hi6);
}

static void Example34BnSimplify2(
float*restrict means2,
float*restrict variances2,
float*restrict scales2,
float*restrict shifts2,
char*restrict mas2
) {
__m512 eps2 = _mm512_set1_ps(1e-05f);
__m512i xlo2 = _mm512_set_epi32(23, 7, 22, 6, 21, 5, 20, 4, 19, 3, 18, 2, 17, 1, 16, 0);
__m512i xhi2 = _mm512_set_epi32(31, 15, 30, 14, 29, 13, 28, 12, 27, 11, 26, 10, 25, 9, 24, 8);
for (ptrdiff_t i8 = 0; i8 < 1; ++i8) {
__m512 va7 = _mm512_loadu_ps(variances2+(ptrdiff_t)16*0+(ptrdiff_t)80*i8);
__m512 va8 = _mm512_loadu_ps(variances2+(ptrdiff_t)16*1+(ptrdiff_t)80*i8);
__m512 va9 = _mm512_loadu_ps(variances2+(ptrdiff_t)16*2+(ptrdiff_t)80*i8);
__m512 va10 = _mm512_loadu_ps(variances2+(ptrdiff_t)16*3+(ptrdiff_t)80*i8);
__m512 va11 = _mm512_loadu_ps(variances2+(ptrdiff_t)16*4+(ptrdiff_t)80*i8);
__m512 rcp7 = Example34Rsqrt1(_mm512_add_ps(eps2, va7));
__m512 rcp8 = Example34Rsqrt1(_mm512_add_ps(eps2, va8));
__m512 rcp9 = Example34Rsqrt1(_mm512_add_ps(eps2, va9));
__m512 rcp10 = Example34Rsqrt1(_mm512_add_ps(eps2, va10));
__m512 rcp11 = Example34Rsqrt1(_mm512_add_ps(eps2, va11));
__m512 sc7 = _mm512_loadu_ps(scales2+(ptrdiff_t)16*0+(ptrdiff_t)80*i8);
__m512 sc8 = _mm512_loadu_ps(scales2+(ptrdiff_t)16*1+(ptrdiff_t)80*i8);
__m512 sc9 = _mm512_loadu_ps(scales2+(ptrdiff_t)16*2+(ptrdiff_t)80*i8);
__m512 sc10 = _mm512_loadu_ps(scales2+(ptrdiff_t)16*3+(ptrdiff_t)80*i8);
__m512 sc11 = _mm512_loadu_ps(scales2+(ptrdiff_t)16*4+(ptrdiff_t)80*i8);
__m512 mul7 = _mm512_mul_ps(rcp7, sc7);
__m512 mul8 = _mm512_mul_ps(rcp8, sc8);
__m512 mul9 = _mm512_mul_ps(rcp9, sc9);
__m512 mul10 = _mm512_mul_ps(rcp10, sc10);
__m512 mul11 = _mm512_mul_ps(rcp11, sc11);
__m512 me7 = _mm512_loadu_ps(means2+(ptrdiff_t)16*0+(ptrdiff_t)80*i8);
__m512 me8 = _mm512_loadu_ps(means2+(ptrdiff_t)16*1+(ptrdiff_t)80*i8);
__m512 me9 = _mm512_loadu_ps(means2+(ptrdiff_t)16*2+(ptrdiff_t)80*i8);
__m512 me10 = _mm512_loadu_ps(means2+(ptrdiff_t)16*3+(ptrdiff_t)80*i8);
__m512 me11 = _mm512_loadu_ps(means2+(ptrdiff_t)16*4+(ptrdiff_t)80*i8);
__m512 sh7 = _mm512_loadu_ps(shifts2+(ptrdiff_t)16*0+(ptrdiff_t)80*i8);
__m512 sh8 = _mm512_loadu_ps(shifts2+(ptrdiff_t)16*1+(ptrdiff_t)80*i8);
__m512 sh9 = _mm512_loadu_ps(shifts2+(ptrdiff_t)16*2+(ptrdiff_t)80*i8);
__m512 sh10 = _mm512_loadu_ps(shifts2+(ptrdiff_t)16*3+(ptrdiff_t)80*i8);
__m512 sh11 = _mm512_loadu_ps(shifts2+(ptrdiff_t)16*4+(ptrdiff_t)80*i8);
__m512 add7 = _mm512_fnmadd_ps(me7, mul7, sh7);
__m512 add8 = _mm512_fnmadd_ps(me8, mul8, sh8);
__m512 add9 = _mm512_fnmadd_ps(me9, mul9, sh9);
__m512 add10 = _mm512_fnmadd_ps(me10, mul10, sh10);
__m512 add11 = _mm512_fnmadd_ps(me11, mul11, sh11);
__m512 lo7 = _mm512_permutex2var_ps(mul7, xlo2, add7);
__m512 lo8 = _mm512_permutex2var_ps(mul8, xlo2, add8);
__m512 lo9 = _mm512_permutex2var_ps(mul9, xlo2, add9);
__m512 lo10 = _mm512_permutex2var_ps(mul10, xlo2, add10);
__m512 lo11 = _mm512_permutex2var_ps(mul11, xlo2, add11);
__m512 hi7 = _mm512_permutex2var_ps(mul7, xhi2, add7);
__m512 hi8 = _mm512_permutex2var_ps(mul8, xhi2, add8);
__m512 hi9 = _mm512_permutex2var_ps(mul9, xhi2, add9);
__m512 hi10 = _mm512_permutex2var_ps(mul10, xhi2, add10);
__m512 hi11 = _mm512_permutex2var_ps(mul11, xhi2, add11);
_mm512_storeu_ps(mas2+(ptrdiff_t)64*0+(ptrdiff_t)640*i8, lo7);
_mm512_storeu_ps(mas2+(ptrdiff_t)64*1+(ptrdiff_t)640*i8, hi7);
_mm512_storeu_ps(mas2+(ptrdiff_t)64*2+(ptrdiff_t)640*i8, lo8);
_mm512_storeu_ps(mas2+(ptrdiff_t)64*3+(ptrdiff_t)640*i8, hi8);
_mm512_storeu_ps(mas2+(ptrdiff_t)64*4+(ptrdiff_t)640*i8, lo9);
_mm512_storeu_ps(mas2+(ptrdiff_t)64*5+(ptrdiff_t)640*i8, hi9);
_mm512_storeu_ps(mas2+(ptrdiff_t)64*6+(ptrdiff_t)640*i8, lo10);
_mm512_storeu_ps(mas2+(ptrdiff_t)64*7+(ptrdiff_t)640*i8, hi10);
_mm512_storeu_ps(mas2+(ptrdiff_t)64*8+(ptrdiff_t)640*i8, lo11);
_mm512_storeu_ps(mas2+(ptrdiff_t)64*9+(ptrdiff_t)640*i8, hi11);
}
__m512 va12 = _mm512_maskz_loadu_ps(16383, variances2+(ptrdiff_t)16*0+(ptrdiff_t)80*1);
__m512 rcp12 = Example34Rsqrt1(_mm512_add_ps(eps2, va12));
__m512 sc12 = _mm512_maskz_loadu_ps(16383, scales2+(ptrdiff_t)16*0+(ptrdiff_t)80*1);
__m512 mul12 = _mm512_mul_ps(rcp12, sc12);
__m512 me12 = _mm512_maskz_loadu_ps(16383, means2+(ptrdiff_t)16*0+(ptrdiff_t)80*1);
__m512 sh12 = _mm512_maskz_loadu_ps(16383, shifts2+(ptrdiff_t)16*0+(ptrdiff_t)80*1);
__m512 add12 = _mm512_fnmadd_ps(me12, mul12, sh12);
__m512 lo12 = _mm512_permutex2var_ps(mul12, xlo2, add12);
__m512 hi12 = _mm512_permutex2var_ps(mul12, xhi2, add12);
_mm512_storeu_ps(mas2+(ptrdiff_t)64*0+(ptrdiff_t)640*1, lo12);
_mm512_mask_storeu_ps(mas2+(ptrdiff_t)64*1+(ptrdiff_t)640*1, 4095, hi12);
}
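
// Fused elementwise kernel for the bn1/act1/add1 subgraph: each
// element of the 90x16x5 input is scaled and shifted by its
// channel's folded bn1 pair, passed through the leaky ReLU (negative
// lanes multiplied by 0.25, matching Param=0.25 in the graph), and
// summed with the matching element of in2. The 90 channels are split
// into hull slices of 34, 34, and 22.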

static void Example34Elwi1Callee1(Example34ThreaderTask1* task4, int64_t* pt7) {
char** tensors2 = task4->any1;
ptrdiff_t e1 = pt7[0];
ptrdiff_t c1 = pt7[1];
char*restrict ptr1 = tensors2[0]+(ptrdiff_t)320*e1+(ptrdiff_t)10880*c1;
char*restrict ptr2 = tensors2[1]+(ptrdiff_t)8*34*c1;
char*restrict ptr3 = tensors2[2]+(ptrdiff_t)320*e1+(ptrdiff_t)10880*c1;
char*restrict ptr4 = tensors2[3]+(ptrdiff_t)320*e1+(ptrdiff_t)10880*c1;
if (c1 < 2) {
for (ptrdiff_t i6 = 0; i6 < 11; ++i6) {
__m512 bnMul1 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*(0+(ptrdiff_t)3*i6))[0]);
__m512 bnAdd1 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*(0+(ptrdiff_t)3*i6))[1]);
__m512 bnMul2 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*(1+(ptrdiff_t)3*i6))[0]);
__m512 bnAdd2 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*(1+(ptrdiff_t)3*i6))[1]);
__m512 bnMul3 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*(2+(ptrdiff_t)3*i6))[0]);
__m512 bnAdd3 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*(2+(ptrdiff_t)3*i6))[1]);
for (ptrdiff_t j1 = 0; j1 < 5; ++j1) {
__m512 dat1 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)960*i6+(ptrdiff_t)64*j1);
__m512 dat3 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)320+(ptrdiff_t)960*i6+(ptrdiff_t)64*j1);
__m512 dat5 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)640+(ptrdiff_t)960*i6+(ptrdiff_t)64*j1);
__m512 dat2 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)0+(ptrdiff_t)960*i6+(ptrdiff_t)64*j1);
__m512 dat4 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)320+(ptrdiff_t)960*i6+(ptrdiff_t)64*j1);
__m512 dat6 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)640+(ptrdiff_t)960*i6+(ptrdiff_t)64*j1);
dat1 = _mm512_fmadd_ps(dat1, bnMul1, bnAdd1);
dat3 = _mm512_fmadd_ps(dat3, bnMul2, bnAdd2);
dat5 = _mm512_fmadd_ps(dat5, bnMul3, bnAdd3);
__mmask16 mask3 = _mm512_cmp_ps_mask(dat1, _mm512_setzero_ps(), _CMP_LT_OQ);
dat1 = _mm512_mask_mul_ps(dat1, mask3, dat1, _mm512_set1_ps(2.5e-01f));
__mmask16 mask4 = _mm512_cmp_ps_mask(dat3, _mm512_setzero_ps(), _CMP_LT_OQ);
dat3 = _mm512_mask_mul_ps(dat3, mask4, dat3, _mm512_set1_ps(2.5e-01f));
__mmask16 mask5 = _mm512_cmp_ps_mask(dat5, _mm512_setzero_ps(), _CMP_LT_OQ);
dat5 = _mm512_mask_mul_ps(dat5, mask5, dat5, _mm512_set1_ps(2.5e-01f));
dat1 = _mm512_add_ps(dat1, dat2);
dat3 = _mm512_add_ps(dat3, dat4);
dat5 = _mm512_add_ps(dat5, dat6);
_mm512_mask_storeu_ps(ptr4+(ptrdiff_t)0+(ptrdiff_t)960*i6+(ptrdiff_t)64*j1, 65535, dat1);
_mm512_mask_storeu_ps(ptr4+(ptrdiff_t)320+(ptrdiff_t)960*i6+(ptrdiff_t)64*j1, 65535, dat3);
_mm512_mask_storeu_ps(ptr4+(ptrdiff_t)640+(ptrdiff_t)960*i6+(ptrdiff_t)64*j1, 65535, dat5);
}
}
__m512 bnMul4 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*(0+(ptrdiff_t)3*11))[0]);
__m512 bnAdd4 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*(0+(ptrdiff_t)3*11))[1]);
for (ptrdiff_t j2 = 0; j2 < 5; ++j2) {
__m512 dat7 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)960*11+(ptrdiff_t)64*j2);
__m512 dat8 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)0+(ptrdiff_t)960*11+(ptrdiff_t)64*j2);
dat7 = _mm512_fmadd_ps(dat7, bnMul4, bnAdd4);
__mmask16 mask6 = _mm512_cmp_ps_mask(dat7, _mm512_setzero_ps(), _CMP_LT_OQ);
dat7 = _mm512_mask_mul_ps(dat7, mask6, dat7, _mm512_set1_ps(2.5e-01f));
dat7 = _mm512_add_ps(dat7, dat8);
_mm512_mask_storeu_ps(ptr4+(ptrdiff_t)0+(ptrdiff_t)960*11+(ptrdiff_t)64*j2, 65535, dat7);
}
return;
}
for (ptrdiff_t i7 = 0; i7 < 7; ++i7) {
__m512 bnMul5 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*(0+(ptrdiff_t)3*i7))[0]);
__m512 bnAdd5 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*(0+(ptrdiff_t)3*i7))[1]);
__m512 bnMul6 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*(1+(ptrdiff_t)3*i7))[0]);
__m512 bnAdd6 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*(1+(ptrdiff_t)3*i7))[1]);
__m512 bnMul7 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*(2+(ptrdiff_t)3*i7))[0]);
__m512 bnAdd7 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*(2+(ptrdiff_t)3*i7))[1]);
for (ptrdiff_t j3 = 0; j3 < 5; ++j3) {
__m512 dat9 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)960*i7+(ptrdiff_t)64*j3);
__m512 dat11 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)320+(ptrdiff_t)960*i7+(ptrdiff_t)64*j3);
__m512 dat13 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)640+(ptrdiff_t)960*i7+(ptrdiff_t)64*j3);
__m512 dat10 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)0+(ptrdiff_t)960*i7+(ptrdiff_t)64*j3);
__m512 dat12 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)320+(ptrdiff_t)960*i7+(ptrdiff_t)64*j3);
__m512 dat14 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)640+(ptrdiff_t)960*i7+(ptrdiff_t)64*j3);
dat9 = _mm512_fmadd_ps(dat9, bnMul5, bnAdd5);
dat11 = _mm512_fmadd_ps(dat11, bnMul6, bnAdd6);
dat13 = _mm512_fmadd_ps(dat13, bnMul7, bnAdd7);
__mmask16 mask7 = _mm512_cmp_ps_mask(dat9, _mm512_setzero_ps(), _CMP_LT_OQ);
dat9 = _mm512_mask_mul_ps(dat9, mask7, dat9, _mm512_set1_ps(2.5e-01f));
__mmask16 mask8 = _mm512_cmp_ps_mask(dat11, _mm512_setzero_ps(), _CMP_LT_OQ);
dat11 = _mm512_mask_mul_ps(dat11, mask8, dat11, _mm512_set1_ps(2.5e-01f));
__mmask16 mask9 = _mm512_cmp_ps_mask(dat13, _mm512_setzero_ps(), _CMP_LT_OQ);
dat13 = _mm512_mask_mul_ps(dat13, mask9, dat13, _mm512_set1_ps(2.5e-01f));
dat9 = _mm512_add_ps(dat9, dat10);
dat11 = _mm512_add_ps(dat11, dat12);
dat13 = _mm512_add_ps(dat13, dat14);
_mm512_mask_storeu_ps(ptr4+(ptrdiff_t)0+(ptrdiff_t)960*i7+(ptrdiff_t)64*j3, 65535, dat9);
_mm512_mask_storeu_ps(ptr4+(ptrdiff_t)320+(ptrdiff_t)960*i7+(ptrdiff_t)64*j3, 65535, dat11);
_mm512_mask_storeu_ps(ptr4+(ptrdiff_t)640+(ptrdiff_t)960*i7+(ptrdiff_t)64*j3, 65535, dat13);
}
}
__m512 bnMul8 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*(0+(ptrdiff_t)3*7))[0]);
__m512 bnAdd8 = _mm512_set1_ps(((float*)ptr2+(ptrdiff_t)2*(0+(ptrdiff_t)3*7))[1]);
for (ptrdiff_t j4 = 0; j4 < 5; ++j4) {
__m512 dat15 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)960*7+(ptrdiff_t)64*j4);
__m512 dat16 = _mm512_maskz_loadu_ps(65535, ptr3+(ptrdiff_t)0+(ptrdiff_t)960*7+(ptrdiff_t)64*j4);
dat15 = _mm512_fmadd_ps(dat15, bnMul8, bnAdd8);
__mmask16 mask10 = _mm512_cmp_ps_mask(dat15, _mm512_setzero_ps(), _CMP_LT_OQ);
dat15 = _mm512_mask_mul_ps(dat15, mask10, dat15, _mm512_set1_ps(2.5e-01f));
dat15 = _mm512_add_ps(dat15, dat16);
_mm512_mask_storeu_ps(ptr4+(ptrdiff_t)0+(ptrdiff_t)960*7+(ptrdiff_t)64*j4, 65535, dat15);
}
}

static void Example34Elwi1(Example34ThreaderTeam1* team13, char** tensors1) {
Example34ThreaderTask1 task5;
task5.callee1 = Example34Elwi1Callee1;
task5.any1 = tensors1;
task5.nd1 = 2;
task5.hull1[0] = 1;
task5.hull1[1] = 3;
Example34ThreaderDo1(team13, &task5);
}
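
// One-time weight rearrangement for the FullyConnected layer: the
// 94x90x16x5 float weights are converted to fp16 in a blocked layout
// for the inference kernel. The preceding BatchNorm (bn2) is folded
// in as a per-input-channel multiply, with its additive term
// accumulated into the bias; the following BatchNorm (bn3) is folded
// in as a per-output-channel multiply on the weights and a
// multiply-add on the bias. The hull's six slices cover 16, 16, 16,
// 16, 16, and 14 output channels.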

static void Example34FcArrange1Callee1(Example34ThreaderTask1* task6, int64_t* pt8) {
char** tensors4 = task6->any1;
ptrdiff_t t2 = pt8[0];
char*restrict weights1 = tensors4[0]+(ptrdiff_t)460800*t2;
char*restrict biases1 = tensors4[1]+(ptrdiff_t)64*t2;
char*restrict bnPtr1 = tensors4[2];
char*restrict bnPtr2 = tensors4[3]+(ptrdiff_t)8*16*t2;
char*restrict weights2 = tensors4[4]+(ptrdiff_t)230400*t2;
char*restrict biases2 = tensors4[4]+(ptrdiff_t)1353600+(ptrdiff_t)64*t2;
if (t2 < 5) {
for (ptrdiff_t i9 = 0; i9 < 1; ++i9) {
for (ptrdiff_t j5 = 0; j5 < 2; ++j5) {
__m512 postMul1 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(0+16*i9+8*j5))[0]);
__m512 postMul2 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(1+16*i9+8*j5))[0]);
__m512 postMul3 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(2+16*i9+8*j5))[0]);
__m512 postMul4 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(3+16*i9+8*j5))[0]);
__m512 postMul5 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(4+16*i9+8*j5))[0]);
__m512 postMul6 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(5+16*i9+8*j5))[0]);
__m512 postMul7 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(6+16*i9+8*j5))[0]);
__m512 postMul8 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(7+16*i9+8*j5))[0]);
__m512 sum2 = _mm512_setzero_ps();
__m512 sum3 = _mm512_setzero_ps();
__m512 sum4 = _mm512_setzero_ps();
__m512 sum5 = _mm512_setzero_ps();
__m512 sum6 = _mm512_setzero_ps();
__m512 sum7 = _mm512_setzero_ps();
__m512 sum8 = _mm512_setzero_ps();
__m512 sum9 = _mm512_setzero_ps();
for (ptrdiff_t k1 = 0; k1 < 90; ++k1) {
__m512 preMul1 = _mm512_set1_ps(((float*)bnPtr1+(ptrdiff_t)2*(0+(ptrdiff_t)1*k1))[0]);
__m512 preAdd1 = _mm512_set1_ps(((float*)bnPtr1+(ptrdiff_t)2*(0+(ptrdiff_t)1*k1))[1]);
for (ptrdiff_t l1 = 0; l1 < 5; ++l1) {
__m512 wtLo1 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)0+(ptrdiff_t)460800*i9+(ptrdiff_t)230400*j5+(ptrdiff_t)320*k1+(ptrdiff_t)64*l1);
__m512 wtHi1 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)28800+(ptrdiff_t)460800*i9+(ptrdiff_t)230400*j5+(ptrdiff_t)320*k1+(ptrdiff_t)64*l1);
__m512 wtLo2 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)57600+(ptrdiff_t)460800*i9+(ptrdiff_t)230400*j5+(ptrdiff_t)320*k1+(ptrdiff_t)64*l1);
__m512 wtHi2 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)86400+(ptrdiff_t)460800*i9+(ptrdiff_t)230400*j5+(ptrdiff_t)320*k1+(ptrdiff_t)64*l1);
sum2 = _mm512_fmadd_ps(wtLo1, preAdd1, sum2);
sum3 = _mm512_fmadd_ps(wtHi1, preAdd1, sum3);
sum4 = _mm512_fmadd_ps(wtLo2, preAdd1, sum4);
sum5 = _mm512_fmadd_ps(wtHi2, preAdd1, sum5);
wtLo1 = _mm512_mul_ps(wtLo1, _mm512_mul_ps(postMul1, preMul1));
wtHi1 = _mm512_mul_ps(wtHi1, _mm512_mul_ps(postMul2, preMul1));
wtLo2 = _mm512_mul_ps(wtLo2, _mm512_mul_ps(postMul3, preMul1));
wtHi2 = _mm512_mul_ps(wtHi2, _mm512_mul_ps(postMul4, preMul1));
__m256i halfLo1 = _mm512_cvtps_ph(wtLo1, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m256i halfHi1 = _mm512_cvtps_ph(wtHi1, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m256i halfLo2 = _mm512_cvtps_ph(wtLo2, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m256i halfHi2 = _mm512_cvtps_ph(wtHi2, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m512i yield1 = _mm512_inserti64x4(_mm512_castsi256_si512(halfLo1), halfHi1, 1);
__m512i yield2 = _mm512_inserti64x4(_mm512_castsi256_si512(halfLo2), halfHi2, 1);
_mm512_mask_storeu_epi32(weights2+(ptrdiff_t)0+(ptrdiff_t)230400*i9+(ptrdiff_t)256*j5+(ptrdiff_t)2560*k1+(ptrdiff_t)512*l1, 65535, yield1);
_mm512_mask_storeu_epi32(weights2+(ptrdiff_t)64+(ptrdiff_t)230400*i9+(ptrdiff_t)256*j5+(ptrdiff_t)2560*k1+(ptrdiff_t)512*l1, 65535, yield2);
__m512 wtLo3 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)115200+(ptrdiff_t)460800*i9+(ptrdiff_t)230400*j5+(ptrdiff_t)320*k1+(ptrdiff_t)64*l1);
__m512 wtHi3 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)144000+(ptrdiff_t)460800*i9+(ptrdiff_t)230400*j5+(ptrdiff_t)320*k1+(ptrdiff_t)64*l1);
__m512 wtLo4 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)172800+(ptrdiff_t)460800*i9+(ptrdiff_t)230400*j5+(ptrdiff_t)320*k1+(ptrdiff_t)64*l1);
__m512 wtHi4 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)201600+(ptrdiff_t)460800*i9+(ptrdiff_t)230400*j5+(ptrdiff_t)320*k1+(ptrdiff_t)64*l1);
sum6 = _mm512_fmadd_ps(wtLo3, preAdd1, sum6);
sum7 = _mm512_fmadd_ps(wtHi3, preAdd1, sum7);
sum8 = _mm512_fmadd_ps(wtLo4, preAdd1, sum8);
sum9 = _mm512_fmadd_ps(wtHi4, preAdd1, sum9);
wtLo3 = _mm512_mul_ps(wtLo3, _mm512_mul_ps(postMul5, preMul1));
wtHi3 = _mm512_mul_ps(wtHi3, _mm512_mul_ps(postMul6, preMul1));
wtLo4 = _mm512_mul_ps(wtLo4, _mm512_mul_ps(postMul7, preMul1));
wtHi4 = _mm512_mul_ps(wtHi4, _mm512_mul_ps(postMul8, preMul1));
__m256i halfLo3 = _mm512_cvtps_ph(wtLo3, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m256i halfHi3 = _mm512_cvtps_ph(wtHi3, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m256i halfLo4 = _mm512_cvtps_ph(wtLo4, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m256i halfHi4 = _mm512_cvtps_ph(wtHi4, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m512i yield3 = _mm512_inserti64x4(_mm512_castsi256_si512(halfLo3), halfHi3, 1);
__m512i yield4 = _mm512_inserti64x4(_mm512_castsi256_si512(halfLo4), halfHi4, 1);
_mm512_mask_storeu_epi32(weights2+(ptrdiff_t)128+(ptrdiff_t)230400*i9+(ptrdiff_t)256*j5+(ptrdiff_t)2560*k1+(ptrdiff_t)512*l1, 65535, yield3);
_mm512_mask_storeu_epi32(weights2+(ptrdiff_t)192+(ptrdiff_t)230400*i9+(ptrdiff_t)256*j5+(ptrdiff_t)2560*k1+(ptrdiff_t)512*l1, 65535, yield4);
}
}
__m512i pmEven1 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pmOdd1 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512i pm4Lo1 = _mm512_set_epi32(27, 26, 25, 24, 11, 10, 9, 8, 19, 18, 17, 16, 3, 2, 1, 0);
__m512i pm4Hi1 = _mm512_set_epi32(31, 30, 29, 28, 15, 14, 13, 12, 23, 22, 21, 20, 7, 6, 5, 4);
__m512 upper3 = _mm512_shuffle_f32x4(sum2, sum6, 238);
__m512 upper4 = _mm512_shuffle_f32x4(sum4, sum8, 238);
sum2 = _mm512_shuffle_f32x4(sum2, sum6, 68);
sum4 = _mm512_shuffle_f32x4(sum4, sum8, 68);
sum2 = _mm512_add_ps(sum2, upper3);
sum4 = _mm512_add_ps(sum4, upper4);
__m512 upper6 = _mm512_shuffle_f32x4(sum3, sum7, 238);
__m512 upper7 = _mm512_shuffle_f32x4(sum5, sum9, 238);
sum3 = _mm512_shuffle_f32x4(sum3, sum7, 68);
sum5 = _mm512_shuffle_f32x4(sum5, sum9, 68);
sum3 = _mm512_add_ps(sum3, upper6);
sum5 = _mm512_add_ps(sum5, upper7);
__m512 upper2 = _mm512_permutex2var_ps(sum2, pm4Hi1, sum4);
__m512 upper5 = _mm512_permutex2var_ps(sum3, pm4Hi1, sum5);
sum2 = _mm512_permutex2var_ps(sum2, pm4Lo1, sum4);
sum3 = _mm512_permutex2var_ps(sum3, pm4Lo1, sum5);
sum2 = _mm512_add_ps(sum2, upper2);
sum3 = _mm512_add_ps(sum3, upper5);
__m512 upper1 = _mm512_shuffle_ps(sum2, sum3, 238);
sum2 = _mm512_shuffle_ps(sum2, sum3, 68);
sum2 = _mm512_add_ps(sum2, upper1);
__m512 upper8 = _mm512_permutexvar_ps(pmOdd1, sum2);
sum2 = _mm512_permutexvar_ps(pmEven1, sum2);
sum2 = _mm512_add_ps(sum2, upper8);
__m512 bias1 = _mm512_maskz_loadu_ps(255, biases1+(ptrdiff_t)64*i9+(ptrdiff_t)32*j5);
bias1 = _mm512_add_ps(sum2, bias1);
__m512i pmMul1 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pmAdd1 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512 mas3 = _mm512_maskz_loadu_ps(65535, bnPtr2+(ptrdiff_t)8*(0+16*i9+8*j5));
__m512 postMul9 = _mm512_permutexvar_ps(pmMul1, mas3);
__m512 postAdd1 = _mm512_permutexvar_ps(pmAdd1, mas3);
bias1 = _mm512_fmadd_ps(bias1, postMul9, postAdd1);
_mm512_mask_storeu_ps(biases2+(ptrdiff_t)64*i9+(ptrdiff_t)32*j5, 255, bias1);
}
}
return;
}
for (ptrdiff_t i10 = 0; i10 < 1; ++i10) {
for (ptrdiff_t j6 = 0; j6 < 1; ++j6) {
__m512 postMul10 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(0+14*i10+8*j6))[0]);
__m512 postMul11 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(1+14*i10+8*j6))[0]);
__m512 postMul12 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(2+14*i10+8*j6))[0]);
__m512 postMul13 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(3+14*i10+8*j6))[0]);
__m512 postMul14 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(4+14*i10+8*j6))[0]);
__m512 postMul15 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(5+14*i10+8*j6))[0]);
__m512 postMul16 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(6+14*i10+8*j6))[0]);
__m512 postMul17 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(7+14*i10+8*j6))[0]);
__m512 sum10 = _mm512_setzero_ps();
__m512 sum11 = _mm512_setzero_ps();
__m512 sum12 = _mm512_setzero_ps();
__m512 sum13 = _mm512_setzero_ps();
__m512 sum14 = _mm512_setzero_ps();
__m512 sum15 = _mm512_setzero_ps();
__m512 sum16 = _mm512_setzero_ps();
__m512 sum17 = _mm512_setzero_ps();
for (ptrdiff_t k2 = 0; k2 < 90; ++k2) {
__m512 preMul2 = _mm512_set1_ps(((float*)bnPtr1+(ptrdiff_t)2*(0+(ptrdiff_t)1*k2))[0]);
__m512 preAdd2 = _mm512_set1_ps(((float*)bnPtr1+(ptrdiff_t)2*(0+(ptrdiff_t)1*k2))[1]);
for (ptrdiff_t l2 = 0; l2 < 5; ++l2) {
__m512 wtLo5 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)0+(ptrdiff_t)403200*i10+(ptrdiff_t)230400*j6+(ptrdiff_t)320*k2+(ptrdiff_t)64*l2);
__m512 wtHi5 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)28800+(ptrdiff_t)403200*i10+(ptrdiff_t)230400*j6+(ptrdiff_t)320*k2+(ptrdiff_t)64*l2);
__m512 wtLo6 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)57600+(ptrdiff_t)403200*i10+(ptrdiff_t)230400*j6+(ptrdiff_t)320*k2+(ptrdiff_t)64*l2);
__m512 wtHi6 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)86400+(ptrdiff_t)403200*i10+(ptrdiff_t)230400*j6+(ptrdiff_t)320*k2+(ptrdiff_t)64*l2);
sum10 = _mm512_fmadd_ps(wtLo5, preAdd2, sum10);
sum11 = _mm512_fmadd_ps(wtHi5, preAdd2, sum11);
sum12 = _mm512_fmadd_ps(wtLo6, preAdd2, sum12);
sum13 = _mm512_fmadd_ps(wtHi6, preAdd2, sum13);
wtLo5 = _mm512_mul_ps(wtLo5, _mm512_mul_ps(postMul10, preMul2));
wtHi5 = _mm512_mul_ps(wtHi5, _mm512_mul_ps(postMul11, preMul2));
wtLo6 = _mm512_mul_ps(wtLo6, _mm512_mul_ps(postMul12, preMul2));
wtHi6 = _mm512_mul_ps(wtHi6, _mm512_mul_ps(postMul13, preMul2));
__m256i halfLo5 = _mm512_cvtps_ph(wtLo5, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m256i halfHi5 = _mm512_cvtps_ph(wtHi5, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m256i halfLo6 = _mm512_cvtps_ph(wtLo6, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m256i halfHi6 = _mm512_cvtps_ph(wtHi6, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m512i yield5 = _mm512_inserti64x4(_mm512_castsi256_si512(halfLo5), halfHi5, 1);
__m512i yield6 = _mm512_inserti64x4(_mm512_castsi256_si512(halfLo6), halfHi6, 1);
_mm512_mask_storeu_epi32(weights2+(ptrdiff_t)0+(ptrdiff_t)230400*i10+(ptrdiff_t)256*j6+(ptrdiff_t)2240*k2+(ptrdiff_t)448*l2, 65535, yield5);
_mm512_mask_storeu_epi32(weights2+(ptrdiff_t)64+(ptrdiff_t)230400*i10+(ptrdiff_t)256*j6+(ptrdiff_t)2240*k2+(ptrdiff_t)448*l2, 65535, yield6);
__m512 wtLo7 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)115200+(ptrdiff_t)403200*i10+(ptrdiff_t)230400*j6+(ptrdiff_t)320*k2+(ptrdiff_t)64*l2);
__m512 wtHi7 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)144000+(ptrdiff_t)403200*i10+(ptrdiff_t)230400*j6+(ptrdiff_t)320*k2+(ptrdiff_t)64*l2);
__m512 wtLo8 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)172800+(ptrdiff_t)403200*i10+(ptrdiff_t)230400*j6+(ptrdiff_t)320*k2+(ptrdiff_t)64*l2);
__m512 wtHi8 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)201600+(ptrdiff_t)403200*i10+(ptrdiff_t)230400*j6+(ptrdiff_t)320*k2+(ptrdiff_t)64*l2);
sum14 = _mm512_fmadd_ps(wtLo7, preAdd2, sum14);
sum15 = _mm512_fmadd_ps(wtHi7, preAdd2, sum15);
sum16 = _mm512_fmadd_ps(wtLo8, preAdd2, sum16);
sum17 = _mm512_fmadd_ps(wtHi8, preAdd2, sum17);
wtLo7 = _mm512_mul_ps(wtLo7, _mm512_mul_ps(postMul14, preMul2));
wtHi7 = _mm512_mul_ps(wtHi7, _mm512_mul_ps(postMul15, preMul2));
wtLo8 = _mm512_mul_ps(wtLo8, _mm512_mul_ps(postMul16, preMul2));
wtHi8 = _mm512_mul_ps(wtHi8, _mm512_mul_ps(postMul17, preMul2));
__m256i halfLo7 = _mm512_cvtps_ph(wtLo7, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m256i halfHi7 = _mm512_cvtps_ph(wtHi7, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m256i halfLo8 = _mm512_cvtps_ph(wtLo8, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m256i halfHi8 = _mm512_cvtps_ph(wtHi8, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m512i yield7 = _mm512_inserti64x4(_mm512_castsi256_si512(halfLo7), halfHi7, 1);
__m512i yield8 = _mm512_inserti64x4(_mm512_castsi256_si512(halfLo8), halfHi8, 1);
_mm512_mask_storeu_epi32(weights2+(ptrdiff_t)128+(ptrdiff_t)230400*i10+(ptrdiff_t)256*j6+(ptrdiff_t)2240*k2+(ptrdiff_t)448*l2, 65535, yield7);
_mm512_mask_storeu_epi32(weights2+(ptrdiff_t)192+(ptrdiff_t)230400*i10+(ptrdiff_t)256*j6+(ptrdiff_t)2240*k2+(ptrdiff_t)448*l2, 65535, yield8);
}
}
__m512i pmEven2 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pmOdd2 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512i pm4Lo2 = _mm512_set_epi32(27, 26, 25, 24, 11, 10, 9, 8, 19, 18, 17, 16, 3, 2, 1, 0);
__m512i pm4Hi2 = _mm512_set_epi32(31, 30, 29, 28, 15, 14, 13, 12, 23, 22, 21, 20, 7, 6, 5, 4);
__m512 upper11 = _mm512_shuffle_f32x4(sum10, sum14, 238);
__m512 upper12 = _mm512_shuffle_f32x4(sum12, sum16, 238);
sum10 = _mm512_shuffle_f32x4(sum10, sum14, 68);
sum12 = _mm512_shuffle_f32x4(sum12, sum16, 68);
sum10 = _mm512_add_ps(sum10, upper11);
sum12 = _mm512_add_ps(sum12, upper12);
__m512 upper14 = _mm512_shuffle_f32x4(sum11, sum15, 238);
__m512 upper15 = _mm512_shuffle_f32x4(sum13, sum17, 238);
sum11 = _mm512_shuffle_f32x4(sum11, sum15, 68);
sum13 = _mm512_shuffle_f32x4(sum13, sum17, 68);
sum11 = _mm512_add_ps(sum11, upper14);
sum13 = _mm512_add_ps(sum13, upper15);
__m512 upper10 = _mm512_permutex2var_ps(sum10, pm4Hi2, sum12);
__m512 upper13 = _mm512_permutex2var_ps(sum11, pm4Hi2, sum13);
sum10 = _mm512_permutex2var_ps(sum10, pm4Lo2, sum12);
sum11 = _mm512_permutex2var_ps(sum11, pm4Lo2, sum13);
sum10 = _mm512_add_ps(sum10, upper10);
sum11 = _mm512_add_ps(sum11, upper13);
__m512 upper9 = _mm512_shuffle_ps(sum10, sum11, 238);
sum10 = _mm512_shuffle_ps(sum10, sum11, 68);
sum10 = _mm512_add_ps(sum10, upper9);
__m512 upper16 = _mm512_permutexvar_ps(pmOdd2, sum10);
sum10 = _mm512_permutexvar_ps(pmEven2, sum10);
sum10 = _mm512_add_ps(sum10, upper16);
__m512 bias2 = _mm512_maskz_loadu_ps(255, biases1+(ptrdiff_t)56*i10+(ptrdiff_t)32*j6);
bias2 = _mm512_add_ps(sum10, bias2);
__m512i pmMul2 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pmAdd2 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512 mas4 = _mm512_maskz_loadu_ps(65535, bnPtr2+(ptrdiff_t)8*(0+14*i10+8*j6));
__m512 postMul18 = _mm512_permutexvar_ps(pmMul2, mas4);
__m512 postAdd2 = _mm512_permutexvar_ps(pmAdd2, mas4);
bias2 = _mm512_fmadd_ps(bias2, postMul18, postAdd2);
_mm512_mask_storeu_ps(biases2+(ptrdiff_t)56*i10+(ptrdiff_t)32*j6, 255, bias2);
}
__m512 postMul19 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(0+14*i10+8*1))[0]);
__m512 postMul20 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(1+14*i10+8*1))[0]);
__m512 postMul21 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(2+14*i10+8*1))[0]);
__m512 postMul22 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(3+14*i10+8*1))[0]);
__m512 postMul23 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(4+14*i10+8*1))[0]);
__m512 postMul24 = _mm512_set1_ps(((float*)bnPtr2+(ptrdiff_t)2*(5+14*i10+8*1))[0]);
__m512 sum18 = _mm512_setzero_ps();
__m512 sum19 = _mm512_setzero_ps();
__m512 sum20 = _mm512_setzero_ps();
__m512 sum21 = _mm512_setzero_ps();
__m512 sum22 = _mm512_setzero_ps();
__m512 sum23 = _mm512_setzero_ps();
for (ptrdiff_t k3 = 0; k3 < 90; ++k3) {
__m512 preMul3 = _mm512_set1_ps(((float*)bnPtr1+(ptrdiff_t)2*(0+(ptrdiff_t)1*k3))[0]);
__m512 preAdd3 = _mm512_set1_ps(((float*)bnPtr1+(ptrdiff_t)2*(0+(ptrdiff_t)1*k3))[1]);
for (ptrdiff_t l3 = 0; l3 < 5; ++l3) {
__m512 wtLo9 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)0+(ptrdiff_t)403200*i10+(ptrdiff_t)230400*1+(ptrdiff_t)320*k3+(ptrdiff_t)64*l3);
__m512 wtHi9 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)28800+(ptrdiff_t)403200*i10+(ptrdiff_t)230400*1+(ptrdiff_t)320*k3+(ptrdiff_t)64*l3);
__m512 wtLo10 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)57600+(ptrdiff_t)403200*i10+(ptrdiff_t)230400*1+(ptrdiff_t)320*k3+(ptrdiff_t)64*l3);
__m512 wtHi10 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)86400+(ptrdiff_t)403200*i10+(ptrdiff_t)230400*1+(ptrdiff_t)320*k3+(ptrdiff_t)64*l3);
sum18 = _mm512_fmadd_ps(wtLo9, preAdd3, sum18);
sum19 = _mm512_fmadd_ps(wtHi9, preAdd3, sum19);
sum20 = _mm512_fmadd_ps(wtLo10, preAdd3, sum20);
sum21 = _mm512_fmadd_ps(wtHi10, preAdd3, sum21);
wtLo9 = _mm512_mul_ps(wtLo9, _mm512_mul_ps(postMul19, preMul3));
wtHi9 = _mm512_mul_ps(wtHi9, _mm512_mul_ps(postMul20, preMul3));
wtLo10 = _mm512_mul_ps(wtLo10, _mm512_mul_ps(postMul21, preMul3));
wtHi10 = _mm512_mul_ps(wtHi10, _mm512_mul_ps(postMul22, preMul3));
__m256i halfLo9 = _mm512_cvtps_ph(wtLo9, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m256i halfHi9 = _mm512_cvtps_ph(wtHi9, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m256i halfLo10 = _mm512_cvtps_ph(wtLo10, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m256i halfHi10 = _mm512_cvtps_ph(wtHi10, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m512i yield9 = _mm512_inserti64x4(_mm512_castsi256_si512(halfLo9), halfHi9, 1);
__m512i yield10 = _mm512_inserti64x4(_mm512_castsi256_si512(halfLo10), halfHi10, 1);
_mm512_mask_storeu_epi32(weights2+(ptrdiff_t)0+(ptrdiff_t)230400*i10+(ptrdiff_t)256*1+(ptrdiff_t)2240*k3+(ptrdiff_t)448*l3, 65535, yield9);
_mm512_mask_storeu_epi32(weights2+(ptrdiff_t)64+(ptrdiff_t)230400*i10+(ptrdiff_t)256*1+(ptrdiff_t)2240*k3+(ptrdiff_t)448*l3, 65535, yield10);
__m512 wtLo11 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)115200+(ptrdiff_t)403200*i10+(ptrdiff_t)230400*1+(ptrdiff_t)320*k3+(ptrdiff_t)64*l3);
__m512 wtHi11 = _mm512_maskz_loadu_ps(65535, weights1+(ptrdiff_t)144000+(ptrdiff_t)403200*i10+(ptrdiff_t)230400*1+(ptrdiff_t)320*k3+(ptrdiff_t)64*l3);
sum22 = _mm512_fmadd_ps(wtLo11, preAdd3, sum22);
sum23 = _mm512_fmadd_ps(wtHi11, preAdd3, sum23);
wtLo11 = _mm512_mul_ps(wtLo11, _mm512_mul_ps(postMul23, preMul3));
wtHi11 = _mm512_mul_ps(wtHi11, _mm512_mul_ps(postMul24, preMul3));
__m256i halfLo11 = _mm512_cvtps_ph(wtLo11, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m256i halfHi11 = _mm512_cvtps_ph(wtHi11, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m512i yield11 = _mm512_inserti64x4(_mm512_castsi256_si512(halfLo11), halfHi11, 1);
_mm512_mask_storeu_epi32(weights2+(ptrdiff_t)128+(ptrdiff_t)230400*i10+(ptrdiff_t)256*1+(ptrdiff_t)2240*k3+(ptrdiff_t)448*l3, 65535, yield11);
}
}
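// Reduce the six accumulators so that one lane holds the complete
// weight-times-shift total for each output channel of this block, then
// pass that total plus the original bias through bn3's multiplier and
// shift to produce the adjusted bias vector.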
__m512i pmEven3 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pmOdd3 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512i pm4Lo3 = _mm512_set_epi32(27, 26, 25, 24, 11, 10, 9, 8, 19, 18, 17, 16, 3, 2, 1, 0);
__m512i pm4Hi3 = _mm512_set_epi32(31, 30, 29, 28, 15, 14, 13, 12, 23, 22, 21, 20, 7, 6, 5, 4);
__m512 upper19 = _mm512_shuffle_f32x4(sum18, sum22, 238);
__m512 upper20 = _mm512_shuffle_f32x4(sum20, sum20, 14);
sum18 = _mm512_shuffle_f32x4(sum18, sum22, 68);
sum18 = _mm512_add_ps(sum18, upper19);
sum20 = _mm512_add_ps(sum20, upper20);
__m512 upper22 = _mm512_shuffle_f32x4(sum19, sum23, 238);
__m512 upper23 = _mm512_shuffle_f32x4(sum21, sum21, 14);
sum19 = _mm512_shuffle_f32x4(sum19, sum23, 68);
sum19 = _mm512_add_ps(sum19, upper22);
sum21 = _mm512_add_ps(sum21, upper23);
__m512 upper18 = _mm512_permutex2var_ps(sum18, pm4Hi3, sum20);
__m512 upper21 = _mm512_permutex2var_ps(sum19, pm4Hi3, sum21);
sum18 = _mm512_permutex2var_ps(sum18, pm4Lo3, sum20);
sum19 = _mm512_permutex2var_ps(sum19, pm4Lo3, sum21);
sum18 = _mm512_add_ps(sum18, upper18);
sum19 = _mm512_add_ps(sum19, upper21);
__m512 upper17 = _mm512_shuffle_ps(sum18, sum19, 238);
sum18 = _mm512_shuffle_ps(sum18, sum19, 68);
sum18 = _mm512_add_ps(sum18, upper17);
__m512 upper24 = _mm512_permutexvar_ps(pmOdd3, sum18);
sum18 = _mm512_permutexvar_ps(pmEven3, sum18);
sum18 = _mm512_add_ps(sum18, upper24);
__m512 bias3 = _mm512_maskz_loadu_ps(63, biases1+(ptrdiff_t)56*i10+(ptrdiff_t)32*1);
bias3 = _mm512_add_ps(sum18, bias3);
__m512i pmMul3 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pmAdd3 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512 mas5 = _mm512_maskz_loadu_ps(4095, bnPtr2+(ptrdiff_t)8*(0+14*i10+8*1));
__m512 postMul25 = _mm512_permutexvar_ps(pmMul3, mas5);
__m512 postAdd3 = _mm512_permutexvar_ps(pmAdd3, mas5);
bias3 = _mm512_fmadd_ps(bias3, postMul25, postAdd3);
_mm512_mask_storeu_ps(biases2+(ptrdiff_t)56*i10+(ptrdiff_t)32*1, 63, bias3);
}
}

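// Entry point for the weight rearrangement pass: a one-dimensional
// hull of six tasks, each handled by Example34FcArrange1Callee1 above.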
static void Example34FcArrange1(Example34ThreaderTeam1* team15, char** tensors3) {
Example34ThreaderTask1 task7;
task7.callee1 = Example34FcArrange1Callee1;
task7.any1 = tensors3;
task7.nd1 = 1;
task7.hull1[0] = 6;
Example34ThreaderDo1(team15, &task7);
}

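// Apply the fully connected layer together with everything fused after
// it. Each of the six tasks produces one block of output channels:
// tasks 0 through 4 yield 16 channels apiece and task 5 yields the
// final 14, covering all 94 channels.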
static void Example34FcApply1Callee1(Example34ThreaderTask1* task8, int64_t* pt9) {
char** tensors6 = task8->any1;
ptrdiff_t t3 = pt9[0];
char*restrict wtPtr1 = tensors6[0]+(ptrdiff_t)230400*t3;
char*restrict biasPtr1 = tensors6[0]+(ptrdiff_t)1353600+(ptrdiff_t)64*t3;
char*restrict datPtr1 = tensors6[1];
char*restrict datPtr2 = tensors6[2]+(ptrdiff_t)64*t3;
char*restrict bnPtr3 = tensors6[3]+(ptrdiff_t)8*16*t3;
char*restrict datPtr3 = tensors6[4]+(ptrdiff_t)64*t3;
if (t3 < 5) {
for (ptrdiff_t i11 = 0; i11 < 1; ++i11) {
__m512 sum24 = _mm512_setzero_ps();
__m512 sum25 = _mm512_setzero_ps();
__m512 sum26 = _mm512_setzero_ps();
__m512 sum27 = _mm512_setzero_ps();
__m512 sum28 = _mm512_setzero_ps();
__m512 sum29 = _mm512_setzero_ps();
__m512 sum30 = _mm512_setzero_ps();
__m512 sum31 = _mm512_setzero_ps();
__m512 sum32 = _mm512_setzero_ps();
__m512 sum33 = _mm512_setzero_ps();
__m512 sum34 = _mm512_setzero_ps();
__m512 sum35 = _mm512_setzero_ps();
__m512 sum36 = _mm512_setzero_ps();
__m512 sum37 = _mm512_setzero_ps();
__m512 sum38 = _mm512_setzero_ps();
__m512 sum39 = _mm512_setzero_ps();
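// The input tensor holds 90x16x5 = 7200 floats, consumed here as 450
// vectors of 16. Each 512-bit weight load carries 32 FP16 values that
// are decompressed to two FP32 vectors and multiply-accumulated
// against the same data vector, one accumulator per output channel.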
for (ptrdiff_t j7 = 0; j7 < 450; ++j7) {
__m512i wts1 = _mm512_maskz_loadu_epi32(65535, wtPtr1+(ptrdiff_t)0+(ptrdiff_t)230400*i11+(ptrdiff_t)512*j7);
__m512 dat18 = _mm512_maskz_loadu_ps(65535, datPtr1+(ptrdiff_t)0+(ptrdiff_t)64*j7);
__m512i wts2 = _mm512_maskz_loadu_epi32(65535, wtPtr1+(ptrdiff_t)64+(ptrdiff_t)230400*i11+(ptrdiff_t)512*j7);
__m512i wts3 = _mm512_maskz_loadu_epi32(65535, wtPtr1+(ptrdiff_t)128+(ptrdiff_t)230400*i11+(ptrdiff_t)512*j7);
__m512i wts4 = _mm512_maskz_loadu_epi32(65535, wtPtr1+(ptrdiff_t)192+(ptrdiff_t)230400*i11+(ptrdiff_t)512*j7);
__m512 wtLo12 = _mm512_cvtph_ps(_mm512_castsi512_si256(wts1));
__m512 wtHi12 = _mm512_cvtph_ps(_mm512_extracti64x4_epi64(wts1, 1));
__m512 wtLo13 = _mm512_cvtph_ps(_mm512_castsi512_si256(wts2));
__m512 wtHi13 = _mm512_cvtph_ps(_mm512_extracti64x4_epi64(wts2, 1));
__m512 wtLo14 = _mm512_cvtph_ps(_mm512_castsi512_si256(wts3));
__m512 wtHi14 = _mm512_cvtph_ps(_mm512_extracti64x4_epi64(wts3, 1));
__m512 wtLo15 = _mm512_cvtph_ps(_mm512_castsi512_si256(wts4));
__m512 wtHi15 = _mm512_cvtph_ps(_mm512_extracti64x4_epi64(wts4, 1));
sum24 = _mm512_fmadd_ps(wtLo12, dat18, sum24);
sum25 = _mm512_fmadd_ps(wtHi12, dat18, sum25);
sum26 = _mm512_fmadd_ps(wtLo13, dat18, sum26);
sum27 = _mm512_fmadd_ps(wtHi13, dat18, sum27);
sum28 = _mm512_fmadd_ps(wtLo14, dat18, sum28);
sum29 = _mm512_fmadd_ps(wtHi14, dat18, sum29);
sum30 = _mm512_fmadd_ps(wtLo15, dat18, sum30);
sum31 = _mm512_fmadd_ps(wtHi15, dat18, sum31);
__m512i wts5 = _mm512_maskz_loadu_epi32(65535, wtPtr1+(ptrdiff_t)256+(ptrdiff_t)230400*i11+(ptrdiff_t)512*j7);
__m512i wts6 = _mm512_maskz_loadu_epi32(65535, wtPtr1+(ptrdiff_t)320+(ptrdiff_t)230400*i11+(ptrdiff_t)512*j7);
__m512i wts7 = _mm512_maskz_loadu_epi32(65535, wtPtr1+(ptrdiff_t)384+(ptrdiff_t)230400*i11+(ptrdiff_t)512*j7);
__m512i wts8 = _mm512_maskz_loadu_epi32(65535, wtPtr1+(ptrdiff_t)448+(ptrdiff_t)230400*i11+(ptrdiff_t)512*j7);
__m512 wtLo16 = _mm512_cvtph_ps(_mm512_castsi512_si256(wts5));
__m512 wtHi16 = _mm512_cvtph_ps(_mm512_extracti64x4_epi64(wts5, 1));
__m512 wtLo17 = _mm512_cvtph_ps(_mm512_castsi512_si256(wts6));
__m512 wtHi17 = _mm512_cvtph_ps(_mm512_extracti64x4_epi64(wts6, 1));
__m512 wtLo18 = _mm512_cvtph_ps(_mm512_castsi512_si256(wts7));
__m512 wtHi18 = _mm512_cvtph_ps(_mm512_extracti64x4_epi64(wts7, 1));
__m512 wtLo19 = _mm512_cvtph_ps(_mm512_castsi512_si256(wts8));
__m512 wtHi19 = _mm512_cvtph_ps(_mm512_extracti64x4_epi64(wts8, 1));
sum32 = _mm512_fmadd_ps(wtLo16, dat18, sum32);
sum33 = _mm512_fmadd_ps(wtHi16, dat18, sum33);
sum34 = _mm512_fmadd_ps(wtLo17, dat18, sum34);
sum35 = _mm512_fmadd_ps(wtHi17, dat18, sum35);
sum36 = _mm512_fmadd_ps(wtLo18, dat18, sum36);
sum37 = _mm512_fmadd_ps(wtHi18, dat18, sum37);
sum38 = _mm512_fmadd_ps(wtLo19, dat18, sum38);
sum39 = _mm512_fmadd_ps(wtHi19, dat18, sum39);
}
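// Reduce the sixteen accumulators to one dot product per lane, then
// run the fused epilogue: add the adjusted biases (bn3 is already
// folded in), clamp at zero for the ReLU, add the in3 residual, and
// apply the final BatchNorm (bn4) before storing.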
__m512 bias4 = _mm512_maskz_loadu_ps(65535, biasPtr1+(ptrdiff_t)0+(ptrdiff_t)64*i11);
__m512 dat17 = _mm512_maskz_loadu_ps(65535, datPtr2+(ptrdiff_t)0+(ptrdiff_t)64*i11);
__m512i pmMul4 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pmAdd4 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512 masLo1 = _mm512_loadu_ps(bnPtr3+(ptrdiff_t)8*(0+16*i11));
__m512 masHi1 = _mm512_maskz_loadu_ps(65535, bnPtr3+(ptrdiff_t)8*(0+16*i11)+(ptrdiff_t)64);
__m512 bnMul9 = _mm512_permutex2var_ps(masLo1, pmMul4, masHi1);
__m512 bnAdd9 = _mm512_permutex2var_ps(masLo1, pmAdd4, masHi1);
__m512i pm1Lo1 = _mm512_set_epi32(30, 14, 28, 12, 26, 10, 24, 8, 22, 6, 20, 4, 18, 2, 16, 0);
__m512i pm1Hi1 = _mm512_set_epi32(31, 15, 29, 13, 27, 11, 25, 9, 23, 7, 21, 5, 19, 3, 17, 1);
__m512i pm4Lo4 = _mm512_set_epi32(27, 26, 25, 24, 11, 10, 9, 8, 19, 18, 17, 16, 3, 2, 1, 0);
__m512i pm4Hi4 = _mm512_set_epi32(31, 30, 29, 28, 15, 14, 13, 12, 23, 22, 21, 20, 7, 6, 5, 4);
__m512 upper28 = _mm512_shuffle_f32x4(sum24, sum32, 238);
__m512 upper29 = _mm512_shuffle_f32x4(sum28, sum36, 238);
sum24 = _mm512_shuffle_f32x4(sum24, sum32, 68);
sum28 = _mm512_shuffle_f32x4(sum28, sum36, 68);
sum24 = _mm512_add_ps(sum24, upper28);
sum28 = _mm512_add_ps(sum28, upper29);
__m512 upper31 = _mm512_shuffle_f32x4(sum26, sum34, 238);
__m512 upper32 = _mm512_shuffle_f32x4(sum30, sum38, 238);
sum26 = _mm512_shuffle_f32x4(sum26, sum34, 68);
sum30 = _mm512_shuffle_f32x4(sum30, sum38, 68);
sum26 = _mm512_add_ps(sum26, upper31);
sum30 = _mm512_add_ps(sum30, upper32);
__m512 upper27 = _mm512_permutex2var_ps(sum24, pm4Hi4, sum28);
__m512 upper30 = _mm512_permutex2var_ps(sum26, pm4Hi4, sum30);
sum24 = _mm512_permutex2var_ps(sum24, pm4Lo4, sum28);
sum26 = _mm512_permutex2var_ps(sum26, pm4Lo4, sum30);
sum24 = _mm512_add_ps(sum24, upper27);
sum26 = _mm512_add_ps(sum26, upper30);
__m512 upper35 = _mm512_shuffle_f32x4(sum25, sum33, 238);
__m512 upper36 = _mm512_shuffle_f32x4(sum29, sum37, 238);
sum25 = _mm512_shuffle_f32x4(sum25, sum33, 68);
sum29 = _mm512_shuffle_f32x4(sum29, sum37, 68);
sum25 = _mm512_add_ps(sum25, upper35);
sum29 = _mm512_add_ps(sum29, upper36);
__m512 upper38 = _mm512_shuffle_f32x4(sum27, sum35, 238);
__m512 upper39 = _mm512_shuffle_f32x4(sum31, sum39, 238);
sum27 = _mm512_shuffle_f32x4(sum27, sum35, 68);
sum31 = _mm512_shuffle_f32x4(sum31, sum39, 68);
sum27 = _mm512_add_ps(sum27, upper38);
sum31 = _mm512_add_ps(sum31, upper39);
__m512 upper34 = _mm512_permutex2var_ps(sum25, pm4Hi4, sum29);
__m512 upper37 = _mm512_permutex2var_ps(sum27, pm4Hi4, sum31);
sum25 = _mm512_permutex2var_ps(sum25, pm4Lo4, sum29);
sum27 = _mm512_permutex2var_ps(sum27, pm4Lo4, sum31);
sum25 = _mm512_add_ps(sum25, upper34);
sum27 = _mm512_add_ps(sum27, upper37);
__m512 upper26 = _mm512_shuffle_ps(sum24, sum26, 238);
__m512 upper33 = _mm512_shuffle_ps(sum25, sum27, 238);
sum24 = _mm512_shuffle_ps(sum24, sum26, 68);
sum25 = _mm512_shuffle_ps(sum25, sum27, 68);
sum24 = _mm512_add_ps(sum24, upper26);
sum25 = _mm512_add_ps(sum25, upper33);
__m512 upper25 = _mm512_permutex2var_ps(sum24, pm1Hi1, sum25);
sum24 = _mm512_permutex2var_ps(sum24, pm1Lo1, sum25);
sum24 = _mm512_add_ps(sum24, upper25);
sum24 = _mm512_add_ps(sum24, bias4);
sum24 = _mm512_max_ps(_mm512_setzero_ps(), sum24);
sum24 = _mm512_add_ps(sum24, dat17);
sum24 = _mm512_fmadd_ps(sum24, bnMul9, bnAdd9);
_mm512_mask_storeu_ps(datPtr3+(ptrdiff_t)0+(ptrdiff_t)64*i11, 65535, sum24);
}
return;
}
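// Task 5: the remaining 14 output channels, using 56-byte (14-float)
// accesses and a correspondingly smaller set of accumulators.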
for (ptrdiff_t i12 = 0; i12 < 1; ++i12) {
__m512 sum40 = _mm512_setzero_ps();
__m512 sum41 = _mm512_setzero_ps();
__m512 sum42 = _mm512_setzero_ps();
__m512 sum43 = _mm512_setzero_ps();
__m512 sum44 = _mm512_setzero_ps();
__m512 sum45 = _mm512_setzero_ps();
__m512 sum46 = _mm512_setzero_ps();
__m512 sum47 = _mm512_setzero_ps();
__m512 sum48 = _mm512_setzero_ps();
__m512 sum49 = _mm512_setzero_ps();
__m512 sum50 = _mm512_setzero_ps();
__m512 sum51 = _mm512_setzero_ps();
__m512 sum52 = _mm512_setzero_ps();
__m512 sum53 = _mm512_setzero_ps();
for (ptrdiff_t j8 = 0; j8 < 450; ++j8) {
__m512i wts9 = _mm512_maskz_loadu_epi32(65535, wtPtr1+(ptrdiff_t)0+(ptrdiff_t)230400*i12+(ptrdiff_t)448*j8);
__m512 dat20 = _mm512_maskz_loadu_ps(65535, datPtr1+(ptrdiff_t)0+(ptrdiff_t)64*j8);
__m512i wts10 = _mm512_maskz_loadu_epi32(65535, wtPtr1+(ptrdiff_t)64+(ptrdiff_t)230400*i12+(ptrdiff_t)448*j8);
__m512i wts11 = _mm512_maskz_loadu_epi32(65535, wtPtr1+(ptrdiff_t)128+(ptrdiff_t)230400*i12+(ptrdiff_t)448*j8);
__m512i wts12 = _mm512_maskz_loadu_epi32(65535, wtPtr1+(ptrdiff_t)192+(ptrdiff_t)230400*i12+(ptrdiff_t)448*j8);
__m512 wtLo20 = _mm512_cvtph_ps(_mm512_castsi512_si256(wts9));
__m512 wtHi20 = _mm512_cvtph_ps(_mm512_extracti64x4_epi64(wts9, 1));
__m512 wtLo21 = _mm512_cvtph_ps(_mm512_castsi512_si256(wts10));
__m512 wtHi21 = _mm512_cvtph_ps(_mm512_extracti64x4_epi64(wts10, 1));
__m512 wtLo22 = _mm512_cvtph_ps(_mm512_castsi512_si256(wts11));
__m512 wtHi22 = _mm512_cvtph_ps(_mm512_extracti64x4_epi64(wts11, 1));
__m512 wtLo23 = _mm512_cvtph_ps(_mm512_castsi512_si256(wts12));
__m512 wtHi23 = _mm512_cvtph_ps(_mm512_extracti64x4_epi64(wts12, 1));
sum40 = _mm512_fmadd_ps(wtLo20, dat20, sum40);
sum41 = _mm512_fmadd_ps(wtHi20, dat20, sum41);
sum42 = _mm512_fmadd_ps(wtLo21, dat20, sum42);
sum43 = _mm512_fmadd_ps(wtHi21, dat20, sum43);
sum44 = _mm512_fmadd_ps(wtLo22, dat20, sum44);
sum45 = _mm512_fmadd_ps(wtHi22, dat20, sum45);
sum46 = _mm512_fmadd_ps(wtLo23, dat20, sum46);
sum47 = _mm512_fmadd_ps(wtHi23, dat20, sum47);
__m512i wts13 = _mm512_maskz_loadu_epi32(65535, wtPtr1+(ptrdiff_t)256+(ptrdiff_t)230400*i12+(ptrdiff_t)448*j8);
__m512i wts14 = _mm512_maskz_loadu_epi32(65535, wtPtr1+(ptrdiff_t)320+(ptrdiff_t)230400*i12+(ptrdiff_t)448*j8);
__m512i wts15 = _mm512_maskz_loadu_epi32(65535, wtPtr1+(ptrdiff_t)384+(ptrdiff_t)230400*i12+(ptrdiff_t)448*j8);
__m512 wtLo24 = _mm512_cvtph_ps(_mm512_castsi512_si256(wts13));
__m512 wtHi24 = _mm512_cvtph_ps(_mm512_extracti64x4_epi64(wts13, 1));
__m512 wtLo25 = _mm512_cvtph_ps(_mm512_castsi512_si256(wts14));
__m512 wtHi25 = _mm512_cvtph_ps(_mm512_extracti64x4_epi64(wts14, 1));
__m512 wtLo26 = _mm512_cvtph_ps(_mm512_castsi512_si256(wts15));
__m512 wtHi26 = _mm512_cvtph_ps(_mm512_extracti64x4_epi64(wts15, 1));
sum48 = _mm512_fmadd_ps(wtLo24, dat20, sum48);
sum49 = _mm512_fmadd_ps(wtHi24, dat20, sum49);
sum50 = _mm512_fmadd_ps(wtLo25, dat20, sum50);
sum51 = _mm512_fmadd_ps(wtHi25, dat20, sum51);
sum52 = _mm512_fmadd_ps(wtLo26, dat20, sum52);
sum53 = _mm512_fmadd_ps(wtHi26, dat20, sum53);
}
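// Same reduction and fused epilogue as above, narrowed to 14 lanes.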
__m512 bias5 = _mm512_maskz_loadu_ps(16383, biasPtr1+(ptrdiff_t)0+(ptrdiff_t)56*i12);
__m512 dat19 = _mm512_maskz_loadu_ps(16383, datPtr2+(ptrdiff_t)0+(ptrdiff_t)56*i12);
__m512i pmMul5 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pmAdd5 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512 masLo2 = _mm512_loadu_ps(bnPtr3+(ptrdiff_t)8*(0+14*i12));
__m512 masHi2 = _mm512_maskz_loadu_ps(4095, bnPtr3+(ptrdiff_t)8*(0+14*i12)+(ptrdiff_t)64);
__m512 bnMul10 = _mm512_permutex2var_ps(masLo2, pmMul5, masHi2);
__m512 bnAdd10 = _mm512_permutex2var_ps(masLo2, pmAdd5, masHi2);
__m512i pm1Lo2 = _mm512_set_epi32(30, 14, 28, 12, 26, 10, 24, 8, 22, 6, 20, 4, 18, 2, 16, 0);
__m512i pm1Hi2 = _mm512_set_epi32(31, 15, 29, 13, 27, 11, 25, 9, 23, 7, 21, 5, 19, 3, 17, 1);
__m512i pm4Lo5 = _mm512_set_epi32(27, 26, 25, 24, 11, 10, 9, 8, 19, 18, 17, 16, 3, 2, 1, 0);
__m512i pm4Hi5 = _mm512_set_epi32(31, 30, 29, 28, 15, 14, 13, 12, 23, 22, 21, 20, 7, 6, 5, 4);
__m512 upper43 = _mm512_shuffle_f32x4(sum40, sum48, 238);
__m512 upper44 = _mm512_shuffle_f32x4(sum44, sum52, 238);
sum40 = _mm512_shuffle_f32x4(sum40, sum48, 68);
sum44 = _mm512_shuffle_f32x4(sum44, sum52, 68);
sum40 = _mm512_add_ps(sum40, upper43);
sum44 = _mm512_add_ps(sum44, upper44);
__m512 upper46 = _mm512_shuffle_f32x4(sum42, sum50, 238);
__m512 upper47 = _mm512_shuffle_f32x4(sum46, sum46, 14);
sum42 = _mm512_shuffle_f32x4(sum42, sum50, 68);
sum42 = _mm512_add_ps(sum42, upper46);
sum46 = _mm512_add_ps(sum46, upper47);
__m512 upper42 = _mm512_permutex2var_ps(sum40, pm4Hi5, sum44);
__m512 upper45 = _mm512_permutex2var_ps(sum42, pm4Hi5, sum46);
sum40 = _mm512_permutex2var_ps(sum40, pm4Lo5, sum44);
sum42 = _mm512_permutex2var_ps(sum42, pm4Lo5, sum46);
sum40 = _mm512_add_ps(sum40, upper42);
sum42 = _mm512_add_ps(sum42, upper45);
__m512 upper50 = _mm512_shuffle_f32x4(sum41, sum49, 238);
__m512 upper51 = _mm512_shuffle_f32x4(sum45, sum53, 238);
sum41 = _mm512_shuffle_f32x4(sum41, sum49, 68);
sum45 = _mm512_shuffle_f32x4(sum45, sum53, 68);
sum41 = _mm512_add_ps(sum41, upper50);
sum45 = _mm512_add_ps(sum45, upper51);
__m512 upper53 = _mm512_shuffle_f32x4(sum43, sum51, 238);
__m512 upper54 = _mm512_shuffle_f32x4(sum47, sum47, 14);
sum43 = _mm512_shuffle_f32x4(sum43, sum51, 68);
sum43 = _mm512_add_ps(sum43, upper53);
sum47 = _mm512_add_ps(sum47, upper54);
__m512 upper49 = _mm512_permutex2var_ps(sum41, pm4Hi5, sum45);
__m512 upper52 = _mm512_permutex2var_ps(sum43, pm4Hi5, sum47);
sum41 = _mm512_permutex2var_ps(sum41, pm4Lo5, sum45);
sum43 = _mm512_permutex2var_ps(sum43, pm4Lo5, sum47);
sum41 = _mm512_add_ps(sum41, upper49);
sum43 = _mm512_add_ps(sum43, upper52);
__m512 upper41 = _mm512_shuffle_ps(sum40, sum42, 238);
__m512 upper48 = _mm512_shuffle_ps(sum41, sum43, 238);
sum40 = _mm512_shuffle_ps(sum40, sum42, 68);
sum41 = _mm512_shuffle_ps(sum41, sum43, 68);
sum40 = _mm512_add_ps(sum40, upper41);
sum41 = _mm512_add_ps(sum41, upper48);
__m512 upper40 = _mm512_permutex2var_ps(sum40, pm1Hi2, sum41);
sum40 = _mm512_permutex2var_ps(sum40, pm1Lo2, sum41);
sum40 = _mm512_add_ps(sum40, upper40);
sum40 = _mm512_add_ps(sum40, bias5);
sum40 = _mm512_max_ps(_mm512_setzero_ps(), sum40);
sum40 = _mm512_add_ps(sum40, dat19);
sum40 = _mm512_fmadd_ps(sum40, bnMul10, bnAdd10);
_mm512_mask_storeu_ps(datPtr3+(ptrdiff_t)0+(ptrdiff_t)56*i12, 16383, sum40);
}
}

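// Entry point for the apply pass: the same six-task dispatch as the
// rearrangement pass.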
static void Example34FcApply1(Example34ThreaderTeam1* team16, char** tensors5) {
Example34ThreaderTask1 task9;
task9.callee1 = Example34FcApply1Callee1;
task9.any1 = tensors5;
task9.nd1 = 1;
task9.hull1[0] = 6;
Example34ThreaderDo1(team16, &task9);
}

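// alloc1 is the raw allocation backing the arranged parameters;
// align1 is the same block rounded up to a 64-byte boundary and is the
// address actually used during inference.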
struct Example34Net {
char* alloc1;
char* align1;
};

void Example34NetDestroy(Example34Net* net2) {
free(net2->alloc1);
free(net2);
}

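// Build the Net: verify AVX512F support, simplify each BatchNorm to a
// per-channel multiplier/shift pair, and rearrange the fully connected
// weights into their folded FP16 form. The simplified bn2 and bn3
// parameters live only in a temporary arena because the rearrangement
// consumes them entirely; bn1 and bn4 are kept in the Net.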
char* Example34NetCreate(
Example34Net** net1,
Example34Params* params1,
ptrdiff_t threads1
) {
if (__builtin_expect(!__builtin_cpu_supports("avx512f"), 0)) {
return Example34Errmsg1(__LINE__, "CPU does not support AVX512F");
}
char* alloc3 = malloc(1355575);
if (__builtin_expect(!alloc3, 0)) {
return Example34Errmsg1(__LINE__, "errno %d", errno);
}
char* align3 = (void*)(((size_t)alloc3+63)&-64);
char* tmpAlloc1 = malloc(1583);
if (__builtin_expect(!tmpAlloc1, 0)) {
char* msg6 = Example34Errmsg1(__LINE__, "errno %d", errno);
free(alloc3);
return msg6;
}
char* tmpAlign1 = (void*)(((size_t)tmpAlloc1+63)&-64);
Example34ThreaderTeam1* team14 = 0;
char* err8 = Example34ThreaderCreate1(&team14, threads1);
if (__builtin_expect(!!err8, 0)) {
free(tmpAlloc1);
free(alloc3);
return err8;
}
{
Example34BnSimplify1(
params1->bn1Means,
params1->bn1Variances,
params1->bn1Scales,
params1->bn1Shifts,
align3+0
);
}
{
Example34BnSimplify2(
params1->bn4Means,
params1->bn4Variances,
params1->bn4Scales,
params1->bn4Shifts,
align3+768
);
Example34BnSimplify1(
params1->bn2Means,
params1->bn2Variances,
params1->bn2Scales,
params1->bn2Shifts,
tmpAlign1+0
);
Example34BnSimplify2(
params1->bn3Means,
params1->bn3Variances,
params1->bn3Scales,
params1->bn3Shifts,
tmpAlign1+768
);
char* tensors9[] = {
(char*)params1->fcWeights,
(char*)params1->fcBiases,
tmpAlign1+0,
tmpAlign1+768,
align3+1536
};
Example34FcArrange1(team14, tensors9);
}
Example34ThreaderDestroy1(team14);
free(tmpAlloc1);
Example34Net* net5 = malloc(sizeof(Example34Net));
if (__builtin_expect(!net5, 0)) {
char* msg7 = Example34Errmsg1(__LINE__, "errno %d", errno);
free(alloc3);
return msg7;
}
net5->alloc1 = alloc3;
net5->align1 = align3;
*net1 = net5;
return 0;
}

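// An Engine pairs a Net with a thread team and a scratch buffer
// (alloc2/align2) that holds intermediate tensors during inference.
// The Engine borrows the Net: Example34EngineDestroy does not free it.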
struct Example34Engine {
Example34Net* net3;
Example34ThreaderTeam1* team11;
char* alloc2;
char* align2;
};

char* Example34EnginePthreadT(
Example34Engine* eng2,
ptrdiff_t idx2,
pthread_t* to1
) {
return Example34ThreaderPthreadT1(to1, eng2->team11, idx2);
}

void Example34EngineDestroy(Example34Engine* eng3) {
Example34ThreaderDestroy1(eng3->team11);
free(eng3->alloc2);
free(eng3);
}

char* Example34EngineCreate(
Example34Engine** eng4,
Example34Net* net4,
ptrdiff_t threads2
) {
Example34Engine* eng5 = malloc(sizeof(Example34Engine));
if (__builtin_expect(!eng5, 0)) {
return Example34Errmsg1(__LINE__, "errno %d", errno);
}
char* alloc4 = malloc(28863);
if (__builtin_expect(!alloc4, 0)) {
char* msg5 = Example34Errmsg1(__LINE__, "errno %d", errno);
free(eng5);
return msg5;
}
eng5->alloc2 = alloc4;
eng5->align2 = (void*)(((size_t)alloc4+63)&-64);
char* err7 = Example34ThreaderCreate1(&eng5->team11, threads2);
if (__builtin_expect(!!err7, 0)) {
free(eng5);
free(alloc4);
return err7;
}
eng5->net3 = net4;
*eng4 = eng5;
return 0;
}

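// Run one inference. The first pass (Example34Elwi1) fuses bn1, the
// act1 activation, and the addition of in2, writing tensor add1 to the
// scratch buffer. Because bn2 is folded into the arranged weights, the
// second pass (Example34FcApply1) reads add1 directly and writes the
// final bn4 output.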
void Example34EngineInference(
Example34Engine* eng1,
float* bn4Data,
float* in1Data,
float* in2Data,
float* in3Data
) {
char* netAlign1 = eng1->net3->align1;
Example34ThreaderTeam1* team12 = eng1->team11;
char* align4 = eng1->align2;
{
char* tensors7[] = {
(char*)in1Data,
netAlign1+0,
(char*)in2Data,
align4+0
};
Example34Elwi1(team12, tensors7);
}
{
char* tensors8[] = {
netAlign1+1536,
align4+0,
(char*)in3Data,
netAlign1+768,
(char*)bn4Data
};
Example34FcApply1(team12, tensors8);
}
}
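
// Typical call sequence, shown as a minimal sketch: error handling and
// parameter loading are omitted, and params, in1, in2, and in3 stand
// for caller-provided buffers (per the input graph, in1 and in2 hold
// 90x16x5 floats and in3 holds 94 floats; the output holds 94 floats):
//
// Example34Net* net;
// Example34NetCreate(&net, params, 4);
// Example34Engine* eng;
// Example34EngineCreate(&eng, net, 4);
// float out[94];
// Example34EngineInference(eng, out, in1, in2, in3);
// Example34EngineDestroy(eng);
// Example34NetDestroy(net);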

// End of file.
