NN-512

Input graph file

Config Prefix=Example22 Platform=AVX512Float32 L1DataCachePerThread=32KiB L2CachePerThreadExL1=960KiB L3CachePerThreadExL1L2=1408KiB
Input ToTensor=in Channels=185 Height=35 Width=116
Pooling FromTensor=in ToTensor=out Kind=Avg2x2Stride2 PaddingH=1 PaddingW=0
Output FromTensor=out
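
For reference, the pooling layer fixes the output shape: with Kind=Avg2x2Stride2, PaddingH=1, and PaddingW=0 on the 185x35x116 input, the usual pooling size arithmetic gives

outH = floor((35 + 2*1 - 2)/2) + 1 = 18
outW = floor((116 + 2*0 - 2)/2) + 1 = 58

so the output tensor is 185x18x58, which matches the buffer sizes shown in the Example22.h usage comments below.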

Output Example22.h file

#pragma once

// NN-512 (https://NN-512.com)
//
// Copyright (C) 2019 [
// 37ef ced3 3727 60b4
// 3c29 f9c6 dc30 d518
// f4f3 4106 6964 cab4
// a06f c1a3 83fd 090e
// ]
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <pthread.h>
#include <stddef.h>

#ifdef __cplusplus
extern "C" { /**/
#endif

// All weights, biases, and other trained parameters are passed into
// the initialization code through the Params struct that is declared
// just below this comment. The corresponding struct definition can be
// found near the end of this header file.
//
// Each field of the Params struct is an array of float that holds a
// parameter tensor in NCHW format with no padding. The struct fields
// are ordered by name, lexically bytewise. If you concatenate all the
// trained parameter tensors to a file in this same format and order
// you can load the struct as follows (error checking omitted here):
//
// size_t size = sizeof(Example22Params);
// Example22Params* to = malloc(size);
// FILE* from = fopen("ParamsFile", "r");
// fread(to, size, 1, from);
// fclose(from);
//
// Be careful to match endianness (and floating point format).

typedef struct Example22Params Example22Params;

// The Net contains weights, biases, and other trained parameters in a
// form that enables efficient inference. It is created from the input
// parameter struct without modifying that struct. The input parameter
// struct is no longer needed once the Net has been created. Threads
// that are used to create the Net are temporary (in particular, those
// threads are not used for inference).
//
// Example22Params* params = malloc(sizeof(Example22Params));
//
// ... Load params (read from a file, perhaps) ...
//
// Example22Net* net; // For example, 4 threads:
// char* err = Example22NetCreate(&net, params, 4);
// free(params);
//
// if (err) { // Nonzero err indicates failure; net is unmodified.
// printf("%s\n", err); // Explain the failure, add a newline.
// free(err); // Free the error string to avoid a memory leak.
// exit(1); // Exit, or propagate the failure some other way.
// }
//
// ... Perform all inference that depends on net ...
//
// Example22NetDestroy(net);
//
// The Net can be shared and reused without restriction because it is
// never modified (not even temporarily) after being created. The Net
// should be destroyed (to free memory) once all dependent inference
// is complete.

typedef struct Example22Net Example22Net;

char* Example22NetCreate(
Example22Net**,
Example22Params*,
ptrdiff_t threads
);

void Example22NetDestroy(Example22Net*);

// An Engine performs inference. It contains inference threads, scratch
// memory, and a pointer to the Net. Any number of Engines can share the
// same Net (and perform inference in parallel) because the Net is never
// modified. For best performance the number of inference threads should
// not exceed the number of CPU cores.
//
// Example22Net* net;
//
// ... Create net ...
//
// Example22Engine* engine; // For example, 4 inference threads:
// char* err = Example22EngineCreate(&engine, net, 4);
//
// if (err) { // Nonzero err means failure; engine is unmodified.
// printf("%s\n", err); // Explain the failure, add a newline.
// free(err); // Free the error string to avoid a memory leak.
//
// ... Destroy net ...
//
// exit(1); // Exit, or propagate the failure some other way.
// }
//
// ... Use the POSIX threads API to adjust engine's threads ...
// ... Use engine to perform inference (dependent on net) ...
//
// Example22EngineDestroy(engine); // Terminate threads, free memory.
//
// ... Destroy net ...
//
// The POSIX threads API can be used to adjust an Engine's threads. If
// an Engine has N threads, those threads are indexed 0, 1, 2, ..., N-1
// and a pthread_t identifier is associated with each index. To set the
// CPU affinity mask for the first inference thread, for example:
//
// pthread_t thread; // The first thread has index 0:
// char* err = Example22EnginePthreadT(engine, 0, &thread);
//
// assert(!err); // Can only fail if the thread index is invalid.
//
// pthread_setaffinity_np(thread, ...); // Details omitted.
//
// The inference function reads floats from (one or more) input tensors
// and writes floats to (one or more) output tensors. All the input and
// output tensors are owned (allocated and freed) by the caller and are
// in CHW format, 32-bit floating point, fully packed (in other words,
// C has the largest pitch, W has the smallest pitch, and there is no
// padding anywhere).
//
// float* inData = malloc(sizeof(float)*185*35*116);
// float* outData = malloc(sizeof(float)*185*18*58);
//
// for (...) { // Reuse the input and output tensors.
//
// ... Write the input floats ...
//
// Example22EngineInference( // This function cannot fail.
// engine, // Pass an Engine as the first argument.
// inData, // The tensor arguments are sorted by name.
// outData
// );
//
// ... Read the output floats ...
//
// }
//
// free(inData);
// free(outData);
//
// The tensor parameters of the inference function are ordered by name,
// lexically bytewise. In other words, the function parameters have been
// sorted by name using Go's "<" string comparison operator (a bytewise
// lexical string sort).

typedef struct Example22Engine Example22Engine;

char* Example22EngineCreate(
Example22Engine**,
Example22Net*,
ptrdiff_t threads
);

char* Example22EnginePthreadT(
Example22Engine*,
ptrdiff_t threadIdx,
pthread_t* to
);

void Example22EngineInference(
Example22Engine*,
float* inData,
float* outData
);

void Example22EngineDestroy(Example22Engine*);

// The fields of the following struct have been sorted by name using
// Go's "<" string comparison operator (bytewise lexical string sort).
// Tensor dimensions are NxCxHxW where N is the outermost/slowest and
// W is the innermost/fastest. There is no padding anywhere.
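//
// Note that this particular graph has no trained parameters (it contains
// only an average-pooling layer), so the struct below has no fields.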

struct Example22Params {
} __attribute__((packed));

#ifdef __cplusplus
/**/ }
#endif

// End of file.
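
The calls documented in the header above compose directly. The driver below is an illustrative sketch, not part of the NN-512 output: the file name main.c, the link command, and the thread counts are arbitrary choices, and error handling is abbreviated. Because this graph has no trained parameters, there is no ParamsFile to load.

// main.c (illustrative driver, not generated by NN-512)
//
// Compile Example22.c as shown at the top of that file, then, for example:
// gcc -std=c99 -pthread main.c Example22.o -o example22

#include <stdio.h>
#include <stdlib.h>

#include "Example22.h"

int main(void) {
// The Params struct is empty for this graph, so there is nothing to read
// from disk before creating the Net.
Example22Params* params = malloc(sizeof(Example22Params));

Example22Net* net;
char* err = Example22NetCreate(&net, params, 4); // 4 creation threads
free(params);
if (err) { printf("%s\n", err); free(err); return 1; }

Example22Engine* engine;
err = Example22EngineCreate(&engine, net, 4); // 4 inference threads
if (err) { printf("%s\n", err); free(err); Example22NetDestroy(net); return 1; }

// Caller-owned tensors: CHW, 32-bit float, fully packed.
float* inData = calloc(185*35*116, sizeof(float));
float* outData = calloc(185*18*58, sizeof(float));

// ... Write the input floats ...

Example22EngineInference(engine, inData, outData); // Cannot fail.

// ... Read the output floats ...

free(inData);
free(outData);
Example22EngineDestroy(engine);
Example22NetDestroy(net);
return 0;
}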

Output Example22.c file

// To build an object file:
// gcc -c -w -std=c99 -pthread -Ofast -mavx512f Example22.c

// NN-512 (https://NN-512.com)
//
// Copyright (C) 2019 [
// 37ef ced3 3727 60b4
// 3c29 f9c6 dc30 d518
// f4f3 4106 6964 cab4
// a06f c1a3 83fd 090e
// ]
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <errno.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <immintrin.h>

#include "Example22.h"

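// Example22Errmsg1 formats a heap-allocated message of the form
// "Example22: line N: ...". The caller owns the returned string and is
// expected to free it, as shown in the header's error-handling examples.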
static char* Example22Errmsg1(ptrdiff_t lineNum1, char* format1, ...) {
char* msg1 = malloc(277);
int step1 = sprintf(msg1, "Example22: line %td: ", lineNum1);
va_list ap1;
va_start(ap1, format1);
vsnprintf(msg1+step1, 277-step1, format1, ap1);
va_end(ap1);
return msg1;
}

typedef struct Example22ThreaderTask1 Example22ThreaderTask1;
typedef void (*Example22ThreaderCallee1)(Example22ThreaderTask1*, int64_t*);
typedef struct Example22ThreaderHub1 Example22ThreaderHub1;
typedef struct Example22ThreaderNode1 Example22ThreaderNode1;
typedef struct Example22ThreaderUnwind1 Example22ThreaderUnwind1;
typedef struct Example22ThreaderTeam1 Example22ThreaderTeam1;

struct Example22ThreaderTask1 {
Example22ThreaderCallee1 callee1;
void* any1;
ptrdiff_t nd1;
int64_t hull1[4];
};

struct Example22ThreaderHub1 {
pthread_mutex_t mut1;
pthread_cond_t cond1;
ptrdiff_t pending1;
ptrdiff_t offset1;
long mask1;
long status1[];
};

struct Example22ThreaderNode1 {
pthread_mutex_t mut2;
int64_t np1;
int64_t pt1[4];
Example22ThreaderTask1* task1;
pthread_cond_t cond2;
Example22ThreaderTeam1* team1;
pthread_t thr1;
} __attribute__((aligned(64)));

struct Example22ThreaderUnwind1 {
ptrdiff_t join1;
ptrdiff_t nodeConds1;
ptrdiff_t nodeMuts1;
ptrdiff_t hubCond1;
ptrdiff_t hubMut1;
void* nodes1;
void* hub1;
};

struct Example22ThreaderTeam1 {
ptrdiff_t nt1;
Example22ThreaderHub1* hub2;
Example22ThreaderNode1* nodes2;
Example22ThreaderUnwind1 unwind1;
};

static void Example22ThreaderInc1(
ptrdiff_t nd2,
int64_t*restrict hull2,
int64_t*restrict pt2
) {
for (ptrdiff_t i1 = 0; i1 < nd2; ++i1) {
int64_t elem1 = pt2[i1];
if (++elem1 == hull2[i1]) {
pt2[i1] = 0;
} else {
pt2[i1] = elem1;
break;
}
}
}

static void Example22ThreaderPut1(
ptrdiff_t nd3,
int64_t*restrict hull3,
int64_t*restrict pt3,
int64_t val1
) {
ptrdiff_t i2 = 0;
for (; i2 < nd3 && val1; ) {
int64_t wrap1 = hull3[i2];
int64_t carry1 = val1/wrap1;
pt3[i2++] = val1-carry1*wrap1;
val1 = carry1;
}
for (; i2 < nd3; pt3[i2++] = 0);
}

static void Example22ThreaderAdd1(
ptrdiff_t nd4,
int64_t*restrict hull4,
int64_t*restrict pt4,
int64_t*restrict plus1,
int64_t carry2
) {
for (ptrdiff_t i3 = 0; i3 < nd4; ++i3) {
int64_t wrap2 = hull4[i3];
int64_t sum1 = pt4[i3]+plus1[i3]+carry2;
if (sum1 < wrap2) {
pt4[i3] = sum1;
carry2 = 0;
} else {
pt4[i3] = sum1-wrap2;
carry2 = 1;
}
}
}

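// Worker thread body. Each node first drains its own share of the current
// task's iteration space, then scans the hub's status bitmap and steals any
// slices still pending on other nodes. When no work remains it decrements
// the hub's pending count, signals the hub, and waits for the next task.
// A negative np1 (set by Example22ThreaderDestroy1) tells the thread to exit.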
static void* Example22ThreaderMain1(void* arg1) {
Example22ThreaderNode1* node1 = arg1;
Example22ThreaderTeam1* team2 = node1->team1;
ptrdiff_t nt2 = team2->nt1;
Example22ThreaderHub1* hub3 = team2->hub2;
Example22ThreaderNode1* nodes3 = team2->nodes2;
size_t role1 = node1-nodes3;
for (; __builtin_expect(pthread_mutex_lock(&node1->mut2), 0); );
for (; ; ) {
Example22ThreaderTask1* task2 = node1->task1;
if (!task2) {
for (; __builtin_expect(pthread_cond_wait(&node1->cond2, &node1->mut2), 0); );
continue;
}
int64_t np2 = node1->np1;
if (np2 < 0) {
for (; __builtin_expect(pthread_mutex_unlock(&node1->mut2), 0); );
return 0;
}
node1->task1 = 0;
Example22ThreaderCallee1 callee2 = task2->callee1;
ptrdiff_t nd5 = task2->nd1;
int64_t pt5[4];
for (; np2; np2 = node1->np1) {
memcpy(pt5, node1->pt1, sizeof(pt5));
node1->np1 = np2-1;
Example22ThreaderInc1(nd5, task2->hull1, node1->pt1);
for (; __builtin_expect(pthread_mutex_unlock(&node1->mut2), 0); );
callee2(task2, pt5);
for (; __builtin_expect(pthread_mutex_lock(&node1->mut2), 0); );
}
for (; __builtin_expect(pthread_mutex_unlock(&node1->mut2), 0); );
for (; __builtin_expect(pthread_mutex_lock(&hub3->mut1), 0); );
hub3->status1[role1/(sizeof(long)*8)] &= ~((long)1<<role1%(sizeof(long)*8));
ptrdiff_t offset2 = hub3->offset1;
long mask2 = hub3->mask1;
ptrdiff_t wrapped1 = 0;
for (; ; ) {
long hand1 = hub3->status1[offset2]&mask2;
if (!hand1) {
++offset2;
mask2 = -1;
continue;
}
ptrdiff_t target1 = offset2*(sizeof(long)*8)+__builtin_ctzl(hand1);
if (target1 == nt2) {
if (wrapped1) break;
offset2 = 0;
mask2 = -1;
wrapped1 = 1;
continue;
}
hand1 &= -hand1;
hub3->offset1 = offset2;
hub3->mask1 = mask2-hand1;
for (; __builtin_expect(pthread_mutex_unlock(&hub3->mut1), 0); );
Example22ThreaderNode1* node2 = nodes3+target1;
for (; __builtin_expect(pthread_mutex_lock(&node2->mut2), 0); );
for (np2 = node2->np1; np2; np2 = node2->np1) {
memcpy(pt5, node2->pt1, sizeof(pt5));
node2->np1 = np2-1;
Example22ThreaderInc1(nd5, task2->hull1, node2->pt1);
for (; __builtin_expect(pthread_mutex_unlock(&node2->mut2), 0); );
callee2(task2, pt5);
for (; __builtin_expect(pthread_mutex_lock(&node2->mut2), 0); );
}
for (; __builtin_expect(pthread_mutex_unlock(&node2->mut2), 0); );
for (; __builtin_expect(pthread_mutex_lock(&hub3->mut1), 0); );
hub3->status1[offset2] &= ~hand1;
offset2 = hub3->offset1;
mask2 = hub3->mask1;
wrapped1 = 0;
}
ptrdiff_t pending2 = --hub3->pending1;
for (; __builtin_expect(pthread_mutex_unlock(&hub3->mut1), 0); );
if (!pending2) for (; __builtin_expect(pthread_cond_signal(&hub3->cond1), 0); );
for (; __builtin_expect(pthread_mutex_lock(&node1->mut2), 0); );
}
}

static void Example22ThreaderDestroy1(Example22ThreaderTeam1* team3) {
if (!team3) return;
Example22ThreaderNode1* nodes4 = team3->nodes2;
Example22ThreaderNode1* stop1 = nodes4+team3->unwind1.join1;
for (Example22ThreaderNode1* node3 = nodes4; node3 != stop1; ++node3) {
for (; __builtin_expect(pthread_mutex_lock(&node3->mut2), 0); );
node3->np1 = -1;
node3->task1 = (Example22ThreaderTask1*)1;
for (; __builtin_expect(pthread_mutex_unlock(&node3->mut2), 0); );
for (; __builtin_expect(pthread_cond_signal(&node3->cond2), 0); );
}
for (Example22ThreaderNode1* node3 = nodes4; node3 != stop1; ++node3) {
for (; __builtin_expect(pthread_join(node3->thr1, 0), 0); );
}
stop1 = nodes4+team3->unwind1.nodeConds1;
for (Example22ThreaderNode1* node3 = nodes4; node3 != stop1; ++node3) {
for (; __builtin_expect(pthread_cond_destroy(&node3->cond2), 0); );
}
stop1 = nodes4+team3->unwind1.nodeMuts1;
for (Example22ThreaderNode1* node3 = nodes4; node3 != stop1; ++node3) {
for (; __builtin_expect(pthread_mutex_destroy(&node3->mut2), 0); );
}
Example22ThreaderHub1* hub4 = team3->hub2;
if (team3->unwind1.hubCond1) {
for (; __builtin_expect(pthread_cond_destroy(&hub4->cond1), 0); );
}
if (team3->unwind1.hubMut1) {
for (; __builtin_expect(pthread_mutex_destroy(&hub4->mut1), 0); );
}
free(team3->unwind1.nodes1);
free(team3->unwind1.hub1);
free(team3);
}

static char* Example22ThreaderCreate1Up4(Example22ThreaderTeam1* team8, ptrdiff_t nt7) {
Example22ThreaderNode1* nodes5 = team8->nodes2;
for (Example22ThreaderNode1* node4 = nodes5; node4 != nodes5+nt7; ++node4) {
int err2 = pthread_mutex_init(&node4->mut2, 0);
if (__builtin_expect(err2, 0)) {
char* msg2 = Example22Errmsg1(__LINE__, "errno %d", err2);
team8->unwind1.nodeMuts1 = node4-nodes5;
team8->unwind1.nodeConds1 = node4-nodes5;
team8->unwind1.join1 = node4-nodes5;
return msg2;
}
node4->task1 = 0;
int err3 = pthread_cond_init(&node4->cond2, 0);
if (__builtin_expect(err3, 0)) {
char* msg3 = Example22Errmsg1(__LINE__, "errno %d", err3);
team8->unwind1.nodeMuts1 = node4-nodes5+1;
team8->unwind1.nodeConds1 = node4-nodes5;
team8->unwind1.join1 = node4-nodes5;
return msg3;
}
node4->team1 = team8;
int err4 = pthread_create(&node4->thr1, 0, Example22ThreaderMain1, node4);
if (__builtin_expect(err4, 0)) {
char* msg4 = Example22Errmsg1(__LINE__, "errno %d", err4);
team8->unwind1.nodeMuts1 = node4-nodes5+1;
team8->unwind1.nodeConds1 = node4-nodes5+1;
team8->unwind1.join1 = node4-nodes5;
return msg4;
}
}
team8->unwind1.nodeMuts1 = nt7;
team8->unwind1.nodeConds1 = nt7;
team8->unwind1.join1 = nt7;
return 0;
}

static char* Example22ThreaderCreate1Up3(Example22ThreaderTeam1* team7, ptrdiff_t nt6) {
Example22ThreaderHub1* hub5 = team7->hub2;
int err5 = pthread_mutex_init(&hub5->mut1, 0);
if (__builtin_expect(err5, 0)) {
return Example22Errmsg1(__LINE__, "errno %d", err5);
}
team7->unwind1.hubMut1 = 1;
int err6 = pthread_cond_init(&hub5->cond1, 0);
if (__builtin_expect(err6, 0)) {
return Example22Errmsg1(__LINE__, "errno %d", err6);
}
team7->unwind1.hubCond1 = 1;
return Example22ThreaderCreate1Up4(team7, nt6);
}

static char* Example22ThreaderCreate1Up2(Example22ThreaderTeam1* team6, ptrdiff_t nt5) {
size_t size2 = nt5*sizeof(Example22ThreaderNode1);
if (__builtin_expect(size2/sizeof(Example22ThreaderNode1) != (size_t)nt5, 0)) {
return Example22Errmsg1(__LINE__, "too many threads");
}
void* addr3 = malloc(size2+63);
if (__builtin_expect(!addr3, 0)) {
return Example22Errmsg1(__LINE__, "errno %d", errno);
}
team6->unwind1.nodes1 = addr3;
team6->nodes2 = (void*)(((size_t)addr3+63)&-64);
return Example22ThreaderCreate1Up3(team6, nt5);
}

static char* Example22ThreaderCreate1Up1(Example22ThreaderTeam1* team5, ptrdiff_t nt4) {
team5->nt1 = nt4;
size_t size1 = sizeof(Example22ThreaderHub1);
size1 += sizeof(long)*((size_t)nt4/(sizeof(long)*8)+1);
size1 = (size1+63)&-64;
void* addr2 = malloc(size1+63);
if (__builtin_expect(!addr2, 0)) {
return Example22Errmsg1(__LINE__, "errno %d", errno);
}
team5->unwind1.hub1 = addr2;
team5->hub2 = (void*)(((size_t)addr2+63)&-64);
return Example22ThreaderCreate1Up2(team5, nt4);
}

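// Team construction. Example22ThreaderCreate1 allocates the team struct and
// delegates to the Up1..Up4 helpers above, which allocate the 64-byte-aligned
// hub and node arrays, initialize their mutexes and condition variables, and
// spawn one worker thread per node. The unwind1 fields record how far
// construction got so Example22ThreaderDestroy1 releases exactly that much.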
static char* Example22ThreaderCreate1(Example22ThreaderTeam1** team4, ptrdiff_t nt3) {
if (__builtin_expect(nt3 < 1, 0)) {
return Example22Errmsg1(__LINE__, "too few threads");
}
void* addr1 = calloc(1, sizeof(Example22ThreaderTeam1));
if (__builtin_expect(!addr1, 0)) {
return Example22Errmsg1(__LINE__, "errno %d", errno);
}
char* err1 = Example22ThreaderCreate1Up1(addr1, nt3);
if (__builtin_expect(!!err1, 0)) {
Example22ThreaderDestroy1(addr1);
} else {
*team4 = addr1;
}
return err1;
}

static char* Example22ThreaderPthreadT1(
pthread_t* thr2,
Example22ThreaderTeam1* team9,
ptrdiff_t idx1
) {
if (__builtin_expect(idx1 < 0 || idx1 >= team9->nt1, 0)) {
return Example22Errmsg1(__LINE__, "bad thread idx");
}
*thr2 = team9->nodes2[idx1].thr1;
return 0;
}

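// Example22ThreaderDo1 splits the task's iteration space (the product of the
// hull1 extents) as evenly as possible across the team's nodes, hands each
// node a starting coordinate and slice count, and then blocks on the hub's
// condition variable until every node has reported completion.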
static void Example22ThreaderDo1(Example22ThreaderTeam1* team10, Example22ThreaderTask1* task3) {
ptrdiff_t nd6 = task3->nd1;
if (nd6 < 1) return;
int64_t tot1 = task3->hull1[0];
for (ptrdiff_t i4 = 1; i4 < nd6; tot1 *= task3->hull1[i4++]);
ptrdiff_t nt8 = team10->nt1;
int64_t each1 = tot1/nt8;
ptrdiff_t more1 = tot1%nt8;
int64_t plus2[4];
Example22ThreaderPut1(nd6, task3->hull1, plus2, each1);
int64_t pt6[4] = {0};
Example22ThreaderHub1* hub6 = team10->hub2;
for (; __builtin_expect(pthread_mutex_lock(&hub6->mut1), 0); );
Example22ThreaderNode1* node5 = team10->nodes2;
for (ptrdiff_t i4 = 0; ; ++node5) {
for (; __builtin_expect(pthread_mutex_lock(&node5->mut2), 0); );
int64_t carry3 = i4 < more1;
node5->np1 = each1+carry3;
memcpy(node5->pt1, pt6, sizeof(pt6));
node5->task1 = task3;
for (; __builtin_expect(pthread_mutex_unlock(&node5->mut2), 0); );
for (; __builtin_expect(pthread_cond_signal(&node5->cond2), 0); );
if (++i4 == nt8) break;
Example22ThreaderAdd1(nd6, task3->hull1, pt6, plus2, carry3);
}
hub6->offset1 = 0;
hub6->mask1 = -1;
for (ptrdiff_t i4 = (size_t)nt8/(sizeof(long)*8); i4 >= 0; ) {
hub6->status1[i4--] = -1;
}
for (hub6->pending1 = nt8; hub6->pending1; ) {
for (; __builtin_expect(pthread_cond_wait(&hub6->cond1, &hub6->mut1), 0); );
}
for (; __builtin_expect(pthread_mutex_unlock(&hub6->mut1), 0); );
}

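// Vectorized exp(x): clamp the argument, split x as n*ln2 + f with
// n = round(x*log2(e)), approximate exp(f) with a degree-4 polynomial, and
// scale by 2^n by adding n<<23 to the bit pattern of the polynomial result.
// Neither this helper nor Example22Rsqrt1 below is called by this
// pooling-only graph; they are part of the generated support code.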
static __m512 Example22Exp1(__m512 x1) {
x1 = _mm512_max_ps(x1, _mm512_set1_ps(-8.733654e+01f));
x1 = _mm512_min_ps(x1, _mm512_set1_ps(8.872284e+01f));
__m512 t1 = _mm512_mul_ps(x1, _mm512_set1_ps(1.442695e+00f));
__m512 r1 = _mm512_roundscale_ps(t1, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
__m512 f1 = _mm512_fmadd_ps(r1, _mm512_set1_ps(-6.9314575e-01f), x1);
f1 = _mm512_fmadd_ps(r1, _mm512_set1_ps(-1.4286068e-06f), f1);
__m512 g1 = _mm512_set1_ps(4.194439e-02f);
g1 = _mm512_fmadd_ps(g1, f1, _mm512_set1_ps(1.6800667e-01f));
g1 = _mm512_fmadd_ps(g1, f1, _mm512_set1_ps(4.9999994e-01f));
g1 = _mm512_fmadd_ps(g1, f1, _mm512_set1_ps(9.999569e-01f));
g1 = _mm512_fmadd_ps(g1, f1, _mm512_set1_ps(9.9999964e-01f));
__m512i y1 = _mm512_slli_epi32(_mm512_cvtps_epi32(t1), 23);
return _mm512_castsi512_ps(_mm512_add_epi32(y1, _mm512_castps_si512(g1)));
}

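// Vectorized reciprocal square root: the _mm512_rsqrt14_ps estimate refined
// with one Newton-Raphson step, 0.5*y*(3 - x*y*y).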
static __m512 Example22Rsqrt1(__m512 x2) {
__m512 y2 = _mm512_rsqrt14_ps(x2);
__m512 z1 = _mm512_mul_ps(x2, y2);
__m512 a1 = _mm512_mul_ps(y2, _mm512_set1_ps(5e-01f));
__m512 b1 = _mm512_fnmadd_ps(y2, z1, _mm512_set1_ps(3e+00f));
return _mm512_mul_ps(a1, b1);
}

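// One slice of the Avg2x2Stride2 pooling. pt7[1] selects a pair of input
// channels (the final slice, c1 == 92, covers the single leftover channel of
// the 185). Adjacent input rows are summed, even and odd columns are
// deinterleaved with _mm512_permutex2var_ps and summed, and the result is
// scaled by 1/4, or by 1/2 on the first output row, where PaddingH=1 leaves
// only one contributing input row (padding is excluded from the average).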
static void Example22Twopl1Callee1(Example22ThreaderTask1* task4, int64_t* pt7) {
char** tensors2 = task4->any1;
ptrdiff_t b2 = pt7[0];
ptrdiff_t c1 = pt7[1];
char*restrict ptr1 = tensors2[0]-(ptrdiff_t)464+(ptrdiff_t)16704*b2+(ptrdiff_t)32480*c1;
char*restrict ptr2 = tensors2[1]+(ptrdiff_t)4176*b2+(ptrdiff_t)8352*c1;
if (c1 < 92) {
for (ptrdiff_t i5 = 0; i5 < 2; ++i5) {
for (ptrdiff_t k1 = 0; k1 < 3; ++k1) {
__m512 dat1 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)464+(ptrdiff_t)16240*i5+(ptrdiff_t)928*0+(ptrdiff_t)128*k1);
__m512 dat2 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)528+(ptrdiff_t)16240*i5+(ptrdiff_t)928*0+(ptrdiff_t)128*k1);
__m512i pmLo1 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pmHi1 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512 hi1 = _mm512_permutex2var_ps(dat1, pmHi1, dat2);
dat1 = _mm512_permutex2var_ps(dat1, pmLo1, dat2);
dat1 = _mm512_add_ps(dat1, hi1);
dat1 = _mm512_mul_ps(dat1, _mm512_set1_ps(5e-01f));
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)0+(ptrdiff_t)4176*i5+(ptrdiff_t)232*0+(ptrdiff_t)64*k1, 65535, dat1);
}
__m512 dat3 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)464+(ptrdiff_t)16240*i5+(ptrdiff_t)928*0+(ptrdiff_t)128*3);
__m512 dat4 = _mm512_maskz_loadu_ps(15, ptr1+(ptrdiff_t)528+(ptrdiff_t)16240*i5+(ptrdiff_t)928*0+(ptrdiff_t)128*3);
__m512i pmLo2 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pmHi2 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512 hi2 = _mm512_permutex2var_ps(dat3, pmHi2, dat4);
dat3 = _mm512_permutex2var_ps(dat3, pmLo2, dat4);
dat3 = _mm512_add_ps(dat3, hi2);
dat3 = _mm512_mul_ps(dat3, _mm512_set1_ps(5e-01f));
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)0+(ptrdiff_t)4176*i5+(ptrdiff_t)232*0+(ptrdiff_t)64*3, 1023, dat3);
for (ptrdiff_t j1 = 1; j1 < 18; ++j1) {
for (ptrdiff_t k2 = 0; k2 < 3; ++k2) {
__m512 dat5 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)16240*i5+(ptrdiff_t)928*j1+(ptrdiff_t)128*k2);
__m512 dat6 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)64+(ptrdiff_t)16240*i5+(ptrdiff_t)928*j1+(ptrdiff_t)128*k2);
__m512 dat7 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)464+(ptrdiff_t)16240*i5+(ptrdiff_t)928*j1+(ptrdiff_t)128*k2);
__m512 dat8 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)528+(ptrdiff_t)16240*i5+(ptrdiff_t)928*j1+(ptrdiff_t)128*k2);
dat5 = _mm512_add_ps(dat5, dat7);
dat6 = _mm512_add_ps(dat6, dat8);
__m512i pmLo3 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pmHi3 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512 hi3 = _mm512_permutex2var_ps(dat5, pmHi3, dat6);
dat5 = _mm512_permutex2var_ps(dat5, pmLo3, dat6);
dat5 = _mm512_add_ps(dat5, hi3);
dat5 = _mm512_mul_ps(dat5, _mm512_set1_ps(2.5e-01f));
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)0+(ptrdiff_t)4176*i5+(ptrdiff_t)232*j1+(ptrdiff_t)64*k2, 65535, dat5);
}
__m512 dat9 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)16240*i5+(ptrdiff_t)928*j1+(ptrdiff_t)128*3);
__m512 dat10 = _mm512_maskz_loadu_ps(15, ptr1+(ptrdiff_t)64+(ptrdiff_t)16240*i5+(ptrdiff_t)928*j1+(ptrdiff_t)128*3);
__m512 dat11 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)464+(ptrdiff_t)16240*i5+(ptrdiff_t)928*j1+(ptrdiff_t)128*3);
__m512 dat12 = _mm512_maskz_loadu_ps(15, ptr1+(ptrdiff_t)528+(ptrdiff_t)16240*i5+(ptrdiff_t)928*j1+(ptrdiff_t)128*3);
dat9 = _mm512_add_ps(dat9, dat11);
dat10 = _mm512_add_ps(dat10, dat12);
__m512i pmLo4 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pmHi4 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512 hi4 = _mm512_permutex2var_ps(dat9, pmHi4, dat10);
dat9 = _mm512_permutex2var_ps(dat9, pmLo4, dat10);
dat9 = _mm512_add_ps(dat9, hi4);
dat9 = _mm512_mul_ps(dat9, _mm512_set1_ps(2.5e-01f));
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)0+(ptrdiff_t)4176*i5+(ptrdiff_t)232*j1+(ptrdiff_t)64*3, 1023, dat9);
}
}
return;
}
for (ptrdiff_t i6 = 0; i6 < 1; ++i6) {
for (ptrdiff_t k3 = 0; k3 < 3; ++k3) {
__m512 dat13 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)464+(ptrdiff_t)16240*i6+(ptrdiff_t)928*0+(ptrdiff_t)128*k3);
__m512 dat14 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)528+(ptrdiff_t)16240*i6+(ptrdiff_t)928*0+(ptrdiff_t)128*k3);
__m512i pmLo5 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pmHi5 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512 hi5 = _mm512_permutex2var_ps(dat13, pmHi5, dat14);
dat13 = _mm512_permutex2var_ps(dat13, pmLo5, dat14);
dat13 = _mm512_add_ps(dat13, hi5);
dat13 = _mm512_mul_ps(dat13, _mm512_set1_ps(5e-01f));
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)0+(ptrdiff_t)4176*i6+(ptrdiff_t)232*0+(ptrdiff_t)64*k3, 65535, dat13);
}
__m512 dat15 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)464+(ptrdiff_t)16240*i6+(ptrdiff_t)928*0+(ptrdiff_t)128*3);
__m512 dat16 = _mm512_maskz_loadu_ps(15, ptr1+(ptrdiff_t)528+(ptrdiff_t)16240*i6+(ptrdiff_t)928*0+(ptrdiff_t)128*3);
__m512i pmLo6 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pmHi6 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512 hi6 = _mm512_permutex2var_ps(dat15, pmHi6, dat16);
dat15 = _mm512_permutex2var_ps(dat15, pmLo6, dat16);
dat15 = _mm512_add_ps(dat15, hi6);
dat15 = _mm512_mul_ps(dat15, _mm512_set1_ps(5e-01f));
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)0+(ptrdiff_t)4176*i6+(ptrdiff_t)232*0+(ptrdiff_t)64*3, 1023, dat15);
for (ptrdiff_t j2 = 1; j2 < 18; ++j2) {
for (ptrdiff_t k4 = 0; k4 < 3; ++k4) {
__m512 dat17 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)16240*i6+(ptrdiff_t)928*j2+(ptrdiff_t)128*k4);
__m512 dat18 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)64+(ptrdiff_t)16240*i6+(ptrdiff_t)928*j2+(ptrdiff_t)128*k4);
__m512 dat19 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)464+(ptrdiff_t)16240*i6+(ptrdiff_t)928*j2+(ptrdiff_t)128*k4);
__m512 dat20 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)528+(ptrdiff_t)16240*i6+(ptrdiff_t)928*j2+(ptrdiff_t)128*k4);
dat17 = _mm512_add_ps(dat17, dat19);
dat18 = _mm512_add_ps(dat18, dat20);
__m512i pmLo7 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pmHi7 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512 hi7 = _mm512_permutex2var_ps(dat17, pmHi7, dat18);
dat17 = _mm512_permutex2var_ps(dat17, pmLo7, dat18);
dat17 = _mm512_add_ps(dat17, hi7);
dat17 = _mm512_mul_ps(dat17, _mm512_set1_ps(2.5e-01f));
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)0+(ptrdiff_t)4176*i6+(ptrdiff_t)232*j2+(ptrdiff_t)64*k4, 65535, dat17);
}
__m512 dat21 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)0+(ptrdiff_t)16240*i6+(ptrdiff_t)928*j2+(ptrdiff_t)128*3);
__m512 dat22 = _mm512_maskz_loadu_ps(15, ptr1+(ptrdiff_t)64+(ptrdiff_t)16240*i6+(ptrdiff_t)928*j2+(ptrdiff_t)128*3);
__m512 dat23 = _mm512_maskz_loadu_ps(65535, ptr1+(ptrdiff_t)464+(ptrdiff_t)16240*i6+(ptrdiff_t)928*j2+(ptrdiff_t)128*3);
__m512 dat24 = _mm512_maskz_loadu_ps(15, ptr1+(ptrdiff_t)528+(ptrdiff_t)16240*i6+(ptrdiff_t)928*j2+(ptrdiff_t)128*3);
dat21 = _mm512_add_ps(dat21, dat23);
dat22 = _mm512_add_ps(dat22, dat24);
__m512i pmLo8 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
__m512i pmHi8 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1);
__m512 hi8 = _mm512_permutex2var_ps(dat21, pmHi8, dat22);
dat21 = _mm512_permutex2var_ps(dat21, pmLo8, dat22);
dat21 = _mm512_add_ps(dat21, hi8);
dat21 = _mm512_mul_ps(dat21, _mm512_set1_ps(2.5e-01f));
_mm512_mask_storeu_ps(ptr2+(ptrdiff_t)0+(ptrdiff_t)4176*i6+(ptrdiff_t)232*j2+(ptrdiff_t)64*3, 1023, dat21);
}
}
}

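// Example22Twopl1 submits the pooling kernel as a two-dimensional task with
// hull 1x93: 93 channel slices (92 pairs plus the leftover channel),
// distributed across the Engine's inference threads by Example22ThreaderDo1.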
static void Example22Twopl1(Example22ThreaderTeam1* team13, char** tensors1) {
Example22ThreaderTask1 task5;
task5.callee1 = Example22Twopl1Callee1;
task5.any1 = tensors1;
task5.nd1 = 2;
task5.hull1[0] = 1;
task5.hull1[1] = 93;
Example22ThreaderDo1(team13, &task5);
}

struct Example22Net {
char* alloc1;
char* align1;
};

void Example22NetDestroy(Example22Net* net2) {
free(net2->alloc1);
free(net2);
}

char* Example22NetCreate(
Example22Net** net1,
Example22Params* params1,
ptrdiff_t threads1
) {
(void)params1;
(void)threads1;
if (__builtin_expect(!__builtin_cpu_supports("avx512f"), 0)) {
return Example22Errmsg1(__LINE__, "CPU does not support AVX512F");
}
Example22Net* net5 = malloc(sizeof(Example22Net));
if (__builtin_expect(!net5, 0)) {
return Example22Errmsg1(__LINE__, "errno %d", errno);
}
net5->alloc1 = 0;
net5->align1 = 0;
*net1 = net5;
return 0;
}

struct Example22Engine {
Example22Net* net3;
Example22ThreaderTeam1* team11;
char* alloc2;
char* align2;
};

char* Example22EnginePthreadT(
Example22Engine* eng2,
ptrdiff_t idx2,
pthread_t* to1
) {
return Example22ThreaderPthreadT1(to1, eng2->team11, idx2);
}

void Example22EngineDestroy(Example22Engine* eng3) {
Example22ThreaderDestroy1(eng3->team11);
free(eng3->alloc2);
free(eng3);
}

char* Example22EngineCreate(
Example22Engine** eng4,
Example22Net* net4,
ptrdiff_t threads2
) {
Example22Engine* eng5 = malloc(sizeof(Example22Engine));
if (__builtin_expect(!eng5, 0)) {
return Example22Errmsg1(__LINE__, "errno %d", errno);
}
char* alloc3 = malloc(63);
if (__builtin_expect(!alloc3, 0)) {
char* msg5 = Example22Errmsg1(__LINE__, "errno %d", errno);
free(eng5);
return msg5;
}
eng5->alloc2 = alloc3;
eng5->align2 = (void*)(((size_t)alloc3+63)&-64);
char* err7 = Example22ThreaderCreate1(&eng5->team11, threads2);
if (__builtin_expect(!!err7, 0)) {
free(eng5);
free(alloc3);
return err7;
}
eng5->net3 = net4;
*eng4 = eng5;
return 0;
}

void Example22EngineInference(
Example22Engine* eng1,
float* inData,
float* outData
) {
Example22ThreaderTeam1* team12 = eng1->team11;
{
char* tensors3[] = {
(char*)inData,
(char*)outData
};
Example22Twopl1(team12, tensors3);
}
}

// End of file.
