kernels.JCudaVec_kernels_float_64_cc20.ptx
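PTX source for the JCudaVec single-precision, element-wise vector kernels (compiled for sm_20 with 64-bit addressing); each kernel is prefaced below with a short comment describing the per-element operation it performs.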

//
// Generated by NVIDIA NVVM Compiler
//
// Compiler Build ID: CL-19805474
// Cuda compilation tools, release 7.5, V7.5.16
// Based on LLVM 3.4svn
//

.version 4.3
.target sm_20
.address_size 64

	// .globl	vec_setf
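	// vec_setf(n, out, value): out[i] = value for each global thread index i = ctaid.x*ntid.x + tid.x with i < n
	// (param_0 = element count, param_1 = output pointer, param_2 = fill value)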
.const .align 4 .b8 __cudart_i2opi_f[24] = {65, 144, 67, 60, 153, 149, 98, 219, 192, 221, 52, 245, 209, 87, 39, 252, 41, 21, 68, 78, 110, 131, 249, 162};

.visible .entry vec_setf(
	.param .u64 vec_setf_param_0,
	.param .u64 vec_setf_param_1,
	.param .f32 vec_setf_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<2>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<7>;


	ld.param.u64 	%rd2, [vec_setf_param_0];
	ld.param.u64 	%rd1, [vec_setf_param_1];
	ld.param.f32 	%f1, [vec_setf_param_2];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd3, %r1;
	setp.ge.u64	%p1, %rd3, %rd2;
	@%p1 bra 	BB0_2;

	cvta.to.global.u64 	%rd4, %rd1;
	mul.wide.s32 	%rd5, %r1, 4;
	add.s64 	%rd6, %rd4, %rd5;
	st.global.f32 	[%rd6], %f1;

BB0_2:
	ret;
}

	// .globl	vec_addf
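	// vec_addf(n, out, x, y): out[i] = x[i] + y[i] for i < n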
.visible .entry vec_addf(
	.param .u64 vec_addf_param_0,
	.param .u64 vec_addf_param_1,
	.param .u64 vec_addf_param_2,
	.param .u64 vec_addf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<13>;


	ld.param.u64 	%rd4, [vec_addf_param_0];
	ld.param.u64 	%rd1, [vec_addf_param_1];
	ld.param.u64 	%rd2, [vec_addf_param_2];
	ld.param.u64 	%rd3, [vec_addf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd5, %r1;
	setp.ge.u64	%p1, %rd5, %rd4;
	@%p1 bra 	BB1_2;

	cvta.to.global.u64 	%rd6, %rd2;
	mul.wide.s32 	%rd7, %r1, 4;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd3;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd10];
	ld.global.f32 	%f2, [%rd8];
	add.f32 	%f3, %f2, %f1;
	cvta.to.global.u64 	%rd11, %rd1;
	add.s64 	%rd12, %rd11, %rd7;
	st.global.f32 	[%rd12], %f3;

BB1_2:
	ret;
}

	// .globl	vec_subf
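	// vec_subf(n, out, x, y): out[i] = x[i] - y[i] for i < n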
.visible .entry vec_subf(
	.param .u64 vec_subf_param_0,
	.param .u64 vec_subf_param_1,
	.param .u64 vec_subf_param_2,
	.param .u64 vec_subf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<13>;


	ld.param.u64 	%rd4, [vec_subf_param_0];
	ld.param.u64 	%rd1, [vec_subf_param_1];
	ld.param.u64 	%rd2, [vec_subf_param_2];
	ld.param.u64 	%rd3, [vec_subf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd5, %r1;
	setp.ge.u64	%p1, %rd5, %rd4;
	@%p1 bra 	BB2_2;

	cvta.to.global.u64 	%rd6, %rd2;
	mul.wide.s32 	%rd7, %r1, 4;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd3;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd10];
	ld.global.f32 	%f2, [%rd8];
	sub.f32 	%f3, %f2, %f1;
	cvta.to.global.u64 	%rd11, %rd1;
	add.s64 	%rd12, %rd11, %rd7;
	st.global.f32 	[%rd12], %f3;

BB2_2:
	ret;
}

	// .globl	vec_mulf
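	// vec_mulf(n, out, x, y): out[i] = x[i] * y[i] for i < n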
.visible .entry vec_mulf(
	.param .u64 vec_mulf_param_0,
	.param .u64 vec_mulf_param_1,
	.param .u64 vec_mulf_param_2,
	.param .u64 vec_mulf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<13>;


	ld.param.u64 	%rd4, [vec_mulf_param_0];
	ld.param.u64 	%rd1, [vec_mulf_param_1];
	ld.param.u64 	%rd2, [vec_mulf_param_2];
	ld.param.u64 	%rd3, [vec_mulf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd5, %r1;
	setp.ge.u64	%p1, %rd5, %rd4;
	@%p1 bra 	BB3_2;

	cvta.to.global.u64 	%rd6, %rd2;
	mul.wide.s32 	%rd7, %r1, 4;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd3;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd10];
	ld.global.f32 	%f2, [%rd8];
	mul.f32 	%f3, %f2, %f1;
	cvta.to.global.u64 	%rd11, %rd1;
	add.s64 	%rd12, %rd11, %rd7;
	st.global.f32 	[%rd12], %f3;

BB3_2:
	ret;
}

	// .globl	vec_divf
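	// vec_divf(n, out, x, y): out[i] = x[i] / y[i] for i < n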
.visible .entry vec_divf(
	.param .u64 vec_divf_param_0,
	.param .u64 vec_divf_param_1,
	.param .u64 vec_divf_param_2,
	.param .u64 vec_divf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<13>;


	ld.param.u64 	%rd5, [vec_divf_param_0];
	ld.param.u64 	%rd2, [vec_divf_param_1];
	ld.param.u64 	%rd3, [vec_divf_param_2];
	ld.param.u64 	%rd4, [vec_divf_param_3];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd5;
	@%p1 bra 	BB4_2;

	cvta.to.global.u64 	%rd6, %rd3;
	shl.b64 	%rd7, %rd1, 2;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd4;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd10];
	ld.global.f32 	%f2, [%rd8];
	div.rn.f32 	%f3, %f2, %f1;
	cvta.to.global.u64 	%rd11, %rd2;
	add.s64 	%rd12, %rd11, %rd7;
	st.global.f32 	[%rd12], %f3;

BB4_2:
	ret;
}

	// .globl	vec_negatef
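	// vec_negatef(n, out, x): out[i] = -x[i] for i < n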
.visible .entry vec_negatef(
	.param .u64 vec_negatef_param_0,
	.param .u64 vec_negatef_param_1,
	.param .u64 vec_negatef_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd3, [vec_negatef_param_0];
	ld.param.u64 	%rd1, [vec_negatef_param_1];
	ld.param.u64 	%rd2, [vec_negatef_param_2];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd4, %r1;
	setp.ge.u64	%p1, %rd4, %rd3;
	@%p1 bra 	BB5_2;

	cvta.to.global.u64 	%rd5, %rd2;
	mul.wide.s32 	%rd6, %r1, 4;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	neg.f32 	%f2, %f1;
	cvta.to.global.u64 	%rd8, %rd1;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f2;

BB5_2:
	ret;
}

	// .globl	vec_addScalarf
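	// vec_addScalarf(n, out, x, s): out[i] = x[i] + s for i < n (scalar s is param_3)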
.visible .entry vec_addScalarf(
	.param .u64 vec_addScalarf_param_0,
	.param .u64 vec_addScalarf_param_1,
	.param .u64 vec_addScalarf_param_2,
	.param .f32 vec_addScalarf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd3, [vec_addScalarf_param_0];
	ld.param.u64 	%rd1, [vec_addScalarf_param_1];
	ld.param.u64 	%rd2, [vec_addScalarf_param_2];
	ld.param.f32 	%f1, [vec_addScalarf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd4, %r1;
	setp.ge.u64	%p1, %rd4, %rd3;
	@%p1 bra 	BB6_2;

	cvta.to.global.u64 	%rd5, %rd2;
	mul.wide.s32 	%rd6, %r1, 4;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f2, [%rd7];
	add.f32 	%f3, %f2, %f1;
	cvta.to.global.u64 	%rd8, %rd1;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f3;

BB6_2:
	ret;
}

	// .globl	vec_subScalarf
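	// vec_subScalarf(n, out, x, s): out[i] = x[i] - s for i < n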
.visible .entry vec_subScalarf(
	.param .u64 vec_subScalarf_param_0,
	.param .u64 vec_subScalarf_param_1,
	.param .u64 vec_subScalarf_param_2,
	.param .f32 vec_subScalarf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd3, [vec_subScalarf_param_0];
	ld.param.u64 	%rd1, [vec_subScalarf_param_1];
	ld.param.u64 	%rd2, [vec_subScalarf_param_2];
	ld.param.f32 	%f1, [vec_subScalarf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd4, %r1;
	setp.ge.u64	%p1, %rd4, %rd3;
	@%p1 bra 	BB7_2;

	cvta.to.global.u64 	%rd5, %rd2;
	mul.wide.s32 	%rd6, %r1, 4;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f2, [%rd7];
	sub.f32 	%f3, %f2, %f1;
	cvta.to.global.u64 	%rd8, %rd1;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f3;

BB7_2:
	ret;
}

	// .globl	vec_mulScalarf
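	// vec_mulScalarf(n, out, x, s): out[i] = x[i] * s for i < n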
.visible .entry vec_mulScalarf(
	.param .u64 vec_mulScalarf_param_0,
	.param .u64 vec_mulScalarf_param_1,
	.param .u64 vec_mulScalarf_param_2,
	.param .f32 vec_mulScalarf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd3, [vec_mulScalarf_param_0];
	ld.param.u64 	%rd1, [vec_mulScalarf_param_1];
	ld.param.u64 	%rd2, [vec_mulScalarf_param_2];
	ld.param.f32 	%f1, [vec_mulScalarf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd4, %r1;
	setp.ge.u64	%p1, %rd4, %rd3;
	@%p1 bra 	BB8_2;

	cvta.to.global.u64 	%rd5, %rd2;
	mul.wide.s32 	%rd6, %r1, 4;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f2, [%rd7];
	mul.f32 	%f3, %f2, %f1;
	cvta.to.global.u64 	%rd8, %rd1;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f3;

BB8_2:
	ret;
}

	// .globl	vec_divScalarf
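	// vec_divScalarf(n, out, x, s): out[i] = x[i] / s for i < n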
.visible .entry vec_divScalarf(
	.param .u64 vec_divScalarf_param_0,
	.param .u64 vec_divScalarf_param_1,
	.param .u64 vec_divScalarf_param_2,
	.param .f32 vec_divScalarf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_divScalarf_param_0];
	ld.param.u64 	%rd2, [vec_divScalarf_param_1];
	ld.param.u64 	%rd3, [vec_divScalarf_param_2];
	ld.param.f32 	%f1, [vec_divScalarf_param_3];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB9_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f2, [%rd7];
	div.rn.f32 	%f3, %f2, %f1;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f3;

BB9_2:
	ret;
}

	// .globl	vec_scalarAddf
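	// vec_scalarAddf(n, out, s, x): out[i] = s + x[i] for i < n (scalar s is param_2, vector x is param_3)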
.visible .entry vec_scalarAddf(
	.param .u64 vec_scalarAddf_param_0,
	.param .u64 vec_scalarAddf_param_1,
	.param .f32 vec_scalarAddf_param_2,
	.param .u64 vec_scalarAddf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd3, [vec_scalarAddf_param_0];
	ld.param.u64 	%rd1, [vec_scalarAddf_param_1];
	ld.param.f32 	%f1, [vec_scalarAddf_param_2];
	ld.param.u64 	%rd2, [vec_scalarAddf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd4, %r1;
	setp.ge.u64	%p1, %rd4, %rd3;
	@%p1 bra 	BB10_2;

	cvta.to.global.u64 	%rd5, %rd2;
	mul.wide.s32 	%rd6, %r1, 4;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f2, [%rd7];
	add.f32 	%f3, %f2, %f1;
	cvta.to.global.u64 	%rd8, %rd1;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f3;

BB10_2:
	ret;
}

	// .globl	vec_scalarSubf
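	// vec_scalarSubf(n, out, s, x): out[i] = s - x[i] for i < n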
.visible .entry vec_scalarSubf(
	.param .u64 vec_scalarSubf_param_0,
	.param .u64 vec_scalarSubf_param_1,
	.param .f32 vec_scalarSubf_param_2,
	.param .u64 vec_scalarSubf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd3, [vec_scalarSubf_param_0];
	ld.param.u64 	%rd1, [vec_scalarSubf_param_1];
	ld.param.f32 	%f1, [vec_scalarSubf_param_2];
	ld.param.u64 	%rd2, [vec_scalarSubf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd4, %r1;
	setp.ge.u64	%p1, %rd4, %rd3;
	@%p1 bra 	BB11_2;

	cvta.to.global.u64 	%rd5, %rd2;
	mul.wide.s32 	%rd6, %r1, 4;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f2, [%rd7];
	sub.f32 	%f3, %f1, %f2;
	cvta.to.global.u64 	%rd8, %rd1;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f3;

BB11_2:
	ret;
}

	// .globl	vec_scalarMulf
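	// vec_scalarMulf(n, out, s, x): out[i] = s * x[i] for i < n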
.visible .entry vec_scalarMulf(
	.param .u64 vec_scalarMulf_param_0,
	.param .u64 vec_scalarMulf_param_1,
	.param .f32 vec_scalarMulf_param_2,
	.param .u64 vec_scalarMulf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd3, [vec_scalarMulf_param_0];
	ld.param.u64 	%rd1, [vec_scalarMulf_param_1];
	ld.param.f32 	%f1, [vec_scalarMulf_param_2];
	ld.param.u64 	%rd2, [vec_scalarMulf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd4, %r1;
	setp.ge.u64	%p1, %rd4, %rd3;
	@%p1 bra 	BB12_2;

	cvta.to.global.u64 	%rd5, %rd2;
	mul.wide.s32 	%rd6, %r1, 4;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f2, [%rd7];
	mul.f32 	%f3, %f2, %f1;
	cvta.to.global.u64 	%rd8, %rd1;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f3;

BB12_2:
	ret;
}

	// .globl	vec_scalarDivf
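	// vec_scalarDivf(n, out, s, x): out[i] = s / x[i] for i < n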
.visible .entry vec_scalarDivf(
	.param .u64 vec_scalarDivf_param_0,
	.param .u64 vec_scalarDivf_param_1,
	.param .f32 vec_scalarDivf_param_2,
	.param .u64 vec_scalarDivf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_scalarDivf_param_0];
	ld.param.u64 	%rd2, [vec_scalarDivf_param_1];
	ld.param.f32 	%f1, [vec_scalarDivf_param_2];
	ld.param.u64 	%rd3, [vec_scalarDivf_param_3];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB13_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f2, [%rd7];
	div.rn.f32 	%f3, %f1, %f2;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f3;

BB13_2:
	ret;
}

	// .globl	vec_ltf
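	// vec_ltf(n, out, x, y): out[i] = (x[i] < y[i]) ? 1.0f : 0.0f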
.visible .entry vec_ltf(
	.param .u64 vec_ltf_param_0,
	.param .u64 vec_ltf_param_1,
	.param .u64 vec_ltf_param_2,
	.param .u64 vec_ltf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<13>;


	ld.param.u64 	%rd4, [vec_ltf_param_0];
	ld.param.u64 	%rd1, [vec_ltf_param_1];
	ld.param.u64 	%rd2, [vec_ltf_param_2];
	ld.param.u64 	%rd3, [vec_ltf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd5, %r1;
	setp.ge.u64	%p1, %rd5, %rd4;
	@%p1 bra 	BB14_2;

	cvta.to.global.u64 	%rd6, %rd2;
	mul.wide.s32 	%rd7, %r1, 4;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd3;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd10];
	ld.global.f32 	%f2, [%rd8];
	setp.lt.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f3F800000, 0f00000000, %p2;
	cvta.to.global.u64 	%rd11, %rd1;
	add.s64 	%rd12, %rd11, %rd7;
	st.global.f32 	[%rd12], %f3;

BB14_2:
	ret;
}

	// .globl	vec_ltef
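	// vec_ltef(n, out, x, y): out[i] = (x[i] <= y[i]) ? 1.0f : 0.0f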
.visible .entry vec_ltef(
	.param .u64 vec_ltef_param_0,
	.param .u64 vec_ltef_param_1,
	.param .u64 vec_ltef_param_2,
	.param .u64 vec_ltef_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<13>;


	ld.param.u64 	%rd4, [vec_ltef_param_0];
	ld.param.u64 	%rd1, [vec_ltef_param_1];
	ld.param.u64 	%rd2, [vec_ltef_param_2];
	ld.param.u64 	%rd3, [vec_ltef_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd5, %r1;
	setp.ge.u64	%p1, %rd5, %rd4;
	@%p1 bra 	BB15_2;

	cvta.to.global.u64 	%rd6, %rd2;
	mul.wide.s32 	%rd7, %r1, 4;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd3;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd10];
	ld.global.f32 	%f2, [%rd8];
	setp.gtu.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f00000000, 0f3F800000, %p2;
	cvta.to.global.u64 	%rd11, %rd1;
	add.s64 	%rd12, %rd11, %rd7;
	st.global.f32 	[%rd12], %f3;

BB15_2:
	ret;
}

	// .globl	vec_eqf
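	// vec_eqf(n, out, x, y): out[i] = (x[i] == y[i]) ? 1.0f : 0.0f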
.visible .entry vec_eqf(
	.param .u64 vec_eqf_param_0,
	.param .u64 vec_eqf_param_1,
	.param .u64 vec_eqf_param_2,
	.param .u64 vec_eqf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<13>;


	ld.param.u64 	%rd4, [vec_eqf_param_0];
	ld.param.u64 	%rd1, [vec_eqf_param_1];
	ld.param.u64 	%rd2, [vec_eqf_param_2];
	ld.param.u64 	%rd3, [vec_eqf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd5, %r1;
	setp.ge.u64	%p1, %rd5, %rd4;
	@%p1 bra 	BB16_2;

	cvta.to.global.u64 	%rd6, %rd2;
	mul.wide.s32 	%rd7, %r1, 4;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd3;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd10];
	ld.global.f32 	%f2, [%rd8];
	setp.eq.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f3F800000, 0f00000000, %p2;
	cvta.to.global.u64 	%rd11, %rd1;
	add.s64 	%rd12, %rd11, %rd7;
	st.global.f32 	[%rd12], %f3;

BB16_2:
	ret;
}

	// .globl	vec_gtef
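	// vec_gtef(n, out, x, y): out[i] = (x[i] >= y[i]) ? 1.0f : 0.0f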
.visible .entry vec_gtef(
	.param .u64 vec_gtef_param_0,
	.param .u64 vec_gtef_param_1,
	.param .u64 vec_gtef_param_2,
	.param .u64 vec_gtef_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<13>;


	ld.param.u64 	%rd4, [vec_gtef_param_0];
	ld.param.u64 	%rd1, [vec_gtef_param_1];
	ld.param.u64 	%rd2, [vec_gtef_param_2];
	ld.param.u64 	%rd3, [vec_gtef_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd5, %r1;
	setp.ge.u64	%p1, %rd5, %rd4;
	@%p1 bra 	BB17_2;

	cvta.to.global.u64 	%rd6, %rd2;
	mul.wide.s32 	%rd7, %r1, 4;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd3;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd10];
	ld.global.f32 	%f2, [%rd8];
	setp.ltu.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f00000000, 0f3F800000, %p2;
	cvta.to.global.u64 	%rd11, %rd1;
	add.s64 	%rd12, %rd11, %rd7;
	st.global.f32 	[%rd12], %f3;

BB17_2:
	ret;
}

	// .globl	vec_gtf
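	// vec_gtf(n, out, x, y): out[i] = (x[i] > y[i]) ? 1.0f : 0.0f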
.visible .entry vec_gtf(
	.param .u64 vec_gtf_param_0,
	.param .u64 vec_gtf_param_1,
	.param .u64 vec_gtf_param_2,
	.param .u64 vec_gtf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<13>;


	ld.param.u64 	%rd4, [vec_gtf_param_0];
	ld.param.u64 	%rd1, [vec_gtf_param_1];
	ld.param.u64 	%rd2, [vec_gtf_param_2];
	ld.param.u64 	%rd3, [vec_gtf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd5, %r1;
	setp.ge.u64	%p1, %rd5, %rd4;
	@%p1 bra 	BB18_2;

	cvta.to.global.u64 	%rd6, %rd2;
	mul.wide.s32 	%rd7, %r1, 4;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd3;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd10];
	ld.global.f32 	%f2, [%rd8];
	setp.gt.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f3F800000, 0f00000000, %p2;
	cvta.to.global.u64 	%rd11, %rd1;
	add.s64 	%rd12, %rd11, %rd7;
	st.global.f32 	[%rd12], %f3;

BB18_2:
	ret;
}

	// .globl	vec_nef
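	// vec_nef(n, out, x, y): out[i] = (x[i] != y[i]) ? 1.0f : 0.0f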
.visible .entry vec_nef(
	.param .u64 vec_nef_param_0,
	.param .u64 vec_nef_param_1,
	.param .u64 vec_nef_param_2,
	.param .u64 vec_nef_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<13>;


	ld.param.u64 	%rd4, [vec_nef_param_0];
	ld.param.u64 	%rd1, [vec_nef_param_1];
	ld.param.u64 	%rd2, [vec_nef_param_2];
	ld.param.u64 	%rd3, [vec_nef_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd5, %r1;
	setp.ge.u64	%p1, %rd5, %rd4;
	@%p1 bra 	BB19_2;

	cvta.to.global.u64 	%rd6, %rd2;
	mul.wide.s32 	%rd7, %r1, 4;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd3;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd10];
	ld.global.f32 	%f2, [%rd8];
	setp.neu.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f3F800000, 0f00000000, %p2;
	cvta.to.global.u64 	%rd11, %rd1;
	add.s64 	%rd12, %rd11, %rd7;
	st.global.f32 	[%rd12], %f3;

BB19_2:
	ret;
}

	// .globl	vec_ltScalarf
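	// vec_ltScalarf(n, out, x, s): out[i] = (x[i] < s) ? 1.0f : 0.0f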
.visible .entry vec_ltScalarf(
	.param .u64 vec_ltScalarf_param_0,
	.param .u64 vec_ltScalarf_param_1,
	.param .u64 vec_ltScalarf_param_2,
	.param .f32 vec_ltScalarf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd3, [vec_ltScalarf_param_0];
	ld.param.u64 	%rd1, [vec_ltScalarf_param_1];
	ld.param.u64 	%rd2, [vec_ltScalarf_param_2];
	ld.param.f32 	%f1, [vec_ltScalarf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd4, %r1;
	setp.ge.u64	%p1, %rd4, %rd3;
	@%p1 bra 	BB20_2;

	cvta.to.global.u64 	%rd5, %rd2;
	mul.wide.s32 	%rd6, %r1, 4;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f2, [%rd7];
	setp.lt.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f3F800000, 0f00000000, %p2;
	cvta.to.global.u64 	%rd8, %rd1;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f3;

BB20_2:
	ret;
}

	// .globl	vec_lteScalarf
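	// vec_lteScalarf(n, out, x, s): out[i] = (x[i] <= s) ? 1.0f : 0.0f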
.visible .entry vec_lteScalarf(
	.param .u64 vec_lteScalarf_param_0,
	.param .u64 vec_lteScalarf_param_1,
	.param .u64 vec_lteScalarf_param_2,
	.param .f32 vec_lteScalarf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd3, [vec_lteScalarf_param_0];
	ld.param.u64 	%rd1, [vec_lteScalarf_param_1];
	ld.param.u64 	%rd2, [vec_lteScalarf_param_2];
	ld.param.f32 	%f1, [vec_lteScalarf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd4, %r1;
	setp.ge.u64	%p1, %rd4, %rd3;
	@%p1 bra 	BB21_2;

	cvta.to.global.u64 	%rd5, %rd2;
	mul.wide.s32 	%rd6, %r1, 4;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f2, [%rd7];
	setp.gtu.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f00000000, 0f3F800000, %p2;
	cvta.to.global.u64 	%rd8, %rd1;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f3;

BB21_2:
	ret;
}

	// .globl	vec_eqScalarf
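	// vec_eqScalarf(n, out, x, s): out[i] = (x[i] == s) ? 1.0f : 0.0f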
.visible .entry vec_eqScalarf(
	.param .u64 vec_eqScalarf_param_0,
	.param .u64 vec_eqScalarf_param_1,
	.param .u64 vec_eqScalarf_param_2,
	.param .f32 vec_eqScalarf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd3, [vec_eqScalarf_param_0];
	ld.param.u64 	%rd1, [vec_eqScalarf_param_1];
	ld.param.u64 	%rd2, [vec_eqScalarf_param_2];
	ld.param.f32 	%f1, [vec_eqScalarf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd4, %r1;
	setp.ge.u64	%p1, %rd4, %rd3;
	@%p1 bra 	BB22_2;

	cvta.to.global.u64 	%rd5, %rd2;
	mul.wide.s32 	%rd6, %r1, 4;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f2, [%rd7];
	setp.eq.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f3F800000, 0f00000000, %p2;
	cvta.to.global.u64 	%rd8, %rd1;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f3;

BB22_2:
	ret;
}

	// .globl	vec_gteScalarf
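	// vec_gteScalarf(n, out, x, s): out[i] = (x[i] >= s) ? 1.0f : 0.0f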
.visible .entry vec_gteScalarf(
	.param .u64 vec_gteScalarf_param_0,
	.param .u64 vec_gteScalarf_param_1,
	.param .u64 vec_gteScalarf_param_2,
	.param .f32 vec_gteScalarf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd3, [vec_gteScalarf_param_0];
	ld.param.u64 	%rd1, [vec_gteScalarf_param_1];
	ld.param.u64 	%rd2, [vec_gteScalarf_param_2];
	ld.param.f32 	%f1, [vec_gteScalarf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd4, %r1;
	setp.ge.u64	%p1, %rd4, %rd3;
	@%p1 bra 	BB23_2;

	cvta.to.global.u64 	%rd5, %rd2;
	mul.wide.s32 	%rd6, %r1, 4;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f2, [%rd7];
	setp.ltu.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f00000000, 0f3F800000, %p2;
	cvta.to.global.u64 	%rd8, %rd1;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f3;

BB23_2:
	ret;
}

	// .globl	vec_gtScalarf
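	// vec_gtScalarf(n, out, x, s): out[i] = (x[i] > s) ? 1.0f : 0.0f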
.visible .entry vec_gtScalarf(
	.param .u64 vec_gtScalarf_param_0,
	.param .u64 vec_gtScalarf_param_1,
	.param .u64 vec_gtScalarf_param_2,
	.param .f32 vec_gtScalarf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd3, [vec_gtScalarf_param_0];
	ld.param.u64 	%rd1, [vec_gtScalarf_param_1];
	ld.param.u64 	%rd2, [vec_gtScalarf_param_2];
	ld.param.f32 	%f1, [vec_gtScalarf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd4, %r1;
	setp.ge.u64	%p1, %rd4, %rd3;
	@%p1 bra 	BB24_2;

	cvta.to.global.u64 	%rd5, %rd2;
	mul.wide.s32 	%rd6, %r1, 4;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f2, [%rd7];
	setp.gt.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f3F800000, 0f00000000, %p2;
	cvta.to.global.u64 	%rd8, %rd1;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f3;

BB24_2:
	ret;
}

	// .globl	vec_neScalarf
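	// vec_neScalarf(n, out, x, s): out[i] = (x[i] != s) ? 1.0f : 0.0f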
.visible .entry vec_neScalarf(
	.param .u64 vec_neScalarf_param_0,
	.param .u64 vec_neScalarf_param_1,
	.param .u64 vec_neScalarf_param_2,
	.param .f32 vec_neScalarf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd3, [vec_neScalarf_param_0];
	ld.param.u64 	%rd1, [vec_neScalarf_param_1];
	ld.param.u64 	%rd2, [vec_neScalarf_param_2];
	ld.param.f32 	%f1, [vec_neScalarf_param_3];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r1, %r3, %r4, %r2;
	cvt.s64.s32	%rd4, %r1;
	setp.ge.u64	%p1, %rd4, %rd3;
	@%p1 bra 	BB25_2;

	cvta.to.global.u64 	%rd5, %rd2;
	mul.wide.s32 	%rd6, %r1, 4;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f2, [%rd7];
	setp.neu.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f3F800000, 0f00000000, %p2;
	cvta.to.global.u64 	%rd8, %rd1;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f3;

BB25_2:
	ret;
}

	// .globl	vec_acosf
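	// vec_acosf(n, out, x): out[i] = acosf(x[i])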
.visible .entry vec_acosf(
	.param .u64 vec_acosf_param_0,
	.param .u64 vec_acosf_param_1,
	.param .u64 vec_acosf_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<27>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_acosf_param_0];
	ld.param.u64 	%rd2, [vec_acosf_param_1];
	ld.param.u64 	%rd3, [vec_acosf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB26_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	abs.f32 	%f2, %f1;
	mov.f32 	%f3, 0f3F800000;
	sub.f32 	%f4, %f3, %f2;
	mul.f32 	%f5, %f4, 0f3F000000;
	sqrt.rn.f32 	%f6, %f5;
	setp.gt.f32	%p2, %f2, 0f3F11EB85;
	selp.f32	%f7, %f6, %f2, %p2;
	mul.f32 	%f8, %f7, %f7;
	mov.f32 	%f9, 0f3C94D2E9;
	mov.f32 	%f10, 0f3D53F941;
	fma.rn.f32 	%f11, %f10, %f8, %f9;
	mov.f32 	%f12, 0f3D3F841F;
	fma.rn.f32 	%f13, %f11, %f8, %f12;
	mov.f32 	%f14, 0f3D994929;
	fma.rn.f32 	%f15, %f13, %f8, %f14;
	mov.f32 	%f16, 0f3E2AAB94;
	fma.rn.f32 	%f17, %f15, %f8, %f16;
	mul.f32 	%f18, %f8, %f17;
	fma.rn.f32 	%f19, %f18, %f7, %f7;
	add.f32 	%f20, %f19, %f19;
	mov.f32 	%f21, 0f3FC90FDB;
	sub.f32 	%f22, %f21, %f19;
	selp.f32	%f23, %f20, %f22, %p2;
	setp.lt.f32	%p3, %f1, 0f00000000;
	mov.f32 	%f24, 0f40490FDB;
	sub.f32 	%f25, %f24, %f23;
	selp.f32	%f26, %f25, %f23, %p3;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f26;

BB26_2:
	ret;
}

	// .globl	vec_acoshf
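	// vec_acoshf(n, out, x): out[i] = acoshf(x[i])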
.visible .entry vec_acoshf(
	.param .u64 vec_acoshf_param_0,
	.param .u64 vec_acoshf_param_1,
	.param .u64 vec_acoshf_param_2
)
{
	.reg .pred 	%p<16>;
	.reg .f32 	%f<85>;
	.reg .b32 	%r<14>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_acoshf_param_0];
	ld.param.u64 	%rd2, [vec_acoshf_param_1];
	ld.param.u64 	%rd3, [vec_acoshf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB27_12;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	add.f32 	%f2, %f1, 0fBF800000;
	mov.b32 	 %r5, %f2;
	setp.gt.u32	%p2, %r5, 1258291200;
	@%p2 bra 	BB27_7;
	bra.uni 	BB27_2;

BB27_7:
	setp.gt.f32	%p11, %f2, 0f00000000;
	setp.lt.f32	%p12, %f2, 0f7F800000;
	and.pred  	%p13, %p11, %p12;
	@%p13 bra 	BB27_9;
	bra.uni 	BB27_8;

BB27_9:
	setp.lt.f32	%p14, %f2, 0f00800000;
	mul.f32 	%f58, %f2, 0f4B800000;
	selp.f32	%f59, %f58, %f2, %p14;
	selp.f32	%f60, 0fC3170000, 0fC2FE0000, %p14;
	mov.b32 	 %r10, %f59;
	and.b32  	%r11, %r10, 8388607;
	or.b32  	%r12, %r11, 1065353216;
	mov.b32 	 %f61, %r12;
	shr.u32 	%r13, %r10, 23;
	cvt.rn.f32.u32	%f62, %r13;
	add.f32 	%f63, %f60, %f62;
	setp.gt.f32	%p15, %f61, 0f3FAE147B;
	mul.f32 	%f64, %f61, 0f3F000000;
	add.f32 	%f65, %f63, 0f3F800000;
	selp.f32	%f66, %f64, %f61, %p15;
	selp.f32	%f67, %f65, %f63, %p15;
	add.f32 	%f57, %f66, 0f3F800000;
	add.f32 	%f68, %f66, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f56,%f57;
	// inline asm
	mul.f32 	%f69, %f68, %f68;
	neg.f32 	%f70, %f69;
	mul.rn.f32 	%f71, %f56, %f70;
	add.rn.f32 	%f72, %f68, %f71;
	mul.f32 	%f73, %f72, %f72;
	mov.f32 	%f74, 0f3C4C6A36;
	mov.f32 	%f75, 0f3B1E94E6;
	fma.rn.f32 	%f76, %f75, %f73, %f74;
	mov.f32 	%f77, 0f3DAAAB1A;
	fma.rn.f32 	%f78, %f76, %f73, %f77;
	mul.f32 	%f79, %f73, %f78;
	fma.rn.f32 	%f80, %f79, %f72, %f71;
	add.f32 	%f81, %f68, %f80;
	mov.f32 	%f82, 0f3F317218;
	fma.rn.f32 	%f83, %f67, %f82, %f81;
	bra.uni 	BB27_10;

BB27_2:
	mul.rz.f32 	%f13, %f1, %f2;
	add.rn.f32 	%f14, %f13, %f2;
	sqrt.rn.f32 	%f15, %f14;
	add.f32 	%f3, %f2, %f15;
	setp.le.f32	%p3, %f3, 0f3F266666;
	setp.ge.f32	%p4, %f3, 0fBEC9BA5E;
	and.pred  	%p5, %p4, %p3;
	@%p5 bra 	BB27_6;
	bra.uni 	BB27_3;

BB27_6:
	add.f32 	%f43, %f3, 0f40000000;
	div.approx.f32 	%f44, %f3, %f43;
	neg.f32 	%f45, %f3;
	mul.rn.f32 	%f46, %f45, %f44;
	add.rn.f32 	%f47, %f3, %f46;
	mul.f32 	%f48, %f47, %f47;
	mov.f32 	%f49, 0f3C4C4BE0;
	mov.f32 	%f50, 0f3B2063C3;
	fma.rn.f32 	%f51, %f50, %f48, %f49;
	mov.f32 	%f52, 0f3DAAAB50;
	fma.rn.f32 	%f53, %f51, %f48, %f52;
	mul.f32 	%f54, %f48, %f53;
	fma.rn.f32 	%f55, %f54, %f47, %f46;
	add.f32 	%f84, %f3, %f55;
	bra.uni 	BB27_11;

BB27_8:
	lg2.approx.f32 	%f83, %f2;

BB27_10:
	add.f32 	%f84, %f83, 0f3F317218;
	bra.uni 	BB27_11;

BB27_3:
	add.f32 	%f4, %f3, 0f3F800000;
	setp.gt.f32	%p6, %f4, 0f00000000;
	setp.lt.f32	%p7, %f4, 0f7F800000;
	and.pred  	%p8, %p6, %p7;
	@%p8 bra 	BB27_5;
	bra.uni 	BB27_4;

BB27_5:
	setp.lt.f32	%p9, %f4, 0f00800000;
	mul.f32 	%f18, %f4, 0f4B800000;
	selp.f32	%f19, %f18, %f4, %p9;
	selp.f32	%f20, 0fC3170000, 0fC2FE0000, %p9;
	mov.b32 	 %r6, %f19;
	and.b32  	%r7, %r6, 8388607;
	or.b32  	%r8, %r7, 1065353216;
	mov.b32 	 %f21, %r8;
	shr.u32 	%r9, %r6, 23;
	cvt.rn.f32.u32	%f22, %r9;
	add.f32 	%f23, %f20, %f22;
	setp.gt.f32	%p10, %f21, 0f3FAE147B;
	mul.f32 	%f24, %f21, 0f3F000000;
	add.f32 	%f25, %f23, 0f3F800000;
	selp.f32	%f26, %f24, %f21, %p10;
	selp.f32	%f27, %f25, %f23, %p10;
	add.f32 	%f17, %f26, 0f3F800000;
	add.f32 	%f28, %f26, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f16,%f17;
	// inline asm
	mul.f32 	%f29, %f28, %f28;
	neg.f32 	%f30, %f29;
	mul.rn.f32 	%f31, %f16, %f30;
	add.rn.f32 	%f32, %f28, %f31;
	mul.f32 	%f33, %f32, %f32;
	mov.f32 	%f34, 0f3C4C6A36;
	mov.f32 	%f35, 0f3B1E94E6;
	fma.rn.f32 	%f36, %f35, %f33, %f34;
	mov.f32 	%f37, 0f3DAAAB1A;
	fma.rn.f32 	%f38, %f36, %f33, %f37;
	mul.f32 	%f39, %f33, %f38;
	fma.rn.f32 	%f40, %f39, %f32, %f31;
	add.f32 	%f41, %f28, %f40;
	mov.f32 	%f42, 0f3F317218;
	fma.rn.f32 	%f84, %f27, %f42, %f41;
	bra.uni 	BB27_11;

BB27_4:
	lg2.approx.f32 	%f84, %f4;

BB27_11:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f84;

BB27_12:
	ret;
}

	// .globl	vec_asinf
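	// vec_asinf(n, out, x): out[i] = asinf(x[i])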
.visible .entry vec_asinf(
	.param .u64 vec_asinf_param_0,
	.param .u64 vec_asinf_param_1,
	.param .u64 vec_asinf_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<26>;
	.reg .b32 	%r<9>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_asinf_param_0];
	ld.param.u64 	%rd2, [vec_asinf_param_1];
	ld.param.u64 	%rd3, [vec_asinf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB28_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	abs.f32 	%f2, %f1;
	mov.f32 	%f3, 0f3F800000;
	sub.f32 	%f4, %f3, %f2;
	mul.f32 	%f5, %f4, 0f3F000000;
	sqrt.rn.f32 	%f6, %f5;
	setp.gt.f32	%p2, %f2, 0f3F11EB85;
	selp.f32	%f7, %f6, %f2, %p2;
	mul.f32 	%f8, %f7, %f7;
	mov.f32 	%f9, 0f3C94D2E9;
	mov.f32 	%f10, 0f3D53F941;
	fma.rn.f32 	%f11, %f10, %f8, %f9;
	mov.f32 	%f12, 0f3D3F841F;
	fma.rn.f32 	%f13, %f11, %f8, %f12;
	mov.f32 	%f14, 0f3D994929;
	fma.rn.f32 	%f15, %f13, %f8, %f14;
	mov.f32 	%f16, 0f3E2AAB94;
	fma.rn.f32 	%f17, %f15, %f8, %f16;
	mul.f32 	%f18, %f8, %f17;
	fma.rn.f32 	%f19, %f18, %f7, %f7;
	mov.f32 	%f20, 0f3FC90FDB;
	mov.f32 	%f21, 0fC0000000;
	fma.rn.f32 	%f22, %f21, %f19, %f20;
	selp.f32	%f23, %f22, %f19, %p2;
	setp.gtu.f32	%p3, %f23, 0f7F800000;
	mov.b32 	 %r5, %f23;
	mov.b32 	 %r6, %f1;
	and.b32  	%r7, %r6, -2147483648;
	or.b32  	%r8, %r5, %r7;
	mov.b32 	 %f24, %r8;
	selp.f32	%f25, %f23, %f24, %p3;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f25;

BB28_2:
	ret;
}

	// .globl	vec_asinhf
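	// vec_asinhf(n, out, x): out[i] = asinhf(x[i])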
.visible .entry vec_asinhf(
	.param .u64 vec_asinhf_param_0,
	.param .u64 vec_asinhf_param_1,
	.param .u64 vec_asinhf_param_2
)
{
	.reg .pred 	%p<12>;
	.reg .f32 	%f<62>;
	.reg .b32 	%r<13>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_asinhf_param_0];
	ld.param.u64 	%rd2, [vec_asinhf_param_1];
	ld.param.u64 	%rd3, [vec_asinhf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB29_9;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	abs.f32 	%f2, %f1;
	setp.gt.f32	%p2, %f2, 0f7E800000;
	@%p2 bra 	BB29_7;
	bra.uni 	BB29_2;

BB29_7:
	lg2.approx.f32 	%f56, %f2;
	mul.f32 	%f57, %f56, 0f3F317218;
	mov.f32 	%f58, 0f3F317218;
	add.rn.f32 	%f61, %f58, %f57;
	bra.uni 	BB29_8;

BB29_2:
	rcp.rn.f32 	%f12, %f2;
	mov.f32 	%f13, 0f3F800000;
	fma.rn.f32 	%f14, %f12, %f12, %f13;
	sqrt.rn.f32 	%f15, %f14;
	add.f32 	%f11, %f12, %f15;
	// inline asm
	rcp.approx.ftz.f32 %f10,%f11;
	// inline asm
	fma.rn.f32 	%f3, %f2, %f10, %f2;
	setp.le.f32	%p3, %f3, 0f3F266666;
	setp.ge.f32	%p4, %f3, 0fBEC9BA5E;
	and.pred  	%p5, %p4, %p3;
	@%p5 bra 	BB29_6;
	bra.uni 	BB29_3;

BB29_6:
	add.f32 	%f43, %f3, 0f40000000;
	div.approx.f32 	%f44, %f3, %f43;
	neg.f32 	%f45, %f3;
	mul.rn.f32 	%f46, %f45, %f44;
	add.rn.f32 	%f47, %f3, %f46;
	mul.f32 	%f48, %f47, %f47;
	mov.f32 	%f49, 0f3C4C4BE0;
	mov.f32 	%f50, 0f3B2063C3;
	fma.rn.f32 	%f51, %f50, %f48, %f49;
	mov.f32 	%f52, 0f3DAAAB50;
	fma.rn.f32 	%f53, %f51, %f48, %f52;
	mul.f32 	%f54, %f48, %f53;
	fma.rn.f32 	%f55, %f54, %f47, %f46;
	add.f32 	%f61, %f3, %f55;
	bra.uni 	BB29_8;

BB29_3:
	add.f32 	%f4, %f3, 0f3F800000;
	setp.gt.f32	%p6, %f4, 0f00000000;
	setp.lt.f32	%p7, %f4, 0f7F800000;
	and.pred  	%p8, %p6, %p7;
	@%p8 bra 	BB29_5;
	bra.uni 	BB29_4;

BB29_5:
	setp.lt.f32	%p9, %f4, 0f00800000;
	mul.f32 	%f18, %f4, 0f4B800000;
	selp.f32	%f19, %f18, %f4, %p9;
	selp.f32	%f20, 0fC3170000, 0fC2FE0000, %p9;
	mov.b32 	 %r5, %f19;
	and.b32  	%r6, %r5, 8388607;
	or.b32  	%r7, %r6, 1065353216;
	mov.b32 	 %f21, %r7;
	shr.u32 	%r8, %r5, 23;
	cvt.rn.f32.u32	%f22, %r8;
	add.f32 	%f23, %f20, %f22;
	setp.gt.f32	%p10, %f21, 0f3FAE147B;
	mul.f32 	%f24, %f21, 0f3F000000;
	add.f32 	%f25, %f23, 0f3F800000;
	selp.f32	%f26, %f24, %f21, %p10;
	selp.f32	%f27, %f25, %f23, %p10;
	add.f32 	%f17, %f26, 0f3F800000;
	add.f32 	%f28, %f26, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f16,%f17;
	// inline asm
	mul.f32 	%f29, %f28, %f28;
	neg.f32 	%f30, %f29;
	mul.rn.f32 	%f31, %f16, %f30;
	add.rn.f32 	%f32, %f28, %f31;
	mul.f32 	%f33, %f32, %f32;
	mov.f32 	%f34, 0f3C4C6A36;
	mov.f32 	%f35, 0f3B1E94E6;
	fma.rn.f32 	%f36, %f35, %f33, %f34;
	mov.f32 	%f37, 0f3DAAAB1A;
	fma.rn.f32 	%f38, %f36, %f33, %f37;
	mul.f32 	%f39, %f33, %f38;
	fma.rn.f32 	%f40, %f39, %f32, %f31;
	add.f32 	%f41, %f28, %f40;
	mov.f32 	%f42, 0f3F317218;
	fma.rn.f32 	%f61, %f27, %f42, %f41;
	bra.uni 	BB29_8;

BB29_4:
	lg2.approx.f32 	%f61, %f4;

BB29_8:
	cvta.to.global.u64 	%rd8, %rd2;
	mov.b32 	 %r9, %f1;
	and.b32  	%r10, %r9, -2147483648;
	mov.b32 	 %r11, %f61;
	or.b32  	%r12, %r11, %r10;
	mov.b32 	 %f59, %r12;
	setp.gtu.f32	%p11, %f2, 0f7F800000;
	selp.f32	%f60, %f61, %f59, %p11;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f60;

BB29_9:
	ret;
}

	// .globl	vec_atanf
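	// vec_atanf(n, out, x): out[i] = atanf(x[i])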
.visible .entry vec_atanf(
	.param .u64 vec_atanf_param_0,
	.param .u64 vec_atanf_param_1,
	.param .u64 vec_atanf_param_2
)
{
	.reg .pred 	%p<5>;
	.reg .f32 	%f<26>;
	.reg .b32 	%r<9>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_atanf_param_0];
	ld.param.u64 	%rd2, [vec_atanf_param_1];
	ld.param.u64 	%rd3, [vec_atanf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB30_4;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	abs.f32 	%f2, %f1;
	setp.leu.f32	%p2, %f2, 0f3F800000;
	mov.f32 	%f25, %f2;
	@%p2 bra 	BB30_3;

	rcp.rn.f32 	%f3, %f2;
	mov.f32 	%f25, %f3;

BB30_3:
	mov.f32 	%f4, %f25;
	cvta.to.global.u64 	%rd8, %rd2;
	mul.rn.f32 	%f5, %f4, %f4;
	mov.f32 	%f6, 0fC0B59883;
	mov.f32 	%f7, 0fBF52C7EA;
	fma.rn.f32 	%f8, %f5, %f7, %f6;
	mov.f32 	%f9, 0fC0D21907;
	fma.rn.f32 	%f10, %f8, %f5, %f9;
	mul.f32 	%f11, %f5, %f10;
	mul.f32 	%f12, %f4, %f11;
	add.f32 	%f13, %f5, 0f41355DC0;
	mov.f32 	%f14, 0f41E6BD60;
	fma.rn.f32 	%f15, %f13, %f5, %f14;
	mov.f32 	%f16, 0f419D92C8;
	fma.rn.f32 	%f17, %f15, %f5, %f16;
	rcp.rn.f32 	%f18, %f17;
	fma.rn.f32 	%f19, %f12, %f18, %f4;
	mov.f32 	%f20, 0f3FC90FDB;
	sub.f32 	%f21, %f20, %f19;
	setp.gt.f32	%p3, %f2, 0f3F800000;
	selp.f32	%f22, %f21, %f19, %p3;
	mov.b32 	 %r5, %f22;
	mov.b32 	 %r6, %f1;
	and.b32  	%r7, %r6, -2147483648;
	or.b32  	%r8, %r5, %r7;
	mov.b32 	 %f23, %r8;
	setp.gtu.f32	%p4, %f2, 0f7F800000;
	selp.f32	%f24, %f22, %f23, %p4;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f24;

BB30_4:
	ret;
}

	// .globl	vec_atanhf
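	// vec_atanhf(n, out, x): out[i] = atanhf(x[i])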
.visible .entry vec_atanhf(
	.param .u64 vec_atanhf_param_0,
	.param .u64 vec_atanhf_param_1,
	.param .u64 vec_atanhf_param_2
)
{
	.reg .pred 	%p<12>;
	.reg .f32 	%f<59>;
	.reg .b32 	%r<13>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_atanhf_param_0];
	ld.param.u64 	%rd2, [vec_atanhf_param_1];
	ld.param.u64 	%rd3, [vec_atanhf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB31_7;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	abs.f32 	%f10, %f1;
	mov.f32 	%f11, 0f3F800000;
	sub.f32 	%f9, %f11, %f10;
	// inline asm
	rcp.approx.ftz.f32 %f8,%f9;
	// inline asm
	add.f32 	%f12, %f8, %f8;
	mul.f32 	%f13, %f10, %f12;
	setp.gt.f32	%p2, %f10, 0f7E800000;
	selp.f32	%f2, 0fC0000000, %f13, %p2;
	setp.le.f32	%p3, %f2, 0f3F266666;
	setp.ge.f32	%p4, %f2, 0fBEC9BA5E;
	and.pred  	%p5, %p4, %p3;
	@%p5 bra 	BB31_5;
	bra.uni 	BB31_2;

BB31_5:
	add.f32 	%f41, %f2, 0f40000000;
	div.approx.f32 	%f42, %f2, %f41;
	neg.f32 	%f43, %f2;
	mul.rn.f32 	%f44, %f43, %f42;
	add.rn.f32 	%f45, %f2, %f44;
	mul.f32 	%f46, %f45, %f45;
	mov.f32 	%f47, 0f3C4C4BE0;
	mov.f32 	%f48, 0f3B2063C3;
	fma.rn.f32 	%f49, %f48, %f46, %f47;
	mov.f32 	%f50, 0f3DAAAB50;
	fma.rn.f32 	%f51, %f49, %f46, %f50;
	mul.f32 	%f52, %f46, %f51;
	fma.rn.f32 	%f53, %f52, %f45, %f44;
	add.f32 	%f58, %f2, %f53;
	bra.uni 	BB31_6;

BB31_2:
	add.f32 	%f3, %f2, 0f3F800000;
	setp.gt.f32	%p6, %f3, 0f00000000;
	setp.lt.f32	%p7, %f3, 0f7F800000;
	and.pred  	%p8, %p6, %p7;
	@%p8 bra 	BB31_4;
	bra.uni 	BB31_3;

BB31_4:
	setp.lt.f32	%p9, %f3, 0f00800000;
	mul.f32 	%f16, %f3, 0f4B800000;
	selp.f32	%f17, %f16, %f3, %p9;
	selp.f32	%f18, 0fC3170000, 0fC2FE0000, %p9;
	mov.b32 	 %r5, %f17;
	and.b32  	%r6, %r5, 8388607;
	or.b32  	%r7, %r6, 1065353216;
	mov.b32 	 %f19, %r7;
	shr.u32 	%r8, %r5, 23;
	cvt.rn.f32.u32	%f20, %r8;
	add.f32 	%f21, %f18, %f20;
	setp.gt.f32	%p10, %f19, 0f3FAE147B;
	mul.f32 	%f22, %f19, 0f3F000000;
	add.f32 	%f23, %f21, 0f3F800000;
	selp.f32	%f24, %f22, %f19, %p10;
	selp.f32	%f25, %f23, %f21, %p10;
	add.f32 	%f15, %f24, 0f3F800000;
	add.f32 	%f26, %f24, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f14,%f15;
	// inline asm
	mul.f32 	%f27, %f26, %f26;
	neg.f32 	%f28, %f27;
	mul.rn.f32 	%f29, %f14, %f28;
	add.rn.f32 	%f30, %f26, %f29;
	mul.f32 	%f31, %f30, %f30;
	mov.f32 	%f32, 0f3C4C6A36;
	mov.f32 	%f33, 0f3B1E94E6;
	fma.rn.f32 	%f34, %f33, %f31, %f32;
	mov.f32 	%f35, 0f3DAAAB1A;
	fma.rn.f32 	%f36, %f34, %f31, %f35;
	mul.f32 	%f37, %f31, %f36;
	fma.rn.f32 	%f38, %f37, %f30, %f29;
	add.f32 	%f39, %f26, %f38;
	mov.f32 	%f40, 0f3F317218;
	fma.rn.f32 	%f58, %f25, %f40, %f39;
	bra.uni 	BB31_6;

BB31_3:
	lg2.approx.f32 	%f58, %f3;

BB31_6:
	cvta.to.global.u64 	%rd8, %rd2;
	mul.f32 	%f54, %f58, 0f3F000000;
	abs.f32 	%f55, %f54;
	setp.gtu.f32	%p11, %f55, 0f7F800000;
	mov.b32 	 %r9, %f54;
	mov.b32 	 %r10, %f1;
	and.b32  	%r11, %r10, -2147483648;
	or.b32  	%r12, %r9, %r11;
	mov.b32 	 %f56, %r12;
	selp.f32	%f57, %f54, %f56, %p11;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f57;

BB31_7:
	ret;
}

	// .globl	vec_cbrtf
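	// vec_cbrtf(n, out, x): out[i] = cbrtf(x[i])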
.visible .entry vec_cbrtf(
	.param .u64 vec_cbrtf_param_0,
	.param .u64 vec_cbrtf_param_1,
	.param .u64 vec_cbrtf_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<16>;
	.reg .b32 	%r<6>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_cbrtf_param_0];
	ld.param.u64 	%rd2, [vec_cbrtf_param_1];
	ld.param.u64 	%rd3, [vec_cbrtf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB32_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f5, [%rd7];
	abs.f32 	%f6, %f5;
	lg2.approx.f32 	%f7, %f6;
	mul.f32 	%f2, %f7, 0f3EAAAAAB;
	// inline asm
	ex2.approx.ftz.f32 %f1,%f2;
	// inline asm
	mul.f32 	%f4, %f1, %f1;
	// inline asm
	rcp.approx.ftz.f32 %f3,%f4;
	// inline asm
	neg.f32 	%f8, %f6;
	fma.rn.f32 	%f9, %f3, %f8, %f1;
	mov.f32 	%f10, 0fBEAAAAAB;
	fma.rn.f32 	%f11, %f9, %f10, %f1;
	mov.b32 	 %r5, %f5;
	setp.lt.s32	%p2, %r5, 0;
	neg.f32 	%f12, %f11;
	selp.f32	%f13, %f12, %f11, %p2;
	add.f32 	%f14, %f5, %f5;
	setp.eq.f32	%p3, %f14, %f5;
	selp.f32	%f15, %f14, %f13, %p3;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f15;

BB32_2:
	ret;
}

	// .globl	vec_ceilf
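	// vec_ceilf(n, out, x): out[i] = ceilf(x[i])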
.visible .entry vec_ceilf(
	.param .u64 vec_ceilf_param_0,
	.param .u64 vec_ceilf_param_1,
	.param .u64 vec_ceilf_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_ceilf_param_0];
	ld.param.u64 	%rd2, [vec_ceilf_param_1];
	ld.param.u64 	%rd3, [vec_ceilf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB33_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	cvt.rpi.f32.f32	%f2, %f1;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f2;

BB33_2:
	ret;
}

	// .globl	vec_cosf
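	// vec_cosf(n, out, x): out[i] = cosf(x[i]); uses the __cudart_i2opi_f table above for range reduction of large arguments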
.visible .entry vec_cosf(
	.param .u64 vec_cosf_param_0,
	.param .u64 vec_cosf_param_1,
	.param .u64 vec_cosf_param_2
)
{
	.local .align 4 .b8 	__local_depot34[28];
	.reg .b64 	%SP;
	.reg .b64 	%SPL;
	.reg .pred 	%p<15>;
	.reg .f32 	%f<48>;
	.reg .b32 	%r<97>;
	.reg .b64 	%rd<23>;


	mov.u64 	%rd22, __local_depot34;
	cvta.local.u64 	%SP, %rd22;
	ld.param.u64 	%rd9, [vec_cosf_param_0];
	ld.param.u64 	%rd7, [vec_cosf_param_1];
	ld.param.u64 	%rd8, [vec_cosf_param_2];
	add.u64 	%rd10, %SP, 0;
	cvta.to.local.u64 	%rd1, %rd10;
	mov.u32 	%r39, %ntid.x;
	mov.u32 	%r40, %ctaid.x;
	mov.u32 	%r41, %tid.x;
	mad.lo.s32 	%r1, %r39, %r40, %r41;
	cvt.s64.s32	%rd11, %r1;
	setp.ge.u64	%p1, %rd11, %rd9;
	@%p1 bra 	BB34_24;

	cvta.to.global.u64 	%rd12, %rd8;
	mul.wide.s32 	%rd13, %r1, 4;
	add.s64 	%rd14, %rd12, %rd13;
	ld.global.f32 	%f43, [%rd14];
	abs.f32 	%f19, %f43;
	setp.neu.f32	%p2, %f19, 0f7F800000;
	@%p2 bra 	BB34_3;

	mov.f32 	%f20, 0f00000000;
	mul.rn.f32 	%f43, %f43, %f20;

BB34_3:
	mul.f32 	%f21, %f43, 0f3F22F983;
	cvt.rni.s32.f32	%r96, %f21;
	cvt.rn.f32.s32	%f22, %r96;
	neg.f32 	%f23, %f22;
	mov.f32 	%f24, 0f3FC90FDA;
	fma.rn.f32 	%f25, %f23, %f24, %f43;
	mov.f32 	%f26, 0f33A22168;
	fma.rn.f32 	%f27, %f23, %f26, %f25;
	mov.f32 	%f28, 0f27C234C5;
	fma.rn.f32 	%f44, %f23, %f28, %f27;
	abs.f32 	%f29, %f43;
	setp.leu.f32	%p3, %f29, 0f47CE4780;
	@%p3 bra 	BB34_13;

	mov.b32 	 %r3, %f43;
	shr.u32 	%r4, %r3, 23;
	bfe.u32 	%r44, %r3, 23, 8;
	add.s32 	%r45, %r44, -128;
	shl.b32 	%r46, %r3, 8;
	or.b32  	%r5, %r46, -2147483648;
	shr.u32 	%r6, %r45, 5;
	mov.u32 	%r88, 0;
	mov.u64 	%rd20, __cudart_i2opi_f;
	mov.u32 	%r87, -6;
	mov.u64 	%rd21, %rd1;

BB34_5:
	.pragma "nounroll";
	mov.u64 	%rd3, %rd21;
	ld.const.u32 	%r49, [%rd20];
	// inline asm
	{
	mad.lo.cc.u32   %r47, %r49, %r5, %r88;
	madc.hi.u32     %r88, %r49, %r5,  0;
	}
	// inline asm
	st.local.u32 	[%rd3], %r47;
	add.s64 	%rd4, %rd3, 4;
	add.s64 	%rd20, %rd20, 4;
	add.s32 	%r87, %r87, 1;
	setp.ne.s32	%p4, %r87, 0;
	mov.u64 	%rd21, %rd4;
	@%p4 bra 	BB34_5;

	and.b32  	%r11, %r3, -2147483648;
	st.local.u32 	[%rd1+24], %r88;
	mov.u32 	%r52, 6;
	sub.s32 	%r53, %r52, %r6;
	mul.wide.s32 	%rd16, %r53, 4;
	add.s64 	%rd6, %rd1, %rd16;
	ld.local.u32 	%r89, [%rd6];
	ld.local.u32 	%r90, [%rd6+-4];
	and.b32  	%r14, %r4, 31;
	setp.eq.s32	%p5, %r14, 0;
	@%p5 bra 	BB34_8;

	mov.u32 	%r54, 32;
	sub.s32 	%r55, %r54, %r14;
	shr.u32 	%r56, %r90, %r55;
	shl.b32 	%r57, %r89, %r14;
	add.s32 	%r89, %r56, %r57;
	ld.local.u32 	%r58, [%rd6+-8];
	shr.u32 	%r59, %r58, %r55;
	shl.b32 	%r60, %r90, %r14;
	add.s32 	%r90, %r59, %r60;

BB34_8:
	shr.u32 	%r61, %r90, 30;
	shl.b32 	%r62, %r89, 2;
	add.s32 	%r91, %r61, %r62;
	shl.b32 	%r20, %r90, 2;
	shr.u32 	%r63, %r91, 31;
	shr.u32 	%r64, %r89, 30;
	add.s32 	%r21, %r63, %r64;
	setp.eq.s32	%p6, %r63, 0;
	mov.u32 	%r92, %r11;
	mov.u32 	%r93, %r20;
	@%p6 bra 	BB34_10;

	not.b32 	%r65, %r91;
	neg.s32 	%r22, %r20;
	setp.eq.s32	%p7, %r20, 0;
	selp.u32	%r66, 1, 0, %p7;
	add.s32 	%r91, %r66, %r65;
	xor.b32  	%r24, %r11, -2147483648;
	mov.u32 	%r92, %r24;
	mov.u32 	%r93, %r22;

BB34_10:
	mov.u32 	%r26, %r92;
	neg.s32 	%r67, %r21;
	setp.eq.s32	%p8, %r11, 0;
	selp.b32	%r96, %r21, %r67, %p8;
	clz.b32 	%r95, %r91;
	setp.eq.s32	%p9, %r95, 0;
	shl.b32 	%r68, %r91, %r95;
	mov.u32 	%r69, 32;
	sub.s32 	%r70, %r69, %r95;
	shr.u32 	%r71, %r93, %r70;
	add.s32 	%r72, %r71, %r68;
	selp.b32	%r30, %r91, %r72, %p9;
	mov.u32 	%r73, -921707870;
	mul.hi.u32 	%r94, %r30, %r73;
	setp.lt.s32	%p10, %r94, 1;
	@%p10 bra 	BB34_12;

	mul.lo.s32 	%r74, %r30, -921707870;
	shr.u32 	%r75, %r74, 31;
	shl.b32 	%r76, %r94, 1;
	add.s32 	%r94, %r75, %r76;
	add.s32 	%r95, %r95, 1;

BB34_12:
	mov.u32 	%r77, 126;
	sub.s32 	%r78, %r77, %r95;
	shl.b32 	%r79, %r78, 23;
	add.s32 	%r80, %r94, 1;
	shr.u32 	%r81, %r80, 7;
	add.s32 	%r82, %r81, 1;
	shr.u32 	%r83, %r82, 1;
	add.s32 	%r84, %r83, %r79;
	or.b32  	%r85, %r84, %r26;
	mov.b32 	 %f44, %r85;

BB34_13:
	mul.rn.f32 	%f7, %f44, %f44;
	add.s32 	%r37, %r96, 1;
	and.b32  	%r38, %r37, 1;
	setp.eq.s32	%p11, %r38, 0;
	@%p11 bra 	BB34_15;

	mov.f32 	%f30, 0fBAB6061A;
	mov.f32 	%f31, 0f37CCF5CE;
	fma.rn.f32 	%f45, %f31, %f7, %f30;
	bra.uni 	BB34_16;

BB34_15:
	mov.f32 	%f32, 0f3C08839E;
	mov.f32 	%f33, 0fB94CA1F9;
	fma.rn.f32 	%f45, %f33, %f7, %f32;

BB34_16:
	@%p11 bra 	BB34_18;

	mov.f32 	%f34, 0f3D2AAAA5;
	fma.rn.f32 	%f35, %f45, %f7, %f34;
	mov.f32 	%f36, 0fBF000000;
	fma.rn.f32 	%f46, %f35, %f7, %f36;
	bra.uni 	BB34_19;

BB34_18:
	mov.f32 	%f37, 0fBE2AAAA3;
	fma.rn.f32 	%f38, %f45, %f7, %f37;
	mov.f32 	%f39, 0f00000000;
	fma.rn.f32 	%f46, %f38, %f7, %f39;

BB34_19:
	fma.rn.f32 	%f47, %f46, %f44, %f44;
	@%p11 bra 	BB34_21;

	mov.f32 	%f40, 0f3F800000;
	fma.rn.f32 	%f47, %f46, %f7, %f40;

BB34_21:
	and.b32  	%r86, %r37, 2;
	setp.eq.s32	%p14, %r86, 0;
	@%p14 bra 	BB34_23;

	mov.f32 	%f41, 0f00000000;
	mov.f32 	%f42, 0fBF800000;
	fma.rn.f32 	%f47, %f47, %f42, %f41;

BB34_23:
	cvta.to.global.u64 	%rd17, %rd7;
	add.s64 	%rd19, %rd17, %rd13;
	st.global.f32 	[%rd19], %f47;

BB34_24:
	ret;
}

	// .globl	vec_coshf
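	// vec_coshf(n, out, x): out[i] = coshf(x[i])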
.visible .entry vec_coshf(
	.param .u64 vec_coshf_param_0,
	.param .u64 vec_coshf_param_1,
	.param .u64 vec_coshf_param_2
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<19>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_coshf_param_0];
	ld.param.u64 	%rd2, [vec_coshf_param_1];
	ld.param.u64 	%rd3, [vec_coshf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB35_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f3, [%rd7];
	abs.f32 	%f4, %f3;
	mul.f32 	%f5, %f4, 0f3FB8AA3B;
	cvt.rzi.f32.f32	%f6, %f5;
	mov.f32 	%f7, 0fBF317200;
	fma.rn.f32 	%f8, %f6, %f7, %f4;
	mov.f32 	%f9, 0fB5BFBE8E;
	fma.rn.f32 	%f10, %f6, %f9, %f8;
	mul.f32 	%f2, %f10, 0f3FB8AA3B;
	// inline asm
	ex2.approx.ftz.f32 %f1,%f2;
	// inline asm
	add.f32 	%f11, %f6, 0fC0000000;
	ex2.approx.f32 	%f12, %f11;
	mul.f32 	%f13, %f1, %f12;
	mov.f32 	%f14, 0f3E000000;
	div.approx.f32 	%f15, %f14, %f13;
	mov.f32 	%f16, 0f40000000;
	fma.rn.f32 	%f17, %f16, %f13, %f15;
	setp.ltu.f32	%p2, %f4, 0f42B40000;
	selp.f32	%f18, %f17, 0f7F800000, %p2;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f18;

BB35_2:
	ret;
}

	// .globl	vec_cospif
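	// vec_cospif(n, out, x): out[i] = cospif(x[i]), i.e. cosf(pi * x[i])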
.visible .entry vec_cospif(
	.param .u64 vec_cospif_param_0,
	.param .u64 vec_cospif_param_1,
	.param .u64 vec_cospif_param_2
)
{
	.reg .pred 	%p<7>;
	.reg .f32 	%f<43>;
	.reg .b32 	%r<9>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_cospif_param_0];
	ld.param.u64 	%rd2, [vec_cospif_param_1];
	ld.param.u64 	%rd3, [vec_cospif_param_2];
	mov.u32 	%r3, %tid.x;
	mov.u32 	%r4, %ntid.x;
	mov.u32 	%r5, %ctaid.x;
	mad.lo.s32 	%r6, %r4, %r5, %r3;
	cvt.s64.s32	%rd1, %r6;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB36_14;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f39, [%rd7];
	abs.f32 	%f17, %f39;
	setp.leu.f32	%p2, %f17, 0f4B800000;
	@%p2 bra 	BB36_3;

	mov.f32 	%f18, 0f00000000;
	mul.rn.f32 	%f39, %f39, %f18;

BB36_3:
	add.f32 	%f19, %f39, %f39;
	cvt.rni.f32.f32	%f20, %f19;
	cvt.rzi.s32.f32	%r7, %f20;
	neg.f32 	%f21, %f20;
	mov.f32 	%f22, 0f3F000000;
	fma.rn.f32 	%f23, %f21, %f22, %f39;
	mul.f32 	%f24, %f23, 0f34222169;
	mov.f32 	%f25, 0f40490FDA;
	fma.rn.f32 	%f4, %f23, %f25, %f24;
	add.s32 	%r1, %r7, 1;
	mul.rn.f32 	%f5, %f4, %f4;
	and.b32  	%r2, %r1, 1;
	setp.eq.s32	%p3, %r2, 0;
	@%p3 bra 	BB36_5;

	mov.f32 	%f26, 0fBAB6061A;
	mov.f32 	%f27, 0f37CCF5CE;
	fma.rn.f32 	%f40, %f27, %f5, %f26;
	bra.uni 	BB36_6;

BB36_5:
	mov.f32 	%f28, 0f3C08839E;
	mov.f32 	%f29, 0fB94CA1F9;
	fma.rn.f32 	%f40, %f29, %f5, %f28;

BB36_6:
	@%p3 bra 	BB36_8;

	mov.f32 	%f30, 0f3D2AAAA5;
	fma.rn.f32 	%f31, %f40, %f5, %f30;
	mov.f32 	%f32, 0fBF000000;
	fma.rn.f32 	%f41, %f31, %f5, %f32;
	bra.uni 	BB36_9;

BB36_8:
	mov.f32 	%f33, 0fBE2AAAA3;
	fma.rn.f32 	%f34, %f40, %f5, %f33;
	mov.f32 	%f35, 0f00000000;
	fma.rn.f32 	%f41, %f34, %f5, %f35;

BB36_9:
	fma.rn.f32 	%f42, %f41, %f4, %f4;
	@%p3 bra 	BB36_11;

	mov.f32 	%f36, 0f3F800000;
	fma.rn.f32 	%f42, %f41, %f5, %f36;

BB36_11:
	and.b32  	%r8, %r1, 2;
	setp.eq.s32	%p6, %r8, 0;
	@%p6 bra 	BB36_13;

	mov.f32 	%f37, 0f00000000;
	mov.f32 	%f38, 0fBF800000;
	fma.rn.f32 	%f42, %f42, %f38, %f37;

BB36_13:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f42;

BB36_14:
	ret;
}

	// .globl	vec_erfcf
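	// vec_erfcf(n, out, x): out[i] = erfcf(x[i])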
.visible .entry vec_erfcf(
	.param .u64 vec_erfcf_param_0,
	.param .u64 vec_erfcf_param_1,
	.param .u64 vec_erfcf_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<63>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_erfcf_param_0];
	ld.param.u64 	%rd2, [vec_erfcf_param_1];
	ld.param.u64 	%rd3, [vec_erfcf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB37_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f7, [%rd7];
	abs.f32 	%f8, %f7;
	add.f32 	%f2, %f8, 0f40800000;
	// inline asm
	rcp.approx.ftz.f32 %f1,%f2;
	// inline asm
	add.f32 	%f9, %f8, 0fC0800000;
	mul.rn.f32 	%f10, %f9, %f1;
	add.f32 	%f11, %f10, 0f3F800000;
	mov.f32 	%f12, 0fC0800000;
	fma.rn.f32 	%f13, %f12, %f11, %f8;
	neg.f32 	%f14, %f10;
	fma.rn.f32 	%f15, %f14, %f8, %f13;
	fma.rn.f32 	%f16, %f1, %f15, %f10;
	mov.f32 	%f17, 0f3BE6E05B;
	mov.f32 	%f18, 0f3A69A091;
	fma.rn.f32 	%f19, %f18, %f16, %f17;
	mov.f32 	%f20, 0fBC81FB4B;
	fma.rn.f32 	%f21, %f19, %f16, %f20;
	mov.f32 	%f22, 0f3D15373B;
	fma.rn.f32 	%f23, %f21, %f16, %f22;
	mov.f32 	%f24, 0fBD887C5A;
	fma.rn.f32 	%f25, %f23, %f16, %f24;
	mov.f32 	%f26, 0f3DC021D5;
	fma.rn.f32 	%f27, %f25, %f16, %f26;
	mov.f32 	%f28, 0fBDCED424;
	fma.rn.f32 	%f29, %f27, %f16, %f28;
	mov.f32 	%f30, 0f3D8B74DE;
	fma.rn.f32 	%f31, %f29, %f16, %f30;
	mov.f32 	%f32, 0f3C7BF170;
	fma.rn.f32 	%f33, %f31, %f16, %f32;
	mov.f32 	%f34, 0fBE0EF8D4;
	fma.rn.f32 	%f35, %f33, %f16, %f34;
	mov.f32 	%f36, 0f3F9DD2C9;
	fma.rn.f32 	%f37, %f35, %f16, %f36;
	mov.f32 	%f38, 0f3F800000;
	mov.f32 	%f39, 0f40000000;
	fma.rn.f32 	%f4, %f39, %f8, %f38;
	// inline asm
	rcp.approx.ftz.f32 %f3,%f4;
	// inline asm
	mul.rn.f32 	%f40, %f37, %f3;
	mul.f32 	%f41, %f40, 0fC0000000;
	fma.rn.f32 	%f42, %f8, %f41, %f37;
	sub.f32 	%f43, %f42, %f40;
	fma.rn.f32 	%f44, %f43, %f3, %f40;
	mul.f32 	%f45, %f8, %f8;
	neg.f32 	%f46, %f45;
	mul.f32 	%f47, %f45, 0fBFB8AA3B;
	cvt.rzi.f32.f32	%f48, %f47;
	mov.f32 	%f49, 0fBF317200;
	fma.rn.f32 	%f50, %f48, %f49, %f46;
	mov.f32 	%f51, 0fB5BFBE8E;
	fma.rn.f32 	%f52, %f48, %f51, %f50;
	mul.f32 	%f6, %f52, 0f3FB8AA3B;
	// inline asm
	ex2.approx.ftz.f32 %f5,%f6;
	// inline asm
	add.f32 	%f53, %f48, 0f00000000;
	ex2.approx.f32 	%f54, %f53;
	mul.f32 	%f55, %f5, %f54;
	neg.f32 	%f56, %f8;
	fma.rn.f32 	%f57, %f56, %f8, %f45;
	fma.rn.f32 	%f58, %f55, %f57, %f55;
	mul.f32 	%f59, %f44, %f58;
	setp.gt.f32	%p2, %f8, 0f4120E148;
	selp.f32	%f60, 0f00000000, %f59, %p2;
	setp.lt.f32	%p3, %f7, 0f00000000;
	sub.f32 	%f61, %f39, %f60;
	selp.f32	%f62, %f61, %f60, %p3;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f62;

BB37_2:
	ret;
}

	// .globl	vec_erfcinvf
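	// vec_erfcinvf(n, out, x): out[i] = erfcinvf(x[i]) (inverse complementary error function)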
.visible .entry vec_erfcinvf(
	.param .u64 vec_erfcinvf_param_0,
	.param .u64 vec_erfcinvf_param_1,
	.param .u64 vec_erfcinvf_param_2
)
{
	.reg .pred 	%p<6>;
	.reg .f32 	%f<56>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_erfcinvf_param_0];
	ld.param.u64 	%rd2, [vec_erfcinvf_param_1];
	ld.param.u64 	%rd3, [vec_erfcinvf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB38_5;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	neg.f32 	%f2, %f1;
	mov.f32 	%f7, 0f40000000;
	add.rn.f32 	%f3, %f7, %f2;
	setp.le.f32	%p2, %f1, 0f3FFF9097;
	setp.ge.f32	%p3, %f1, 0f3B5ED289;
	and.pred  	%p4, %p3, %p2;
	@%p4 bra 	BB38_3;
	bra.uni 	BB38_2;

BB38_3:
	mul.rn.f32 	%f34, %f3, %f1;
	// inline asm
	lg2.approx.ftz.f32 %f33,%f34;
	// inline asm
	neg.f32 	%f35, %f33;
	mov.f32 	%f36, 0f3221F645;
	mov.f32 	%f37, 0fAF8A6370;
	fma.rn.f32 	%f38, %f37, %f35, %f36;
	mov.f32 	%f39, 0fB4016FDA;
	fma.rn.f32 	%f40, %f38, %f35, %f39;
	mov.f32 	%f41, 0f3468F846;
	fma.rn.f32 	%f42, %f40, %f35, %f41;
	mov.f32 	%f43, 0f370742AA;
	fma.rn.f32 	%f44, %f42, %f35, %f43;
	mov.f32 	%f45, 0fB804DB4D;
	fma.rn.f32 	%f46, %f44, %f35, %f45;
	mov.f32 	%f47, 0fBA4AFEA1;
	fma.rn.f32 	%f48, %f46, %f35, %f47;
	mov.f32 	%f49, 0f3BB5C027;
	fma.rn.f32 	%f50, %f48, %f35, %f49;
	mov.f32 	%f51, 0f3E24AE0F;
	fma.rn.f32 	%f52, %f50, %f35, %f51;
	mov.f32 	%f53, 0f3F62DFC4;
	fma.rn.f32 	%f54, %f52, %f35, %f53;
	fma.rn.f32 	%f55, %f54, %f2, %f54;
	bra.uni 	BB38_4;

BB38_2:
	setp.gt.f32	%p5, %f1, 0f3F800000;
	selp.f32	%f12, %f3, %f1, %p5;
	lg2.approx.f32 	%f13, %f12;
	neg.f32 	%f9, %f13;
	// inline asm
	rsqrt.approx.ftz.f32 %f8,%f9;
	// inline asm
	mov.f32 	%f14, 0f42FEF829;
	mov.f32 	%f15, 0fC27C73F1;
	fma.rn.f32 	%f16, %f15, %f8, %f14;
	mov.f32 	%f17, 0fC2E4361C;
	fma.rn.f32 	%f18, %f16, %f8, %f17;
	mov.f32 	%f19, 0f42714D9B;
	fma.rn.f32 	%f20, %f18, %f8, %f19;
	mov.f32 	%f21, 0fC1AE51B3;
	fma.rn.f32 	%f22, %f20, %f8, %f21;
	mov.f32 	%f23, 0f40CEF504;
	fma.rn.f32 	%f24, %f22, %f8, %f23;
	mov.f32 	%f25, 0fBFEA9E05;
	fma.rn.f32 	%f26, %f24, %f8, %f25;
	mov.f32 	%f27, 0fBCF871F4;
	fma.rn.f32 	%f28, %f26, %f8, %f27;
	mov.f32 	%f29, 0f3F553775;
	fma.rn.f32 	%f30, %f28, %f8, %f29;
	// inline asm
	rcp.approx.ftz.f32 %f10,%f8;
	// inline asm
	mul.rn.f32 	%f31, %f30, %f10;
	neg.f32 	%f32, %f31;
	selp.f32	%f55, %f32, %f31, %p5;

BB38_4:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f55;

BB38_5:
	ret;
}

	// .globl	vec_erfcxf
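	// vec_erfcxf(n, out, x): out[i] = erfcxf(x[i]), the scaled complementary error function exp(x[i]^2) * erfcf(x[i])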
.visible .entry vec_erfcxf(
	.param .u64 vec_erfcxf_param_0,
	.param .u64 vec_erfcxf_param_1,
	.param .u64 vec_erfcxf_param_2
)
{
	.reg .pred 	%p<7>;
	.reg .f32 	%f<81>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_erfcxf_param_0];
	ld.param.u64 	%rd2, [vec_erfcxf_param_1];
	ld.param.u64 	%rd3, [vec_erfcxf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB39_7;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	abs.f32 	%f2, %f1;
	setp.lt.f32	%p2, %f2, 0f4120E148;
	@%p2 bra 	BB39_3;
	bra.uni 	BB39_2;

BB39_3:
	add.f32 	%f23, %f2, 0f40800000;
	// inline asm
	rcp.approx.ftz.f32 %f22,%f23;
	// inline asm
	add.f32 	%f26, %f2, 0fC0800000;
	mul.rn.f32 	%f27, %f26, %f22;
	add.f32 	%f28, %f27, 0f3F800000;
	mov.f32 	%f29, 0fC0800000;
	fma.rn.f32 	%f30, %f29, %f28, %f2;
	neg.f32 	%f31, %f27;
	fma.rn.f32 	%f32, %f31, %f2, %f30;
	fma.rn.f32 	%f33, %f22, %f32, %f27;
	mov.f32 	%f34, 0f3BE6E05B;
	mov.f32 	%f35, 0f3A69A091;
	fma.rn.f32 	%f36, %f35, %f33, %f34;
	mov.f32 	%f37, 0fBC81FB4B;
	fma.rn.f32 	%f38, %f36, %f33, %f37;
	mov.f32 	%f39, 0f3D15373B;
	fma.rn.f32 	%f40, %f38, %f33, %f39;
	mov.f32 	%f41, 0fBD887C5A;
	fma.rn.f32 	%f42, %f40, %f33, %f41;
	mov.f32 	%f43, 0f3DC021D5;
	fma.rn.f32 	%f44, %f42, %f33, %f43;
	mov.f32 	%f45, 0fBDCED424;
	fma.rn.f32 	%f46, %f44, %f33, %f45;
	mov.f32 	%f47, 0f3D8B74DE;
	fma.rn.f32 	%f48, %f46, %f33, %f47;
	mov.f32 	%f49, 0f3C7BF170;
	fma.rn.f32 	%f50, %f48, %f33, %f49;
	mov.f32 	%f51, 0fBE0EF8D4;
	fma.rn.f32 	%f52, %f50, %f33, %f51;
	mov.f32 	%f53, 0f3F9DD2C9;
	fma.rn.f32 	%f54, %f52, %f33, %f53;
	mov.f32 	%f55, 0f3F800000;
	mov.f32 	%f56, 0f40000000;
	fma.rn.f32 	%f25, %f56, %f2, %f55;
	// inline asm
	rcp.approx.ftz.f32 %f24,%f25;
	// inline asm
	mul.rn.f32 	%f57, %f54, %f24;
	mul.f32 	%f58, %f57, 0fC0000000;
	fma.rn.f32 	%f59, %f2, %f58, %f54;
	sub.f32 	%f60, %f59, %f57;
	fma.rn.f32 	%f80, %f60, %f24, %f57;
	bra.uni 	BB39_4;

BB39_2:
	mul.f32 	%f8, %f2, 0f3E800000;
	mov.f32 	%f9, 0f3E800000;
	div.approx.f32 	%f10, %f9, %f8;
	mul.f32 	%f11, %f10, %f10;
	mov.f32 	%f12, 0fBFF00000;
	mov.f32 	%f13, 0f40D20000;
	fma.rn.f32 	%f14, %f13, %f11, %f12;
	mov.f32 	%f15, 0f3F400000;
	fma.rn.f32 	%f16, %f14, %f11, %f15;
	mov.f32 	%f17, 0fBF000000;
	fma.rn.f32 	%f18, %f16, %f11, %f17;
	mov.f32 	%f19, 0f3F800000;
	fma.rn.f32 	%f20, %f18, %f11, %f19;
	mul.f32 	%f21, %f10, 0f3F106EBB;
	mul.f32 	%f80, %f21, %f20;

BB39_4:
	setp.geu.f32	%p3, %f1, 0f00000000;
	@%p3 bra 	BB39_6;

	mul.rz.f32 	%f63, %f2, %f2;
	neg.f32 	%f64, %f63;
	fma.rn.f32 	%f65, %f2, %f2, %f64;
	mul.f32 	%f66, %f63, 0f3FB8AA3B;
	cvt.rzi.f32.f32	%f67, %f66;
	mov.f32 	%f68, 0fBF317200;
	fma.rn.f32 	%f69, %f67, %f68, %f63;
	mov.f32 	%f70, 0fB5BFBE8E;
	fma.rn.f32 	%f71, %f67, %f70, %f69;
	mul.f32 	%f62, %f71, 0f3FB8AA3B;
	// inline asm
	ex2.approx.ftz.f32 %f61,%f62;
	// inline asm
	add.f32 	%f72, %f67, 0f00000000;
	ex2.approx.f32 	%f73, %f72;
	mul.f32 	%f74, %f61, %f73;
	setp.lt.f32	%p4, %f63, 0fC2D20000;
	selp.f32	%f75, 0f00000000, %f74, %p4;
	setp.gt.f32	%p5, %f63, 0f42D20000;
	selp.f32	%f76, 0f7F800000, %f75, %p5;
	add.f32 	%f77, %f76, %f76;
	fma.rn.f32 	%f78, %f77, %f65, %f77;
	sub.f32 	%f79, %f78, %f80;
	setp.eq.f32	%p6, %f77, 0f7F800000;
	selp.f32	%f80, %f77, %f79, %p6;

BB39_6:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f80;

BB39_7:
	ret;
}

	// .globl	vec_erff
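	// Element-wise error function: out[i] = erff(in[i]). |x| < 1 uses an odd polynomial in x;
	// larger |x| computes 1 - exp2(poly(|x|)) and reapplies the sign of x with bit operations.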
.visible .entry vec_erff(
	.param .u64 vec_erff_param_0,
	.param .u64 vec_erff_param_1,
	.param .u64 vec_erff_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<37>;
	.reg .b32 	%r<10>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_erff_param_0];
	ld.param.u64 	%rd2, [vec_erff_param_1];
	ld.param.u64 	%rd3, [vec_erff_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB40_5;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	abs.f32 	%f2, %f1;
	setp.ltu.f32	%p2, %f2, 0f3F800000;
	@%p2 bra 	BB40_3;
	bra.uni 	BB40_2;

BB40_3:
	mul.f32 	%f24, %f1, %f1;
	mov.f32 	%f25, 0f3BA0C9F8;
	mov.f32 	%f26, 0fBA1268FB;
	fma.rn.f32 	%f27, %f26, %f24, %f25;
	mov.f32 	%f28, 0fBCDABFD4;
	fma.rn.f32 	%f29, %f27, %f24, %f28;
	mov.f32 	%f30, 0f3DE70331;
	fma.rn.f32 	%f31, %f29, %f24, %f30;
	mov.f32 	%f32, 0fBEC09330;
	fma.rn.f32 	%f33, %f31, %f24, %f32;
	mov.f32 	%f34, 0f3F906EBA;
	fma.rn.f32 	%f35, %f33, %f24, %f34;
	mul.f32 	%f36, %f1, %f35;
	bra.uni 	BB40_4;

BB40_2:
	mov.f32 	%f8, 0f3A03BB71;
	mov.f32 	%f9, 0fB7B730FB;
	fma.rn.f32 	%f10, %f9, %f2, %f8;
	mov.f32 	%f11, 0fBBACA3B3;
	fma.rn.f32 	%f12, %f10, %f2, %f11;
	mov.f32 	%f13, 0f3D0A7445;
	fma.rn.f32 	%f14, %f12, %f2, %f13;
	mov.f32 	%f15, 0fBE1B3B75;
	fma.rn.f32 	%f16, %f14, %f2, %f15;
	mov.f32 	%f17, 0fBF6B385A;
	fma.rn.f32 	%f18, %f16, %f2, %f17;
	mov.f32 	%f19, 0fBFD0316E;
	fma.rn.f32 	%f20, %f18, %f2, %f19;
	mov.f32 	%f21, 0fBA031CCE;
	fma.rn.f32 	%f7, %f20, %f2, %f21;
	// inline asm
	ex2.approx.ftz.f32 %f6,%f7;
	// inline asm
	mov.f32 	%f22, 0f3F800000;
	sub.f32 	%f23, %f22, %f6;
	mov.b32 	 %r5, %f23;
	setp.ltu.f32	%p3, %f2, 0f407AD445;
	selp.b32	%r6, %r5, 1065353216, %p3;
	mov.b32 	 %r7, %f1;
	and.b32  	%r8, %r7, -2147483648;
	or.b32  	%r9, %r6, %r8;
	mov.b32 	 %f36, %r9;

BB40_4:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f36;

BB40_5:
	ret;
}

	// .globl	vec_erfinvf
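	// Element-wise inverse error function: out[i] = erfinvf(in[i]). Central inputs use a
	// polynomial in -lg2(1 - x^2) times x; tail inputs (|x| close to 1) use the rsqrt-based
	// expansion in BB41_3.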
.visible .entry vec_erfinvf(
	.param .u64 vec_erfinvf_param_0,
	.param .u64 vec_erfinvf_param_1,
	.param .u64 vec_erfinvf_param_2
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<45>;
	.reg .b32 	%r<9>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_erfinvf_param_0];
	ld.param.u64 	%rd2, [vec_erfinvf_param_1];
	ld.param.u64 	%rd3, [vec_erfinvf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB41_5;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	neg.f32 	%f8, %f1;
	mov.f32 	%f9, 0f3F800000;
	fma.rn.f32 	%f7, %f1, %f8, %f9;
	// inline asm
	lg2.approx.ftz.f32 %f6,%f7;
	// inline asm
	neg.f32 	%f2, %f6;
	setp.lt.f32	%p2, %f6, 0fC1033333;
	@%p2 bra 	BB41_3;
	bra.uni 	BB41_2;

BB41_3:
	// inline asm
	rsqrt.approx.ftz.f32 %f29,%f2;
	// inline asm
	mov.f32 	%f31, 0fBF29BAA5;
	mov.f32 	%f32, 0fBF1704A1;
	fma.rn.f32 	%f33, %f32, %f29, %f31;
	mov.f32 	%f34, 0f3FCC6ADC;
	fma.rn.f32 	%f35, %f33, %f29, %f34;
	mov.f32 	%f36, 0fBF2CDAED;
	fma.rn.f32 	%f37, %f35, %f29, %f36;
	mov.f32 	%f38, 0fBDC30537;
	fma.rn.f32 	%f39, %f37, %f29, %f38;
	mov.f32 	%f40, 0f3F55D9B9;
	fma.rn.f32 	%f41, %f39, %f29, %f40;
	rcp.rn.f32 	%f42, %f29;
	mul.f32 	%f43, %f41, %f42;
	mov.b32 	 %r5, %f43;
	mov.b32 	 %r6, %f1;
	and.b32  	%r7, %r6, -2147483648;
	or.b32  	%r8, %r5, %r7;
	mov.b32 	 %f44, %r8;
	bra.uni 	BB41_4;

BB41_2:
	mov.f32 	%f10, 0f3221F645;
	mov.f32 	%f11, 0fAF8A6370;
	fma.rn.f32 	%f12, %f11, %f2, %f10;
	mov.f32 	%f13, 0fB4016FDA;
	fma.rn.f32 	%f14, %f12, %f2, %f13;
	mov.f32 	%f15, 0f3468F846;
	fma.rn.f32 	%f16, %f14, %f2, %f15;
	mov.f32 	%f17, 0f370742AA;
	fma.rn.f32 	%f18, %f16, %f2, %f17;
	mov.f32 	%f19, 0fB804DB4D;
	fma.rn.f32 	%f20, %f18, %f2, %f19;
	mov.f32 	%f21, 0fBA4AFEA1;
	fma.rn.f32 	%f22, %f20, %f2, %f21;
	mov.f32 	%f23, 0f3BB5C027;
	fma.rn.f32 	%f24, %f22, %f2, %f23;
	mov.f32 	%f25, 0f3E24AE0F;
	fma.rn.f32 	%f26, %f24, %f2, %f25;
	mov.f32 	%f27, 0f3F62DFC4;
	fma.rn.f32 	%f28, %f26, %f2, %f27;
	mul.f32 	%f44, %f1, %f28;

BB41_4:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f44;

BB41_5:
	ret;
}

	// .globl	vec_exp10f
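	// Element-wise base-10 exponential: out[i] = exp10f(in[i]). x is range-reduced with
	// log2(10) (0f40549A78), evaluated with ex2.approx, and clamped to 0 / +inf outside
	// roughly [-46, 46].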
.visible .entry vec_exp10f(
	.param .u64 vec_exp10f_param_0,
	.param .u64 vec_exp10f_param_1,
	.param .u64 vec_exp10f_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<14>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_exp10f_param_0];
	ld.param.u64 	%rd2, [vec_exp10f_param_1];
	ld.param.u64 	%rd3, [vec_exp10f_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB42_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f3, [%rd7];
	mul.f32 	%f4, %f3, 0f40549A78;
	cvt.rzi.f32.f32	%f5, %f4;
	mov.f32 	%f6, 0fBE9A2080;
	fma.rn.f32 	%f7, %f5, %f6, %f3;
	mov.f32 	%f8, 0fB55427DE;
	fma.rn.f32 	%f9, %f5, %f8, %f7;
	mul.f32 	%f2, %f9, 0f40549A78;
	// inline asm
	ex2.approx.ftz.f32 %f1,%f2;
	// inline asm
	ex2.approx.f32 	%f10, %f5;
	mul.f32 	%f11, %f1, %f10;
	setp.lt.f32	%p2, %f3, 0fC2380000;
	selp.f32	%f12, 0f00000000, %f11, %p2;
	setp.gt.f32	%p3, %f3, 0f42380000;
	selp.f32	%f13, 0f7F800000, %f12, %p3;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f13;

BB42_2:
	ret;
}

	// .globl	vec_exp2f
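	// Element-wise base-2 exponential: out[i] = exp2f(in[i]), a single ex2.approx.f32.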
.visible .entry vec_exp2f(
	.param .u64 vec_exp2f_param_0,
	.param .u64 vec_exp2f_param_1,
	.param .u64 vec_exp2f_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_exp2f_param_0];
	ld.param.u64 	%rd2, [vec_exp2f_param_1];
	ld.param.u64 	%rd3, [vec_exp2f_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB43_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	ex2.approx.f32 	%f2, %f1;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f2;

BB43_2:
	ret;
}

	// .globl	vec_expf
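	// Element-wise natural exponential: out[i] = expf(in[i]). x is range-reduced with
	// log2(e) (0f3FB8AA3B), evaluated with ex2.approx, and clamped to 0 / +inf outside
	// roughly [-105, 105]. A presumed CUDA-level shape of these unary kernels
	// (hypothetical reconstruction, not taken from the original JCudaVec sources):
	//
	//   extern "C" __global__ void vec_expf(size_t n, float *out, float *x)
	//   {
	//       int i = blockIdx.x * blockDim.x + threadIdx.x;  // global element index
	//       if (i < n)                                       // bounds guard (setp.ge.u64 + bra)
	//           out[i] = expf(x[i]);                         // element-wise math call
	//   }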
.visible .entry vec_expf(
	.param .u64 vec_expf_param_0,
	.param .u64 vec_expf_param_1,
	.param .u64 vec_expf_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<15>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_expf_param_0];
	ld.param.u64 	%rd2, [vec_expf_param_1];
	ld.param.u64 	%rd3, [vec_expf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB44_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f3, [%rd7];
	mul.f32 	%f4, %f3, 0f3FB8AA3B;
	cvt.rzi.f32.f32	%f5, %f4;
	mov.f32 	%f6, 0fBF317200;
	fma.rn.f32 	%f7, %f5, %f6, %f3;
	mov.f32 	%f8, 0fB5BFBE8E;
	fma.rn.f32 	%f9, %f5, %f8, %f7;
	mul.f32 	%f2, %f9, 0f3FB8AA3B;
	// inline asm
	ex2.approx.ftz.f32 %f1,%f2;
	// inline asm
	add.f32 	%f10, %f5, 0f00000000;
	ex2.approx.f32 	%f11, %f10;
	mul.f32 	%f12, %f1, %f11;
	setp.lt.f32	%p2, %f3, 0fC2D20000;
	selp.f32	%f13, 0f00000000, %f12, %p2;
	setp.gt.f32	%p3, %f3, 0f42D20000;
	selp.f32	%f14, 0f7F800000, %f13, %p3;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f14;

BB44_2:
	ret;
}

	// .globl	vec_expm1f
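	// Element-wise expm1: out[i] = expm1f(in[i]) = exp(in[i]) - 1, arranged so that small
	// inputs keep full precision instead of cancelling against the -1.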
.visible .entry vec_expm1f(
	.param .u64 vec_expm1f_param_0,
	.param .u64 vec_expm1f_param_1,
	.param .u64 vec_expm1f_param_2
)
{
	.reg .pred 	%p<7>;
	.reg .f32 	%f<33>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_expm1f_param_0];
	ld.param.u64 	%rd2, [vec_expm1f_param_1];
	ld.param.u64 	%rd3, [vec_expm1f_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB45_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	mul.f32 	%f2, %f1, 0f3FB8AA3B;
	cvt.rni.f32.f32	%f3, %f2;
	abs.f32 	%f4, %f1;
	setp.lt.f32	%p2, %f4, 0f3ED1EB85;
	selp.f32	%f5, 0f00000000, %f3, %p2;
	neg.f32 	%f6, %f5;
	mov.f32 	%f7, 0f3F317200;
	fma.rn.f32 	%f8, %f6, %f7, %f1;
	mov.f32 	%f9, 0f35BFBE8E;
	fma.rn.f32 	%f10, %f6, %f9, %f8;
	setp.eq.f32	%p3, %f5, 0f43000000;
	add.f32 	%f11, %f5, 0fBF800000;
	selp.f32	%f12, %f11, %f5, %p3;
	mov.f32 	%f13, 0f3C095663;
	mov.f32 	%f14, 0f3AB5EBE6;
	fma.rn.f32 	%f15, %f14, %f10, %f13;
	mov.f32 	%f16, 0f3D2AABE3;
	fma.rn.f32 	%f17, %f15, %f10, %f16;
	mov.f32 	%f18, 0f3E2AA9F6;
	fma.rn.f32 	%f19, %f17, %f10, %f18;
	mov.f32 	%f20, 0f3EFFFFFE;
	fma.rn.f32 	%f21, %f19, %f10, %f20;
	mul.f32 	%f22, %f10, %f21;
	fma.rn.f32 	%f23, %f22, %f10, %f10;
	ex2.approx.f32 	%f24, %f12;
	add.f32 	%f25, %f24, 0fBF800000;
	fma.rn.f32 	%f26, %f23, %f24, %f25;
	add.f32 	%f27, %f26, %f26;
	selp.f32	%f28, %f27, %f26, %p3;
	setp.gt.f32	%p4, %f12, 0f43000000;
	selp.f32	%f29, 0f7F800000, %f28, %p4;
	setp.lt.f32	%p5, %f12, 0fC1C80000;
	selp.f32	%f30, 0fBF800000, %f29, %p5;
	setp.eq.f32	%p6, %f1, 0f00000000;
	add.f32 	%f31, %f1, %f1;
	selp.f32	%f32, %f31, %f30, %p6;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f32;

BB45_2:
	ret;
}

	// .globl	vec_fabsf
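	// Element-wise absolute value: out[i] = fabsf(in[i]) via abs.f32.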
.visible .entry vec_fabsf(
	.param .u64 vec_fabsf_param_0,
	.param .u64 vec_fabsf_param_1,
	.param .u64 vec_fabsf_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_fabsf_param_0];
	ld.param.u64 	%rd2, [vec_fabsf_param_1];
	ld.param.u64 	%rd3, [vec_fabsf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB46_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	abs.f32 	%f2, %f1;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f2;

BB46_2:
	ret;
}

	// .globl	vec_floorf
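	// Element-wise floor: out[i] = floorf(in[i]) via cvt.rmi.f32.f32 (round toward -infinity).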
.visible .entry vec_floorf(
	.param .u64 vec_floorf_param_0,
	.param .u64 vec_floorf_param_1,
	.param .u64 vec_floorf_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_floorf_param_0];
	ld.param.u64 	%rd2, [vec_floorf_param_1];
	ld.param.u64 	%rd3, [vec_floorf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB47_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	cvt.rmi.f32.f32	%f2, %f1;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f2;

BB47_2:
	ret;
}

	// .globl	vec_j0f
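	// Element-wise Bessel function of the first kind, order 0: out[i] = j0f(in[i]).
	// |x| <= 8 uses a rational fit around the first zeros; larger |x| uses an asymptotic
	// expansion whose phase is reduced against the 2/pi table (__cudart_i2opi_f) in local memory.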
.visible .entry vec_j0f(
	.param .u64 vec_j0f_param_0,
	.param .u64 vec_j0f_param_1,
	.param .u64 vec_j0f_param_2
)
{
	.local .align 4 .b8 	__local_depot48[28];
	.reg .b64 	%SP;
	.reg .b64 	%SPL;
	.reg .pred 	%p<25>;
	.reg .f32 	%f<129>;
	.reg .b32 	%r<186>;
	.reg .b64 	%rd<36>;


	mov.u64 	%rd35, __local_depot48;
	cvta.local.u64 	%SP, %rd35;
	ld.param.u64 	%rd15, [vec_j0f_param_0];
	ld.param.u64 	%rd13, [vec_j0f_param_1];
	ld.param.u64 	%rd14, [vec_j0f_param_2];
	add.u64 	%rd16, %SP, 0;
	cvta.to.local.u64 	%rd1, %rd16;
	mov.u32 	%r73, %ntid.x;
	mov.u32 	%r74, %ctaid.x;
	mov.u32 	%r75, %tid.x;
	mad.lo.s32 	%r1, %r73, %r74, %r75;
	cvt.s64.s32	%rd17, %r1;
	setp.ge.u64	%p1, %rd17, %rd15;
	@%p1 bra 	BB48_38;

	cvta.to.global.u64 	%rd18, %rd14;
	mul.wide.s32 	%rd19, %r1, 4;
	add.s64 	%rd20, %rd18, %rd19;
	ld.global.f32 	%f28, [%rd20];
	abs.f32 	%f1, %f28;
	setp.gtu.f32	%p2, %f1, 0f41000000;
	@%p2 bra 	BB48_3;
	bra.uni 	BB48_2;

BB48_3:
	abs.f32 	%f65, %f1;
	mov.f32 	%f128, 0f00000000;
	setp.eq.f32	%p3, %f65, 0f7F800000;
	@%p3 bra 	BB48_37;

	// inline asm
	rcp.approx.ftz.f32 %f66,%f1;
	// inline asm
	mul.f32 	%f68, %f66, %f66;
	mov.f32 	%f69, 0fBF03B7C2;
	mov.f32 	%f70, 0f4056FE93;
	fma.rn.f32 	%f71, %f70, %f68, %f69;
	mov.f32 	%f72, 0f3DD3B3F3;
	fma.rn.f32 	%f73, %f71, %f68, %f72;
	mov.f32 	%f74, 0fBD7FFFB6;
	fma.rn.f32 	%f75, %f73, %f68, %f74;
	mov.f32 	%f76, 0f3F800000;
	fma.rn.f32 	%f77, %f75, %f68, %f76;
	mov.f32 	%f78, 0fBE52412D;
	mov.f32 	%f79, 0f3F91E009;
	fma.rn.f32 	%f80, %f79, %f68, %f78;
	mov.f32 	%f81, 0f3D854ED1;
	fma.rn.f32 	%f82, %f80, %f68, %f81;
	mov.f32 	%f83, 0fBDFFFFFF;
	fma.rn.f32 	%f84, %f82, %f68, %f83;
	fma.rn.f32 	%f3, %f84, %f66, %f1;
	rsqrt.approx.f32 	%f85, %f1;
	mul.f32 	%f86, %f85, 0f3F4C422A;
	mul.f32 	%f4, %f77, %f86;
	mul.f32 	%f87, %f3, 0f3F22F983;
	cvt.rni.s32.f32	%r175, %f87;
	cvt.rn.f32.s32	%f88, %r175;
	neg.f32 	%f89, %f88;
	mov.f32 	%f90, 0f3FC90FDA;
	fma.rn.f32 	%f91, %f89, %f90, %f3;
	mov.f32 	%f92, 0f33A22168;
	fma.rn.f32 	%f93, %f89, %f92, %f91;
	mov.f32 	%f94, 0f27C234C5;
	fma.rn.f32 	%f122, %f89, %f94, %f93;
	abs.f32 	%f95, %f3;
	setp.leu.f32	%p4, %f95, 0f47CE4780;
	@%p4 bra 	BB48_14;

	add.s64 	%rd2, %rd1, 24;
	mov.b32 	 %r3, %f3;
	bfe.u32 	%r78, %r3, 23, 8;
	add.s32 	%r79, %r78, -128;
	shl.b32 	%r80, %r3, 8;
	or.b32  	%r4, %r80, -2147483648;
	shr.u32 	%r5, %r79, 5;
	mov.u32 	%r167, 0;
	mov.u64 	%rd30, __cudart_i2opi_f;
	mov.u32 	%r166, -6;
	mov.u64 	%rd34, %rd1;

BB48_6:
	.pragma "nounroll";
	mov.u64 	%rd4, %rd34;
	ld.const.u32 	%r83, [%rd30];
	// inline asm
	{
	mad.lo.cc.u32   %r81, %r83, %r4, %r167;
	madc.hi.u32     %r167, %r83, %r4,  0;
	}
	// inline asm
	st.local.u32 	[%rd4], %r81;
	add.s64 	%rd5, %rd4, 4;
	add.s64 	%rd30, %rd30, 4;
	add.s32 	%r166, %r166, 1;
	setp.ne.s32	%p5, %r166, 0;
	mov.u64 	%rd34, %rd5;
	@%p5 bra 	BB48_6;

	and.b32  	%r10, %r3, -2147483648;
	st.local.u32 	[%rd2], %r167;
	bfe.u32 	%r11, %r3, 23, 5;
	mov.u32 	%r86, 6;
	sub.s32 	%r87, %r86, %r5;
	mul.wide.s32 	%rd22, %r87, 4;
	add.s64 	%rd7, %rd1, %rd22;
	ld.local.u32 	%r168, [%rd7];
	ld.local.u32 	%r169, [%rd7+-4];
	setp.eq.s32	%p6, %r11, 0;
	@%p6 bra 	BB48_9;

	mov.u32 	%r88, 32;
	sub.s32 	%r89, %r88, %r11;
	shr.u32 	%r90, %r169, %r89;
	shl.b32 	%r91, %r168, %r11;
	add.s32 	%r168, %r90, %r91;
	ld.local.u32 	%r92, [%rd7+-8];
	shr.u32 	%r93, %r92, %r89;
	shl.b32 	%r94, %r169, %r11;
	add.s32 	%r169, %r93, %r94;

BB48_9:
	shr.u32 	%r95, %r169, 30;
	shl.b32 	%r96, %r168, 2;
	add.s32 	%r170, %r95, %r96;
	shl.b32 	%r19, %r169, 2;
	shr.u32 	%r97, %r170, 31;
	shr.u32 	%r98, %r168, 30;
	add.s32 	%r20, %r97, %r98;
	setp.eq.s32	%p7, %r97, 0;
	mov.u32 	%r171, %r10;
	mov.u32 	%r172, %r19;
	@%p7 bra 	BB48_11;

	not.b32 	%r99, %r170;
	neg.s32 	%r21, %r19;
	setp.eq.s32	%p8, %r19, 0;
	selp.u32	%r100, 1, 0, %p8;
	add.s32 	%r170, %r100, %r99;
	xor.b32  	%r23, %r10, -2147483648;
	mov.u32 	%r171, %r23;
	mov.u32 	%r172, %r21;

BB48_11:
	mov.u32 	%r25, %r171;
	neg.s32 	%r101, %r20;
	setp.eq.s32	%p9, %r10, 0;
	selp.b32	%r175, %r20, %r101, %p9;
	clz.b32 	%r174, %r170;
	setp.eq.s32	%p10, %r174, 0;
	shl.b32 	%r102, %r170, %r174;
	mov.u32 	%r103, 32;
	sub.s32 	%r104, %r103, %r174;
	shr.u32 	%r105, %r172, %r104;
	add.s32 	%r106, %r105, %r102;
	selp.b32	%r29, %r170, %r106, %p10;
	mov.u32 	%r107, -921707870;
	mul.hi.u32 	%r173, %r29, %r107;
	setp.lt.s32	%p11, %r173, 1;
	@%p11 bra 	BB48_13;

	mul.lo.s32 	%r108, %r29, -921707870;
	shr.u32 	%r109, %r108, 31;
	shl.b32 	%r110, %r173, 1;
	add.s32 	%r173, %r109, %r110;
	add.s32 	%r174, %r174, 1;

BB48_13:
	mov.u32 	%r111, 126;
	sub.s32 	%r112, %r111, %r174;
	shl.b32 	%r113, %r112, 23;
	add.s32 	%r114, %r173, 1;
	shr.u32 	%r115, %r114, 7;
	add.s32 	%r116, %r115, 1;
	shr.u32 	%r117, %r116, 1;
	add.s32 	%r118, %r117, %r113;
	or.b32  	%r119, %r118, %r25;
	mov.b32 	 %f122, %r119;

BB48_14:
	and.b32  	%r120, %r175, 3;
	cvt.rn.f32.s32	%f96, %r120;
	add.f32 	%f97, %f122, 0fBF490FDB;
	fma.rn.f32 	%f123, %f96, 0f3FC90FDB, %f97;
	abs.f32 	%f98, %f123;
	setp.neu.f32	%p12, %f98, 0f7F800000;
	@%p12 bra 	BB48_16;

	mov.f32 	%f99, 0f00000000;
	mul.rn.f32 	%f123, %f123, %f99;

BB48_16:
	mul.f32 	%f100, %f123, 0f3F22F983;
	cvt.rni.s32.f32	%r185, %f100;
	cvt.rn.f32.s32	%f101, %r185;
	neg.f32 	%f102, %f101;
	fma.rn.f32 	%f104, %f102, %f90, %f123;
	fma.rn.f32 	%f106, %f102, %f92, %f104;
	fma.rn.f32 	%f124, %f102, %f94, %f106;
	abs.f32 	%f108, %f123;
	setp.leu.f32	%p13, %f108, 0f47CE4780;
	@%p13 bra 	BB48_26;

	mov.b32 	 %r37, %f123;
	shr.u32 	%r38, %r37, 23;
	bfe.u32 	%r123, %r37, 23, 8;
	add.s32 	%r124, %r123, -128;
	shl.b32 	%r125, %r37, 8;
	or.b32  	%r39, %r125, -2147483648;
	shr.u32 	%r40, %r124, 5;
	mov.u32 	%r177, 0;
	mov.u64 	%rd31, __cudart_i2opi_f;
	mov.u32 	%r176, -6;
	mov.u64 	%rd33, %rd1;

BB48_18:
	.pragma "nounroll";
	ld.const.u32 	%r128, [%rd31];
	// inline asm
	{
	mad.lo.cc.u32   %r126, %r128, %r39, %r177;
	madc.hi.u32     %r177, %r128, %r39,  0;
	}
	// inline asm
	st.local.u32 	[%rd33], %r126;
	add.s64 	%rd33, %rd33, 4;
	add.s64 	%rd31, %rd31, 4;
	add.s32 	%r176, %r176, 1;
	setp.ne.s32	%p14, %r176, 0;
	@%p14 bra 	BB48_18;

	and.b32  	%r45, %r37, -2147483648;
	cvta.to.local.u64 	%rd25, %rd16;
	st.local.u32 	[%rd25+24], %r177;
	mov.u32 	%r131, 6;
	sub.s32 	%r132, %r131, %r40;
	mul.wide.s32 	%rd26, %r132, 4;
	add.s64 	%rd12, %rd25, %rd26;
	ld.local.u32 	%r178, [%rd12];
	ld.local.u32 	%r179, [%rd12+-4];
	and.b32  	%r48, %r38, 31;
	setp.eq.s32	%p15, %r48, 0;
	@%p15 bra 	BB48_21;

	mov.u32 	%r133, 32;
	sub.s32 	%r134, %r133, %r48;
	shr.u32 	%r135, %r179, %r134;
	shl.b32 	%r136, %r178, %r48;
	add.s32 	%r178, %r135, %r136;
	ld.local.u32 	%r137, [%rd12+-8];
	shr.u32 	%r138, %r137, %r134;
	shl.b32 	%r139, %r179, %r48;
	add.s32 	%r179, %r138, %r139;

BB48_21:
	shr.u32 	%r140, %r179, 30;
	shl.b32 	%r141, %r178, 2;
	add.s32 	%r180, %r140, %r141;
	shl.b32 	%r54, %r179, 2;
	shr.u32 	%r142, %r180, 31;
	shr.u32 	%r143, %r178, 30;
	add.s32 	%r55, %r142, %r143;
	setp.eq.s32	%p16, %r142, 0;
	mov.u32 	%r181, %r45;
	mov.u32 	%r182, %r54;
	@%p16 bra 	BB48_23;

	not.b32 	%r144, %r180;
	neg.s32 	%r56, %r54;
	setp.eq.s32	%p17, %r54, 0;
	selp.u32	%r145, 1, 0, %p17;
	add.s32 	%r180, %r145, %r144;
	xor.b32  	%r58, %r45, -2147483648;
	mov.u32 	%r181, %r58;
	mov.u32 	%r182, %r56;

BB48_23:
	mov.u32 	%r60, %r181;
	neg.s32 	%r146, %r55;
	setp.eq.s32	%p18, %r45, 0;
	selp.b32	%r185, %r55, %r146, %p18;
	clz.b32 	%r184, %r180;
	setp.eq.s32	%p19, %r184, 0;
	shl.b32 	%r147, %r180, %r184;
	mov.u32 	%r148, 32;
	sub.s32 	%r149, %r148, %r184;
	shr.u32 	%r150, %r182, %r149;
	add.s32 	%r151, %r150, %r147;
	selp.b32	%r64, %r180, %r151, %p19;
	mov.u32 	%r152, -921707870;
	mul.hi.u32 	%r183, %r64, %r152;
	setp.lt.s32	%p20, %r183, 1;
	@%p20 bra 	BB48_25;

	mul.lo.s32 	%r153, %r64, -921707870;
	shr.u32 	%r154, %r153, 31;
	shl.b32 	%r155, %r183, 1;
	add.s32 	%r183, %r154, %r155;
	add.s32 	%r184, %r184, 1;

BB48_25:
	mov.u32 	%r156, 126;
	sub.s32 	%r157, %r156, %r184;
	shl.b32 	%r158, %r157, 23;
	add.s32 	%r159, %r183, 1;
	shr.u32 	%r160, %r159, 7;
	add.s32 	%r161, %r160, 1;
	shr.u32 	%r162, %r161, 1;
	add.s32 	%r163, %r162, %r158;
	or.b32  	%r164, %r163, %r60;
	mov.b32 	 %f124, %r164;

BB48_26:
	mul.rn.f32 	%f14, %f124, %f124;
	add.s32 	%r71, %r185, 1;
	and.b32  	%r72, %r71, 1;
	setp.eq.s32	%p21, %r72, 0;
	@%p21 bra 	BB48_28;

	mov.f32 	%f109, 0fBAB6061A;
	mov.f32 	%f110, 0f37CCF5CE;
	fma.rn.f32 	%f125, %f110, %f14, %f109;
	bra.uni 	BB48_29;

BB48_2:
	add.f32 	%f29, %f1, 0fC019E8A9;
	add.f32 	%f30, %f29, 0fB3E971B3;
	mov.f32 	%f31, 0fA9ACA9B3;
	mov.f32 	%f32, 0fA6B3B8E7;
	fma.rn.f32 	%f33, %f32, %f30, %f31;
	mov.f32 	%f34, 0f2C3F0E18;
	fma.rn.f32 	%f35, %f33, %f30, %f34;
	mov.f32 	%f36, 0fACD41781;
	fma.rn.f32 	%f37, %f35, %f30, %f36;
	mov.f32 	%f38, 0fAFE90F38;
	fma.rn.f32 	%f39, %f37, %f30, %f38;
	mov.f32 	%f40, 0f3020305B;
	fma.rn.f32 	%f41, %f39, %f30, %f40;
	mov.f32 	%f42, 0f33797143;
	fma.rn.f32 	%f43, %f41, %f30, %f42;
	mov.f32 	%f44, 0f30F76F85;
	fma.rn.f32 	%f45, %f43, %f30, %f44;
	mov.f32 	%f46, 0fB6B6DFC6;
	fma.rn.f32 	%f47, %f45, %f30, %f46;
	mov.f32 	%f48, 0fB6F665C9;
	fma.rn.f32 	%f49, %f47, %f30, %f48;
	mov.f32 	%f50, 0f399E2DEB;
	fma.rn.f32 	%f51, %f49, %f30, %f50;
	mov.f32 	%f52, 0f3A4AE334;
	fma.rn.f32 	%f53, %f51, %f30, %f52;
	mov.f32 	%f54, 0fBBEEAA1B;
	fma.rn.f32 	%f55, %f53, %f30, %f54;
	mov.f32 	%f56, 0fBCDA7747;
	fma.rn.f32 	%f57, %f55, %f30, %f56;
	mul.f32 	%f58, %f30, %f57;
	add.f32 	%f59, %f1, 0fC0B0A47B;
	add.f32 	%f60, %f59, 0f339A7A37;
	mul.f32 	%f61, %f60, %f58;
	add.f32 	%f62, %f1, 0fC10A75AB;
	add.f32 	%f63, %f62, 0fB4CCCDED;
	mul.f32 	%f128, %f63, %f61;
	bra.uni 	BB48_37;

BB48_28:
	mov.f32 	%f111, 0f3C08839E;
	mov.f32 	%f112, 0fB94CA1F9;
	fma.rn.f32 	%f125, %f112, %f14, %f111;

BB48_29:
	@%p21 bra 	BB48_31;

	mov.f32 	%f113, 0f3D2AAAA5;
	fma.rn.f32 	%f114, %f125, %f14, %f113;
	mov.f32 	%f115, 0fBF000000;
	fma.rn.f32 	%f126, %f114, %f14, %f115;
	bra.uni 	BB48_32;

BB48_31:
	mov.f32 	%f116, 0fBE2AAAA3;
	fma.rn.f32 	%f117, %f125, %f14, %f116;
	mov.f32 	%f118, 0f00000000;
	fma.rn.f32 	%f126, %f117, %f14, %f118;

BB48_32:
	fma.rn.f32 	%f127, %f126, %f124, %f124;
	@%p21 bra 	BB48_34;

	fma.rn.f32 	%f127, %f126, %f14, %f76;

BB48_34:
	and.b32  	%r165, %r71, 2;
	setp.eq.s32	%p24, %r165, 0;
	@%p24 bra 	BB48_36;

	mov.f32 	%f120, 0f00000000;
	mov.f32 	%f121, 0fBF800000;
	fma.rn.f32 	%f127, %f127, %f121, %f120;

BB48_36:
	mul.f32 	%f128, %f4, %f127;

BB48_37:
	cvta.to.global.u64 	%rd27, %rd13;
	add.s64 	%rd29, %rd27, %rd19;
	st.global.f32 	[%rd29], %f128;

BB48_38:
	ret;
}

	// .globl	vec_j1f
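	// Element-wise Bessel function of the first kind, order 1: out[i] = j1f(in[i]).
	// Same structure as vec_j0f, plus a final sign/underflow fix-up for negative and
	// tiny inputs (j1 is an odd function).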
.visible .entry vec_j1f(
	.param .u64 vec_j1f_param_0,
	.param .u64 vec_j1f_param_1,
	.param .u64 vec_j1f_param_2
)
{
	.local .align 4 .b8 	__local_depot49[28];
	.reg .b64 	%SP;
	.reg .b64 	%SPL;
	.reg .pred 	%p<27>;
	.reg .f32 	%f<129>;
	.reg .b32 	%r<195>;
	.reg .b64 	%rd<38>;


	mov.u64 	%rd37, __local_depot49;
	cvta.local.u64 	%SP, %rd37;
	ld.param.u64 	%rd15, [vec_j1f_param_0];
	ld.param.u64 	%rd13, [vec_j1f_param_1];
	ld.param.u64 	%rd14, [vec_j1f_param_2];
	mov.u32 	%r73, %ntid.x;
	mov.u32 	%r74, %ctaid.x;
	mov.u32 	%r75, %tid.x;
	mad.lo.s32 	%r1, %r73, %r74, %r75;
	cvt.s64.s32	%rd16, %r1;
	setp.ge.u64	%p1, %rd16, %rd15;
	@%p1 bra 	BB49_38;

	cvta.to.global.u64 	%rd17, %rd14;
	mul.wide.s32 	%rd18, %r1, 4;
	add.s64 	%rd19, %rd17, %rd18;
	ld.global.f32 	%f1, [%rd19];
	abs.f32 	%f2, %f1;
	setp.gtu.f32	%p2, %f2, 0f40FB3333;
	@%p2 bra 	BB49_3;
	bra.uni 	BB49_2;

BB49_3:
	abs.f32 	%f63, %f2;
	mov.f32 	%f128, 0f00000000;
	setp.eq.f32	%p3, %f63, 0f7F800000;
	@%p3 bra 	BB49_37;

	// inline asm
	rcp.approx.ftz.f32 %f64,%f2;
	// inline asm
	mul.f32 	%f66, %f64, %f64;
	mov.f32 	%f67, 0f3F3FF7E9;
	mov.f32 	%f68, 0fC082CB37;
	fma.rn.f32 	%f69, %f68, %f66, %f67;
	mov.f32 	%f70, 0fBE458BAE;
	fma.rn.f32 	%f71, %f69, %f66, %f70;
	mov.f32 	%f72, 0f3E3FFF8B;
	fma.rn.f32 	%f73, %f71, %f66, %f72;
	mov.f32 	%f74, 0f3F800000;
	fma.rn.f32 	%f4, %f73, %f66, %f74;
	mov.f32 	%f75, 0f3EB914AD;
	mov.f32 	%f76, 0fBFCA3BA2;
	fma.rn.f32 	%f77, %f76, %f66, %f75;
	mov.f32 	%f78, 0fBE27F2EC;
	fma.rn.f32 	%f79, %f77, %f66, %f78;
	mov.f32 	%f80, 0f3EBFFFFD;
	fma.rn.f32 	%f81, %f79, %f66, %f80;
	fma.rn.f32 	%f5, %f81, %f64, %f2;
	rsqrt.approx.f32 	%f6, %f2;
	mul.f32 	%f82, %f5, 0f3F22F983;
	cvt.rni.s32.f32	%r184, %f82;
	cvt.rn.f32.s32	%f83, %r184;
	neg.f32 	%f84, %f83;
	mov.f32 	%f85, 0f3FC90FDA;
	fma.rn.f32 	%f86, %f84, %f85, %f5;
	mov.f32 	%f87, 0f33A22168;
	fma.rn.f32 	%f88, %f84, %f87, %f86;
	mov.f32 	%f89, 0f27C234C5;
	fma.rn.f32 	%f122, %f84, %f89, %f88;
	abs.f32 	%f90, %f5;
	setp.leu.f32	%p4, %f90, 0f47CE4780;
	@%p4 bra 	BB49_14;

	add.u64 	%rd21, %SP, 0;
	cvta.to.local.u64 	%rd34, %rd21;
	mov.b32 	 %r3, %f5;
	bfe.u32 	%r78, %r3, 23, 8;
	add.s32 	%r79, %r78, -128;
	shl.b32 	%r80, %r3, 8;
	or.b32  	%r4, %r80, -2147483648;
	shr.u32 	%r5, %r79, 5;
	mov.u32 	%r176, 0;
	mov.u64 	%rd33, __cudart_i2opi_f;
	mov.u32 	%r175, -6;

BB49_6:
	.pragma "nounroll";
	ld.const.u32 	%r83, [%rd33];
	// inline asm
	{
	mad.lo.cc.u32   %r81, %r83, %r4, %r176;
	madc.hi.u32     %r176, %r83, %r4,  0;
	}
	// inline asm
	st.local.u32 	[%rd34], %r81;
	add.s64 	%rd34, %rd34, 4;
	add.s64 	%rd33, %rd33, 4;
	add.s32 	%r175, %r175, 1;
	setp.ne.s32	%p5, %r175, 0;
	@%p5 bra 	BB49_6;

	and.b32  	%r10, %r3, -2147483648;
	cvta.to.local.u64 	%rd23, %rd21;
	st.local.u32 	[%rd23+24], %r176;
	bfe.u32 	%r11, %r3, 23, 5;
	mov.u32 	%r86, 6;
	sub.s32 	%r87, %r86, %r5;
	mul.wide.s32 	%rd24, %r87, 4;
	add.s64 	%rd6, %rd23, %rd24;
	ld.local.u32 	%r177, [%rd6];
	ld.local.u32 	%r178, [%rd6+-4];
	setp.eq.s32	%p6, %r11, 0;
	@%p6 bra 	BB49_9;

	mov.u32 	%r88, 32;
	sub.s32 	%r89, %r88, %r11;
	shr.u32 	%r90, %r178, %r89;
	shl.b32 	%r91, %r177, %r11;
	add.s32 	%r177, %r90, %r91;
	ld.local.u32 	%r92, [%rd6+-8];
	shr.u32 	%r93, %r92, %r89;
	shl.b32 	%r94, %r178, %r11;
	add.s32 	%r178, %r93, %r94;

BB49_9:
	shr.u32 	%r95, %r178, 30;
	shl.b32 	%r96, %r177, 2;
	add.s32 	%r179, %r95, %r96;
	shl.b32 	%r19, %r178, 2;
	shr.u32 	%r97, %r179, 31;
	shr.u32 	%r98, %r177, 30;
	add.s32 	%r20, %r97, %r98;
	setp.eq.s32	%p7, %r97, 0;
	mov.u32 	%r180, %r10;
	mov.u32 	%r181, %r19;
	@%p7 bra 	BB49_11;

	not.b32 	%r99, %r179;
	neg.s32 	%r21, %r19;
	setp.eq.s32	%p8, %r19, 0;
	selp.u32	%r100, 1, 0, %p8;
	add.s32 	%r179, %r100, %r99;
	xor.b32  	%r23, %r10, -2147483648;
	mov.u32 	%r180, %r23;
	mov.u32 	%r181, %r21;

BB49_11:
	mov.u32 	%r25, %r180;
	neg.s32 	%r101, %r20;
	setp.eq.s32	%p9, %r10, 0;
	selp.b32	%r184, %r20, %r101, %p9;
	clz.b32 	%r183, %r179;
	setp.eq.s32	%p10, %r183, 0;
	shl.b32 	%r102, %r179, %r183;
	mov.u32 	%r103, 32;
	sub.s32 	%r104, %r103, %r183;
	shr.u32 	%r105, %r181, %r104;
	add.s32 	%r106, %r105, %r102;
	selp.b32	%r29, %r179, %r106, %p10;
	mov.u32 	%r107, -921707870;
	mul.hi.u32 	%r182, %r29, %r107;
	setp.lt.s32	%p11, %r182, 1;
	@%p11 bra 	BB49_13;

	mul.lo.s32 	%r108, %r29, -921707870;
	shr.u32 	%r109, %r108, 31;
	shl.b32 	%r110, %r182, 1;
	add.s32 	%r182, %r109, %r110;
	add.s32 	%r183, %r183, 1;

BB49_13:
	mov.u32 	%r111, 126;
	sub.s32 	%r112, %r111, %r183;
	shl.b32 	%r113, %r112, 23;
	add.s32 	%r114, %r182, 1;
	shr.u32 	%r115, %r114, 7;
	add.s32 	%r116, %r115, 1;
	shr.u32 	%r117, %r116, 1;
	add.s32 	%r118, %r117, %r113;
	or.b32  	%r119, %r118, %r25;
	mov.b32 	 %f122, %r119;

BB49_14:
	mul.f32 	%f91, %f6, 0f3F4C422A;
	mul.f32 	%f10, %f4, %f91;
	and.b32  	%r120, %r184, 3;
	cvt.rn.f32.s32	%f92, %r120;
	add.f32 	%f93, %f122, 0fC016CBE4;
	fma.rn.f32 	%f123, %f92, 0f3FC90FDB, %f93;
	abs.f32 	%f94, %f123;
	setp.neu.f32	%p12, %f94, 0f7F800000;
	@%p12 bra 	BB49_16;

	mov.f32 	%f95, 0f00000000;
	mul.rn.f32 	%f123, %f123, %f95;

BB49_16:
	mul.f32 	%f96, %f123, 0f3F22F983;
	cvt.rni.s32.f32	%r194, %f96;
	cvt.rn.f32.s32	%f97, %r194;
	neg.f32 	%f98, %f97;
	fma.rn.f32 	%f100, %f98, %f85, %f123;
	fma.rn.f32 	%f102, %f98, %f87, %f100;
	fma.rn.f32 	%f124, %f98, %f89, %f102;
	abs.f32 	%f104, %f123;
	setp.leu.f32	%p13, %f104, 0f47CE4780;
	@%p13 bra 	BB49_26;

	mov.b32 	 %r37, %f123;
	shr.u32 	%r38, %r37, 23;
	bfe.u32 	%r123, %r37, 23, 8;
	add.s32 	%r124, %r123, -128;
	shl.b32 	%r125, %r37, 8;
	or.b32  	%r39, %r125, -2147483648;
	shr.u32 	%r40, %r124, 5;
	add.u64 	%rd26, %SP, 0;
	cvta.to.local.u64 	%rd36, %rd26;
	mov.u32 	%r186, 0;
	mov.u64 	%rd35, __cudart_i2opi_f;
	mov.u32 	%r185, -6;

BB49_18:
	.pragma "nounroll";
	ld.const.u32 	%r128, [%rd35];
	// inline asm
	{
	mad.lo.cc.u32   %r126, %r128, %r39, %r186;
	madc.hi.u32     %r186, %r128, %r39,  0;
	}
	// inline asm
	st.local.u32 	[%rd36], %r126;
	add.s64 	%rd36, %rd36, 4;
	add.s64 	%rd35, %rd35, 4;
	add.s32 	%r185, %r185, 1;
	setp.ne.s32	%p14, %r185, 0;
	@%p14 bra 	BB49_18;

	and.b32  	%r45, %r37, -2147483648;
	cvta.to.local.u64 	%rd28, %rd26;
	st.local.u32 	[%rd28+24], %r186;
	mov.u32 	%r131, 6;
	sub.s32 	%r132, %r131, %r40;
	mul.wide.s32 	%rd29, %r132, 4;
	add.s64 	%rd12, %rd28, %rd29;
	ld.local.u32 	%r187, [%rd12];
	ld.local.u32 	%r188, [%rd12+-4];
	and.b32  	%r48, %r38, 31;
	setp.eq.s32	%p15, %r48, 0;
	@%p15 bra 	BB49_21;

	mov.u32 	%r133, 32;
	sub.s32 	%r134, %r133, %r48;
	shr.u32 	%r135, %r188, %r134;
	shl.b32 	%r136, %r187, %r48;
	add.s32 	%r187, %r135, %r136;
	ld.local.u32 	%r137, [%rd12+-8];
	shr.u32 	%r138, %r137, %r134;
	shl.b32 	%r139, %r188, %r48;
	add.s32 	%r188, %r138, %r139;

BB49_21:
	shr.u32 	%r140, %r188, 30;
	shl.b32 	%r141, %r187, 2;
	add.s32 	%r189, %r140, %r141;
	shl.b32 	%r54, %r188, 2;
	shr.u32 	%r142, %r189, 31;
	shr.u32 	%r143, %r187, 30;
	add.s32 	%r55, %r142, %r143;
	setp.eq.s32	%p16, %r142, 0;
	mov.u32 	%r190, %r45;
	mov.u32 	%r191, %r54;
	@%p16 bra 	BB49_23;

	not.b32 	%r144, %r189;
	neg.s32 	%r56, %r54;
	setp.eq.s32	%p17, %r54, 0;
	selp.u32	%r145, 1, 0, %p17;
	add.s32 	%r189, %r145, %r144;
	xor.b32  	%r58, %r45, -2147483648;
	mov.u32 	%r190, %r58;
	mov.u32 	%r191, %r56;

BB49_23:
	mov.u32 	%r60, %r190;
	neg.s32 	%r146, %r55;
	setp.eq.s32	%p18, %r45, 0;
	selp.b32	%r194, %r55, %r146, %p18;
	clz.b32 	%r193, %r189;
	setp.eq.s32	%p19, %r193, 0;
	shl.b32 	%r147, %r189, %r193;
	mov.u32 	%r148, 32;
	sub.s32 	%r149, %r148, %r193;
	shr.u32 	%r150, %r191, %r149;
	add.s32 	%r151, %r150, %r147;
	selp.b32	%r64, %r189, %r151, %p19;
	mov.u32 	%r152, -921707870;
	mul.hi.u32 	%r192, %r64, %r152;
	setp.lt.s32	%p20, %r192, 1;
	@%p20 bra 	BB49_25;

	mul.lo.s32 	%r153, %r64, -921707870;
	shr.u32 	%r154, %r153, 31;
	shl.b32 	%r155, %r192, 1;
	add.s32 	%r192, %r154, %r155;
	add.s32 	%r193, %r193, 1;

BB49_25:
	mov.u32 	%r156, 126;
	sub.s32 	%r157, %r156, %r193;
	shl.b32 	%r158, %r157, 23;
	add.s32 	%r159, %r192, 1;
	shr.u32 	%r160, %r159, 7;
	add.s32 	%r161, %r160, 1;
	shr.u32 	%r162, %r161, 1;
	add.s32 	%r163, %r162, %r158;
	or.b32  	%r164, %r163, %r60;
	mov.b32 	 %f124, %r164;

BB49_26:
	mul.rn.f32 	%f17, %f124, %f124;
	add.s32 	%r71, %r194, 1;
	and.b32  	%r72, %r71, 1;
	setp.eq.s32	%p21, %r72, 0;
	@%p21 bra 	BB49_28;

	mov.f32 	%f105, 0fBAB6061A;
	mov.f32 	%f106, 0f37CCF5CE;
	fma.rn.f32 	%f125, %f106, %f17, %f105;
	bra.uni 	BB49_29;

BB49_2:
	add.f32 	%f31, %f2, 0fC0753AAC;
	add.f32 	%f32, %f31, 0f33A5090F;
	mov.f32 	%f33, 0f2B81BF42;
	mov.f32 	%f34, 0f29AF3463;
	fma.rn.f32 	%f35, %f34, %f32, %f33;
	mov.f32 	%f36, 0fADE21EC1;
	fma.rn.f32 	%f37, %f35, %f32, %f36;
	mov.f32 	%f38, 0fAF5DDEFF;
	fma.rn.f32 	%f39, %f37, %f32, %f38;
	mov.f32 	%f40, 0f319B0C9D;
	fma.rn.f32 	%f41, %f39, %f32, %f40;
	mov.f32 	%f42, 0f32E81173;
	fma.rn.f32 	%f43, %f41, %f32, %f42;
	mov.f32 	%f44, 0fB50F8DC8;
	fma.rn.f32 	%f45, %f43, %f32, %f44;
	mov.f32 	%f46, 0fB61E653D;
	fma.rn.f32 	%f47, %f45, %f32, %f46;
	mov.f32 	%f48, 0f382CD9C5;
	fma.rn.f32 	%f49, %f47, %f32, %f48;
	mov.f32 	%f50, 0f38F9EB10;
	fma.rn.f32 	%f51, %f49, %f32, %f50;
	mov.f32 	%f52, 0fBAECEB9C;
	fma.rn.f32 	%f53, %f51, %f32, %f52;
	mov.f32 	%f54, 0fBB276FFD;
	fma.rn.f32 	%f55, %f53, %f32, %f54;
	mov.f32 	%f56, 0f3D073993;
	fma.rn.f32 	%f57, %f55, %f32, %f56;
	add.f32 	%f58, %f2, 0fC0E07FB0;
	add.f32 	%f59, %f58, 0f3444B8DB;
	mul.f32 	%f60, %f59, %f57;
	mul.f32 	%f61, %f32, %f60;
	mul.f32 	%f128, %f2, %f61;
	bra.uni 	BB49_37;

BB49_28:
	mov.f32 	%f107, 0f3C08839E;
	mov.f32 	%f108, 0fB94CA1F9;
	fma.rn.f32 	%f125, %f108, %f17, %f107;

BB49_29:
	@%p21 bra 	BB49_31;

	mov.f32 	%f109, 0f3D2AAAA5;
	fma.rn.f32 	%f110, %f125, %f17, %f109;
	mov.f32 	%f111, 0fBF000000;
	fma.rn.f32 	%f126, %f110, %f17, %f111;
	bra.uni 	BB49_32;

BB49_31:
	mov.f32 	%f112, 0fBE2AAAA3;
	fma.rn.f32 	%f113, %f125, %f17, %f112;
	mov.f32 	%f114, 0f00000000;
	fma.rn.f32 	%f126, %f113, %f17, %f114;

BB49_32:
	fma.rn.f32 	%f127, %f126, %f124, %f124;
	@%p21 bra 	BB49_34;

	fma.rn.f32 	%f127, %f126, %f17, %f74;

BB49_34:
	and.b32  	%r165, %r71, 2;
	setp.eq.s32	%p24, %r165, 0;
	@%p24 bra 	BB49_36;

	mov.f32 	%f116, 0f00000000;
	mov.f32 	%f117, 0fBF800000;
	fma.rn.f32 	%f127, %f127, %f117, %f116;

BB49_36:
	mul.f32 	%f128, %f10, %f127;

BB49_37:
	neg.f32 	%f118, %f128;
	setp.lt.f32	%p25, %f1, 0f00000000;
	selp.f32	%f119, %f118, %f128, %p25;
	mov.b32 	 %r166, %f1;
	and.b32  	%r167, %r166, -2147483648;
	mov.b32 	 %r168, %f119;
	and.b32  	%r169, %r168, 2147483647;
	or.b32  	%r170, %r169, %r167;
	mov.b32 	 %f120, %r170;
	setp.lt.f32	%p26, %f2, 0f0DA24260;
	selp.f32	%f121, %f120, %f119, %p26;
	cvta.to.global.u64 	%rd30, %rd13;
	add.s64 	%rd32, %rd30, %rd18;
	st.global.f32 	[%rd32], %f121;

BB49_38:
	ret;
}

	// .globl	vec_lgammaf
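	// Element-wise log-gamma: out[i] = lgammaf(in[i]). Positive arguments pick a polynomial
	// or Stirling-style fit by magnitude; negative arguments go through the reflection
	// formula using ln(pi) (0f3F928682) and a sin(pi*x) evaluation.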
.visible .entry vec_lgammaf(
	.param .u64 vec_lgammaf_param_0,
	.param .u64 vec_lgammaf_param_1,
	.param .u64 vec_lgammaf_param_2
)
{
	.reg .pred 	%p<34>;
	.reg .f32 	%f<271>;
	.reg .b32 	%r<24>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_lgammaf_param_0];
	ld.param.u64 	%rd2, [vec_lgammaf_param_1];
	ld.param.u64 	%rd3, [vec_lgammaf_param_2];
	mov.u32 	%r3, %tid.x;
	mov.u32 	%r4, %ntid.x;
	mov.u32 	%r5, %ctaid.x;
	mad.lo.s32 	%r6, %r4, %r5, %r3;
	cvt.s64.s32	%rd1, %r6;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB50_38;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	abs.f32 	%f2, %f1;
	setp.ltu.f32	%p2, %f2, 0f40400000;
	@%p2 bra 	BB50_8;
	bra.uni 	BB50_2;

BB50_8:
	setp.ltu.f32	%p10, %f2, 0f3FC00000;
	@%p10 bra 	BB50_10;
	bra.uni 	BB50_9;

BB50_10:
	setp.ltu.f32	%p11, %f2, 0f3F333333;
	@%p11 bra 	BB50_12;
	bra.uni 	BB50_11;

BB50_12:
	mov.f32 	%f143, 0fBBB34878;
	mov.f32 	%f144, 0f3B6B1C86;
	fma.rn.f32 	%f145, %f144, %f2, %f143;
	mov.f32 	%f146, 0fBD36CAEF;
	fma.rn.f32 	%f147, %f145, %f2, %f146;
	mov.f32 	%f148, 0f3E2B5555;
	fma.rn.f32 	%f149, %f147, %f2, %f148;
	mov.f32 	%f150, 0fBD2C96C7;
	fma.rn.f32 	%f151, %f149, %f2, %f150;
	mov.f32 	%f152, 0fBF27E6EB;
	fma.rn.f32 	%f153, %f151, %f2, %f152;
	mov.f32 	%f154, 0f3F13C463;
	fma.rn.f32 	%f155, %f153, %f2, %f154;
	mul.f32 	%f156, %f2, %f155;
	fma.rn.f32 	%f11, %f156, %f2, %f2;
	setp.gt.f32	%p12, %f11, 0f00000000;
	setp.lt.f32	%p13, %f11, 0f7F800000;
	and.pred  	%p14, %p12, %p13;
	@%p14 bra 	BB50_14;
	bra.uni 	BB50_13;

BB50_14:
	setp.lt.f32	%p15, %f11, 0f00800000;
	mul.f32 	%f159, %f11, 0f4B800000;
	selp.f32	%f160, %f159, %f11, %p15;
	selp.f32	%f161, 0fC3170000, 0fC2FE0000, %p15;
	mov.b32 	 %r11, %f160;
	and.b32  	%r12, %r11, 8388607;
	or.b32  	%r13, %r12, 1065353216;
	mov.b32 	 %f162, %r13;
	shr.u32 	%r14, %r11, 23;
	cvt.rn.f32.u32	%f163, %r14;
	add.f32 	%f164, %f161, %f163;
	setp.gt.f32	%p16, %f162, 0f3FAE147B;
	mul.f32 	%f165, %f162, 0f3F000000;
	add.f32 	%f166, %f164, 0f3F800000;
	selp.f32	%f167, %f165, %f162, %p16;
	selp.f32	%f168, %f166, %f164, %p16;
	add.f32 	%f158, %f167, 0f3F800000;
	add.f32 	%f169, %f167, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f157,%f158;
	// inline asm
	mul.f32 	%f170, %f169, %f169;
	neg.f32 	%f171, %f170;
	mul.rn.f32 	%f172, %f157, %f171;
	add.rn.f32 	%f173, %f169, %f172;
	mul.f32 	%f174, %f173, %f173;
	mov.f32 	%f175, 0f3C4C6A36;
	mov.f32 	%f176, 0f3B1E94E6;
	fma.rn.f32 	%f177, %f176, %f174, %f175;
	mov.f32 	%f178, 0f3DAAAB1A;
	fma.rn.f32 	%f179, %f177, %f174, %f178;
	mul.f32 	%f180, %f174, %f179;
	fma.rn.f32 	%f181, %f180, %f173, %f172;
	add.f32 	%f182, %f169, %f181;
	mov.f32 	%f183, 0f3F317218;
	fma.rn.f32 	%f262, %f168, %f183, %f182;
	bra.uni 	BB50_15;

BB50_2:
	setp.ltu.f32	%p3, %f2, 0f40F9999A;
	@%p3 bra 	BB50_7;
	bra.uni 	BB50_3;

BB50_7:
	add.f32 	%f84, %f2, 0fC0400000;
	mov.f32 	%f85, 0fC640F6F8;
	mov.f32 	%f86, 0fC43B38FB;
	fma.rn.f32 	%f87, %f86, %f84, %f85;
	mov.f32 	%f88, 0fC7206560;
	fma.rn.f32 	%f89, %f87, %f84, %f88;
	mov.f32 	%f90, 0fC73CB6AA;
	fma.rn.f32 	%f91, %f89, %f84, %f90;
	mov.f32 	%f92, 0fC80BAE5A;
	fma.rn.f32 	%f93, %f91, %f84, %f92;
	add.f32 	%f94, %f84, 0fC381A020;
	mov.f32 	%f95, 0fC62864B8;
	fma.rn.f32 	%f96, %f94, %f84, %f95;
	mov.f32 	%f97, 0fC7B50686;
	fma.rn.f32 	%f98, %f96, %f84, %f97;
	mov.f32 	%f99, 0fC8498465;
	fma.rn.f32 	%f83, %f98, %f84, %f99;
	// inline asm
	rcp.approx.ftz.f32 %f82,%f83;
	// inline asm
	fma.rn.f32 	%f270, %f93, %f82, %f84;
	bra.uni 	BB50_16;

BB50_9:
	add.f32 	%f100, %f2, 0fC0000000;
	mov.f32 	%f101, 0fB967A002;
	mov.f32 	%f102, 0f385007FA;
	fma.rn.f32 	%f103, %f102, %f100, %f101;
	mov.f32 	%f104, 0f3A0DE6FC;
	fma.rn.f32 	%f105, %f103, %f100, %f104;
	mov.f32 	%f106, 0fBA9DE0E2;
	fma.rn.f32 	%f107, %f105, %f100, %f106;
	mov.f32 	%f108, 0f3B3D05B7;
	fma.rn.f32 	%f109, %f107, %f100, %f108;
	mov.f32 	%f110, 0fBBF1EB10;
	fma.rn.f32 	%f111, %f109, %f100, %f110;
	mov.f32 	%f112, 0f3CA89A28;
	fma.rn.f32 	%f113, %f111, %f100, %f112;
	mov.f32 	%f114, 0fBD89F01A;
	fma.rn.f32 	%f115, %f113, %f100, %f114;
	mov.f32 	%f116, 0f3EA51A66;
	fma.rn.f32 	%f117, %f115, %f100, %f116;
	mov.f32 	%f118, 0f3ED87730;
	fma.rn.f32 	%f119, %f117, %f100, %f118;
	mul.f32 	%f270, %f100, %f119;
	bra.uni 	BB50_16;

BB50_3:
	// inline asm
	rcp.approx.ftz.f32 %f40,%f2;
	// inline asm
	mul.f32 	%f42, %f40, %f40;
	mov.f32 	%f43, 0fBB360953;
	mov.f32 	%f44, 0f3A4BE755;
	fma.rn.f32 	%f45, %f44, %f42, %f43;
	mov.f32 	%f46, 0f3DAAAAA3;
	fma.rn.f32 	%f47, %f45, %f42, %f46;
	mov.f32 	%f48, 0f3F6B3F8E;
	fma.rn.f32 	%f3, %f47, %f40, %f48;
	setp.lt.f32	%p4, %f2, 0f7F800000;
	setp.gt.f32	%p5, %f2, 0f00000000;
	and.pred  	%p6, %p5, %p4;
	@%p6 bra 	BB50_5;
	bra.uni 	BB50_4;

BB50_5:
	setp.lt.f32	%p7, %f2, 0f00800000;
	mul.f32 	%f51, %f2, 0f4B800000;
	selp.f32	%f52, %f51, %f2, %p7;
	selp.f32	%f53, 0fC3170000, 0fC2FE0000, %p7;
	mov.b32 	 %r7, %f52;
	and.b32  	%r8, %r7, 8388607;
	or.b32  	%r9, %r8, 1065353216;
	mov.b32 	 %f54, %r9;
	shr.u32 	%r10, %r7, 23;
	cvt.rn.f32.u32	%f55, %r10;
	add.f32 	%f56, %f53, %f55;
	setp.gt.f32	%p8, %f54, 0f3FAE147B;
	mul.f32 	%f57, %f54, 0f3F000000;
	add.f32 	%f58, %f56, 0f3F800000;
	selp.f32	%f59, %f57, %f54, %p8;
	selp.f32	%f60, %f58, %f56, %p8;
	add.f32 	%f50, %f59, 0f3F800000;
	add.f32 	%f61, %f59, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f49,%f50;
	// inline asm
	mul.f32 	%f62, %f61, %f61;
	neg.f32 	%f63, %f62;
	mul.rn.f32 	%f64, %f49, %f63;
	add.rn.f32 	%f65, %f61, %f64;
	mul.f32 	%f66, %f65, %f65;
	mov.f32 	%f67, 0f3C4C6A36;
	mov.f32 	%f68, 0f3B1E94E6;
	fma.rn.f32 	%f69, %f68, %f66, %f67;
	mov.f32 	%f70, 0f3DAAAB1A;
	fma.rn.f32 	%f71, %f69, %f66, %f70;
	mul.f32 	%f72, %f66, %f71;
	fma.rn.f32 	%f73, %f72, %f65, %f64;
	add.f32 	%f74, %f61, %f73;
	mov.f32 	%f75, 0f3F317218;
	fma.rn.f32 	%f261, %f60, %f75, %f74;
	bra.uni 	BB50_6;

BB50_11:
	mov.f32 	%f120, 0f3F800000;
	sub.f32 	%f121, %f120, %f2;
	mov.f32 	%f122, 0f3DD47577;
	mov.f32 	%f123, 0f3D3BEF76;
	fma.rn.f32 	%f124, %f123, %f121, %f122;
	mov.f32 	%f125, 0f3DFB8079;
	fma.rn.f32 	%f126, %f124, %f121, %f125;
	mov.f32 	%f127, 0f3E0295B5;
	fma.rn.f32 	%f128, %f126, %f121, %f127;
	mov.f32 	%f129, 0f3E12A765;
	fma.rn.f32 	%f130, %f128, %f121, %f129;
	mov.f32 	%f131, 0f3E2D6867;
	fma.rn.f32 	%f132, %f130, %f121, %f131;
	mov.f32 	%f133, 0f3E5462BF;
	fma.rn.f32 	%f134, %f132, %f121, %f133;
	mov.f32 	%f135, 0f3E8A8A72;
	fma.rn.f32 	%f136, %f134, %f121, %f135;
	mov.f32 	%f137, 0f3ECD26A4;
	fma.rn.f32 	%f138, %f136, %f121, %f137;
	mov.f32 	%f139, 0f3F528D32;
	fma.rn.f32 	%f140, %f138, %f121, %f139;
	mov.f32 	%f141, 0f3F13C468;
	fma.rn.f32 	%f142, %f140, %f121, %f141;
	mul.f32 	%f270, %f121, %f142;
	bra.uni 	BB50_16;

BB50_4:
	lg2.approx.f32 	%f261, %f2;

BB50_6:
	mul.f32 	%f76, %f261, 0f3F000000;
	add.f32 	%f77, %f2, 0fBF000000;
	mul.rn.f32 	%f78, %f76, %f77;
	sub.f32 	%f79, %f78, %f2;
	add.rn.f32 	%f80, %f78, %f3;
	add.f32 	%f81, %f79, %f80;
	setp.eq.f32	%p9, %f2, 0f7F800000;
	selp.f32	%f270, %f2, %f81, %p9;
	bra.uni 	BB50_16;

BB50_13:
	lg2.approx.f32 	%f262, %f11;

BB50_15:
	neg.f32 	%f270, %f262;

BB50_16:
	mov.f32 	%f16, %f270;
	setp.ge.f32	%p17, %f1, 0f00000000;
	mov.f32 	%f269, %f16;
	@%p17 bra 	BB50_37;

	cvt.rmi.f32.f32	%f185, %f2;
	setp.eq.f32	%p18, %f2, %f185;
	mov.f32 	%f184, 0f7F800000;
	mov.f32 	%f269, %f184;
	@%p18 bra 	BB50_37;

	setp.lt.f32	%p19, %f2, 0f1FEC1E4A;
	@%p19 bra 	BB50_33;
	bra.uni 	BB50_19;

BB50_33:
	setp.gt.f32	%p29, %f2, 0f00000000;
	setp.lt.f32	%p30, %f2, 0f7F800000;
	and.pred  	%p31, %p29, %p30;
	@%p31 bra 	BB50_35;
	bra.uni 	BB50_34;

BB50_35:
	setp.lt.f32	%p32, %f2, 0f00800000;
	mul.f32 	%f236, %f2, 0f4B800000;
	selp.f32	%f237, %f236, %f2, %p32;
	selp.f32	%f238, 0fC3170000, 0fC2FE0000, %p32;
	mov.b32 	 %r20, %f237;
	and.b32  	%r21, %r20, 8388607;
	or.b32  	%r22, %r21, 1065353216;
	mov.b32 	 %f239, %r22;
	shr.u32 	%r23, %r20, 23;
	cvt.rn.f32.u32	%f240, %r23;
	add.f32 	%f241, %f238, %f240;
	setp.gt.f32	%p33, %f239, 0f3FAE147B;
	mul.f32 	%f242, %f239, 0f3F000000;
	add.f32 	%f243, %f241, 0f3F800000;
	selp.f32	%f244, %f242, %f239, %p33;
	selp.f32	%f245, %f243, %f241, %p33;
	add.f32 	%f235, %f244, 0f3F800000;
	add.f32 	%f246, %f244, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f234,%f235;
	// inline asm
	mul.f32 	%f247, %f246, %f246;
	neg.f32 	%f248, %f247;
	mul.rn.f32 	%f249, %f234, %f248;
	add.rn.f32 	%f250, %f246, %f249;
	mul.f32 	%f251, %f250, %f250;
	mov.f32 	%f252, 0f3C4C6A36;
	mov.f32 	%f253, 0f3B1E94E6;
	fma.rn.f32 	%f254, %f253, %f251, %f252;
	mov.f32 	%f255, 0f3DAAAB1A;
	fma.rn.f32 	%f256, %f254, %f251, %f255;
	mul.f32 	%f257, %f251, %f256;
	fma.rn.f32 	%f258, %f257, %f250, %f249;
	add.f32 	%f259, %f246, %f258;
	mov.f32 	%f260, 0f3F317218;
	fma.rn.f32 	%f267, %f245, %f260, %f259;
	bra.uni 	BB50_36;

BB50_19:
	add.f32 	%f186, %f2, %f2;
	cvt.rni.f32.f32	%f187, %f186;
	cvt.rzi.s32.f32	%r1, %f187;
	neg.f32 	%f188, %f187;
	mov.f32 	%f189, 0f3F000000;
	fma.rn.f32 	%f190, %f188, %f189, %f2;
	mul.f32 	%f17, %f190, 0f40490FDB;
	mul.rn.f32 	%f18, %f17, %f17;
	and.b32  	%r2, %r1, 1;
	setp.eq.s32	%p20, %r2, 0;
	@%p20 bra 	BB50_21;

	mov.f32 	%f191, 0fBAB6061A;
	mov.f32 	%f192, 0f37CCF5CE;
	fma.rn.f32 	%f263, %f192, %f18, %f191;
	bra.uni 	BB50_22;

BB50_34:
	lg2.approx.f32 	%f267, %f2;

BB50_36:
	neg.f32 	%f269, %f267;
	bra.uni 	BB50_37;

BB50_21:
	mov.f32 	%f193, 0f3C08839E;
	mov.f32 	%f194, 0fB94CA1F9;
	fma.rn.f32 	%f263, %f194, %f18, %f193;

BB50_22:
	@%p20 bra 	BB50_24;

	mov.f32 	%f195, 0f3D2AAAA5;
	fma.rn.f32 	%f196, %f263, %f18, %f195;
	mov.f32 	%f197, 0fBF000000;
	fma.rn.f32 	%f264, %f196, %f18, %f197;
	bra.uni 	BB50_25;

BB50_24:
	mov.f32 	%f198, 0fBE2AAAA3;
	fma.rn.f32 	%f199, %f263, %f18, %f198;
	mov.f32 	%f200, 0f00000000;
	fma.rn.f32 	%f264, %f199, %f18, %f200;

BB50_25:
	fma.rn.f32 	%f265, %f264, %f17, %f17;
	@%p20 bra 	BB50_27;

	mov.f32 	%f201, 0f3F800000;
	fma.rn.f32 	%f265, %f264, %f18, %f201;

BB50_27:
	and.b32  	%r15, %r1, 2;
	setp.eq.s32	%p23, %r15, 0;
	@%p23 bra 	BB50_29;

	mov.f32 	%f202, 0f00000000;
	mov.f32 	%f203, 0fBF800000;
	fma.rn.f32 	%f265, %f265, %f203, %f202;

BB50_29:
	abs.f32 	%f204, %f265;
	mul.f32 	%f30, %f2, %f204;
	setp.gt.f32	%p24, %f30, 0f00000000;
	setp.lt.f32	%p25, %f30, 0f7F800000;
	and.pred  	%p26, %p24, %p25;
	@%p26 bra 	BB50_31;
	bra.uni 	BB50_30;

BB50_31:
	setp.lt.f32	%p27, %f30, 0f00800000;
	mul.f32 	%f207, %f30, 0f4B800000;
	selp.f32	%f208, %f207, %f30, %p27;
	selp.f32	%f209, 0fC3170000, 0fC2FE0000, %p27;
	mov.b32 	 %r16, %f208;
	and.b32  	%r17, %r16, 8388607;
	or.b32  	%r18, %r17, 1065353216;
	mov.b32 	 %f210, %r18;
	shr.u32 	%r19, %r16, 23;
	cvt.rn.f32.u32	%f211, %r19;
	add.f32 	%f212, %f209, %f211;
	setp.gt.f32	%p28, %f210, 0f3FAE147B;
	mul.f32 	%f213, %f210, 0f3F000000;
	add.f32 	%f214, %f212, 0f3F800000;
	selp.f32	%f215, %f213, %f210, %p28;
	selp.f32	%f216, %f214, %f212, %p28;
	add.f32 	%f206, %f215, 0f3F800000;
	add.f32 	%f217, %f215, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f205,%f206;
	// inline asm
	mul.f32 	%f218, %f217, %f217;
	neg.f32 	%f219, %f218;
	mul.rn.f32 	%f220, %f205, %f219;
	add.rn.f32 	%f221, %f217, %f220;
	mul.f32 	%f222, %f221, %f221;
	mov.f32 	%f223, 0f3C4C6A36;
	mov.f32 	%f224, 0f3B1E94E6;
	fma.rn.f32 	%f225, %f224, %f222, %f223;
	mov.f32 	%f226, 0f3DAAAB1A;
	fma.rn.f32 	%f227, %f225, %f222, %f226;
	mul.f32 	%f228, %f222, %f227;
	fma.rn.f32 	%f229, %f228, %f221, %f220;
	add.f32 	%f230, %f217, %f229;
	mov.f32 	%f231, 0f3F317218;
	fma.rn.f32 	%f266, %f216, %f231, %f230;
	bra.uni 	BB50_32;

BB50_30:
	lg2.approx.f32 	%f266, %f30;

BB50_32:
	mov.f32 	%f232, 0f3F928682;
	sub.f32 	%f233, %f232, %f266;
	sub.f32 	%f269, %f233, %f16;

BB50_37:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f269;

BB50_38:
	ret;
}

	// .globl	vec_log10f
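	// Element-wise base-10 logarithm: out[i] = log10f(in[i]), computed as ln(x) * log10(e)
	// (0f3EDE5BD9).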
.visible .entry vec_log10f(
	.param .u64 vec_log10f_param_0,
	.param .u64 vec_log10f_param_1,
	.param .u64 vec_log10f_param_2
)
{
	.reg .pred 	%p<7>;
	.reg .f32 	%f<34>;
	.reg .b32 	%r<9>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_log10f_param_0];
	ld.param.u64 	%rd2, [vec_log10f_param_1];
	ld.param.u64 	%rd3, [vec_log10f_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB51_5;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	setp.gt.f32	%p2, %f1, 0f00000000;
	setp.lt.f32	%p3, %f1, 0f7F800000;
	and.pred  	%p4, %p2, %p3;
	@%p4 bra 	BB51_3;
	bra.uni 	BB51_2;

BB51_3:
	setp.lt.f32	%p5, %f1, 0f00800000;
	mul.f32 	%f7, %f1, 0f4B800000;
	selp.f32	%f8, %f7, %f1, %p5;
	selp.f32	%f9, 0fC3170000, 0fC2FE0000, %p5;
	mov.b32 	 %r5, %f8;
	and.b32  	%r6, %r5, 8388607;
	or.b32  	%r7, %r6, 1065353216;
	mov.b32 	 %f10, %r7;
	shr.u32 	%r8, %r5, 23;
	cvt.rn.f32.u32	%f11, %r8;
	add.f32 	%f12, %f9, %f11;
	setp.gt.f32	%p6, %f10, 0f3FAE147B;
	mul.f32 	%f13, %f10, 0f3F000000;
	add.f32 	%f14, %f12, 0f3F800000;
	selp.f32	%f15, %f13, %f10, %p6;
	selp.f32	%f16, %f14, %f12, %p6;
	add.f32 	%f6, %f15, 0f3F800000;
	add.f32 	%f17, %f15, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f5,%f6;
	// inline asm
	mul.f32 	%f18, %f17, %f17;
	neg.f32 	%f19, %f18;
	mul.rn.f32 	%f20, %f5, %f19;
	add.rn.f32 	%f21, %f17, %f20;
	mul.f32 	%f22, %f21, %f21;
	mov.f32 	%f23, 0f3C4C6A36;
	mov.f32 	%f24, 0f3B1E94E6;
	fma.rn.f32 	%f25, %f24, %f22, %f23;
	mov.f32 	%f26, 0f3DAAAB1A;
	fma.rn.f32 	%f27, %f25, %f22, %f26;
	mul.f32 	%f28, %f22, %f27;
	fma.rn.f32 	%f29, %f28, %f21, %f20;
	add.f32 	%f30, %f17, %f29;
	mov.f32 	%f31, 0f3F317218;
	fma.rn.f32 	%f33, %f16, %f31, %f30;
	bra.uni 	BB51_4;

BB51_2:
	lg2.approx.f32 	%f33, %f1;

BB51_4:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	mul.f32 	%f32, %f33, 0f3EDE5BD9;
	st.global.f32 	[%rd10], %f32;

BB51_5:
	ret;
}

	// .globl	vec_log1pf
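	// Element-wise log1p: out[i] = log1pf(in[i]). Inputs in roughly [-0.394, 0.65] use a
	// direct series around zero; everything else falls back to logf(1 + x).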
.visible .entry vec_log1pf(
	.param .u64 vec_log1pf_param_0,
	.param .u64 vec_log1pf_param_1,
	.param .u64 vec_log1pf_param_2
)
{
	.reg .pred 	%p<10>;
	.reg .f32 	%f<48>;
	.reg .b32 	%r<9>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_log1pf_param_0];
	ld.param.u64 	%rd2, [vec_log1pf_param_1];
	ld.param.u64 	%rd3, [vec_log1pf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB52_7;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	setp.le.f32	%p2, %f1, 0f3F266666;
	setp.ge.f32	%p3, %f1, 0fBEC9BA5E;
	and.pred  	%p4, %p3, %p2;
	@%p4 bra 	BB52_5;
	bra.uni 	BB52_2;

BB52_5:
	add.f32 	%f34, %f1, 0f40000000;
	div.approx.f32 	%f35, %f1, %f34;
	neg.f32 	%f36, %f1;
	mul.rn.f32 	%f37, %f36, %f35;
	add.rn.f32 	%f38, %f1, %f37;
	mul.f32 	%f39, %f38, %f38;
	mov.f32 	%f40, 0f3C4C4BE0;
	mov.f32 	%f41, 0f3B2063C3;
	fma.rn.f32 	%f42, %f41, %f39, %f40;
	mov.f32 	%f43, 0f3DAAAB50;
	fma.rn.f32 	%f44, %f42, %f39, %f43;
	mul.f32 	%f45, %f39, %f44;
	fma.rn.f32 	%f46, %f45, %f38, %f37;
	add.f32 	%f47, %f1, %f46;
	bra.uni 	BB52_6;

BB52_2:
	add.f32 	%f2, %f1, 0f3F800000;
	setp.gt.f32	%p5, %f2, 0f00000000;
	setp.lt.f32	%p6, %f2, 0f7F800000;
	and.pred  	%p7, %p5, %p6;
	@%p7 bra 	BB52_4;
	bra.uni 	BB52_3;

BB52_4:
	setp.lt.f32	%p8, %f2, 0f00800000;
	mul.f32 	%f9, %f2, 0f4B800000;
	selp.f32	%f10, %f9, %f2, %p8;
	selp.f32	%f11, 0fC3170000, 0fC2FE0000, %p8;
	mov.b32 	 %r5, %f10;
	and.b32  	%r6, %r5, 8388607;
	or.b32  	%r7, %r6, 1065353216;
	mov.b32 	 %f12, %r7;
	shr.u32 	%r8, %r5, 23;
	cvt.rn.f32.u32	%f13, %r8;
	add.f32 	%f14, %f11, %f13;
	setp.gt.f32	%p9, %f12, 0f3FAE147B;
	mul.f32 	%f15, %f12, 0f3F000000;
	add.f32 	%f16, %f14, 0f3F800000;
	selp.f32	%f17, %f15, %f12, %p9;
	selp.f32	%f18, %f16, %f14, %p9;
	add.f32 	%f8, %f17, 0f3F800000;
	add.f32 	%f19, %f17, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f7,%f8;
	// inline asm
	mul.f32 	%f20, %f19, %f19;
	neg.f32 	%f21, %f20;
	mul.rn.f32 	%f22, %f7, %f21;
	add.rn.f32 	%f23, %f19, %f22;
	mul.f32 	%f24, %f23, %f23;
	mov.f32 	%f25, 0f3C4C6A36;
	mov.f32 	%f26, 0f3B1E94E6;
	fma.rn.f32 	%f27, %f26, %f24, %f25;
	mov.f32 	%f28, 0f3DAAAB1A;
	fma.rn.f32 	%f29, %f27, %f24, %f28;
	mul.f32 	%f30, %f24, %f29;
	fma.rn.f32 	%f31, %f30, %f23, %f22;
	add.f32 	%f32, %f19, %f31;
	mov.f32 	%f33, 0f3F317218;
	fma.rn.f32 	%f47, %f18, %f33, %f32;
	bra.uni 	BB52_6;

BB52_3:
	lg2.approx.f32 	%f47, %f2;

BB52_6:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f47;

BB52_7:
	ret;
}

	// .globl	vec_log2f
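	// Element-wise base-2 logarithm: out[i] = log2f(in[i]) = ln(x) * log2(e) (0f3FB8AA3B).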
.visible .entry vec_log2f(
	.param .u64 vec_log2f_param_0,
	.param .u64 vec_log2f_param_1,
	.param .u64 vec_log2f_param_2
)
{
	.reg .pred 	%p<7>;
	.reg .f32 	%f<34>;
	.reg .b32 	%r<9>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_log2f_param_0];
	ld.param.u64 	%rd2, [vec_log2f_param_1];
	ld.param.u64 	%rd3, [vec_log2f_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB53_5;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	setp.gt.f32	%p2, %f1, 0f00000000;
	setp.lt.f32	%p3, %f1, 0f7F800000;
	and.pred  	%p4, %p2, %p3;
	@%p4 bra 	BB53_3;
	bra.uni 	BB53_2;

BB53_3:
	setp.lt.f32	%p5, %f1, 0f00800000;
	mul.f32 	%f7, %f1, 0f4B800000;
	selp.f32	%f8, %f7, %f1, %p5;
	selp.f32	%f9, 0fC3170000, 0fC2FE0000, %p5;
	mov.b32 	 %r5, %f8;
	and.b32  	%r6, %r5, 8388607;
	or.b32  	%r7, %r6, 1065353216;
	mov.b32 	 %f10, %r7;
	shr.u32 	%r8, %r5, 23;
	cvt.rn.f32.u32	%f11, %r8;
	add.f32 	%f12, %f9, %f11;
	setp.gt.f32	%p6, %f10, 0f3FAE147B;
	mul.f32 	%f13, %f10, 0f3F000000;
	add.f32 	%f14, %f12, 0f3F800000;
	selp.f32	%f15, %f13, %f10, %p6;
	selp.f32	%f16, %f14, %f12, %p6;
	add.f32 	%f6, %f15, 0f3F800000;
	add.f32 	%f17, %f15, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f5,%f6;
	// inline asm
	mul.f32 	%f18, %f17, %f17;
	neg.f32 	%f19, %f18;
	mul.rn.f32 	%f20, %f5, %f19;
	add.rn.f32 	%f21, %f17, %f20;
	mul.f32 	%f22, %f21, %f21;
	mov.f32 	%f23, 0f3C4C6A36;
	mov.f32 	%f24, 0f3B1E94E6;
	fma.rn.f32 	%f25, %f24, %f22, %f23;
	mov.f32 	%f26, 0f3DAAAB1A;
	fma.rn.f32 	%f27, %f25, %f22, %f26;
	mul.f32 	%f28, %f22, %f27;
	fma.rn.f32 	%f29, %f28, %f21, %f20;
	add.f32 	%f30, %f17, %f29;
	mov.f32 	%f31, 0f3F317218;
	fma.rn.f32 	%f33, %f16, %f31, %f30;
	bra.uni 	BB53_4;

BB53_2:
	lg2.approx.f32 	%f33, %f1;

BB53_4:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	mul.f32 	%f32, %f33, 0f3FB8AA3B;
	st.global.f32 	[%rd10], %f32;

BB53_5:
	ret;
}

	// .globl	vec_logbf
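	// Element-wise logb: out[i] = logbf(in[i]), the unbiased exponent of x. Denormals are
	// handled with clz, logbf(0) returns -inf, and NaN/inf are propagated via x*x.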
.visible .entry vec_logbf(
	.param .u64 vec_logbf_param_0,
	.param .u64 vec_logbf_param_1,
	.param .u64 vec_logbf_param_2
)
{
	.reg .pred 	%p<5>;
	.reg .f32 	%f<10>;
	.reg .b32 	%r<11>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_logbf_param_0];
	ld.param.u64 	%rd2, [vec_logbf_param_1];
	ld.param.u64 	%rd3, [vec_logbf_param_2];
	mov.u32 	%r2, %tid.x;
	mov.u32 	%r3, %ntid.x;
	mov.u32 	%r4, %ctaid.x;
	mad.lo.s32 	%r5, %r3, %r4, %r2;
	cvt.s64.s32	%rd1, %r5;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB54_5;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	abs.f32 	%f5, %f1;
	mov.b32 	 %r1, %f5;
	setp.lt.u32	%p2, %r1, 8388608;
	@%p2 bra 	BB54_3;
	bra.uni 	BB54_2;

BB54_3:
	clz.b32 	%r8, %r1;
	mov.u32 	%r9, -118;
	sub.s32 	%r10, %r9, %r8;
	cvt.rn.f32.s32	%f8, %r10;
	setp.eq.f32	%p4, %f1, 0f00000000;
	selp.f32	%f9, 0fFF800000, %f8, %p4;
	bra.uni 	BB54_4;

BB54_2:
	shr.u32 	%r6, %r1, 23;
	add.s32 	%r7, %r6, -127;
	cvt.rn.f32.s32	%f6, %r7;
	mul.f32 	%f7, %f1, %f1;
	setp.gt.u32	%p3, %r1, 2139095039;
	selp.f32	%f9, %f7, %f6, %p3;

BB54_4:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f9;

BB54_5:
	ret;
}

	// .globl	vec_logf
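	// Element-wise natural logarithm: out[i] = logf(in[i]). Finite positive inputs split x
	// into mantissa and exponent, evaluate a log polynomial around m = 1, and add e*ln(2)
	// (0f3F317218); other inputs fall back to lg2.approx.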
.visible .entry vec_logf(
	.param .u64 vec_logf_param_0,
	.param .u64 vec_logf_param_1,
	.param .u64 vec_logf_param_2
)
{
	.reg .pred 	%p<7>;
	.reg .f32 	%f<33>;
	.reg .b32 	%r<9>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_logf_param_0];
	ld.param.u64 	%rd2, [vec_logf_param_1];
	ld.param.u64 	%rd3, [vec_logf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB55_5;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	setp.gt.f32	%p2, %f1, 0f00000000;
	setp.lt.f32	%p3, %f1, 0f7F800000;
	and.pred  	%p4, %p2, %p3;
	@%p4 bra 	BB55_3;
	bra.uni 	BB55_2;

BB55_3:
	setp.lt.f32	%p5, %f1, 0f00800000;
	mul.f32 	%f7, %f1, 0f4B800000;
	selp.f32	%f8, %f7, %f1, %p5;
	selp.f32	%f9, 0fC3170000, 0fC2FE0000, %p5;
	mov.b32 	 %r5, %f8;
	and.b32  	%r6, %r5, 8388607;
	or.b32  	%r7, %r6, 1065353216;
	mov.b32 	 %f10, %r7;
	shr.u32 	%r8, %r5, 23;
	cvt.rn.f32.u32	%f11, %r8;
	add.f32 	%f12, %f9, %f11;
	setp.gt.f32	%p6, %f10, 0f3FAE147B;
	mul.f32 	%f13, %f10, 0f3F000000;
	add.f32 	%f14, %f12, 0f3F800000;
	selp.f32	%f15, %f13, %f10, %p6;
	selp.f32	%f16, %f14, %f12, %p6;
	add.f32 	%f6, %f15, 0f3F800000;
	add.f32 	%f17, %f15, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f5,%f6;
	// inline asm
	mul.f32 	%f18, %f17, %f17;
	neg.f32 	%f19, %f18;
	mul.rn.f32 	%f20, %f5, %f19;
	add.rn.f32 	%f21, %f17, %f20;
	mul.f32 	%f22, %f21, %f21;
	mov.f32 	%f23, 0f3C4C6A36;
	mov.f32 	%f24, 0f3B1E94E6;
	fma.rn.f32 	%f25, %f24, %f22, %f23;
	mov.f32 	%f26, 0f3DAAAB1A;
	fma.rn.f32 	%f27, %f25, %f22, %f26;
	mul.f32 	%f28, %f22, %f27;
	fma.rn.f32 	%f29, %f28, %f21, %f20;
	add.f32 	%f30, %f17, %f29;
	mov.f32 	%f31, 0f3F317218;
	fma.rn.f32 	%f32, %f16, %f31, %f30;
	bra.uni 	BB55_4;

BB55_2:
	lg2.approx.f32 	%f32, %f1;

BB55_4:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f32;

BB55_5:
	ret;
}

	// .globl	vec_normcdff
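// vec_normcdff: standard normal CDF. The input is scaled by -1/sqrt(2)
// (0fBF3504F3 plus a low-order correction term), pushed through an erfc-style
// polynomial/exponential approximation, and the store is scaled by 0.5
// (0f3F000000), which matches normcdff(x) = 0.5f * erfcf(-x / sqrt(2)).
// Arguments are clamped to |x| <= 14.5 (0x41680000) before the approximation.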
.visible .entry vec_normcdff(
	.param .u64 vec_normcdff_param_0,
	.param .u64 vec_normcdff_param_1,
	.param .u64 vec_normcdff_param_2
)
{
	.reg .pred 	%p<6>;
	.reg .f32 	%f<81>;
	.reg .b32 	%r<8>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_normcdff_param_0];
	ld.param.u64 	%rd2, [vec_normcdff_param_1];
	ld.param.u64 	%rd3, [vec_normcdff_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB56_4;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f13, [%rd7];
	abs.f32 	%f14, %f13;
	setp.gt.f32	%p2, %f14, 0f41680000;
	mov.b32 	 %r5, %f13;
	and.b32  	%r6, %r5, -2147483648;
	or.b32  	%r7, %r6, 1097334784;
	mov.b32 	 %f15, %r7;
	selp.f32	%f16, %f15, %f13, %p2;
	mov.f32 	%f17, 0fBF3504F3;
	mul.rn.f32 	%f1, %f16, %f17;
	neg.f32 	%f18, %f1;
	fma.rn.f32 	%f19, %f16, %f17, %f18;
	mov.f32 	%f20, 0fB24FE77A;
	fma.rn.f32 	%f2, %f16, %f20, %f19;
	add.rn.f32 	%f3, %f1, %f2;
	abs.f32 	%f21, %f3;
	add.f32 	%f8, %f21, 0f40800000;
	// inline asm
	rcp.approx.ftz.f32 %f7,%f8;
	// inline asm
	add.f32 	%f22, %f21, 0fC0800000;
	mul.rn.f32 	%f23, %f22, %f7;
	add.f32 	%f24, %f23, 0f3F800000;
	mov.f32 	%f25, 0fC0800000;
	fma.rn.f32 	%f26, %f25, %f24, %f21;
	neg.f32 	%f27, %f23;
	fma.rn.f32 	%f28, %f27, %f21, %f26;
	fma.rn.f32 	%f29, %f7, %f28, %f23;
	mov.f32 	%f30, 0f3BE6E05B;
	mov.f32 	%f31, 0f3A69A091;
	fma.rn.f32 	%f32, %f31, %f29, %f30;
	mov.f32 	%f33, 0fBC81FB4B;
	fma.rn.f32 	%f34, %f32, %f29, %f33;
	mov.f32 	%f35, 0f3D15373B;
	fma.rn.f32 	%f36, %f34, %f29, %f35;
	mov.f32 	%f37, 0fBD887C5A;
	fma.rn.f32 	%f38, %f36, %f29, %f37;
	mov.f32 	%f39, 0f3DC021D5;
	fma.rn.f32 	%f40, %f38, %f29, %f39;
	mov.f32 	%f41, 0fBDCED424;
	fma.rn.f32 	%f42, %f40, %f29, %f41;
	mov.f32 	%f43, 0f3D8B74DE;
	fma.rn.f32 	%f44, %f42, %f29, %f43;
	mov.f32 	%f45, 0f3C7BF170;
	fma.rn.f32 	%f46, %f44, %f29, %f45;
	mov.f32 	%f47, 0fBE0EF8D4;
	fma.rn.f32 	%f48, %f46, %f29, %f47;
	mov.f32 	%f49, 0f3F9DD2C9;
	fma.rn.f32 	%f50, %f48, %f29, %f49;
	mov.f32 	%f51, 0f3F800000;
	mov.f32 	%f52, 0f40000000;
	fma.rn.f32 	%f10, %f52, %f21, %f51;
	// inline asm
	rcp.approx.ftz.f32 %f9,%f10;
	// inline asm
	mul.rn.f32 	%f53, %f50, %f9;
	mul.f32 	%f54, %f53, 0fC0000000;
	fma.rn.f32 	%f55, %f21, %f54, %f50;
	sub.f32 	%f56, %f55, %f53;
	fma.rn.f32 	%f57, %f56, %f9, %f53;
	mul.f32 	%f58, %f21, %f21;
	neg.f32 	%f59, %f58;
	mul.f32 	%f60, %f58, 0fBFB8AA3B;
	cvt.rzi.f32.f32	%f61, %f60;
	mov.f32 	%f62, 0fBF317200;
	fma.rn.f32 	%f63, %f61, %f62, %f59;
	mov.f32 	%f64, 0fB5BFBE8E;
	fma.rn.f32 	%f65, %f61, %f64, %f63;
	mul.f32 	%f12, %f65, 0f3FB8AA3B;
	// inline asm
	ex2.approx.ftz.f32 %f11,%f12;
	// inline asm
	add.f32 	%f66, %f61, 0f00000000;
	ex2.approx.f32 	%f67, %f66;
	mul.f32 	%f68, %f11, %f67;
	neg.f32 	%f69, %f21;
	fma.rn.f32 	%f70, %f69, %f21, %f58;
	fma.rn.f32 	%f71, %f68, %f70, %f68;
	mul.f32 	%f72, %f57, %f71;
	setp.gt.f32	%p3, %f21, 0f4120E148;
	selp.f32	%f73, 0f00000000, %f72, %p3;
	setp.lt.f32	%p4, %f3, 0f00000000;
	sub.f32 	%f74, %f52, %f73;
	selp.f32	%f80, %f74, %f73, %p4;
	setp.geu.f32	%p5, %f16, 0fBF800000;
	@%p5 bra 	BB56_3;

	sub.f32 	%f75, %f1, %f3;
	add.rn.f32 	%f76, %f75, %f2;
	mul.f32 	%f77, %f3, 0fC0000000;
	mul.f32 	%f78, %f77, %f80;
	fma.rn.f32 	%f80, %f78, %f76, %f80;

BB56_3:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	mul.f32 	%f79, %f80, 0f3F000000;
	st.global.f32 	[%rd10], %f79;

BB56_4:
	ret;
}

	// .globl	vec_normcdfinvf
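// vec_normcdfinvf: inverse of the standard normal CDF. The argument is doubled
// up front and the final result is scaled by -sqrt(2) (0fBFB504F3), consistent
// with normcdfinvf(p) = -sqrt(2) * erfcinvf(2*p); a central-range polynomial in
// the log term and a tail approximation built on rsqrt of it cover the two cases.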
.visible .entry vec_normcdfinvf(
	.param .u64 vec_normcdfinvf_param_0,
	.param .u64 vec_normcdfinvf_param_1,
	.param .u64 vec_normcdfinvf_param_2
)
{
	.reg .pred 	%p<6>;
	.reg .f32 	%f<60>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_normcdfinvf_param_0];
	ld.param.u64 	%rd2, [vec_normcdfinvf_param_1];
	ld.param.u64 	%rd3, [vec_normcdfinvf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB57_5;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f7, [%rd7];
	add.f32 	%f1, %f7, %f7;
	neg.f32 	%f2, %f1;
	mov.f32 	%f8, 0f40000000;
	add.rn.f32 	%f3, %f8, %f2;
	setp.le.f32	%p2, %f1, 0f3FFF9097;
	setp.ge.f32	%p3, %f1, 0f3B5ED289;
	and.pred  	%p4, %p3, %p2;
	@%p4 bra 	BB57_3;
	bra.uni 	BB57_2;

BB57_3:
	mul.rn.f32 	%f35, %f3, %f1;
	// inline asm
	lg2.approx.ftz.f32 %f34,%f35;
	// inline asm
	neg.f32 	%f36, %f34;
	mov.f32 	%f37, 0f3221F645;
	mov.f32 	%f38, 0fAF8A6370;
	fma.rn.f32 	%f39, %f38, %f36, %f37;
	mov.f32 	%f40, 0fB4016FDA;
	fma.rn.f32 	%f41, %f39, %f36, %f40;
	mov.f32 	%f42, 0f3468F846;
	fma.rn.f32 	%f43, %f41, %f36, %f42;
	mov.f32 	%f44, 0f370742AA;
	fma.rn.f32 	%f45, %f43, %f36, %f44;
	mov.f32 	%f46, 0fB804DB4D;
	fma.rn.f32 	%f47, %f45, %f36, %f46;
	mov.f32 	%f48, 0fBA4AFEA1;
	fma.rn.f32 	%f49, %f47, %f36, %f48;
	mov.f32 	%f50, 0f3BB5C027;
	fma.rn.f32 	%f51, %f49, %f36, %f50;
	mov.f32 	%f52, 0f3E24AE0F;
	fma.rn.f32 	%f53, %f51, %f36, %f52;
	mov.f32 	%f54, 0f3F62DFC4;
	fma.rn.f32 	%f55, %f53, %f36, %f54;
	fma.rn.f32 	%f59, %f55, %f2, %f55;
	bra.uni 	BB57_4;

BB57_2:
	setp.gt.f32	%p5, %f1, 0f3F800000;
	selp.f32	%f13, %f3, %f1, %p5;
	lg2.approx.f32 	%f14, %f13;
	neg.f32 	%f10, %f14;
	// inline asm
	rsqrt.approx.ftz.f32 %f9,%f10;
	// inline asm
	mov.f32 	%f15, 0f42FEF829;
	mov.f32 	%f16, 0fC27C73F1;
	fma.rn.f32 	%f17, %f16, %f9, %f15;
	mov.f32 	%f18, 0fC2E4361C;
	fma.rn.f32 	%f19, %f17, %f9, %f18;
	mov.f32 	%f20, 0f42714D9B;
	fma.rn.f32 	%f21, %f19, %f9, %f20;
	mov.f32 	%f22, 0fC1AE51B3;
	fma.rn.f32 	%f23, %f21, %f9, %f22;
	mov.f32 	%f24, 0f40CEF504;
	fma.rn.f32 	%f25, %f23, %f9, %f24;
	mov.f32 	%f26, 0fBFEA9E05;
	fma.rn.f32 	%f27, %f25, %f9, %f26;
	mov.f32 	%f28, 0fBCF871F4;
	fma.rn.f32 	%f29, %f27, %f9, %f28;
	mov.f32 	%f30, 0f3F553775;
	fma.rn.f32 	%f31, %f29, %f9, %f30;
	// inline asm
	rcp.approx.ftz.f32 %f11,%f9;
	// inline asm
	mul.rn.f32 	%f32, %f31, %f11;
	neg.f32 	%f33, %f32;
	selp.f32	%f59, %f33, %f32, %p5;

BB57_4:
	cvta.to.global.u64 	%rd8, %rd2;
	mul.f32 	%f56, %f59, 0fBFB504F3;
	mov.f32 	%f57, 0f00000000;
	add.rn.f32 	%f58, %f56, %f57;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f58;

BB57_5:
	ret;
}

	// .globl	vec_rcbrtf
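// vec_rcbrtf: reciprocal cube root, out[i] = rcbrtf(x[i]). An initial estimate
// comes from exp2(-log2|x| / 3) (note 0fBEAAAAAB = -1/3), refined with one
// Newton-style step, and the sign of x is restored afterwards; x == 0 and
// x == +/-inf take the rcp.approx path instead (giving +/-inf and +/-0).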
.visible .entry vec_rcbrtf(
	.param .u64 vec_rcbrtf_param_0,
	.param .u64 vec_rcbrtf_param_1,
	.param .u64 vec_rcbrtf_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<21>;
	.reg .b32 	%r<6>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_rcbrtf_param_0];
	ld.param.u64 	%rd2, [vec_rcbrtf_param_1];
	ld.param.u64 	%rd3, [vec_rcbrtf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB58_4;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	abs.f32 	%f7, %f1;
	lg2.approx.f32 	%f8, %f7;
	mul.f32 	%f6, %f8, 0fBEAAAAAB;
	// inline asm
	ex2.approx.ftz.f32 %f5,%f6;
	// inline asm
	mul.f32 	%f9, %f7, %f5;
	neg.f32 	%f10, %f9;
	mul.f32 	%f11, %f5, %f5;
	mov.f32 	%f12, 0f3F800000;
	fma.rn.f32 	%f13, %f11, %f10, %f12;
	mul.f32 	%f14, %f5, 0f3EAAAAAB;
	fma.rn.f32 	%f15, %f13, %f14, %f5;
	mov.b32 	 %r5, %f1;
	setp.lt.s32	%p2, %r5, 0;
	neg.f32 	%f16, %f15;
	selp.f32	%f20, %f16, %f15, %p2;
	add.f32 	%f17, %f1, %f1;
	setp.neu.f32	%p3, %f17, %f1;
	@%p3 bra 	BB58_3;

	// inline asm
	rcp.approx.ftz.f32 %f20,%f1;
	// inline asm

BB58_3:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f20;

BB58_4:
	ret;
}

	// .globl	vec_rintf
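// vec_rintf: out[i] = rintf(x[i]); cvt.rni.f32.f32 rounds to the nearest
// integer with ties to even.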
.visible .entry vec_rintf(
	.param .u64 vec_rintf_param_0,
	.param .u64 vec_rintf_param_1,
	.param .u64 vec_rintf_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_rintf_param_0];
	ld.param.u64 	%rd2, [vec_rintf_param_1];
	ld.param.u64 	%rd3, [vec_rintf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB59_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	cvt.rni.f32.f32	%f2, %f1;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f2;

BB59_2:
	ret;
}

	// .globl	vec_roundf
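// vec_roundf: out[i] = roundf(x[i]) (ties away from zero): copysign(0.5f, x)
// is added and the sum truncated; |x| above 2^23 (0f4B000000) is already
// integral and passes through unchanged, and |x| < 0.5 is truncated directly
// so the sign of zero is kept.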
.visible .entry vec_roundf(
	.param .u64 vec_roundf_param_0,
	.param .u64 vec_roundf_param_1,
	.param .u64 vec_roundf_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<10>;
	.reg .b32 	%r<8>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_roundf_param_0];
	ld.param.u64 	%rd2, [vec_roundf_param_1];
	ld.param.u64 	%rd3, [vec_roundf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB60_4;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	abs.f32 	%f5, %f1;
	mov.b32 	 %r5, %f1;
	and.b32  	%r6, %r5, -2147483648;
	or.b32  	%r7, %r6, 1056964608;
	mov.b32 	 %f6, %r7;
	add.f32 	%f7, %f1, %f6;
	cvt.rzi.f32.f32	%f8, %f7;
	setp.gt.f32	%p2, %f5, 0f4B000000;
	selp.f32	%f9, %f1, %f8, %p2;
	setp.geu.f32	%p3, %f5, 0f3F000000;
	@%p3 bra 	BB60_3;

	cvt.rzi.f32.f32	%f9, %f1;

BB60_3:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f9;

BB60_4:
	ret;
}

	// .globl	vec_rsqrtf
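// vec_rsqrtf: out[i] = rsqrtf(x[i]) via the hardware approximation
// rsqrt.approx.f32 (not a correctly rounded result).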
.visible .entry vec_rsqrtf(
	.param .u64 vec_rsqrtf_param_0,
	.param .u64 vec_rsqrtf_param_1,
	.param .u64 vec_rsqrtf_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_rsqrtf_param_0];
	ld.param.u64 	%rd2, [vec_rsqrtf_param_1];
	ld.param.u64 	%rd3, [vec_rsqrtf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB61_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	rsqrt.approx.f32 	%f2, %f1;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f2;

BB61_2:
	ret;
}

	// .globl	vec_sinf
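// vec_sinf: out[i] = sinf(x[i]). Arguments are reduced by multiples of pi/2
// (2/pi = 0f3F22F983, pi/2 split across 0f3FC90FDA / 0f33A22168 / 0f27C234C5);
// inputs above 0f47CE4780 take the large-argument path, a fixed-point
// reduction against the 2/pi table __cudart_i2opi_f using the 28-byte local
// scratch buffer. The low two bits of the quadrant select between the sine and
// cosine polynomials and the final sign flip.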
.visible .entry vec_sinf(
	.param .u64 vec_sinf_param_0,
	.param .u64 vec_sinf_param_1,
	.param .u64 vec_sinf_param_2
)
{
	.local .align 4 .b8 	__local_depot62[28];
	.reg .b64 	%SP;
	.reg .b64 	%SPL;
	.reg .pred 	%p<15>;
	.reg .f32 	%f<48>;
	.reg .b32 	%r<96>;
	.reg .b64 	%rd<23>;


	mov.u64 	%rd22, __local_depot62;
	cvta.local.u64 	%SP, %rd22;
	ld.param.u64 	%rd9, [vec_sinf_param_0];
	ld.param.u64 	%rd7, [vec_sinf_param_1];
	ld.param.u64 	%rd8, [vec_sinf_param_2];
	add.u64 	%rd10, %SP, 0;
	cvta.to.local.u64 	%rd1, %rd10;
	mov.u32 	%r38, %ntid.x;
	mov.u32 	%r39, %ctaid.x;
	mov.u32 	%r40, %tid.x;
	mad.lo.s32 	%r1, %r38, %r39, %r40;
	cvt.s64.s32	%rd11, %r1;
	setp.ge.u64	%p1, %rd11, %rd9;
	@%p1 bra 	BB62_24;

	cvta.to.global.u64 	%rd12, %rd8;
	mul.wide.s32 	%rd13, %r1, 4;
	add.s64 	%rd14, %rd12, %rd13;
	ld.global.f32 	%f43, [%rd14];
	abs.f32 	%f19, %f43;
	setp.neu.f32	%p2, %f19, 0f7F800000;
	@%p2 bra 	BB62_3;

	mov.f32 	%f20, 0f00000000;
	mul.rn.f32 	%f43, %f43, %f20;

BB62_3:
	mul.f32 	%f21, %f43, 0f3F22F983;
	cvt.rni.s32.f32	%r95, %f21;
	cvt.rn.f32.s32	%f22, %r95;
	neg.f32 	%f23, %f22;
	mov.f32 	%f24, 0f3FC90FDA;
	fma.rn.f32 	%f25, %f23, %f24, %f43;
	mov.f32 	%f26, 0f33A22168;
	fma.rn.f32 	%f27, %f23, %f26, %f25;
	mov.f32 	%f28, 0f27C234C5;
	fma.rn.f32 	%f44, %f23, %f28, %f27;
	abs.f32 	%f29, %f43;
	setp.leu.f32	%p3, %f29, 0f47CE4780;
	@%p3 bra 	BB62_13;

	mov.b32 	 %r3, %f43;
	shr.u32 	%r4, %r3, 23;
	bfe.u32 	%r43, %r3, 23, 8;
	add.s32 	%r44, %r43, -128;
	shl.b32 	%r45, %r3, 8;
	or.b32  	%r5, %r45, -2147483648;
	shr.u32 	%r6, %r44, 5;
	mov.u32 	%r87, 0;
	mov.u64 	%rd20, __cudart_i2opi_f;
	mov.u32 	%r86, -6;
	mov.u64 	%rd21, %rd1;

BB62_5:
	.pragma "nounroll";
	mov.u64 	%rd3, %rd21;
	ld.const.u32 	%r48, [%rd20];
	// inline asm
	{
	mad.lo.cc.u32   %r46, %r48, %r5, %r87;
	madc.hi.u32     %r87, %r48, %r5,  0;
	}
	// inline asm
	st.local.u32 	[%rd3], %r46;
	add.s64 	%rd4, %rd3, 4;
	add.s64 	%rd20, %rd20, 4;
	add.s32 	%r86, %r86, 1;
	setp.ne.s32	%p4, %r86, 0;
	mov.u64 	%rd21, %rd4;
	@%p4 bra 	BB62_5;

	and.b32  	%r11, %r3, -2147483648;
	st.local.u32 	[%rd1+24], %r87;
	mov.u32 	%r51, 6;
	sub.s32 	%r52, %r51, %r6;
	mul.wide.s32 	%rd16, %r52, 4;
	add.s64 	%rd6, %rd1, %rd16;
	ld.local.u32 	%r88, [%rd6];
	ld.local.u32 	%r89, [%rd6+-4];
	and.b32  	%r14, %r4, 31;
	setp.eq.s32	%p5, %r14, 0;
	@%p5 bra 	BB62_8;

	mov.u32 	%r53, 32;
	sub.s32 	%r54, %r53, %r14;
	shr.u32 	%r55, %r89, %r54;
	shl.b32 	%r56, %r88, %r14;
	add.s32 	%r88, %r55, %r56;
	ld.local.u32 	%r57, [%rd6+-8];
	shr.u32 	%r58, %r57, %r54;
	shl.b32 	%r59, %r89, %r14;
	add.s32 	%r89, %r58, %r59;

BB62_8:
	shr.u32 	%r60, %r89, 30;
	shl.b32 	%r61, %r88, 2;
	add.s32 	%r90, %r60, %r61;
	shl.b32 	%r20, %r89, 2;
	shr.u32 	%r62, %r90, 31;
	shr.u32 	%r63, %r88, 30;
	add.s32 	%r21, %r62, %r63;
	setp.eq.s32	%p6, %r62, 0;
	mov.u32 	%r91, %r11;
	mov.u32 	%r92, %r20;
	@%p6 bra 	BB62_10;

	not.b32 	%r64, %r90;
	neg.s32 	%r22, %r20;
	setp.eq.s32	%p7, %r20, 0;
	selp.u32	%r65, 1, 0, %p7;
	add.s32 	%r90, %r65, %r64;
	xor.b32  	%r24, %r11, -2147483648;
	mov.u32 	%r91, %r24;
	mov.u32 	%r92, %r22;

BB62_10:
	mov.u32 	%r26, %r91;
	neg.s32 	%r66, %r21;
	setp.eq.s32	%p8, %r11, 0;
	selp.b32	%r95, %r21, %r66, %p8;
	clz.b32 	%r94, %r90;
	setp.eq.s32	%p9, %r94, 0;
	shl.b32 	%r67, %r90, %r94;
	mov.u32 	%r68, 32;
	sub.s32 	%r69, %r68, %r94;
	shr.u32 	%r70, %r92, %r69;
	add.s32 	%r71, %r70, %r67;
	selp.b32	%r30, %r90, %r71, %p9;
	mov.u32 	%r72, -921707870;
	mul.hi.u32 	%r93, %r30, %r72;
	setp.lt.s32	%p10, %r93, 1;
	@%p10 bra 	BB62_12;

	mul.lo.s32 	%r73, %r30, -921707870;
	shr.u32 	%r74, %r73, 31;
	shl.b32 	%r75, %r93, 1;
	add.s32 	%r93, %r74, %r75;
	add.s32 	%r94, %r94, 1;

BB62_12:
	mov.u32 	%r76, 126;
	sub.s32 	%r77, %r76, %r94;
	shl.b32 	%r78, %r77, 23;
	add.s32 	%r79, %r93, 1;
	shr.u32 	%r80, %r79, 7;
	add.s32 	%r81, %r80, 1;
	shr.u32 	%r82, %r81, 1;
	add.s32 	%r83, %r82, %r78;
	or.b32  	%r84, %r83, %r26;
	mov.b32 	 %f44, %r84;

BB62_13:
	mul.rn.f32 	%f7, %f44, %f44;
	and.b32  	%r37, %r95, 1;
	setp.eq.s32	%p11, %r37, 0;
	@%p11 bra 	BB62_15;

	mov.f32 	%f30, 0fBAB6061A;
	mov.f32 	%f31, 0f37CCF5CE;
	fma.rn.f32 	%f45, %f31, %f7, %f30;
	bra.uni 	BB62_16;

BB62_15:
	mov.f32 	%f32, 0f3C08839E;
	mov.f32 	%f33, 0fB94CA1F9;
	fma.rn.f32 	%f45, %f33, %f7, %f32;

BB62_16:
	@%p11 bra 	BB62_18;

	mov.f32 	%f34, 0f3D2AAAA5;
	fma.rn.f32 	%f35, %f45, %f7, %f34;
	mov.f32 	%f36, 0fBF000000;
	fma.rn.f32 	%f46, %f35, %f7, %f36;
	bra.uni 	BB62_19;

BB62_18:
	mov.f32 	%f37, 0fBE2AAAA3;
	fma.rn.f32 	%f38, %f45, %f7, %f37;
	mov.f32 	%f39, 0f00000000;
	fma.rn.f32 	%f46, %f38, %f7, %f39;

BB62_19:
	fma.rn.f32 	%f47, %f46, %f44, %f44;
	@%p11 bra 	BB62_21;

	mov.f32 	%f40, 0f3F800000;
	fma.rn.f32 	%f47, %f46, %f7, %f40;

BB62_21:
	and.b32  	%r85, %r95, 2;
	setp.eq.s32	%p14, %r85, 0;
	@%p14 bra 	BB62_23;

	mov.f32 	%f41, 0f00000000;
	mov.f32 	%f42, 0fBF800000;
	fma.rn.f32 	%f47, %f47, %f42, %f41;

BB62_23:
	cvta.to.global.u64 	%rd17, %rd7;
	add.s64 	%rd19, %rd17, %rd13;
	st.global.f32 	[%rd19], %f47;

BB62_24:
	ret;
}

	// .globl	vec_sinhf
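// vec_sinhf: out[i] = sinhf(x[i]). |x| < 1 uses the odd polynomial
// x + x^3*P(x^2); larger arguments are formed from the exponential as
// 0.5*exp(|x|) - 0.5*exp(-|x|), overflow to +/-inf once |x| reaches 90
// (0f42B40000), and the sign of x is merged back in at the end.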
.visible .entry vec_sinhf(
	.param .u64 vec_sinhf_param_0,
	.param .u64 vec_sinhf_param_1,
	.param .u64 vec_sinhf_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<32>;
	.reg .b32 	%r<10>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_sinhf_param_0];
	ld.param.u64 	%rd2, [vec_sinhf_param_1];
	ld.param.u64 	%rd3, [vec_sinhf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB63_5;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	abs.f32 	%f2, %f1;
	setp.ltu.f32	%p2, %f2, 0f3F800000;
	@%p2 bra 	BB63_3;
	bra.uni 	BB63_2;

BB63_3:
	mul.f32 	%f22, %f1, %f1;
	mov.f32 	%f23, 0f394FFF49;
	mov.f32 	%f24, 0f363D0ADA;
	fma.rn.f32 	%f25, %f24, %f22, %f23;
	mov.f32 	%f26, 0f3C08889A;
	fma.rn.f32 	%f27, %f25, %f22, %f26;
	mov.f32 	%f28, 0f3E2AAAAB;
	fma.rn.f32 	%f29, %f27, %f22, %f28;
	mul.f32 	%f30, %f22, %f29;
	fma.rn.f32 	%f31, %f30, %f1, %f1;
	bra.uni 	BB63_4;

BB63_2:
	mul.f32 	%f8, %f2, 0f3FB8AA3B;
	cvt.rzi.f32.f32	%f9, %f8;
	mov.f32 	%f10, 0fBF317200;
	fma.rn.f32 	%f11, %f9, %f10, %f2;
	mov.f32 	%f12, 0fB5BFBE8E;
	fma.rn.f32 	%f13, %f9, %f12, %f11;
	mul.f32 	%f7, %f13, 0f3FB8AA3B;
	// inline asm
	ex2.approx.ftz.f32 %f6,%f7;
	// inline asm
	add.f32 	%f14, %f9, 0fC0000000;
	ex2.approx.f32 	%f15, %f14;
	mul.f32 	%f16, %f6, %f15;
	mov.f32 	%f17, 0f3E000000;
	div.approx.f32 	%f18, %f17, %f16;
	neg.f32 	%f19, %f18;
	mov.f32 	%f20, 0f40000000;
	fma.rn.f32 	%f21, %f20, %f16, %f19;
	mov.b32 	 %r5, %f21;
	setp.ltu.f32	%p3, %f2, 0f42B40000;
	selp.b32	%r6, %r5, 2139095040, %p3;
	mov.b32 	 %r7, %f1;
	and.b32  	%r8, %r7, -2147483648;
	or.b32  	%r9, %r6, %r8;
	mov.b32 	 %f31, %r9;

BB63_4:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f31;

BB63_5:
	ret;
}

	// .globl	vec_sinpif
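// vec_sinpif: out[i] = sinpif(x[i]) = sin(pi * x). The argument is reduced to
// a quarter period via round(2x), scaled by pi split into 0f40490FDA +
// 0f34222169, and run through the same quadrant-selected sine/cosine
// polynomials as vec_sinf; inputs that are already integers are sent through
// the x*0 path, which also turns +/-inf into NaN.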
.visible .entry vec_sinpif(
	.param .u64 vec_sinpif_param_0,
	.param .u64 vec_sinpif_param_1,
	.param .u64 vec_sinpif_param_2
)
{
	.reg .pred 	%p<7>;
	.reg .f32 	%f<42>;
	.reg .b32 	%r<8>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_sinpif_param_0];
	ld.param.u64 	%rd2, [vec_sinpif_param_1];
	ld.param.u64 	%rd3, [vec_sinpif_param_2];
	mov.u32 	%r3, %tid.x;
	mov.u32 	%r4, %ntid.x;
	mov.u32 	%r5, %ctaid.x;
	mad.lo.s32 	%r6, %r4, %r5, %r3;
	cvt.s64.s32	%rd1, %r6;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB64_14;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	add.f32 	%f17, %f1, %f1;
	cvt.rni.f32.f32	%f18, %f17;
	cvt.rzi.s32.f32	%r1, %f18;
	neg.f32 	%f19, %f18;
	mov.f32 	%f20, 0f3F000000;
	fma.rn.f32 	%f21, %f19, %f20, %f1;
	mul.f32 	%f22, %f21, 0f34222169;
	mov.f32 	%f23, 0f40490FDA;
	fma.rn.f32 	%f2, %f21, %f23, %f22;
	mul.rn.f32 	%f3, %f2, %f2;
	and.b32  	%r2, %r1, 1;
	setp.eq.s32	%p2, %r2, 0;
	@%p2 bra 	BB64_3;

	mov.f32 	%f24, 0fBAB6061A;
	mov.f32 	%f25, 0f37CCF5CE;
	fma.rn.f32 	%f39, %f25, %f3, %f24;
	bra.uni 	BB64_4;

BB64_3:
	mov.f32 	%f26, 0f3C08839E;
	mov.f32 	%f27, 0fB94CA1F9;
	fma.rn.f32 	%f39, %f27, %f3, %f26;

BB64_4:
	@%p2 bra 	BB64_6;

	mov.f32 	%f28, 0f3D2AAAA5;
	fma.rn.f32 	%f29, %f39, %f3, %f28;
	mov.f32 	%f30, 0fBF000000;
	fma.rn.f32 	%f40, %f29, %f3, %f30;
	bra.uni 	BB64_7;

BB64_6:
	mov.f32 	%f31, 0fBE2AAAA3;
	fma.rn.f32 	%f32, %f39, %f3, %f31;
	mov.f32 	%f33, 0f00000000;
	fma.rn.f32 	%f40, %f32, %f3, %f33;

BB64_7:
	fma.rn.f32 	%f41, %f40, %f2, %f2;
	@%p2 bra 	BB64_9;

	mov.f32 	%f34, 0f3F800000;
	fma.rn.f32 	%f41, %f40, %f3, %f34;

BB64_9:
	and.b32  	%r7, %r1, 2;
	setp.eq.s32	%p5, %r7, 0;
	@%p5 bra 	BB64_11;

	mov.f32 	%f35, 0f00000000;
	mov.f32 	%f36, 0fBF800000;
	fma.rn.f32 	%f41, %f41, %f36, %f35;

BB64_11:
	cvt.rzi.f32.f32	%f37, %f1;
	setp.neu.f32	%p6, %f37, %f1;
	@%p6 bra 	BB64_13;

	mov.f32 	%f38, 0f00000000;
	mul.rn.f32 	%f41, %f1, %f38;

BB64_13:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f41;

BB64_14:
	ret;
}

	// .globl	vec_sqrtf
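// vec_sqrtf: out[i] = sqrtf(x[i]) using the correctly rounded sqrt.rn.f32.
// Every kernel in this file follows the same guarded one-element-per-thread
// pattern (param_0 = element count, param_1 = output pointer, param_2 = input
// pointer). As a rough illustration only -- the original JCudaVec source is
// not part of this file and the names are made up -- a CUDA C kernel of this
// shape would compile to PTX like the body below:
//
//   extern "C" __global__ void vec_sqrtf(size_t n, float *result, float *x)
//   {
//       int i = blockIdx.x * blockDim.x + threadIdx.x;  // mad.lo.s32 of ntid, ctaid, tid
//       if (i < n)                                       // setp.ge.u64 guard
//           result[i] = sqrtf(x[i]);                     // sqrt.rn.f32
//   }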
.visible .entry vec_sqrtf(
	.param .u64 vec_sqrtf_param_0,
	.param .u64 vec_sqrtf_param_1,
	.param .u64 vec_sqrtf_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_sqrtf_param_0];
	ld.param.u64 	%rd2, [vec_sqrtf_param_1];
	ld.param.u64 	%rd3, [vec_sqrtf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB65_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	sqrt.rn.f32 	%f2, %f1;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f2;

BB65_2:
	ret;
}

	// .globl	vec_tanf
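// vec_tanf: out[i] = tanf(x[i]). Argument reduction mirrors vec_sinf (the
// three-constant pi/2 split for small inputs, the __cudart_i2opi_f table for
// large ones); the core is a small rational approximation in x^2, and odd
// quadrants return -1/t via the div.rn at BB66_14.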
.visible .entry vec_tanf(
	.param .u64 vec_tanf_param_0,
	.param .u64 vec_tanf_param_1,
	.param .u64 vec_tanf_param_2
)
{
	.local .align 4 .b8 	__local_depot66[28];
	.reg .b64 	%SP;
	.reg .b64 	%SPL;
	.reg .pred 	%p<12>;
	.reg .f32 	%f<33>;
	.reg .b32 	%r<95>;
	.reg .b64 	%rd<23>;


	mov.u64 	%rd22, __local_depot66;
	cvta.local.u64 	%SP, %rd22;
	ld.param.u64 	%rd9, [vec_tanf_param_0];
	ld.param.u64 	%rd7, [vec_tanf_param_1];
	ld.param.u64 	%rd8, [vec_tanf_param_2];
	add.u64 	%rd10, %SP, 0;
	cvta.to.local.u64 	%rd1, %rd10;
	mov.u32 	%r37, %ntid.x;
	mov.u32 	%r38, %ctaid.x;
	mov.u32 	%r39, %tid.x;
	mad.lo.s32 	%r1, %r37, %r38, %r39;
	cvt.s64.s32	%rd11, %r1;
	setp.ge.u64	%p1, %rd11, %rd9;
	@%p1 bra 	BB66_16;

	cvta.to.global.u64 	%rd12, %rd8;
	mul.wide.s32 	%rd13, %r1, 4;
	add.s64 	%rd14, %rd12, %rd13;
	ld.global.f32 	%f30, [%rd14];
	abs.f32 	%f10, %f30;
	setp.neu.f32	%p2, %f10, 0f7F800000;
	@%p2 bra 	BB66_3;

	mov.f32 	%f11, 0f00000000;
	mul.rn.f32 	%f30, %f30, %f11;

BB66_3:
	mul.f32 	%f12, %f30, 0f3F22F983;
	cvt.rni.s32.f32	%r94, %f12;
	cvt.rn.f32.s32	%f13, %r94;
	neg.f32 	%f14, %f13;
	mov.f32 	%f15, 0f3FC90FDA;
	fma.rn.f32 	%f16, %f14, %f15, %f30;
	mov.f32 	%f17, 0f33A22168;
	fma.rn.f32 	%f18, %f14, %f17, %f16;
	mov.f32 	%f19, 0f27C234C5;
	fma.rn.f32 	%f31, %f14, %f19, %f18;
	abs.f32 	%f20, %f30;
	setp.leu.f32	%p3, %f20, 0f47CE4780;
	@%p3 bra 	BB66_13;

	mov.b32 	 %r3, %f30;
	shr.u32 	%r4, %r3, 23;
	bfe.u32 	%r42, %r3, 23, 8;
	add.s32 	%r43, %r42, -128;
	shl.b32 	%r44, %r3, 8;
	or.b32  	%r5, %r44, -2147483648;
	shr.u32 	%r6, %r43, 5;
	mov.u32 	%r86, 0;
	mov.u64 	%rd20, __cudart_i2opi_f;
	mov.u32 	%r85, -6;
	mov.u64 	%rd21, %rd1;

BB66_5:
	.pragma "nounroll";
	mov.u64 	%rd3, %rd21;
	ld.const.u32 	%r47, [%rd20];
	// inline asm
	{
	mad.lo.cc.u32   %r45, %r47, %r5, %r86;
	madc.hi.u32     %r86, %r47, %r5,  0;
	}
	// inline asm
	st.local.u32 	[%rd3], %r45;
	add.s64 	%rd4, %rd3, 4;
	add.s64 	%rd20, %rd20, 4;
	add.s32 	%r85, %r85, 1;
	setp.ne.s32	%p4, %r85, 0;
	mov.u64 	%rd21, %rd4;
	@%p4 bra 	BB66_5;

	and.b32  	%r11, %r3, -2147483648;
	st.local.u32 	[%rd1+24], %r86;
	mov.u32 	%r50, 6;
	sub.s32 	%r51, %r50, %r6;
	mul.wide.s32 	%rd16, %r51, 4;
	add.s64 	%rd6, %rd1, %rd16;
	ld.local.u32 	%r87, [%rd6];
	ld.local.u32 	%r88, [%rd6+-4];
	and.b32  	%r14, %r4, 31;
	setp.eq.s32	%p5, %r14, 0;
	@%p5 bra 	BB66_8;

	mov.u32 	%r52, 32;
	sub.s32 	%r53, %r52, %r14;
	shr.u32 	%r54, %r88, %r53;
	shl.b32 	%r55, %r87, %r14;
	add.s32 	%r87, %r54, %r55;
	ld.local.u32 	%r56, [%rd6+-8];
	shr.u32 	%r57, %r56, %r53;
	shl.b32 	%r58, %r88, %r14;
	add.s32 	%r88, %r57, %r58;

BB66_8:
	shr.u32 	%r59, %r88, 30;
	shl.b32 	%r60, %r87, 2;
	add.s32 	%r89, %r59, %r60;
	shl.b32 	%r20, %r88, 2;
	shr.u32 	%r61, %r89, 31;
	shr.u32 	%r62, %r87, 30;
	add.s32 	%r21, %r61, %r62;
	setp.eq.s32	%p6, %r61, 0;
	mov.u32 	%r90, %r11;
	mov.u32 	%r91, %r20;
	@%p6 bra 	BB66_10;

	not.b32 	%r63, %r89;
	neg.s32 	%r22, %r20;
	setp.eq.s32	%p7, %r20, 0;
	selp.u32	%r64, 1, 0, %p7;
	add.s32 	%r89, %r64, %r63;
	xor.b32  	%r24, %r11, -2147483648;
	mov.u32 	%r90, %r24;
	mov.u32 	%r91, %r22;

BB66_10:
	mov.u32 	%r26, %r90;
	neg.s32 	%r65, %r21;
	setp.eq.s32	%p8, %r11, 0;
	selp.b32	%r94, %r21, %r65, %p8;
	clz.b32 	%r93, %r89;
	setp.eq.s32	%p9, %r93, 0;
	shl.b32 	%r66, %r89, %r93;
	mov.u32 	%r67, 32;
	sub.s32 	%r68, %r67, %r93;
	shr.u32 	%r69, %r91, %r68;
	add.s32 	%r70, %r69, %r66;
	selp.b32	%r30, %r89, %r70, %p9;
	mov.u32 	%r71, -921707870;
	mul.hi.u32 	%r92, %r30, %r71;
	setp.lt.s32	%p10, %r92, 1;
	@%p10 bra 	BB66_12;

	mul.lo.s32 	%r72, %r30, -921707870;
	shr.u32 	%r73, %r72, 31;
	shl.b32 	%r74, %r92, 1;
	add.s32 	%r92, %r73, %r74;
	add.s32 	%r93, %r93, 1;

BB66_12:
	mov.u32 	%r75, 126;
	sub.s32 	%r76, %r75, %r93;
	shl.b32 	%r77, %r76, 23;
	add.s32 	%r78, %r92, 1;
	shr.u32 	%r79, %r78, 7;
	add.s32 	%r80, %r79, 1;
	shr.u32 	%r81, %r80, 1;
	add.s32 	%r82, %r81, %r77;
	or.b32  	%r83, %r82, %r26;
	mov.b32 	 %f31, %r83;

BB66_13:
	mul.f32 	%f21, %f31, %f31;
	mov.f32 	%f22, 0fBF52B7F4;
	mov.f32 	%f23, 0f3B86D46D;
	fma.rn.f32 	%f24, %f23, %f21, %f22;
	add.f32 	%f25, %f21, 0fC01E09D0;
	rcp.rn.f32 	%f26, %f25;
	mul.f32 	%f27, %f24, %f26;
	mul.f32 	%f28, %f21, %f27;
	fma.rn.f32 	%f32, %f28, %f31, %f31;
	and.b32  	%r84, %r94, 1;
	setp.eq.b32	%p11, %r84, 1;
	@!%p11 bra 	BB66_15;
	bra.uni 	BB66_14;

BB66_14:
	mov.f32 	%f29, 0fBF800000;
	div.rn.f32 	%f32, %f29, %f32;

BB66_15:
	cvta.to.global.u64 	%rd17, %rd7;
	add.s64 	%rd19, %rd17, %rd13;
	st.global.f32 	[%rd19], %f32;

BB66_16:
	ret;
}

	// .globl	vec_tanhf
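// vec_tanhf: out[i] = tanhf(x[i]). |x| below 0f3F0CCCCD uses an odd
// polynomial; otherwise the identity tanh|x| = 1 - 2/(exp(2|x|) + 1) is
// evaluated with the approximate ex2/rcp instructions, the result saturates to
// 1 once |x| reaches 88 (0f42B00000), and the sign of x is OR-ed back in.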
.visible .entry vec_tanhf(
	.param .u64 vec_tanhf_param_0,
	.param .u64 vec_tanhf_param_1,
	.param .u64 vec_tanhf_param_2
)
{
	.reg .pred 	%p<5>;
	.reg .f32 	%f<33>;
	.reg .b32 	%r<10>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_tanhf_param_0];
	ld.param.u64 	%rd2, [vec_tanhf_param_1];
	ld.param.u64 	%rd3, [vec_tanhf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB67_5;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	abs.f32 	%f2, %f1;
	setp.ltu.f32	%p2, %f2, 0f3F0CCCCD;
	@%p2 bra 	BB67_3;
	bra.uni 	BB67_2;

BB67_3:
	mul.f32 	%f21, %f1, %f1;
	mov.f32 	%f22, 0fBD57BE66;
	mov.f32 	%f23, 0f3C86A81B;
	fma.rn.f32 	%f24, %f23, %f21, %f22;
	mov.f32 	%f25, 0f3E08677B;
	fma.rn.f32 	%f26, %f24, %f21, %f25;
	mov.f32 	%f27, 0fBEAAAA29;
	fma.rn.f32 	%f28, %f26, %f21, %f27;
	mul.f32 	%f29, %f21, %f28;
	fma.rn.f32 	%f30, %f29, %f1, %f1;
	add.f32 	%f31, %f1, %f1;
	setp.eq.f32	%p4, %f1, 0f00000000;
	selp.f32	%f32, %f31, %f30, %p4;
	bra.uni 	BB67_4;

BB67_2:
	add.f32 	%f10, %f2, %f2;
	mul.f32 	%f11, %f10, 0f3FB8AA3B;
	cvt.rzi.f32.f32	%f12, %f11;
	mov.f32 	%f13, 0fBF317200;
	fma.rn.f32 	%f14, %f12, %f13, %f10;
	mov.f32 	%f15, 0fB5BFBE8E;
	fma.rn.f32 	%f16, %f12, %f15, %f14;
	mul.f32 	%f7, %f16, 0f3FB8AA3B;
	// inline asm
	ex2.approx.ftz.f32 %f6,%f7;
	// inline asm
	ex2.approx.f32 	%f17, %f12;
	mov.f32 	%f18, 0f3F800000;
	fma.rn.f32 	%f9, %f6, %f17, %f18;
	// inline asm
	rcp.approx.ftz.f32 %f8,%f9;
	// inline asm
	mov.f32 	%f19, 0fC0000000;
	fma.rn.f32 	%f20, %f8, %f19, %f18;
	mov.b32 	 %r5, %f20;
	setp.ltu.f32	%p3, %f2, 0f42B00000;
	selp.b32	%r6, %r5, 1065353216, %p3;
	mov.b32 	 %r7, %f1;
	and.b32  	%r8, %r7, -2147483648;
	or.b32  	%r9, %r6, %r8;
	mov.b32 	 %f32, %r9;

BB67_4:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f32;

BB67_5:
	ret;
}

	// .globl	vec_tgammaf
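// vec_tgammaf: out[i] = tgammaf(x[i]). Non-negative arguments are clamped to
// 36 (0f42100000) and evaluated with a polynomial plus a recurrence loop that
// multiplies the argument down toward the [1, 1.5] range; negative arguments
// are clamped to about -41.1 (0fC2246666), non-positive integers are routed
// through the 0f7FFFFFFF NaN pattern, and a second recurrence plus reciprocal
// handles the rest, with a parity-based sign/zero fix-up below -42 (0fC2280000).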
.visible .entry vec_tgammaf(
	.param .u64 vec_tgammaf_param_0,
	.param .u64 vec_tgammaf_param_1,
	.param .u64 vec_tgammaf_param_2
)
{
	.reg .pred 	%p<19>;
	.reg .f32 	%f<101>;
	.reg .b32 	%r<7>;
	.reg .b64 	%rd<11>;


	ld.param.u64 	%rd4, [vec_tgammaf_param_0];
	ld.param.u64 	%rd2, [vec_tgammaf_param_1];
	ld.param.u64 	%rd3, [vec_tgammaf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB68_13;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	setp.ltu.f32	%p2, %f1, 0f00000000;
	@%p2 bra 	BB68_6;
	bra.uni 	BB68_2;

BB68_6:
	cvt.rmi.f32.f32	%f50, %f1;
	setp.eq.f32	%p10, %f50, %f1;
	selp.f32	%f51, 0f7FFFFFFF, %f1, %p10;
	setp.lt.f32	%p11, %f51, 0fC2246666;
	selp.f32	%f14, 0fC2246666, %f51, %p11;
	setp.lt.f32	%p12, %f14, 0fC2081EB8;
	add.f32 	%f52, %f14, 0f40C00000;
	selp.f32	%f97, %f52, %f14, %p12;
	setp.geu.f32	%p13, %f97, 0fBF000000;
	mov.f32 	%f96, %f97;
	@%p13 bra 	BB68_9;

	mov.f32 	%f98, %f97;
	mov.f32 	%f99, %f97;

BB68_8:
	add.f32 	%f98, %f98, 0f3F800000;
	mul.f32 	%f99, %f99, %f98;
	setp.lt.f32	%p14, %f98, 0fBF000000;
	mov.f32 	%f97, %f99;
	mov.f32 	%f96, %f98;
	@%p14 bra 	BB68_8;

BB68_9:
	mov.f32 	%f53, 0f3BE86AA4;
	mov.f32 	%f54, 0fBA8AA19E;
	fma.rn.f32 	%f55, %f54, %f96, %f53;
	mov.f32 	%f56, 0fBC1E2998;
	fma.rn.f32 	%f57, %f55, %f96, %f56;
	mov.f32 	%f58, 0fBD2CBE4A;
	fma.rn.f32 	%f59, %f57, %f96, %f58;
	mov.f32 	%f60, 0f3E2A8A17;
	fma.rn.f32 	%f61, %f59, %f96, %f60;
	mov.f32 	%f62, 0fBD2C0CBB;
	fma.rn.f32 	%f63, %f61, %f96, %f62;
	mov.f32 	%f64, 0fBF27E7A3;
	fma.rn.f32 	%f65, %f63, %f96, %f64;
	mov.f32 	%f66, 0f3F13C468;
	fma.rn.f32 	%f67, %f65, %f96, %f66;
	mov.f32 	%f68, 0f3F800000;
	fma.rn.f32 	%f69, %f67, %f96, %f68;
	mul.f32 	%f70, %f97, %f69;
	rcp.rn.f32 	%f100, %f70;
	setp.geu.f32	%p15, %f14, 0fC2081EB8;
	@%p15 bra 	BB68_12;

	add.f32 	%f71, %f14, 0f3F800000;
	mul.f32 	%f72, %f14, %f71;
	add.f32 	%f73, %f14, 0f40000000;
	mul.f32 	%f74, %f73, %f72;
	add.f32 	%f75, %f14, 0f40400000;
	mul.f32 	%f76, %f75, %f74;
	add.f32 	%f77, %f14, 0f40800000;
	mul.f32 	%f78, %f77, %f76;
	add.f32 	%f79, %f14, 0f40A00000;
	mul.f32 	%f80, %f79, %f78;
	rcp.rn.f32 	%f81, %f80;
	mul.f32 	%f100, %f100, %f81;
	setp.geu.f32	%p16, %f1, 0fC2280000;
	@%p16 bra 	BB68_12;

	cvt.rzi.s32.f32	%r5, %f1;
	and.b32  	%r6, %r5, 1;
	setp.eq.b32	%p17, %r6, 1;
	not.pred 	%p18, %p17;
	selp.f32	%f100, 0f80000000, %f100, %p18;
	bra.uni 	BB68_12;

BB68_2:
	setp.gt.f32	%p3, %f1, 0f42100000;
	selp.f32	%f2, 0f42100000, %f1, %p3;
	setp.gt.f32	%p4, %f2, 0f42081EB8;
	add.f32 	%f3, %f2, 0fBF800000;
	selp.f32	%f88, %f3, %f2, %p4;
	add.f32 	%f89, %f88, 0fBF800000;
	mov.f32 	%f27, 0f3F800000;
	mov.f32 	%f84, %f27;
	setp.leu.f32	%p5, %f88, 0f3FC00000;
	mov.f32 	%f83, %f27;
	@%p5 bra 	BB68_5;

	mov.f32 	%f90, %f89;

BB68_4:
	mov.f32 	%f88, %f90;
	mul.f32 	%f84, %f88, %f84;
	add.f32 	%f90, %f88, 0fBF800000;
	setp.gt.f32	%p6, %f88, 0f3FC00000;
	mov.f32 	%f82, %f84;
	mov.f32 	%f83, %f82;
	mov.f32 	%f89, %f90;
	@%p6 bra 	BB68_4;

BB68_5:
	mov.f32 	%f11, %f83;
	setp.ltu.f32	%p7, %f2, 0f3F000000;
	selp.f32	%f28, %f88, %f89, %p7;
	mov.f32 	%f29, 0f3BE86AA4;
	mov.f32 	%f30, 0fBA8AA19E;
	fma.rn.f32 	%f31, %f30, %f28, %f29;
	mov.f32 	%f32, 0fBC1E2998;
	fma.rn.f32 	%f33, %f31, %f28, %f32;
	mov.f32 	%f34, 0fBD2CBE4A;
	fma.rn.f32 	%f35, %f33, %f28, %f34;
	mov.f32 	%f36, 0f3E2A8A17;
	fma.rn.f32 	%f37, %f35, %f28, %f36;
	mov.f32 	%f38, 0fBD2C0CBB;
	fma.rn.f32 	%f39, %f37, %f28, %f38;
	mov.f32 	%f40, 0fBF27E7A3;
	fma.rn.f32 	%f41, %f39, %f28, %f40;
	mov.f32 	%f42, 0f3F13C468;
	fma.rn.f32 	%f43, %f41, %f28, %f42;
	fma.rn.f32 	%f45, %f43, %f28, %f27;
	mul.f32 	%f46, %f2, %f45;
	setp.lt.f32	%p8, %f2, 0f3F000000;
	selp.f32	%f47, %f46, %f45, %p8;
	div.approx.f32 	%f48, %f11, %f47;
	mul.f32 	%f49, %f3, %f48;
	selp.f32	%f100, %f49, %f48, %p4;

BB68_12:
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd10, %rd8, %rd6;
	st.global.f32 	[%rd10], %f100;

BB68_13:
	ret;
}

	// .globl	vec_truncf
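// vec_truncf: out[i] = truncf(x[i]); cvt.rzi.f32.f32 rounds toward zero.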
.visible .entry vec_truncf(
	.param .u64 vec_truncf_param_0,
	.param .u64 vec_truncf_param_1,
	.param .u64 vec_truncf_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<10>;


	ld.param.u64 	%rd4, [vec_truncf_param_0];
	ld.param.u64 	%rd2, [vec_truncf_param_1];
	ld.param.u64 	%rd3, [vec_truncf_param_2];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd4;
	@%p1 bra 	BB69_2;

	cvta.to.global.u64 	%rd5, %rd3;
	shl.b64 	%rd6, %rd1, 2;
	add.s64 	%rd7, %rd5, %rd6;
	ld.global.f32 	%f1, [%rd7];
	cvt.rzi.f32.f32	%f2, %f1;
	cvta.to.global.u64 	%rd8, %rd2;
	add.s64 	%rd9, %rd8, %rd6;
	st.global.f32 	[%rd9], %f2;

BB69_2:
	ret;
}

	// .globl	vec_y0f
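// vec_y0f: out[i] = y0f(x[i]), the Bessel function of the second kind of order
// zero. Small arguments use a series with the (2/pi)*log(x) term (0f3F22F983);
// mid-range arguments use polynomials expanded about the first zeros of Y0
// (the offsets 0fBF64C176, 0fC07D4A9A, 0fC0E2C0EE subtracted from |x|); large
// arguments use an asymptotic amplitude/phase form that reuses the pi/2
// reduction machinery; negative inputs produce NaN through sqrt.rn(-1.0).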
.visible .entry vec_y0f(
	.param .u64 vec_y0f_param_0,
	.param .u64 vec_y0f_param_1,
	.param .u64 vec_y0f_param_2
)
{
	.local .align 4 .b8 	__local_depot70[28];
	.reg .b64 	%SP;
	.reg .b64 	%SPL;
	.reg .pred 	%p<57>;
	.reg .f32 	%f<336>;
	.reg .b32 	%r<375>;
	.reg .b64 	%rd<61>;


	mov.u64 	%rd60, __local_depot70;
	cvta.local.u64 	%SP, %rd60;
	ld.param.u64 	%rd26, [vec_y0f_param_0];
	ld.param.u64 	%rd24, [vec_y0f_param_1];
	ld.param.u64 	%rd25, [vec_y0f_param_2];
	add.u64 	%rd27, %SP, 0;
	cvta.to.local.u64 	%rd57, %rd27;
	mov.u32 	%r143, %ntid.x;
	mov.u32 	%r144, %ctaid.x;
	mov.u32 	%r145, %tid.x;
	mad.lo.s32 	%r1, %r143, %r144, %r145;
	cvt.s64.s32	%rd28, %r1;
	setp.ge.u64	%p1, %rd28, %rd26;
	@%p1 bra 	BB70_85;

	cvta.to.global.u64 	%rd29, %rd25;
	mul.wide.s32 	%rd30, %r1, 4;
	add.s64 	%rd31, %rd29, %rd30;
	ld.global.f32 	%f1, [%rd31];
	abs.f32 	%f2, %f1;
	setp.gtu.f32	%p2, %f2, 0f3EE4C176;
	@%p2 bra 	BB70_42;
	bra.uni 	BB70_2;

BB70_42:
	setp.gtu.f32	%p31, %f2, 0f3FF67AF8;
	@%p31 bra 	BB70_44;
	bra.uni 	BB70_43;

BB70_44:
	setp.gtu.f32	%p32, %f2, 0f40B0B31E;
	@%p32 bra 	BB70_46;
	bra.uni 	BB70_45;

BB70_46:
	setp.gtu.f32	%p33, %f2, 0f410A7798;
	@%p33 bra 	BB70_48;
	bra.uni 	BB70_47;

BB70_48:
	abs.f32 	%f265, %f2;
	mov.f32 	%f335, 0f00000000;
	setp.eq.f32	%p34, %f265, 0f7F800000;
	@%p34 bra 	BB70_82;

	// inline asm
	rcp.approx.ftz.f32 %f266,%f2;
	// inline asm
	mul.f32 	%f268, %f266, %f266;
	mov.f32 	%f269, 0f3DD0D5F0;
	mov.f32 	%f270, 0fBECC69F3;
	fma.rn.f32 	%f271, %f270, %f268, %f269;
	mov.f32 	%f272, 0fBD7FF855;
	fma.rn.f32 	%f273, %f271, %f268, %f272;
	mov.f32 	%f274, 0f3F800000;
	fma.rn.f32 	%f275, %f273, %f268, %f274;
	mov.f32 	%f276, 0fBE50D31C;
	mov.f32 	%f277, 0f3F8CCD61;
	fma.rn.f32 	%f278, %f277, %f268, %f276;
	mov.f32 	%f279, 0f3D854783;
	fma.rn.f32 	%f280, %f278, %f268, %f279;
	mov.f32 	%f281, 0fBDFFFFFB;
	fma.rn.f32 	%f282, %f280, %f268, %f281;
	fma.rn.f32 	%f38, %f282, %f266, %f2;
	rsqrt.approx.f32 	%f283, %f2;
	mul.f32 	%f284, %f283, 0f3F4C422A;
	mul.f32 	%f39, %f275, %f284;
	mul.f32 	%f285, %f38, 0f3F22F983;
	cvt.rni.s32.f32	%r364, %f285;
	cvt.rn.f32.s32	%f286, %r364;
	neg.f32 	%f287, %f286;
	mov.f32 	%f288, 0f3FC90FDA;
	fma.rn.f32 	%f289, %f287, %f288, %f38;
	mov.f32 	%f290, 0f33A22168;
	fma.rn.f32 	%f291, %f287, %f290, %f289;
	mov.f32 	%f292, 0f27C234C5;
	fma.rn.f32 	%f329, %f287, %f292, %f291;
	abs.f32 	%f293, %f38;
	setp.leu.f32	%p35, %f293, 0f47CE4780;
	@%p35 bra 	BB70_59;

	mov.b32 	 %r73, %f38;
	bfe.u32 	%r243, %r73, 23, 8;
	add.s32 	%r244, %r243, -128;
	shl.b32 	%r245, %r73, 8;
	or.b32  	%r74, %r245, -2147483648;
	shr.u32 	%r75, %r244, 5;
	mov.u32 	%r356, 0;
	mov.u64 	%rd56, __cudart_i2opi_f;
	mov.u32 	%r355, -6;

BB70_51:
	.pragma "nounroll";
	ld.const.u32 	%r248, [%rd56];
	// inline asm
	{
	mad.lo.cc.u32   %r246, %r248, %r74, %r356;
	madc.hi.u32     %r356, %r248, %r74,  0;
	}
	// inline asm
	st.local.u32 	[%rd57], %r246;
	add.s64 	%rd57, %rd57, 4;
	add.s64 	%rd56, %rd56, 4;
	add.s32 	%r355, %r355, 1;
	setp.ne.s32	%p36, %r355, 0;
	@%p36 bra 	BB70_51;

	and.b32  	%r80, %r73, -2147483648;
	cvta.to.local.u64 	%rd43, %rd27;
	st.local.u32 	[%rd43+24], %r356;
	bfe.u32 	%r81, %r73, 23, 5;
	mov.u32 	%r251, 6;
	sub.s32 	%r252, %r251, %r75;
	mul.wide.s32 	%rd44, %r252, 4;
	add.s64 	%rd17, %rd43, %rd44;
	ld.local.u32 	%r357, [%rd17];
	ld.local.u32 	%r358, [%rd17+-4];
	setp.eq.s32	%p37, %r81, 0;
	@%p37 bra 	BB70_54;

	mov.u32 	%r253, 32;
	sub.s32 	%r254, %r253, %r81;
	shr.u32 	%r255, %r358, %r254;
	shl.b32 	%r256, %r357, %r81;
	add.s32 	%r357, %r255, %r256;
	ld.local.u32 	%r257, [%rd17+-8];
	shr.u32 	%r258, %r257, %r254;
	shl.b32 	%r259, %r358, %r81;
	add.s32 	%r358, %r258, %r259;

BB70_54:
	shr.u32 	%r260, %r358, 30;
	shl.b32 	%r261, %r357, 2;
	add.s32 	%r359, %r260, %r261;
	shl.b32 	%r89, %r358, 2;
	shr.u32 	%r262, %r359, 31;
	shr.u32 	%r263, %r357, 30;
	add.s32 	%r90, %r262, %r263;
	setp.eq.s32	%p38, %r262, 0;
	mov.u32 	%r360, %r80;
	mov.u32 	%r361, %r89;
	@%p38 bra 	BB70_56;

	not.b32 	%r264, %r359;
	neg.s32 	%r91, %r89;
	setp.eq.s32	%p39, %r89, 0;
	selp.u32	%r265, 1, 0, %p39;
	add.s32 	%r359, %r265, %r264;
	xor.b32  	%r93, %r80, -2147483648;
	mov.u32 	%r360, %r93;
	mov.u32 	%r361, %r91;

BB70_56:
	mov.u32 	%r95, %r360;
	neg.s32 	%r266, %r90;
	setp.eq.s32	%p40, %r80, 0;
	selp.b32	%r364, %r90, %r266, %p40;
	clz.b32 	%r363, %r359;
	setp.eq.s32	%p41, %r363, 0;
	shl.b32 	%r267, %r359, %r363;
	mov.u32 	%r268, 32;
	sub.s32 	%r269, %r268, %r363;
	shr.u32 	%r270, %r361, %r269;
	add.s32 	%r271, %r270, %r267;
	selp.b32	%r99, %r359, %r271, %p41;
	mov.u32 	%r272, -921707870;
	mul.hi.u32 	%r362, %r99, %r272;
	setp.lt.s32	%p42, %r362, 1;
	@%p42 bra 	BB70_58;

	mul.lo.s32 	%r273, %r99, -921707870;
	shr.u32 	%r274, %r273, 31;
	shl.b32 	%r275, %r362, 1;
	add.s32 	%r362, %r274, %r275;
	add.s32 	%r363, %r363, 1;

BB70_58:
	mov.u32 	%r276, 126;
	sub.s32 	%r277, %r276, %r363;
	shl.b32 	%r278, %r277, 23;
	add.s32 	%r279, %r362, 1;
	shr.u32 	%r280, %r279, 7;
	add.s32 	%r281, %r280, 1;
	shr.u32 	%r282, %r281, 1;
	add.s32 	%r283, %r282, %r278;
	or.b32  	%r284, %r283, %r95;
	mov.b32 	 %f329, %r284;

BB70_59:
	and.b32  	%r285, %r364, 3;
	cvt.rn.f32.s32	%f294, %r285;
	add.f32 	%f295, %f329, 0fC016CBE4;
	fma.rn.f32 	%f330, %f294, 0f3FC90FDB, %f295;
	abs.f32 	%f296, %f330;
	setp.neu.f32	%p43, %f296, 0f7F800000;
	@%p43 bra 	BB70_61;

	mov.f32 	%f297, 0f00000000;
	mul.rn.f32 	%f330, %f330, %f297;

BB70_61:
	mul.f32 	%f298, %f330, 0f3F22F983;
	cvt.rni.s32.f32	%r374, %f298;
	cvt.rn.f32.s32	%f299, %r374;
	neg.f32 	%f300, %f299;
	fma.rn.f32 	%f302, %f300, %f288, %f330;
	fma.rn.f32 	%f304, %f300, %f290, %f302;
	fma.rn.f32 	%f331, %f300, %f292, %f304;
	abs.f32 	%f306, %f330;
	setp.leu.f32	%p44, %f306, 0f47CE4780;
	@%p44 bra 	BB70_71;

	mov.b32 	 %r107, %f330;
	shr.u32 	%r108, %r107, 23;
	bfe.u32 	%r288, %r107, 23, 8;
	add.s32 	%r289, %r288, -128;
	shl.b32 	%r290, %r107, 8;
	or.b32  	%r109, %r290, -2147483648;
	shr.u32 	%r110, %r289, 5;
	cvta.to.local.u64 	%rd59, %rd27;
	mov.u32 	%r366, 0;
	mov.u64 	%rd58, __cudart_i2opi_f;
	mov.u32 	%r365, -6;

BB70_63:
	.pragma "nounroll";
	ld.const.u32 	%r293, [%rd58];
	// inline asm
	{
	mad.lo.cc.u32   %r291, %r293, %r109, %r366;
	madc.hi.u32     %r366, %r293, %r109,  0;
	}
	// inline asm
	st.local.u32 	[%rd59], %r291;
	add.s64 	%rd59, %rd59, 4;
	add.s64 	%rd58, %rd58, 4;
	add.s32 	%r365, %r365, 1;
	setp.ne.s32	%p45, %r365, 0;
	@%p45 bra 	BB70_63;

	and.b32  	%r115, %r107, -2147483648;
	cvta.to.local.u64 	%rd48, %rd27;
	st.local.u32 	[%rd48+24], %r366;
	mov.u32 	%r296, 6;
	sub.s32 	%r297, %r296, %r110;
	mul.wide.s32 	%rd49, %r297, 4;
	add.s64 	%rd23, %rd48, %rd49;
	ld.local.u32 	%r367, [%rd23];
	ld.local.u32 	%r368, [%rd23+-4];
	and.b32  	%r118, %r108, 31;
	setp.eq.s32	%p46, %r118, 0;
	@%p46 bra 	BB70_66;

	mov.u32 	%r298, 32;
	sub.s32 	%r299, %r298, %r118;
	shr.u32 	%r300, %r368, %r299;
	shl.b32 	%r301, %r367, %r118;
	add.s32 	%r367, %r300, %r301;
	ld.local.u32 	%r302, [%rd23+-8];
	shr.u32 	%r303, %r302, %r299;
	shl.b32 	%r304, %r368, %r118;
	add.s32 	%r368, %r303, %r304;

BB70_66:
	shr.u32 	%r305, %r368, 30;
	shl.b32 	%r306, %r367, 2;
	add.s32 	%r369, %r305, %r306;
	shl.b32 	%r124, %r368, 2;
	shr.u32 	%r307, %r369, 31;
	shr.u32 	%r308, %r367, 30;
	add.s32 	%r125, %r307, %r308;
	setp.eq.s32	%p47, %r307, 0;
	mov.u32 	%r370, %r115;
	mov.u32 	%r371, %r124;
	@%p47 bra 	BB70_68;

	not.b32 	%r309, %r369;
	neg.s32 	%r126, %r124;
	setp.eq.s32	%p48, %r124, 0;
	selp.u32	%r310, 1, 0, %p48;
	add.s32 	%r369, %r310, %r309;
	xor.b32  	%r128, %r115, -2147483648;
	mov.u32 	%r370, %r128;
	mov.u32 	%r371, %r126;

BB70_68:
	mov.u32 	%r130, %r370;
	neg.s32 	%r311, %r125;
	setp.eq.s32	%p49, %r115, 0;
	selp.b32	%r374, %r125, %r311, %p49;
	clz.b32 	%r373, %r369;
	setp.eq.s32	%p50, %r373, 0;
	shl.b32 	%r312, %r369, %r373;
	mov.u32 	%r313, 32;
	sub.s32 	%r314, %r313, %r373;
	shr.u32 	%r315, %r371, %r314;
	add.s32 	%r316, %r315, %r312;
	selp.b32	%r134, %r369, %r316, %p50;
	mov.u32 	%r317, -921707870;
	mul.hi.u32 	%r372, %r134, %r317;
	setp.lt.s32	%p51, %r372, 1;
	@%p51 bra 	BB70_70;

	mul.lo.s32 	%r318, %r134, -921707870;
	shr.u32 	%r319, %r318, 31;
	shl.b32 	%r320, %r372, 1;
	add.s32 	%r372, %r319, %r320;
	add.s32 	%r373, %r373, 1;

BB70_70:
	mov.u32 	%r321, 126;
	sub.s32 	%r322, %r321, %r373;
	shl.b32 	%r323, %r322, 23;
	add.s32 	%r324, %r372, 1;
	shr.u32 	%r325, %r324, 7;
	add.s32 	%r326, %r325, 1;
	shr.u32 	%r327, %r326, 1;
	add.s32 	%r328, %r327, %r323;
	or.b32  	%r329, %r328, %r130;
	mov.b32 	 %f331, %r329;

BB70_71:
	mul.rn.f32 	%f49, %f331, %f331;
	add.s32 	%r141, %r374, 1;
	and.b32  	%r142, %r141, 1;
	setp.eq.s32	%p52, %r142, 0;
	@%p52 bra 	BB70_73;

	mov.f32 	%f307, 0fBAB6061A;
	mov.f32 	%f308, 0f37CCF5CE;
	fma.rn.f32 	%f332, %f308, %f49, %f307;
	bra.uni 	BB70_74;

BB70_2:
	mul.f32 	%f65, %f2, %f2;
	mov.f32 	%f66, 0fB71F49B6;
	mov.f32 	%f67, 0f33DBE5AC;
	fma.rn.f32 	%f68, %f67, %f65, %f66;
	mov.f32 	%f69, 0f3A0D3100;
	fma.rn.f32 	%f70, %f68, %f65, %f69;
	mov.f32 	%f71, 0fBC83AD8E;
	fma.rn.f32 	%f72, %f70, %f65, %f71;
	mov.f32 	%f73, 0f3E35DE5A;
	fma.rn.f32 	%f74, %f72, %f65, %f73;
	mov.f32 	%f75, 0fBD9726B5;
	fma.rn.f32 	%f3, %f74, %f65, %f75;
	setp.lt.f32	%p3, %f2, 0f7F800000;
	setp.gt.f32	%p4, %f2, 0f00000000;
	and.pred  	%p5, %p4, %p3;
	@%p5 bra 	BB70_4;
	bra.uni 	BB70_3;

BB70_4:
	setp.lt.f32	%p6, %f2, 0f00800000;
	mul.f32 	%f78, %f2, 0f4B800000;
	selp.f32	%f79, %f78, %f2, %p6;
	selp.f32	%f80, 0fC3170000, 0fC2FE0000, %p6;
	mov.b32 	 %r146, %f79;
	and.b32  	%r147, %r146, 8388607;
	or.b32  	%r148, %r147, 1065353216;
	mov.b32 	 %f81, %r148;
	shr.u32 	%r149, %r146, 23;
	cvt.rn.f32.u32	%f82, %r149;
	add.f32 	%f83, %f80, %f82;
	setp.gt.f32	%p7, %f81, 0f3FAE147B;
	mul.f32 	%f84, %f81, 0f3F000000;
	add.f32 	%f85, %f83, 0f3F800000;
	selp.f32	%f86, %f84, %f81, %p7;
	selp.f32	%f87, %f85, %f83, %p7;
	add.f32 	%f77, %f86, 0f3F800000;
	add.f32 	%f88, %f86, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f76,%f77;
	// inline asm
	mul.f32 	%f89, %f88, %f88;
	neg.f32 	%f90, %f89;
	mul.rn.f32 	%f91, %f76, %f90;
	add.rn.f32 	%f92, %f88, %f91;
	mul.f32 	%f93, %f92, %f92;
	mov.f32 	%f94, 0f3C4C6A36;
	mov.f32 	%f95, 0f3B1E94E6;
	fma.rn.f32 	%f96, %f95, %f93, %f94;
	mov.f32 	%f97, 0f3DAAAB1A;
	fma.rn.f32 	%f98, %f96, %f93, %f97;
	mul.f32 	%f99, %f93, %f98;
	fma.rn.f32 	%f100, %f99, %f92, %f91;
	add.f32 	%f101, %f88, %f100;
	mov.f32 	%f102, 0f3F317218;
	fma.rn.f32 	%f321, %f87, %f102, %f101;
	bra.uni 	BB70_5;

BB70_43:
	add.f32 	%f197, %f2, 0fBF64C176;
	add.f32 	%f198, %f197, 0f32657D03;
	mov.f32 	%f199, 0fBE02574C;
	mov.f32 	%f200, 0f3CDDC8B3;
	fma.rn.f32 	%f201, %f200, %f198, %f199;
	mov.f32 	%f202, 0f3E7F2CC9;
	fma.rn.f32 	%f203, %f201, %f198, %f202;
	mov.f32 	%f204, 0fBE8BF29B;
	fma.rn.f32 	%f205, %f203, %f198, %f204;
	mov.f32 	%f206, 0f3E5BCE93;
	fma.rn.f32 	%f207, %f205, %f198, %f206;
	mov.f32 	%f208, 0fBE38C4FF;
	fma.rn.f32 	%f209, %f207, %f198, %f208;
	mov.f32 	%f210, 0f3E42774D;
	fma.rn.f32 	%f211, %f209, %f198, %f210;
	mov.f32 	%f212, 0fBE525CB2;
	fma.rn.f32 	%f213, %f211, %f198, %f212;
	mov.f32 	%f214, 0f3E60F43D;
	fma.rn.f32 	%f215, %f213, %f198, %f214;
	mov.f32 	%f216, 0fBE679145;
	fma.rn.f32 	%f217, %f215, %f198, %f216;
	mov.f32 	%f218, 0f3E61D24A;
	fma.rn.f32 	%f219, %f217, %f198, %f218;
	mov.f32 	%f220, 0fBEFBF1AD;
	fma.rn.f32 	%f221, %f219, %f198, %f220;
	mov.f32 	%f222, 0f3F6121BB;
	fma.rn.f32 	%f223, %f221, %f198, %f222;
	mul.f32 	%f335, %f198, %f223;
	bra.uni 	BB70_82;

BB70_3:
	lg2.approx.f32 	%f321, %f2;

BB70_5:
	abs.f32 	%f7, %f2;
	setp.gtu.f32	%p8, %f7, 0f41000000;
	@%p8 bra 	BB70_7;
	bra.uni 	BB70_6;

BB70_7:
	abs.f32 	%f139, %f7;
	mov.f32 	%f328, 0f00000000;
	setp.eq.f32	%p9, %f139, 0f7F800000;
	@%p9 bra 	BB70_41;

	// inline asm
	rcp.approx.ftz.f32 %f140,%f7;
	// inline asm
	mul.f32 	%f142, %f140, %f140;
	mov.f32 	%f143, 0fBF03B7C2;
	mov.f32 	%f144, 0f4056FE93;
	fma.rn.f32 	%f145, %f144, %f142, %f143;
	mov.f32 	%f146, 0f3DD3B3F3;
	fma.rn.f32 	%f147, %f145, %f142, %f146;
	mov.f32 	%f148, 0fBD7FFFB6;
	fma.rn.f32 	%f149, %f147, %f142, %f148;
	mov.f32 	%f150, 0f3F800000;
	fma.rn.f32 	%f151, %f149, %f142, %f150;
	mov.f32 	%f152, 0fBE52412D;
	mov.f32 	%f153, 0f3F91E009;
	fma.rn.f32 	%f154, %f153, %f142, %f152;
	mov.f32 	%f155, 0f3D854ED1;
	fma.rn.f32 	%f156, %f154, %f142, %f155;
	mov.f32 	%f157, 0fBDFFFFFF;
	fma.rn.f32 	%f158, %f156, %f142, %f157;
	fma.rn.f32 	%f9, %f158, %f140, %f7;
	rsqrt.approx.f32 	%f159, %f7;
	mul.f32 	%f160, %f159, 0f3F4C422A;
	mul.f32 	%f10, %f151, %f160;
	mul.f32 	%f161, %f9, 0f3F22F983;
	cvt.rni.s32.f32	%r344, %f161;
	cvt.rn.f32.s32	%f162, %r344;
	neg.f32 	%f163, %f162;
	mov.f32 	%f164, 0f3FC90FDA;
	fma.rn.f32 	%f165, %f163, %f164, %f9;
	mov.f32 	%f166, 0f33A22168;
	fma.rn.f32 	%f167, %f163, %f166, %f165;
	mov.f32 	%f168, 0f27C234C5;
	fma.rn.f32 	%f322, %f163, %f168, %f167;
	abs.f32 	%f169, %f9;
	setp.leu.f32	%p10, %f169, 0f47CE4780;
	@%p10 bra 	BB70_18;

	mov.b32 	 %r3, %f9;
	shl.b32 	%r152, %r3, 8;
	or.b32  	%r4, %r152, -2147483648;
	mov.u32 	%r336, 0;
	mov.u64 	%rd53, __cudart_i2opi_f;
	mov.u32 	%r335, -6;

BB70_10:
	.pragma "nounroll";
	ld.const.u32 	%r155, [%rd53];
	// inline asm
	{
	mad.lo.cc.u32   %r153, %r155, %r4, %r336;
	madc.hi.u32     %r336, %r155, %r4,  0;
	}
	// inline asm
	st.local.u32 	[%rd57], %r153;
	add.s64 	%rd57, %rd57, 4;
	add.s64 	%rd53, %rd53, 4;
	add.s32 	%r335, %r335, 1;
	setp.ne.s32	%p11, %r335, 0;
	@%p11 bra 	BB70_10;

	and.b32  	%r9, %r3, -2147483648;
	bfe.u32 	%r158, %r3, 23, 8;
	add.s32 	%r159, %r158, -128;
	shr.u32 	%r160, %r159, 5;
	cvta.to.local.u64 	%rd34, %rd27;
	st.local.u32 	[%rd34+24], %r336;
	bfe.u32 	%r10, %r3, 23, 5;
	mov.u32 	%r161, 6;
	sub.s32 	%r162, %r161, %r160;
	mul.wide.s32 	%rd35, %r162, 4;
	add.s64 	%rd6, %rd34, %rd35;
	ld.local.u32 	%r337, [%rd6];
	ld.local.u32 	%r338, [%rd6+-4];
	setp.eq.s32	%p12, %r10, 0;
	@%p12 bra 	BB70_13;

	mov.u32 	%r163, 32;
	sub.s32 	%r164, %r163, %r10;
	shr.u32 	%r165, %r338, %r164;
	shl.b32 	%r166, %r337, %r10;
	add.s32 	%r337, %r165, %r166;
	ld.local.u32 	%r167, [%rd6+-8];
	shr.u32 	%r168, %r167, %r164;
	shl.b32 	%r169, %r338, %r10;
	add.s32 	%r338, %r168, %r169;

BB70_13:
	shr.u32 	%r170, %r338, 30;
	shl.b32 	%r171, %r337, 2;
	add.s32 	%r339, %r170, %r171;
	shl.b32 	%r18, %r338, 2;
	shr.u32 	%r172, %r339, 31;
	shr.u32 	%r173, %r337, 30;
	add.s32 	%r19, %r172, %r173;
	setp.eq.s32	%p13, %r172, 0;
	mov.u32 	%r340, %r9;
	mov.u32 	%r341, %r18;
	@%p13 bra 	BB70_15;

	not.b32 	%r174, %r339;
	neg.s32 	%r20, %r18;
	setp.eq.s32	%p14, %r18, 0;
	selp.u32	%r175, 1, 0, %p14;
	add.s32 	%r339, %r175, %r174;
	xor.b32  	%r22, %r9, -2147483648;
	mov.u32 	%r340, %r22;
	mov.u32 	%r341, %r20;

BB70_15:
	mov.u32 	%r24, %r340;
	neg.s32 	%r176, %r19;
	setp.eq.s32	%p15, %r9, 0;
	selp.b32	%r344, %r19, %r176, %p15;
	clz.b32 	%r343, %r339;
	setp.eq.s32	%p16, %r343, 0;
	shl.b32 	%r177, %r339, %r343;
	mov.u32 	%r178, 32;
	sub.s32 	%r179, %r178, %r343;
	shr.u32 	%r180, %r341, %r179;
	add.s32 	%r181, %r180, %r177;
	selp.b32	%r28, %r339, %r181, %p16;
	mov.u32 	%r182, -921707870;
	mul.hi.u32 	%r342, %r28, %r182;
	setp.lt.s32	%p17, %r342, 1;
	@%p17 bra 	BB70_17;

	mul.lo.s32 	%r183, %r28, -921707870;
	shr.u32 	%r184, %r183, 31;
	shl.b32 	%r185, %r342, 1;
	add.s32 	%r342, %r184, %r185;
	add.s32 	%r343, %r343, 1;

BB70_17:
	mov.u32 	%r186, 126;
	sub.s32 	%r187, %r186, %r343;
	shl.b32 	%r188, %r187, 23;
	add.s32 	%r189, %r342, 1;
	shr.u32 	%r190, %r189, 7;
	add.s32 	%r191, %r190, 1;
	shr.u32 	%r192, %r191, 1;
	add.s32 	%r193, %r192, %r188;
	or.b32  	%r194, %r193, %r24;
	mov.b32 	 %f322, %r194;

BB70_18:
	and.b32  	%r195, %r344, 3;
	cvt.rn.f32.s32	%f170, %r195;
	add.f32 	%f171, %f322, 0fBF490FDB;
	fma.rn.f32 	%f323, %f170, 0f3FC90FDB, %f171;
	abs.f32 	%f172, %f323;
	setp.neu.f32	%p18, %f172, 0f7F800000;
	@%p18 bra 	BB70_20;

	mov.f32 	%f173, 0f00000000;
	mul.rn.f32 	%f323, %f323, %f173;

BB70_20:
	mul.f32 	%f174, %f323, 0f3F22F983;
	cvt.rni.s32.f32	%r354, %f174;
	cvt.rn.f32.s32	%f175, %r354;
	neg.f32 	%f176, %f175;
	fma.rn.f32 	%f178, %f176, %f164, %f323;
	fma.rn.f32 	%f180, %f176, %f166, %f178;
	fma.rn.f32 	%f324, %f176, %f168, %f180;
	abs.f32 	%f182, %f323;
	setp.leu.f32	%p19, %f182, 0f47CE4780;
	@%p19 bra 	BB70_30;

	mov.b32 	 %r36, %f323;
	shr.u32 	%r37, %r36, 23;
	bfe.u32 	%r198, %r36, 23, 8;
	add.s32 	%r199, %r198, -128;
	shl.b32 	%r200, %r36, 8;
	or.b32  	%r38, %r200, -2147483648;
	shr.u32 	%r39, %r199, 5;
	cvta.to.local.u64 	%rd55, %rd27;
	mov.u32 	%r346, 0;
	mov.u64 	%rd54, __cudart_i2opi_f;
	mov.u32 	%r345, -6;

BB70_22:
	.pragma "nounroll";
	ld.const.u32 	%r203, [%rd54];
	// inline asm
	{
	mad.lo.cc.u32   %r201, %r203, %r38, %r346;
	madc.hi.u32     %r346, %r203, %r38,  0;
	}
	// inline asm
	st.local.u32 	[%rd55], %r201;
	add.s64 	%rd55, %rd55, 4;
	add.s64 	%rd54, %rd54, 4;
	add.s32 	%r345, %r345, 1;
	setp.ne.s32	%p20, %r345, 0;
	@%p20 bra 	BB70_22;

	and.b32  	%r44, %r36, -2147483648;
	cvta.to.local.u64 	%rd39, %rd27;
	st.local.u32 	[%rd39+24], %r346;
	mov.u32 	%r206, 6;
	sub.s32 	%r207, %r206, %r39;
	mul.wide.s32 	%rd40, %r207, 4;
	add.s64 	%rd12, %rd39, %rd40;
	ld.local.u32 	%r347, [%rd12];
	ld.local.u32 	%r348, [%rd12+-4];
	and.b32  	%r47, %r37, 31;
	setp.eq.s32	%p21, %r47, 0;
	@%p21 bra 	BB70_25;

	mov.u32 	%r208, 32;
	sub.s32 	%r209, %r208, %r47;
	shr.u32 	%r210, %r348, %r209;
	shl.b32 	%r211, %r347, %r47;
	add.s32 	%r347, %r210, %r211;
	ld.local.u32 	%r212, [%rd12+-8];
	shr.u32 	%r213, %r212, %r209;
	shl.b32 	%r214, %r348, %r47;
	add.s32 	%r348, %r213, %r214;

BB70_25:
	shr.u32 	%r215, %r348, 30;
	shl.b32 	%r216, %r347, 2;
	add.s32 	%r349, %r215, %r216;
	shl.b32 	%r53, %r348, 2;
	shr.u32 	%r217, %r349, 31;
	shr.u32 	%r218, %r347, 30;
	add.s32 	%r54, %r217, %r218;
	setp.eq.s32	%p22, %r217, 0;
	mov.u32 	%r350, %r44;
	mov.u32 	%r351, %r53;
	@%p22 bra 	BB70_27;

	not.b32 	%r219, %r349;
	neg.s32 	%r55, %r53;
	setp.eq.s32	%p23, %r53, 0;
	selp.u32	%r220, 1, 0, %p23;
	add.s32 	%r349, %r220, %r219;
	xor.b32  	%r57, %r44, -2147483648;
	mov.u32 	%r350, %r57;
	mov.u32 	%r351, %r55;

BB70_27:
	mov.u32 	%r59, %r350;
	neg.s32 	%r221, %r54;
	setp.eq.s32	%p24, %r44, 0;
	selp.b32	%r354, %r54, %r221, %p24;
	clz.b32 	%r353, %r349;
	setp.eq.s32	%p25, %r353, 0;
	shl.b32 	%r222, %r349, %r353;
	mov.u32 	%r223, 32;
	sub.s32 	%r224, %r223, %r353;
	shr.u32 	%r225, %r351, %r224;
	add.s32 	%r226, %r225, %r222;
	selp.b32	%r63, %r349, %r226, %p25;
	mov.u32 	%r227, -921707870;
	mul.hi.u32 	%r352, %r63, %r227;
	setp.lt.s32	%p26, %r352, 1;
	@%p26 bra 	BB70_29;

	mul.lo.s32 	%r228, %r63, -921707870;
	shr.u32 	%r229, %r228, 31;
	shl.b32 	%r230, %r352, 1;
	add.s32 	%r352, %r229, %r230;
	add.s32 	%r353, %r353, 1;

BB70_29:
	mov.u32 	%r231, 126;
	sub.s32 	%r232, %r231, %r353;
	shl.b32 	%r233, %r232, 23;
	add.s32 	%r234, %r352, 1;
	shr.u32 	%r235, %r234, 7;
	add.s32 	%r236, %r235, 1;
	shr.u32 	%r237, %r236, 1;
	add.s32 	%r238, %r237, %r233;
	or.b32  	%r239, %r238, %r59;
	mov.b32 	 %f324, %r239;

BB70_30:
	mul.rn.f32 	%f20, %f324, %f324;
	add.s32 	%r70, %r354, 1;
	and.b32  	%r71, %r70, 1;
	setp.eq.s32	%p27, %r71, 0;
	@%p27 bra 	BB70_32;

	mov.f32 	%f183, 0fBAB6061A;
	mov.f32 	%f184, 0f37CCF5CE;
	fma.rn.f32 	%f325, %f184, %f20, %f183;
	bra.uni 	BB70_33;

BB70_6:
	add.f32 	%f103, %f7, 0fC019E8A9;
	add.f32 	%f104, %f103, 0fB3E971B3;
	mov.f32 	%f105, 0fA9ACA9B3;
	mov.f32 	%f106, 0fA6B3B8E7;
	fma.rn.f32 	%f107, %f106, %f104, %f105;
	mov.f32 	%f108, 0f2C3F0E18;
	fma.rn.f32 	%f109, %f107, %f104, %f108;
	mov.f32 	%f110, 0fACD41781;
	fma.rn.f32 	%f111, %f109, %f104, %f110;
	mov.f32 	%f112, 0fAFE90F38;
	fma.rn.f32 	%f113, %f111, %f104, %f112;
	mov.f32 	%f114, 0f3020305B;
	fma.rn.f32 	%f115, %f113, %f104, %f114;
	mov.f32 	%f116, 0f33797143;
	fma.rn.f32 	%f117, %f115, %f104, %f116;
	mov.f32 	%f118, 0f30F76F85;
	fma.rn.f32 	%f119, %f117, %f104, %f118;
	mov.f32 	%f120, 0fB6B6DFC6;
	fma.rn.f32 	%f121, %f119, %f104, %f120;
	mov.f32 	%f122, 0fB6F665C9;
	fma.rn.f32 	%f123, %f121, %f104, %f122;
	mov.f32 	%f124, 0f399E2DEB;
	fma.rn.f32 	%f125, %f123, %f104, %f124;
	mov.f32 	%f126, 0f3A4AE334;
	fma.rn.f32 	%f127, %f125, %f104, %f126;
	mov.f32 	%f128, 0fBBEEAA1B;
	fma.rn.f32 	%f129, %f127, %f104, %f128;
	mov.f32 	%f130, 0fBCDA7747;
	fma.rn.f32 	%f131, %f129, %f104, %f130;
	mul.f32 	%f132, %f104, %f131;
	add.f32 	%f133, %f7, 0fC0B0A47B;
	add.f32 	%f134, %f133, 0f339A7A37;
	mul.f32 	%f135, %f134, %f132;
	add.f32 	%f136, %f7, 0fC10A75AB;
	add.f32 	%f137, %f136, 0fB4CCCDED;
	mul.f32 	%f328, %f137, %f135;
	bra.uni 	BB70_41;

BB70_45:
	add.f32 	%f224, %f2, 0fC07D4A9A;
	add.f32 	%f225, %f224, 0fB3D9856A;
	mov.f32 	%f226, 0fB45E2607;
	mov.f32 	%f227, 0fB449DD3F;
	fma.rn.f32 	%f228, %f227, %f225, %f226;
	mov.f32 	%f229, 0fB6857064;
	fma.rn.f32 	%f230, %f228, %f225, %f229;
	mov.f32 	%f231, 0f38554610;
	fma.rn.f32 	%f232, %f230, %f225, %f231;
	mov.f32 	%f233, 0f394ACED7;
	fma.rn.f32 	%f234, %f232, %f225, %f233;
	mov.f32 	%f235, 0fBB0F1A0C;
	fma.rn.f32 	%f236, %f234, %f225, %f235;
	mov.f32 	%f237, 0fBBE07F2E;
	fma.rn.f32 	%f238, %f236, %f225, %f237;
	mov.f32 	%f239, 0f3D6FB6B5;
	fma.rn.f32 	%f240, %f238, %f225, %f239;
	mov.f32 	%f241, 0f3D504DF1;
	fma.rn.f32 	%f242, %f240, %f225, %f241;
	mov.f32 	%f243, 0fBECE1A13;
	fma.rn.f32 	%f244, %f242, %f225, %f243;
	mul.f32 	%f335, %f225, %f244;
	bra.uni 	BB70_82;

BB70_47:
	add.f32 	%f245, %f2, 0fC0E2C0EE;
	add.f32 	%f246, %f245, 0fB39CE420;
	mov.f32 	%f247, 0f3629DA6C;
	mov.f32 	%f248, 0f3510CEBE;
	fma.rn.f32 	%f249, %f248, %f246, %f247;
	mov.f32 	%f250, 0fB84054C0;
	fma.rn.f32 	%f251, %f249, %f246, %f250;
	mov.f32 	%f252, 0fB91318AB;
	fma.rn.f32 	%f253, %f251, %f246, %f252;
	mov.f32 	%f254, 0f3B0E9921;
	fma.rn.f32 	%f255, %f253, %f246, %f254;
	mov.f32 	%f256, 0f3B5974D5;
	fma.rn.f32 	%f257, %f255, %f246, %f256;
	mov.f32 	%f258, 0fBD44B4D7;
	fma.rn.f32 	%f259, %f257, %f246, %f258;
	mov.f32 	%f260, 0fBCAD7799;
	fma.rn.f32 	%f261, %f259, %f246, %f260;
	mov.f32 	%f262, 0f3E99A665;
	fma.rn.f32 	%f263, %f261, %f246, %f262;
	mul.f32 	%f335, %f246, %f263;
	bra.uni 	BB70_82;

BB70_32:
	mov.f32 	%f185, 0f3C08839E;
	mov.f32 	%f186, 0fB94CA1F9;
	fma.rn.f32 	%f325, %f186, %f20, %f185;

BB70_33:
	@%p27 bra 	BB70_35;

	mov.f32 	%f187, 0f3D2AAAA5;
	fma.rn.f32 	%f188, %f325, %f20, %f187;
	mov.f32 	%f189, 0fBF000000;
	fma.rn.f32 	%f326, %f188, %f20, %f189;
	bra.uni 	BB70_36;

BB70_35:
	mov.f32 	%f190, 0fBE2AAAA3;
	fma.rn.f32 	%f191, %f325, %f20, %f190;
	mov.f32 	%f192, 0f00000000;
	fma.rn.f32 	%f326, %f191, %f20, %f192;

BB70_36:
	fma.rn.f32 	%f327, %f326, %f324, %f324;
	@%p27 bra 	BB70_38;

	fma.rn.f32 	%f327, %f326, %f20, %f150;

BB70_38:
	and.b32  	%r240, %r70, 2;
	setp.eq.s32	%p30, %r240, 0;
	@%p30 bra 	BB70_40;

	mov.f32 	%f194, 0f00000000;
	mov.f32 	%f195, 0fBF800000;
	fma.rn.f32 	%f327, %f327, %f195, %f194;

BB70_40:
	mul.f32 	%f328, %f10, %f327;

BB70_41:
	mul.f32 	%f196, %f321, 0f3F22F983;
	fma.rn.f32 	%f335, %f196, %f328, %f3;

BB70_82:
	setp.geu.f32	%p56, %f1, 0f00000000;
	@%p56 bra 	BB70_84;

	mov.f32 	%f320, 0fBF800000;
	sqrt.rn.f32 	%f335, %f320;

BB70_84:
	cvta.to.global.u64 	%rd50, %rd24;
	add.s64 	%rd52, %rd50, %rd30;
	st.global.f32 	[%rd52], %f335;

BB70_85:
	ret;

BB70_73:
	mov.f32 	%f309, 0f3C08839E;
	mov.f32 	%f310, 0fB94CA1F9;
	fma.rn.f32 	%f332, %f310, %f49, %f309;

BB70_74:
	@%p52 bra 	BB70_76;

	mov.f32 	%f311, 0f3D2AAAA5;
	fma.rn.f32 	%f312, %f332, %f49, %f311;
	mov.f32 	%f313, 0fBF000000;
	fma.rn.f32 	%f333, %f312, %f49, %f313;
	bra.uni 	BB70_77;

BB70_76:
	mov.f32 	%f314, 0fBE2AAAA3;
	fma.rn.f32 	%f315, %f332, %f49, %f314;
	mov.f32 	%f316, 0f00000000;
	fma.rn.f32 	%f333, %f315, %f49, %f316;

BB70_77:
	fma.rn.f32 	%f334, %f333, %f331, %f331;
	@%p52 bra 	BB70_79;

	fma.rn.f32 	%f334, %f333, %f49, %f274;

BB70_79:
	and.b32  	%r330, %r141, 2;
	setp.eq.s32	%p55, %r330, 0;
	@%p55 bra 	BB70_81;

	mov.f32 	%f318, 0f00000000;
	mov.f32 	%f319, 0fBF800000;
	fma.rn.f32 	%f334, %f334, %f319, %f318;

BB70_81:
	mul.f32 	%f335, %f39, %f334;
	bra.uni 	BB70_82;
}

	// .globl	vec_y1f
.visible .entry vec_y1f(
	.param .u64 vec_y1f_param_0,
	.param .u64 vec_y1f_param_1,
	.param .u64 vec_y1f_param_2
)
{
	.local .align 4 .b8 	__local_depot71[28];
	.reg .b64 	%SP;
	.reg .b64 	%SPL;
	.reg .pred 	%p<59>;
	.reg .f32 	%f<332>;
	.reg .b32 	%r<381>;
	.reg .b64 	%rd<59>;


	mov.u64 	%rd58, __local_depot71;
	cvta.local.u64 	%SP, %rd58;
	ld.param.u64 	%rd26, [vec_y1f_param_0];
	ld.param.u64 	%rd24, [vec_y1f_param_1];
	ld.param.u64 	%rd25, [vec_y1f_param_2];
	add.u64 	%rd27, %SP, 0;
	cvta.to.local.u64 	%rd57, %rd27;
	mov.u32 	%r144, %ntid.x;
	mov.u32 	%r145, %ctaid.x;
	mov.u32 	%r146, %tid.x;
	mad.lo.s32 	%r1, %r144, %r145, %r146;
	cvt.s64.s32	%rd28, %r1;
	setp.ge.u64	%p1, %rd28, %rd26;
	@%p1 bra 	BB71_87;

	cvta.to.global.u64 	%rd29, %rd25;
	mul.wide.s32 	%rd30, %r1, 4;
	add.s64 	%rd31, %rd29, %rd30;
	ld.global.f32 	%f1, [%rd31];
	abs.f32 	%f2, %f1;
	setp.lt.f32	%p2, %f2, 0f00800000;
	@%p2 bra 	BB71_83;
	bra.uni 	BB71_2;

BB71_83:
	mov.f32 	%f315, 0fBF22F983;
	div.rn.f32 	%f331, %f315, %f2;
	bra.uni 	BB71_84;

BB71_2:
	setp.gtu.f32	%p3, %f2, 0f3FD96AC4;
	@%p3 bra 	BB71_43;
	bra.uni 	BB71_3;

BB71_43:
	setp.gtu.f32	%p33, %f2, 0f40740EEE;
	@%p33 bra 	BB71_45;
	bra.uni 	BB71_44;

BB71_45:
	setp.gtu.f32	%p34, %f2, 0f40E06937;
	@%p34 bra 	BB71_47;
	bra.uni 	BB71_46;

BB71_47:
	setp.gtu.f32	%p35, %f2, 0f4122C2E3;
	@%p35 bra 	BB71_49;
	bra.uni 	BB71_48;

BB71_49:
	abs.f32 	%f260, %f2;
	mov.f32 	%f331, 0f00000000;
	setp.eq.f32	%p36, %f260, 0f7F800000;
	@%p36 bra 	BB71_84;

	add.s64 	%rd13, %rd57, 24;
	// inline asm
	rcp.approx.ftz.f32 %f261,%f2;
	// inline asm
	mul.f32 	%f263, %f261, %f261;
	mov.f32 	%f264, 0fBE44AB90;
	mov.f32 	%f265, 0f3F267F60;
	fma.rn.f32 	%f266, %f265, %f263, %f264;
	mov.f32 	%f267, 0f3E3FFEBF;
	fma.rn.f32 	%f268, %f266, %f263, %f267;
	mov.f32 	%f269, 0f3F800000;
	fma.rn.f32 	%f270, %f268, %f263, %f269;
	mov.f32 	%f271, 0f3EBB73AB;
	mov.f32 	%f272, 0fBFE4E1AB;
	fma.rn.f32 	%f273, %f272, %f263, %f271;
	mov.f32 	%f274, 0fBE27FB6E;
	fma.rn.f32 	%f275, %f273, %f263, %f274;
	mov.f32 	%f276, 0f3EBFFFFF;
	fma.rn.f32 	%f277, %f275, %f263, %f276;
	fma.rn.f32 	%f38, %f277, %f261, %f2;
	rsqrt.approx.f32 	%f278, %f2;
	mul.f32 	%f279, %f278, 0f3F4C422A;
	mul.f32 	%f39, %f270, %f279;
	mul.f32 	%f280, %f38, 0f3F22F983;
	cvt.rni.s32.f32	%r370, %f280;
	cvt.rn.f32.s32	%f281, %r370;
	neg.f32 	%f282, %f281;
	mov.f32 	%f283, 0f3FC90FDA;
	fma.rn.f32 	%f284, %f282, %f283, %f38;
	mov.f32 	%f285, 0f33A22168;
	fma.rn.f32 	%f286, %f282, %f285, %f284;
	mov.f32 	%f287, 0f27C234C5;
	fma.rn.f32 	%f325, %f282, %f287, %f286;
	abs.f32 	%f288, %f38;
	setp.leu.f32	%p37, %f288, 0f47CE4780;
	@%p37 bra 	BB71_60;

	mov.b32 	 %r73, %f38;
	shr.u32 	%r74, %r73, 23;
	bfe.u32 	%r249, %r73, 23, 8;
	add.s32 	%r250, %r249, -128;
	shl.b32 	%r251, %r73, 8;
	or.b32  	%r75, %r251, -2147483648;
	shr.u32 	%r76, %r250, 5;
	mov.u32 	%r362, 0;
	mov.u64 	%rd51, __cudart_i2opi_f;
	mov.u32 	%r361, -6;
	mov.u64 	%rd56, %rd57;

BB71_52:
	.pragma "nounroll";
	ld.const.u32 	%r254, [%rd51];
	// inline asm
	{
	mad.lo.cc.u32   %r252, %r254, %r75, %r362;
	madc.hi.u32     %r362, %r254, %r75,  0;
	}
	// inline asm
	st.local.u32 	[%rd56], %r252;
	add.s64 	%rd56, %rd56, 4;
	add.s64 	%rd51, %rd51, 4;
	add.s32 	%r361, %r361, 1;
	setp.ne.s32	%p38, %r361, 0;
	@%p38 bra 	BB71_52;

	and.b32  	%r81, %r73, -2147483648;
	st.local.u32 	[%rd13], %r362;
	mov.u32 	%r257, 6;
	sub.s32 	%r258, %r257, %r76;
	mul.wide.s32 	%rd42, %r258, 4;
	add.s64 	%rd18, %rd57, %rd42;
	ld.local.u32 	%r363, [%rd18];
	ld.local.u32 	%r364, [%rd18+-4];
	and.b32  	%r84, %r74, 31;
	setp.eq.s32	%p39, %r84, 0;
	@%p39 bra 	BB71_55;

	mov.u32 	%r259, 32;
	sub.s32 	%r260, %r259, %r84;
	shr.u32 	%r261, %r364, %r260;
	shl.b32 	%r262, %r363, %r84;
	add.s32 	%r363, %r261, %r262;
	ld.local.u32 	%r263, [%rd18+-8];
	shr.u32 	%r264, %r263, %r260;
	shl.b32 	%r265, %r364, %r84;
	add.s32 	%r364, %r264, %r265;

BB71_55:
	shr.u32 	%r266, %r364, 30;
	shl.b32 	%r267, %r363, 2;
	add.s32 	%r365, %r266, %r267;
	shl.b32 	%r90, %r364, 2;
	shr.u32 	%r268, %r365, 31;
	shr.u32 	%r269, %r363, 30;
	add.s32 	%r91, %r268, %r269;
	setp.eq.s32	%p40, %r268, 0;
	mov.u32 	%r366, %r81;
	mov.u32 	%r367, %r90;
	@%p40 bra 	BB71_57;

	not.b32 	%r270, %r365;
	neg.s32 	%r92, %r90;
	setp.eq.s32	%p41, %r90, 0;
	selp.u32	%r271, 1, 0, %p41;
	add.s32 	%r365, %r271, %r270;
	xor.b32  	%r94, %r81, -2147483648;
	mov.u32 	%r366, %r94;
	mov.u32 	%r367, %r92;

BB71_57:
	mov.u32 	%r96, %r366;
	neg.s32 	%r272, %r91;
	setp.eq.s32	%p42, %r81, 0;
	selp.b32	%r370, %r91, %r272, %p42;
	clz.b32 	%r369, %r365;
	setp.eq.s32	%p43, %r369, 0;
	shl.b32 	%r273, %r365, %r369;
	mov.u32 	%r274, 32;
	sub.s32 	%r275, %r274, %r369;
	shr.u32 	%r276, %r367, %r275;
	add.s32 	%r277, %r276, %r273;
	selp.b32	%r100, %r365, %r277, %p43;
	mov.u32 	%r278, -921707870;
	mul.hi.u32 	%r368, %r100, %r278;
	setp.lt.s32	%p44, %r368, 1;
	@%p44 bra 	BB71_59;

	mul.lo.s32 	%r279, %r100, -921707870;
	shr.u32 	%r280, %r279, 31;
	shl.b32 	%r281, %r368, 1;
	add.s32 	%r368, %r280, %r281;
	add.s32 	%r369, %r369, 1;

BB71_59:
	mov.u32 	%r282, 126;
	sub.s32 	%r283, %r282, %r369;
	shl.b32 	%r284, %r283, 23;
	add.s32 	%r285, %r368, 1;
	shr.u32 	%r286, %r285, 7;
	add.s32 	%r287, %r286, 1;
	shr.u32 	%r288, %r287, 1;
	add.s32 	%r289, %r288, %r284;
	or.b32  	%r290, %r289, %r96;
	mov.b32 	 %f325, %r290;

BB71_60:
	and.b32  	%r291, %r370, 3;
	cvt.rn.f32.s32	%f289, %r291;
	add.f32 	%f290, %f325, 0fC07B53D1;
	fma.rn.f32 	%f326, %f289, 0f3FC90FDB, %f290;
	abs.f32 	%f291, %f326;
	setp.neu.f32	%p45, %f291, 0f7F800000;
	@%p45 bra 	BB71_62;

	mov.f32 	%f292, 0f00000000;
	mul.rn.f32 	%f326, %f326, %f292;

BB71_62:
	mul.f32 	%f293, %f326, 0f3F22F983;
	cvt.rni.s32.f32	%r380, %f293;
	cvt.rn.f32.s32	%f294, %r380;
	neg.f32 	%f295, %f294;
	fma.rn.f32 	%f297, %f295, %f283, %f326;
	fma.rn.f32 	%f299, %f295, %f285, %f297;
	fma.rn.f32 	%f327, %f295, %f287, %f299;
	abs.f32 	%f301, %f326;
	setp.leu.f32	%p46, %f301, 0f47CE4780;
	@%p46 bra 	BB71_72;

	mov.b32 	 %r108, %f326;
	shr.u32 	%r109, %r108, 23;
	bfe.u32 	%r294, %r108, 23, 8;
	add.s32 	%r295, %r294, -128;
	shl.b32 	%r296, %r108, 8;
	or.b32  	%r110, %r296, -2147483648;
	shr.u32 	%r111, %r295, 5;
	mov.u32 	%r372, 0;
	mov.u64 	%rd52, __cudart_i2opi_f;
	mov.u32 	%r371, -6;
	mov.u64 	%rd55, %rd57;

BB71_64:
	.pragma "nounroll";
	ld.const.u32 	%r299, [%rd52];
	// inline asm
	{
	mad.lo.cc.u32   %r297, %r299, %r110, %r372;
	madc.hi.u32     %r372, %r299, %r110,  0;
	}
	// inline asm
	st.local.u32 	[%rd55], %r297;
	add.s64 	%rd55, %rd55, 4;
	add.s64 	%rd52, %rd52, 4;
	add.s32 	%r371, %r371, 1;
	setp.ne.s32	%p47, %r371, 0;
	@%p47 bra 	BB71_64;

	and.b32  	%r116, %r108, -2147483648;
	st.local.u32 	[%rd13], %r372;
	mov.u32 	%r302, 6;
	sub.s32 	%r303, %r302, %r111;
	mul.wide.s32 	%rd44, %r303, 4;
	add.s64 	%rd23, %rd57, %rd44;
	ld.local.u32 	%r373, [%rd23];
	ld.local.u32 	%r374, [%rd23+-4];
	and.b32  	%r119, %r109, 31;
	setp.eq.s32	%p48, %r119, 0;
	@%p48 bra 	BB71_67;

	mov.u32 	%r304, 32;
	sub.s32 	%r305, %r304, %r119;
	shr.u32 	%r306, %r374, %r305;
	shl.b32 	%r307, %r373, %r119;
	add.s32 	%r373, %r306, %r307;
	ld.local.u32 	%r308, [%rd23+-8];
	shr.u32 	%r309, %r308, %r305;
	shl.b32 	%r310, %r374, %r119;
	add.s32 	%r374, %r309, %r310;

BB71_67:
	shr.u32 	%r311, %r374, 30;
	shl.b32 	%r312, %r373, 2;
	add.s32 	%r375, %r311, %r312;
	shl.b32 	%r125, %r374, 2;
	shr.u32 	%r313, %r375, 31;
	shr.u32 	%r314, %r373, 30;
	add.s32 	%r126, %r313, %r314;
	setp.eq.s32	%p49, %r313, 0;
	mov.u32 	%r376, %r116;
	mov.u32 	%r377, %r125;
	@%p49 bra 	BB71_69;

	not.b32 	%r315, %r375;
	neg.s32 	%r127, %r125;
	setp.eq.s32	%p50, %r125, 0;
	selp.u32	%r316, 1, 0, %p50;
	add.s32 	%r375, %r316, %r315;
	xor.b32  	%r129, %r116, -2147483648;
	mov.u32 	%r376, %r129;
	mov.u32 	%r377, %r127;

BB71_69:
	mov.u32 	%r131, %r376;
	neg.s32 	%r317, %r126;
	setp.eq.s32	%p51, %r116, 0;
	selp.b32	%r380, %r126, %r317, %p51;
	clz.b32 	%r379, %r375;
	setp.eq.s32	%p52, %r379, 0;
	shl.b32 	%r318, %r375, %r379;
	mov.u32 	%r319, 32;
	sub.s32 	%r320, %r319, %r379;
	shr.u32 	%r321, %r377, %r320;
	add.s32 	%r322, %r321, %r318;
	selp.b32	%r135, %r375, %r322, %p52;
	mov.u32 	%r323, -921707870;
	mul.hi.u32 	%r378, %r135, %r323;
	setp.lt.s32	%p53, %r378, 1;
	@%p53 bra 	BB71_71;

	mul.lo.s32 	%r324, %r135, -921707870;
	shr.u32 	%r325, %r324, 31;
	shl.b32 	%r326, %r378, 1;
	add.s32 	%r378, %r325, %r326;
	add.s32 	%r379, %r379, 1;

BB71_71:
	mov.u32 	%r327, 126;
	sub.s32 	%r328, %r327, %r379;
	shl.b32 	%r329, %r328, 23;
	add.s32 	%r330, %r378, 1;
	shr.u32 	%r331, %r330, 7;
	add.s32 	%r332, %r331, 1;
	shr.u32 	%r333, %r332, 1;
	add.s32 	%r334, %r333, %r329;
	or.b32  	%r335, %r334, %r131;
	mov.b32 	 %f327, %r335;

BB71_72:
	mul.rn.f32 	%f49, %f327, %f327;
	add.s32 	%r142, %r380, 1;
	and.b32  	%r143, %r142, 1;
	setp.eq.s32	%p54, %r143, 0;
	@%p54 bra 	BB71_74;

	mov.f32 	%f302, 0fBAB6061A;
	mov.f32 	%f303, 0f37CCF5CE;
	fma.rn.f32 	%f328, %f303, %f49, %f302;
	bra.uni 	BB71_75;

BB71_3:
	mul.f32 	%f66, %f2, %f2;
	mov.f32 	%f67, 0fB58527DA;
	mov.f32 	%f68, 0f321462CC;
	fma.rn.f32 	%f69, %f68, %f66, %f67;
	mov.f32 	%f70, 0f38963E95;
	fma.rn.f32 	%f71, %f69, %f66, %f70;
	mov.f32 	%f72, 0fBB41ADCB;
	fma.rn.f32 	%f73, %f71, %f66, %f72;
	mov.f32 	%f74, 0f3D5E9CBB;
	fma.rn.f32 	%f75, %f73, %f66, %f74;
	mov.f32 	%f76, 0fBE48C331;
	fma.rn.f32 	%f3, %f75, %f66, %f76;
	setp.lt.f32	%p4, %f2, 0f7F800000;
	setp.gt.f32	%p5, %f2, 0f00000000;
	and.pred  	%p6, %p5, %p4;
	@%p6 bra 	BB71_5;
	bra.uni 	BB71_4;

BB71_5:
	mov.b32 	 %r147, %f2;
	and.b32  	%r148, %r147, 8388607;
	or.b32  	%r149, %r148, 1065353216;
	mov.b32 	 %f79, %r149;
	shr.u32 	%r150, %r147, 23;
	cvt.rn.f32.u32	%f80, %r150;
	add.f32 	%f81, %f80, 0fC2FE0000;
	setp.gt.f32	%p7, %f79, 0f3FAE147B;
	mul.f32 	%f82, %f79, 0f3F000000;
	add.f32 	%f83, %f81, 0f3F800000;
	selp.f32	%f84, %f82, %f79, %p7;
	selp.f32	%f85, %f83, %f81, %p7;
	add.f32 	%f78, %f84, 0f3F800000;
	add.f32 	%f86, %f84, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f77,%f78;
	// inline asm
	mul.f32 	%f87, %f86, %f86;
	neg.f32 	%f88, %f87;
	mul.rn.f32 	%f89, %f77, %f88;
	add.rn.f32 	%f90, %f86, %f89;
	mul.f32 	%f91, %f90, %f90;
	mov.f32 	%f92, 0f3C4C6A36;
	mov.f32 	%f93, 0f3B1E94E6;
	fma.rn.f32 	%f94, %f93, %f91, %f92;
	mov.f32 	%f95, 0f3DAAAB1A;
	fma.rn.f32 	%f96, %f94, %f91, %f95;
	mul.f32 	%f97, %f91, %f96;
	fma.rn.f32 	%f98, %f97, %f90, %f89;
	add.f32 	%f99, %f86, %f98;
	mov.f32 	%f100, 0f3F317218;
	fma.rn.f32 	%f317, %f85, %f100, %f99;
	bra.uni 	BB71_6;

BB71_44:
	add.f32 	%f198, %f2, 0fC00C9DF7;
	add.f32 	%f199, %f198, 0f33B200DC;
	mov.f32 	%f200, 0f39064A88;
	mov.f32 	%f201, 0fB789E29D;
	fma.rn.f32 	%f202, %f201, %f199, %f200;
	mov.f32 	%f203, 0fB9F0AB0D;
	fma.rn.f32 	%f204, %f202, %f199, %f203;
	mov.f32 	%f205, 0f3A8F6102;
	fma.rn.f32 	%f206, %f204, %f199, %f205;
	mov.f32 	%f207, 0fBB2C7045;
	fma.rn.f32 	%f208, %f206, %f199, %f207;
	mov.f32 	%f209, 0f3BF35DF7;
	fma.rn.f32 	%f210, %f208, %f199, %f209;
	mov.f32 	%f211, 0fBB9D097C;
	fma.rn.f32 	%f212, %f210, %f199, %f211;
	mov.f32 	%f213, 0fBD06968A;
	fma.rn.f32 	%f214, %f212, %f199, %f213;
	mov.f32 	%f215, 0fBDF2B7DF;
	fma.rn.f32 	%f216, %f214, %f199, %f215;
	mov.f32 	%f217, 0f3F055242;
	fma.rn.f32 	%f218, %f216, %f199, %f217;
	mul.f32 	%f331, %f199, %f218;
	bra.uni 	BB71_84;

BB71_4:
	lg2.approx.f32 	%f317, %f2;

BB71_6:
	abs.f32 	%f7, %f2;
	setp.gtu.f32	%p8, %f7, 0f40FB3333;
	@%p8 bra 	BB71_8;
	bra.uni 	BB71_7;

BB71_8:
	abs.f32 	%f133, %f7;
	mov.f32 	%f324, 0f00000000;
	setp.eq.f32	%p9, %f133, 0f7F800000;
	@%p9 bra 	BB71_42;

	// inline asm
	rcp.approx.ftz.f32 %f134,%f7;
	// inline asm
	mul.f32 	%f136, %f134, %f134;
	mov.f32 	%f137, 0f3F3FF7E9;
	mov.f32 	%f138, 0fC082CB37;
	fma.rn.f32 	%f139, %f138, %f136, %f137;
	mov.f32 	%f140, 0fBE458BAE;
	fma.rn.f32 	%f141, %f139, %f136, %f140;
	mov.f32 	%f142, 0f3E3FFF8B;
	fma.rn.f32 	%f143, %f141, %f136, %f142;
	mov.f32 	%f144, 0f3F800000;
	fma.rn.f32 	%f145, %f143, %f136, %f144;
	mov.f32 	%f146, 0f3EB914AD;
	mov.f32 	%f147, 0fBFCA3BA2;
	fma.rn.f32 	%f148, %f147, %f136, %f146;
	mov.f32 	%f149, 0fBE27F2EC;
	fma.rn.f32 	%f150, %f148, %f136, %f149;
	mov.f32 	%f151, 0f3EBFFFFD;
	fma.rn.f32 	%f152, %f150, %f136, %f151;
	fma.rn.f32 	%f9, %f152, %f134, %f7;
	rsqrt.approx.f32 	%f153, %f7;
	mul.f32 	%f154, %f153, 0f3F4C422A;
	mul.f32 	%f10, %f145, %f154;
	mul.f32 	%f155, %f9, 0f3F22F983;
	cvt.rni.s32.f32	%r350, %f155;
	cvt.rn.f32.s32	%f156, %r350;
	neg.f32 	%f157, %f156;
	mov.f32 	%f158, 0f3FC90FDA;
	fma.rn.f32 	%f159, %f157, %f158, %f9;
	mov.f32 	%f160, 0f33A22168;
	fma.rn.f32 	%f161, %f157, %f160, %f159;
	mov.f32 	%f162, 0f27C234C5;
	fma.rn.f32 	%f318, %f157, %f162, %f161;
	abs.f32 	%f163, %f9;
	setp.leu.f32	%p10, %f163, 0f47CE4780;
	@%p10 bra 	BB71_19;

	mov.b32 	 %r3, %f9;
	shl.b32 	%r153, %r3, 8;
	or.b32  	%r4, %r153, -2147483648;
	mov.u32 	%r342, 0;
	mov.u64 	%rd48, __cudart_i2opi_f;
	mov.u32 	%r341, -6;

BB71_11:
	.pragma "nounroll";
	ld.const.u32 	%r156, [%rd48];
	// inline asm
	{
	mad.lo.cc.u32   %r154, %r156, %r4, %r342;
	madc.hi.u32     %r342, %r156, %r4,  0;
	}
	// inline asm
	st.local.u32 	[%rd57], %r154;
	add.s64 	%rd57, %rd57, 4;
	add.s64 	%rd48, %rd48, 4;
	add.s32 	%r341, %r341, 1;
	setp.ne.s32	%p11, %r341, 0;
	@%p11 bra 	BB71_11;

	and.b32  	%r9, %r3, -2147483648;
	bfe.u32 	%r159, %r3, 23, 8;
	add.s32 	%r160, %r159, -128;
	shr.u32 	%r161, %r160, 5;
	cvta.to.local.u64 	%rd34, %rd27;
	st.local.u32 	[%rd34+24], %r342;
	bfe.u32 	%r10, %r3, 23, 5;
	mov.u32 	%r162, 6;
	sub.s32 	%r163, %r162, %r161;
	mul.wide.s32 	%rd35, %r163, 4;
	add.s64 	%rd6, %rd34, %rd35;
	ld.local.u32 	%r343, [%rd6];
	ld.local.u32 	%r344, [%rd6+-4];
	setp.eq.s32	%p12, %r10, 0;
	@%p12 bra 	BB71_14;

	mov.u32 	%r164, 32;
	sub.s32 	%r165, %r164, %r10;
	shr.u32 	%r166, %r344, %r165;
	shl.b32 	%r167, %r343, %r10;
	add.s32 	%r343, %r166, %r167;
	ld.local.u32 	%r168, [%rd6+-8];
	shr.u32 	%r169, %r168, %r165;
	shl.b32 	%r170, %r344, %r10;
	add.s32 	%r344, %r169, %r170;

BB71_14:
	shr.u32 	%r171, %r344, 30;
	shl.b32 	%r172, %r343, 2;
	add.s32 	%r345, %r171, %r172;
	shl.b32 	%r18, %r344, 2;
	shr.u32 	%r173, %r345, 31;
	shr.u32 	%r174, %r343, 30;
	add.s32 	%r19, %r173, %r174;
	setp.eq.s32	%p13, %r173, 0;
	mov.u32 	%r346, %r9;
	mov.u32 	%r347, %r18;
	@%p13 bra 	BB71_16;

	not.b32 	%r175, %r345;
	neg.s32 	%r20, %r18;
	setp.eq.s32	%p14, %r18, 0;
	selp.u32	%r176, 1, 0, %p14;
	add.s32 	%r345, %r176, %r175;
	xor.b32  	%r22, %r9, -2147483648;
	mov.u32 	%r346, %r22;
	mov.u32 	%r347, %r20;

BB71_16:
	mov.u32 	%r24, %r346;
	neg.s32 	%r177, %r19;
	setp.eq.s32	%p15, %r9, 0;
	selp.b32	%r350, %r19, %r177, %p15;
	clz.b32 	%r349, %r345;
	setp.eq.s32	%p16, %r349, 0;
	shl.b32 	%r178, %r345, %r349;
	mov.u32 	%r179, 32;
	sub.s32 	%r180, %r179, %r349;
	shr.u32 	%r181, %r347, %r180;
	add.s32 	%r182, %r181, %r178;
	selp.b32	%r28, %r345, %r182, %p16;
	mov.u32 	%r183, -921707870;
	mul.hi.u32 	%r348, %r28, %r183;
	setp.lt.s32	%p17, %r348, 1;
	@%p17 bra 	BB71_18;

	mul.lo.s32 	%r184, %r28, -921707870;
	shr.u32 	%r185, %r184, 31;
	shl.b32 	%r186, %r348, 1;
	add.s32 	%r348, %r185, %r186;
	add.s32 	%r349, %r349, 1;

BB71_18:
	mov.u32 	%r187, 126;
	sub.s32 	%r188, %r187, %r349;
	shl.b32 	%r189, %r188, 23;
	add.s32 	%r190, %r348, 1;
	shr.u32 	%r191, %r190, 7;
	add.s32 	%r192, %r191, 1;
	shr.u32 	%r193, %r192, 1;
	add.s32 	%r194, %r193, %r189;
	or.b32  	%r195, %r194, %r24;
	mov.b32 	 %f318, %r195;

BB71_19:
	and.b32  	%r196, %r350, 3;
	cvt.rn.f32.s32	%f164, %r196;
	add.f32 	%f165, %f318, 0fC016CBE4;
	fma.rn.f32 	%f319, %f164, 0f3FC90FDB, %f165;
	abs.f32 	%f166, %f319;
	setp.neu.f32	%p18, %f166, 0f7F800000;
	@%p18 bra 	BB71_21;

	mov.f32 	%f167, 0f00000000;
	mul.rn.f32 	%f319, %f319, %f167;

BB71_21:
	mul.f32 	%f168, %f319, 0f3F22F983;
	cvt.rni.s32.f32	%r360, %f168;
	cvt.rn.f32.s32	%f169, %r360;
	neg.f32 	%f170, %f169;
	fma.rn.f32 	%f172, %f170, %f158, %f319;
	fma.rn.f32 	%f174, %f170, %f160, %f172;
	fma.rn.f32 	%f320, %f170, %f162, %f174;
	abs.f32 	%f176, %f319;
	setp.leu.f32	%p19, %f176, 0f47CE4780;
	@%p19 bra 	BB71_31;

	mov.b32 	 %r36, %f319;
	shr.u32 	%r37, %r36, 23;
	bfe.u32 	%r199, %r36, 23, 8;
	add.s32 	%r200, %r199, -128;
	shl.b32 	%r201, %r36, 8;
	or.b32  	%r38, %r201, -2147483648;
	shr.u32 	%r39, %r200, 5;
	cvta.to.local.u64 	%rd50, %rd27;
	mov.u32 	%r352, 0;
	mov.u64 	%rd49, __cudart_i2opi_f;
	mov.u32 	%r351, -6;

BB71_23:
	.pragma "nounroll";
	ld.const.u32 	%r204, [%rd49];
	// inline asm
	{
	mad.lo.cc.u32   %r202, %r204, %r38, %r352;
	madc.hi.u32     %r352, %r204, %r38,  0;
	}
	// inline asm
	st.local.u32 	[%rd50], %r202;
	add.s64 	%rd50, %rd50, 4;
	add.s64 	%rd49, %rd49, 4;
	add.s32 	%r351, %r351, 1;
	setp.ne.s32	%p20, %r351, 0;
	@%p20 bra 	BB71_23;

	and.b32  	%r44, %r36, -2147483648;
	cvta.to.local.u64 	%rd39, %rd27;
	st.local.u32 	[%rd39+24], %r352;
	mov.u32 	%r207, 6;
	sub.s32 	%r208, %r207, %r39;
	mul.wide.s32 	%rd40, %r208, 4;
	add.s64 	%rd12, %rd39, %rd40;
	ld.local.u32 	%r353, [%rd12];
	ld.local.u32 	%r354, [%rd12+-4];
	and.b32  	%r47, %r37, 31;
	setp.eq.s32	%p21, %r47, 0;
	@%p21 bra 	BB71_26;

	mov.u32 	%r209, 32;
	sub.s32 	%r210, %r209, %r47;
	shr.u32 	%r211, %r354, %r210;
	shl.b32 	%r212, %r353, %r47;
	add.s32 	%r353, %r211, %r212;
	ld.local.u32 	%r213, [%rd12+-8];
	shr.u32 	%r214, %r213, %r210;
	shl.b32 	%r215, %r354, %r47;
	add.s32 	%r354, %r214, %r215;

BB71_26:
	shr.u32 	%r216, %r354, 30;
	shl.b32 	%r217, %r353, 2;
	add.s32 	%r355, %r216, %r217;
	shl.b32 	%r53, %r354, 2;
	shr.u32 	%r218, %r355, 31;
	shr.u32 	%r219, %r353, 30;
	add.s32 	%r54, %r218, %r219;
	setp.eq.s32	%p22, %r218, 0;
	mov.u32 	%r356, %r44;
	mov.u32 	%r357, %r53;
	@%p22 bra 	BB71_28;

	not.b32 	%r220, %r355;
	neg.s32 	%r55, %r53;
	setp.eq.s32	%p23, %r53, 0;
	selp.u32	%r221, 1, 0, %p23;
	add.s32 	%r355, %r221, %r220;
	xor.b32  	%r57, %r44, -2147483648;
	mov.u32 	%r356, %r57;
	mov.u32 	%r357, %r55;

BB71_28:
	mov.u32 	%r59, %r356;
	neg.s32 	%r222, %r54;
	setp.eq.s32	%p24, %r44, 0;
	selp.b32	%r360, %r54, %r222, %p24;
	clz.b32 	%r359, %r355;
	setp.eq.s32	%p25, %r359, 0;
	shl.b32 	%r223, %r355, %r359;
	mov.u32 	%r224, 32;
	sub.s32 	%r225, %r224, %r359;
	shr.u32 	%r226, %r357, %r225;
	add.s32 	%r227, %r226, %r223;
	selp.b32	%r63, %r355, %r227, %p25;
	mov.u32 	%r228, -921707870;
	mul.hi.u32 	%r358, %r63, %r228;
	setp.lt.s32	%p26, %r358, 1;
	@%p26 bra 	BB71_30;

	mul.lo.s32 	%r229, %r63, -921707870;
	shr.u32 	%r230, %r229, 31;
	shl.b32 	%r231, %r358, 1;
	add.s32 	%r358, %r230, %r231;
	add.s32 	%r359, %r359, 1;

BB71_30:
	mov.u32 	%r232, 126;
	sub.s32 	%r233, %r232, %r359;
	shl.b32 	%r234, %r233, 23;
	add.s32 	%r235, %r358, 1;
	shr.u32 	%r236, %r235, 7;
	add.s32 	%r237, %r236, 1;
	shr.u32 	%r238, %r237, 1;
	add.s32 	%r239, %r238, %r234;
	or.b32  	%r240, %r239, %r59;
	mov.b32 	 %f320, %r240;

BB71_31:
	mul.rn.f32 	%f20, %f320, %f320;
	add.s32 	%r70, %r360, 1;
	and.b32  	%r71, %r70, 1;
	setp.eq.s32	%p27, %r71, 0;
	@%p27 bra 	BB71_33;

	mov.f32 	%f177, 0fBAB6061A;
	mov.f32 	%f178, 0f37CCF5CE;
	fma.rn.f32 	%f321, %f178, %f20, %f177;
	bra.uni 	BB71_34;

BB71_7:
	add.f32 	%f101, %f7, 0fC0753AAC;
	add.f32 	%f102, %f101, 0f33A5090F;
	mov.f32 	%f103, 0f2B81BF42;
	mov.f32 	%f104, 0f29AF3463;
	fma.rn.f32 	%f105, %f104, %f102, %f103;
	mov.f32 	%f106, 0fADE21EC1;
	fma.rn.f32 	%f107, %f105, %f102, %f106;
	mov.f32 	%f108, 0fAF5DDEFF;
	fma.rn.f32 	%f109, %f107, %f102, %f108;
	mov.f32 	%f110, 0f319B0C9D;
	fma.rn.f32 	%f111, %f109, %f102, %f110;
	mov.f32 	%f112, 0f32E81173;
	fma.rn.f32 	%f113, %f111, %f102, %f112;
	mov.f32 	%f114, 0fB50F8DC8;
	fma.rn.f32 	%f115, %f113, %f102, %f114;
	mov.f32 	%f116, 0fB61E653D;
	fma.rn.f32 	%f117, %f115, %f102, %f116;
	mov.f32 	%f118, 0f382CD9C5;
	fma.rn.f32 	%f119, %f117, %f102, %f118;
	mov.f32 	%f120, 0f38F9EB10;
	fma.rn.f32 	%f121, %f119, %f102, %f120;
	mov.f32 	%f122, 0fBAECEB9C;
	fma.rn.f32 	%f123, %f121, %f102, %f122;
	mov.f32 	%f124, 0fBB276FFD;
	fma.rn.f32 	%f125, %f123, %f102, %f124;
	mov.f32 	%f126, 0f3D073993;
	fma.rn.f32 	%f127, %f125, %f102, %f126;
	add.f32 	%f128, %f7, 0fC0E07FB0;
	add.f32 	%f129, %f128, 0f3444B8DB;
	mul.f32 	%f130, %f129, %f127;
	mul.f32 	%f131, %f102, %f130;
	mul.f32 	%f324, %f7, %f131;
	bra.uni 	BB71_42;

BB71_46:
	add.f32 	%f219, %f2, 0fC0ADBFF2;
	add.f32 	%f220, %f219, 0fB4687B03;
	mov.f32 	%f221, 0fB508A416;
	mov.f32 	%f222, 0f32BE57D0;
	fma.rn.f32 	%f223, %f222, %f220, %f221;
	mov.f32 	%f224, 0fB63F8A14;
	fma.rn.f32 	%f225, %f223, %f220, %f224;
	mov.f32 	%f226, 0f38427E02;
	fma.rn.f32 	%f227, %f225, %f220, %f226;
	mov.f32 	%f228, 0f3919BB1C;
	fma.rn.f32 	%f229, %f227, %f220, %f228;
	mov.f32 	%f230, 0fBB0DF1FD;
	fma.rn.f32 	%f231, %f229, %f220, %f230;
	mov.f32 	%f232, 0fBB885189;
	fma.rn.f32 	%f233, %f231, %f220, %f232;
	mov.f32 	%f234, 0f3D50AEC1;
	fma.rn.f32 	%f235, %f233, %f220, %f234;
	mov.f32 	%f236, 0f3D005CFC;
	fma.rn.f32 	%f237, %f235, %f220, %f236;
	mov.f32 	%f238, 0fBEAE3E2B;
	fma.rn.f32 	%f239, %f237, %f220, %f238;
	mul.f32 	%f331, %f220, %f239;
	bra.uni 	BB71_84;

BB71_48:
	add.f32 	%f240, %f2, 0fC109893D;
	add.f32 	%f241, %f240, 0fB4E6169B;
	mov.f32 	%f242, 0f3602902E;
	mov.f32 	%f243, 0f350CF383;
	fma.rn.f32 	%f244, %f243, %f241, %f242;
	mov.f32 	%f245, 0fB8375F71;
	fma.rn.f32 	%f246, %f244, %f241, %f245;
	mov.f32 	%f247, 0fB8D9FAA8;
	fma.rn.f32 	%f248, %f246, %f241, %f247;
	mov.f32 	%f249, 0f3B03D19A;
	fma.rn.f32 	%f250, %f248, %f241, %f249;
	mov.f32 	%f251, 0f3B1E736D;
	fma.rn.f32 	%f252, %f250, %f241, %f251;
	mov.f32 	%f253, 0fBD31CAE5;
	fma.rn.f32 	%f254, %f252, %f241, %f253;
	mov.f32 	%f255, 0fBC8159B6;
	fma.rn.f32 	%f256, %f254, %f241, %f255;
	mov.f32 	%f257, 0f3E8AFCCA;
	fma.rn.f32 	%f258, %f256, %f241, %f257;
	mul.f32 	%f331, %f241, %f258;
	bra.uni 	BB71_84;

BB71_33:
	mov.f32 	%f179, 0f3C08839E;
	mov.f32 	%f180, 0fB94CA1F9;
	fma.rn.f32 	%f321, %f180, %f20, %f179;

BB71_34:
	@%p27 bra 	BB71_36;

	mov.f32 	%f181, 0f3D2AAAA5;
	fma.rn.f32 	%f182, %f321, %f20, %f181;
	mov.f32 	%f183, 0fBF000000;
	fma.rn.f32 	%f322, %f182, %f20, %f183;
	bra.uni 	BB71_37;

BB71_36:
	mov.f32 	%f184, 0fBE2AAAA3;
	fma.rn.f32 	%f185, %f321, %f20, %f184;
	mov.f32 	%f186, 0f00000000;
	fma.rn.f32 	%f322, %f185, %f20, %f186;

BB71_37:
	fma.rn.f32 	%f323, %f322, %f320, %f320;
	@%p27 bra 	BB71_39;

	fma.rn.f32 	%f323, %f322, %f20, %f144;

BB71_39:
	and.b32  	%r241, %r70, 2;
	setp.eq.s32	%p30, %r241, 0;
	@%p30 bra 	BB71_41;

	mov.f32 	%f188, 0f00000000;
	mov.f32 	%f189, 0fBF800000;
	fma.rn.f32 	%f323, %f323, %f189, %f188;

BB71_41:
	mul.f32 	%f324, %f10, %f323;

BB71_42:
	neg.f32 	%f190, %f324;
	setp.lt.f32	%p31, %f2, 0f00000000;
	selp.f32	%f191, %f190, %f324, %p31;
	mov.b32 	 %r242, %f2;
	and.b32  	%r243, %r242, -2147483648;
	mov.b32 	 %r244, %f191;
	and.b32  	%r245, %r244, 2147483647;
	or.b32  	%r246, %r245, %r243;
	mov.b32 	 %f192, %r246;
	setp.lt.f32	%p32, %f7, 0f0DA24260;
	selp.f32	%f193, %f192, %f191, %p32;
	mov.f32 	%f194, 0fBF800000;
	div.rn.f32 	%f195, %f194, %f2;
	fma.rn.f32 	%f196, %f317, %f193, %f195;
	mul.f32 	%f197, %f196, 0f3F22F983;
	fma.rn.f32 	%f331, %f2, %f3, %f197;

BB71_84:
	setp.geu.f32	%p58, %f1, 0f00000000;
	@%p58 bra 	BB71_86;

	mov.f32 	%f316, 0fBF800000;
	sqrt.rn.f32 	%f331, %f316;

BB71_86:
	cvta.to.global.u64 	%rd45, %rd24;
	add.s64 	%rd47, %rd45, %rd30;
	st.global.f32 	[%rd47], %f331;

BB71_87:
	ret;

BB71_74:
	mov.f32 	%f304, 0f3C08839E;
	mov.f32 	%f305, 0fB94CA1F9;
	fma.rn.f32 	%f328, %f305, %f49, %f304;

BB71_75:
	@%p54 bra 	BB71_77;

	mov.f32 	%f306, 0f3D2AAAA5;
	fma.rn.f32 	%f307, %f328, %f49, %f306;
	mov.f32 	%f308, 0fBF000000;
	fma.rn.f32 	%f329, %f307, %f49, %f308;
	bra.uni 	BB71_78;

BB71_77:
	mov.f32 	%f309, 0fBE2AAAA3;
	fma.rn.f32 	%f310, %f328, %f49, %f309;
	mov.f32 	%f311, 0f00000000;
	fma.rn.f32 	%f329, %f310, %f49, %f311;

BB71_78:
	fma.rn.f32 	%f330, %f329, %f327, %f327;
	@%p54 bra 	BB71_80;

	fma.rn.f32 	%f330, %f329, %f49, %f269;

BB71_80:
	and.b32  	%r336, %r142, 2;
	setp.eq.s32	%p57, %r336, 0;
	@%p57 bra 	BB71_82;

	mov.f32 	%f313, 0f00000000;
	mov.f32 	%f314, 0fBF800000;
	fma.rn.f32 	%f330, %f330, %f314, %f313;

BB71_82:
	mul.f32 	%f331, %f39, %f330;
	bra.uni 	BB71_84;
}

	// .globl	vec_copysignf
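	// Elementwise copysignf: out[i] takes the magnitude of a[i] and the sign bit of b[i]
	// (param_0 = n, param_1 = out, param_2 = a, param_3 = b).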
.visible .entry vec_copysignf(
	.param .u64 vec_copysignf_param_0,
	.param .u64 vec_copysignf_param_1,
	.param .u64 vec_copysignf_param_2,
	.param .u64 vec_copysignf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .b32 	%r<10>;
	.reg .b64 	%rd<13>;


	ld.param.u64 	%rd5, [vec_copysignf_param_0];
	ld.param.u64 	%rd2, [vec_copysignf_param_1];
	ld.param.u64 	%rd3, [vec_copysignf_param_2];
	ld.param.u64 	%rd4, [vec_copysignf_param_3];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd5;
	@%p1 bra 	BB72_2;

	cvta.to.global.u64 	%rd6, %rd3;
	shl.b64 	%rd7, %rd1, 2;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd4;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.u32 	%r5, [%rd10];
	and.b32  	%r6, %r5, -2147483648;
	ld.global.u32 	%r7, [%rd8];
	and.b32  	%r8, %r7, 2147483647;
	or.b32  	%r9, %r6, %r8;
	cvta.to.global.u64 	%rd11, %rd2;
	add.s64 	%rd12, %rd11, %rd7;
	st.global.u32 	[%rd12], %r9;

BB72_2:
	ret;
}

	// .globl	vec_fdimf
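	// Elementwise fdimf (positive difference): out[i] = (a[i] > b[i]) ? a[i] - b[i] : 0.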
.visible .entry vec_fdimf(
	.param .u64 vec_fdimf_param_0,
	.param .u64 vec_fdimf_param_1,
	.param .u64 vec_fdimf_param_2,
	.param .u64 vec_fdimf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<5>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<13>;


	ld.param.u64 	%rd5, [vec_fdimf_param_0];
	ld.param.u64 	%rd2, [vec_fdimf_param_1];
	ld.param.u64 	%rd3, [vec_fdimf_param_2];
	ld.param.u64 	%rd4, [vec_fdimf_param_3];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd5;
	@%p1 bra 	BB73_2;

	cvta.to.global.u64 	%rd6, %rd3;
	shl.b64 	%rd7, %rd1, 2;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd4;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd10];
	ld.global.f32 	%f2, [%rd8];
	sub.f32 	%f3, %f2, %f1;
	setp.gtu.f32	%p2, %f2, %f1;
	selp.f32	%f4, %f3, 0f00000000, %p2;
	cvta.to.global.u64 	%rd11, %rd2;
	add.s64 	%rd12, %rd11, %rd7;
	st.global.f32 	[%rd12], %f4;

BB73_2:
	ret;
}

	// .globl	vec_fdividef
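	// Elementwise division: out[i] = a[i] / b[i], round-to-nearest.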
.visible .entry vec_fdividef(
	.param .u64 vec_fdividef_param_0,
	.param .u64 vec_fdividef_param_1,
	.param .u64 vec_fdividef_param_2,
	.param .u64 vec_fdividef_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<13>;


	ld.param.u64 	%rd5, [vec_fdividef_param_0];
	ld.param.u64 	%rd2, [vec_fdividef_param_1];
	ld.param.u64 	%rd3, [vec_fdividef_param_2];
	ld.param.u64 	%rd4, [vec_fdividef_param_3];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd5;
	@%p1 bra 	BB74_2;

	cvta.to.global.u64 	%rd6, %rd3;
	shl.b64 	%rd7, %rd1, 2;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd4;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd10];
	ld.global.f32 	%f2, [%rd8];
	div.rn.f32 	%f3, %f2, %f1;
	cvta.to.global.u64 	%rd11, %rd2;
	add.s64 	%rd12, %rd11, %rd7;
	st.global.f32 	[%rd12], %f3;

BB74_2:
	ret;
}

	// .globl	vec_fmaxf
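	// Elementwise maximum: out[i] = fmaxf(a[i], b[i]).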
.visible .entry vec_fmaxf(
	.param .u64 vec_fmaxf_param_0,
	.param .u64 vec_fmaxf_param_1,
	.param .u64 vec_fmaxf_param_2,
	.param .u64 vec_fmaxf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<13>;


	ld.param.u64 	%rd5, [vec_fmaxf_param_0];
	ld.param.u64 	%rd2, [vec_fmaxf_param_1];
	ld.param.u64 	%rd3, [vec_fmaxf_param_2];
	ld.param.u64 	%rd4, [vec_fmaxf_param_3];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd5;
	@%p1 bra 	BB75_2;

	cvta.to.global.u64 	%rd6, %rd3;
	shl.b64 	%rd7, %rd1, 2;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd4;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd10];
	ld.global.f32 	%f2, [%rd8];
	max.f32 	%f3, %f2, %f1;
	cvta.to.global.u64 	%rd11, %rd2;
	add.s64 	%rd12, %rd11, %rd7;
	st.global.f32 	[%rd12], %f3;

BB75_2:
	ret;
}

	// .globl	vec_fminf
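	// Elementwise minimum: out[i] = fminf(a[i], b[i]).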
.visible .entry vec_fminf(
	.param .u64 vec_fminf_param_0,
	.param .u64 vec_fminf_param_1,
	.param .u64 vec_fminf_param_2,
	.param .u64 vec_fminf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<5>;
	.reg .b64 	%rd<13>;


	ld.param.u64 	%rd5, [vec_fminf_param_0];
	ld.param.u64 	%rd2, [vec_fminf_param_1];
	ld.param.u64 	%rd3, [vec_fminf_param_2];
	ld.param.u64 	%rd4, [vec_fminf_param_3];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd5;
	@%p1 bra 	BB76_2;

	cvta.to.global.u64 	%rd6, %rd3;
	shl.b64 	%rd7, %rd1, 2;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd4;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd10];
	ld.global.f32 	%f2, [%rd8];
	min.f32 	%f3, %f2, %f1;
	cvta.to.global.u64 	%rd11, %rd2;
	add.s64 	%rd12, %rd11, %rd7;
	st.global.f32 	[%rd12], %f3;

BB76_2:
	ret;
}

	// .globl	vec_fmodf
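	// Elementwise fmodf: out[i] = fmodf(a[i], b[i]), computed by iterative scaled subtraction;
	// the result keeps the sign of a[i].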
.visible .entry vec_fmodf(
	.param .u64 vec_fmodf_param_0,
	.param .u64 vec_fmodf_param_1,
	.param .u64 vec_fmodf_param_2,
	.param .u64 vec_fmodf_param_3
)
{
	.reg .pred 	%p<20>;
	.reg .f32 	%f<48>;
	.reg .b32 	%r<22>;
	.reg .b64 	%rd<14>;


	ld.param.u64 	%rd5, [vec_fmodf_param_0];
	ld.param.u64 	%rd2, [vec_fmodf_param_1];
	ld.param.u64 	%rd3, [vec_fmodf_param_2];
	ld.param.u64 	%rd4, [vec_fmodf_param_3];
	mov.u32 	%r3, %tid.x;
	mov.u32 	%r4, %ntid.x;
	mov.u32 	%r5, %ctaid.x;
	mad.lo.s32 	%r6, %r4, %r5, %r3;
	cvt.s64.s32	%rd1, %r6;
	setp.ge.u64	%p1, %rd1, %rd5;
	@%p1 bra 	BB77_15;

	cvta.to.global.u64 	%rd6, %rd3;
	shl.b64 	%rd7, %rd1, 2;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd4;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd8];
	abs.f32 	%f46, %f1;
	ld.global.f32 	%f3, [%rd10];
	abs.f32 	%f4, %f3;
	setp.eq.f32	%p2, %f46, 0f7F800000;
	setp.eq.f32	%p3, %f4, 0f00000000;
	or.pred  	%p4, %p2, %p3;
	mov.f32 	%f47, 0f7FFFFFFF;
	@%p4 bra 	BB77_14;

	setp.ltu.f32	%p5, %f46, %f4;
	@%p5 bra 	BB77_13;
	bra.uni 	BB77_3;

BB77_13:
	setp.gtu.f32	%p18, %f4, 0f7F800000;
	add.f32 	%f41, %f1, %f3;
	selp.f32	%f42, %f41, %f1, %p18;
	add.f32 	%f43, %f1, %f42;
	setp.leu.f32	%p19, %f46, 0f00000000;
	selp.f32	%f47, %f43, %f42, %p19;
	bra.uni 	BB77_14;

BB77_3:
	lg2.approx.f32 	%f21, %f46;
	cvt.rzi.s32.f32	%r7, %f21;
	lg2.approx.f32 	%f22, %f4;
	cvt.rzi.s32.f32	%r8, %f22;
	sub.s32 	%r1, %r7, %r8;
	abs.f32 	%f5, %f4;
	setp.eq.f32	%p6, %f5, 0f00000000;
	setp.eq.f32	%p7, %f5, 0f7F800000;
	or.pred  	%p8, %p6, %p7;
	setp.eq.s32	%p9, %r7, %r8;
	or.pred  	%p10, %p8, %p9;
	@%p10 bra 	BB77_9;
	bra.uni 	BB77_4;

BB77_9:
	setp.leu.f32	%p13, %f5, 0f00000000;
	add.f32 	%f37, %f4, %f4;
	selp.f32	%f44, %f37, %f4, %p13;
	bra.uni 	BB77_10;

BB77_4:
	abs.s32 	%r2, %r1;
	setp.lt.s32	%p11, %r2, 126;
	@%p11 bra 	BB77_8;
	bra.uni 	BB77_5;

BB77_8:
	cvt.rn.f32.s32	%f36, %r1;
	// inline asm
	ex2.approx.ftz.f32 %f35,%f36;
	// inline asm
	mul.f32 	%f44, %f4, %f35;
	bra.uni 	BB77_10;

BB77_5:
	setp.lt.s32	%p12, %r2, 252;
	@%p12 bra 	BB77_7;
	bra.uni 	BB77_6;

BB77_7:
	shr.u32 	%r14, %r1, 31;
	add.s32 	%r15, %r1, %r14;
	shr.s32 	%r16, %r15, 1;
	cvt.rn.f32.s32	%f31, %r16;
	// inline asm
	ex2.approx.ftz.f32 %f30,%f31;
	// inline asm
	mul.f32 	%f34, %f4, %f30;
	sub.s32 	%r17, %r1, %r16;
	cvt.rn.f32.s32	%f33, %r17;
	// inline asm
	ex2.approx.ftz.f32 %f32,%f33;
	// inline asm
	mul.f32 	%f44, %f34, %f32;
	bra.uni 	BB77_10;

BB77_6:
	shr.s32 	%r9, %r1, 31;
	shr.u32 	%r10, %r9, 30;
	add.s32 	%r11, %r1, %r10;
	shr.s32 	%r12, %r11, 2;
	cvt.rn.f32.s32	%f24, %r12;
	// inline asm
	ex2.approx.ftz.f32 %f23,%f24;
	// inline asm
	mul.f32 	%f27, %f4, %f23;
	mul.f32 	%f28, %f23, %f27;
	mul.f32 	%f29, %f23, %f28;
	mad.lo.s32 	%r13, %r12, -3, %r1;
	cvt.rn.f32.s32	%f26, %r13;
	// inline asm
	ex2.approx.ftz.f32 %f25,%f26;
	// inline asm
	mul.f32 	%f44, %f25, %f29;

BB77_10:
	mul.f32 	%f38, %f46, 0f3F000000;
	setp.gtu.f32	%p14, %f44, %f38;
	add.f32 	%f39, %f44, %f44;
	selp.f32	%f45, %f44, %f39, %p14;
	setp.ltu.f32	%p15, %f45, %f4;
	@%p15 bra 	BB77_12;

BB77_11:
	sub.f32 	%f40, %f46, %f45;
	setp.ltu.f32	%p16, %f46, %f45;
	selp.f32	%f46, %f46, %f40, %p16;
	mul.f32 	%f45, %f45, 0f3F000000;
	setp.ge.f32	%p17, %f45, %f4;
	@%p17 bra 	BB77_11;

BB77_12:
	mov.b32 	 %r18, %f1;
	and.b32  	%r19, %r18, -2147483648;
	mov.b32 	 %r20, %f46;
	or.b32  	%r21, %r20, %r19;
	mov.b32 	 %f47, %r21;

BB77_14:
	cvta.to.global.u64 	%rd11, %rd2;
	add.s64 	%rd13, %rd11, %rd7;
	st.global.f32 	[%rd13], %f47;

BB77_15:
	ret;
}

	// .globl	vec_hypotf
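	// Elementwise hypotf: out[i] = sqrt(a[i]^2 + b[i]^2), with exponent rescaling of the operands
	// to avoid intermediate overflow/underflow.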
.visible .entry vec_hypotf(
	.param .u64 vec_hypotf_param_0,
	.param .u64 vec_hypotf_param_1,
	.param .u64 vec_hypotf_param_2,
	.param .u64 vec_hypotf_param_3
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<17>;
	.reg .b32 	%r<13>;
	.reg .b64 	%rd<13>;


	ld.param.u64 	%rd5, [vec_hypotf_param_0];
	ld.param.u64 	%rd2, [vec_hypotf_param_1];
	ld.param.u64 	%rd3, [vec_hypotf_param_2];
	ld.param.u64 	%rd4, [vec_hypotf_param_3];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p1, %rd1, %rd5;
	@%p1 bra 	BB78_2;

	cvta.to.global.u64 	%rd6, %rd3;
	shl.b64 	%rd7, %rd1, 2;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd4;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd8];
	abs.f32 	%f2, %f1;
	mov.b32 	 %r5, %f2;
	ld.global.f32 	%f3, [%rd10];
	abs.f32 	%f4, %f3;
	mov.b32 	 %r6, %f4;
	min.s32 	%r7, %r6, %r5;
	mov.b32 	 %f5, %r7;
	max.s32 	%r8, %r5, %r6;
	mov.b32 	 %f6, %r8;
	and.b32  	%r9, %r8, -33554432;
	mov.u32 	%r10, 2122317824;
	sub.s32 	%r11, %r10, %r9;
	mov.b32 	 %f7, %r11;
	mul.f32 	%f8, %f5, %f7;
	mul.f32 	%f9, %f6, %f7;
	mul.f32 	%f10, %f8, %f8;
	fma.rn.f32 	%f11, %f9, %f9, %f10;
	sqrt.rn.f32 	%f12, %f11;
	add.s32 	%r12, %r9, 8388608;
	mov.b32 	 %f13, %r12;
	mul.f32 	%f14, %f12, %f13;
	setp.eq.f32	%p2, %f5, 0f00000000;
	selp.f32	%f15, %f6, %f14, %p2;
	setp.eq.f32	%p3, %f5, 0f7F800000;
	selp.f32	%f16, 0f7F800000, %f15, %p3;
	cvta.to.global.u64 	%rd11, %rd2;
	add.s64 	%rd12, %rd11, %rd7;
	st.global.f32 	[%rd12], %f16;

BB78_2:
	ret;
}

	// .globl	vec_nextafterf
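	// Elementwise nextafterf: out[i] is the next representable float after a[i] in the direction of b[i].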
.visible .entry vec_nextafterf(
	.param .u64 vec_nextafterf_param_0,
	.param .u64 vec_nextafterf_param_1,
	.param .u64 vec_nextafterf_param_2,
	.param .u64 vec_nextafterf_param_3
)
{
	.reg .pred 	%p<14>;
	.reg .f32 	%f<11>;
	.reg .b32 	%r<18>;
	.reg .b64 	%rd<14>;


	ld.param.u64 	%rd5, [vec_nextafterf_param_0];
	ld.param.u64 	%rd2, [vec_nextafterf_param_1];
	ld.param.u64 	%rd3, [vec_nextafterf_param_2];
	ld.param.u64 	%rd4, [vec_nextafterf_param_3];
	mov.u32 	%r3, %tid.x;
	mov.u32 	%r4, %ntid.x;
	mov.u32 	%r5, %ctaid.x;
	mad.lo.s32 	%r6, %r4, %r5, %r3;
	cvt.s64.s32	%rd1, %r6;
	setp.ge.u64	%p1, %rd1, %rd5;
	@%p1 bra 	BB79_9;

	cvta.to.global.u64 	%rd6, %rd3;
	shl.b64 	%rd7, %rd1, 2;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd4;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd8];
	mov.b32 	 %r1, %f1;
	ld.global.f32 	%f10, [%rd10];
	mov.b32 	 %r2, %f10;
	abs.f32 	%f7, %f1;
	setp.gtu.f32	%p2, %f7, 0f7F800000;
	@%p2 bra 	BB79_7;

	abs.f32 	%f8, %f10;
	setp.gtu.f32	%p3, %f8, 0f7F800000;
	@%p3 bra 	BB79_7;
	bra.uni 	BB79_3;

BB79_7:
	add.f32 	%f10, %f1, %f10;
	bra.uni 	BB79_8;

BB79_3:
	or.b32  	%r7, %r2, %r1;
	mov.b32 	 %f9, %r7;
	setp.eq.f32	%p4, %f9, 0f00000000;
	@%p4 bra 	BB79_8;

	setp.eq.f32	%p5, %f1, 0f00000000;
	@%p5 bra 	BB79_6;
	bra.uni 	BB79_5;

BB79_6:
	and.b32  	%r16, %r2, -2147483648;
	or.b32  	%r17, %r16, 1;
	mov.b32 	 %f10, %r17;
	bra.uni 	BB79_8;

BB79_5:
	setp.lt.f32	%p6, %f1, %f10;
	setp.lt.f32	%p7, %f1, 0f00000000;
	and.pred  	%p8, %p6, %p7;
	selp.s32	%r8, -1, 0, %p8;
	add.s32 	%r9, %r8, %r1;
	setp.gt.f32	%p9, %f1, 0f00000000;
	and.pred  	%p10, %p6, %p9;
	selp.u32	%r10, 1, 0, %p10;
	add.s32 	%r11, %r9, %r10;
	setp.gt.f32	%p11, %f1, %f10;
	and.pred  	%p12, %p11, %p7;
	selp.u32	%r12, 1, 0, %p12;
	add.s32 	%r13, %r11, %r12;
	and.pred  	%p13, %p11, %p9;
	selp.s32	%r14, -1, 0, %p13;
	add.s32 	%r15, %r13, %r14;
	mov.b32 	 %f10, %r15;

BB79_8:
	cvta.to.global.u64 	%rd11, %rd2;
	add.s64 	%rd13, %rd11, %rd7;
	st.global.f32 	[%rd13], %f10;

BB79_9:
	ret;
}

	// .globl	vec_powf
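	// Elementwise powf: out[i] = powf(a[i], b[i]), computed via log2/exp2 with special-case handling
	// for zero, infinity, NaN and negative bases.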
.visible .entry vec_powf(
	.param .u64 vec_powf_param_0,
	.param .u64 vec_powf_param_1,
	.param .u64 vec_powf_param_2,
	.param .u64 vec_powf_param_3
)
{
	.reg .pred 	%p<30>;
	.reg .f32 	%f<103>;
	.reg .b32 	%r<24>;
	.reg .b64 	%rd<14>;


	ld.param.u64 	%rd6, [vec_powf_param_0];
	ld.param.u64 	%rd3, [vec_powf_param_1];
	ld.param.u64 	%rd4, [vec_powf_param_2];
	ld.param.u64 	%rd5, [vec_powf_param_3];
	mov.u32 	%r1, %tid.x;
	mov.u32 	%r2, %ntid.x;
	mov.u32 	%r3, %ctaid.x;
	mad.lo.s32 	%r4, %r2, %r3, %r1;
	cvt.s64.s32	%rd1, %r4;
	setp.ge.u64	%p2, %rd1, %rd6;
	@%p2 bra 	BB80_15;

	cvta.to.global.u64 	%rd7, %rd4;
	cvta.to.global.u64 	%rd2, %rd3;
	shl.b64 	%rd8, %rd1, 2;
	add.s64 	%rd9, %rd7, %rd8;
	cvta.to.global.u64 	%rd10, %rd5;
	add.s64 	%rd11, %rd10, %rd8;
	ld.global.f32 	%f1, [%rd11];
	mul.f32 	%f22, %f1, 0f3F000000;
	cvt.rzi.f32.f32	%f23, %f22;
	fma.rn.f32 	%f24, %f23, 0fC0000000, %f1;
	abs.f32 	%f2, %f24;
	ld.global.f32 	%f3, [%rd9];
	abs.f32 	%f4, %f3;
	setp.lt.f32	%p3, %f4, 0f00800000;
	mul.f32 	%f25, %f4, 0f4B800000;
	selp.f32	%f26, 0fC3170000, 0fC2FE0000, %p3;
	selp.f32	%f27, %f25, %f4, %p3;
	mov.b32 	 %r5, %f27;
	and.b32  	%r6, %r5, 8388607;
	or.b32  	%r7, %r6, 1065353216;
	mov.b32 	 %f28, %r7;
	shr.u32 	%r8, %r5, 23;
	cvt.rn.f32.u32	%f29, %r8;
	add.f32 	%f30, %f26, %f29;
	setp.gt.f32	%p4, %f28, 0f3FB504F3;
	mul.f32 	%f31, %f28, 0f3F000000;
	add.f32 	%f32, %f30, 0f3F800000;
	selp.f32	%f33, %f31, %f28, %p4;
	selp.f32	%f34, %f32, %f30, %p4;
	add.f32 	%f35, %f33, 0fBF800000;
	add.f32 	%f19, %f33, 0f3F800000;
	// inline asm
	rcp.approx.ftz.f32 %f18,%f19;
	// inline asm
	add.f32 	%f36, %f35, %f35;
	mul.f32 	%f37, %f18, %f36;
	mul.f32 	%f38, %f37, %f37;
	mov.f32 	%f39, 0f3C4CAF63;
	mov.f32 	%f40, 0f3B18F0FE;
	fma.rn.f32 	%f41, %f40, %f38, %f39;
	mov.f32 	%f42, 0f3DAAAABD;
	fma.rn.f32 	%f43, %f41, %f38, %f42;
	mul.rn.f32 	%f44, %f43, %f38;
	mul.rn.f32 	%f45, %f44, %f37;
	sub.f32 	%f46, %f35, %f37;
	neg.f32 	%f47, %f37;
	add.f32 	%f48, %f46, %f46;
	fma.rn.f32 	%f49, %f47, %f35, %f48;
	mul.rn.f32 	%f50, %f18, %f49;
	add.f32 	%f51, %f45, %f37;
	sub.f32 	%f52, %f37, %f51;
	add.f32 	%f53, %f45, %f52;
	add.f32 	%f54, %f50, %f53;
	add.f32 	%f55, %f51, %f54;
	sub.f32 	%f56, %f51, %f55;
	add.f32 	%f57, %f54, %f56;
	mov.f32 	%f58, 0f3F317200;
	mul.rn.f32 	%f59, %f34, %f58;
	mov.f32 	%f60, 0f35BFBE8E;
	mul.rn.f32 	%f61, %f34, %f60;
	add.f32 	%f62, %f59, %f55;
	sub.f32 	%f63, %f59, %f62;
	add.f32 	%f64, %f55, %f63;
	add.f32 	%f65, %f57, %f64;
	add.f32 	%f66, %f61, %f65;
	add.f32 	%f67, %f62, %f66;
	sub.f32 	%f68, %f62, %f67;
	add.f32 	%f69, %f66, %f68;
	abs.f32 	%f5, %f1;
	setp.gt.f32	%p5, %f5, 0f77F684DF;
	mul.f32 	%f70, %f1, 0f39000000;
	selp.f32	%f71, %f70, %f1, %p5;
	mul.rn.f32 	%f72, %f71, %f67;
	neg.f32 	%f73, %f72;
	fma.rn.f32 	%f74, %f71, %f67, %f73;
	fma.rn.f32 	%f75, %f71, %f69, %f74;
	mov.f32 	%f76, 0f00000000;
	fma.rn.f32 	%f77, %f76, %f67, %f75;
	add.rn.f32 	%f78, %f72, %f77;
	neg.f32 	%f79, %f78;
	add.rn.f32 	%f80, %f72, %f79;
	add.rn.f32 	%f81, %f80, %f77;
	mov.b32 	 %r9, %f78;
	setp.eq.s32	%p6, %r9, 1118925336;
	add.s32 	%r10, %r9, -1;
	mov.b32 	 %f82, %r10;
	add.f32 	%f83, %f81, 0f37000000;
	selp.f32	%f84, %f82, %f78, %p6;
	selp.f32	%f6, %f83, %f81, %p6;
	mul.f32 	%f85, %f84, 0f3FB8AA3B;
	cvt.rzi.f32.f32	%f86, %f85;
	mov.f32 	%f87, 0fBF317200;
	fma.rn.f32 	%f88, %f86, %f87, %f84;
	mov.f32 	%f89, 0fB5BFBE8E;
	fma.rn.f32 	%f90, %f86, %f89, %f88;
	mul.f32 	%f21, %f90, 0f3FB8AA3B;
	// inline asm
	ex2.approx.ftz.f32 %f20,%f21;
	// inline asm
	add.f32 	%f91, %f86, 0f00000000;
	ex2.approx.f32 	%f92, %f91;
	mul.f32 	%f93, %f20, %f92;
	setp.lt.f32	%p7, %f84, 0fC2D20000;
	selp.f32	%f94, 0f00000000, %f93, %p7;
	setp.gt.f32	%p8, %f84, 0f42D20000;
	selp.f32	%f101, 0f7F800000, %f94, %p8;
	setp.eq.f32	%p9, %f101, 0f7F800000;
	@%p9 bra 	BB80_3;

	fma.rn.f32 	%f101, %f101, %f6, %f101;

BB80_3:
	setp.lt.f32	%p10, %f3, 0f00000000;
	setp.eq.f32	%p11, %f2, 0f3F800000;
	and.pred  	%p1, %p10, %p11;
	mov.b32 	 %r11, %f101;
	xor.b32  	%r12, %r11, -2147483648;
	mov.b32 	 %f95, %r12;
	selp.f32	%f102, %f95, %f101, %p1;
	setp.eq.f32	%p12, %f3, 0f00000000;
	@%p12 bra 	BB80_6;
	bra.uni 	BB80_4;

BB80_6:
	add.f32 	%f97, %f3, %f3;
	mov.b32 	 %r13, %f97;
	selp.b32	%r14, %r13, 0, %p11;
	or.b32  	%r15, %r14, 2139095040;
	setp.lt.f32	%p16, %f1, 0f00000000;
	selp.b32	%r16, %r15, %r14, %p16;
	mov.b32 	 %f102, %r16;
	bra.uni 	BB80_7;

BB80_4:
	setp.geu.f32	%p13, %f3, 0f00000000;
	@%p13 bra 	BB80_7;

	cvt.rzi.f32.f32	%f96, %f1;
	setp.neu.f32	%p14, %f96, %f1;
	selp.f32	%f102, 0f7FFFFFFF, %f102, %p14;

BB80_7:
	add.f32 	%f98, %f4, %f5;
	mov.b32 	 %r17, %f98;
	setp.lt.s32	%p17, %r17, 2139095040;
	@%p17 bra 	BB80_14;

	setp.gtu.f32	%p18, %f4, 0f7F800000;
	setp.gtu.f32	%p19, %f5, 0f7F800000;
	or.pred  	%p20, %p18, %p19;
	@%p20 bra 	BB80_13;
	bra.uni 	BB80_9;

BB80_13:
	add.f32 	%f102, %f3, %f1;
	bra.uni 	BB80_14;

BB80_9:
	setp.eq.f32	%p21, %f5, 0f7F800000;
	@%p21 bra 	BB80_12;
	bra.uni 	BB80_10;

BB80_12:
	setp.gt.f32	%p24, %f4, 0f3F800000;
	selp.b32	%r21, 2139095040, 0, %p24;
	xor.b32  	%r22, %r21, 2139095040;
	setp.lt.f32	%p25, %f1, 0f00000000;
	selp.b32	%r23, %r22, %r21, %p25;
	mov.b32 	 %f99, %r23;
	setp.eq.f32	%p26, %f3, 0fBF800000;
	selp.f32	%f102, 0f3F800000, %f99, %p26;
	bra.uni 	BB80_14;

BB80_10:
	setp.neu.f32	%p22, %f4, 0f7F800000;
	@%p22 bra 	BB80_14;

	setp.ltu.f32	%p23, %f1, 0f00000000;
	selp.b32	%r18, 0, 2139095040, %p23;
	or.b32  	%r19, %r18, -2147483648;
	selp.b32	%r20, %r19, %r18, %p1;
	mov.b32 	 %f102, %r20;

BB80_14:
	setp.eq.f32	%p27, %f1, 0f00000000;
	setp.eq.f32	%p28, %f3, 0f3F800000;
	or.pred  	%p29, %p28, %p27;
	selp.f32	%f100, 0f3F800000, %f102, %p29;
	add.s64 	%rd13, %rd2, %rd8;
	st.global.f32 	[%rd13], %f100;

BB80_15:
	ret;
}

	// .globl	vec_remainderf
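	// Elementwise remainderf: out[i] = IEEE remainder of a[i] with respect to b[i]
	// (quotient rounded to nearest, ties to even).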
.visible .entry vec_remainderf(
	.param .u64 vec_remainderf_param_0,
	.param .u64 vec_remainderf_param_1,
	.param .u64 vec_remainderf_param_2,
	.param .u64 vec_remainderf_param_3
)
{
	.reg .pred 	%p<29>;
	.reg .f32 	%f<52>;
	.reg .b32 	%r<27>;
	.reg .b64 	%rd<14>;


	ld.param.u64 	%rd5, [vec_remainderf_param_0];
	ld.param.u64 	%rd2, [vec_remainderf_param_1];
	ld.param.u64 	%rd3, [vec_remainderf_param_2];
	ld.param.u64 	%rd4, [vec_remainderf_param_3];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r8, %r6, %r7, %r5;
	cvt.s64.s32	%rd1, %r8;
	setp.ge.u64	%p3, %rd1, %rd5;
	@%p3 bra 	BB81_20;

	cvta.to.global.u64 	%rd6, %rd3;
	shl.b64 	%rd7, %rd1, 2;
	add.s64 	%rd8, %rd6, %rd7;
	cvta.to.global.u64 	%rd9, %rd4;
	add.s64 	%rd10, %rd9, %rd7;
	ld.global.f32 	%f1, [%rd8];
	abs.f32 	%f2, %f1;
	ld.global.f32 	%f3, [%rd10];
	abs.f32 	%f4, %f3;
	setp.gtu.f32	%p4, %f2, 0f7F800000;
	setp.gtu.f32	%p5, %f4, 0f7F800000;
	or.pred  	%p6, %p4, %p5;
	@%p6 bra 	BB81_18;
	bra.uni 	BB81_2;

BB81_18:
	add.f32 	%f51, %f1, %f3;
	bra.uni 	BB81_19;

BB81_2:
	setp.eq.f32	%p7, %f2, 0f7F800000;
	setp.eq.f32	%p8, %f4, 0f00000000;
	or.pred  	%p9, %p7, %p8;
	mov.f32 	%f51, 0f7FFFFFFF;
	@%p9 bra 	BB81_19;

	setp.ltu.f32	%p10, %f2, %f4;
	mov.u32 	%r26, 0;
	mov.f32 	%f49, %f2;
	@%p10 bra 	BB81_15;

	lg2.approx.f32 	%f22, %f2;
	cvt.rzi.s32.f32	%r10, %f22;
	lg2.approx.f32 	%f23, %f4;
	cvt.rzi.s32.f32	%r11, %f23;
	sub.s32 	%r1, %r10, %r11;
	abs.f32 	%f5, %f4;
	setp.eq.f32	%p11, %f5, 0f00000000;
	setp.eq.f32	%p12, %f5, 0f7F800000;
	or.pred  	%p13, %p11, %p12;
	setp.eq.s32	%p14, %r10, %r11;
	or.pred  	%p15, %p13, %p14;
	@%p15 bra 	BB81_10;
	bra.uni 	BB81_5;

BB81_10:
	setp.leu.f32	%p18, %f5, 0f00000000;
	add.f32 	%f38, %f4, %f4;
	selp.f32	%f44, %f38, %f4, %p18;
	bra.uni 	BB81_11;

BB81_5:
	abs.s32 	%r2, %r1;
	setp.lt.s32	%p16, %r2, 126;
	@%p16 bra 	BB81_9;
	bra.uni 	BB81_6;

BB81_9:
	cvt.rn.f32.s32	%f37, %r1;
	// inline asm
	ex2.approx.ftz.f32 %f36,%f37;
	// inline asm
	mul.f32 	%f44, %f4, %f36;
	bra.uni 	BB81_11;

BB81_6:
	setp.lt.s32	%p17, %r2, 252;
	@%p17 bra 	BB81_8;
	bra.uni 	BB81_7;

BB81_8:
	shr.u32 	%r17, %r1, 31;
	add.s32 	%r18, %r1, %r17;
	shr.s32 	%r19, %r18, 1;
	cvt.rn.f32.s32	%f32, %r19;
	// inline asm
	ex2.approx.ftz.f32 %f31,%f32;
	// inline asm
	mul.f32 	%f35, %f4, %f31;
	sub.s32 	%r20, %r1, %r19;
	cvt.rn.f32.s32	%f34, %r20;
	// inline asm
	ex2.approx.ftz.f32 %f33,%f34;
	// inline asm
	mul.f32 	%f44, %f35, %f33;
	bra.uni 	BB81_11;

BB81_7:
	shr.s32 	%r12, %r1, 31;
	shr.u32 	%r13, %r12, 30;
	add.s32 	%r14, %r1, %r13;
	shr.s32 	%r15, %r14, 2;
	cvt.rn.f32.s32	%f25, %r15;
	// inline asm
	ex2.approx.ftz.f32 %f24,%f25;
	// inline asm
	mul.f32 	%f28, %f4, %f24;
	mul.f32 	%f29, %f24, %f28;
	mul.f32 	%f30, %f24, %f29;
	mad.lo.s32 	%r16, %r15, -3, %r1;
	cvt.rn.f32.s32	%f27, %r16;
	// inline asm
	ex2.approx.ftz.f32 %f26,%f27;
	// inline asm
	mul.f32 	%f44, %f26, %f30;

BB81_11:
	mul.f32 	%f39, %f2, 0f3F000000;
	setp.gtu.f32	%p19, %f44, %f39;
	add.f32 	%f40, %f44, %f44;
	selp.f32	%f45, %f44, %f40, %p19;
	setp.ltu.f32	%p20, %f45, %f4;
	mov.f32 	%f46, %f2;
	mov.f32 	%f49, %f46;
	@%p20 bra 	BB81_15;

	mov.f32 	%f50, %f2;

BB81_13:
	mov.f32 	%f12, %f50;
	mov.f32 	%f13, %f45;
	sub.f32 	%f41, %f12, %f13;
	setp.ltu.f32	%p21, %f12, %f13;
	selp.f32	%f50, %f12, %f41, %p21;
	mul.f32 	%f45, %f13, 0f3F000000;
	setp.ge.f32	%p22, %f45, %f4;
	@%p22 bra 	BB81_13;

	setp.ge.f32	%p23, %f12, %f13;
	selp.u32	%r26, 1, 0, %p23;
	mov.f32 	%f49, %f50;

BB81_15:
	add.f32 	%f17, %f49, %f49;
	setp.gt.f32	%p25, %f17, %f4;
	mov.pred 	%p28, -1;
	@%p25 bra 	BB81_17;

	setp.eq.f32	%p26, %f17, %f4;
	setp.ne.s32	%p27, %r26, 0;
	and.pred  	%p28, %p26, %p27;

BB81_17:
	sub.f32 	%f42, %f49, %f4;
	selp.f32	%f43, %f42, %f49, %p28;
	mov.b32 	 %r22, %f1;
	and.b32  	%r23, %r22, -2147483648;
	mov.b32 	 %r24, %f43;
	xor.b32  	%r25, %r24, %r23;
	mov.b32 	 %f51, %r25;

BB81_19:
	cvta.to.global.u64 	%rd11, %rd2;
	add.s64 	%rd13, %rd11, %rd7;
	st.global.f32 	[%rd13], %f51;

BB81_20:
	ret;
}
