kernels.JCudaVec_kernels_float_32_cc20.ptx

//
// Generated by NVIDIA NVVM Compiler
//
// Compiler Build ID: CL-19805474
// Cuda compilation tools, release 7.5, V7.5.16
// Based on LLVM 3.4svn
//

.version 4.3
.target sm_20
.address_size 32
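// PTX ISA 4.3, compute capability 2.0 (sm_20), 32-bit generic addressing.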

	// .globl	vec_setf
.const .align 4 .b8 __cudart_i2opi_f[24] = {65, 144, 67, 60, 153, 149, 98, 219, 192, 221, 52, 245, 209, 87, 39, 252, 41, 21, 68, 78, 110, 131, 249, 162};
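// The 24 bytes above encode 2/pi in fixed point; they feed the Payne-Hanek
// style argument-reduction loop in vec_cosf below (see BB34_5).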

.visible .entry vec_setf(
	.param .u32 vec_setf_param_0,
	.param .u32 vec_setf_param_1,
	.param .f32 vec_setf_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<2>;
	.reg .b32 	%r<10>;


	ld.param.u32 	%r3, [vec_setf_param_0];
	ld.param.u32 	%r2, [vec_setf_param_1];
	ld.param.f32 	%f1, [vec_setf_param_2];
	mov.u32 	%r4, %tid.x;
	mov.u32 	%r5, %ntid.x;
	mov.u32 	%r6, %ctaid.x;
	mad.lo.s32 	%r1, %r5, %r6, %r4;
	setp.ge.u32	%p1, %r1, %r3;
	@%p1 bra 	BB0_2;

	cvta.to.global.u32 	%r7, %r2;
	shl.b32 	%r8, %r1, 2;
	add.s32 	%r9, %r7, %r8;
	st.global.f32 	[%r9], %f1;

BB0_2:
	ret;
}
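
// Every kernel in this file follows the pattern shown in vec_setf above:
// compute a global element index as ctaid.x * ntid.x + tid.x (that is,
// blockIdx.x * blockDim.x + threadIdx.x), branch out if it is >= param_0
// (the element count), otherwise shift the index left by 2 to get a byte
// offset into the float arrays addressed through cvta.to.global. As a rough
// sketch, the presumed CUDA C source of the first two kernels would look like
// the following (parameter names are hypothetical; only the parameter order
// and types are taken from the PTX):
//
//   extern "C" __global__ void vec_setf(unsigned int n, float *result, float value)
//   {
//       unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
//       if (i < n)               // matches the setp.ge.u32 / @%p1 bra guard
//           result[i] = value;   // matches the st.global.f32
//   }
//
//   extern "C" __global__ void vec_addf(unsigned int n, float *result,
//                                       const float *x, const float *y)
//   {
//       unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
//       if (i < n)
//           result[i] = x[i] + y[i];
//   }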

	// .globl	vec_addf
.visible .entry vec_addf(
	.param .u32 vec_addf_param_0,
	.param .u32 vec_addf_param_1,
	.param .u32 vec_addf_param_2,
	.param .u32 vec_addf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<16>;


	ld.param.u32 	%r5, [vec_addf_param_0];
	ld.param.u32 	%r2, [vec_addf_param_1];
	ld.param.u32 	%r3, [vec_addf_param_2];
	ld.param.u32 	%r4, [vec_addf_param_3];
	mov.u32 	%r6, %tid.x;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r6;
	setp.ge.u32	%p1, %r1, %r5;
	@%p1 bra 	BB1_2;

	cvta.to.global.u32 	%r9, %r3;
	shl.b32 	%r10, %r1, 2;
	add.s32 	%r11, %r9, %r10;
	cvta.to.global.u32 	%r12, %r4;
	add.s32 	%r13, %r12, %r10;
	ld.global.f32 	%f1, [%r13];
	ld.global.f32 	%f2, [%r11];
	add.f32 	%f3, %f2, %f1;
	cvta.to.global.u32 	%r14, %r2;
	add.s32 	%r15, %r14, %r10;
	st.global.f32 	[%r15], %f3;

BB1_2:
	ret;
}

	// .globl	vec_subf
.visible .entry vec_subf(
	.param .u32 vec_subf_param_0,
	.param .u32 vec_subf_param_1,
	.param .u32 vec_subf_param_2,
	.param .u32 vec_subf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<16>;


	ld.param.u32 	%r5, [vec_subf_param_0];
	ld.param.u32 	%r2, [vec_subf_param_1];
	ld.param.u32 	%r3, [vec_subf_param_2];
	ld.param.u32 	%r4, [vec_subf_param_3];
	mov.u32 	%r6, %tid.x;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r6;
	setp.ge.u32	%p1, %r1, %r5;
	@%p1 bra 	BB2_2;

	cvta.to.global.u32 	%r9, %r3;
	shl.b32 	%r10, %r1, 2;
	add.s32 	%r11, %r9, %r10;
	cvta.to.global.u32 	%r12, %r4;
	add.s32 	%r13, %r12, %r10;
	ld.global.f32 	%f1, [%r13];
	ld.global.f32 	%f2, [%r11];
	sub.f32 	%f3, %f2, %f1;
	cvta.to.global.u32 	%r14, %r2;
	add.s32 	%r15, %r14, %r10;
	st.global.f32 	[%r15], %f3;

BB2_2:
	ret;
}

	// .globl	vec_mulf
.visible .entry vec_mulf(
	.param .u32 vec_mulf_param_0,
	.param .u32 vec_mulf_param_1,
	.param .u32 vec_mulf_param_2,
	.param .u32 vec_mulf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<16>;


	ld.param.u32 	%r5, [vec_mulf_param_0];
	ld.param.u32 	%r2, [vec_mulf_param_1];
	ld.param.u32 	%r3, [vec_mulf_param_2];
	ld.param.u32 	%r4, [vec_mulf_param_3];
	mov.u32 	%r6, %tid.x;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r6;
	setp.ge.u32	%p1, %r1, %r5;
	@%p1 bra 	BB3_2;

	cvta.to.global.u32 	%r9, %r3;
	shl.b32 	%r10, %r1, 2;
	add.s32 	%r11, %r9, %r10;
	cvta.to.global.u32 	%r12, %r4;
	add.s32 	%r13, %r12, %r10;
	ld.global.f32 	%f1, [%r13];
	ld.global.f32 	%f2, [%r11];
	mul.f32 	%f3, %f2, %f1;
	cvta.to.global.u32 	%r14, %r2;
	add.s32 	%r15, %r14, %r10;
	st.global.f32 	[%r15], %f3;

BB3_2:
	ret;
}

	// .globl	vec_divf
.visible .entry vec_divf(
	.param .u32 vec_divf_param_0,
	.param .u32 vec_divf_param_1,
	.param .u32 vec_divf_param_2,
	.param .u32 vec_divf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<16>;


	ld.param.u32 	%r5, [vec_divf_param_0];
	ld.param.u32 	%r2, [vec_divf_param_1];
	ld.param.u32 	%r3, [vec_divf_param_2];
	ld.param.u32 	%r4, [vec_divf_param_3];
	mov.u32 	%r6, %tid.x;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r6;
	setp.ge.u32	%p1, %r1, %r5;
	@%p1 bra 	BB4_2;

	cvta.to.global.u32 	%r9, %r3;
	shl.b32 	%r10, %r1, 2;
	add.s32 	%r11, %r9, %r10;
	cvta.to.global.u32 	%r12, %r4;
	add.s32 	%r13, %r12, %r10;
	ld.global.f32 	%f1, [%r13];
	ld.global.f32 	%f2, [%r11];
	div.rn.f32 	%f3, %f2, %f1;
	cvta.to.global.u32 	%r14, %r2;
	add.s32 	%r15, %r14, %r10;
	st.global.f32 	[%r15], %f3;

BB4_2:
	ret;
}

	// .globl	vec_negatef
.visible .entry vec_negatef(
	.param .u32 vec_negatef_param_0,
	.param .u32 vec_negatef_param_1,
	.param .u32 vec_negatef_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_negatef_param_0];
	ld.param.u32 	%r2, [vec_negatef_param_1];
	ld.param.u32 	%r3, [vec_negatef_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB5_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	neg.f32 	%f2, %f1;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f2;

BB5_2:
	ret;
}

	// .globl	vec_addScalarf
.visible .entry vec_addScalarf(
	.param .u32 vec_addScalarf_param_0,
	.param .u32 vec_addScalarf_param_1,
	.param .u32 vec_addScalarf_param_2,
	.param .f32 vec_addScalarf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_addScalarf_param_0];
	ld.param.u32 	%r2, [vec_addScalarf_param_1];
	ld.param.u32 	%r3, [vec_addScalarf_param_2];
	ld.param.f32 	%f1, [vec_addScalarf_param_3];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB6_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f2, [%r10];
	add.f32 	%f3, %f2, %f1;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f3;

BB6_2:
	ret;
}

	// .globl	vec_subScalarf
.visible .entry vec_subScalarf(
	.param .u32 vec_subScalarf_param_0,
	.param .u32 vec_subScalarf_param_1,
	.param .u32 vec_subScalarf_param_2,
	.param .f32 vec_subScalarf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_subScalarf_param_0];
	ld.param.u32 	%r2, [vec_subScalarf_param_1];
	ld.param.u32 	%r3, [vec_subScalarf_param_2];
	ld.param.f32 	%f1, [vec_subScalarf_param_3];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB7_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f2, [%r10];
	sub.f32 	%f3, %f2, %f1;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f3;

BB7_2:
	ret;
}

	// .globl	vec_mulScalarf
.visible .entry vec_mulScalarf(
	.param .u32 vec_mulScalarf_param_0,
	.param .u32 vec_mulScalarf_param_1,
	.param .u32 vec_mulScalarf_param_2,
	.param .f32 vec_mulScalarf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_mulScalarf_param_0];
	ld.param.u32 	%r2, [vec_mulScalarf_param_1];
	ld.param.u32 	%r3, [vec_mulScalarf_param_2];
	ld.param.f32 	%f1, [vec_mulScalarf_param_3];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB8_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f2, [%r10];
	mul.f32 	%f3, %f2, %f1;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f3;

BB8_2:
	ret;
}

	// .globl	vec_divScalarf
.visible .entry vec_divScalarf(
	.param .u32 vec_divScalarf_param_0,
	.param .u32 vec_divScalarf_param_1,
	.param .u32 vec_divScalarf_param_2,
	.param .f32 vec_divScalarf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_divScalarf_param_0];
	ld.param.u32 	%r2, [vec_divScalarf_param_1];
	ld.param.u32 	%r3, [vec_divScalarf_param_2];
	ld.param.f32 	%f1, [vec_divScalarf_param_3];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB9_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f2, [%r10];
	div.rn.f32 	%f3, %f2, %f1;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f3;

BB9_2:
	ret;
}
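
// The vec_scalar*f kernels below mirror the vec_*Scalarf kernels above with
// the operand order reversed where it matters: vec_subScalarf computes
// x[i] - s (sub.f32 %f3, %f2, %f1) while vec_scalarSubf computes s - x[i]
// (sub.f32 %f3, %f1, %f2), and likewise vec_divScalarf computes x[i] / s
// while vec_scalarDivf computes s / x[i]. For the commutative add and mul the
// two variants are equivalent.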

	// .globl	vec_scalarAddf
.visible .entry vec_scalarAddf(
	.param .u32 vec_scalarAddf_param_0,
	.param .u32 vec_scalarAddf_param_1,
	.param .f32 vec_scalarAddf_param_2,
	.param .u32 vec_scalarAddf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_scalarAddf_param_0];
	ld.param.u32 	%r2, [vec_scalarAddf_param_1];
	ld.param.f32 	%f1, [vec_scalarAddf_param_2];
	ld.param.u32 	%r3, [vec_scalarAddf_param_3];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB10_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f2, [%r10];
	add.f32 	%f3, %f2, %f1;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f3;

BB10_2:
	ret;
}

	// .globl	vec_scalarSubf
.visible .entry vec_scalarSubf(
	.param .u32 vec_scalarSubf_param_0,
	.param .u32 vec_scalarSubf_param_1,
	.param .f32 vec_scalarSubf_param_2,
	.param .u32 vec_scalarSubf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_scalarSubf_param_0];
	ld.param.u32 	%r2, [vec_scalarSubf_param_1];
	ld.param.f32 	%f1, [vec_scalarSubf_param_2];
	ld.param.u32 	%r3, [vec_scalarSubf_param_3];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB11_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f2, [%r10];
	sub.f32 	%f3, %f1, %f2;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f3;

BB11_2:
	ret;
}

	// .globl	vec_scalarMulf
.visible .entry vec_scalarMulf(
	.param .u32 vec_scalarMulf_param_0,
	.param .u32 vec_scalarMulf_param_1,
	.param .f32 vec_scalarMulf_param_2,
	.param .u32 vec_scalarMulf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_scalarMulf_param_0];
	ld.param.u32 	%r2, [vec_scalarMulf_param_1];
	ld.param.f32 	%f1, [vec_scalarMulf_param_2];
	ld.param.u32 	%r3, [vec_scalarMulf_param_3];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB12_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f2, [%r10];
	mul.f32 	%f3, %f2, %f1;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f3;

BB12_2:
	ret;
}

	// .globl	vec_scalarDivf
.visible .entry vec_scalarDivf(
	.param .u32 vec_scalarDivf_param_0,
	.param .u32 vec_scalarDivf_param_1,
	.param .f32 vec_scalarDivf_param_2,
	.param .u32 vec_scalarDivf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_scalarDivf_param_0];
	ld.param.u32 	%r2, [vec_scalarDivf_param_1];
	ld.param.f32 	%f1, [vec_scalarDivf_param_2];
	ld.param.u32 	%r3, [vec_scalarDivf_param_3];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB13_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f2, [%r10];
	div.rn.f32 	%f3, %f1, %f2;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f3;

BB13_2:
	ret;
}
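
// The comparison kernels below (vec_ltf .. vec_nef and their *Scalarf
// variants) store 1.0f (0f3F800000) where the predicate holds and 0.0f
// otherwise, via setp/selp. The <= and >= variants test the negated
// unordered predicate (setp.gtu / setp.ltu) with the select constants
// swapped, so each kernel keeps the IEEE semantics of its C operator:
// comparisons involving NaN yield 0.0f, except the not-equal kernels
// (setp.neu), where NaN compares unequal and yields 1.0f.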

	// .globl	vec_ltf
.visible .entry vec_ltf(
	.param .u32 vec_ltf_param_0,
	.param .u32 vec_ltf_param_1,
	.param .u32 vec_ltf_param_2,
	.param .u32 vec_ltf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<16>;


	ld.param.u32 	%r5, [vec_ltf_param_0];
	ld.param.u32 	%r2, [vec_ltf_param_1];
	ld.param.u32 	%r3, [vec_ltf_param_2];
	ld.param.u32 	%r4, [vec_ltf_param_3];
	mov.u32 	%r6, %tid.x;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r6;
	setp.ge.u32	%p1, %r1, %r5;
	@%p1 bra 	BB14_2;

	cvta.to.global.u32 	%r9, %r3;
	shl.b32 	%r10, %r1, 2;
	add.s32 	%r11, %r9, %r10;
	cvta.to.global.u32 	%r12, %r4;
	add.s32 	%r13, %r12, %r10;
	ld.global.f32 	%f1, [%r13];
	ld.global.f32 	%f2, [%r11];
	setp.lt.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f3F800000, 0f00000000, %p2;
	cvta.to.global.u32 	%r14, %r2;
	add.s32 	%r15, %r14, %r10;
	st.global.f32 	[%r15], %f3;

BB14_2:
	ret;
}

	// .globl	vec_ltef
.visible .entry vec_ltef(
	.param .u32 vec_ltef_param_0,
	.param .u32 vec_ltef_param_1,
	.param .u32 vec_ltef_param_2,
	.param .u32 vec_ltef_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<16>;


	ld.param.u32 	%r5, [vec_ltef_param_0];
	ld.param.u32 	%r2, [vec_ltef_param_1];
	ld.param.u32 	%r3, [vec_ltef_param_2];
	ld.param.u32 	%r4, [vec_ltef_param_3];
	mov.u32 	%r6, %tid.x;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r6;
	setp.ge.u32	%p1, %r1, %r5;
	@%p1 bra 	BB15_2;

	cvta.to.global.u32 	%r9, %r3;
	shl.b32 	%r10, %r1, 2;
	add.s32 	%r11, %r9, %r10;
	cvta.to.global.u32 	%r12, %r4;
	add.s32 	%r13, %r12, %r10;
	ld.global.f32 	%f1, [%r13];
	ld.global.f32 	%f2, [%r11];
	setp.gtu.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f00000000, 0f3F800000, %p2;
	cvta.to.global.u32 	%r14, %r2;
	add.s32 	%r15, %r14, %r10;
	st.global.f32 	[%r15], %f3;

BB15_2:
	ret;
}

	// .globl	vec_eqf
.visible .entry vec_eqf(
	.param .u32 vec_eqf_param_0,
	.param .u32 vec_eqf_param_1,
	.param .u32 vec_eqf_param_2,
	.param .u32 vec_eqf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<16>;


	ld.param.u32 	%r5, [vec_eqf_param_0];
	ld.param.u32 	%r2, [vec_eqf_param_1];
	ld.param.u32 	%r3, [vec_eqf_param_2];
	ld.param.u32 	%r4, [vec_eqf_param_3];
	mov.u32 	%r6, %tid.x;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r6;
	setp.ge.u32	%p1, %r1, %r5;
	@%p1 bra 	BB16_2;

	cvta.to.global.u32 	%r9, %r3;
	shl.b32 	%r10, %r1, 2;
	add.s32 	%r11, %r9, %r10;
	cvta.to.global.u32 	%r12, %r4;
	add.s32 	%r13, %r12, %r10;
	ld.global.f32 	%f1, [%r13];
	ld.global.f32 	%f2, [%r11];
	setp.eq.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f3F800000, 0f00000000, %p2;
	cvta.to.global.u32 	%r14, %r2;
	add.s32 	%r15, %r14, %r10;
	st.global.f32 	[%r15], %f3;

BB16_2:
	ret;
}

	// .globl	vec_gtef
.visible .entry vec_gtef(
	.param .u32 vec_gtef_param_0,
	.param .u32 vec_gtef_param_1,
	.param .u32 vec_gtef_param_2,
	.param .u32 vec_gtef_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<16>;


	ld.param.u32 	%r5, [vec_gtef_param_0];
	ld.param.u32 	%r2, [vec_gtef_param_1];
	ld.param.u32 	%r3, [vec_gtef_param_2];
	ld.param.u32 	%r4, [vec_gtef_param_3];
	mov.u32 	%r6, %tid.x;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r6;
	setp.ge.u32	%p1, %r1, %r5;
	@%p1 bra 	BB17_2;

	cvta.to.global.u32 	%r9, %r3;
	shl.b32 	%r10, %r1, 2;
	add.s32 	%r11, %r9, %r10;
	cvta.to.global.u32 	%r12, %r4;
	add.s32 	%r13, %r12, %r10;
	ld.global.f32 	%f1, [%r13];
	ld.global.f32 	%f2, [%r11];
	setp.ltu.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f00000000, 0f3F800000, %p2;
	cvta.to.global.u32 	%r14, %r2;
	add.s32 	%r15, %r14, %r10;
	st.global.f32 	[%r15], %f3;

BB17_2:
	ret;
}

	// .globl	vec_gtf
.visible .entry vec_gtf(
	.param .u32 vec_gtf_param_0,
	.param .u32 vec_gtf_param_1,
	.param .u32 vec_gtf_param_2,
	.param .u32 vec_gtf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<16>;


	ld.param.u32 	%r5, [vec_gtf_param_0];
	ld.param.u32 	%r2, [vec_gtf_param_1];
	ld.param.u32 	%r3, [vec_gtf_param_2];
	ld.param.u32 	%r4, [vec_gtf_param_3];
	mov.u32 	%r6, %tid.x;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r6;
	setp.ge.u32	%p1, %r1, %r5;
	@%p1 bra 	BB18_2;

	cvta.to.global.u32 	%r9, %r3;
	shl.b32 	%r10, %r1, 2;
	add.s32 	%r11, %r9, %r10;
	cvta.to.global.u32 	%r12, %r4;
	add.s32 	%r13, %r12, %r10;
	ld.global.f32 	%f1, [%r13];
	ld.global.f32 	%f2, [%r11];
	setp.gt.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f3F800000, 0f00000000, %p2;
	cvta.to.global.u32 	%r14, %r2;
	add.s32 	%r15, %r14, %r10;
	st.global.f32 	[%r15], %f3;

BB18_2:
	ret;
}

	// .globl	vec_nef
.visible .entry vec_nef(
	.param .u32 vec_nef_param_0,
	.param .u32 vec_nef_param_1,
	.param .u32 vec_nef_param_2,
	.param .u32 vec_nef_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<16>;


	ld.param.u32 	%r5, [vec_nef_param_0];
	ld.param.u32 	%r2, [vec_nef_param_1];
	ld.param.u32 	%r3, [vec_nef_param_2];
	ld.param.u32 	%r4, [vec_nef_param_3];
	mov.u32 	%r6, %tid.x;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r6;
	setp.ge.u32	%p1, %r1, %r5;
	@%p1 bra 	BB19_2;

	cvta.to.global.u32 	%r9, %r3;
	shl.b32 	%r10, %r1, 2;
	add.s32 	%r11, %r9, %r10;
	cvta.to.global.u32 	%r12, %r4;
	add.s32 	%r13, %r12, %r10;
	ld.global.f32 	%f1, [%r13];
	ld.global.f32 	%f2, [%r11];
	setp.neu.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f3F800000, 0f00000000, %p2;
	cvta.to.global.u32 	%r14, %r2;
	add.s32 	%r15, %r14, %r10;
	st.global.f32 	[%r15], %f3;

BB19_2:
	ret;
}

	// .globl	vec_ltScalarf
.visible .entry vec_ltScalarf(
	.param .u32 vec_ltScalarf_param_0,
	.param .u32 vec_ltScalarf_param_1,
	.param .u32 vec_ltScalarf_param_2,
	.param .f32 vec_ltScalarf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_ltScalarf_param_0];
	ld.param.u32 	%r2, [vec_ltScalarf_param_1];
	ld.param.u32 	%r3, [vec_ltScalarf_param_2];
	ld.param.f32 	%f1, [vec_ltScalarf_param_3];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB20_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f2, [%r10];
	setp.lt.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f3F800000, 0f00000000, %p2;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f3;

BB20_2:
	ret;
}

	// .globl	vec_lteScalarf
.visible .entry vec_lteScalarf(
	.param .u32 vec_lteScalarf_param_0,
	.param .u32 vec_lteScalarf_param_1,
	.param .u32 vec_lteScalarf_param_2,
	.param .f32 vec_lteScalarf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_lteScalarf_param_0];
	ld.param.u32 	%r2, [vec_lteScalarf_param_1];
	ld.param.u32 	%r3, [vec_lteScalarf_param_2];
	ld.param.f32 	%f1, [vec_lteScalarf_param_3];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB21_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f2, [%r10];
	setp.gtu.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f00000000, 0f3F800000, %p2;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f3;

BB21_2:
	ret;
}

	// .globl	vec_eqScalarf
.visible .entry vec_eqScalarf(
	.param .u32 vec_eqScalarf_param_0,
	.param .u32 vec_eqScalarf_param_1,
	.param .u32 vec_eqScalarf_param_2,
	.param .f32 vec_eqScalarf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_eqScalarf_param_0];
	ld.param.u32 	%r2, [vec_eqScalarf_param_1];
	ld.param.u32 	%r3, [vec_eqScalarf_param_2];
	ld.param.f32 	%f1, [vec_eqScalarf_param_3];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB22_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f2, [%r10];
	setp.eq.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f3F800000, 0f00000000, %p2;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f3;

BB22_2:
	ret;
}

	// .globl	vec_gteScalarf
.visible .entry vec_gteScalarf(
	.param .u32 vec_gteScalarf_param_0,
	.param .u32 vec_gteScalarf_param_1,
	.param .u32 vec_gteScalarf_param_2,
	.param .f32 vec_gteScalarf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_gteScalarf_param_0];
	ld.param.u32 	%r2, [vec_gteScalarf_param_1];
	ld.param.u32 	%r3, [vec_gteScalarf_param_2];
	ld.param.f32 	%f1, [vec_gteScalarf_param_3];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB23_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f2, [%r10];
	setp.ltu.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f00000000, 0f3F800000, %p2;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f3;

BB23_2:
	ret;
}

	// .globl	vec_gtScalarf
.visible .entry vec_gtScalarf(
	.param .u32 vec_gtScalarf_param_0,
	.param .u32 vec_gtScalarf_param_1,
	.param .u32 vec_gtScalarf_param_2,
	.param .f32 vec_gtScalarf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_gtScalarf_param_0];
	ld.param.u32 	%r2, [vec_gtScalarf_param_1];
	ld.param.u32 	%r3, [vec_gtScalarf_param_2];
	ld.param.f32 	%f1, [vec_gtScalarf_param_3];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB24_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f2, [%r10];
	setp.gt.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f3F800000, 0f00000000, %p2;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f3;

BB24_2:
	ret;
}

	// .globl	vec_neScalarf
.visible .entry vec_neScalarf(
	.param .u32 vec_neScalarf_param_0,
	.param .u32 vec_neScalarf_param_1,
	.param .u32 vec_neScalarf_param_2,
	.param .f32 vec_neScalarf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_neScalarf_param_0];
	ld.param.u32 	%r2, [vec_neScalarf_param_1];
	ld.param.u32 	%r3, [vec_neScalarf_param_2];
	ld.param.f32 	%f1, [vec_neScalarf_param_3];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB25_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f2, [%r10];
	setp.neu.f32	%p2, %f2, %f1;
	selp.f32	%f3, 0f3F800000, 0f00000000, %p2;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f3;

BB25_2:
	ret;
}
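
// vec_acosf (below) and vec_asinf (further down) share one core: an odd
// minimax polynomial t + t^3 * p(t^2) whose leading coefficient 0f3E2AAB94
// (~1/6) matches the asin series. For |x| above 0f3F11EB85 (~0.57) the input
// is first mapped through t = sqrt((1 - |x|) / 2), and the identities
// acos(x) = 2 * asin(t) and asin(x) = pi/2 - 2 * asin(t) fold the result
// back using 0f3FC90FDB (pi/2) and, for negative acos arguments, 0f40490FDB
// (pi).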

	// .globl	vec_acosf
.visible .entry vec_acosf(
	.param .u32 vec_acosf_param_0,
	.param .u32 vec_acosf_param_1,
	.param .u32 vec_acosf_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<27>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_acosf_param_0];
	ld.param.u32 	%r2, [vec_acosf_param_1];
	ld.param.u32 	%r3, [vec_acosf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB26_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	abs.f32 	%f2, %f1;
	mov.f32 	%f3, 0f3F800000;
	sub.f32 	%f4, %f3, %f2;
	mul.f32 	%f5, %f4, 0f3F000000;
	sqrt.rn.f32 	%f6, %f5;
	setp.gt.f32	%p2, %f2, 0f3F11EB85;
	selp.f32	%f7, %f6, %f2, %p2;
	mul.f32 	%f8, %f7, %f7;
	mov.f32 	%f9, 0f3C94D2E9;
	mov.f32 	%f10, 0f3D53F941;
	fma.rn.f32 	%f11, %f10, %f8, %f9;
	mov.f32 	%f12, 0f3D3F841F;
	fma.rn.f32 	%f13, %f11, %f8, %f12;
	mov.f32 	%f14, 0f3D994929;
	fma.rn.f32 	%f15, %f13, %f8, %f14;
	mov.f32 	%f16, 0f3E2AAB94;
	fma.rn.f32 	%f17, %f15, %f8, %f16;
	mul.f32 	%f18, %f8, %f17;
	fma.rn.f32 	%f19, %f18, %f7, %f7;
	add.f32 	%f20, %f19, %f19;
	mov.f32 	%f21, 0f3FC90FDB;
	sub.f32 	%f22, %f21, %f19;
	selp.f32	%f23, %f20, %f22, %p2;
	setp.lt.f32	%p3, %f1, 0f00000000;
	mov.f32 	%f24, 0f40490FDB;
	sub.f32 	%f25, %f24, %f23;
	selp.f32	%f26, %f25, %f23, %p3;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f26;

BB26_2:
	ret;
}
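
// vec_acoshf (below) uses acosh(x) = log1p((x - 1) + sqrt((x - 1) * (x + 1))):
// BB27_2 forms s = (x - 1) + sqrt(x^2 - 1), and the rest is a log1p
// evaluation with a polynomial path for s in roughly [-0.39, 0.65] (BB27_6)
// and a frexp-style mantissa/exponent split plus lg2.approx otherwise. Very
// large arguments (x - 1 above 2^23) shortcut to log(x - 1) + ln 2
// (0f3F317218). The same log1p skeleton, with identical coefficients,
// reappears in vec_asinhf and vec_atanhf below.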

	// .globl	vec_acoshf
.visible .entry vec_acoshf(
	.param .u32 vec_acoshf_param_0,
	.param .u32 vec_acoshf_param_1,
	.param .u32 vec_acoshf_param_2
)
{
	.reg .pred 	%p<16>;
	.reg .f32 	%f<85>;
	.reg .b32 	%r<23>;


	ld.param.u32 	%r4, [vec_acoshf_param_0];
	ld.param.u32 	%r2, [vec_acoshf_param_1];
	ld.param.u32 	%r3, [vec_acoshf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB27_12;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	add.f32 	%f2, %f1, 0fBF800000;
	mov.b32 	 %r11, %f2;
	setp.gt.u32	%p2, %r11, 1258291200;
	@%p2 bra 	BB27_7;
	bra.uni 	BB27_2;

BB27_7:
	setp.gt.f32	%p11, %f2, 0f00000000;
	setp.lt.f32	%p12, %f2, 0f7F800000;
	and.pred  	%p13, %p11, %p12;
	@%p13 bra 	BB27_9;
	bra.uni 	BB27_8;

BB27_9:
	setp.lt.f32	%p14, %f2, 0f00800000;
	mul.f32 	%f58, %f2, 0f4B800000;
	selp.f32	%f59, %f58, %f2, %p14;
	selp.f32	%f60, 0fC3170000, 0fC2FE0000, %p14;
	mov.b32 	 %r16, %f59;
	and.b32  	%r17, %r16, 8388607;
	or.b32  	%r18, %r17, 1065353216;
	mov.b32 	 %f61, %r18;
	shr.u32 	%r19, %r16, 23;
	cvt.rn.f32.u32	%f62, %r19;
	add.f32 	%f63, %f60, %f62;
	setp.gt.f32	%p15, %f61, 0f3FAE147B;
	mul.f32 	%f64, %f61, 0f3F000000;
	add.f32 	%f65, %f63, 0f3F800000;
	selp.f32	%f66, %f64, %f61, %p15;
	selp.f32	%f67, %f65, %f63, %p15;
	add.f32 	%f57, %f66, 0f3F800000;
	add.f32 	%f68, %f66, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f56,%f57;
	// inline asm
	mul.f32 	%f69, %f68, %f68;
	neg.f32 	%f70, %f69;
	mul.rn.f32 	%f71, %f56, %f70;
	add.rn.f32 	%f72, %f68, %f71;
	mul.f32 	%f73, %f72, %f72;
	mov.f32 	%f74, 0f3C4C6A36;
	mov.f32 	%f75, 0f3B1E94E6;
	fma.rn.f32 	%f76, %f75, %f73, %f74;
	mov.f32 	%f77, 0f3DAAAB1A;
	fma.rn.f32 	%f78, %f76, %f73, %f77;
	mul.f32 	%f79, %f73, %f78;
	fma.rn.f32 	%f80, %f79, %f72, %f71;
	add.f32 	%f81, %f68, %f80;
	mov.f32 	%f82, 0f3F317218;
	fma.rn.f32 	%f83, %f67, %f82, %f81;
	bra.uni 	BB27_10;

BB27_2:
	mul.rz.f32 	%f13, %f1, %f2;
	add.rn.f32 	%f14, %f13, %f2;
	sqrt.rn.f32 	%f15, %f14;
	add.f32 	%f3, %f2, %f15;
	setp.le.f32	%p3, %f3, 0f3F266666;
	setp.ge.f32	%p4, %f3, 0fBEC9BA5E;
	and.pred  	%p5, %p4, %p3;
	@%p5 bra 	BB27_6;
	bra.uni 	BB27_3;

BB27_6:
	add.f32 	%f43, %f3, 0f40000000;
	div.approx.f32 	%f44, %f3, %f43;
	neg.f32 	%f45, %f3;
	mul.rn.f32 	%f46, %f45, %f44;
	add.rn.f32 	%f47, %f3, %f46;
	mul.f32 	%f48, %f47, %f47;
	mov.f32 	%f49, 0f3C4C4BE0;
	mov.f32 	%f50, 0f3B2063C3;
	fma.rn.f32 	%f51, %f50, %f48, %f49;
	mov.f32 	%f52, 0f3DAAAB50;
	fma.rn.f32 	%f53, %f51, %f48, %f52;
	mul.f32 	%f54, %f48, %f53;
	fma.rn.f32 	%f55, %f54, %f47, %f46;
	add.f32 	%f84, %f3, %f55;
	bra.uni 	BB27_11;

BB27_8:
	lg2.approx.f32 	%f83, %f2;

BB27_10:
	add.f32 	%f84, %f83, 0f3F317218;
	bra.uni 	BB27_11;

BB27_3:
	add.f32 	%f4, %f3, 0f3F800000;
	setp.gt.f32	%p6, %f4, 0f00000000;
	setp.lt.f32	%p7, %f4, 0f7F800000;
	and.pred  	%p8, %p6, %p7;
	@%p8 bra 	BB27_5;
	bra.uni 	BB27_4;

BB27_5:
	setp.lt.f32	%p9, %f4, 0f00800000;
	mul.f32 	%f18, %f4, 0f4B800000;
	selp.f32	%f19, %f18, %f4, %p9;
	selp.f32	%f20, 0fC3170000, 0fC2FE0000, %p9;
	mov.b32 	 %r12, %f19;
	and.b32  	%r13, %r12, 8388607;
	or.b32  	%r14, %r13, 1065353216;
	mov.b32 	 %f21, %r14;
	shr.u32 	%r15, %r12, 23;
	cvt.rn.f32.u32	%f22, %r15;
	add.f32 	%f23, %f20, %f22;
	setp.gt.f32	%p10, %f21, 0f3FAE147B;
	mul.f32 	%f24, %f21, 0f3F000000;
	add.f32 	%f25, %f23, 0f3F800000;
	selp.f32	%f26, %f24, %f21, %p10;
	selp.f32	%f27, %f25, %f23, %p10;
	add.f32 	%f17, %f26, 0f3F800000;
	add.f32 	%f28, %f26, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f16,%f17;
	// inline asm
	mul.f32 	%f29, %f28, %f28;
	neg.f32 	%f30, %f29;
	mul.rn.f32 	%f31, %f16, %f30;
	add.rn.f32 	%f32, %f28, %f31;
	mul.f32 	%f33, %f32, %f32;
	mov.f32 	%f34, 0f3C4C6A36;
	mov.f32 	%f35, 0f3B1E94E6;
	fma.rn.f32 	%f36, %f35, %f33, %f34;
	mov.f32 	%f37, 0f3DAAAB1A;
	fma.rn.f32 	%f38, %f36, %f33, %f37;
	mul.f32 	%f39, %f33, %f38;
	fma.rn.f32 	%f40, %f39, %f32, %f31;
	add.f32 	%f41, %f28, %f40;
	mov.f32 	%f42, 0f3F317218;
	fma.rn.f32 	%f84, %f27, %f42, %f41;
	bra.uni 	BB27_11;

BB27_4:
	lg2.approx.f32 	%f84, %f4;

BB27_11:
	cvta.to.global.u32 	%r20, %r2;
	add.s32 	%r22, %r20, %r9;
	st.global.f32 	[%r22], %f84;

BB27_12:
	ret;
}

	// .globl	vec_asinf
.visible .entry vec_asinf(
	.param .u32 vec_asinf_param_0,
	.param .u32 vec_asinf_param_1,
	.param .u32 vec_asinf_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<26>;
	.reg .b32 	%r<17>;


	ld.param.u32 	%r4, [vec_asinf_param_0];
	ld.param.u32 	%r2, [vec_asinf_param_1];
	ld.param.u32 	%r3, [vec_asinf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB28_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	abs.f32 	%f2, %f1;
	mov.f32 	%f3, 0f3F800000;
	sub.f32 	%f4, %f3, %f2;
	mul.f32 	%f5, %f4, 0f3F000000;
	sqrt.rn.f32 	%f6, %f5;
	setp.gt.f32	%p2, %f2, 0f3F11EB85;
	selp.f32	%f7, %f6, %f2, %p2;
	mul.f32 	%f8, %f7, %f7;
	mov.f32 	%f9, 0f3C94D2E9;
	mov.f32 	%f10, 0f3D53F941;
	fma.rn.f32 	%f11, %f10, %f8, %f9;
	mov.f32 	%f12, 0f3D3F841F;
	fma.rn.f32 	%f13, %f11, %f8, %f12;
	mov.f32 	%f14, 0f3D994929;
	fma.rn.f32 	%f15, %f13, %f8, %f14;
	mov.f32 	%f16, 0f3E2AAB94;
	fma.rn.f32 	%f17, %f15, %f8, %f16;
	mul.f32 	%f18, %f8, %f17;
	fma.rn.f32 	%f19, %f18, %f7, %f7;
	mov.f32 	%f20, 0f3FC90FDB;
	mov.f32 	%f21, 0fC0000000;
	fma.rn.f32 	%f22, %f21, %f19, %f20;
	selp.f32	%f23, %f22, %f19, %p2;
	setp.gtu.f32	%p3, %f23, 0f7F800000;
	mov.b32 	 %r11, %f23;
	mov.b32 	 %r12, %f1;
	and.b32  	%r13, %r12, -2147483648;
	or.b32  	%r14, %r11, %r13;
	mov.b32 	 %f24, %r14;
	selp.f32	%f25, %f23, %f24, %p3;
	cvta.to.global.u32 	%r15, %r2;
	add.s32 	%r16, %r15, %r9;
	st.global.f32 	[%r16], %f25;

BB28_2:
	ret;
}

	// .globl	vec_asinhf
.visible .entry vec_asinhf(
	.param .u32 vec_asinhf_param_0,
	.param .u32 vec_asinhf_param_1,
	.param .u32 vec_asinhf_param_2
)
{
	.reg .pred 	%p<12>;
	.reg .f32 	%f<62>;
	.reg .b32 	%r<22>;


	ld.param.u32 	%r4, [vec_asinhf_param_0];
	ld.param.u32 	%r2, [vec_asinhf_param_1];
	ld.param.u32 	%r3, [vec_asinhf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB29_9;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	abs.f32 	%f2, %f1;
	setp.gt.f32	%p2, %f2, 0f7E800000;
	@%p2 bra 	BB29_7;
	bra.uni 	BB29_2;

BB29_7:
	lg2.approx.f32 	%f56, %f2;
	mul.f32 	%f57, %f56, 0f3F317218;
	mov.f32 	%f58, 0f3F317218;
	add.rn.f32 	%f61, %f58, %f57;
	bra.uni 	BB29_8;

BB29_2:
	rcp.rn.f32 	%f12, %f2;
	mov.f32 	%f13, 0f3F800000;
	fma.rn.f32 	%f14, %f12, %f12, %f13;
	sqrt.rn.f32 	%f15, %f14;
	add.f32 	%f11, %f12, %f15;
	// inline asm
	rcp.approx.ftz.f32 %f10,%f11;
	// inline asm
	fma.rn.f32 	%f3, %f2, %f10, %f2;
	setp.le.f32	%p3, %f3, 0f3F266666;
	setp.ge.f32	%p4, %f3, 0fBEC9BA5E;
	and.pred  	%p5, %p4, %p3;
	@%p5 bra 	BB29_6;
	bra.uni 	BB29_3;

BB29_6:
	add.f32 	%f43, %f3, 0f40000000;
	div.approx.f32 	%f44, %f3, %f43;
	neg.f32 	%f45, %f3;
	mul.rn.f32 	%f46, %f45, %f44;
	add.rn.f32 	%f47, %f3, %f46;
	mul.f32 	%f48, %f47, %f47;
	mov.f32 	%f49, 0f3C4C4BE0;
	mov.f32 	%f50, 0f3B2063C3;
	fma.rn.f32 	%f51, %f50, %f48, %f49;
	mov.f32 	%f52, 0f3DAAAB50;
	fma.rn.f32 	%f53, %f51, %f48, %f52;
	mul.f32 	%f54, %f48, %f53;
	fma.rn.f32 	%f55, %f54, %f47, %f46;
	add.f32 	%f61, %f3, %f55;
	bra.uni 	BB29_8;

BB29_3:
	add.f32 	%f4, %f3, 0f3F800000;
	setp.gt.f32	%p6, %f4, 0f00000000;
	setp.lt.f32	%p7, %f4, 0f7F800000;
	and.pred  	%p8, %p6, %p7;
	@%p8 bra 	BB29_5;
	bra.uni 	BB29_4;

BB29_5:
	setp.lt.f32	%p9, %f4, 0f00800000;
	mul.f32 	%f18, %f4, 0f4B800000;
	selp.f32	%f19, %f18, %f4, %p9;
	selp.f32	%f20, 0fC3170000, 0fC2FE0000, %p9;
	mov.b32 	 %r11, %f19;
	and.b32  	%r12, %r11, 8388607;
	or.b32  	%r13, %r12, 1065353216;
	mov.b32 	 %f21, %r13;
	shr.u32 	%r14, %r11, 23;
	cvt.rn.f32.u32	%f22, %r14;
	add.f32 	%f23, %f20, %f22;
	setp.gt.f32	%p10, %f21, 0f3FAE147B;
	mul.f32 	%f24, %f21, 0f3F000000;
	add.f32 	%f25, %f23, 0f3F800000;
	selp.f32	%f26, %f24, %f21, %p10;
	selp.f32	%f27, %f25, %f23, %p10;
	add.f32 	%f17, %f26, 0f3F800000;
	add.f32 	%f28, %f26, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f16,%f17;
	// inline asm
	mul.f32 	%f29, %f28, %f28;
	neg.f32 	%f30, %f29;
	mul.rn.f32 	%f31, %f16, %f30;
	add.rn.f32 	%f32, %f28, %f31;
	mul.f32 	%f33, %f32, %f32;
	mov.f32 	%f34, 0f3C4C6A36;
	mov.f32 	%f35, 0f3B1E94E6;
	fma.rn.f32 	%f36, %f35, %f33, %f34;
	mov.f32 	%f37, 0f3DAAAB1A;
	fma.rn.f32 	%f38, %f36, %f33, %f37;
	mul.f32 	%f39, %f33, %f38;
	fma.rn.f32 	%f40, %f39, %f32, %f31;
	add.f32 	%f41, %f28, %f40;
	mov.f32 	%f42, 0f3F317218;
	fma.rn.f32 	%f61, %f27, %f42, %f41;
	bra.uni 	BB29_8;

BB29_4:
	lg2.approx.f32 	%f61, %f4;

BB29_8:
	cvta.to.global.u32 	%r15, %r2;
	mov.b32 	 %r16, %f1;
	and.b32  	%r17, %r16, -2147483648;
	mov.b32 	 %r18, %f61;
	or.b32  	%r19, %r18, %r17;
	mov.b32 	 %f59, %r19;
	setp.gtu.f32	%p11, %f2, 0f7F800000;
	selp.f32	%f60, %f61, %f59, %p11;
	add.s32 	%r21, %r15, %r9;
	st.global.f32 	[%r21], %f60;

BB29_9:
	ret;
}
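
// vec_atanf (below) uses the reciprocal identity: for |x| > 1 it evaluates
// its rational approximation at 1/|x| and subtracts the result from pi/2
// (0f3FC90FDB). The sign of the input is copied back onto the result with
// integer bit operations, and NaN inputs take the result without the sign
// fix-up.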

	// .globl	vec_atanf
.visible .entry vec_atanf(
	.param .u32 vec_atanf_param_0,
	.param .u32 vec_atanf_param_1,
	.param .u32 vec_atanf_param_2
)
{
	.reg .pred 	%p<5>;
	.reg .f32 	%f<26>;
	.reg .b32 	%r<18>;


	ld.param.u32 	%r4, [vec_atanf_param_0];
	ld.param.u32 	%r2, [vec_atanf_param_1];
	ld.param.u32 	%r3, [vec_atanf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB30_4;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	abs.f32 	%f2, %f1;
	setp.leu.f32	%p2, %f2, 0f3F800000;
	mov.f32 	%f25, %f2;
	@%p2 bra 	BB30_3;

	rcp.rn.f32 	%f3, %f2;
	mov.f32 	%f25, %f3;

BB30_3:
	mov.f32 	%f4, %f25;
	cvta.to.global.u32 	%r11, %r2;
	mul.rn.f32 	%f5, %f4, %f4;
	mov.f32 	%f6, 0fC0B59883;
	mov.f32 	%f7, 0fBF52C7EA;
	fma.rn.f32 	%f8, %f5, %f7, %f6;
	mov.f32 	%f9, 0fC0D21907;
	fma.rn.f32 	%f10, %f8, %f5, %f9;
	mul.f32 	%f11, %f5, %f10;
	mul.f32 	%f12, %f4, %f11;
	add.f32 	%f13, %f5, 0f41355DC0;
	mov.f32 	%f14, 0f41E6BD60;
	fma.rn.f32 	%f15, %f13, %f5, %f14;
	mov.f32 	%f16, 0f419D92C8;
	fma.rn.f32 	%f17, %f15, %f5, %f16;
	rcp.rn.f32 	%f18, %f17;
	fma.rn.f32 	%f19, %f12, %f18, %f4;
	mov.f32 	%f20, 0f3FC90FDB;
	sub.f32 	%f21, %f20, %f19;
	setp.gt.f32	%p3, %f2, 0f3F800000;
	selp.f32	%f22, %f21, %f19, %p3;
	mov.b32 	 %r12, %f22;
	mov.b32 	 %r13, %f1;
	and.b32  	%r14, %r13, -2147483648;
	or.b32  	%r15, %r12, %r14;
	mov.b32 	 %f23, %r15;
	setp.gtu.f32	%p4, %f2, 0f7F800000;
	selp.f32	%f24, %f22, %f23, %p4;
	add.s32 	%r17, %r11, %r9;
	st.global.f32 	[%r17], %f24;

BB30_4:
	ret;
}

	// .globl	vec_atanhf
.visible .entry vec_atanhf(
	.param .u32 vec_atanhf_param_0,
	.param .u32 vec_atanhf_param_1,
	.param .u32 vec_atanhf_param_2
)
{
	.reg .pred 	%p<12>;
	.reg .f32 	%f<59>;
	.reg .b32 	%r<22>;


	ld.param.u32 	%r4, [vec_atanhf_param_0];
	ld.param.u32 	%r2, [vec_atanhf_param_1];
	ld.param.u32 	%r3, [vec_atanhf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB31_7;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	abs.f32 	%f10, %f1;
	mov.f32 	%f11, 0f3F800000;
	sub.f32 	%f9, %f11, %f10;
	// inline asm
	rcp.approx.ftz.f32 %f8,%f9;
	// inline asm
	add.f32 	%f12, %f8, %f8;
	mul.f32 	%f13, %f10, %f12;
	setp.gt.f32	%p2, %f10, 0f7E800000;
	selp.f32	%f2, 0fC0000000, %f13, %p2;
	setp.le.f32	%p3, %f2, 0f3F266666;
	setp.ge.f32	%p4, %f2, 0fBEC9BA5E;
	and.pred  	%p5, %p4, %p3;
	@%p5 bra 	BB31_5;
	bra.uni 	BB31_2;

BB31_5:
	add.f32 	%f41, %f2, 0f40000000;
	div.approx.f32 	%f42, %f2, %f41;
	neg.f32 	%f43, %f2;
	mul.rn.f32 	%f44, %f43, %f42;
	add.rn.f32 	%f45, %f2, %f44;
	mul.f32 	%f46, %f45, %f45;
	mov.f32 	%f47, 0f3C4C4BE0;
	mov.f32 	%f48, 0f3B2063C3;
	fma.rn.f32 	%f49, %f48, %f46, %f47;
	mov.f32 	%f50, 0f3DAAAB50;
	fma.rn.f32 	%f51, %f49, %f46, %f50;
	mul.f32 	%f52, %f46, %f51;
	fma.rn.f32 	%f53, %f52, %f45, %f44;
	add.f32 	%f58, %f2, %f53;
	bra.uni 	BB31_6;

BB31_2:
	add.f32 	%f3, %f2, 0f3F800000;
	setp.gt.f32	%p6, %f3, 0f00000000;
	setp.lt.f32	%p7, %f3, 0f7F800000;
	and.pred  	%p8, %p6, %p7;
	@%p8 bra 	BB31_4;
	bra.uni 	BB31_3;

BB31_4:
	setp.lt.f32	%p9, %f3, 0f00800000;
	mul.f32 	%f16, %f3, 0f4B800000;
	selp.f32	%f17, %f16, %f3, %p9;
	selp.f32	%f18, 0fC3170000, 0fC2FE0000, %p9;
	mov.b32 	 %r11, %f17;
	and.b32  	%r12, %r11, 8388607;
	or.b32  	%r13, %r12, 1065353216;
	mov.b32 	 %f19, %r13;
	shr.u32 	%r14, %r11, 23;
	cvt.rn.f32.u32	%f20, %r14;
	add.f32 	%f21, %f18, %f20;
	setp.gt.f32	%p10, %f19, 0f3FAE147B;
	mul.f32 	%f22, %f19, 0f3F000000;
	add.f32 	%f23, %f21, 0f3F800000;
	selp.f32	%f24, %f22, %f19, %p10;
	selp.f32	%f25, %f23, %f21, %p10;
	add.f32 	%f15, %f24, 0f3F800000;
	add.f32 	%f26, %f24, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f14,%f15;
	// inline asm
	mul.f32 	%f27, %f26, %f26;
	neg.f32 	%f28, %f27;
	mul.rn.f32 	%f29, %f14, %f28;
	add.rn.f32 	%f30, %f26, %f29;
	mul.f32 	%f31, %f30, %f30;
	mov.f32 	%f32, 0f3C4C6A36;
	mov.f32 	%f33, 0f3B1E94E6;
	fma.rn.f32 	%f34, %f33, %f31, %f32;
	mov.f32 	%f35, 0f3DAAAB1A;
	fma.rn.f32 	%f36, %f34, %f31, %f35;
	mul.f32 	%f37, %f31, %f36;
	fma.rn.f32 	%f38, %f37, %f30, %f29;
	add.f32 	%f39, %f26, %f38;
	mov.f32 	%f40, 0f3F317218;
	fma.rn.f32 	%f58, %f25, %f40, %f39;
	bra.uni 	BB31_6;

BB31_3:
	lg2.approx.f32 	%f58, %f3;

BB31_6:
	cvta.to.global.u32 	%r15, %r2;
	mul.f32 	%f54, %f58, 0f3F000000;
	abs.f32 	%f55, %f54;
	setp.gtu.f32	%p11, %f55, 0f7F800000;
	mov.b32 	 %r16, %f54;
	mov.b32 	 %r17, %f1;
	and.b32  	%r18, %r17, -2147483648;
	or.b32  	%r19, %r16, %r18;
	mov.b32 	 %f56, %r19;
	selp.f32	%f57, %f54, %f56, %p11;
	add.s32 	%r21, %r15, %r9;
	st.global.f32 	[%r21], %f57;

BB31_7:
	ret;
}
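
// vec_cbrtf (below) computes |x|^(1/3) as ex2(lg2(|x|) / 3), sharpens it
// with one Newton-style correction (the rcp/fma sequence with 0fBEAAAAAB,
// i.e. -1/3), and restores the sign of x. The final selp covers x == 0 and
// x == +/-inf, where x + x == x holds and x itself is returned.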

	// .globl	vec_cbrtf
.visible .entry vec_cbrtf(
	.param .u32 vec_cbrtf_param_0,
	.param .u32 vec_cbrtf_param_1,
	.param .u32 vec_cbrtf_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<16>;
	.reg .b32 	%r<14>;


	ld.param.u32 	%r4, [vec_cbrtf_param_0];
	ld.param.u32 	%r2, [vec_cbrtf_param_1];
	ld.param.u32 	%r3, [vec_cbrtf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB32_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f5, [%r10];
	abs.f32 	%f6, %f5;
	lg2.approx.f32 	%f7, %f6;
	mul.f32 	%f2, %f7, 0f3EAAAAAB;
	// inline asm
	ex2.approx.ftz.f32 %f1,%f2;
	// inline asm
	mul.f32 	%f4, %f1, %f1;
	// inline asm
	rcp.approx.ftz.f32 %f3,%f4;
	// inline asm
	neg.f32 	%f8, %f6;
	fma.rn.f32 	%f9, %f3, %f8, %f1;
	mov.f32 	%f10, 0fBEAAAAAB;
	fma.rn.f32 	%f11, %f9, %f10, %f1;
	mov.b32 	 %r11, %f5;
	setp.lt.s32	%p2, %r11, 0;
	neg.f32 	%f12, %f11;
	selp.f32	%f13, %f12, %f11, %p2;
	add.f32 	%f14, %f5, %f5;
	setp.eq.f32	%p3, %f14, %f5;
	selp.f32	%f15, %f14, %f13, %p3;
	cvta.to.global.u32 	%r12, %r2;
	add.s32 	%r13, %r12, %r9;
	st.global.f32 	[%r13], %f15;

BB32_2:
	ret;
}

	// .globl	vec_ceilf
.visible .entry vec_ceilf(
	.param .u32 vec_ceilf_param_0,
	.param .u32 vec_ceilf_param_1,
	.param .u32 vec_ceilf_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_ceilf_param_0];
	ld.param.u32 	%r2, [vec_ceilf_param_1];
	ld.param.u32 	%r3, [vec_ceilf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB33_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	cvt.rpi.f32.f32	%f2, %f1;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f2;

BB33_2:
	ret;
}
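
// vec_cosf (below) is the largest kernel in the file. Small and moderate
// arguments are reduced modulo pi/2 with a three-term FMA sequence (the
// 0f3FC90FDA / 0f33A22168 / 0f27C234C5 split of pi/2); arguments above
// 0f47CE4780 (~1.056e5) take the Payne-Hanek style path, multiplying the
// mantissa against __cudart_i2opi_f in the BB34_5 loop with the 28-byte
// local depot as scratch. The reduced argument then feeds quadrant-selected
// sine/cosine polynomials (BB34_13 onward).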

	// .globl	vec_cosf
.visible .entry vec_cosf(
	.param .u32 vec_cosf_param_0,
	.param .u32 vec_cosf_param_1,
	.param .u32 vec_cosf_param_2
)
{
	.local .align 4 .b8 	__local_depot34[28];
	.reg .b32 	%SP;
	.reg .b32 	%SPL;
	.reg .pred 	%p<15>;
	.reg .f32 	%f<48>;
	.reg .b32 	%r<120>;


	mov.u32 	%r119, __local_depot34;
	cvta.local.u32 	%SP, %r119;
	ld.param.u32 	%r47, [vec_cosf_param_0];
	ld.param.u32 	%r45, [vec_cosf_param_1];
	ld.param.u32 	%r46, [vec_cosf_param_2];
	add.u32 	%r48, %SP, 0;
	cvta.to.local.u32 	%r1, %r48;
	mov.u32 	%r49, %ntid.x;
	mov.u32 	%r50, %ctaid.x;
	mov.u32 	%r51, %tid.x;
	mad.lo.s32 	%r2, %r49, %r50, %r51;
	setp.ge.u32	%p1, %r2, %r47;
	@%p1 bra 	BB34_24;

	cvta.to.global.u32 	%r52, %r46;
	shl.b32 	%r53, %r2, 2;
	add.s32 	%r54, %r52, %r53;
	ld.global.f32 	%f43, [%r54];
	abs.f32 	%f19, %f43;
	setp.neu.f32	%p2, %f19, 0f7F800000;
	@%p2 bra 	BB34_3;

	mov.f32 	%f20, 0f00000000;
	mul.rn.f32 	%f43, %f43, %f20;

BB34_3:
	mul.f32 	%f21, %f43, 0f3F22F983;
	cvt.rni.s32.f32	%r118, %f21;
	cvt.rn.f32.s32	%f22, %r118;
	neg.f32 	%f23, %f22;
	mov.f32 	%f24, 0f3FC90FDA;
	fma.rn.f32 	%f25, %f23, %f24, %f43;
	mov.f32 	%f26, 0f33A22168;
	fma.rn.f32 	%f27, %f23, %f26, %f25;
	mov.f32 	%f28, 0f27C234C5;
	fma.rn.f32 	%f44, %f23, %f28, %f27;
	abs.f32 	%f29, %f43;
	setp.leu.f32	%p3, %f29, 0f47CE4780;
	@%p3 bra 	BB34_13;

	mov.b32 	 %r4, %f43;
	shr.u32 	%r5, %r4, 23;
	bfe.u32 	%r58, %r4, 23, 8;
	add.s32 	%r59, %r58, -128;
	shl.b32 	%r60, %r4, 8;
	or.b32  	%r6, %r60, -2147483648;
	shr.u32 	%r7, %r59, 5;
	mov.u32 	%r110, 0;
	mov.u32 	%r108, 6;
	mov.u32 	%r107, __cudart_i2opi_f;
	mov.u32 	%r109, %r1;

BB34_5:
	.pragma "nounroll";
	mov.u32 	%r10, %r109;
	ld.const.u32 	%r63, [%r107];
	// inline asm
	{
	mad.lo.cc.u32   %r61, %r63, %r6, %r110;
	madc.hi.u32     %r110, %r63, %r6,  0;
	}
	// inline asm
	st.local.u32 	[%r10], %r61;
	add.s32 	%r13, %r10, 4;
	add.s32 	%r107, %r107, 4;
	add.s32 	%r108, %r108, -1;
	setp.ne.s32	%p4, %r108, 0;
	mov.u32 	%r109, %r13;
	@%p4 bra 	BB34_5;

	and.b32  	%r16, %r4, -2147483648;
	mov.u32 	%r66, 4;
	sub.s32 	%r67, %r66, %r7;
	shl.b32 	%r68, %r67, 2;
	add.s32 	%r69, %r68, %r1;
	st.local.u32 	[%r1+24], %r110;
	ld.local.u32 	%r111, [%r69+8];
	ld.local.u32 	%r112, [%r69+4];
	and.b32  	%r20, %r5, 31;
	setp.eq.s32	%p5, %r20, 0;
	@%p5 bra 	BB34_8;

	mov.u32 	%r70, 32;
	sub.s32 	%r71, %r70, %r20;
	shr.u32 	%r72, %r112, %r71;
	shl.b32 	%r73, %r111, %r20;
	add.s32 	%r111, %r72, %r73;
	add.s32 	%r106, %r69, 8;
	ld.local.u32 	%r74, [%r106+-8];
	shr.u32 	%r75, %r74, %r71;
	shl.b32 	%r76, %r112, %r20;
	add.s32 	%r112, %r75, %r76;

BB34_8:
	shr.u32 	%r77, %r112, 30;
	shl.b32 	%r78, %r111, 2;
	add.s32 	%r113, %r77, %r78;
	shl.b32 	%r26, %r112, 2;
	shr.u32 	%r79, %r113, 31;
	shr.u32 	%r80, %r111, 30;
	add.s32 	%r27, %r79, %r80;
	setp.eq.s32	%p6, %r79, 0;
	mov.u32 	%r114, %r16;
	mov.u32 	%r115, %r26;
	@%p6 bra 	BB34_10;

	not.b32 	%r81, %r113;
	neg.s32 	%r28, %r26;
	setp.eq.s32	%p7, %r26, 0;
	selp.u32	%r82, 1, 0, %p7;
	add.s32 	%r113, %r82, %r81;
	xor.b32  	%r30, %r16, -2147483648;
	mov.u32 	%r114, %r30;
	mov.u32 	%r115, %r28;

BB34_10:
	mov.u32 	%r32, %r114;
	neg.s32 	%r83, %r27;
	setp.eq.s32	%p8, %r16, 0;
	selp.b32	%r118, %r27, %r83, %p8;
	clz.b32 	%r117, %r113;
	setp.eq.s32	%p9, %r117, 0;
	shl.b32 	%r84, %r113, %r117;
	mov.u32 	%r85, 32;
	sub.s32 	%r86, %r85, %r117;
	shr.u32 	%r87, %r115, %r86;
	add.s32 	%r88, %r87, %r84;
	selp.b32	%r36, %r113, %r88, %p9;
	mov.u32 	%r89, -921707870;
	mul.hi.u32 	%r116, %r36, %r89;
	setp.lt.s32	%p10, %r116, 1;
	@%p10 bra 	BB34_12;

	mul.lo.s32 	%r90, %r36, -921707870;
	shr.u32 	%r91, %r90, 31;
	shl.b32 	%r92, %r116, 1;
	add.s32 	%r116, %r91, %r92;
	add.s32 	%r117, %r117, 1;

BB34_12:
	mov.u32 	%r93, 126;
	sub.s32 	%r94, %r93, %r117;
	shl.b32 	%r95, %r94, 23;
	add.s32 	%r96, %r116, 1;
	shr.u32 	%r97, %r96, 7;
	add.s32 	%r98, %r97, 1;
	shr.u32 	%r99, %r98, 1;
	add.s32 	%r100, %r99, %r95;
	or.b32  	%r101, %r100, %r32;
	mov.b32 	 %f44, %r101;

BB34_13:
	mul.rn.f32 	%f7, %f44, %f44;
	add.s32 	%r43, %r118, 1;
	and.b32  	%r44, %r43, 1;
	setp.eq.s32	%p11, %r44, 0;
	@%p11 bra 	BB34_15;

	mov.f32 	%f30, 0fBAB6061A;
	mov.f32 	%f31, 0f37CCF5CE;
	fma.rn.f32 	%f45, %f31, %f7, %f30;
	bra.uni 	BB34_16;

BB34_15:
	mov.f32 	%f32, 0f3C08839E;
	mov.f32 	%f33, 0fB94CA1F9;
	fma.rn.f32 	%f45, %f33, %f7, %f32;

BB34_16:
	@%p11 bra 	BB34_18;

	mov.f32 	%f34, 0f3D2AAAA5;
	fma.rn.f32 	%f35, %f45, %f7, %f34;
	mov.f32 	%f36, 0fBF000000;
	fma.rn.f32 	%f46, %f35, %f7, %f36;
	bra.uni 	BB34_19;

BB34_18:
	mov.f32 	%f37, 0fBE2AAAA3;
	fma.rn.f32 	%f38, %f45, %f7, %f37;
	mov.f32 	%f39, 0f00000000;
	fma.rn.f32 	%f46, %f38, %f7, %f39;

BB34_19:
	fma.rn.f32 	%f47, %f46, %f44, %f44;
	@%p11 bra 	BB34_21;

	mov.f32 	%f40, 0f3F800000;
	fma.rn.f32 	%f47, %f46, %f7, %f40;

BB34_21:
	and.b32  	%r102, %r43, 2;
	setp.eq.s32	%p14, %r102, 0;
	@%p14 bra 	BB34_23;

	mov.f32 	%f41, 0f00000000;
	mov.f32 	%f42, 0fBF800000;
	fma.rn.f32 	%f47, %f47, %f42, %f41;

BB34_23:
	cvta.to.global.u32 	%r103, %r45;
	add.s32 	%r105, %r103, %r53;
	st.global.f32 	[%r105], %f47;

BB34_24:
	ret;
}
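
// vec_coshf (below) forms t = exp(|x|) / 4 via the split ex2 sequence, then
// cosh(x) = 2 * t + 0.125 / t (0f40000000 and 0f3E000000), which keeps the
// intermediate exponential from overflowing early; inputs with |x| >= 90
// (0f42B40000) saturate to +inf.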

	// .globl	vec_coshf
.visible .entry vec_coshf(
	.param .u32 vec_coshf_param_0,
	.param .u32 vec_coshf_param_1,
	.param .u32 vec_coshf_param_2
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<19>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_coshf_param_0];
	ld.param.u32 	%r2, [vec_coshf_param_1];
	ld.param.u32 	%r3, [vec_coshf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB35_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f3, [%r10];
	abs.f32 	%f4, %f3;
	mul.f32 	%f5, %f4, 0f3FB8AA3B;
	cvt.rzi.f32.f32	%f6, %f5;
	mov.f32 	%f7, 0fBF317200;
	fma.rn.f32 	%f8, %f6, %f7, %f4;
	mov.f32 	%f9, 0fB5BFBE8E;
	fma.rn.f32 	%f10, %f6, %f9, %f8;
	mul.f32 	%f2, %f10, 0f3FB8AA3B;
	// inline asm
	ex2.approx.ftz.f32 %f1,%f2;
	// inline asm
	add.f32 	%f11, %f6, 0fC0000000;
	ex2.approx.f32 	%f12, %f11;
	mul.f32 	%f13, %f1, %f12;
	mov.f32 	%f14, 0f3E000000;
	div.approx.f32 	%f15, %f14, %f13;
	mov.f32 	%f16, 0f40000000;
	fma.rn.f32 	%f17, %f16, %f13, %f15;
	setp.ltu.f32	%p2, %f4, 0f42B40000;
	selp.f32	%f18, %f17, 0f7F800000, %p2;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f18;

BB35_2:
	ret;
}
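
// vec_cospif (below) computes cos(pi * x). Arguments with |x| > 2^24
// (0f4B800000) are flushed through x * 0, since every such float is an even
// integer and cos(pi * x) = 1 there; otherwise q = round(2 * x) reduces the
// remainder to [-1/4, 1/4] turns, pi * r is formed with the split constant
// 0f40490FDA / 0f34222169, and the same quadrant-polynomial tail as vec_cosf
// finishes the job.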

	// .globl	vec_cospif
.visible .entry vec_cospif(
	.param .u32 vec_cospif_param_0,
	.param .u32 vec_cospif_param_1,
	.param .u32 vec_cospif_param_2
)
{
	.reg .pred 	%p<7>;
	.reg .f32 	%f<43>;
	.reg .b32 	%r<18>;


	ld.param.u32 	%r6, [vec_cospif_param_0];
	ld.param.u32 	%r4, [vec_cospif_param_1];
	ld.param.u32 	%r5, [vec_cospif_param_2];
	mov.u32 	%r7, %tid.x;
	mov.u32 	%r8, %ntid.x;
	mov.u32 	%r9, %ctaid.x;
	mad.lo.s32 	%r1, %r8, %r9, %r7;
	setp.ge.u32	%p1, %r1, %r6;
	@%p1 bra 	BB36_14;

	cvta.to.global.u32 	%r10, %r5;
	shl.b32 	%r11, %r1, 2;
	add.s32 	%r12, %r10, %r11;
	ld.global.f32 	%f39, [%r12];
	abs.f32 	%f17, %f39;
	setp.leu.f32	%p2, %f17, 0f4B800000;
	@%p2 bra 	BB36_3;

	mov.f32 	%f18, 0f00000000;
	mul.rn.f32 	%f39, %f39, %f18;

BB36_3:
	add.f32 	%f19, %f39, %f39;
	cvt.rni.f32.f32	%f20, %f19;
	cvt.rzi.s32.f32	%r13, %f20;
	neg.f32 	%f21, %f20;
	mov.f32 	%f22, 0f3F000000;
	fma.rn.f32 	%f23, %f21, %f22, %f39;
	mul.f32 	%f24, %f23, 0f34222169;
	mov.f32 	%f25, 0f40490FDA;
	fma.rn.f32 	%f4, %f23, %f25, %f24;
	add.s32 	%r2, %r13, 1;
	mul.rn.f32 	%f5, %f4, %f4;
	and.b32  	%r3, %r2, 1;
	setp.eq.s32	%p3, %r3, 0;
	@%p3 bra 	BB36_5;

	mov.f32 	%f26, 0fBAB6061A;
	mov.f32 	%f27, 0f37CCF5CE;
	fma.rn.f32 	%f40, %f27, %f5, %f26;
	bra.uni 	BB36_6;

BB36_5:
	mov.f32 	%f28, 0f3C08839E;
	mov.f32 	%f29, 0fB94CA1F9;
	fma.rn.f32 	%f40, %f29, %f5, %f28;

BB36_6:
	@%p3 bra 	BB36_8;

	mov.f32 	%f30, 0f3D2AAAA5;
	fma.rn.f32 	%f31, %f40, %f5, %f30;
	mov.f32 	%f32, 0fBF000000;
	fma.rn.f32 	%f41, %f31, %f5, %f32;
	bra.uni 	BB36_9;

BB36_8:
	mov.f32 	%f33, 0fBE2AAAA3;
	fma.rn.f32 	%f34, %f40, %f5, %f33;
	mov.f32 	%f35, 0f00000000;
	fma.rn.f32 	%f41, %f34, %f5, %f35;

BB36_9:
	fma.rn.f32 	%f42, %f41, %f4, %f4;
	@%p3 bra 	BB36_11;

	mov.f32 	%f36, 0f3F800000;
	fma.rn.f32 	%f42, %f41, %f5, %f36;

BB36_11:
	and.b32  	%r14, %r2, 2;
	setp.eq.s32	%p6, %r14, 0;
	@%p6 bra 	BB36_13;

	mov.f32 	%f37, 0f00000000;
	mov.f32 	%f38, 0fBF800000;
	fma.rn.f32 	%f42, %f42, %f38, %f37;

BB36_13:
	cvta.to.global.u32 	%r15, %r4;
	add.s32 	%r17, %r15, %r11;
	st.global.f32 	[%r17], %f42;

BB36_14:
	ret;
}
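
// The remaining kernels cover the error-function family. vec_erff switches
// between an odd polynomial in x for |x| < 1 and an ex2.approx-based tail
// with the sign copied back in integer registers; vec_erfcf and vec_erfcxf
// share a polynomial in t = (|x| - 4) / (|x| + 4) together with a
// compensated exp(-x * x) factor; vec_erfcinvf picks between a polynomial in
// log(x * (2 - x)) for the central range and an rsqrt(-log x)-based tail.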

	// .globl	vec_erfcf
.visible .entry vec_erfcf(
	.param .u32 vec_erfcf_param_0,
	.param .u32 vec_erfcf_param_1,
	.param .u32 vec_erfcf_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<63>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_erfcf_param_0];
	ld.param.u32 	%r2, [vec_erfcf_param_1];
	ld.param.u32 	%r3, [vec_erfcf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB37_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f7, [%r10];
	abs.f32 	%f8, %f7;
	add.f32 	%f2, %f8, 0f40800000;
	// inline asm
	rcp.approx.ftz.f32 %f1,%f2;
	// inline asm
	add.f32 	%f9, %f8, 0fC0800000;
	mul.rn.f32 	%f10, %f9, %f1;
	add.f32 	%f11, %f10, 0f3F800000;
	mov.f32 	%f12, 0fC0800000;
	fma.rn.f32 	%f13, %f12, %f11, %f8;
	neg.f32 	%f14, %f10;
	fma.rn.f32 	%f15, %f14, %f8, %f13;
	fma.rn.f32 	%f16, %f1, %f15, %f10;
	mov.f32 	%f17, 0f3BE6E05B;
	mov.f32 	%f18, 0f3A69A091;
	fma.rn.f32 	%f19, %f18, %f16, %f17;
	mov.f32 	%f20, 0fBC81FB4B;
	fma.rn.f32 	%f21, %f19, %f16, %f20;
	mov.f32 	%f22, 0f3D15373B;
	fma.rn.f32 	%f23, %f21, %f16, %f22;
	mov.f32 	%f24, 0fBD887C5A;
	fma.rn.f32 	%f25, %f23, %f16, %f24;
	mov.f32 	%f26, 0f3DC021D5;
	fma.rn.f32 	%f27, %f25, %f16, %f26;
	mov.f32 	%f28, 0fBDCED424;
	fma.rn.f32 	%f29, %f27, %f16, %f28;
	mov.f32 	%f30, 0f3D8B74DE;
	fma.rn.f32 	%f31, %f29, %f16, %f30;
	mov.f32 	%f32, 0f3C7BF170;
	fma.rn.f32 	%f33, %f31, %f16, %f32;
	mov.f32 	%f34, 0fBE0EF8D4;
	fma.rn.f32 	%f35, %f33, %f16, %f34;
	mov.f32 	%f36, 0f3F9DD2C9;
	fma.rn.f32 	%f37, %f35, %f16, %f36;
	mov.f32 	%f38, 0f3F800000;
	mov.f32 	%f39, 0f40000000;
	fma.rn.f32 	%f4, %f39, %f8, %f38;
	// inline asm
	rcp.approx.ftz.f32 %f3,%f4;
	// inline asm
	mul.rn.f32 	%f40, %f37, %f3;
	mul.f32 	%f41, %f40, 0fC0000000;
	fma.rn.f32 	%f42, %f8, %f41, %f37;
	sub.f32 	%f43, %f42, %f40;
	fma.rn.f32 	%f44, %f43, %f3, %f40;
	mul.f32 	%f45, %f8, %f8;
	neg.f32 	%f46, %f45;
	mul.f32 	%f47, %f45, 0fBFB8AA3B;
	cvt.rzi.f32.f32	%f48, %f47;
	mov.f32 	%f49, 0fBF317200;
	fma.rn.f32 	%f50, %f48, %f49, %f46;
	mov.f32 	%f51, 0fB5BFBE8E;
	fma.rn.f32 	%f52, %f48, %f51, %f50;
	mul.f32 	%f6, %f52, 0f3FB8AA3B;
	// inline asm
	ex2.approx.ftz.f32 %f5,%f6;
	// inline asm
	add.f32 	%f53, %f48, 0f00000000;
	ex2.approx.f32 	%f54, %f53;
	mul.f32 	%f55, %f5, %f54;
	neg.f32 	%f56, %f8;
	fma.rn.f32 	%f57, %f56, %f8, %f45;
	fma.rn.f32 	%f58, %f55, %f57, %f55;
	mul.f32 	%f59, %f44, %f58;
	setp.gt.f32	%p2, %f8, 0f4120E148;
	selp.f32	%f60, 0f00000000, %f59, %p2;
	setp.lt.f32	%p3, %f7, 0f00000000;
	sub.f32 	%f61, %f39, %f60;
	selp.f32	%f62, %f61, %f60, %p3;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f62;

BB37_2:
	ret;
}

	// .globl	vec_erfcinvf
.visible .entry vec_erfcinvf(
	.param .u32 vec_erfcinvf_param_0,
	.param .u32 vec_erfcinvf_param_1,
	.param .u32 vec_erfcinvf_param_2
)
{
	.reg .pred 	%p<6>;
	.reg .f32 	%f<56>;
	.reg .b32 	%r<14>;


	ld.param.u32 	%r4, [vec_erfcinvf_param_0];
	ld.param.u32 	%r2, [vec_erfcinvf_param_1];
	ld.param.u32 	%r3, [vec_erfcinvf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB38_5;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	neg.f32 	%f2, %f1;
	mov.f32 	%f7, 0f40000000;
	add.rn.f32 	%f3, %f7, %f2;
	setp.le.f32	%p2, %f1, 0f3FFF9097;
	setp.ge.f32	%p3, %f1, 0f3B5ED289;
	and.pred  	%p4, %p3, %p2;
	@%p4 bra 	BB38_3;
	bra.uni 	BB38_2;

BB38_3:
	mul.rn.f32 	%f34, %f3, %f1;
	// inline asm
	lg2.approx.ftz.f32 %f33,%f34;
	// inline asm
	neg.f32 	%f35, %f33;
	mov.f32 	%f36, 0f3221F645;
	mov.f32 	%f37, 0fAF8A6370;
	fma.rn.f32 	%f38, %f37, %f35, %f36;
	mov.f32 	%f39, 0fB4016FDA;
	fma.rn.f32 	%f40, %f38, %f35, %f39;
	mov.f32 	%f41, 0f3468F846;
	fma.rn.f32 	%f42, %f40, %f35, %f41;
	mov.f32 	%f43, 0f370742AA;
	fma.rn.f32 	%f44, %f42, %f35, %f43;
	mov.f32 	%f45, 0fB804DB4D;
	fma.rn.f32 	%f46, %f44, %f35, %f45;
	mov.f32 	%f47, 0fBA4AFEA1;
	fma.rn.f32 	%f48, %f46, %f35, %f47;
	mov.f32 	%f49, 0f3BB5C027;
	fma.rn.f32 	%f50, %f48, %f35, %f49;
	mov.f32 	%f51, 0f3E24AE0F;
	fma.rn.f32 	%f52, %f50, %f35, %f51;
	mov.f32 	%f53, 0f3F62DFC4;
	fma.rn.f32 	%f54, %f52, %f35, %f53;
	fma.rn.f32 	%f55, %f54, %f2, %f54;
	bra.uni 	BB38_4;

BB38_2:
	setp.gt.f32	%p5, %f1, 0f3F800000;
	selp.f32	%f12, %f3, %f1, %p5;
	lg2.approx.f32 	%f13, %f12;
	neg.f32 	%f9, %f13;
	// inline asm
	rsqrt.approx.ftz.f32 %f8,%f9;
	// inline asm
	mov.f32 	%f14, 0f42FEF829;
	mov.f32 	%f15, 0fC27C73F1;
	fma.rn.f32 	%f16, %f15, %f8, %f14;
	mov.f32 	%f17, 0fC2E4361C;
	fma.rn.f32 	%f18, %f16, %f8, %f17;
	mov.f32 	%f19, 0f42714D9B;
	fma.rn.f32 	%f20, %f18, %f8, %f19;
	mov.f32 	%f21, 0fC1AE51B3;
	fma.rn.f32 	%f22, %f20, %f8, %f21;
	mov.f32 	%f23, 0f40CEF504;
	fma.rn.f32 	%f24, %f22, %f8, %f23;
	mov.f32 	%f25, 0fBFEA9E05;
	fma.rn.f32 	%f26, %f24, %f8, %f25;
	mov.f32 	%f27, 0fBCF871F4;
	fma.rn.f32 	%f28, %f26, %f8, %f27;
	mov.f32 	%f29, 0f3F553775;
	fma.rn.f32 	%f30, %f28, %f8, %f29;
	// inline asm
	rcp.approx.ftz.f32 %f10,%f8;
	// inline asm
	mul.rn.f32 	%f31, %f30, %f10;
	neg.f32 	%f32, %f31;
	selp.f32	%f55, %f32, %f31, %p5;

BB38_4:
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r13, %r11, %r9;
	st.global.f32 	[%r13], %f55;

BB38_5:
	ret;
}

	// .globl	vec_erfcxf
.visible .entry vec_erfcxf(
	.param .u32 vec_erfcxf_param_0,
	.param .u32 vec_erfcxf_param_1,
	.param .u32 vec_erfcxf_param_2
)
{
	.reg .pred 	%p<7>;
	.reg .f32 	%f<81>;
	.reg .b32 	%r<14>;


	ld.param.u32 	%r4, [vec_erfcxf_param_0];
	ld.param.u32 	%r2, [vec_erfcxf_param_1];
	ld.param.u32 	%r3, [vec_erfcxf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB39_7;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	abs.f32 	%f2, %f1;
	setp.lt.f32	%p2, %f2, 0f4120E148;
	@%p2 bra 	BB39_3;
	bra.uni 	BB39_2;

BB39_3:
	add.f32 	%f23, %f2, 0f40800000;
	// inline asm
	rcp.approx.ftz.f32 %f22,%f23;
	// inline asm
	add.f32 	%f26, %f2, 0fC0800000;
	mul.rn.f32 	%f27, %f26, %f22;
	add.f32 	%f28, %f27, 0f3F800000;
	mov.f32 	%f29, 0fC0800000;
	fma.rn.f32 	%f30, %f29, %f28, %f2;
	neg.f32 	%f31, %f27;
	fma.rn.f32 	%f32, %f31, %f2, %f30;
	fma.rn.f32 	%f33, %f22, %f32, %f27;
	mov.f32 	%f34, 0f3BE6E05B;
	mov.f32 	%f35, 0f3A69A091;
	fma.rn.f32 	%f36, %f35, %f33, %f34;
	mov.f32 	%f37, 0fBC81FB4B;
	fma.rn.f32 	%f38, %f36, %f33, %f37;
	mov.f32 	%f39, 0f3D15373B;
	fma.rn.f32 	%f40, %f38, %f33, %f39;
	mov.f32 	%f41, 0fBD887C5A;
	fma.rn.f32 	%f42, %f40, %f33, %f41;
	mov.f32 	%f43, 0f3DC021D5;
	fma.rn.f32 	%f44, %f42, %f33, %f43;
	mov.f32 	%f45, 0fBDCED424;
	fma.rn.f32 	%f46, %f44, %f33, %f45;
	mov.f32 	%f47, 0f3D8B74DE;
	fma.rn.f32 	%f48, %f46, %f33, %f47;
	mov.f32 	%f49, 0f3C7BF170;
	fma.rn.f32 	%f50, %f48, %f33, %f49;
	mov.f32 	%f51, 0fBE0EF8D4;
	fma.rn.f32 	%f52, %f50, %f33, %f51;
	mov.f32 	%f53, 0f3F9DD2C9;
	fma.rn.f32 	%f54, %f52, %f33, %f53;
	mov.f32 	%f55, 0f3F800000;
	mov.f32 	%f56, 0f40000000;
	fma.rn.f32 	%f25, %f56, %f2, %f55;
	// inline asm
	rcp.approx.ftz.f32 %f24,%f25;
	// inline asm
	mul.rn.f32 	%f57, %f54, %f24;
	mul.f32 	%f58, %f57, 0fC0000000;
	fma.rn.f32 	%f59, %f2, %f58, %f54;
	sub.f32 	%f60, %f59, %f57;
	fma.rn.f32 	%f80, %f60, %f24, %f57;
	bra.uni 	BB39_4;

BB39_2:
	mul.f32 	%f8, %f2, 0f3E800000;
	mov.f32 	%f9, 0f3E800000;
	div.approx.f32 	%f10, %f9, %f8;
	mul.f32 	%f11, %f10, %f10;
	mov.f32 	%f12, 0fBFF00000;
	mov.f32 	%f13, 0f40D20000;
	fma.rn.f32 	%f14, %f13, %f11, %f12;
	mov.f32 	%f15, 0f3F400000;
	fma.rn.f32 	%f16, %f14, %f11, %f15;
	mov.f32 	%f17, 0fBF000000;
	fma.rn.f32 	%f18, %f16, %f11, %f17;
	mov.f32 	%f19, 0f3F800000;
	fma.rn.f32 	%f20, %f18, %f11, %f19;
	mul.f32 	%f21, %f10, 0f3F106EBB;
	mul.f32 	%f80, %f21, %f20;

BB39_4:
	setp.geu.f32	%p3, %f1, 0f00000000;
	@%p3 bra 	BB39_6;

	mul.rz.f32 	%f63, %f2, %f2;
	neg.f32 	%f64, %f63;
	fma.rn.f32 	%f65, %f2, %f2, %f64;
	mul.f32 	%f66, %f63, 0f3FB8AA3B;
	cvt.rzi.f32.f32	%f67, %f66;
	mov.f32 	%f68, 0fBF317200;
	fma.rn.f32 	%f69, %f67, %f68, %f63;
	mov.f32 	%f70, 0fB5BFBE8E;
	fma.rn.f32 	%f71, %f67, %f70, %f69;
	mul.f32 	%f62, %f71, 0f3FB8AA3B;
	// inline asm
	ex2.approx.ftz.f32 %f61,%f62;
	// inline asm
	add.f32 	%f72, %f67, 0f00000000;
	ex2.approx.f32 	%f73, %f72;
	mul.f32 	%f74, %f61, %f73;
	setp.lt.f32	%p4, %f63, 0fC2D20000;
	selp.f32	%f75, 0f00000000, %f74, %p4;
	setp.gt.f32	%p5, %f63, 0f42D20000;
	selp.f32	%f76, 0f7F800000, %f75, %p5;
	add.f32 	%f77, %f76, %f76;
	fma.rn.f32 	%f78, %f77, %f65, %f77;
	sub.f32 	%f79, %f78, %f80;
	setp.eq.f32	%p6, %f77, 0f7F800000;
	selp.f32	%f80, %f77, %f79, %p6;

BB39_6:
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r13, %r11, %r9;
	st.global.f32 	[%r13], %f80;

BB39_7:
	ret;
}

	// .globl	vec_erff
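// vec_erff: out[i] = erf(in[i]), elementwise.
// |x| < 1 (BB40_3): odd minimax polynomial x*P(x^2), whose leading
// coefficient 0f3F906EBA is 2/sqrt(pi).
// |x| >= 1 (BB40_2): erf(x) = sign(x)*(1 - 2^Q(|x|)) with Q a
// degree-7 polynomial fed straight into ex2.approx; the result
// saturates to +/-1 once |x| >= 0f407AD445 (~3.9192), and the sign
// bit of x is merged back with integer and/or ops.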
.visible .entry vec_erff(
	.param .u32 vec_erff_param_0,
	.param .u32 vec_erff_param_1,
	.param .u32 vec_erff_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<37>;
	.reg .b32 	%r<19>;


	ld.param.u32 	%r4, [vec_erff_param_0];
	ld.param.u32 	%r2, [vec_erff_param_1];
	ld.param.u32 	%r3, [vec_erff_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB40_5;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	abs.f32 	%f2, %f1;
	setp.ltu.f32	%p2, %f2, 0f3F800000;
	@%p2 bra 	BB40_3;
	bra.uni 	BB40_2;

BB40_3:
	mul.f32 	%f24, %f1, %f1;
	mov.f32 	%f25, 0f3BA0C9F8;
	mov.f32 	%f26, 0fBA1268FB;
	fma.rn.f32 	%f27, %f26, %f24, %f25;
	mov.f32 	%f28, 0fBCDABFD4;
	fma.rn.f32 	%f29, %f27, %f24, %f28;
	mov.f32 	%f30, 0f3DE70331;
	fma.rn.f32 	%f31, %f29, %f24, %f30;
	mov.f32 	%f32, 0fBEC09330;
	fma.rn.f32 	%f33, %f31, %f24, %f32;
	mov.f32 	%f34, 0f3F906EBA;
	fma.rn.f32 	%f35, %f33, %f24, %f34;
	mul.f32 	%f36, %f1, %f35;
	bra.uni 	BB40_4;

BB40_2:
	mov.f32 	%f8, 0f3A03BB71;
	mov.f32 	%f9, 0fB7B730FB;
	fma.rn.f32 	%f10, %f9, %f2, %f8;
	mov.f32 	%f11, 0fBBACA3B3;
	fma.rn.f32 	%f12, %f10, %f2, %f11;
	mov.f32 	%f13, 0f3D0A7445;
	fma.rn.f32 	%f14, %f12, %f2, %f13;
	mov.f32 	%f15, 0fBE1B3B75;
	fma.rn.f32 	%f16, %f14, %f2, %f15;
	mov.f32 	%f17, 0fBF6B385A;
	fma.rn.f32 	%f18, %f16, %f2, %f17;
	mov.f32 	%f19, 0fBFD0316E;
	fma.rn.f32 	%f20, %f18, %f2, %f19;
	mov.f32 	%f21, 0fBA031CCE;
	fma.rn.f32 	%f7, %f20, %f2, %f21;
	// inline asm
	ex2.approx.ftz.f32 %f6,%f7;
	// inline asm
	mov.f32 	%f22, 0f3F800000;
	sub.f32 	%f23, %f22, %f6;
	mov.b32 	 %r11, %f23;
	setp.ltu.f32	%p3, %f2, 0f407AD445;
	selp.b32	%r12, %r11, 1065353216, %p3;
	mov.b32 	 %r13, %f1;
	and.b32  	%r14, %r13, -2147483648;
	or.b32  	%r15, %r12, %r14;
	mov.b32 	 %f36, %r15;

BB40_4:
	cvta.to.global.u32 	%r16, %r2;
	add.s32 	%r18, %r16, %r9;
	st.global.f32 	[%r18], %f36;

BB40_5:
	ret;
}

	// .globl	vec_erfinvf
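// vec_erfinvf: out[i] = erfinv(in[i]), elementwise, working on
// w = -lg2(1 - x^2) (fma(x, -x, 1) feeding lg2.approx.ftz).
// Central branch BB41_2 (w <= 8.2, split point 0fC1033333 = -8.2):
// result x*P(w), a degree-9 polynomial with P(0) = sqrt(pi)/2
// (0f3F62DFC4). Tail branch BB41_3 (w > 8.2): t = rsqrt(w),
// result Q(t)/t with the sign of x copied back through the
// integer pipe.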
.visible .entry vec_erfinvf(
	.param .u32 vec_erfinvf_param_0,
	.param .u32 vec_erfinvf_param_1,
	.param .u32 vec_erfinvf_param_2
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<45>;
	.reg .b32 	%r<18>;


	ld.param.u32 	%r4, [vec_erfinvf_param_0];
	ld.param.u32 	%r2, [vec_erfinvf_param_1];
	ld.param.u32 	%r3, [vec_erfinvf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB41_5;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	neg.f32 	%f8, %f1;
	mov.f32 	%f9, 0f3F800000;
	fma.rn.f32 	%f7, %f1, %f8, %f9;
	// inline asm
	lg2.approx.ftz.f32 %f6,%f7;
	// inline asm
	neg.f32 	%f2, %f6;
	setp.lt.f32	%p2, %f6, 0fC1033333;
	@%p2 bra 	BB41_3;
	bra.uni 	BB41_2;

BB41_3:
	// inline asm
	rsqrt.approx.ftz.f32 %f29,%f2;
	// inline asm
	mov.f32 	%f31, 0fBF29BAA5;
	mov.f32 	%f32, 0fBF1704A1;
	fma.rn.f32 	%f33, %f32, %f29, %f31;
	mov.f32 	%f34, 0f3FCC6ADC;
	fma.rn.f32 	%f35, %f33, %f29, %f34;
	mov.f32 	%f36, 0fBF2CDAED;
	fma.rn.f32 	%f37, %f35, %f29, %f36;
	mov.f32 	%f38, 0fBDC30537;
	fma.rn.f32 	%f39, %f37, %f29, %f38;
	mov.f32 	%f40, 0f3F55D9B9;
	fma.rn.f32 	%f41, %f39, %f29, %f40;
	rcp.rn.f32 	%f42, %f29;
	mul.f32 	%f43, %f41, %f42;
	mov.b32 	 %r11, %f43;
	mov.b32 	 %r12, %f1;
	and.b32  	%r13, %r12, -2147483648;
	or.b32  	%r14, %r11, %r13;
	mov.b32 	 %f44, %r14;
	bra.uni 	BB41_4;

BB41_2:
	mov.f32 	%f10, 0f3221F645;
	mov.f32 	%f11, 0fAF8A6370;
	fma.rn.f32 	%f12, %f11, %f2, %f10;
	mov.f32 	%f13, 0fB4016FDA;
	fma.rn.f32 	%f14, %f12, %f2, %f13;
	mov.f32 	%f15, 0f3468F846;
	fma.rn.f32 	%f16, %f14, %f2, %f15;
	mov.f32 	%f17, 0f370742AA;
	fma.rn.f32 	%f18, %f16, %f2, %f17;
	mov.f32 	%f19, 0fB804DB4D;
	fma.rn.f32 	%f20, %f18, %f2, %f19;
	mov.f32 	%f21, 0fBA4AFEA1;
	fma.rn.f32 	%f22, %f20, %f2, %f21;
	mov.f32 	%f23, 0f3BB5C027;
	fma.rn.f32 	%f24, %f22, %f2, %f23;
	mov.f32 	%f25, 0f3E24AE0F;
	fma.rn.f32 	%f26, %f24, %f2, %f25;
	mov.f32 	%f27, 0f3F62DFC4;
	fma.rn.f32 	%f28, %f26, %f2, %f27;
	mul.f32 	%f44, %f1, %f28;

BB41_4:
	cvta.to.global.u32 	%r15, %r2;
	add.s32 	%r17, %r15, %r9;
	st.global.f32 	[%r17], %f44;

BB41_5:
	ret;
}

	// .globl	vec_exp10f
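// vec_exp10f: out[i] = 10^in[i], computed as 2^n * 2^(r*log2(10)):
// n = trunc(x * 0f40549A78 (log2(10))), r = x - n*log10(2) in two
// Cody-Waite terms (0fBE9A2080 hi, 0fB55427DE lo), with
// ex2.approx.ftz on r*log2(10) recombined with ex2 of n.
// Inputs are clamped at +/-46 (0f42380000): below -> 0, above -> +inf.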
.visible .entry vec_exp10f(
	.param .u32 vec_exp10f_param_0,
	.param .u32 vec_exp10f_param_1,
	.param .u32 vec_exp10f_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<14>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_exp10f_param_0];
	ld.param.u32 	%r2, [vec_exp10f_param_1];
	ld.param.u32 	%r3, [vec_exp10f_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB42_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f3, [%r10];
	mul.f32 	%f4, %f3, 0f40549A78;
	cvt.rzi.f32.f32	%f5, %f4;
	mov.f32 	%f6, 0fBE9A2080;
	fma.rn.f32 	%f7, %f5, %f6, %f3;
	mov.f32 	%f8, 0fB55427DE;
	fma.rn.f32 	%f9, %f5, %f8, %f7;
	mul.f32 	%f2, %f9, 0f40549A78;
	// inline asm
	ex2.approx.ftz.f32 %f1,%f2;
	// inline asm
	ex2.approx.f32 	%f10, %f5;
	mul.f32 	%f11, %f1, %f10;
	setp.lt.f32	%p2, %f3, 0fC2380000;
	selp.f32	%f12, 0f00000000, %f11, %p2;
	setp.gt.f32	%p3, %f3, 0f42380000;
	selp.f32	%f13, 0f7F800000, %f12, %p3;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f13;

BB42_2:
	ret;
}

	// .globl	vec_exp2f
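// vec_exp2f: out[i] = 2^in[i]; a single ex2.approx.f32 per element,
// with overflow/underflow left to the hardware instruction.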
.visible .entry vec_exp2f(
	.param .u32 vec_exp2f_param_0,
	.param .u32 vec_exp2f_param_1,
	.param .u32 vec_exp2f_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_exp2f_param_0];
	ld.param.u32 	%r2, [vec_exp2f_param_1];
	ld.param.u32 	%r3, [vec_exp2f_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB43_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	ex2.approx.f32 	%f2, %f1;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f2;

BB43_2:
	ret;
}

	// .globl	vec_expf
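// vec_expf: out[i] = exp(in[i]), computed as 2^n * 2^(r*log2(e)):
// n = trunc(x * 0f3FB8AA3B (log2(e))), r = x - n*ln(2) with the
// two-term ln(2) split 0fBF317200 + 0fB5BFBE8E, then ex2.approx.ftz
// on r*log2(e) and ex2 on n. Clamped at +/-105 (0f42D20000):
// x < -105 -> 0, x > 105 -> +inf.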
.visible .entry vec_expf(
	.param .u32 vec_expf_param_0,
	.param .u32 vec_expf_param_1,
	.param .u32 vec_expf_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<15>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_expf_param_0];
	ld.param.u32 	%r2, [vec_expf_param_1];
	ld.param.u32 	%r3, [vec_expf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB44_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f3, [%r10];
	mul.f32 	%f4, %f3, 0f3FB8AA3B;
	cvt.rzi.f32.f32	%f5, %f4;
	mov.f32 	%f6, 0fBF317200;
	fma.rn.f32 	%f7, %f5, %f6, %f3;
	mov.f32 	%f8, 0fB5BFBE8E;
	fma.rn.f32 	%f9, %f5, %f8, %f7;
	mul.f32 	%f2, %f9, 0f3FB8AA3B;
	// inline asm
	ex2.approx.ftz.f32 %f1,%f2;
	// inline asm
	add.f32 	%f10, %f5, 0f00000000;
	ex2.approx.f32 	%f11, %f10;
	mul.f32 	%f12, %f1, %f11;
	setp.lt.f32	%p2, %f3, 0fC2D20000;
	selp.f32	%f13, 0f00000000, %f12, %p2;
	setp.gt.f32	%p3, %f3, 0f42D20000;
	selp.f32	%f14, 0f7F800000, %f13, %p3;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f14;

BB44_2:
	ret;
}

	// .globl	vec_expm1f
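// vec_expm1f: out[i] = exp(in[i]) - 1 without cancellation near 0.
// n = rint(x*log2(e)), forced to 0 when |x| < 0f3ED1EB85 (~0.41) so
// small arguments skip the scaling; r = x - n*ln(2) (two-term split);
// p = r + r^2*P(r) ~ expm1(r); result p*2^n + (2^n - 1), with n = 128
// handled by scaling 2^127 and doubling afterwards. Clamps:
// n > 128 -> +inf, n < -25 (0fC1C80000) -> -1, and x == 0 returns
// x + x to preserve the sign of zero.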
.visible .entry vec_expm1f(
	.param .u32 vec_expm1f_param_0,
	.param .u32 vec_expm1f_param_1,
	.param .u32 vec_expm1f_param_2
)
{
	.reg .pred 	%p<7>;
	.reg .f32 	%f<33>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_expm1f_param_0];
	ld.param.u32 	%r2, [vec_expm1f_param_1];
	ld.param.u32 	%r3, [vec_expm1f_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB45_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	mul.f32 	%f2, %f1, 0f3FB8AA3B;
	cvt.rni.f32.f32	%f3, %f2;
	abs.f32 	%f4, %f1;
	setp.lt.f32	%p2, %f4, 0f3ED1EB85;
	selp.f32	%f5, 0f00000000, %f3, %p2;
	neg.f32 	%f6, %f5;
	mov.f32 	%f7, 0f3F317200;
	fma.rn.f32 	%f8, %f6, %f7, %f1;
	mov.f32 	%f9, 0f35BFBE8E;
	fma.rn.f32 	%f10, %f6, %f9, %f8;
	setp.eq.f32	%p3, %f5, 0f43000000;
	add.f32 	%f11, %f5, 0fBF800000;
	selp.f32	%f12, %f11, %f5, %p3;
	mov.f32 	%f13, 0f3C095663;
	mov.f32 	%f14, 0f3AB5EBE6;
	fma.rn.f32 	%f15, %f14, %f10, %f13;
	mov.f32 	%f16, 0f3D2AABE3;
	fma.rn.f32 	%f17, %f15, %f10, %f16;
	mov.f32 	%f18, 0f3E2AA9F6;
	fma.rn.f32 	%f19, %f17, %f10, %f18;
	mov.f32 	%f20, 0f3EFFFFFE;
	fma.rn.f32 	%f21, %f19, %f10, %f20;
	mul.f32 	%f22, %f10, %f21;
	fma.rn.f32 	%f23, %f22, %f10, %f10;
	ex2.approx.f32 	%f24, %f12;
	add.f32 	%f25, %f24, 0fBF800000;
	fma.rn.f32 	%f26, %f23, %f24, %f25;
	add.f32 	%f27, %f26, %f26;
	selp.f32	%f28, %f27, %f26, %p3;
	setp.gt.f32	%p4, %f12, 0f43000000;
	selp.f32	%f29, 0f7F800000, %f28, %p4;
	setp.lt.f32	%p5, %f12, 0fC1C80000;
	selp.f32	%f30, 0fBF800000, %f29, %p5;
	setp.eq.f32	%p6, %f1, 0f00000000;
	add.f32 	%f31, %f1, %f1;
	selp.f32	%f32, %f31, %f30, %p6;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f32;

BB45_2:
	ret;
}

	// .globl	vec_fabsf
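// vec_fabsf: out[i] = |in[i]|; one abs.f32 per element.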
.visible .entry vec_fabsf(
	.param .u32 vec_fabsf_param_0,
	.param .u32 vec_fabsf_param_1,
	.param .u32 vec_fabsf_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_fabsf_param_0];
	ld.param.u32 	%r2, [vec_fabsf_param_1];
	ld.param.u32 	%r3, [vec_fabsf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB46_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	abs.f32 	%f2, %f1;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f2;

BB46_2:
	ret;
}

	// .globl	vec_floorf
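// vec_floorf: out[i] = floor(in[i]) via cvt.rmi.f32.f32
// (round toward minus infinity).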
.visible .entry vec_floorf(
	.param .u32 vec_floorf_param_0,
	.param .u32 vec_floorf_param_1,
	.param .u32 vec_floorf_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_floorf_param_0];
	ld.param.u32 	%r2, [vec_floorf_param_1];
	ld.param.u32 	%r3, [vec_floorf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB47_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	cvt.rmi.f32.f32	%f2, %f1;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f2;

BB47_2:
	ret;
}

	// .globl	vec_j0f
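// vec_j0f: out[i] = J0(in[i]), Bessel function of the first kind.
// |x| <= 8 (0f41000000, branch BB48_2): a polynomial in
// (|x| - 2.404825) times the factors (|x| - 5.52008) and
// (|x| - 8.65373), i.e. built around the first three positive zeros
// of J0, each zero held as a hi+lo pair (e.g. 0fC019E8A9 + 0fB3E971B3).
// |x| > 8: the asymptotic form J0(x) ~ sqrt(2/(pi*x)) * cos(x - pi/4),
// with amplitude and phase series in 1/x^2 (0f3F4C422A = sqrt(2/pi))
// and the cosine reduced mod pi/2 (2/pi = 0f3F22F983, pi/2 split as
// 0f3FC90FDA/0f33A22168/0f27C234C5); arguments past ~1.05e5
// (0f47CE4780) take the fixed-point 2/pi path (Payne-Hanek style)
// through __cudart_i2opi_f and the 28-byte local buffer.
// |x| = inf -> 0.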
.visible .entry vec_j0f(
	.param .u32 vec_j0f_param_0,
	.param .u32 vec_j0f_param_1,
	.param .u32 vec_j0f_param_2
)
{
	.local .align 4 .b8 	__local_depot48[28];
	.reg .b32 	%SP;
	.reg .b32 	%SPL;
	.reg .pred 	%p<25>;
	.reg .f32 	%f<129>;
	.reg .b32 	%r<225>;


	mov.u32 	%r224, __local_depot48;
	cvta.local.u32 	%SP, %r224;
	ld.param.u32 	%r87, [vec_j0f_param_0];
	ld.param.u32 	%r85, [vec_j0f_param_1];
	ld.param.u32 	%r86, [vec_j0f_param_2];
	add.u32 	%r88, %SP, 0;
	cvta.to.local.u32 	%r1, %r88;
	mov.u32 	%r89, %ntid.x;
	mov.u32 	%r90, %ctaid.x;
	mov.u32 	%r91, %tid.x;
	mad.lo.s32 	%r2, %r89, %r90, %r91;
	setp.ge.u32	%p1, %r2, %r87;
	@%p1 bra 	BB48_38;

	cvta.to.global.u32 	%r92, %r86;
	shl.b32 	%r93, %r2, 2;
	add.s32 	%r94, %r92, %r93;
	ld.global.f32 	%f28, [%r94];
	abs.f32 	%f1, %f28;
	setp.gtu.f32	%p2, %f1, 0f41000000;
	@%p2 bra 	BB48_3;
	bra.uni 	BB48_2;

BB48_3:
	abs.f32 	%f65, %f1;
	mov.f32 	%f128, 0f00000000;
	setp.eq.f32	%p3, %f65, 0f7F800000;
	@%p3 bra 	BB48_37;

	// inline asm
	rcp.approx.ftz.f32 %f66,%f1;
	// inline asm
	mul.f32 	%f68, %f66, %f66;
	mov.f32 	%f69, 0fBF03B7C2;
	mov.f32 	%f70, 0f4056FE93;
	fma.rn.f32 	%f71, %f70, %f68, %f69;
	mov.f32 	%f72, 0f3DD3B3F3;
	fma.rn.f32 	%f73, %f71, %f68, %f72;
	mov.f32 	%f74, 0fBD7FFFB6;
	fma.rn.f32 	%f75, %f73, %f68, %f74;
	mov.f32 	%f76, 0f3F800000;
	fma.rn.f32 	%f77, %f75, %f68, %f76;
	mov.f32 	%f78, 0fBE52412D;
	mov.f32 	%f79, 0f3F91E009;
	fma.rn.f32 	%f80, %f79, %f68, %f78;
	mov.f32 	%f81, 0f3D854ED1;
	fma.rn.f32 	%f82, %f80, %f68, %f81;
	mov.f32 	%f83, 0fBDFFFFFF;
	fma.rn.f32 	%f84, %f82, %f68, %f83;
	fma.rn.f32 	%f3, %f84, %f66, %f1;
	rsqrt.approx.f32 	%f85, %f1;
	mul.f32 	%f86, %f85, 0f3F4C422A;
	mul.f32 	%f4, %f77, %f86;
	mul.f32 	%f87, %f3, 0f3F22F983;
	cvt.rni.s32.f32	%r209, %f87;
	cvt.rn.f32.s32	%f88, %r209;
	neg.f32 	%f89, %f88;
	mov.f32 	%f90, 0f3FC90FDA;
	fma.rn.f32 	%f91, %f89, %f90, %f3;
	mov.f32 	%f92, 0f33A22168;
	fma.rn.f32 	%f93, %f89, %f92, %f91;
	mov.f32 	%f94, 0f27C234C5;
	fma.rn.f32 	%f122, %f89, %f94, %f93;
	abs.f32 	%f95, %f3;
	setp.leu.f32	%p4, %f95, 0f47CE4780;
	@%p4 bra 	BB48_14;

	mov.b32 	 %r5, %f3;
	bfe.u32 	%r98, %r5, 23, 8;
	add.s32 	%r99, %r98, -128;
	shl.b32 	%r100, %r5, 8;
	or.b32  	%r6, %r100, -2147483648;
	shr.u32 	%r7, %r99, 5;
	mov.u32 	%r201, 0;
	mov.u32 	%r200, 6;
	mov.u32 	%r199, __cudart_i2opi_f;
	mov.u32 	%r214, %r1;

BB48_6:
	.pragma "nounroll";
	mov.u32 	%r10, %r214;
	ld.const.u32 	%r103, [%r199];
	// inline asm
	{
	mad.lo.cc.u32   %r101, %r103, %r6, %r201;
	madc.hi.u32     %r201, %r103, %r6,  0;
	}
	// inline asm
	st.local.u32 	[%r10], %r101;
	add.s32 	%r13, %r10, 4;
	add.s32 	%r199, %r199, 4;
	add.s32 	%r200, %r200, -1;
	setp.ne.s32	%p5, %r200, 0;
	mov.u32 	%r214, %r13;
	@%p5 bra 	BB48_6;

	and.b32  	%r16, %r5, -2147483648;
	add.s32 	%r196, %r1, 24;
	st.local.u32 	[%r196], %r201;
	bfe.u32 	%r17, %r5, 23, 5;
	mov.u32 	%r106, 4;
	sub.s32 	%r107, %r106, %r7;
	shl.b32 	%r108, %r107, 2;
	add.s32 	%r109, %r108, %r1;
	ld.local.u32 	%r202, [%r109+8];
	ld.local.u32 	%r203, [%r109+4];
	setp.eq.s32	%p6, %r17, 0;
	@%p6 bra 	BB48_9;

	mov.u32 	%r110, 32;
	sub.s32 	%r111, %r110, %r17;
	shr.u32 	%r112, %r203, %r111;
	shl.b32 	%r113, %r202, %r17;
	add.s32 	%r202, %r112, %r113;
	add.s32 	%r197, %r109, 8;
	ld.local.u32 	%r114, [%r197+-8];
	shr.u32 	%r115, %r114, %r111;
	shl.b32 	%r116, %r203, %r17;
	add.s32 	%r203, %r115, %r116;

BB48_9:
	shr.u32 	%r117, %r203, 30;
	shl.b32 	%r118, %r202, 2;
	add.s32 	%r204, %r117, %r118;
	shl.b32 	%r26, %r203, 2;
	shr.u32 	%r119, %r204, 31;
	shr.u32 	%r120, %r202, 30;
	add.s32 	%r27, %r119, %r120;
	setp.eq.s32	%p7, %r119, 0;
	mov.u32 	%r205, %r16;
	mov.u32 	%r206, %r26;
	@%p7 bra 	BB48_11;

	not.b32 	%r121, %r204;
	neg.s32 	%r28, %r26;
	setp.eq.s32	%p8, %r26, 0;
	selp.u32	%r122, 1, 0, %p8;
	add.s32 	%r204, %r122, %r121;
	xor.b32  	%r30, %r16, -2147483648;
	mov.u32 	%r205, %r30;
	mov.u32 	%r206, %r28;

BB48_11:
	mov.u32 	%r32, %r205;
	neg.s32 	%r123, %r27;
	setp.eq.s32	%p9, %r16, 0;
	selp.b32	%r209, %r27, %r123, %p9;
	clz.b32 	%r208, %r204;
	setp.eq.s32	%p10, %r208, 0;
	shl.b32 	%r124, %r204, %r208;
	mov.u32 	%r125, 32;
	sub.s32 	%r126, %r125, %r208;
	shr.u32 	%r127, %r206, %r126;
	add.s32 	%r128, %r127, %r124;
	selp.b32	%r36, %r204, %r128, %p10;
	mov.u32 	%r129, -921707870;
	mul.hi.u32 	%r207, %r36, %r129;
	setp.lt.s32	%p11, %r207, 1;
	@%p11 bra 	BB48_13;

	mul.lo.s32 	%r130, %r36, -921707870;
	shr.u32 	%r131, %r130, 31;
	shl.b32 	%r132, %r207, 1;
	add.s32 	%r207, %r131, %r132;
	add.s32 	%r208, %r208, 1;

BB48_13:
	mov.u32 	%r133, 126;
	sub.s32 	%r134, %r133, %r208;
	shl.b32 	%r135, %r134, 23;
	add.s32 	%r136, %r207, 1;
	shr.u32 	%r137, %r136, 7;
	add.s32 	%r138, %r137, 1;
	shr.u32 	%r139, %r138, 1;
	add.s32 	%r140, %r139, %r135;
	or.b32  	%r141, %r140, %r32;
	mov.b32 	 %f122, %r141;

BB48_14:
	and.b32  	%r142, %r209, 3;
	cvt.rn.f32.s32	%f96, %r142;
	add.f32 	%f97, %f122, 0fBF490FDB;
	fma.rn.f32 	%f123, %f96, 0f3FC90FDB, %f97;
	abs.f32 	%f98, %f123;
	setp.neu.f32	%p12, %f98, 0f7F800000;
	@%p12 bra 	BB48_16;

	mov.f32 	%f99, 0f00000000;
	mul.rn.f32 	%f123, %f123, %f99;

BB48_16:
	mul.f32 	%f100, %f123, 0f3F22F983;
	cvt.rni.s32.f32	%r223, %f100;
	cvt.rn.f32.s32	%f101, %r223;
	neg.f32 	%f102, %f101;
	fma.rn.f32 	%f104, %f102, %f90, %f123;
	fma.rn.f32 	%f106, %f102, %f92, %f104;
	fma.rn.f32 	%f124, %f102, %f94, %f106;
	abs.f32 	%f108, %f123;
	setp.leu.f32	%p13, %f108, 0f47CE4780;
	@%p13 bra 	BB48_26;

	mov.b32 	 %r44, %f123;
	shr.u32 	%r45, %r44, 23;
	bfe.u32 	%r146, %r44, 23, 8;
	add.s32 	%r147, %r146, -128;
	shl.b32 	%r148, %r44, 8;
	or.b32  	%r46, %r148, -2147483648;
	shr.u32 	%r47, %r147, 5;
	mov.u32 	%r215, 0;
	mov.u32 	%r211, 6;
	mov.u32 	%r210, __cudart_i2opi_f;
	mov.u32 	%r213, %r1;

BB48_18:
	.pragma "nounroll";
	ld.const.u32 	%r151, [%r210];
	// inline asm
	{
	mad.lo.cc.u32   %r149, %r151, %r46, %r215;
	madc.hi.u32     %r215, %r151, %r46,  0;
	}
	// inline asm
	st.local.u32 	[%r213], %r149;
	add.s32 	%r213, %r213, 4;
	add.s32 	%r210, %r210, 4;
	add.s32 	%r211, %r211, -1;
	setp.ne.s32	%p14, %r211, 0;
	@%p14 bra 	BB48_18;

	and.b32  	%r56, %r44, -2147483648;
	cvta.to.local.u32 	%r155, %r88;
	mov.u32 	%r156, 4;
	sub.s32 	%r157, %r156, %r47;
	shl.b32 	%r158, %r157, 2;
	add.s32 	%r159, %r158, %r155;
	st.local.u32 	[%r155+24], %r215;
	ld.local.u32 	%r216, [%r159+8];
	ld.local.u32 	%r217, [%r159+4];
	and.b32  	%r60, %r45, 31;
	setp.eq.s32	%p15, %r60, 0;
	@%p15 bra 	BB48_21;

	mov.u32 	%r160, 32;
	sub.s32 	%r161, %r160, %r60;
	shr.u32 	%r162, %r217, %r161;
	shl.b32 	%r163, %r216, %r60;
	add.s32 	%r216, %r162, %r163;
	add.s32 	%r198, %r159, 8;
	ld.local.u32 	%r164, [%r198+-8];
	shr.u32 	%r165, %r164, %r161;
	shl.b32 	%r166, %r217, %r60;
	add.s32 	%r217, %r165, %r166;

BB48_21:
	shr.u32 	%r167, %r217, 30;
	shl.b32 	%r168, %r216, 2;
	add.s32 	%r218, %r167, %r168;
	shl.b32 	%r66, %r217, 2;
	shr.u32 	%r169, %r218, 31;
	shr.u32 	%r170, %r216, 30;
	add.s32 	%r67, %r169, %r170;
	setp.eq.s32	%p16, %r169, 0;
	mov.u32 	%r219, %r56;
	mov.u32 	%r220, %r66;
	@%p16 bra 	BB48_23;

	not.b32 	%r171, %r218;
	neg.s32 	%r68, %r66;
	setp.eq.s32	%p17, %r66, 0;
	selp.u32	%r172, 1, 0, %p17;
	add.s32 	%r218, %r172, %r171;
	xor.b32  	%r70, %r56, -2147483648;
	mov.u32 	%r219, %r70;
	mov.u32 	%r220, %r68;

BB48_23:
	mov.u32 	%r72, %r219;
	neg.s32 	%r173, %r67;
	setp.eq.s32	%p18, %r56, 0;
	selp.b32	%r223, %r67, %r173, %p18;
	clz.b32 	%r222, %r218;
	setp.eq.s32	%p19, %r222, 0;
	shl.b32 	%r174, %r218, %r222;
	mov.u32 	%r175, 32;
	sub.s32 	%r176, %r175, %r222;
	shr.u32 	%r177, %r220, %r176;
	add.s32 	%r178, %r177, %r174;
	selp.b32	%r76, %r218, %r178, %p19;
	mov.u32 	%r179, -921707870;
	mul.hi.u32 	%r221, %r76, %r179;
	setp.lt.s32	%p20, %r221, 1;
	@%p20 bra 	BB48_25;

	mul.lo.s32 	%r180, %r76, -921707870;
	shr.u32 	%r181, %r180, 31;
	shl.b32 	%r182, %r221, 1;
	add.s32 	%r221, %r181, %r182;
	add.s32 	%r222, %r222, 1;

BB48_25:
	mov.u32 	%r183, 126;
	sub.s32 	%r184, %r183, %r222;
	shl.b32 	%r185, %r184, 23;
	add.s32 	%r186, %r221, 1;
	shr.u32 	%r187, %r186, 7;
	add.s32 	%r188, %r187, 1;
	shr.u32 	%r189, %r188, 1;
	add.s32 	%r190, %r189, %r185;
	or.b32  	%r191, %r190, %r72;
	mov.b32 	 %f124, %r191;

BB48_26:
	mul.rn.f32 	%f14, %f124, %f124;
	add.s32 	%r83, %r223, 1;
	and.b32  	%r84, %r83, 1;
	setp.eq.s32	%p21, %r84, 0;
	@%p21 bra 	BB48_28;

	mov.f32 	%f109, 0fBAB6061A;
	mov.f32 	%f110, 0f37CCF5CE;
	fma.rn.f32 	%f125, %f110, %f14, %f109;
	bra.uni 	BB48_29;

BB48_2:
	add.f32 	%f29, %f1, 0fC019E8A9;
	add.f32 	%f30, %f29, 0fB3E971B3;
	mov.f32 	%f31, 0fA9ACA9B3;
	mov.f32 	%f32, 0fA6B3B8E7;
	fma.rn.f32 	%f33, %f32, %f30, %f31;
	mov.f32 	%f34, 0f2C3F0E18;
	fma.rn.f32 	%f35, %f33, %f30, %f34;
	mov.f32 	%f36, 0fACD41781;
	fma.rn.f32 	%f37, %f35, %f30, %f36;
	mov.f32 	%f38, 0fAFE90F38;
	fma.rn.f32 	%f39, %f37, %f30, %f38;
	mov.f32 	%f40, 0f3020305B;
	fma.rn.f32 	%f41, %f39, %f30, %f40;
	mov.f32 	%f42, 0f33797143;
	fma.rn.f32 	%f43, %f41, %f30, %f42;
	mov.f32 	%f44, 0f30F76F85;
	fma.rn.f32 	%f45, %f43, %f30, %f44;
	mov.f32 	%f46, 0fB6B6DFC6;
	fma.rn.f32 	%f47, %f45, %f30, %f46;
	mov.f32 	%f48, 0fB6F665C9;
	fma.rn.f32 	%f49, %f47, %f30, %f48;
	mov.f32 	%f50, 0f399E2DEB;
	fma.rn.f32 	%f51, %f49, %f30, %f50;
	mov.f32 	%f52, 0f3A4AE334;
	fma.rn.f32 	%f53, %f51, %f30, %f52;
	mov.f32 	%f54, 0fBBEEAA1B;
	fma.rn.f32 	%f55, %f53, %f30, %f54;
	mov.f32 	%f56, 0fBCDA7747;
	fma.rn.f32 	%f57, %f55, %f30, %f56;
	mul.f32 	%f58, %f30, %f57;
	add.f32 	%f59, %f1, 0fC0B0A47B;
	add.f32 	%f60, %f59, 0f339A7A37;
	mul.f32 	%f61, %f60, %f58;
	add.f32 	%f62, %f1, 0fC10A75AB;
	add.f32 	%f63, %f62, 0fB4CCCDED;
	mul.f32 	%f128, %f63, %f61;
	bra.uni 	BB48_37;

BB48_28:
	mov.f32 	%f111, 0f3C08839E;
	mov.f32 	%f112, 0fB94CA1F9;
	fma.rn.f32 	%f125, %f112, %f14, %f111;

BB48_29:
	@%p21 bra 	BB48_31;

	mov.f32 	%f113, 0f3D2AAAA5;
	fma.rn.f32 	%f114, %f125, %f14, %f113;
	mov.f32 	%f115, 0fBF000000;
	fma.rn.f32 	%f126, %f114, %f14, %f115;
	bra.uni 	BB48_32;

BB48_31:
	mov.f32 	%f116, 0fBE2AAAA3;
	fma.rn.f32 	%f117, %f125, %f14, %f116;
	mov.f32 	%f118, 0f00000000;
	fma.rn.f32 	%f126, %f117, %f14, %f118;

BB48_32:
	fma.rn.f32 	%f127, %f126, %f124, %f124;
	@%p21 bra 	BB48_34;

	fma.rn.f32 	%f127, %f126, %f14, %f76;

BB48_34:
	and.b32  	%r192, %r83, 2;
	setp.eq.s32	%p24, %r192, 0;
	@%p24 bra 	BB48_36;

	mov.f32 	%f120, 0f00000000;
	mov.f32 	%f121, 0fBF800000;
	fma.rn.f32 	%f127, %f127, %f121, %f120;

BB48_36:
	mul.f32 	%f128, %f4, %f127;

BB48_37:
	cvta.to.global.u32 	%r193, %r85;
	add.s32 	%r195, %r193, %r93;
	st.global.f32 	[%r195], %f128;

BB48_38:
	ret;
}

	// .globl	vec_j1f
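// vec_j1f: out[i] = J1(in[i]); same scheme as vec_j0f.
// |x| <= 7.85 (0f40FB3333, branch BB49_2): |x| times a polynomial
// anchored at the first two positive zeros of J1 (3.83171 and
// 7.01559, hi+lo pairs 0fC0753AAC + 0f33A5090F and
// 0fC0E07FB0 + 0f3444B8DB).
// |x| > 7.85: J1(x) ~ sqrt(2/(pi*x)) * cos(x - 3*pi/4), the phase
// offset appearing as 0fC016CBE4 (~-2.356194); same pi/2 reduction
// and __cudart_i2opi_f large-argument path as vec_j0f. BB49_37
// restores the odd symmetry J1(-x) = -J1(x), and for |x| below
// 0f0DA24260 (~1e-30) the result simply takes the sign of x.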
.visible .entry vec_j1f(
	.param .u32 vec_j1f_param_0,
	.param .u32 vec_j1f_param_1,
	.param .u32 vec_j1f_param_2
)
{
	.local .align 4 .b8 	__local_depot49[28];
	.reg .b32 	%SP;
	.reg .b32 	%SPL;
	.reg .pred 	%p<27>;
	.reg .f32 	%f<129>;
	.reg .b32 	%r<235>;


	mov.u32 	%r234, __local_depot49;
	cvta.local.u32 	%SP, %r234;
	ld.param.u32 	%r87, [vec_j1f_param_0];
	ld.param.u32 	%r85, [vec_j1f_param_1];
	ld.param.u32 	%r86, [vec_j1f_param_2];
	mov.u32 	%r88, %ntid.x;
	mov.u32 	%r89, %ctaid.x;
	mov.u32 	%r90, %tid.x;
	mad.lo.s32 	%r1, %r88, %r89, %r90;
	setp.ge.u32	%p1, %r1, %r87;
	@%p1 bra 	BB49_38;

	cvta.to.global.u32 	%r91, %r86;
	shl.b32 	%r92, %r1, 2;
	add.s32 	%r93, %r91, %r92;
	ld.global.f32 	%f1, [%r93];
	abs.f32 	%f2, %f1;
	setp.gtu.f32	%p2, %f2, 0f40FB3333;
	@%p2 bra 	BB49_3;
	bra.uni 	BB49_2;

BB49_3:
	abs.f32 	%f63, %f2;
	mov.f32 	%f128, 0f00000000;
	setp.eq.f32	%p3, %f63, 0f7F800000;
	@%p3 bra 	BB49_37;

	// inline asm
	rcp.approx.ftz.f32 %f64,%f2;
	// inline asm
	mul.f32 	%f66, %f64, %f64;
	mov.f32 	%f67, 0f3F3FF7E9;
	mov.f32 	%f68, 0fC082CB37;
	fma.rn.f32 	%f69, %f68, %f66, %f67;
	mov.f32 	%f70, 0fBE458BAE;
	fma.rn.f32 	%f71, %f69, %f66, %f70;
	mov.f32 	%f72, 0f3E3FFF8B;
	fma.rn.f32 	%f73, %f71, %f66, %f72;
	mov.f32 	%f74, 0f3F800000;
	fma.rn.f32 	%f4, %f73, %f66, %f74;
	mov.f32 	%f75, 0f3EB914AD;
	mov.f32 	%f76, 0fBFCA3BA2;
	fma.rn.f32 	%f77, %f76, %f66, %f75;
	mov.f32 	%f78, 0fBE27F2EC;
	fma.rn.f32 	%f79, %f77, %f66, %f78;
	mov.f32 	%f80, 0f3EBFFFFD;
	fma.rn.f32 	%f81, %f79, %f66, %f80;
	fma.rn.f32 	%f5, %f81, %f64, %f2;
	rsqrt.approx.f32 	%f6, %f2;
	mul.f32 	%f82, %f5, 0f3F22F983;
	cvt.rni.s32.f32	%r221, %f82;
	cvt.rn.f32.s32	%f83, %r221;
	neg.f32 	%f84, %f83;
	mov.f32 	%f85, 0f3FC90FDA;
	fma.rn.f32 	%f86, %f84, %f85, %f5;
	mov.f32 	%f87, 0f33A22168;
	fma.rn.f32 	%f88, %f84, %f87, %f86;
	mov.f32 	%f89, 0f27C234C5;
	fma.rn.f32 	%f122, %f84, %f89, %f88;
	abs.f32 	%f90, %f5;
	setp.leu.f32	%p4, %f90, 0f47CE4780;
	@%p4 bra 	BB49_14;

	add.u32 	%r97, %SP, 0;
	cvta.to.local.u32 	%r212, %r97;
	mov.b32 	 %r4, %f5;
	bfe.u32 	%r98, %r4, 23, 8;
	add.s32 	%r99, %r98, -128;
	shl.b32 	%r100, %r4, 8;
	or.b32  	%r5, %r100, -2147483648;
	shr.u32 	%r6, %r99, 5;
	mov.u32 	%r213, 0;
	mov.u32 	%r211, 6;
	mov.u32 	%r210, __cudart_i2opi_f;

BB49_6:
	.pragma "nounroll";
	ld.const.u32 	%r103, [%r210];
	// inline asm
	{
	mad.lo.cc.u32   %r101, %r103, %r5, %r213;
	madc.hi.u32     %r213, %r103, %r5,  0;
	}
	// inline asm
	st.local.u32 	[%r212], %r101;
	add.s32 	%r212, %r212, 4;
	add.s32 	%r210, %r210, 4;
	add.s32 	%r211, %r211, -1;
	setp.ne.s32	%p5, %r211, 0;
	@%p5 bra 	BB49_6;

	and.b32  	%r15, %r4, -2147483648;
	cvta.to.local.u32 	%r107, %r97;
	mov.u32 	%r108, 4;
	sub.s32 	%r109, %r108, %r6;
	shl.b32 	%r110, %r109, 2;
	add.s32 	%r111, %r110, %r107;
	st.local.u32 	[%r107+24], %r213;
	bfe.u32 	%r16, %r4, 23, 5;
	ld.local.u32 	%r214, [%r111+8];
	ld.local.u32 	%r215, [%r111+4];
	setp.eq.s32	%p6, %r16, 0;
	@%p6 bra 	BB49_9;

	mov.u32 	%r112, 32;
	sub.s32 	%r113, %r112, %r16;
	shr.u32 	%r114, %r215, %r113;
	shl.b32 	%r115, %r214, %r16;
	add.s32 	%r214, %r114, %r115;
	add.s32 	%r208, %r111, 8;
	ld.local.u32 	%r116, [%r208+-8];
	shr.u32 	%r117, %r116, %r113;
	shl.b32 	%r118, %r215, %r16;
	add.s32 	%r215, %r117, %r118;

BB49_9:
	shr.u32 	%r119, %r215, 30;
	shl.b32 	%r120, %r214, 2;
	add.s32 	%r216, %r119, %r120;
	shl.b32 	%r25, %r215, 2;
	shr.u32 	%r121, %r216, 31;
	shr.u32 	%r122, %r214, 30;
	add.s32 	%r26, %r121, %r122;
	setp.eq.s32	%p7, %r121, 0;
	mov.u32 	%r217, %r15;
	mov.u32 	%r218, %r25;
	@%p7 bra 	BB49_11;

	not.b32 	%r123, %r216;
	neg.s32 	%r27, %r25;
	setp.eq.s32	%p8, %r25, 0;
	selp.u32	%r124, 1, 0, %p8;
	add.s32 	%r216, %r124, %r123;
	xor.b32  	%r29, %r15, -2147483648;
	mov.u32 	%r217, %r29;
	mov.u32 	%r218, %r27;

BB49_11:
	mov.u32 	%r31, %r217;
	neg.s32 	%r125, %r26;
	setp.eq.s32	%p9, %r15, 0;
	selp.b32	%r221, %r26, %r125, %p9;
	clz.b32 	%r220, %r216;
	setp.eq.s32	%p10, %r220, 0;
	shl.b32 	%r126, %r216, %r220;
	mov.u32 	%r127, 32;
	sub.s32 	%r128, %r127, %r220;
	shr.u32 	%r129, %r218, %r128;
	add.s32 	%r130, %r129, %r126;
	selp.b32	%r35, %r216, %r130, %p10;
	mov.u32 	%r131, -921707870;
	mul.hi.u32 	%r219, %r35, %r131;
	setp.lt.s32	%p11, %r219, 1;
	@%p11 bra 	BB49_13;

	mul.lo.s32 	%r132, %r35, -921707870;
	shr.u32 	%r133, %r132, 31;
	shl.b32 	%r134, %r219, 1;
	add.s32 	%r219, %r133, %r134;
	add.s32 	%r220, %r220, 1;

BB49_13:
	mov.u32 	%r135, 126;
	sub.s32 	%r136, %r135, %r220;
	shl.b32 	%r137, %r136, 23;
	add.s32 	%r138, %r219, 1;
	shr.u32 	%r139, %r138, 7;
	add.s32 	%r140, %r139, 1;
	shr.u32 	%r141, %r140, 1;
	add.s32 	%r142, %r141, %r137;
	or.b32  	%r143, %r142, %r31;
	mov.b32 	 %f122, %r143;

BB49_14:
	mul.f32 	%f91, %f6, 0f3F4C422A;
	mul.f32 	%f10, %f4, %f91;
	and.b32  	%r144, %r221, 3;
	cvt.rn.f32.s32	%f92, %r144;
	add.f32 	%f93, %f122, 0fC016CBE4;
	fma.rn.f32 	%f123, %f92, 0f3FC90FDB, %f93;
	abs.f32 	%f94, %f123;
	setp.neu.f32	%p12, %f94, 0f7F800000;
	@%p12 bra 	BB49_16;

	mov.f32 	%f95, 0f00000000;
	mul.rn.f32 	%f123, %f123, %f95;

BB49_16:
	mul.f32 	%f96, %f123, 0f3F22F983;
	cvt.rni.s32.f32	%r233, %f96;
	cvt.rn.f32.s32	%f97, %r233;
	neg.f32 	%f98, %f97;
	fma.rn.f32 	%f100, %f98, %f85, %f123;
	fma.rn.f32 	%f102, %f98, %f87, %f100;
	fma.rn.f32 	%f124, %f98, %f89, %f102;
	abs.f32 	%f104, %f123;
	setp.leu.f32	%p13, %f104, 0f47CE4780;
	@%p13 bra 	BB49_26;

	mov.b32 	 %r43, %f123;
	shr.u32 	%r44, %r43, 23;
	bfe.u32 	%r148, %r43, 23, 8;
	add.s32 	%r149, %r148, -128;
	shl.b32 	%r150, %r43, 8;
	or.b32  	%r45, %r150, -2147483648;
	shr.u32 	%r46, %r149, 5;
	add.u32 	%r151, %SP, 0;
	cvta.to.local.u32 	%r224, %r151;
	mov.u32 	%r225, 0;
	mov.u32 	%r223, 6;
	mov.u32 	%r222, __cudart_i2opi_f;

BB49_18:
	.pragma "nounroll";
	ld.const.u32 	%r154, [%r222];
	// inline asm
	{
	mad.lo.cc.u32   %r152, %r154, %r45, %r225;
	madc.hi.u32     %r225, %r154, %r45,  0;
	}
	// inline asm
	st.local.u32 	[%r224], %r152;
	add.s32 	%r224, %r224, 4;
	add.s32 	%r222, %r222, 4;
	add.s32 	%r223, %r223, -1;
	setp.ne.s32	%p14, %r223, 0;
	@%p14 bra 	BB49_18;

	and.b32  	%r56, %r43, -2147483648;
	cvta.to.local.u32 	%r158, %r151;
	mov.u32 	%r159, 4;
	sub.s32 	%r160, %r159, %r46;
	shl.b32 	%r161, %r160, 2;
	add.s32 	%r162, %r161, %r158;
	st.local.u32 	[%r158+24], %r225;
	ld.local.u32 	%r226, [%r162+8];
	ld.local.u32 	%r227, [%r162+4];
	and.b32  	%r60, %r44, 31;
	setp.eq.s32	%p15, %r60, 0;
	@%p15 bra 	BB49_21;

	mov.u32 	%r163, 32;
	sub.s32 	%r164, %r163, %r60;
	shr.u32 	%r165, %r227, %r164;
	shl.b32 	%r166, %r226, %r60;
	add.s32 	%r226, %r165, %r166;
	add.s32 	%r209, %r162, 8;
	ld.local.u32 	%r167, [%r209+-8];
	shr.u32 	%r168, %r167, %r164;
	shl.b32 	%r169, %r227, %r60;
	add.s32 	%r227, %r168, %r169;

BB49_21:
	shr.u32 	%r170, %r227, 30;
	shl.b32 	%r171, %r226, 2;
	add.s32 	%r228, %r170, %r171;
	shl.b32 	%r66, %r227, 2;
	shr.u32 	%r172, %r228, 31;
	shr.u32 	%r173, %r226, 30;
	add.s32 	%r67, %r172, %r173;
	setp.eq.s32	%p16, %r172, 0;
	mov.u32 	%r229, %r56;
	mov.u32 	%r230, %r66;
	@%p16 bra 	BB49_23;

	not.b32 	%r174, %r228;
	neg.s32 	%r68, %r66;
	setp.eq.s32	%p17, %r66, 0;
	selp.u32	%r175, 1, 0, %p17;
	add.s32 	%r228, %r175, %r174;
	xor.b32  	%r70, %r56, -2147483648;
	mov.u32 	%r229, %r70;
	mov.u32 	%r230, %r68;

BB49_23:
	mov.u32 	%r72, %r229;
	neg.s32 	%r176, %r67;
	setp.eq.s32	%p18, %r56, 0;
	selp.b32	%r233, %r67, %r176, %p18;
	clz.b32 	%r232, %r228;
	setp.eq.s32	%p19, %r232, 0;
	shl.b32 	%r177, %r228, %r232;
	mov.u32 	%r178, 32;
	sub.s32 	%r179, %r178, %r232;
	shr.u32 	%r180, %r230, %r179;
	add.s32 	%r181, %r180, %r177;
	selp.b32	%r76, %r228, %r181, %p19;
	mov.u32 	%r182, -921707870;
	mul.hi.u32 	%r231, %r76, %r182;
	setp.lt.s32	%p20, %r231, 1;
	@%p20 bra 	BB49_25;

	mul.lo.s32 	%r183, %r76, -921707870;
	shr.u32 	%r184, %r183, 31;
	shl.b32 	%r185, %r231, 1;
	add.s32 	%r231, %r184, %r185;
	add.s32 	%r232, %r232, 1;

BB49_25:
	mov.u32 	%r186, 126;
	sub.s32 	%r187, %r186, %r232;
	shl.b32 	%r188, %r187, 23;
	add.s32 	%r189, %r231, 1;
	shr.u32 	%r190, %r189, 7;
	add.s32 	%r191, %r190, 1;
	shr.u32 	%r192, %r191, 1;
	add.s32 	%r193, %r192, %r188;
	or.b32  	%r194, %r193, %r72;
	mov.b32 	 %f124, %r194;

BB49_26:
	mul.rn.f32 	%f17, %f124, %f124;
	add.s32 	%r83, %r233, 1;
	and.b32  	%r84, %r83, 1;
	setp.eq.s32	%p21, %r84, 0;
	@%p21 bra 	BB49_28;

	mov.f32 	%f105, 0fBAB6061A;
	mov.f32 	%f106, 0f37CCF5CE;
	fma.rn.f32 	%f125, %f106, %f17, %f105;
	bra.uni 	BB49_29;

BB49_2:
	add.f32 	%f31, %f2, 0fC0753AAC;
	add.f32 	%f32, %f31, 0f33A5090F;
	mov.f32 	%f33, 0f2B81BF42;
	mov.f32 	%f34, 0f29AF3463;
	fma.rn.f32 	%f35, %f34, %f32, %f33;
	mov.f32 	%f36, 0fADE21EC1;
	fma.rn.f32 	%f37, %f35, %f32, %f36;
	mov.f32 	%f38, 0fAF5DDEFF;
	fma.rn.f32 	%f39, %f37, %f32, %f38;
	mov.f32 	%f40, 0f319B0C9D;
	fma.rn.f32 	%f41, %f39, %f32, %f40;
	mov.f32 	%f42, 0f32E81173;
	fma.rn.f32 	%f43, %f41, %f32, %f42;
	mov.f32 	%f44, 0fB50F8DC8;
	fma.rn.f32 	%f45, %f43, %f32, %f44;
	mov.f32 	%f46, 0fB61E653D;
	fma.rn.f32 	%f47, %f45, %f32, %f46;
	mov.f32 	%f48, 0f382CD9C5;
	fma.rn.f32 	%f49, %f47, %f32, %f48;
	mov.f32 	%f50, 0f38F9EB10;
	fma.rn.f32 	%f51, %f49, %f32, %f50;
	mov.f32 	%f52, 0fBAECEB9C;
	fma.rn.f32 	%f53, %f51, %f32, %f52;
	mov.f32 	%f54, 0fBB276FFD;
	fma.rn.f32 	%f55, %f53, %f32, %f54;
	mov.f32 	%f56, 0f3D073993;
	fma.rn.f32 	%f57, %f55, %f32, %f56;
	add.f32 	%f58, %f2, 0fC0E07FB0;
	add.f32 	%f59, %f58, 0f3444B8DB;
	mul.f32 	%f60, %f59, %f57;
	mul.f32 	%f61, %f32, %f60;
	mul.f32 	%f128, %f2, %f61;
	bra.uni 	BB49_37;

BB49_28:
	mov.f32 	%f107, 0f3C08839E;
	mov.f32 	%f108, 0fB94CA1F9;
	fma.rn.f32 	%f125, %f108, %f17, %f107;

BB49_29:
	@%p21 bra 	BB49_31;

	mov.f32 	%f109, 0f3D2AAAA5;
	fma.rn.f32 	%f110, %f125, %f17, %f109;
	mov.f32 	%f111, 0fBF000000;
	fma.rn.f32 	%f126, %f110, %f17, %f111;
	bra.uni 	BB49_32;

BB49_31:
	mov.f32 	%f112, 0fBE2AAAA3;
	fma.rn.f32 	%f113, %f125, %f17, %f112;
	mov.f32 	%f114, 0f00000000;
	fma.rn.f32 	%f126, %f113, %f17, %f114;

BB49_32:
	fma.rn.f32 	%f127, %f126, %f124, %f124;
	@%p21 bra 	BB49_34;

	fma.rn.f32 	%f127, %f126, %f17, %f74;

BB49_34:
	and.b32  	%r195, %r83, 2;
	setp.eq.s32	%p24, %r195, 0;
	@%p24 bra 	BB49_36;

	mov.f32 	%f116, 0f00000000;
	mov.f32 	%f117, 0fBF800000;
	fma.rn.f32 	%f127, %f127, %f117, %f116;

BB49_36:
	mul.f32 	%f128, %f10, %f127;

BB49_37:
	neg.f32 	%f118, %f128;
	setp.lt.f32	%p25, %f1, 0f00000000;
	selp.f32	%f119, %f118, %f128, %p25;
	mov.b32 	 %r196, %f1;
	and.b32  	%r197, %r196, -2147483648;
	mov.b32 	 %r198, %f119;
	and.b32  	%r199, %r198, 2147483647;
	or.b32  	%r200, %r199, %r197;
	mov.b32 	 %f120, %r200;
	setp.lt.f32	%p26, %f2, 0f0DA24260;
	selp.f32	%f121, %f120, %f119, %p26;
	cvta.to.global.u32 	%r205, %r85;
	add.s32 	%r207, %r205, %r92;
	st.global.f32 	[%r207], %f121;

BB49_38:
	ret;
}

	// .globl	vec_lgammaf
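// vec_lgammaf: out[i] = lgamma(in[i]) = ln|Gamma(x)|, piecewise on |x|:
// |x| >= 7.8 (0f40F9999A, BB50_3): Stirling,
// (x - 0.5)*ln(x) - x + 0.5*ln(2*pi) (0f3F6B3F8E) plus a 1/x^2 series;
// 3 <= |x| < 7.8 (BB50_7): rational approximation in (|x| - 3);
// 1.5 <= |x| < 3 (BB50_9): polynomial in (|x| - 2);
// 0.7 <= |x| < 1.5 (BB50_11): polynomial in (1 - |x|);
// |x| < 0.7 (BB50_12): lgamma = -ln(P(|x|)) with P ~ 1/Gamma,
// leading terms x + 0.5772*x^2 (0f3F13C463 being Euler's gamma).
// Negative x from BB50_16 on: +inf at nonpositive integers, else the
// reflection formula lgamma(x) = ln(pi) (0f3F928682)
// - ln|x*sin(pi*x)| - lgamma(|x|), with sin(pi*x) rebuilt from the
// half-integer quadrant logic at BB50_19; for |x| < ~1e-19
// (0f1FEC1E4A) this collapses to -ln|x| (BB50_33).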
.visible .entry vec_lgammaf(
	.param .u32 vec_lgammaf_param_0,
	.param .u32 vec_lgammaf_param_1,
	.param .u32 vec_lgammaf_param_2
)
{
	.reg .pred 	%p<34>;
	.reg .f32 	%f<271>;
	.reg .b32 	%r<33>;


	ld.param.u32 	%r6, [vec_lgammaf_param_0];
	ld.param.u32 	%r4, [vec_lgammaf_param_1];
	ld.param.u32 	%r5, [vec_lgammaf_param_2];
	mov.u32 	%r7, %tid.x;
	mov.u32 	%r8, %ntid.x;
	mov.u32 	%r9, %ctaid.x;
	mad.lo.s32 	%r1, %r8, %r9, %r7;
	setp.ge.u32	%p1, %r1, %r6;
	@%p1 bra 	BB50_38;

	cvta.to.global.u32 	%r10, %r5;
	shl.b32 	%r11, %r1, 2;
	add.s32 	%r12, %r10, %r11;
	ld.global.f32 	%f1, [%r12];
	abs.f32 	%f2, %f1;
	setp.ltu.f32	%p2, %f2, 0f40400000;
	@%p2 bra 	BB50_8;
	bra.uni 	BB50_2;

BB50_8:
	setp.ltu.f32	%p10, %f2, 0f3FC00000;
	@%p10 bra 	BB50_10;
	bra.uni 	BB50_9;

BB50_10:
	setp.ltu.f32	%p11, %f2, 0f3F333333;
	@%p11 bra 	BB50_12;
	bra.uni 	BB50_11;

BB50_12:
	mov.f32 	%f143, 0fBBB34878;
	mov.f32 	%f144, 0f3B6B1C86;
	fma.rn.f32 	%f145, %f144, %f2, %f143;
	mov.f32 	%f146, 0fBD36CAEF;
	fma.rn.f32 	%f147, %f145, %f2, %f146;
	mov.f32 	%f148, 0f3E2B5555;
	fma.rn.f32 	%f149, %f147, %f2, %f148;
	mov.f32 	%f150, 0fBD2C96C7;
	fma.rn.f32 	%f151, %f149, %f2, %f150;
	mov.f32 	%f152, 0fBF27E6EB;
	fma.rn.f32 	%f153, %f151, %f2, %f152;
	mov.f32 	%f154, 0f3F13C463;
	fma.rn.f32 	%f155, %f153, %f2, %f154;
	mul.f32 	%f156, %f2, %f155;
	fma.rn.f32 	%f11, %f156, %f2, %f2;
	setp.gt.f32	%p12, %f11, 0f00000000;
	setp.lt.f32	%p13, %f11, 0f7F800000;
	and.pred  	%p14, %p12, %p13;
	@%p14 bra 	BB50_14;
	bra.uni 	BB50_13;

BB50_14:
	setp.lt.f32	%p15, %f11, 0f00800000;
	mul.f32 	%f159, %f11, 0f4B800000;
	selp.f32	%f160, %f159, %f11, %p15;
	selp.f32	%f161, 0fC3170000, 0fC2FE0000, %p15;
	mov.b32 	 %r17, %f160;
	and.b32  	%r18, %r17, 8388607;
	or.b32  	%r19, %r18, 1065353216;
	mov.b32 	 %f162, %r19;
	shr.u32 	%r20, %r17, 23;
	cvt.rn.f32.u32	%f163, %r20;
	add.f32 	%f164, %f161, %f163;
	setp.gt.f32	%p16, %f162, 0f3FAE147B;
	mul.f32 	%f165, %f162, 0f3F000000;
	add.f32 	%f166, %f164, 0f3F800000;
	selp.f32	%f167, %f165, %f162, %p16;
	selp.f32	%f168, %f166, %f164, %p16;
	add.f32 	%f158, %f167, 0f3F800000;
	add.f32 	%f169, %f167, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f157,%f158;
	// inline asm
	mul.f32 	%f170, %f169, %f169;
	neg.f32 	%f171, %f170;
	mul.rn.f32 	%f172, %f157, %f171;
	add.rn.f32 	%f173, %f169, %f172;
	mul.f32 	%f174, %f173, %f173;
	mov.f32 	%f175, 0f3C4C6A36;
	mov.f32 	%f176, 0f3B1E94E6;
	fma.rn.f32 	%f177, %f176, %f174, %f175;
	mov.f32 	%f178, 0f3DAAAB1A;
	fma.rn.f32 	%f179, %f177, %f174, %f178;
	mul.f32 	%f180, %f174, %f179;
	fma.rn.f32 	%f181, %f180, %f173, %f172;
	add.f32 	%f182, %f169, %f181;
	mov.f32 	%f183, 0f3F317218;
	fma.rn.f32 	%f262, %f168, %f183, %f182;
	bra.uni 	BB50_15;

BB50_2:
	setp.ltu.f32	%p3, %f2, 0f40F9999A;
	@%p3 bra 	BB50_7;
	bra.uni 	BB50_3;

BB50_7:
	add.f32 	%f84, %f2, 0fC0400000;
	mov.f32 	%f85, 0fC640F6F8;
	mov.f32 	%f86, 0fC43B38FB;
	fma.rn.f32 	%f87, %f86, %f84, %f85;
	mov.f32 	%f88, 0fC7206560;
	fma.rn.f32 	%f89, %f87, %f84, %f88;
	mov.f32 	%f90, 0fC73CB6AA;
	fma.rn.f32 	%f91, %f89, %f84, %f90;
	mov.f32 	%f92, 0fC80BAE5A;
	fma.rn.f32 	%f93, %f91, %f84, %f92;
	add.f32 	%f94, %f84, 0fC381A020;
	mov.f32 	%f95, 0fC62864B8;
	fma.rn.f32 	%f96, %f94, %f84, %f95;
	mov.f32 	%f97, 0fC7B50686;
	fma.rn.f32 	%f98, %f96, %f84, %f97;
	mov.f32 	%f99, 0fC8498465;
	fma.rn.f32 	%f83, %f98, %f84, %f99;
	// inline asm
	rcp.approx.ftz.f32 %f82,%f83;
	// inline asm
	fma.rn.f32 	%f270, %f93, %f82, %f84;
	bra.uni 	BB50_16;

BB50_9:
	add.f32 	%f100, %f2, 0fC0000000;
	mov.f32 	%f101, 0fB967A002;
	mov.f32 	%f102, 0f385007FA;
	fma.rn.f32 	%f103, %f102, %f100, %f101;
	mov.f32 	%f104, 0f3A0DE6FC;
	fma.rn.f32 	%f105, %f103, %f100, %f104;
	mov.f32 	%f106, 0fBA9DE0E2;
	fma.rn.f32 	%f107, %f105, %f100, %f106;
	mov.f32 	%f108, 0f3B3D05B7;
	fma.rn.f32 	%f109, %f107, %f100, %f108;
	mov.f32 	%f110, 0fBBF1EB10;
	fma.rn.f32 	%f111, %f109, %f100, %f110;
	mov.f32 	%f112, 0f3CA89A28;
	fma.rn.f32 	%f113, %f111, %f100, %f112;
	mov.f32 	%f114, 0fBD89F01A;
	fma.rn.f32 	%f115, %f113, %f100, %f114;
	mov.f32 	%f116, 0f3EA51A66;
	fma.rn.f32 	%f117, %f115, %f100, %f116;
	mov.f32 	%f118, 0f3ED87730;
	fma.rn.f32 	%f119, %f117, %f100, %f118;
	mul.f32 	%f270, %f100, %f119;
	bra.uni 	BB50_16;

BB50_3:
	// inline asm
	rcp.approx.ftz.f32 %f40,%f2;
	// inline asm
	mul.f32 	%f42, %f40, %f40;
	mov.f32 	%f43, 0fBB360953;
	mov.f32 	%f44, 0f3A4BE755;
	fma.rn.f32 	%f45, %f44, %f42, %f43;
	mov.f32 	%f46, 0f3DAAAAA3;
	fma.rn.f32 	%f47, %f45, %f42, %f46;
	mov.f32 	%f48, 0f3F6B3F8E;
	fma.rn.f32 	%f3, %f47, %f40, %f48;
	setp.lt.f32	%p4, %f2, 0f7F800000;
	setp.gt.f32	%p5, %f2, 0f00000000;
	and.pred  	%p6, %p5, %p4;
	@%p6 bra 	BB50_5;
	bra.uni 	BB50_4;

BB50_5:
	setp.lt.f32	%p7, %f2, 0f00800000;
	mul.f32 	%f51, %f2, 0f4B800000;
	selp.f32	%f52, %f51, %f2, %p7;
	selp.f32	%f53, 0fC3170000, 0fC2FE0000, %p7;
	mov.b32 	 %r13, %f52;
	and.b32  	%r14, %r13, 8388607;
	or.b32  	%r15, %r14, 1065353216;
	mov.b32 	 %f54, %r15;
	shr.u32 	%r16, %r13, 23;
	cvt.rn.f32.u32	%f55, %r16;
	add.f32 	%f56, %f53, %f55;
	setp.gt.f32	%p8, %f54, 0f3FAE147B;
	mul.f32 	%f57, %f54, 0f3F000000;
	add.f32 	%f58, %f56, 0f3F800000;
	selp.f32	%f59, %f57, %f54, %p8;
	selp.f32	%f60, %f58, %f56, %p8;
	add.f32 	%f50, %f59, 0f3F800000;
	add.f32 	%f61, %f59, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f49,%f50;
	// inline asm
	mul.f32 	%f62, %f61, %f61;
	neg.f32 	%f63, %f62;
	mul.rn.f32 	%f64, %f49, %f63;
	add.rn.f32 	%f65, %f61, %f64;
	mul.f32 	%f66, %f65, %f65;
	mov.f32 	%f67, 0f3C4C6A36;
	mov.f32 	%f68, 0f3B1E94E6;
	fma.rn.f32 	%f69, %f68, %f66, %f67;
	mov.f32 	%f70, 0f3DAAAB1A;
	fma.rn.f32 	%f71, %f69, %f66, %f70;
	mul.f32 	%f72, %f66, %f71;
	fma.rn.f32 	%f73, %f72, %f65, %f64;
	add.f32 	%f74, %f61, %f73;
	mov.f32 	%f75, 0f3F317218;
	fma.rn.f32 	%f261, %f60, %f75, %f74;
	bra.uni 	BB50_6;

BB50_11:
	mov.f32 	%f120, 0f3F800000;
	sub.f32 	%f121, %f120, %f2;
	mov.f32 	%f122, 0f3DD47577;
	mov.f32 	%f123, 0f3D3BEF76;
	fma.rn.f32 	%f124, %f123, %f121, %f122;
	mov.f32 	%f125, 0f3DFB8079;
	fma.rn.f32 	%f126, %f124, %f121, %f125;
	mov.f32 	%f127, 0f3E0295B5;
	fma.rn.f32 	%f128, %f126, %f121, %f127;
	mov.f32 	%f129, 0f3E12A765;
	fma.rn.f32 	%f130, %f128, %f121, %f129;
	mov.f32 	%f131, 0f3E2D6867;
	fma.rn.f32 	%f132, %f130, %f121, %f131;
	mov.f32 	%f133, 0f3E5462BF;
	fma.rn.f32 	%f134, %f132, %f121, %f133;
	mov.f32 	%f135, 0f3E8A8A72;
	fma.rn.f32 	%f136, %f134, %f121, %f135;
	mov.f32 	%f137, 0f3ECD26A4;
	fma.rn.f32 	%f138, %f136, %f121, %f137;
	mov.f32 	%f139, 0f3F528D32;
	fma.rn.f32 	%f140, %f138, %f121, %f139;
	mov.f32 	%f141, 0f3F13C468;
	fma.rn.f32 	%f142, %f140, %f121, %f141;
	mul.f32 	%f270, %f121, %f142;
	bra.uni 	BB50_16;

BB50_4:
	lg2.approx.f32 	%f261, %f2;

BB50_6:
	mul.f32 	%f76, %f261, 0f3F000000;
	add.f32 	%f77, %f2, 0fBF000000;
	mul.rn.f32 	%f78, %f76, %f77;
	sub.f32 	%f79, %f78, %f2;
	add.rn.f32 	%f80, %f78, %f3;
	add.f32 	%f81, %f79, %f80;
	setp.eq.f32	%p9, %f2, 0f7F800000;
	selp.f32	%f270, %f2, %f81, %p9;
	bra.uni 	BB50_16;

BB50_13:
	lg2.approx.f32 	%f262, %f11;

BB50_15:
	neg.f32 	%f270, %f262;

BB50_16:
	mov.f32 	%f16, %f270;
	setp.ge.f32	%p17, %f1, 0f00000000;
	mov.f32 	%f269, %f16;
	@%p17 bra 	BB50_37;

	cvt.rmi.f32.f32	%f185, %f2;
	setp.eq.f32	%p18, %f2, %f185;
	mov.f32 	%f184, 0f7F800000;
	mov.f32 	%f269, %f184;
	@%p18 bra 	BB50_37;

	setp.lt.f32	%p19, %f2, 0f1FEC1E4A;
	@%p19 bra 	BB50_33;
	bra.uni 	BB50_19;

BB50_33:
	setp.gt.f32	%p29, %f2, 0f00000000;
	setp.lt.f32	%p30, %f2, 0f7F800000;
	and.pred  	%p31, %p29, %p30;
	@%p31 bra 	BB50_35;
	bra.uni 	BB50_34;

BB50_35:
	setp.lt.f32	%p32, %f2, 0f00800000;
	mul.f32 	%f236, %f2, 0f4B800000;
	selp.f32	%f237, %f236, %f2, %p32;
	selp.f32	%f238, 0fC3170000, 0fC2FE0000, %p32;
	mov.b32 	 %r26, %f237;
	and.b32  	%r27, %r26, 8388607;
	or.b32  	%r28, %r27, 1065353216;
	mov.b32 	 %f239, %r28;
	shr.u32 	%r29, %r26, 23;
	cvt.rn.f32.u32	%f240, %r29;
	add.f32 	%f241, %f238, %f240;
	setp.gt.f32	%p33, %f239, 0f3FAE147B;
	mul.f32 	%f242, %f239, 0f3F000000;
	add.f32 	%f243, %f241, 0f3F800000;
	selp.f32	%f244, %f242, %f239, %p33;
	selp.f32	%f245, %f243, %f241, %p33;
	add.f32 	%f235, %f244, 0f3F800000;
	add.f32 	%f246, %f244, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f234,%f235;
	// inline asm
	mul.f32 	%f247, %f246, %f246;
	neg.f32 	%f248, %f247;
	mul.rn.f32 	%f249, %f234, %f248;
	add.rn.f32 	%f250, %f246, %f249;
	mul.f32 	%f251, %f250, %f250;
	mov.f32 	%f252, 0f3C4C6A36;
	mov.f32 	%f253, 0f3B1E94E6;
	fma.rn.f32 	%f254, %f253, %f251, %f252;
	mov.f32 	%f255, 0f3DAAAB1A;
	fma.rn.f32 	%f256, %f254, %f251, %f255;
	mul.f32 	%f257, %f251, %f256;
	fma.rn.f32 	%f258, %f257, %f250, %f249;
	add.f32 	%f259, %f246, %f258;
	mov.f32 	%f260, 0f3F317218;
	fma.rn.f32 	%f267, %f245, %f260, %f259;
	bra.uni 	BB50_36;

BB50_19:
	add.f32 	%f186, %f2, %f2;
	cvt.rni.f32.f32	%f187, %f186;
	cvt.rzi.s32.f32	%r2, %f187;
	neg.f32 	%f188, %f187;
	mov.f32 	%f189, 0f3F000000;
	fma.rn.f32 	%f190, %f188, %f189, %f2;
	mul.f32 	%f17, %f190, 0f40490FDB;
	mul.rn.f32 	%f18, %f17, %f17;
	and.b32  	%r3, %r2, 1;
	setp.eq.s32	%p20, %r3, 0;
	@%p20 bra 	BB50_21;

	mov.f32 	%f191, 0fBAB6061A;
	mov.f32 	%f192, 0f37CCF5CE;
	fma.rn.f32 	%f263, %f192, %f18, %f191;
	bra.uni 	BB50_22;

BB50_34:
	lg2.approx.f32 	%f267, %f2;

BB50_36:
	neg.f32 	%f269, %f267;
	bra.uni 	BB50_37;

BB50_21:
	mov.f32 	%f193, 0f3C08839E;
	mov.f32 	%f194, 0fB94CA1F9;
	fma.rn.f32 	%f263, %f194, %f18, %f193;

BB50_22:
	@%p20 bra 	BB50_24;

	mov.f32 	%f195, 0f3D2AAAA5;
	fma.rn.f32 	%f196, %f263, %f18, %f195;
	mov.f32 	%f197, 0fBF000000;
	fma.rn.f32 	%f264, %f196, %f18, %f197;
	bra.uni 	BB50_25;

BB50_24:
	mov.f32 	%f198, 0fBE2AAAA3;
	fma.rn.f32 	%f199, %f263, %f18, %f198;
	mov.f32 	%f200, 0f00000000;
	fma.rn.f32 	%f264, %f199, %f18, %f200;

BB50_25:
	fma.rn.f32 	%f265, %f264, %f17, %f17;
	@%p20 bra 	BB50_27;

	mov.f32 	%f201, 0f3F800000;
	fma.rn.f32 	%f265, %f264, %f18, %f201;

BB50_27:
	and.b32  	%r21, %r2, 2;
	setp.eq.s32	%p23, %r21, 0;
	@%p23 bra 	BB50_29;

	mov.f32 	%f202, 0f00000000;
	mov.f32 	%f203, 0fBF800000;
	fma.rn.f32 	%f265, %f265, %f203, %f202;

BB50_29:
	abs.f32 	%f204, %f265;
	mul.f32 	%f30, %f2, %f204;
	setp.gt.f32	%p24, %f30, 0f00000000;
	setp.lt.f32	%p25, %f30, 0f7F800000;
	and.pred  	%p26, %p24, %p25;
	@%p26 bra 	BB50_31;
	bra.uni 	BB50_30;

BB50_31:
	setp.lt.f32	%p27, %f30, 0f00800000;
	mul.f32 	%f207, %f30, 0f4B800000;
	selp.f32	%f208, %f207, %f30, %p27;
	selp.f32	%f209, 0fC3170000, 0fC2FE0000, %p27;
	mov.b32 	 %r22, %f208;
	and.b32  	%r23, %r22, 8388607;
	or.b32  	%r24, %r23, 1065353216;
	mov.b32 	 %f210, %r24;
	shr.u32 	%r25, %r22, 23;
	cvt.rn.f32.u32	%f211, %r25;
	add.f32 	%f212, %f209, %f211;
	setp.gt.f32	%p28, %f210, 0f3FAE147B;
	mul.f32 	%f213, %f210, 0f3F000000;
	add.f32 	%f214, %f212, 0f3F800000;
	selp.f32	%f215, %f213, %f210, %p28;
	selp.f32	%f216, %f214, %f212, %p28;
	add.f32 	%f206, %f215, 0f3F800000;
	add.f32 	%f217, %f215, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f205,%f206;
	// inline asm
	mul.f32 	%f218, %f217, %f217;
	neg.f32 	%f219, %f218;
	mul.rn.f32 	%f220, %f205, %f219;
	add.rn.f32 	%f221, %f217, %f220;
	mul.f32 	%f222, %f221, %f221;
	mov.f32 	%f223, 0f3C4C6A36;
	mov.f32 	%f224, 0f3B1E94E6;
	fma.rn.f32 	%f225, %f224, %f222, %f223;
	mov.f32 	%f226, 0f3DAAAB1A;
	fma.rn.f32 	%f227, %f225, %f222, %f226;
	mul.f32 	%f228, %f222, %f227;
	fma.rn.f32 	%f229, %f228, %f221, %f220;
	add.f32 	%f230, %f217, %f229;
	mov.f32 	%f231, 0f3F317218;
	fma.rn.f32 	%f266, %f216, %f231, %f230;
	bra.uni 	BB50_32;

BB50_30:
	lg2.approx.f32 	%f266, %f30;

BB50_32:
	mov.f32 	%f232, 0f3F928682;
	sub.f32 	%f233, %f232, %f266;
	sub.f32 	%f269, %f233, %f16;

BB50_37:
	cvta.to.global.u32 	%r30, %r4;
	add.s32 	%r32, %r30, %r11;
	st.global.f32 	[%r32], %f269;

BB50_38:
	ret;
}

	// .globl	vec_log10f
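// vec_log10f: out[i] = log10(in[i]) = ln(x) * log10(e).
// Finite positive x (BB51_3): split into exponent and mantissa m in
// [1,2) (folded to ~[0.68,1.36) around 0f3FAE147B), ln(m) from the
// series in z = 2(m-1)/(m+1) with coefficient 1/12 = 0f3DAAAB1A,
// then ln(x) = e*ln(2) (0f3F317218) + ln(m), finally scaled by
// 0f3EDE5BD9 (log10(e) ~ 0.4342945). The lg2.approx fallback
// (BB51_2) is reached only for x <= 0, +inf, or NaN, where the
// positive scale factor leaves the special result unchanged.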
.visible .entry vec_log10f(
	.param .u32 vec_log10f_param_0,
	.param .u32 vec_log10f_param_1,
	.param .u32 vec_log10f_param_2
)
{
	.reg .pred 	%p<7>;
	.reg .f32 	%f<34>;
	.reg .b32 	%r<18>;


	ld.param.u32 	%r4, [vec_log10f_param_0];
	ld.param.u32 	%r2, [vec_log10f_param_1];
	ld.param.u32 	%r3, [vec_log10f_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB51_5;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	setp.gt.f32	%p2, %f1, 0f00000000;
	setp.lt.f32	%p3, %f1, 0f7F800000;
	and.pred  	%p4, %p2, %p3;
	@%p4 bra 	BB51_3;
	bra.uni 	BB51_2;

BB51_3:
	setp.lt.f32	%p5, %f1, 0f00800000;
	mul.f32 	%f7, %f1, 0f4B800000;
	selp.f32	%f8, %f7, %f1, %p5;
	selp.f32	%f9, 0fC3170000, 0fC2FE0000, %p5;
	mov.b32 	 %r11, %f8;
	and.b32  	%r12, %r11, 8388607;
	or.b32  	%r13, %r12, 1065353216;
	mov.b32 	 %f10, %r13;
	shr.u32 	%r14, %r11, 23;
	cvt.rn.f32.u32	%f11, %r14;
	add.f32 	%f12, %f9, %f11;
	setp.gt.f32	%p6, %f10, 0f3FAE147B;
	mul.f32 	%f13, %f10, 0f3F000000;
	add.f32 	%f14, %f12, 0f3F800000;
	selp.f32	%f15, %f13, %f10, %p6;
	selp.f32	%f16, %f14, %f12, %p6;
	add.f32 	%f6, %f15, 0f3F800000;
	add.f32 	%f17, %f15, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f5,%f6;
	// inline asm
	mul.f32 	%f18, %f17, %f17;
	neg.f32 	%f19, %f18;
	mul.rn.f32 	%f20, %f5, %f19;
	add.rn.f32 	%f21, %f17, %f20;
	mul.f32 	%f22, %f21, %f21;
	mov.f32 	%f23, 0f3C4C6A36;
	mov.f32 	%f24, 0f3B1E94E6;
	fma.rn.f32 	%f25, %f24, %f22, %f23;
	mov.f32 	%f26, 0f3DAAAB1A;
	fma.rn.f32 	%f27, %f25, %f22, %f26;
	mul.f32 	%f28, %f22, %f27;
	fma.rn.f32 	%f29, %f28, %f21, %f20;
	add.f32 	%f30, %f17, %f29;
	mov.f32 	%f31, 0f3F317218;
	fma.rn.f32 	%f33, %f16, %f31, %f30;
	bra.uni 	BB51_4;

BB51_2:
	lg2.approx.f32 	%f33, %f1;

BB51_4:
	cvta.to.global.u32 	%r15, %r2;
	add.s32 	%r17, %r15, %r9;
	mul.f32 	%f32, %f33, 0f3EDE5BD9;
	st.global.f32 	[%r17], %f32;

BB51_5:
	ret;
}

	// .globl	vec_log1pf
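// vec_log1pf: out[i] = ln(1 + in[i]).
// For -0.394 <= x <= 0.65 (0fBEC9BA5E .. 0f3F266666, BB52_5):
// z = 2x/(x+2) is built as x - x*(x/(x+2)) (div.approx plus fma
// residual) so no precision is lost adding 1, then
// ln(1+x) = z + z^3*(1/12 + ...) as in vec_logf. Outside that window
// (BB52_2): the ordinary log path on 1 + x, with lg2.approx covering
// nonpositive and non-finite 1 + x.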
.visible .entry vec_log1pf(
	.param .u32 vec_log1pf_param_0,
	.param .u32 vec_log1pf_param_1,
	.param .u32 vec_log1pf_param_2
)
{
	.reg .pred 	%p<10>;
	.reg .f32 	%f<48>;
	.reg .b32 	%r<18>;


	ld.param.u32 	%r4, [vec_log1pf_param_0];
	ld.param.u32 	%r2, [vec_log1pf_param_1];
	ld.param.u32 	%r3, [vec_log1pf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB52_7;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	setp.le.f32	%p2, %f1, 0f3F266666;
	setp.ge.f32	%p3, %f1, 0fBEC9BA5E;
	and.pred  	%p4, %p3, %p2;
	@%p4 bra 	BB52_5;
	bra.uni 	BB52_2;

BB52_5:
	add.f32 	%f34, %f1, 0f40000000;
	div.approx.f32 	%f35, %f1, %f34;
	neg.f32 	%f36, %f1;
	mul.rn.f32 	%f37, %f36, %f35;
	add.rn.f32 	%f38, %f1, %f37;
	mul.f32 	%f39, %f38, %f38;
	mov.f32 	%f40, 0f3C4C4BE0;
	mov.f32 	%f41, 0f3B2063C3;
	fma.rn.f32 	%f42, %f41, %f39, %f40;
	mov.f32 	%f43, 0f3DAAAB50;
	fma.rn.f32 	%f44, %f42, %f39, %f43;
	mul.f32 	%f45, %f39, %f44;
	fma.rn.f32 	%f46, %f45, %f38, %f37;
	add.f32 	%f47, %f1, %f46;
	bra.uni 	BB52_6;

BB52_2:
	add.f32 	%f2, %f1, 0f3F800000;
	setp.gt.f32	%p5, %f2, 0f00000000;
	setp.lt.f32	%p6, %f2, 0f7F800000;
	and.pred  	%p7, %p5, %p6;
	@%p7 bra 	BB52_4;
	bra.uni 	BB52_3;

BB52_4:
	setp.lt.f32	%p8, %f2, 0f00800000;
	mul.f32 	%f9, %f2, 0f4B800000;
	selp.f32	%f10, %f9, %f2, %p8;
	selp.f32	%f11, 0fC3170000, 0fC2FE0000, %p8;
	mov.b32 	 %r11, %f10;
	and.b32  	%r12, %r11, 8388607;
	or.b32  	%r13, %r12, 1065353216;
	mov.b32 	 %f12, %r13;
	shr.u32 	%r14, %r11, 23;
	cvt.rn.f32.u32	%f13, %r14;
	add.f32 	%f14, %f11, %f13;
	setp.gt.f32	%p9, %f12, 0f3FAE147B;
	mul.f32 	%f15, %f12, 0f3F000000;
	add.f32 	%f16, %f14, 0f3F800000;
	selp.f32	%f17, %f15, %f12, %p9;
	selp.f32	%f18, %f16, %f14, %p9;
	add.f32 	%f8, %f17, 0f3F800000;
	add.f32 	%f19, %f17, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f7,%f8;
	// inline asm
	mul.f32 	%f20, %f19, %f19;
	neg.f32 	%f21, %f20;
	mul.rn.f32 	%f22, %f7, %f21;
	add.rn.f32 	%f23, %f19, %f22;
	mul.f32 	%f24, %f23, %f23;
	mov.f32 	%f25, 0f3C4C6A36;
	mov.f32 	%f26, 0f3B1E94E6;
	fma.rn.f32 	%f27, %f26, %f24, %f25;
	mov.f32 	%f28, 0f3DAAAB1A;
	fma.rn.f32 	%f29, %f27, %f24, %f28;
	mul.f32 	%f30, %f24, %f29;
	fma.rn.f32 	%f31, %f30, %f23, %f22;
	add.f32 	%f32, %f19, %f31;
	mov.f32 	%f33, 0f3F317218;
	fma.rn.f32 	%f47, %f18, %f33, %f32;
	bra.uni 	BB52_6;

BB52_3:
	lg2.approx.f32 	%f47, %f2;

BB52_6:
	cvta.to.global.u32 	%r15, %r2;
	add.s32 	%r17, %r15, %r9;
	st.global.f32 	[%r17], %f47;

BB52_7:
	ret;
}

	// .globl	vec_log2f
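// vec_log2f: out[i] = log2(in[i]) = ln(x) * log2(e); identical to
// vec_logf except for the final multiply by 0f3FB8AA3B
// (log2(e) ~ 1.4426950), which again passes NaN/+-inf through.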
.visible .entry vec_log2f(
	.param .u32 vec_log2f_param_0,
	.param .u32 vec_log2f_param_1,
	.param .u32 vec_log2f_param_2
)
{
	.reg .pred 	%p<7>;
	.reg .f32 	%f<34>;
	.reg .b32 	%r<18>;


	ld.param.u32 	%r4, [vec_log2f_param_0];
	ld.param.u32 	%r2, [vec_log2f_param_1];
	ld.param.u32 	%r3, [vec_log2f_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB53_5;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	setp.gt.f32	%p2, %f1, 0f00000000;
	setp.lt.f32	%p3, %f1, 0f7F800000;
	and.pred  	%p4, %p2, %p3;
	@%p4 bra 	BB53_3;
	bra.uni 	BB53_2;

BB53_3:
	setp.lt.f32	%p5, %f1, 0f00800000;
	mul.f32 	%f7, %f1, 0f4B800000;
	selp.f32	%f8, %f7, %f1, %p5;
	selp.f32	%f9, 0fC3170000, 0fC2FE0000, %p5;
	mov.b32 	 %r11, %f8;
	and.b32  	%r12, %r11, 8388607;
	or.b32  	%r13, %r12, 1065353216;
	mov.b32 	 %f10, %r13;
	shr.u32 	%r14, %r11, 23;
	cvt.rn.f32.u32	%f11, %r14;
	add.f32 	%f12, %f9, %f11;
	setp.gt.f32	%p6, %f10, 0f3FAE147B;
	mul.f32 	%f13, %f10, 0f3F000000;
	add.f32 	%f14, %f12, 0f3F800000;
	selp.f32	%f15, %f13, %f10, %p6;
	selp.f32	%f16, %f14, %f12, %p6;
	add.f32 	%f6, %f15, 0f3F800000;
	add.f32 	%f17, %f15, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f5,%f6;
	// inline asm
	mul.f32 	%f18, %f17, %f17;
	neg.f32 	%f19, %f18;
	mul.rn.f32 	%f20, %f5, %f19;
	add.rn.f32 	%f21, %f17, %f20;
	mul.f32 	%f22, %f21, %f21;
	mov.f32 	%f23, 0f3C4C6A36;
	mov.f32 	%f24, 0f3B1E94E6;
	fma.rn.f32 	%f25, %f24, %f22, %f23;
	mov.f32 	%f26, 0f3DAAAB1A;
	fma.rn.f32 	%f27, %f25, %f22, %f26;
	mul.f32 	%f28, %f22, %f27;
	fma.rn.f32 	%f29, %f28, %f21, %f20;
	add.f32 	%f30, %f17, %f29;
	mov.f32 	%f31, 0f3F317218;
	fma.rn.f32 	%f33, %f16, %f31, %f30;
	bra.uni 	BB53_4;

BB53_2:
	lg2.approx.f32 	%f33, %f1;

BB53_4:
	cvta.to.global.u32 	%r15, %r2;
	add.s32 	%r17, %r15, %r9;
	mul.f32 	%f32, %f33, 0f3FB8AA3B;
	st.global.f32 	[%r17], %f32;

BB53_5:
	ret;
}

	// .globl	vec_logbf
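// vec_logbf: out[i] = logb(in[i]), the unbiased exponent as a float.
// Normal numbers (BB54_2): (bits >> 23) - 127; bits > 0x7F7FFFFF
// (inf or NaN) instead return x*x, giving +inf or a quiet NaN.
// Subnormals (BB54_3): exponent = -118 - clz(bits), since the top
// set bit of the significand sits at 2^(-118 - clz); +-0 returns
// -inf (0fFF800000).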
.visible .entry vec_logbf(
	.param .u32 vec_logbf_param_0,
	.param .u32 vec_logbf_param_1,
	.param .u32 vec_logbf_param_2
)
{
	.reg .pred 	%p<5>;
	.reg .f32 	%f<10>;
	.reg .b32 	%r<20>;


	ld.param.u32 	%r5, [vec_logbf_param_0];
	ld.param.u32 	%r3, [vec_logbf_param_1];
	ld.param.u32 	%r4, [vec_logbf_param_2];
	mov.u32 	%r6, %tid.x;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r6;
	setp.ge.u32	%p1, %r1, %r5;
	@%p1 bra 	BB54_5;

	cvta.to.global.u32 	%r9, %r4;
	shl.b32 	%r10, %r1, 2;
	add.s32 	%r11, %r9, %r10;
	ld.global.f32 	%f1, [%r11];
	abs.f32 	%f5, %f1;
	mov.b32 	 %r2, %f5;
	setp.lt.u32	%p2, %r2, 8388608;
	@%p2 bra 	BB54_3;
	bra.uni 	BB54_2;

BB54_3:
	clz.b32 	%r14, %r2;
	mov.u32 	%r15, -118;
	sub.s32 	%r16, %r15, %r14;
	cvt.rn.f32.s32	%f8, %r16;
	setp.eq.f32	%p4, %f1, 0f00000000;
	selp.f32	%f9, 0fFF800000, %f8, %p4;
	bra.uni 	BB54_4;

BB54_2:
	shr.u32 	%r12, %r2, 23;
	add.s32 	%r13, %r12, -127;
	cvt.rn.f32.s32	%f6, %r13;
	mul.f32 	%f7, %f1, %f1;
	setp.gt.u32	%p3, %r2, 2139095039;
	selp.f32	%f9, %f7, %f6, %p3;

BB54_4:
	cvta.to.global.u32 	%r17, %r3;
	add.s32 	%r19, %r17, %r10;
	st.global.f32 	[%r19], %f9;

BB54_5:
	ret;
}

	// .globl	vec_logf
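// vec_logf: out[i] = ln(in[i]). Finite positive x (BB55_3):
// subnormals are prescaled by 2^24 (0f4B800000) with the exponent
// bias switched from 127 (0fC2FE0000) to 151 (0fC3170000); the
// mantissa m in [1,2) is folded to ~[0.68,1.36) around 0f3FAE147B;
// ln(m) comes from the series in z = 2(m-1)/(m+1) with coefficient
// 1/12 (0f3DAAAB1A), and ln(x) = e*ln(2) (0f3F317218) + ln(m).
// Everything else (x <= 0, inf, NaN) falls through to lg2.approx
// in BB55_2.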
.visible .entry vec_logf(
	.param .u32 vec_logf_param_0,
	.param .u32 vec_logf_param_1,
	.param .u32 vec_logf_param_2
)
{
	.reg .pred 	%p<7>;
	.reg .f32 	%f<33>;
	.reg .b32 	%r<18>;


	ld.param.u32 	%r4, [vec_logf_param_0];
	ld.param.u32 	%r2, [vec_logf_param_1];
	ld.param.u32 	%r3, [vec_logf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB55_5;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	setp.gt.f32	%p2, %f1, 0f00000000;
	setp.lt.f32	%p3, %f1, 0f7F800000;
	and.pred  	%p4, %p2, %p3;
	@%p4 bra 	BB55_3;
	bra.uni 	BB55_2;

BB55_3:
	setp.lt.f32	%p5, %f1, 0f00800000;
	mul.f32 	%f7, %f1, 0f4B800000;
	selp.f32	%f8, %f7, %f1, %p5;
	selp.f32	%f9, 0fC3170000, 0fC2FE0000, %p5;
	mov.b32 	 %r11, %f8;
	and.b32  	%r12, %r11, 8388607;
	or.b32  	%r13, %r12, 1065353216;
	mov.b32 	 %f10, %r13;
	shr.u32 	%r14, %r11, 23;
	cvt.rn.f32.u32	%f11, %r14;
	add.f32 	%f12, %f9, %f11;
	setp.gt.f32	%p6, %f10, 0f3FAE147B;
	mul.f32 	%f13, %f10, 0f3F000000;
	add.f32 	%f14, %f12, 0f3F800000;
	selp.f32	%f15, %f13, %f10, %p6;
	selp.f32	%f16, %f14, %f12, %p6;
	add.f32 	%f6, %f15, 0f3F800000;
	add.f32 	%f17, %f15, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f5,%f6;
	// inline asm
	mul.f32 	%f18, %f17, %f17;
	neg.f32 	%f19, %f18;
	mul.rn.f32 	%f20, %f5, %f19;
	add.rn.f32 	%f21, %f17, %f20;
	mul.f32 	%f22, %f21, %f21;
	mov.f32 	%f23, 0f3C4C6A36;
	mov.f32 	%f24, 0f3B1E94E6;
	fma.rn.f32 	%f25, %f24, %f22, %f23;
	mov.f32 	%f26, 0f3DAAAB1A;
	fma.rn.f32 	%f27, %f25, %f22, %f26;
	mul.f32 	%f28, %f22, %f27;
	fma.rn.f32 	%f29, %f28, %f21, %f20;
	add.f32 	%f30, %f17, %f29;
	mov.f32 	%f31, 0f3F317218;
	fma.rn.f32 	%f32, %f16, %f31, %f30;
	bra.uni 	BB55_4;

BB55_2:
	lg2.approx.f32 	%f32, %f1;

BB55_4:
	cvta.to.global.u32 	%r15, %r2;
	add.s32 	%r17, %r15, %r9;
	st.global.f32 	[%r17], %f32;

BB55_5:
	ret;
}

	// .globl	vec_normcdff
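// vec_normcdff: out[i] = Phi(x) = 0.5 * erfc(-x/sqrt(2)).
// |x| is first clamped to 14.5 (bit pattern 1097334784 = 0x41680000),
// then u = x * (-1/sqrt(2)) is formed as a two-term product
// (0fBF3504F3 hi, 0fB24FE77A lo) so the rounding residual is kept.
// erfc(|u|) uses the same P(t)/(1 + 2|u|) rational (t =
// (|u|-4)/(|u|+4)) as the erfcx kernels, times a compensated
// exp(-u^2); |u| > 10.055 -> 0, and u < 0 takes 2 - erfc(|u|).
// For u < -1 the block guarded by %p5 (just before BB56_3) refines
// the result with the residual of the -x/sqrt(2) product, before
// the final multiply by 0.5 (0f3F000000).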
.visible .entry vec_normcdff(
	.param .u32 vec_normcdff_param_0,
	.param .u32 vec_normcdff_param_1,
	.param .u32 vec_normcdff_param_2
)
{
	.reg .pred 	%p<6>;
	.reg .f32 	%f<81>;
	.reg .b32 	%r<17>;


	ld.param.u32 	%r4, [vec_normcdff_param_0];
	ld.param.u32 	%r2, [vec_normcdff_param_1];
	ld.param.u32 	%r3, [vec_normcdff_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB56_4;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f13, [%r10];
	abs.f32 	%f14, %f13;
	setp.gt.f32	%p2, %f14, 0f41680000;
	mov.b32 	 %r11, %f13;
	and.b32  	%r12, %r11, -2147483648;
	or.b32  	%r13, %r12, 1097334784;
	mov.b32 	 %f15, %r13;
	selp.f32	%f16, %f15, %f13, %p2;
	mov.f32 	%f17, 0fBF3504F3;
	mul.rn.f32 	%f1, %f16, %f17;
	neg.f32 	%f18, %f1;
	fma.rn.f32 	%f19, %f16, %f17, %f18;
	mov.f32 	%f20, 0fB24FE77A;
	fma.rn.f32 	%f2, %f16, %f20, %f19;
	add.rn.f32 	%f3, %f1, %f2;
	abs.f32 	%f21, %f3;
	add.f32 	%f8, %f21, 0f40800000;
	// inline asm
	rcp.approx.ftz.f32 %f7,%f8;
	// inline asm
	add.f32 	%f22, %f21, 0fC0800000;
	mul.rn.f32 	%f23, %f22, %f7;
	add.f32 	%f24, %f23, 0f3F800000;
	mov.f32 	%f25, 0fC0800000;
	fma.rn.f32 	%f26, %f25, %f24, %f21;
	neg.f32 	%f27, %f23;
	fma.rn.f32 	%f28, %f27, %f21, %f26;
	fma.rn.f32 	%f29, %f7, %f28, %f23;
	mov.f32 	%f30, 0f3BE6E05B;
	mov.f32 	%f31, 0f3A69A091;
	fma.rn.f32 	%f32, %f31, %f29, %f30;
	mov.f32 	%f33, 0fBC81FB4B;
	fma.rn.f32 	%f34, %f32, %f29, %f33;
	mov.f32 	%f35, 0f3D15373B;
	fma.rn.f32 	%f36, %f34, %f29, %f35;
	mov.f32 	%f37, 0fBD887C5A;
	fma.rn.f32 	%f38, %f36, %f29, %f37;
	mov.f32 	%f39, 0f3DC021D5;
	fma.rn.f32 	%f40, %f38, %f29, %f39;
	mov.f32 	%f41, 0fBDCED424;
	fma.rn.f32 	%f42, %f40, %f29, %f41;
	mov.f32 	%f43, 0f3D8B74DE;
	fma.rn.f32 	%f44, %f42, %f29, %f43;
	mov.f32 	%f45, 0f3C7BF170;
	fma.rn.f32 	%f46, %f44, %f29, %f45;
	mov.f32 	%f47, 0fBE0EF8D4;
	fma.rn.f32 	%f48, %f46, %f29, %f47;
	mov.f32 	%f49, 0f3F9DD2C9;
	fma.rn.f32 	%f50, %f48, %f29, %f49;
	mov.f32 	%f51, 0f3F800000;
	mov.f32 	%f52, 0f40000000;
	fma.rn.f32 	%f10, %f52, %f21, %f51;
	// inline asm
	rcp.approx.ftz.f32 %f9,%f10;
	// inline asm
	mul.rn.f32 	%f53, %f50, %f9;
	mul.f32 	%f54, %f53, 0fC0000000;
	fma.rn.f32 	%f55, %f21, %f54, %f50;
	sub.f32 	%f56, %f55, %f53;
	fma.rn.f32 	%f57, %f56, %f9, %f53;
	mul.f32 	%f58, %f21, %f21;
	neg.f32 	%f59, %f58;
	mul.f32 	%f60, %f58, 0fBFB8AA3B;
	cvt.rzi.f32.f32	%f61, %f60;
	mov.f32 	%f62, 0fBF317200;
	fma.rn.f32 	%f63, %f61, %f62, %f59;
	mov.f32 	%f64, 0fB5BFBE8E;
	fma.rn.f32 	%f65, %f61, %f64, %f63;
	mul.f32 	%f12, %f65, 0f3FB8AA3B;
	// inline asm
	ex2.approx.ftz.f32 %f11,%f12;
	// inline asm
	add.f32 	%f66, %f61, 0f00000000;
	ex2.approx.f32 	%f67, %f66;
	mul.f32 	%f68, %f11, %f67;
	neg.f32 	%f69, %f21;
	fma.rn.f32 	%f70, %f69, %f21, %f58;
	fma.rn.f32 	%f71, %f68, %f70, %f68;
	mul.f32 	%f72, %f57, %f71;
	setp.gt.f32	%p3, %f21, 0f4120E148;
	selp.f32	%f73, 0f00000000, %f72, %p3;
	setp.lt.f32	%p4, %f3, 0f00000000;
	sub.f32 	%f74, %f52, %f73;
	selp.f32	%f80, %f74, %f73, %p4;
	setp.geu.f32	%p5, %f16, 0fBF800000;
	@%p5 bra 	BB56_3;

	sub.f32 	%f75, %f1, %f3;
	add.rn.f32 	%f76, %f75, %f2;
	mul.f32 	%f77, %f3, 0fC0000000;
	mul.f32 	%f78, %f77, %f80;
	fma.rn.f32 	%f80, %f78, %f76, %f80;

BB56_3:
	cvta.to.global.u32 	%r14, %r2;
	add.s32 	%r16, %r14, %r9;
	mul.f32 	%f79, %f80, 0f3F000000;
	st.global.f32 	[%r16], %f79;

BB56_4:
	ret;
}

	// .globl	vec_normcdfinvf
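	// vec_normcdfinvf: element-wise inverse of the standard normal CDF,
	// result[i] = normcdfinvf(x[i]). The structure below is consistent with
	// normcdfinv(p) = -sqrt(2) * erfcinv(2p): the input is doubled up front
	// (f1 = 2p), a central polynomial in the log of t*(2-t) or a tail path in
	// rsqrt space is selected, and the final scale 0fBFB504F3 is -sqrt(2).
	// A hedged reading of compiler output, not an authoritative derivation.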
.visible .entry vec_normcdfinvf(
	.param .u32 vec_normcdfinvf_param_0,
	.param .u32 vec_normcdfinvf_param_1,
	.param .u32 vec_normcdfinvf_param_2
)
{
	.reg .pred 	%p<6>;
	.reg .f32 	%f<60>;
	.reg .b32 	%r<14>;


	ld.param.u32 	%r4, [vec_normcdfinvf_param_0];
	ld.param.u32 	%r2, [vec_normcdfinvf_param_1];
	ld.param.u32 	%r3, [vec_normcdfinvf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB57_5;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f7, [%r10];
	add.f32 	%f1, %f7, %f7;
	neg.f32 	%f2, %f1;
	mov.f32 	%f8, 0f40000000;
	add.rn.f32 	%f3, %f8, %f2;
	setp.le.f32	%p2, %f1, 0f3FFF9097;
	setp.ge.f32	%p3, %f1, 0f3B5ED289;
	and.pred  	%p4, %p3, %p2;
	@%p4 bra 	BB57_3;
	bra.uni 	BB57_2;

BB57_3:
	mul.rn.f32 	%f35, %f3, %f1;
	// inline asm
	lg2.approx.ftz.f32 %f34,%f35;
	// inline asm
	neg.f32 	%f36, %f34;
	mov.f32 	%f37, 0f3221F645;
	mov.f32 	%f38, 0fAF8A6370;
	fma.rn.f32 	%f39, %f38, %f36, %f37;
	mov.f32 	%f40, 0fB4016FDA;
	fma.rn.f32 	%f41, %f39, %f36, %f40;
	mov.f32 	%f42, 0f3468F846;
	fma.rn.f32 	%f43, %f41, %f36, %f42;
	mov.f32 	%f44, 0f370742AA;
	fma.rn.f32 	%f45, %f43, %f36, %f44;
	mov.f32 	%f46, 0fB804DB4D;
	fma.rn.f32 	%f47, %f45, %f36, %f46;
	mov.f32 	%f48, 0fBA4AFEA1;
	fma.rn.f32 	%f49, %f47, %f36, %f48;
	mov.f32 	%f50, 0f3BB5C027;
	fma.rn.f32 	%f51, %f49, %f36, %f50;
	mov.f32 	%f52, 0f3E24AE0F;
	fma.rn.f32 	%f53, %f51, %f36, %f52;
	mov.f32 	%f54, 0f3F62DFC4;
	fma.rn.f32 	%f55, %f53, %f36, %f54;
	fma.rn.f32 	%f59, %f55, %f2, %f55;
	bra.uni 	BB57_4;

BB57_2:
	setp.gt.f32	%p5, %f1, 0f3F800000;
	selp.f32	%f13, %f3, %f1, %p5;
	lg2.approx.f32 	%f14, %f13;
	neg.f32 	%f10, %f14;
	// inline asm
	rsqrt.approx.ftz.f32 %f9,%f10;
	// inline asm
	mov.f32 	%f15, 0f42FEF829;
	mov.f32 	%f16, 0fC27C73F1;
	fma.rn.f32 	%f17, %f16, %f9, %f15;
	mov.f32 	%f18, 0fC2E4361C;
	fma.rn.f32 	%f19, %f17, %f9, %f18;
	mov.f32 	%f20, 0f42714D9B;
	fma.rn.f32 	%f21, %f19, %f9, %f20;
	mov.f32 	%f22, 0fC1AE51B3;
	fma.rn.f32 	%f23, %f21, %f9, %f22;
	mov.f32 	%f24, 0f40CEF504;
	fma.rn.f32 	%f25, %f23, %f9, %f24;
	mov.f32 	%f26, 0fBFEA9E05;
	fma.rn.f32 	%f27, %f25, %f9, %f26;
	mov.f32 	%f28, 0fBCF871F4;
	fma.rn.f32 	%f29, %f27, %f9, %f28;
	mov.f32 	%f30, 0f3F553775;
	fma.rn.f32 	%f31, %f29, %f9, %f30;
	// inline asm
	rcp.approx.ftz.f32 %f11,%f9;
	// inline asm
	mul.rn.f32 	%f32, %f31, %f11;
	neg.f32 	%f33, %f32;
	selp.f32	%f59, %f33, %f32, %p5;

BB57_4:
	cvta.to.global.u32 	%r11, %r2;
	mul.f32 	%f56, %f59, 0fBFB504F3;
	mov.f32 	%f57, 0f00000000;
	add.rn.f32 	%f58, %f56, %f57;
	add.s32 	%r13, %r11, %r9;
	st.global.f32 	[%r13], %f58;

BB57_5:
	ret;
}

	// .globl	vec_rcbrtf
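	// vec_rcbrtf: element-wise reciprocal cube root, result[i] = rcbrtf(x[i]).
	// Computed as exp2(-log2|x| / 3) (0fBEAAAAAB is -1/3), refined with one
	// Newton step y += (y/3) * (1 - |x|*y^3) (0f3EAAAAAB is 1/3), then the
	// sign of x is restored. The x+x == x test appears to catch 0 and
	// +/-infinity, which fall through to the 1/x fast path instead.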
.visible .entry vec_rcbrtf(
	.param .u32 vec_rcbrtf_param_0,
	.param .u32 vec_rcbrtf_param_1,
	.param .u32 vec_rcbrtf_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<21>;
	.reg .b32 	%r<15>;


	ld.param.u32 	%r4, [vec_rcbrtf_param_0];
	ld.param.u32 	%r2, [vec_rcbrtf_param_1];
	ld.param.u32 	%r3, [vec_rcbrtf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB58_4;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	abs.f32 	%f7, %f1;
	lg2.approx.f32 	%f8, %f7;
	mul.f32 	%f6, %f8, 0fBEAAAAAB;
	// inline asm
	ex2.approx.ftz.f32 %f5,%f6;
	// inline asm
	mul.f32 	%f9, %f7, %f5;
	neg.f32 	%f10, %f9;
	mul.f32 	%f11, %f5, %f5;
	mov.f32 	%f12, 0f3F800000;
	fma.rn.f32 	%f13, %f11, %f10, %f12;
	mul.f32 	%f14, %f5, 0f3EAAAAAB;
	fma.rn.f32 	%f15, %f13, %f14, %f5;
	mov.b32 	 %r11, %f1;
	setp.lt.s32	%p2, %r11, 0;
	neg.f32 	%f16, %f15;
	selp.f32	%f20, %f16, %f15, %p2;
	add.f32 	%f17, %f1, %f1;
	setp.neu.f32	%p3, %f17, %f1;
	@%p3 bra 	BB58_3;

	// inline asm
	rcp.approx.ftz.f32 %f20,%f1;
	// inline asm

BB58_3:
	cvta.to.global.u32 	%r12, %r2;
	add.s32 	%r14, %r12, %r9;
	st.global.f32 	[%r14], %f20;

BB58_4:
	ret;
}

	// .globl	vec_rintf
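	// vec_rintf: element-wise round to nearest, ties to even, via a single
	// cvt.rni.f32.f32 -- result[i] = rintf(x[i]).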
.visible .entry vec_rintf(
	.param .u32 vec_rintf_param_0,
	.param .u32 vec_rintf_param_1,
	.param .u32 vec_rintf_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_rintf_param_0];
	ld.param.u32 	%r2, [vec_rintf_param_1];
	ld.param.u32 	%r3, [vec_rintf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB59_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	cvt.rni.f32.f32	%f2, %f1;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f2;

BB59_2:
	ret;
}

	// .globl	vec_roundf
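	// vec_roundf: element-wise round half away from zero, result[i] =
	// roundf(x[i]). Adds copysign(0.5f, x) (the sign bit OR'd with
	// 1056964608 = 0x3F000000) and truncates; |x| > 2^23 (0f4B000000) is
	// already integral and returned unchanged, while |x| < 0.5 takes the
	// late cvt.rzi path so the result keeps the sign of x (+/-0).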
.visible .entry vec_roundf(
	.param .u32 vec_roundf_param_0,
	.param .u32 vec_roundf_param_1,
	.param .u32 vec_roundf_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<10>;
	.reg .b32 	%r<17>;


	ld.param.u32 	%r4, [vec_roundf_param_0];
	ld.param.u32 	%r2, [vec_roundf_param_1];
	ld.param.u32 	%r3, [vec_roundf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB60_4;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	abs.f32 	%f5, %f1;
	mov.b32 	 %r11, %f1;
	and.b32  	%r12, %r11, -2147483648;
	or.b32  	%r13, %r12, 1056964608;
	mov.b32 	 %f6, %r13;
	add.f32 	%f7, %f1, %f6;
	cvt.rzi.f32.f32	%f8, %f7;
	setp.gt.f32	%p2, %f5, 0f4B000000;
	selp.f32	%f9, %f1, %f8, %p2;
	setp.geu.f32	%p3, %f5, 0f3F000000;
	@%p3 bra 	BB60_3;

	cvt.rzi.f32.f32	%f9, %f1;

BB60_3:
	cvta.to.global.u32 	%r14, %r2;
	add.s32 	%r16, %r14, %r9;
	st.global.f32 	[%r16], %f9;

BB60_4:
	ret;
}

	// .globl	vec_rsqrtf
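	// vec_rsqrtf: element-wise reciprocal square root via the hardware
	// rsqrt.approx instruction -- result[i] = rsqrtf(x[i]).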
.visible .entry vec_rsqrtf(
	.param .u32 vec_rsqrtf_param_0,
	.param .u32 vec_rsqrtf_param_1,
	.param .u32 vec_rsqrtf_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_rsqrtf_param_0];
	ld.param.u32 	%r2, [vec_rsqrtf_param_1];
	ld.param.u32 	%r3, [vec_rsqrtf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB61_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	rsqrt.approx.f32 	%f2, %f1;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f2;

BB61_2:
	ret;
}

	// .globl	vec_sinf
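	// vec_sinf: element-wise sine, result[i] = sinf(x[i]). Small arguments
	// use a three-constant Cody-Waite reduction by pi/2 (0f3FC90FDA,
	// 0f33A22168, 0f27C234C5); above the 0f47CE4780 cutoff the kernel
	// switches to a Payne-Hanek style path that multiplies the mantissa
	// against the 2/pi table __cudart_i2opi_f in 32-bit limbs, staged in the
	// 28-byte local depot. The low two quadrant bits then pick between the
	// sine and cosine polynomial pairs (BB62_14..BB62_20) and flip the sign
	// for quadrants 2 and 3 (BB62_22).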
.visible .entry vec_sinf(
	.param .u32 vec_sinf_param_0,
	.param .u32 vec_sinf_param_1,
	.param .u32 vec_sinf_param_2
)
{
	.local .align 4 .b8 	__local_depot62[28];
	.reg .b32 	%SP;
	.reg .b32 	%SPL;
	.reg .pred 	%p<15>;
	.reg .f32 	%f<48>;
	.reg .b32 	%r<119>;


	mov.u32 	%r118, __local_depot62;
	cvta.local.u32 	%SP, %r118;
	ld.param.u32 	%r46, [vec_sinf_param_0];
	ld.param.u32 	%r44, [vec_sinf_param_1];
	ld.param.u32 	%r45, [vec_sinf_param_2];
	add.u32 	%r47, %SP, 0;
	cvta.to.local.u32 	%r1, %r47;
	mov.u32 	%r48, %ntid.x;
	mov.u32 	%r49, %ctaid.x;
	mov.u32 	%r50, %tid.x;
	mad.lo.s32 	%r2, %r48, %r49, %r50;
	setp.ge.u32	%p1, %r2, %r46;
	@%p1 bra 	BB62_24;

	cvta.to.global.u32 	%r51, %r45;
	shl.b32 	%r52, %r2, 2;
	add.s32 	%r53, %r51, %r52;
	ld.global.f32 	%f43, [%r53];
	abs.f32 	%f19, %f43;
	setp.neu.f32	%p2, %f19, 0f7F800000;
	@%p2 bra 	BB62_3;

	mov.f32 	%f20, 0f00000000;
	mul.rn.f32 	%f43, %f43, %f20;

BB62_3:
	mul.f32 	%f21, %f43, 0f3F22F983;
	cvt.rni.s32.f32	%r117, %f21;
	cvt.rn.f32.s32	%f22, %r117;
	neg.f32 	%f23, %f22;
	mov.f32 	%f24, 0f3FC90FDA;
	fma.rn.f32 	%f25, %f23, %f24, %f43;
	mov.f32 	%f26, 0f33A22168;
	fma.rn.f32 	%f27, %f23, %f26, %f25;
	mov.f32 	%f28, 0f27C234C5;
	fma.rn.f32 	%f44, %f23, %f28, %f27;
	abs.f32 	%f29, %f43;
	setp.leu.f32	%p3, %f29, 0f47CE4780;
	@%p3 bra 	BB62_13;

	mov.b32 	 %r4, %f43;
	shr.u32 	%r5, %r4, 23;
	bfe.u32 	%r57, %r4, 23, 8;
	add.s32 	%r58, %r57, -128;
	shl.b32 	%r59, %r4, 8;
	or.b32  	%r6, %r59, -2147483648;
	shr.u32 	%r7, %r58, 5;
	mov.u32 	%r109, 0;
	mov.u32 	%r107, 6;
	mov.u32 	%r106, __cudart_i2opi_f;
	mov.u32 	%r108, %r1;

BB62_5:
	.pragma "nounroll";
	mov.u32 	%r10, %r108;
	ld.const.u32 	%r62, [%r106];
	// inline asm
	{
	mad.lo.cc.u32   %r60, %r62, %r6, %r109;
	madc.hi.u32     %r109, %r62, %r6,  0;
	}
	// inline asm
	st.local.u32 	[%r10], %r60;
	add.s32 	%r13, %r10, 4;
	add.s32 	%r106, %r106, 4;
	add.s32 	%r107, %r107, -1;
	setp.ne.s32	%p4, %r107, 0;
	mov.u32 	%r108, %r13;
	@%p4 bra 	BB62_5;

	and.b32  	%r16, %r4, -2147483648;
	mov.u32 	%r65, 4;
	sub.s32 	%r66, %r65, %r7;
	shl.b32 	%r67, %r66, 2;
	add.s32 	%r68, %r67, %r1;
	st.local.u32 	[%r1+24], %r109;
	ld.local.u32 	%r110, [%r68+8];
	ld.local.u32 	%r111, [%r68+4];
	and.b32  	%r20, %r5, 31;
	setp.eq.s32	%p5, %r20, 0;
	@%p5 bra 	BB62_8;

	mov.u32 	%r69, 32;
	sub.s32 	%r70, %r69, %r20;
	shr.u32 	%r71, %r111, %r70;
	shl.b32 	%r72, %r110, %r20;
	add.s32 	%r110, %r71, %r72;
	add.s32 	%r105, %r68, 8;
	ld.local.u32 	%r73, [%r105+-8];
	shr.u32 	%r74, %r73, %r70;
	shl.b32 	%r75, %r111, %r20;
	add.s32 	%r111, %r74, %r75;

BB62_8:
	shr.u32 	%r76, %r111, 30;
	shl.b32 	%r77, %r110, 2;
	add.s32 	%r112, %r76, %r77;
	shl.b32 	%r26, %r111, 2;
	shr.u32 	%r78, %r112, 31;
	shr.u32 	%r79, %r110, 30;
	add.s32 	%r27, %r78, %r79;
	setp.eq.s32	%p6, %r78, 0;
	mov.u32 	%r113, %r16;
	mov.u32 	%r114, %r26;
	@%p6 bra 	BB62_10;

	not.b32 	%r80, %r112;
	neg.s32 	%r28, %r26;
	setp.eq.s32	%p7, %r26, 0;
	selp.u32	%r81, 1, 0, %p7;
	add.s32 	%r112, %r81, %r80;
	xor.b32  	%r30, %r16, -2147483648;
	mov.u32 	%r113, %r30;
	mov.u32 	%r114, %r28;

BB62_10:
	mov.u32 	%r32, %r113;
	neg.s32 	%r82, %r27;
	setp.eq.s32	%p8, %r16, 0;
	selp.b32	%r117, %r27, %r82, %p8;
	clz.b32 	%r116, %r112;
	setp.eq.s32	%p9, %r116, 0;
	shl.b32 	%r83, %r112, %r116;
	mov.u32 	%r84, 32;
	sub.s32 	%r85, %r84, %r116;
	shr.u32 	%r86, %r114, %r85;
	add.s32 	%r87, %r86, %r83;
	selp.b32	%r36, %r112, %r87, %p9;
	mov.u32 	%r88, -921707870;
	mul.hi.u32 	%r115, %r36, %r88;
	setp.lt.s32	%p10, %r115, 1;
	@%p10 bra 	BB62_12;

	mul.lo.s32 	%r89, %r36, -921707870;
	shr.u32 	%r90, %r89, 31;
	shl.b32 	%r91, %r115, 1;
	add.s32 	%r115, %r90, %r91;
	add.s32 	%r116, %r116, 1;

BB62_12:
	mov.u32 	%r92, 126;
	sub.s32 	%r93, %r92, %r116;
	shl.b32 	%r94, %r93, 23;
	add.s32 	%r95, %r115, 1;
	shr.u32 	%r96, %r95, 7;
	add.s32 	%r97, %r96, 1;
	shr.u32 	%r98, %r97, 1;
	add.s32 	%r99, %r98, %r94;
	or.b32  	%r100, %r99, %r32;
	mov.b32 	 %f44, %r100;

BB62_13:
	mul.rn.f32 	%f7, %f44, %f44;
	and.b32  	%r43, %r117, 1;
	setp.eq.s32	%p11, %r43, 0;
	@%p11 bra 	BB62_15;

	mov.f32 	%f30, 0fBAB6061A;
	mov.f32 	%f31, 0f37CCF5CE;
	fma.rn.f32 	%f45, %f31, %f7, %f30;
	bra.uni 	BB62_16;

BB62_15:
	mov.f32 	%f32, 0f3C08839E;
	mov.f32 	%f33, 0fB94CA1F9;
	fma.rn.f32 	%f45, %f33, %f7, %f32;

BB62_16:
	@%p11 bra 	BB62_18;

	mov.f32 	%f34, 0f3D2AAAA5;
	fma.rn.f32 	%f35, %f45, %f7, %f34;
	mov.f32 	%f36, 0fBF000000;
	fma.rn.f32 	%f46, %f35, %f7, %f36;
	bra.uni 	BB62_19;

BB62_18:
	mov.f32 	%f37, 0fBE2AAAA3;
	fma.rn.f32 	%f38, %f45, %f7, %f37;
	mov.f32 	%f39, 0f00000000;
	fma.rn.f32 	%f46, %f38, %f7, %f39;

BB62_19:
	fma.rn.f32 	%f47, %f46, %f44, %f44;
	@%p11 bra 	BB62_21;

	mov.f32 	%f40, 0f3F800000;
	fma.rn.f32 	%f47, %f46, %f7, %f40;

BB62_21:
	and.b32  	%r101, %r117, 2;
	setp.eq.s32	%p14, %r101, 0;
	@%p14 bra 	BB62_23;

	mov.f32 	%f41, 0f00000000;
	mov.f32 	%f42, 0fBF800000;
	fma.rn.f32 	%f47, %f47, %f42, %f41;

BB62_23:
	cvta.to.global.u32 	%r102, %r44;
	add.s32 	%r104, %r102, %r52;
	st.global.f32 	[%r104], %f47;

BB62_24:
	ret;
}

	// .globl	vec_sinhf
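	// vec_sinhf: element-wise hyperbolic sine, result[i] = sinhf(x[i]).
	// |x| < 1 uses an odd polynomial in x^2 (BB63_3); larger inputs build
	// (e^|x| - e^-|x|)/2 from ex2.approx (the intermediate is e^|x|/4, and
	// 0f3E000000 = 0.125 supplies the reciprocal term), overflowing to
	// +/-infinity once |x| >= 90 (0f42B40000), with the sign bit of x
	// copied back onto the result.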
.visible .entry vec_sinhf(
	.param .u32 vec_sinhf_param_0,
	.param .u32 vec_sinhf_param_1,
	.param .u32 vec_sinhf_param_2
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<32>;
	.reg .b32 	%r<19>;


	ld.param.u32 	%r4, [vec_sinhf_param_0];
	ld.param.u32 	%r2, [vec_sinhf_param_1];
	ld.param.u32 	%r3, [vec_sinhf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB63_5;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	abs.f32 	%f2, %f1;
	setp.ltu.f32	%p2, %f2, 0f3F800000;
	@%p2 bra 	BB63_3;
	bra.uni 	BB63_2;

BB63_3:
	mul.f32 	%f22, %f1, %f1;
	mov.f32 	%f23, 0f394FFF49;
	mov.f32 	%f24, 0f363D0ADA;
	fma.rn.f32 	%f25, %f24, %f22, %f23;
	mov.f32 	%f26, 0f3C08889A;
	fma.rn.f32 	%f27, %f25, %f22, %f26;
	mov.f32 	%f28, 0f3E2AAAAB;
	fma.rn.f32 	%f29, %f27, %f22, %f28;
	mul.f32 	%f30, %f22, %f29;
	fma.rn.f32 	%f31, %f30, %f1, %f1;
	bra.uni 	BB63_4;

BB63_2:
	mul.f32 	%f8, %f2, 0f3FB8AA3B;
	cvt.rzi.f32.f32	%f9, %f8;
	mov.f32 	%f10, 0fBF317200;
	fma.rn.f32 	%f11, %f9, %f10, %f2;
	mov.f32 	%f12, 0fB5BFBE8E;
	fma.rn.f32 	%f13, %f9, %f12, %f11;
	mul.f32 	%f7, %f13, 0f3FB8AA3B;
	// inline asm
	ex2.approx.ftz.f32 %f6,%f7;
	// inline asm
	add.f32 	%f14, %f9, 0fC0000000;
	ex2.approx.f32 	%f15, %f14;
	mul.f32 	%f16, %f6, %f15;
	mov.f32 	%f17, 0f3E000000;
	div.approx.f32 	%f18, %f17, %f16;
	neg.f32 	%f19, %f18;
	mov.f32 	%f20, 0f40000000;
	fma.rn.f32 	%f21, %f20, %f16, %f19;
	mov.b32 	 %r11, %f21;
	setp.ltu.f32	%p3, %f2, 0f42B40000;
	selp.b32	%r12, %r11, 2139095040, %p3;
	mov.b32 	 %r13, %f1;
	and.b32  	%r14, %r13, -2147483648;
	or.b32  	%r15, %r12, %r14;
	mov.b32 	 %f31, %r15;

BB63_4:
	cvta.to.global.u32 	%r16, %r2;
	add.s32 	%r18, %r16, %r9;
	st.global.f32 	[%r18], %f31;

BB63_5:
	ret;
}

	// .globl	vec_sinpif
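	// vec_sinpif: element-wise sin(pi * x), result[i] = sinpif(x[i]).
	// The reduction rounds 2x to the nearest integer, scales the residual by
	// pi (0f40490FDA plus the low-order tail 0f34222169), and reuses the
	// usual sine/cosine quadrant polynomials; integral inputs are forced to
	// a correctly signed zero via the x * 0 multiply at BB64_13.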
.visible .entry vec_sinpif(
	.param .u32 vec_sinpif_param_0,
	.param .u32 vec_sinpif_param_1,
	.param .u32 vec_sinpif_param_2
)
{
	.reg .pred 	%p<7>;
	.reg .f32 	%f<42>;
	.reg .b32 	%r<17>;


	ld.param.u32 	%r6, [vec_sinpif_param_0];
	ld.param.u32 	%r4, [vec_sinpif_param_1];
	ld.param.u32 	%r5, [vec_sinpif_param_2];
	mov.u32 	%r7, %tid.x;
	mov.u32 	%r8, %ntid.x;
	mov.u32 	%r9, %ctaid.x;
	mad.lo.s32 	%r1, %r8, %r9, %r7;
	setp.ge.u32	%p1, %r1, %r6;
	@%p1 bra 	BB64_14;

	cvta.to.global.u32 	%r10, %r5;
	shl.b32 	%r11, %r1, 2;
	add.s32 	%r12, %r10, %r11;
	ld.global.f32 	%f1, [%r12];
	add.f32 	%f17, %f1, %f1;
	cvt.rni.f32.f32	%f18, %f17;
	cvt.rzi.s32.f32	%r2, %f18;
	neg.f32 	%f19, %f18;
	mov.f32 	%f20, 0f3F000000;
	fma.rn.f32 	%f21, %f19, %f20, %f1;
	mul.f32 	%f22, %f21, 0f34222169;
	mov.f32 	%f23, 0f40490FDA;
	fma.rn.f32 	%f2, %f21, %f23, %f22;
	mul.rn.f32 	%f3, %f2, %f2;
	and.b32  	%r3, %r2, 1;
	setp.eq.s32	%p2, %r3, 0;
	@%p2 bra 	BB64_3;

	mov.f32 	%f24, 0fBAB6061A;
	mov.f32 	%f25, 0f37CCF5CE;
	fma.rn.f32 	%f39, %f25, %f3, %f24;
	bra.uni 	BB64_4;

BB64_3:
	mov.f32 	%f26, 0f3C08839E;
	mov.f32 	%f27, 0fB94CA1F9;
	fma.rn.f32 	%f39, %f27, %f3, %f26;

BB64_4:
	@%p2 bra 	BB64_6;

	mov.f32 	%f28, 0f3D2AAAA5;
	fma.rn.f32 	%f29, %f39, %f3, %f28;
	mov.f32 	%f30, 0fBF000000;
	fma.rn.f32 	%f40, %f29, %f3, %f30;
	bra.uni 	BB64_7;

BB64_6:
	mov.f32 	%f31, 0fBE2AAAA3;
	fma.rn.f32 	%f32, %f39, %f3, %f31;
	mov.f32 	%f33, 0f00000000;
	fma.rn.f32 	%f40, %f32, %f3, %f33;

BB64_7:
	fma.rn.f32 	%f41, %f40, %f2, %f2;
	@%p2 bra 	BB64_9;

	mov.f32 	%f34, 0f3F800000;
	fma.rn.f32 	%f41, %f40, %f3, %f34;

BB64_9:
	and.b32  	%r13, %r2, 2;
	setp.eq.s32	%p5, %r13, 0;
	@%p5 bra 	BB64_11;

	mov.f32 	%f35, 0f00000000;
	mov.f32 	%f36, 0fBF800000;
	fma.rn.f32 	%f41, %f41, %f36, %f35;

BB64_11:
	cvt.rzi.f32.f32	%f37, %f1;
	setp.neu.f32	%p6, %f37, %f1;
	@%p6 bra 	BB64_13;

	mov.f32 	%f38, 0f00000000;
	mul.rn.f32 	%f41, %f1, %f38;

BB64_13:
	cvta.to.global.u32 	%r14, %r4;
	add.s32 	%r16, %r14, %r11;
	st.global.f32 	[%r16], %f41;

BB64_14:
	ret;
}

	// .globl	vec_sqrtf
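	// vec_sqrtf: element-wise IEEE round-to-nearest square root via
	// sqrt.rn.f32 -- result[i] = sqrtf(x[i]).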
.visible .entry vec_sqrtf(
	.param .u32 vec_sqrtf_param_0,
	.param .u32 vec_sqrtf_param_1,
	.param .u32 vec_sqrtf_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_sqrtf_param_0];
	ld.param.u32 	%r2, [vec_sqrtf_param_1];
	ld.param.u32 	%r3, [vec_sqrtf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB65_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	sqrt.rn.f32 	%f2, %f1;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f2;

BB65_2:
	ret;
}

	// .globl	vec_tanf
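	// vec_tanf: element-wise tangent, result[i] = tanf(x[i]). Argument
	// reduction is identical to vec_sinf (Cody-Waite constants plus the
	// __cudart_i2opi_f large-argument path); the reduced value feeds a small
	// rational approximation, and odd quadrants return -1/tan via the
	// div.rn.f32 at BB66_14.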
.visible .entry vec_tanf(
	.param .u32 vec_tanf_param_0,
	.param .u32 vec_tanf_param_1,
	.param .u32 vec_tanf_param_2
)
{
	.local .align 4 .b8 	__local_depot66[28];
	.reg .b32 	%SP;
	.reg .b32 	%SPL;
	.reg .pred 	%p<12>;
	.reg .f32 	%f<33>;
	.reg .b32 	%r<118>;


	mov.u32 	%r117, __local_depot66;
	cvta.local.u32 	%SP, %r117;
	ld.param.u32 	%r45, [vec_tanf_param_0];
	ld.param.u32 	%r43, [vec_tanf_param_1];
	ld.param.u32 	%r44, [vec_tanf_param_2];
	add.u32 	%r46, %SP, 0;
	cvta.to.local.u32 	%r1, %r46;
	mov.u32 	%r47, %ntid.x;
	mov.u32 	%r48, %ctaid.x;
	mov.u32 	%r49, %tid.x;
	mad.lo.s32 	%r2, %r47, %r48, %r49;
	setp.ge.u32	%p1, %r2, %r45;
	@%p1 bra 	BB66_16;

	cvta.to.global.u32 	%r50, %r44;
	shl.b32 	%r51, %r2, 2;
	add.s32 	%r52, %r50, %r51;
	ld.global.f32 	%f30, [%r52];
	abs.f32 	%f10, %f30;
	setp.neu.f32	%p2, %f10, 0f7F800000;
	@%p2 bra 	BB66_3;

	mov.f32 	%f11, 0f00000000;
	mul.rn.f32 	%f30, %f30, %f11;

BB66_3:
	mul.f32 	%f12, %f30, 0f3F22F983;
	cvt.rni.s32.f32	%r116, %f12;
	cvt.rn.f32.s32	%f13, %r116;
	neg.f32 	%f14, %f13;
	mov.f32 	%f15, 0f3FC90FDA;
	fma.rn.f32 	%f16, %f14, %f15, %f30;
	mov.f32 	%f17, 0f33A22168;
	fma.rn.f32 	%f18, %f14, %f17, %f16;
	mov.f32 	%f19, 0f27C234C5;
	fma.rn.f32 	%f31, %f14, %f19, %f18;
	abs.f32 	%f20, %f30;
	setp.leu.f32	%p3, %f20, 0f47CE4780;
	@%p3 bra 	BB66_13;

	mov.b32 	 %r4, %f30;
	shr.u32 	%r5, %r4, 23;
	bfe.u32 	%r56, %r4, 23, 8;
	add.s32 	%r57, %r56, -128;
	shl.b32 	%r58, %r4, 8;
	or.b32  	%r6, %r58, -2147483648;
	shr.u32 	%r7, %r57, 5;
	mov.u32 	%r108, 0;
	mov.u32 	%r106, 6;
	mov.u32 	%r105, __cudart_i2opi_f;
	mov.u32 	%r107, %r1;

BB66_5:
	.pragma "nounroll";
	mov.u32 	%r10, %r107;
	ld.const.u32 	%r61, [%r105];
	// inline asm
	{
	mad.lo.cc.u32   %r59, %r61, %r6, %r108;
	madc.hi.u32     %r108, %r61, %r6,  0;
	}
	// inline asm
	st.local.u32 	[%r10], %r59;
	add.s32 	%r13, %r10, 4;
	add.s32 	%r105, %r105, 4;
	add.s32 	%r106, %r106, -1;
	setp.ne.s32	%p4, %r106, 0;
	mov.u32 	%r107, %r13;
	@%p4 bra 	BB66_5;

	and.b32  	%r16, %r4, -2147483648;
	mov.u32 	%r64, 4;
	sub.s32 	%r65, %r64, %r7;
	shl.b32 	%r66, %r65, 2;
	add.s32 	%r67, %r66, %r1;
	st.local.u32 	[%r1+24], %r108;
	ld.local.u32 	%r109, [%r67+8];
	ld.local.u32 	%r110, [%r67+4];
	and.b32  	%r20, %r5, 31;
	setp.eq.s32	%p5, %r20, 0;
	@%p5 bra 	BB66_8;

	mov.u32 	%r68, 32;
	sub.s32 	%r69, %r68, %r20;
	shr.u32 	%r70, %r110, %r69;
	shl.b32 	%r71, %r109, %r20;
	add.s32 	%r109, %r70, %r71;
	add.s32 	%r104, %r67, 8;
	ld.local.u32 	%r72, [%r104+-8];
	shr.u32 	%r73, %r72, %r69;
	shl.b32 	%r74, %r110, %r20;
	add.s32 	%r110, %r73, %r74;

BB66_8:
	shr.u32 	%r75, %r110, 30;
	shl.b32 	%r76, %r109, 2;
	add.s32 	%r111, %r75, %r76;
	shl.b32 	%r26, %r110, 2;
	shr.u32 	%r77, %r111, 31;
	shr.u32 	%r78, %r109, 30;
	add.s32 	%r27, %r77, %r78;
	setp.eq.s32	%p6, %r77, 0;
	mov.u32 	%r112, %r16;
	mov.u32 	%r113, %r26;
	@%p6 bra 	BB66_10;

	not.b32 	%r79, %r111;
	neg.s32 	%r28, %r26;
	setp.eq.s32	%p7, %r26, 0;
	selp.u32	%r80, 1, 0, %p7;
	add.s32 	%r111, %r80, %r79;
	xor.b32  	%r30, %r16, -2147483648;
	mov.u32 	%r112, %r30;
	mov.u32 	%r113, %r28;

BB66_10:
	mov.u32 	%r32, %r112;
	neg.s32 	%r81, %r27;
	setp.eq.s32	%p8, %r16, 0;
	selp.b32	%r116, %r27, %r81, %p8;
	clz.b32 	%r115, %r111;
	setp.eq.s32	%p9, %r115, 0;
	shl.b32 	%r82, %r111, %r115;
	mov.u32 	%r83, 32;
	sub.s32 	%r84, %r83, %r115;
	shr.u32 	%r85, %r113, %r84;
	add.s32 	%r86, %r85, %r82;
	selp.b32	%r36, %r111, %r86, %p9;
	mov.u32 	%r87, -921707870;
	mul.hi.u32 	%r114, %r36, %r87;
	setp.lt.s32	%p10, %r114, 1;
	@%p10 bra 	BB66_12;

	mul.lo.s32 	%r88, %r36, -921707870;
	shr.u32 	%r89, %r88, 31;
	shl.b32 	%r90, %r114, 1;
	add.s32 	%r114, %r89, %r90;
	add.s32 	%r115, %r115, 1;

BB66_12:
	mov.u32 	%r91, 126;
	sub.s32 	%r92, %r91, %r115;
	shl.b32 	%r93, %r92, 23;
	add.s32 	%r94, %r114, 1;
	shr.u32 	%r95, %r94, 7;
	add.s32 	%r96, %r95, 1;
	shr.u32 	%r97, %r96, 1;
	add.s32 	%r98, %r97, %r93;
	or.b32  	%r99, %r98, %r32;
	mov.b32 	 %f31, %r99;

BB66_13:
	mul.f32 	%f21, %f31, %f31;
	mov.f32 	%f22, 0fBF52B7F4;
	mov.f32 	%f23, 0f3B86D46D;
	fma.rn.f32 	%f24, %f23, %f21, %f22;
	add.f32 	%f25, %f21, 0fC01E09D0;
	rcp.rn.f32 	%f26, %f25;
	mul.f32 	%f27, %f24, %f26;
	mul.f32 	%f28, %f21, %f27;
	fma.rn.f32 	%f32, %f28, %f31, %f31;
	and.b32  	%r100, %r116, 1;
	setp.eq.b32	%p11, %r100, 1;
	@!%p11 bra 	BB66_15;
	bra.uni 	BB66_14;

BB66_14:
	mov.f32 	%f29, 0fBF800000;
	div.rn.f32 	%f32, %f29, %f32;

BB66_15:
	cvta.to.global.u32 	%r101, %r43;
	add.s32 	%r103, %r101, %r51;
	st.global.f32 	[%r103], %f32;

BB66_16:
	ret;
}

	// .globl	vec_tanhf
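	// vec_tanhf: element-wise hyperbolic tangent, result[i] = tanhf(x[i]).
	// |x| < 0.55 (0f3F0CCCCD) uses an odd polynomial in x^2; otherwise
	// tanh|x| = 1 - 2/(e^(2|x|) + 1) is built from ex2.approx and
	// rcp.approx, saturating to +/-1 once |x| >= 88 (0f42B00000), with the
	// sign restored from x.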
.visible .entry vec_tanhf(
	.param .u32 vec_tanhf_param_0,
	.param .u32 vec_tanhf_param_1,
	.param .u32 vec_tanhf_param_2
)
{
	.reg .pred 	%p<5>;
	.reg .f32 	%f<33>;
	.reg .b32 	%r<19>;


	ld.param.u32 	%r4, [vec_tanhf_param_0];
	ld.param.u32 	%r2, [vec_tanhf_param_1];
	ld.param.u32 	%r3, [vec_tanhf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB67_5;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	abs.f32 	%f2, %f1;
	setp.ltu.f32	%p2, %f2, 0f3F0CCCCD;
	@%p2 bra 	BB67_3;
	bra.uni 	BB67_2;

BB67_3:
	mul.f32 	%f21, %f1, %f1;
	mov.f32 	%f22, 0fBD57BE66;
	mov.f32 	%f23, 0f3C86A81B;
	fma.rn.f32 	%f24, %f23, %f21, %f22;
	mov.f32 	%f25, 0f3E08677B;
	fma.rn.f32 	%f26, %f24, %f21, %f25;
	mov.f32 	%f27, 0fBEAAAA29;
	fma.rn.f32 	%f28, %f26, %f21, %f27;
	mul.f32 	%f29, %f21, %f28;
	fma.rn.f32 	%f30, %f29, %f1, %f1;
	add.f32 	%f31, %f1, %f1;
	setp.eq.f32	%p4, %f1, 0f00000000;
	selp.f32	%f32, %f31, %f30, %p4;
	bra.uni 	BB67_4;

BB67_2:
	add.f32 	%f10, %f2, %f2;
	mul.f32 	%f11, %f10, 0f3FB8AA3B;
	cvt.rzi.f32.f32	%f12, %f11;
	mov.f32 	%f13, 0fBF317200;
	fma.rn.f32 	%f14, %f12, %f13, %f10;
	mov.f32 	%f15, 0fB5BFBE8E;
	fma.rn.f32 	%f16, %f12, %f15, %f14;
	mul.f32 	%f7, %f16, 0f3FB8AA3B;
	// inline asm
	ex2.approx.ftz.f32 %f6,%f7;
	// inline asm
	ex2.approx.f32 	%f17, %f12;
	mov.f32 	%f18, 0f3F800000;
	fma.rn.f32 	%f9, %f6, %f17, %f18;
	// inline asm
	rcp.approx.ftz.f32 %f8,%f9;
	// inline asm
	mov.f32 	%f19, 0fC0000000;
	fma.rn.f32 	%f20, %f8, %f19, %f18;
	mov.b32 	 %r11, %f20;
	setp.ltu.f32	%p3, %f2, 0f42B00000;
	selp.b32	%r12, %r11, 1065353216, %p3;
	mov.b32 	 %r13, %f1;
	and.b32  	%r14, %r13, -2147483648;
	or.b32  	%r15, %r12, %r14;
	mov.b32 	 %f32, %r15;

BB67_4:
	cvta.to.global.u32 	%r16, %r2;
	add.s32 	%r18, %r16, %r9;
	st.global.f32 	[%r18], %f32;

BB67_5:
	ret;
}

	// .globl	vec_tgammaf
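	// vec_tgammaf: element-wise gamma function, result[i] = tgammaf(x[i]).
	// Positive inputs appear to be reduced with the recurrence
	// gamma(x) = (x-1) * gamma(x-1) (the BB68_4 loop accumulates the
	// product) down to a core interval where a polynomial related to
	// 1/gamma is evaluated; negative inputs go through BB68_6, with
	// non-positive integers mapped to NaN (0f7FFFFFFF) up front and further
	// recurrence steps (BB68_8, BB68_10) plus a parity-based sign fix-up for
	// deep-underflow cases. A best-effort reading of the compiler output.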
.visible .entry vec_tgammaf(
	.param .u32 vec_tgammaf_param_0,
	.param .u32 vec_tgammaf_param_1,
	.param .u32 vec_tgammaf_param_2
)
{
	.reg .pred 	%p<19>;
	.reg .f32 	%f<101>;
	.reg .b32 	%r<16>;


	ld.param.u32 	%r4, [vec_tgammaf_param_0];
	ld.param.u32 	%r2, [vec_tgammaf_param_1];
	ld.param.u32 	%r3, [vec_tgammaf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB68_13;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	setp.ltu.f32	%p2, %f1, 0f00000000;
	@%p2 bra 	BB68_6;
	bra.uni 	BB68_2;

BB68_6:
	cvt.rmi.f32.f32	%f50, %f1;
	setp.eq.f32	%p10, %f50, %f1;
	selp.f32	%f51, 0f7FFFFFFF, %f1, %p10;
	setp.lt.f32	%p11, %f51, 0fC2246666;
	selp.f32	%f14, 0fC2246666, %f51, %p11;
	setp.lt.f32	%p12, %f14, 0fC2081EB8;
	add.f32 	%f52, %f14, 0f40C00000;
	selp.f32	%f97, %f52, %f14, %p12;
	setp.geu.f32	%p13, %f97, 0fBF000000;
	mov.f32 	%f96, %f97;
	@%p13 bra 	BB68_9;

	mov.f32 	%f98, %f97;
	mov.f32 	%f99, %f97;

BB68_8:
	add.f32 	%f98, %f98, 0f3F800000;
	mul.f32 	%f99, %f99, %f98;
	setp.lt.f32	%p14, %f98, 0fBF000000;
	mov.f32 	%f97, %f99;
	mov.f32 	%f96, %f98;
	@%p14 bra 	BB68_8;

BB68_9:
	mov.f32 	%f53, 0f3BE86AA4;
	mov.f32 	%f54, 0fBA8AA19E;
	fma.rn.f32 	%f55, %f54, %f96, %f53;
	mov.f32 	%f56, 0fBC1E2998;
	fma.rn.f32 	%f57, %f55, %f96, %f56;
	mov.f32 	%f58, 0fBD2CBE4A;
	fma.rn.f32 	%f59, %f57, %f96, %f58;
	mov.f32 	%f60, 0f3E2A8A17;
	fma.rn.f32 	%f61, %f59, %f96, %f60;
	mov.f32 	%f62, 0fBD2C0CBB;
	fma.rn.f32 	%f63, %f61, %f96, %f62;
	mov.f32 	%f64, 0fBF27E7A3;
	fma.rn.f32 	%f65, %f63, %f96, %f64;
	mov.f32 	%f66, 0f3F13C468;
	fma.rn.f32 	%f67, %f65, %f96, %f66;
	mov.f32 	%f68, 0f3F800000;
	fma.rn.f32 	%f69, %f67, %f96, %f68;
	mul.f32 	%f70, %f97, %f69;
	rcp.rn.f32 	%f100, %f70;
	setp.geu.f32	%p15, %f14, 0fC2081EB8;
	@%p15 bra 	BB68_12;

	add.f32 	%f71, %f14, 0f3F800000;
	mul.f32 	%f72, %f14, %f71;
	add.f32 	%f73, %f14, 0f40000000;
	mul.f32 	%f74, %f73, %f72;
	add.f32 	%f75, %f14, 0f40400000;
	mul.f32 	%f76, %f75, %f74;
	add.f32 	%f77, %f14, 0f40800000;
	mul.f32 	%f78, %f77, %f76;
	add.f32 	%f79, %f14, 0f40A00000;
	mul.f32 	%f80, %f79, %f78;
	rcp.rn.f32 	%f81, %f80;
	mul.f32 	%f100, %f100, %f81;
	setp.geu.f32	%p16, %f1, 0fC2280000;
	@%p16 bra 	BB68_12;

	cvt.rzi.s32.f32	%r11, %f1;
	and.b32  	%r12, %r11, 1;
	setp.eq.b32	%p17, %r12, 1;
	not.pred 	%p18, %p17;
	selp.f32	%f100, 0f80000000, %f100, %p18;
	bra.uni 	BB68_12;

BB68_2:
	setp.gt.f32	%p3, %f1, 0f42100000;
	selp.f32	%f2, 0f42100000, %f1, %p3;
	setp.gt.f32	%p4, %f2, 0f42081EB8;
	add.f32 	%f3, %f2, 0fBF800000;
	selp.f32	%f88, %f3, %f2, %p4;
	add.f32 	%f89, %f88, 0fBF800000;
	mov.f32 	%f27, 0f3F800000;
	mov.f32 	%f84, %f27;
	setp.leu.f32	%p5, %f88, 0f3FC00000;
	mov.f32 	%f83, %f27;
	@%p5 bra 	BB68_5;

	mov.f32 	%f90, %f89;

BB68_4:
	mov.f32 	%f88, %f90;
	mul.f32 	%f84, %f88, %f84;
	add.f32 	%f90, %f88, 0fBF800000;
	setp.gt.f32	%p6, %f88, 0f3FC00000;
	mov.f32 	%f82, %f84;
	mov.f32 	%f83, %f82;
	mov.f32 	%f89, %f90;
	@%p6 bra 	BB68_4;

BB68_5:
	mov.f32 	%f11, %f83;
	setp.ltu.f32	%p7, %f2, 0f3F000000;
	selp.f32	%f28, %f88, %f89, %p7;
	mov.f32 	%f29, 0f3BE86AA4;
	mov.f32 	%f30, 0fBA8AA19E;
	fma.rn.f32 	%f31, %f30, %f28, %f29;
	mov.f32 	%f32, 0fBC1E2998;
	fma.rn.f32 	%f33, %f31, %f28, %f32;
	mov.f32 	%f34, 0fBD2CBE4A;
	fma.rn.f32 	%f35, %f33, %f28, %f34;
	mov.f32 	%f36, 0f3E2A8A17;
	fma.rn.f32 	%f37, %f35, %f28, %f36;
	mov.f32 	%f38, 0fBD2C0CBB;
	fma.rn.f32 	%f39, %f37, %f28, %f38;
	mov.f32 	%f40, 0fBF27E7A3;
	fma.rn.f32 	%f41, %f39, %f28, %f40;
	mov.f32 	%f42, 0f3F13C468;
	fma.rn.f32 	%f43, %f41, %f28, %f42;
	fma.rn.f32 	%f45, %f43, %f28, %f27;
	mul.f32 	%f46, %f2, %f45;
	setp.lt.f32	%p8, %f2, 0f3F000000;
	selp.f32	%f47, %f46, %f45, %p8;
	div.approx.f32 	%f48, %f11, %f47;
	mul.f32 	%f49, %f3, %f48;
	selp.f32	%f100, %f49, %f48, %p4;

BB68_12:
	cvta.to.global.u32 	%r13, %r2;
	add.s32 	%r15, %r13, %r9;
	st.global.f32 	[%r15], %f100;

BB68_13:
	ret;
}

	// .globl	vec_truncf
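	// vec_truncf: element-wise truncation toward zero via cvt.rzi.f32.f32 --
	// result[i] = truncf(x[i]).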
.visible .entry vec_truncf(
	.param .u32 vec_truncf_param_0,
	.param .u32 vec_truncf_param_1,
	.param .u32 vec_truncf_param_2
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<3>;
	.reg .b32 	%r<13>;


	ld.param.u32 	%r4, [vec_truncf_param_0];
	ld.param.u32 	%r2, [vec_truncf_param_1];
	ld.param.u32 	%r3, [vec_truncf_param_2];
	mov.u32 	%r5, %tid.x;
	mov.u32 	%r6, %ntid.x;
	mov.u32 	%r7, %ctaid.x;
	mad.lo.s32 	%r1, %r6, %r7, %r5;
	setp.ge.u32	%p1, %r1, %r4;
	@%p1 bra 	BB69_2;

	cvta.to.global.u32 	%r8, %r3;
	shl.b32 	%r9, %r1, 2;
	add.s32 	%r10, %r8, %r9;
	ld.global.f32 	%f1, [%r10];
	cvt.rzi.f32.f32	%f2, %f1;
	cvta.to.global.u32 	%r11, %r2;
	add.s32 	%r12, %r11, %r9;
	st.global.f32 	[%r12], %f2;

BB69_2:
	ret;
}

	// .globl	vec_y0f
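	// vec_y0f: element-wise Bessel function of the second kind, order 0 --
	// result[i] = y0f(x[i]). Small arguments appear to combine a logarithm
	// of x (the lg2.approx block at BB70_2/BB70_4) with a J0-style
	// evaluation, matching the classical Y0(x) = (2/pi) ln(x) J0(x) + series
	// structure; mid-range arguments use polynomials expanded about the
	// first zeros of Y0 (BB70_43/45/47); large arguments use the asymptotic
	// form sqrt(2/(pi*x)) * M(x) * sin(x + phase), with 0f3F4C422A being
	// sqrt(2/pi), which is why the full Cody-Waite / __cudart_i2opi_f trig
	// reduction machinery appears twice inside this one kernel. Negative
	// inputs produce NaN via sqrt.rn(-1.0f) just before the store.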
.visible .entry vec_y0f(
	.param .u32 vec_y0f_param_0,
	.param .u32 vec_y0f_param_1,
	.param .u32 vec_y0f_param_2
)
{
	.local .align 4 .b8 	__local_depot70[28];
	.reg .b32 	%SP;
	.reg .b32 	%SPL;
	.reg .pred 	%p<57>;
	.reg .f32 	%f<336>;
	.reg .b32 	%r<442>;


	mov.u32 	%r441, __local_depot70;
	cvta.local.u32 	%SP, %r441;
	ld.param.u32 	%r168, [vec_y0f_param_0];
	ld.param.u32 	%r166, [vec_y0f_param_1];
	ld.param.u32 	%r167, [vec_y0f_param_2];
	add.u32 	%r169, %SP, 0;
	cvta.to.local.u32 	%r419, %r169;
	mov.u32 	%r170, %ntid.x;
	mov.u32 	%r171, %ctaid.x;
	mov.u32 	%r172, %tid.x;
	mad.lo.s32 	%r2, %r170, %r171, %r172;
	setp.ge.u32	%p1, %r2, %r168;
	@%p1 bra 	BB70_85;

	cvta.to.global.u32 	%r173, %r167;
	shl.b32 	%r174, %r2, 2;
	add.s32 	%r175, %r173, %r174;
	ld.global.f32 	%f1, [%r175];
	abs.f32 	%f2, %f1;
	setp.gtu.f32	%p2, %f2, 0f3EE4C176;
	@%p2 bra 	BB70_42;
	bra.uni 	BB70_2;

BB70_42:
	setp.gtu.f32	%p31, %f2, 0f3FF67AF8;
	@%p31 bra 	BB70_44;
	bra.uni 	BB70_43;

BB70_44:
	setp.gtu.f32	%p32, %f2, 0f40B0B31E;
	@%p32 bra 	BB70_46;
	bra.uni 	BB70_45;

BB70_46:
	setp.gtu.f32	%p33, %f2, 0f410A7798;
	@%p33 bra 	BB70_48;
	bra.uni 	BB70_47;

BB70_48:
	abs.f32 	%f265, %f2;
	mov.f32 	%f335, 0f00000000;
	setp.eq.f32	%p34, %f265, 0f7F800000;
	@%p34 bra 	BB70_82;

	// inline asm
	rcp.approx.ftz.f32 %f266,%f2;
	// inline asm
	mul.f32 	%f268, %f266, %f266;
	mov.f32 	%f269, 0f3DD0D5F0;
	mov.f32 	%f270, 0fBECC69F3;
	fma.rn.f32 	%f271, %f270, %f268, %f269;
	mov.f32 	%f272, 0fBD7FF855;
	fma.rn.f32 	%f273, %f271, %f268, %f272;
	mov.f32 	%f274, 0f3F800000;
	fma.rn.f32 	%f275, %f273, %f268, %f274;
	mov.f32 	%f276, 0fBE50D31C;
	mov.f32 	%f277, 0f3F8CCD61;
	fma.rn.f32 	%f278, %f277, %f268, %f276;
	mov.f32 	%f279, 0f3D854783;
	fma.rn.f32 	%f280, %f278, %f268, %f279;
	mov.f32 	%f281, 0fBDFFFFFB;
	fma.rn.f32 	%f282, %f280, %f268, %f281;
	fma.rn.f32 	%f38, %f282, %f266, %f2;
	rsqrt.approx.f32 	%f283, %f2;
	mul.f32 	%f284, %f283, 0f3F4C422A;
	mul.f32 	%f39, %f275, %f284;
	mul.f32 	%f285, %f38, 0f3F22F983;
	cvt.rni.s32.f32	%r428, %f285;
	cvt.rn.f32.s32	%f286, %r428;
	neg.f32 	%f287, %f286;
	mov.f32 	%f288, 0f3FC90FDA;
	fma.rn.f32 	%f289, %f287, %f288, %f38;
	mov.f32 	%f290, 0f33A22168;
	fma.rn.f32 	%f291, %f287, %f290, %f289;
	mov.f32 	%f292, 0f27C234C5;
	fma.rn.f32 	%f329, %f287, %f292, %f291;
	abs.f32 	%f293, %f38;
	setp.leu.f32	%p35, %f293, 0f47CE4780;
	@%p35 bra 	BB70_59;

	mov.b32 	 %r85, %f38;
	bfe.u32 	%r285, %r85, 23, 8;
	add.s32 	%r286, %r285, -128;
	shl.b32 	%r287, %r85, 8;
	or.b32  	%r86, %r287, -2147483648;
	shr.u32 	%r87, %r286, 5;
	mov.u32 	%r420, 0;
	mov.u32 	%r418, 6;
	mov.u32 	%r417, __cudart_i2opi_f;

BB70_51:
	.pragma "nounroll";
	ld.const.u32 	%r290, [%r417];
	// inline asm
	{
	mad.lo.cc.u32   %r288, %r290, %r86, %r420;
	madc.hi.u32     %r420, %r290, %r86,  0;
	}
	// inline asm
	st.local.u32 	[%r419], %r288;
	add.s32 	%r419, %r419, 4;
	add.s32 	%r417, %r417, 4;
	add.s32 	%r418, %r418, -1;
	setp.ne.s32	%p36, %r418, 0;
	@%p36 bra 	BB70_51;

	and.b32  	%r96, %r85, -2147483648;
	cvta.to.local.u32 	%r294, %r169;
	mov.u32 	%r295, 4;
	sub.s32 	%r296, %r295, %r87;
	shl.b32 	%r297, %r296, 2;
	add.s32 	%r298, %r297, %r294;
	st.local.u32 	[%r294+24], %r420;
	bfe.u32 	%r97, %r85, 23, 5;
	ld.local.u32 	%r421, [%r298+8];
	ld.local.u32 	%r422, [%r298+4];
	setp.eq.s32	%p37, %r97, 0;
	@%p37 bra 	BB70_54;

	mov.u32 	%r299, 32;
	sub.s32 	%r300, %r299, %r97;
	shr.u32 	%r301, %r422, %r300;
	shl.b32 	%r302, %r421, %r97;
	add.s32 	%r421, %r301, %r302;
	add.s32 	%r392, %r298, 8;
	ld.local.u32 	%r303, [%r392+-8];
	shr.u32 	%r304, %r303, %r300;
	shl.b32 	%r305, %r422, %r97;
	add.s32 	%r422, %r304, %r305;

BB70_54:
	shr.u32 	%r306, %r422, 30;
	shl.b32 	%r307, %r421, 2;
	add.s32 	%r423, %r306, %r307;
	shl.b32 	%r106, %r422, 2;
	shr.u32 	%r308, %r423, 31;
	shr.u32 	%r309, %r421, 30;
	add.s32 	%r107, %r308, %r309;
	setp.eq.s32	%p38, %r308, 0;
	mov.u32 	%r424, %r96;
	mov.u32 	%r425, %r106;
	@%p38 bra 	BB70_56;

	not.b32 	%r310, %r423;
	neg.s32 	%r108, %r106;
	setp.eq.s32	%p39, %r106, 0;
	selp.u32	%r311, 1, 0, %p39;
	add.s32 	%r423, %r311, %r310;
	xor.b32  	%r110, %r96, -2147483648;
	mov.u32 	%r424, %r110;
	mov.u32 	%r425, %r108;

BB70_56:
	mov.u32 	%r112, %r424;
	neg.s32 	%r312, %r107;
	setp.eq.s32	%p40, %r96, 0;
	selp.b32	%r428, %r107, %r312, %p40;
	clz.b32 	%r427, %r423;
	setp.eq.s32	%p41, %r427, 0;
	shl.b32 	%r313, %r423, %r427;
	mov.u32 	%r314, 32;
	sub.s32 	%r315, %r314, %r427;
	shr.u32 	%r316, %r425, %r315;
	add.s32 	%r317, %r316, %r313;
	selp.b32	%r116, %r423, %r317, %p41;
	mov.u32 	%r318, -921707870;
	mul.hi.u32 	%r426, %r116, %r318;
	setp.lt.s32	%p42, %r426, 1;
	@%p42 bra 	BB70_58;

	mul.lo.s32 	%r319, %r116, -921707870;
	shr.u32 	%r320, %r319, 31;
	shl.b32 	%r321, %r426, 1;
	add.s32 	%r426, %r320, %r321;
	add.s32 	%r427, %r427, 1;

BB70_58:
	mov.u32 	%r322, 126;
	sub.s32 	%r323, %r322, %r427;
	shl.b32 	%r324, %r323, 23;
	add.s32 	%r325, %r426, 1;
	shr.u32 	%r326, %r325, 7;
	add.s32 	%r327, %r326, 1;
	shr.u32 	%r328, %r327, 1;
	add.s32 	%r329, %r328, %r324;
	or.b32  	%r330, %r329, %r112;
	mov.b32 	 %f329, %r330;

BB70_59:
	and.b32  	%r331, %r428, 3;
	cvt.rn.f32.s32	%f294, %r331;
	add.f32 	%f295, %f329, 0fC016CBE4;
	fma.rn.f32 	%f330, %f294, 0f3FC90FDB, %f295;
	abs.f32 	%f296, %f330;
	setp.neu.f32	%p43, %f296, 0f7F800000;
	@%p43 bra 	BB70_61;

	mov.f32 	%f297, 0f00000000;
	mul.rn.f32 	%f330, %f330, %f297;

BB70_61:
	mul.f32 	%f298, %f330, 0f3F22F983;
	cvt.rni.s32.f32	%r440, %f298;
	cvt.rn.f32.s32	%f299, %r440;
	neg.f32 	%f300, %f299;
	fma.rn.f32 	%f302, %f300, %f288, %f330;
	fma.rn.f32 	%f304, %f300, %f290, %f302;
	fma.rn.f32 	%f331, %f300, %f292, %f304;
	abs.f32 	%f306, %f330;
	setp.leu.f32	%p44, %f306, 0f47CE4780;
	@%p44 bra 	BB70_71;

	mov.b32 	 %r124, %f330;
	shr.u32 	%r125, %r124, 23;
	bfe.u32 	%r335, %r124, 23, 8;
	add.s32 	%r336, %r335, -128;
	shl.b32 	%r337, %r124, 8;
	or.b32  	%r126, %r337, -2147483648;
	shr.u32 	%r127, %r336, 5;
	cvta.to.local.u32 	%r431, %r169;
	mov.u32 	%r432, 0;
	mov.u32 	%r430, 6;
	mov.u32 	%r429, __cudart_i2opi_f;

BB70_63:
	.pragma "nounroll";
	ld.const.u32 	%r341, [%r429];
	// inline asm
	{
	mad.lo.cc.u32   %r339, %r341, %r126, %r432;
	madc.hi.u32     %r432, %r341, %r126,  0;
	}
	// inline asm
	st.local.u32 	[%r431], %r339;
	add.s32 	%r431, %r431, 4;
	add.s32 	%r429, %r429, 4;
	add.s32 	%r430, %r430, -1;
	setp.ne.s32	%p45, %r430, 0;
	@%p45 bra 	BB70_63;

	and.b32  	%r137, %r124, -2147483648;
	cvta.to.local.u32 	%r345, %r169;
	mov.u32 	%r346, 4;
	sub.s32 	%r347, %r346, %r127;
	shl.b32 	%r348, %r347, 2;
	add.s32 	%r349, %r348, %r345;
	st.local.u32 	[%r345+24], %r432;
	ld.local.u32 	%r433, [%r349+8];
	ld.local.u32 	%r434, [%r349+4];
	and.b32  	%r141, %r125, 31;
	setp.eq.s32	%p46, %r141, 0;
	@%p46 bra 	BB70_66;

	mov.u32 	%r350, 32;
	sub.s32 	%r351, %r350, %r141;
	shr.u32 	%r352, %r434, %r351;
	shl.b32 	%r353, %r433, %r141;
	add.s32 	%r433, %r352, %r353;
	add.s32 	%r393, %r349, 8;
	ld.local.u32 	%r354, [%r393+-8];
	shr.u32 	%r355, %r354, %r351;
	shl.b32 	%r356, %r434, %r141;
	add.s32 	%r434, %r355, %r356;

BB70_66:
	shr.u32 	%r357, %r434, 30;
	shl.b32 	%r358, %r433, 2;
	add.s32 	%r435, %r357, %r358;
	shl.b32 	%r147, %r434, 2;
	shr.u32 	%r359, %r435, 31;
	shr.u32 	%r360, %r433, 30;
	add.s32 	%r148, %r359, %r360;
	setp.eq.s32	%p47, %r359, 0;
	mov.u32 	%r436, %r137;
	mov.u32 	%r437, %r147;
	@%p47 bra 	BB70_68;

	not.b32 	%r361, %r435;
	neg.s32 	%r149, %r147;
	setp.eq.s32	%p48, %r147, 0;
	selp.u32	%r362, 1, 0, %p48;
	add.s32 	%r435, %r362, %r361;
	xor.b32  	%r151, %r137, -2147483648;
	mov.u32 	%r436, %r151;
	mov.u32 	%r437, %r149;

BB70_68:
	mov.u32 	%r153, %r436;
	neg.s32 	%r363, %r148;
	setp.eq.s32	%p49, %r137, 0;
	selp.b32	%r440, %r148, %r363, %p49;
	clz.b32 	%r439, %r435;
	setp.eq.s32	%p50, %r439, 0;
	shl.b32 	%r364, %r435, %r439;
	mov.u32 	%r365, 32;
	sub.s32 	%r366, %r365, %r439;
	shr.u32 	%r367, %r437, %r366;
	add.s32 	%r368, %r367, %r364;
	selp.b32	%r157, %r435, %r368, %p50;
	mov.u32 	%r369, -921707870;
	mul.hi.u32 	%r438, %r157, %r369;
	setp.lt.s32	%p51, %r438, 1;
	@%p51 bra 	BB70_70;

	mul.lo.s32 	%r370, %r157, -921707870;
	shr.u32 	%r371, %r370, 31;
	shl.b32 	%r372, %r438, 1;
	add.s32 	%r438, %r371, %r372;
	add.s32 	%r439, %r439, 1;

BB70_70:
	mov.u32 	%r373, 126;
	sub.s32 	%r374, %r373, %r439;
	shl.b32 	%r375, %r374, 23;
	add.s32 	%r376, %r438, 1;
	shr.u32 	%r377, %r376, 7;
	add.s32 	%r378, %r377, 1;
	shr.u32 	%r379, %r378, 1;
	add.s32 	%r380, %r379, %r375;
	or.b32  	%r381, %r380, %r153;
	mov.b32 	 %f331, %r381;

BB70_71:
	mul.rn.f32 	%f49, %f331, %f331;
	add.s32 	%r164, %r440, 1;
	and.b32  	%r165, %r164, 1;
	setp.eq.s32	%p52, %r165, 0;
	@%p52 bra 	BB70_73;

	mov.f32 	%f307, 0fBAB6061A;
	mov.f32 	%f308, 0f37CCF5CE;
	fma.rn.f32 	%f332, %f308, %f49, %f307;
	bra.uni 	BB70_74;

BB70_2:
	mul.f32 	%f65, %f2, %f2;
	mov.f32 	%f66, 0fB71F49B6;
	mov.f32 	%f67, 0f33DBE5AC;
	fma.rn.f32 	%f68, %f67, %f65, %f66;
	mov.f32 	%f69, 0f3A0D3100;
	fma.rn.f32 	%f70, %f68, %f65, %f69;
	mov.f32 	%f71, 0fBC83AD8E;
	fma.rn.f32 	%f72, %f70, %f65, %f71;
	mov.f32 	%f73, 0f3E35DE5A;
	fma.rn.f32 	%f74, %f72, %f65, %f73;
	mov.f32 	%f75, 0fBD9726B5;
	fma.rn.f32 	%f3, %f74, %f65, %f75;
	setp.lt.f32	%p3, %f2, 0f7F800000;
	setp.gt.f32	%p4, %f2, 0f00000000;
	and.pred  	%p5, %p4, %p3;
	@%p5 bra 	BB70_4;
	bra.uni 	BB70_3;

BB70_4:
	setp.lt.f32	%p6, %f2, 0f00800000;
	mul.f32 	%f78, %f2, 0f4B800000;
	selp.f32	%f79, %f78, %f2, %p6;
	selp.f32	%f80, 0fC3170000, 0fC2FE0000, %p6;
	mov.b32 	 %r176, %f79;
	and.b32  	%r177, %r176, 8388607;
	or.b32  	%r178, %r177, 1065353216;
	mov.b32 	 %f81, %r178;
	shr.u32 	%r179, %r176, 23;
	cvt.rn.f32.u32	%f82, %r179;
	add.f32 	%f83, %f80, %f82;
	setp.gt.f32	%p7, %f81, 0f3FAE147B;
	mul.f32 	%f84, %f81, 0f3F000000;
	add.f32 	%f85, %f83, 0f3F800000;
	selp.f32	%f86, %f84, %f81, %p7;
	selp.f32	%f87, %f85, %f83, %p7;
	add.f32 	%f77, %f86, 0f3F800000;
	add.f32 	%f88, %f86, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f76,%f77;
	// inline asm
	mul.f32 	%f89, %f88, %f88;
	neg.f32 	%f90, %f89;
	mul.rn.f32 	%f91, %f76, %f90;
	add.rn.f32 	%f92, %f88, %f91;
	mul.f32 	%f93, %f92, %f92;
	mov.f32 	%f94, 0f3C4C6A36;
	mov.f32 	%f95, 0f3B1E94E6;
	fma.rn.f32 	%f96, %f95, %f93, %f94;
	mov.f32 	%f97, 0f3DAAAB1A;
	fma.rn.f32 	%f98, %f96, %f93, %f97;
	mul.f32 	%f99, %f93, %f98;
	fma.rn.f32 	%f100, %f99, %f92, %f91;
	add.f32 	%f101, %f88, %f100;
	mov.f32 	%f102, 0f3F317218;
	fma.rn.f32 	%f321, %f87, %f102, %f101;
	bra.uni 	BB70_5;

BB70_43:
	add.f32 	%f197, %f2, 0fBF64C176;
	add.f32 	%f198, %f197, 0f32657D03;
	mov.f32 	%f199, 0fBE02574C;
	mov.f32 	%f200, 0f3CDDC8B3;
	fma.rn.f32 	%f201, %f200, %f198, %f199;
	mov.f32 	%f202, 0f3E7F2CC9;
	fma.rn.f32 	%f203, %f201, %f198, %f202;
	mov.f32 	%f204, 0fBE8BF29B;
	fma.rn.f32 	%f205, %f203, %f198, %f204;
	mov.f32 	%f206, 0f3E5BCE93;
	fma.rn.f32 	%f207, %f205, %f198, %f206;
	mov.f32 	%f208, 0fBE38C4FF;
	fma.rn.f32 	%f209, %f207, %f198, %f208;
	mov.f32 	%f210, 0f3E42774D;
	fma.rn.f32 	%f211, %f209, %f198, %f210;
	mov.f32 	%f212, 0fBE525CB2;
	fma.rn.f32 	%f213, %f211, %f198, %f212;
	mov.f32 	%f214, 0f3E60F43D;
	fma.rn.f32 	%f215, %f213, %f198, %f214;
	mov.f32 	%f216, 0fBE679145;
	fma.rn.f32 	%f217, %f215, %f198, %f216;
	mov.f32 	%f218, 0f3E61D24A;
	fma.rn.f32 	%f219, %f217, %f198, %f218;
	mov.f32 	%f220, 0fBEFBF1AD;
	fma.rn.f32 	%f221, %f219, %f198, %f220;
	mov.f32 	%f222, 0f3F6121BB;
	fma.rn.f32 	%f223, %f221, %f198, %f222;
	mul.f32 	%f335, %f198, %f223;
	bra.uni 	BB70_82;

BB70_3:
	lg2.approx.f32 	%f321, %f2;

BB70_5:
	abs.f32 	%f7, %f2;
	setp.gtu.f32	%p8, %f7, 0f41000000;
	@%p8 bra 	BB70_7;
	bra.uni 	BB70_6;

BB70_7:
	abs.f32 	%f139, %f7;
	mov.f32 	%f328, 0f00000000;
	setp.eq.f32	%p9, %f139, 0f7F800000;
	@%p9 bra 	BB70_41;

	// inline asm
	rcp.approx.ftz.f32 %f140,%f7;
	// inline asm
	mul.f32 	%f142, %f140, %f140;
	mov.f32 	%f143, 0fBF03B7C2;
	mov.f32 	%f144, 0f4056FE93;
	fma.rn.f32 	%f145, %f144, %f142, %f143;
	mov.f32 	%f146, 0f3DD3B3F3;
	fma.rn.f32 	%f147, %f145, %f142, %f146;
	mov.f32 	%f148, 0fBD7FFFB6;
	fma.rn.f32 	%f149, %f147, %f142, %f148;
	mov.f32 	%f150, 0f3F800000;
	fma.rn.f32 	%f151, %f149, %f142, %f150;
	mov.f32 	%f152, 0fBE52412D;
	mov.f32 	%f153, 0f3F91E009;
	fma.rn.f32 	%f154, %f153, %f142, %f152;
	mov.f32 	%f155, 0f3D854ED1;
	fma.rn.f32 	%f156, %f154, %f142, %f155;
	mov.f32 	%f157, 0fBDFFFFFF;
	fma.rn.f32 	%f158, %f156, %f142, %f157;
	fma.rn.f32 	%f9, %f158, %f140, %f7;
	rsqrt.approx.f32 	%f159, %f7;
	mul.f32 	%f160, %f159, 0f3F4C422A;
	mul.f32 	%f10, %f151, %f160;
	mul.f32 	%f161, %f9, 0f3F22F983;
	cvt.rni.s32.f32	%r404, %f161;
	cvt.rn.f32.s32	%f162, %r404;
	neg.f32 	%f163, %f162;
	mov.f32 	%f164, 0f3FC90FDA;
	fma.rn.f32 	%f165, %f163, %f164, %f9;
	mov.f32 	%f166, 0f33A22168;
	fma.rn.f32 	%f167, %f163, %f166, %f165;
	mov.f32 	%f168, 0f27C234C5;
	fma.rn.f32 	%f322, %f163, %f168, %f167;
	abs.f32 	%f169, %f9;
	setp.leu.f32	%p10, %f169, 0f47CE4780;
	@%p10 bra 	BB70_18;

	mov.b32 	 %r4, %f9;
	shl.b32 	%r183, %r4, 8;
	or.b32  	%r5, %r183, -2147483648;
	mov.u32 	%r396, 0;
	mov.u32 	%r395, 6;
	mov.u32 	%r394, __cudart_i2opi_f;

BB70_10:
	.pragma "nounroll";
	ld.const.u32 	%r186, [%r394];
	// inline asm
	{
	mad.lo.cc.u32   %r184, %r186, %r5, %r396;
	madc.hi.u32     %r396, %r186, %r5,  0;
	}
	// inline asm
	st.local.u32 	[%r419], %r184;
	add.s32 	%r419, %r419, 4;
	add.s32 	%r394, %r394, 4;
	add.s32 	%r395, %r395, -1;
	setp.ne.s32	%p11, %r395, 0;
	@%p11 bra 	BB70_10;

	and.b32  	%r14, %r4, -2147483648;
	bfe.u32 	%r189, %r4, 23, 8;
	add.s32 	%r190, %r189, -128;
	shr.u32 	%r191, %r190, 5;
	mov.u32 	%r192, 4;
	sub.s32 	%r193, %r192, %r191;
	cvta.to.local.u32 	%r195, %r169;
	shl.b32 	%r196, %r193, 2;
	add.s32 	%r197, %r196, %r195;
	st.local.u32 	[%r195+24], %r396;
	bfe.u32 	%r15, %r4, 23, 5;
	ld.local.u32 	%r397, [%r197+8];
	ld.local.u32 	%r398, [%r197+4];
	setp.eq.s32	%p12, %r15, 0;
	@%p12 bra 	BB70_13;

	mov.u32 	%r198, 32;
	sub.s32 	%r199, %r198, %r15;
	shr.u32 	%r200, %r398, %r199;
	shl.b32 	%r201, %r397, %r15;
	add.s32 	%r397, %r200, %r201;
	add.s32 	%r390, %r197, 8;
	ld.local.u32 	%r202, [%r390+-8];
	shr.u32 	%r203, %r202, %r199;
	shl.b32 	%r204, %r398, %r15;
	add.s32 	%r398, %r203, %r204;

BB70_13:
	shr.u32 	%r205, %r398, 30;
	shl.b32 	%r206, %r397, 2;
	add.s32 	%r399, %r205, %r206;
	shl.b32 	%r24, %r398, 2;
	shr.u32 	%r207, %r399, 31;
	shr.u32 	%r208, %r397, 30;
	add.s32 	%r25, %r207, %r208;
	setp.eq.s32	%p13, %r207, 0;
	mov.u32 	%r400, %r14;
	mov.u32 	%r401, %r24;
	@%p13 bra 	BB70_15;

	not.b32 	%r209, %r399;
	neg.s32 	%r26, %r24;
	setp.eq.s32	%p14, %r24, 0;
	selp.u32	%r210, 1, 0, %p14;
	add.s32 	%r399, %r210, %r209;
	xor.b32  	%r28, %r14, -2147483648;
	mov.u32 	%r400, %r28;
	mov.u32 	%r401, %r26;

BB70_15:
	mov.u32 	%r30, %r400;
	neg.s32 	%r211, %r25;
	setp.eq.s32	%p15, %r14, 0;
	selp.b32	%r404, %r25, %r211, %p15;
	clz.b32 	%r403, %r399;
	setp.eq.s32	%p16, %r403, 0;
	shl.b32 	%r212, %r399, %r403;
	mov.u32 	%r213, 32;
	sub.s32 	%r214, %r213, %r403;
	shr.u32 	%r215, %r401, %r214;
	add.s32 	%r216, %r215, %r212;
	selp.b32	%r34, %r399, %r216, %p16;
	mov.u32 	%r217, -921707870;
	mul.hi.u32 	%r402, %r34, %r217;
	setp.lt.s32	%p17, %r402, 1;
	@%p17 bra 	BB70_17;

	mul.lo.s32 	%r218, %r34, -921707870;
	shr.u32 	%r219, %r218, 31;
	shl.b32 	%r220, %r402, 1;
	add.s32 	%r402, %r219, %r220;
	add.s32 	%r403, %r403, 1;

BB70_17:
	mov.u32 	%r221, 126;
	sub.s32 	%r222, %r221, %r403;
	shl.b32 	%r223, %r222, 23;
	add.s32 	%r224, %r402, 1;
	shr.u32 	%r225, %r224, 7;
	add.s32 	%r226, %r225, 1;
	shr.u32 	%r227, %r226, 1;
	add.s32 	%r228, %r227, %r223;
	or.b32  	%r229, %r228, %r30;
	mov.b32 	 %f322, %r229;

BB70_18:
	and.b32  	%r230, %r404, 3;
	cvt.rn.f32.s32	%f170, %r230;
	add.f32 	%f171, %f322, 0fBF490FDB;
	fma.rn.f32 	%f323, %f170, 0f3FC90FDB, %f171;
	abs.f32 	%f172, %f323;
	setp.neu.f32	%p18, %f172, 0f7F800000;
	@%p18 bra 	BB70_20;

	mov.f32 	%f173, 0f00000000;
	mul.rn.f32 	%f323, %f323, %f173;

BB70_20:
	mul.f32 	%f174, %f323, 0f3F22F983;
	cvt.rni.s32.f32	%r416, %f174;
	cvt.rn.f32.s32	%f175, %r416;
	neg.f32 	%f176, %f175;
	fma.rn.f32 	%f178, %f176, %f164, %f323;
	fma.rn.f32 	%f180, %f176, %f166, %f178;
	fma.rn.f32 	%f324, %f176, %f168, %f180;
	abs.f32 	%f182, %f323;
	setp.leu.f32	%p19, %f182, 0f47CE4780;
	@%p19 bra 	BB70_30;

	mov.b32 	 %r42, %f323;
	shr.u32 	%r43, %r42, 23;
	bfe.u32 	%r234, %r42, 23, 8;
	add.s32 	%r235, %r234, -128;
	shl.b32 	%r236, %r42, 8;
	or.b32  	%r44, %r236, -2147483648;
	shr.u32 	%r45, %r235, 5;
	cvta.to.local.u32 	%r407, %r169;
	mov.u32 	%r408, 0;
	mov.u32 	%r406, 6;
	mov.u32 	%r405, __cudart_i2opi_f;

BB70_22:
	.pragma "nounroll";
	ld.const.u32 	%r240, [%r405];
	// inline asm
	{
	mad.lo.cc.u32   %r238, %r240, %r44, %r408;
	madc.hi.u32     %r408, %r240, %r44,  0;
	}
	// inline asm
	st.local.u32 	[%r407], %r238;
	add.s32 	%r407, %r407, 4;
	add.s32 	%r405, %r405, 4;
	add.s32 	%r406, %r406, -1;
	setp.ne.s32	%p20, %r406, 0;
	@%p20 bra 	BB70_22;

	and.b32  	%r55, %r42, -2147483648;
	cvta.to.local.u32 	%r244, %r169;
	mov.u32 	%r245, 4;
	sub.s32 	%r246, %r245, %r45;
	shl.b32 	%r247, %r246, 2;
	add.s32 	%r248, %r247, %r244;
	st.local.u32 	[%r244+24], %r408;
	ld.local.u32 	%r409, [%r248+8];
	ld.local.u32 	%r410, [%r248+4];
	and.b32  	%r59, %r43, 31;
	setp.eq.s32	%p21, %r59, 0;
	@%p21 bra 	BB70_25;

	mov.u32 	%r249, 32;
	sub.s32 	%r250, %r249, %r59;
	shr.u32 	%r251, %r410, %r250;
	shl.b32 	%r252, %r409, %r59;
	add.s32 	%r409, %r251, %r252;
	add.s32 	%r391, %r248, 8;
	ld.local.u32 	%r253, [%r391+-8];
	shr.u32 	%r254, %r253, %r250;
	shl.b32 	%r255, %r410, %r59;
	add.s32 	%r410, %r254, %r255;

BB70_25:
	shr.u32 	%r256, %r410, 30;
	shl.b32 	%r257, %r409, 2;
	add.s32 	%r411, %r256, %r257;
	shl.b32 	%r65, %r410, 2;
	shr.u32 	%r258, %r411, 31;
	shr.u32 	%r259, %r409, 30;
	add.s32 	%r66, %r258, %r259;
	setp.eq.s32	%p22, %r258, 0;
	mov.u32 	%r412, %r55;
	mov.u32 	%r413, %r65;
	@%p22 bra 	BB70_27;

	not.b32 	%r260, %r411;
	neg.s32 	%r67, %r65;
	setp.eq.s32	%p23, %r65, 0;
	selp.u32	%r261, 1, 0, %p23;
	add.s32 	%r411, %r261, %r260;
	xor.b32  	%r69, %r55, -2147483648;
	mov.u32 	%r412, %r69;
	mov.u32 	%r413, %r67;

BB70_27:
	mov.u32 	%r71, %r412;
	neg.s32 	%r262, %r66;
	setp.eq.s32	%p24, %r55, 0;
	selp.b32	%r416, %r66, %r262, %p24;
	clz.b32 	%r415, %r411;
	setp.eq.s32	%p25, %r415, 0;
	shl.b32 	%r263, %r411, %r415;
	mov.u32 	%r264, 32;
	sub.s32 	%r265, %r264, %r415;
	shr.u32 	%r266, %r413, %r265;
	add.s32 	%r267, %r266, %r263;
	selp.b32	%r75, %r411, %r267, %p25;
	mov.u32 	%r268, -921707870;
	mul.hi.u32 	%r414, %r75, %r268;
	setp.lt.s32	%p26, %r414, 1;
	@%p26 bra 	BB70_29;

	mul.lo.s32 	%r269, %r75, -921707870;
	shr.u32 	%r270, %r269, 31;
	shl.b32 	%r271, %r414, 1;
	add.s32 	%r414, %r270, %r271;
	add.s32 	%r415, %r415, 1;

BB70_29:
	mov.u32 	%r272, 126;
	sub.s32 	%r273, %r272, %r415;
	shl.b32 	%r274, %r273, 23;
	add.s32 	%r275, %r414, 1;
	shr.u32 	%r276, %r275, 7;
	add.s32 	%r277, %r276, 1;
	shr.u32 	%r278, %r277, 1;
	add.s32 	%r279, %r278, %r274;
	or.b32  	%r280, %r279, %r71;
	mov.b32 	 %f324, %r280;

BB70_30:
	mul.rn.f32 	%f20, %f324, %f324;
	add.s32 	%r82, %r416, 1;
	and.b32  	%r83, %r82, 1;
	setp.eq.s32	%p27, %r83, 0;
	@%p27 bra 	BB70_32;

	mov.f32 	%f183, 0fBAB6061A;
	mov.f32 	%f184, 0f37CCF5CE;
	fma.rn.f32 	%f325, %f184, %f20, %f183;
	bra.uni 	BB70_33;

BB70_6:
	add.f32 	%f103, %f7, 0fC019E8A9;
	add.f32 	%f104, %f103, 0fB3E971B3;
	mov.f32 	%f105, 0fA9ACA9B3;
	mov.f32 	%f106, 0fA6B3B8E7;
	fma.rn.f32 	%f107, %f106, %f104, %f105;
	mov.f32 	%f108, 0f2C3F0E18;
	fma.rn.f32 	%f109, %f107, %f104, %f108;
	mov.f32 	%f110, 0fACD41781;
	fma.rn.f32 	%f111, %f109, %f104, %f110;
	mov.f32 	%f112, 0fAFE90F38;
	fma.rn.f32 	%f113, %f111, %f104, %f112;
	mov.f32 	%f114, 0f3020305B;
	fma.rn.f32 	%f115, %f113, %f104, %f114;
	mov.f32 	%f116, 0f33797143;
	fma.rn.f32 	%f117, %f115, %f104, %f116;
	mov.f32 	%f118, 0f30F76F85;
	fma.rn.f32 	%f119, %f117, %f104, %f118;
	mov.f32 	%f120, 0fB6B6DFC6;
	fma.rn.f32 	%f121, %f119, %f104, %f120;
	mov.f32 	%f122, 0fB6F665C9;
	fma.rn.f32 	%f123, %f121, %f104, %f122;
	mov.f32 	%f124, 0f399E2DEB;
	fma.rn.f32 	%f125, %f123, %f104, %f124;
	mov.f32 	%f126, 0f3A4AE334;
	fma.rn.f32 	%f127, %f125, %f104, %f126;
	mov.f32 	%f128, 0fBBEEAA1B;
	fma.rn.f32 	%f129, %f127, %f104, %f128;
	mov.f32 	%f130, 0fBCDA7747;
	fma.rn.f32 	%f131, %f129, %f104, %f130;
	mul.f32 	%f132, %f104, %f131;
	add.f32 	%f133, %f7, 0fC0B0A47B;
	add.f32 	%f134, %f133, 0f339A7A37;
	mul.f32 	%f135, %f134, %f132;
	add.f32 	%f136, %f7, 0fC10A75AB;
	add.f32 	%f137, %f136, 0fB4CCCDED;
	mul.f32 	%f328, %f137, %f135;
	bra.uni 	BB70_41;

BB70_45:
	add.f32 	%f224, %f2, 0fC07D4A9A;
	add.f32 	%f225, %f224, 0fB3D9856A;
	mov.f32 	%f226, 0fB45E2607;
	mov.f32 	%f227, 0fB449DD3F;
	fma.rn.f32 	%f228, %f227, %f225, %f226;
	mov.f32 	%f229, 0fB6857064;
	fma.rn.f32 	%f230, %f228, %f225, %f229;
	mov.f32 	%f231, 0f38554610;
	fma.rn.f32 	%f232, %f230, %f225, %f231;
	mov.f32 	%f233, 0f394ACED7;
	fma.rn.f32 	%f234, %f232, %f225, %f233;
	mov.f32 	%f235, 0fBB0F1A0C;
	fma.rn.f32 	%f236, %f234, %f225, %f235;
	mov.f32 	%f237, 0fBBE07F2E;
	fma.rn.f32 	%f238, %f236, %f225, %f237;
	mov.f32 	%f239, 0f3D6FB6B5;
	fma.rn.f32 	%f240, %f238, %f225, %f239;
	mov.f32 	%f241, 0f3D504DF1;
	fma.rn.f32 	%f242, %f240, %f225, %f241;
	mov.f32 	%f243, 0fBECE1A13;
	fma.rn.f32 	%f244, %f242, %f225, %f243;
	mul.f32 	%f335, %f225, %f244;
	bra.uni 	BB70_82;

BB70_47:
	add.f32 	%f245, %f2, 0fC0E2C0EE;
	add.f32 	%f246, %f245, 0fB39CE420;
	mov.f32 	%f247, 0f3629DA6C;
	mov.f32 	%f248, 0f3510CEBE;
	fma.rn.f32 	%f249, %f248, %f246, %f247;
	mov.f32 	%f250, 0fB84054C0;
	fma.rn.f32 	%f251, %f249, %f246, %f250;
	mov.f32 	%f252, 0fB91318AB;
	fma.rn.f32 	%f253, %f251, %f246, %f252;
	mov.f32 	%f254, 0f3B0E9921;
	fma.rn.f32 	%f255, %f253, %f246, %f254;
	mov.f32 	%f256, 0f3B5974D5;
	fma.rn.f32 	%f257, %f255, %f246, %f256;
	mov.f32 	%f258, 0fBD44B4D7;
	fma.rn.f32 	%f259, %f257, %f246, %f258;
	mov.f32 	%f260, 0fBCAD7799;
	fma.rn.f32 	%f261, %f259, %f246, %f260;
	mov.f32 	%f262, 0f3E99A665;
	fma.rn.f32 	%f263, %f261, %f246, %f262;
	mul.f32 	%f335, %f246, %f263;
	bra.uni 	BB70_82;

BB70_32:
	mov.f32 	%f185, 0f3C08839E;
	mov.f32 	%f186, 0fB94CA1F9;
	fma.rn.f32 	%f325, %f186, %f20, %f185;

BB70_33:
	@%p27 bra 	BB70_35;

	mov.f32 	%f187, 0f3D2AAAA5;
	fma.rn.f32 	%f188, %f325, %f20, %f187;
	mov.f32 	%f189, 0fBF000000;
	fma.rn.f32 	%f326, %f188, %f20, %f189;
	bra.uni 	BB70_36;

BB70_35:
	mov.f32 	%f190, 0fBE2AAAA3;
	fma.rn.f32 	%f191, %f325, %f20, %f190;
	mov.f32 	%f192, 0f00000000;
	fma.rn.f32 	%f326, %f191, %f20, %f192;

BB70_36:
	fma.rn.f32 	%f327, %f326, %f324, %f324;
	@%p27 bra 	BB70_38;

	fma.rn.f32 	%f327, %f326, %f20, %f150;

BB70_38:
	and.b32  	%r281, %r82, 2;
	setp.eq.s32	%p30, %r281, 0;
	@%p30 bra 	BB70_40;

	mov.f32 	%f194, 0f00000000;
	mov.f32 	%f195, 0fBF800000;
	fma.rn.f32 	%f327, %f327, %f195, %f194;

BB70_40:
	mul.f32 	%f328, %f10, %f327;

BB70_41:
	mul.f32 	%f196, %f321, 0f3F22F983;
	fma.rn.f32 	%f335, %f196, %f328, %f3;

BB70_82:
	setp.geu.f32	%p56, %f1, 0f00000000;
	@%p56 bra 	BB70_84;

	mov.f32 	%f320, 0fBF800000;
	sqrt.rn.f32 	%f335, %f320;

BB70_84:
	cvta.to.global.u32 	%r387, %r166;
	add.s32 	%r389, %r387, %r174;
	st.global.f32 	[%r389], %f335;

BB70_85:
	ret;

BB70_73:
	mov.f32 	%f309, 0f3C08839E;
	mov.f32 	%f310, 0fB94CA1F9;
	fma.rn.f32 	%f332, %f310, %f49, %f309;

BB70_74:
	@%p52 bra 	BB70_76;

	mov.f32 	%f311, 0f3D2AAAA5;
	fma.rn.f32 	%f312, %f332, %f49, %f311;
	mov.f32 	%f313, 0fBF000000;
	fma.rn.f32 	%f333, %f312, %f49, %f313;
	bra.uni 	BB70_77;

BB70_76:
	mov.f32 	%f314, 0fBE2AAAA3;
	fma.rn.f32 	%f315, %f332, %f49, %f314;
	mov.f32 	%f316, 0f00000000;
	fma.rn.f32 	%f333, %f315, %f49, %f316;

BB70_77:
	fma.rn.f32 	%f334, %f333, %f331, %f331;
	@%p52 bra 	BB70_79;

	fma.rn.f32 	%f334, %f333, %f49, %f274;

BB70_79:
	and.b32  	%r382, %r164, 2;
	setp.eq.s32	%p55, %r382, 0;
	@%p55 bra 	BB70_81;

	mov.f32 	%f318, 0f00000000;
	mov.f32 	%f319, 0fBF800000;
	fma.rn.f32 	%f334, %f334, %f319, %f318;

BB70_81:
	mul.f32 	%f335, %f39, %f334;
	bra.uni 	BB70_82;
}

	// .globl	vec_y1f
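	// vec_y1f: element-wise Bessel function of the second kind, order 1 --
	// result[i] = y1f(x[i]). Same scheme as vec_y0f: zero-centered
	// polynomials over moderate ranges and the sqrt(2/(pi*x)) asymptotic
	// form (0f3F4C422A) with full trig reduction for large x. Inputs below
	// the smallest normal (|x| < 0f00800000) take the BB71_83 shortcut
	// Y1(x) ~ -(2/pi)/x, i.e. 0fBF22F983 / x.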
.visible .entry vec_y1f(
	.param .u32 vec_y1f_param_0,
	.param .u32 vec_y1f_param_1,
	.param .u32 vec_y1f_param_2
)
{
	.local .align 4 .b8 	__local_depot71[28];
	.reg .b32 	%SP;
	.reg .b32 	%SPL;
	.reg .pred 	%p<59>;
	.reg .f32 	%f<332>;
	.reg .b32 	%r<448>;


	mov.u32 	%r447, __local_depot71;
	cvta.local.u32 	%SP, %r447;
	ld.param.u32 	%r169, [vec_y1f_param_0];
	ld.param.u32 	%r167, [vec_y1f_param_1];
	ld.param.u32 	%r168, [vec_y1f_param_2];
	add.u32 	%r170, %SP, 0;
	cvta.to.local.u32 	%r437, %r170;
	mov.u32 	%r171, %ntid.x;
	mov.u32 	%r172, %ctaid.x;
	mov.u32 	%r173, %tid.x;
	mad.lo.s32 	%r2, %r171, %r172, %r173;
	setp.ge.u32	%p1, %r2, %r169;
	@%p1 bra 	BB71_87;

	cvta.to.global.u32 	%r174, %r168;
	shl.b32 	%r175, %r2, 2;
	add.s32 	%r176, %r174, %r175;
	ld.global.f32 	%f1, [%r176];
	abs.f32 	%f2, %f1;
	setp.lt.f32	%p2, %f2, 0f00800000;
	@%p2 bra 	BB71_83;
	bra.uni 	BB71_2;

BB71_83:
	mov.f32 	%f315, 0fBF22F983;
	div.rn.f32 	%f331, %f315, %f2;
	bra.uni 	BB71_84;

BB71_2:
	setp.gtu.f32	%p3, %f2, 0f3FD96AC4;
	@%p3 bra 	BB71_43;
	bra.uni 	BB71_3;

BB71_43:
	setp.gtu.f32	%p33, %f2, 0f40740EEE;
	@%p33 bra 	BB71_45;
	bra.uni 	BB71_44;

BB71_45:
	setp.gtu.f32	%p34, %f2, 0f40E06937;
	@%p34 bra 	BB71_47;
	bra.uni 	BB71_46;

BB71_47:
	setp.gtu.f32	%p35, %f2, 0f4122C2E3;
	@%p35 bra 	BB71_49;
	bra.uni 	BB71_48;

BB71_49:
	abs.f32 	%f260, %f2;
	mov.f32 	%f331, 0f00000000;
	setp.eq.f32	%p36, %f260, 0f7F800000;
	@%p36 bra 	BB71_84;

	// inline asm
	rcp.approx.ftz.f32 %f261,%f2;
	// inline asm
	mul.f32 	%f263, %f261, %f261;
	mov.f32 	%f264, 0fBE44AB90;
	mov.f32 	%f265, 0f3F267F60;
	fma.rn.f32 	%f266, %f265, %f263, %f264;
	mov.f32 	%f267, 0f3E3FFEBF;
	fma.rn.f32 	%f268, %f266, %f263, %f267;
	mov.f32 	%f269, 0f3F800000;
	fma.rn.f32 	%f270, %f268, %f263, %f269;
	mov.f32 	%f271, 0f3EBB73AB;
	mov.f32 	%f272, 0fBFE4E1AB;
	fma.rn.f32 	%f273, %f272, %f263, %f271;
	mov.f32 	%f274, 0fBE27FB6E;
	fma.rn.f32 	%f275, %f273, %f263, %f274;
	mov.f32 	%f276, 0f3EBFFFFF;
	fma.rn.f32 	%f277, %f275, %f263, %f276;
	fma.rn.f32 	%f38, %f277, %f261, %f2;
	rsqrt.approx.f32 	%f278, %f2;
	mul.f32 	%f279, %f278, 0f3F4C422A;
	mul.f32 	%f39, %f270, %f279;
	mul.f32 	%f280, %f38, 0f3F22F983;
	cvt.rni.s32.f32	%r430, %f280;
	cvt.rn.f32.s32	%f281, %r430;
	neg.f32 	%f282, %f281;
	mov.f32 	%f283, 0f3FC90FDA;
	fma.rn.f32 	%f284, %f282, %f283, %f38;
	mov.f32 	%f285, 0f33A22168;
	fma.rn.f32 	%f286, %f282, %f285, %f284;
	mov.f32 	%f287, 0f27C234C5;
	fma.rn.f32 	%f325, %f282, %f287, %f286;
	abs.f32 	%f288, %f38;
	setp.leu.f32	%p37, %f288, 0f47CE4780;
	@%p37 bra 	BB71_60;

	mov.b32 	 %r86, %f38;
	shr.u32 	%r87, %r86, 23;
	bfe.u32 	%r291, %r86, 23, 8;
	add.s32 	%r292, %r291, -128;
	shl.b32 	%r293, %r86, 8;
	or.b32  	%r88, %r293, -2147483648;
	shr.u32 	%r89, %r292, 5;
	mov.u32 	%r422, 0;
	mov.u32 	%r421, 6;
	mov.u32 	%r420, __cudart_i2opi_f;
	mov.u32 	%r436, %r437;

BB71_52:
	.pragma "nounroll";
	ld.const.u32 	%r296, [%r420];
	// inline asm
	{
	mad.lo.cc.u32   %r294, %r296, %r88, %r422;
	madc.hi.u32     %r422, %r296, %r88,  0;
	}
	// inline asm
	st.local.u32 	[%r436], %r294;
	add.s32 	%r436, %r436, 4;
	add.s32 	%r420, %r420, 4;
	add.s32 	%r421, %r421, -1;
	setp.ne.s32	%p38, %r421, 0;
	@%p38 bra 	BB71_52;

	and.b32  	%r98, %r86, -2147483648;
	add.s32 	%r393, %r437, 24;
	st.local.u32 	[%r393], %r422;
	mov.u32 	%r299, 4;
	sub.s32 	%r300, %r299, %r89;
	shl.b32 	%r301, %r300, 2;
	add.s32 	%r302, %r301, %r437;
	ld.local.u32 	%r423, [%r302+8];
	ld.local.u32 	%r424, [%r302+4];
	and.b32  	%r102, %r87, 31;
	setp.eq.s32	%p39, %r102, 0;
	@%p39 bra 	BB71_55;

	mov.u32 	%r303, 32;
	sub.s32 	%r304, %r303, %r102;
	shr.u32 	%r305, %r424, %r304;
	shl.b32 	%r306, %r423, %r102;
	add.s32 	%r423, %r305, %r306;
	add.s32 	%r394, %r302, 8;
	ld.local.u32 	%r307, [%r394+-8];
	shr.u32 	%r308, %r307, %r304;
	shl.b32 	%r309, %r424, %r102;
	add.s32 	%r424, %r308, %r309;

BB71_55:
	shr.u32 	%r310, %r424, 30;
	shl.b32 	%r311, %r423, 2;
	add.s32 	%r425, %r310, %r311;
	shl.b32 	%r108, %r424, 2;
	shr.u32 	%r312, %r425, 31;
	shr.u32 	%r313, %r423, 30;
	add.s32 	%r109, %r312, %r313;
	setp.eq.s32	%p40, %r312, 0;
	mov.u32 	%r426, %r98;
	mov.u32 	%r427, %r108;
	@%p40 bra 	BB71_57;

	not.b32 	%r314, %r425;
	neg.s32 	%r110, %r108;
	setp.eq.s32	%p41, %r108, 0;
	selp.u32	%r315, 1, 0, %p41;
	add.s32 	%r425, %r315, %r314;
	xor.b32  	%r112, %r98, -2147483648;
	mov.u32 	%r426, %r112;
	mov.u32 	%r427, %r110;

BB71_57:
	mov.u32 	%r114, %r426;
	neg.s32 	%r316, %r109;
	setp.eq.s32	%p42, %r98, 0;
	selp.b32	%r430, %r109, %r316, %p42;
	clz.b32 	%r429, %r425;
	setp.eq.s32	%p43, %r429, 0;
	shl.b32 	%r317, %r425, %r429;
	mov.u32 	%r318, 32;
	sub.s32 	%r319, %r318, %r429;
	shr.u32 	%r320, %r427, %r319;
	add.s32 	%r321, %r320, %r317;
	selp.b32	%r118, %r425, %r321, %p43;
	mov.u32 	%r322, -921707870;
	mul.hi.u32 	%r428, %r118, %r322;
	setp.lt.s32	%p44, %r428, 1;
	@%p44 bra 	BB71_59;

	mul.lo.s32 	%r323, %r118, -921707870;
	shr.u32 	%r324, %r323, 31;
	shl.b32 	%r325, %r428, 1;
	add.s32 	%r428, %r324, %r325;
	add.s32 	%r429, %r429, 1;

BB71_59:
	mov.u32 	%r326, 126;
	sub.s32 	%r327, %r326, %r429;
	shl.b32 	%r328, %r327, 23;
	add.s32 	%r329, %r428, 1;
	shr.u32 	%r330, %r329, 7;
	add.s32 	%r331, %r330, 1;
	shr.u32 	%r332, %r331, 1;
	add.s32 	%r333, %r332, %r328;
	or.b32  	%r334, %r333, %r114;
	mov.b32 	 %f325, %r334;

BB71_60:
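	// Quadrant selection: (n mod 4)*pi/2 (0f3FC90FDB) plus a constant
	// phase near -5*pi/4 (0fC07B53D1) -- consistent with a Bessel-type
	// asymptotic cosine term (hedged reading).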
	and.b32  	%r335, %r430, 3;
	cvt.rn.f32.s32	%f289, %r335;
	add.f32 	%f290, %f325, 0fC07B53D1;
	fma.rn.f32 	%f326, %f289, 0f3FC90FDB, %f290;
	abs.f32 	%f291, %f326;
	setp.neu.f32	%p45, %f291, 0f7F800000;
	@%p45 bra 	BB71_62;

	mov.f32 	%f292, 0f00000000;
	mul.rn.f32 	%f326, %f326, %f292;

BB71_62:
	mul.f32 	%f293, %f326, 0f3F22F983;
	cvt.rni.s32.f32	%r446, %f293;
	cvt.rn.f32.s32	%f294, %r446;
	neg.f32 	%f295, %f294;
	fma.rn.f32 	%f297, %f295, %f283, %f326;
	fma.rn.f32 	%f299, %f295, %f285, %f297;
	fma.rn.f32 	%f327, %f295, %f287, %f299;
	abs.f32 	%f301, %f326;
	setp.leu.f32	%p46, %f301, 0f47CE4780;
	@%p46 bra 	BB71_72;

	mov.b32 	 %r126, %f326;
	shr.u32 	%r127, %r126, 23;
	bfe.u32 	%r339, %r126, 23, 8;
	add.s32 	%r340, %r339, -128;
	shl.b32 	%r341, %r126, 8;
	or.b32  	%r128, %r341, -2147483648;
	shr.u32 	%r129, %r340, 5;
	mov.u32 	%r438, 0;
	mov.u32 	%r432, 6;
	mov.u32 	%r431, __cudart_i2opi_f;
	mov.u32 	%r435, %r437;

BB71_64:
	.pragma "nounroll";
	ld.const.u32 	%r344, [%r431];
	// inline asm
	{
	mad.lo.cc.u32   %r342, %r344, %r128, %r438;
	madc.hi.u32     %r438, %r344, %r128,  0;
	}
	// inline asm
	st.local.u32 	[%r435], %r342;
	add.s32 	%r435, %r435, 4;
	add.s32 	%r431, %r431, 4;
	add.s32 	%r432, %r432, -1;
	setp.ne.s32	%p47, %r432, 0;
	@%p47 bra 	BB71_64;

	and.b32  	%r138, %r126, -2147483648;
	add.s32 	%r395, %r437, 24;
	st.local.u32 	[%r395], %r438;
	mov.u32 	%r347, 4;
	sub.s32 	%r348, %r347, %r129;
	shl.b32 	%r349, %r348, 2;
	add.s32 	%r350, %r349, %r437;
	ld.local.u32 	%r439, [%r350+8];
	ld.local.u32 	%r440, [%r350+4];
	and.b32  	%r142, %r127, 31;
	setp.eq.s32	%p48, %r142, 0;
	@%p48 bra 	BB71_67;

	mov.u32 	%r351, 32;
	sub.s32 	%r352, %r351, %r142;
	shr.u32 	%r353, %r440, %r352;
	shl.b32 	%r354, %r439, %r142;
	add.s32 	%r439, %r353, %r354;
	add.s32 	%r396, %r350, 8;
	ld.local.u32 	%r355, [%r396+-8];
	shr.u32 	%r356, %r355, %r352;
	shl.b32 	%r357, %r440, %r142;
	add.s32 	%r440, %r356, %r357;

BB71_67:
	shr.u32 	%r358, %r440, 30;
	shl.b32 	%r359, %r439, 2;
	add.s32 	%r441, %r358, %r359;
	shl.b32 	%r148, %r440, 2;
	shr.u32 	%r360, %r441, 31;
	shr.u32 	%r361, %r439, 30;
	add.s32 	%r149, %r360, %r361;
	setp.eq.s32	%p49, %r360, 0;
	mov.u32 	%r442, %r138;
	mov.u32 	%r443, %r148;
	@%p49 bra 	BB71_69;

	not.b32 	%r362, %r441;
	neg.s32 	%r150, %r148;
	setp.eq.s32	%p50, %r148, 0;
	selp.u32	%r363, 1, 0, %p50;
	add.s32 	%r441, %r363, %r362;
	xor.b32  	%r152, %r138, -2147483648;
	mov.u32 	%r442, %r152;
	mov.u32 	%r443, %r150;

BB71_69:
	mov.u32 	%r154, %r442;
	neg.s32 	%r364, %r149;
	setp.eq.s32	%p51, %r138, 0;
	selp.b32	%r446, %r149, %r364, %p51;
	clz.b32 	%r445, %r441;
	setp.eq.s32	%p52, %r445, 0;
	shl.b32 	%r365, %r441, %r445;
	mov.u32 	%r366, 32;
	sub.s32 	%r367, %r366, %r445;
	shr.u32 	%r368, %r443, %r367;
	add.s32 	%r369, %r368, %r365;
	selp.b32	%r158, %r441, %r369, %p52;
	mov.u32 	%r370, -921707870;
	mul.hi.u32 	%r444, %r158, %r370;
	setp.lt.s32	%p53, %r444, 1;
	@%p53 bra 	BB71_71;

	mul.lo.s32 	%r371, %r158, -921707870;
	shr.u32 	%r372, %r371, 31;
	shl.b32 	%r373, %r444, 1;
	add.s32 	%r444, %r372, %r373;
	add.s32 	%r445, %r445, 1;

BB71_71:
	mov.u32 	%r374, 126;
	sub.s32 	%r375, %r374, %r445;
	shl.b32 	%r376, %r375, 23;
	add.s32 	%r377, %r444, 1;
	shr.u32 	%r378, %r377, 7;
	add.s32 	%r379, %r378, 1;
	shr.u32 	%r380, %r379, 1;
	add.s32 	%r381, %r380, %r376;
	or.b32  	%r382, %r381, %r154;
	mov.b32 	 %f327, %r382;

BB71_72:
	mul.rn.f32 	%f49, %f327, %f327;
	add.s32 	%r165, %r446, 1;
	and.b32  	%r166, %r165, 1;
	setp.eq.s32	%p54, %r166, 0;
	@%p54 bra 	BB71_74;

	mov.f32 	%f302, 0fBAB6061A;
	mov.f32 	%f303, 0f37CCF5CE;
	fma.rn.f32 	%f328, %f303, %f49, %f302;
	bra.uni 	BB71_75;

BB71_3:
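	// Small-argument path: even polynomial in x*x (%f66); the result
	// %f3 is folded into the logarithmic combination at BB71_42.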
	mul.f32 	%f66, %f2, %f2;
	mov.f32 	%f67, 0fB58527DA;
	mov.f32 	%f68, 0f321462CC;
	fma.rn.f32 	%f69, %f68, %f66, %f67;
	mov.f32 	%f70, 0f38963E95;
	fma.rn.f32 	%f71, %f69, %f66, %f70;
	mov.f32 	%f72, 0fBB41ADCB;
	fma.rn.f32 	%f73, %f71, %f66, %f72;
	mov.f32 	%f74, 0f3D5E9CBB;
	fma.rn.f32 	%f75, %f73, %f66, %f74;
	mov.f32 	%f76, 0fBE48C331;
	fma.rn.f32 	%f3, %f75, %f66, %f76;
	setp.lt.f32	%p4, %f2, 0f7F800000;
	setp.gt.f32	%p5, %f2, 0f00000000;
	and.pred  	%p6, %p5, %p4;
	@%p6 bra 	BB71_5;
	bra.uni 	BB71_4;

BB71_5:
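	// In-lined natural log: split x into mantissa and exponent, take a
	// polynomial in the reduced mantissa, then add 0f3F317218 = ln(2)
	// times the exponent (result lands in %f317).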
	mov.b32 	 %r177, %f2;
	and.b32  	%r178, %r177, 8388607;
	or.b32  	%r179, %r178, 1065353216;
	mov.b32 	 %f79, %r179;
	shr.u32 	%r180, %r177, 23;
	cvt.rn.f32.u32	%f80, %r180;
	add.f32 	%f81, %f80, 0fC2FE0000;
	setp.gt.f32	%p7, %f79, 0f3FAE147B;
	mul.f32 	%f82, %f79, 0f3F000000;
	add.f32 	%f83, %f81, 0f3F800000;
	selp.f32	%f84, %f82, %f79, %p7;
	selp.f32	%f85, %f83, %f81, %p7;
	add.f32 	%f78, %f84, 0f3F800000;
	add.f32 	%f86, %f84, 0fBF800000;
	// inline asm
	rcp.approx.ftz.f32 %f77,%f78;
	// inline asm
	mul.f32 	%f87, %f86, %f86;
	neg.f32 	%f88, %f87;
	mul.rn.f32 	%f89, %f77, %f88;
	add.rn.f32 	%f90, %f86, %f89;
	mul.f32 	%f91, %f90, %f90;
	mov.f32 	%f92, 0f3C4C6A36;
	mov.f32 	%f93, 0f3B1E94E6;
	fma.rn.f32 	%f94, %f93, %f91, %f92;
	mov.f32 	%f95, 0f3DAAAB1A;
	fma.rn.f32 	%f96, %f94, %f91, %f95;
	mul.f32 	%f97, %f91, %f96;
	fma.rn.f32 	%f98, %f97, %f90, %f89;
	add.f32 	%f99, %f86, %f98;
	mov.f32 	%f100, 0f3F317218;
	fma.rn.f32 	%f317, %f85, %f100, %f99;
	bra.uni 	BB71_6;

BB71_44:
	add.f32 	%f198, %f2, 0fC00C9DF7;
	add.f32 	%f199, %f198, 0f33B200DC;
	mov.f32 	%f200, 0f39064A88;
	mov.f32 	%f201, 0fB789E29D;
	fma.rn.f32 	%f202, %f201, %f199, %f200;
	mov.f32 	%f203, 0fB9F0AB0D;
	fma.rn.f32 	%f204, %f202, %f199, %f203;
	mov.f32 	%f205, 0f3A8F6102;
	fma.rn.f32 	%f206, %f204, %f199, %f205;
	mov.f32 	%f207, 0fBB2C7045;
	fma.rn.f32 	%f208, %f206, %f199, %f207;
	mov.f32 	%f209, 0f3BF35DF7;
	fma.rn.f32 	%f210, %f208, %f199, %f209;
	mov.f32 	%f211, 0fBB9D097C;
	fma.rn.f32 	%f212, %f210, %f199, %f211;
	mov.f32 	%f213, 0fBD06968A;
	fma.rn.f32 	%f214, %f212, %f199, %f213;
	mov.f32 	%f215, 0fBDF2B7DF;
	fma.rn.f32 	%f216, %f214, %f199, %f215;
	mov.f32 	%f217, 0f3F055242;
	fma.rn.f32 	%f218, %f216, %f199, %f217;
	mul.f32 	%f331, %f199, %f218;
	bra.uni 	BB71_84;

BB71_4:
	lg2.approx.f32 	%f317, %f2;

BB71_6:
	abs.f32 	%f7, %f2;
	setp.gtu.f32	%p8, %f7, 0f40FB3333;
	@%p8 bra 	BB71_8;
	bra.uni 	BB71_7;

BB71_8:
	abs.f32 	%f133, %f7;
	mov.f32 	%f324, 0f00000000;
	setp.eq.f32	%p9, %f133, 0f7F800000;
	@%p9 bra 	BB71_42;

	// inline asm
	rcp.approx.ftz.f32 %f134,%f7;
	// inline asm
	mul.f32 	%f136, %f134, %f134;
	mov.f32 	%f137, 0f3F3FF7E9;
	mov.f32 	%f138, 0fC082CB37;
	fma.rn.f32 	%f139, %f138, %f136, %f137;
	mov.f32 	%f140, 0fBE458BAE;
	fma.rn.f32 	%f141, %f139, %f136, %f140;
	mov.f32 	%f142, 0f3E3FFF8B;
	fma.rn.f32 	%f143, %f141, %f136, %f142;
	mov.f32 	%f144, 0f3F800000;
	fma.rn.f32 	%f145, %f143, %f136, %f144;
	mov.f32 	%f146, 0f3EB914AD;
	mov.f32 	%f147, 0fBFCA3BA2;
	fma.rn.f32 	%f148, %f147, %f136, %f146;
	mov.f32 	%f149, 0fBE27F2EC;
	fma.rn.f32 	%f150, %f148, %f136, %f149;
	mov.f32 	%f151, 0f3EBFFFFD;
	fma.rn.f32 	%f152, %f150, %f136, %f151;
	fma.rn.f32 	%f9, %f152, %f134, %f7;
	rsqrt.approx.f32 	%f153, %f7;
	mul.f32 	%f154, %f153, 0f3F4C422A;
	mul.f32 	%f10, %f145, %f154;
	mul.f32 	%f155, %f9, 0f3F22F983;
	cvt.rni.s32.f32	%r407, %f155;
	cvt.rn.f32.s32	%f156, %r407;
	neg.f32 	%f157, %f156;
	mov.f32 	%f158, 0f3FC90FDA;
	fma.rn.f32 	%f159, %f157, %f158, %f9;
	mov.f32 	%f160, 0f33A22168;
	fma.rn.f32 	%f161, %f157, %f160, %f159;
	mov.f32 	%f162, 0f27C234C5;
	fma.rn.f32 	%f318, %f157, %f162, %f161;
	abs.f32 	%f163, %f9;
	setp.leu.f32	%p10, %f163, 0f47CE4780;
	@%p10 bra 	BB71_19;

	mov.b32 	 %r4, %f9;
	shl.b32 	%r184, %r4, 8;
	or.b32  	%r5, %r184, -2147483648;
	mov.u32 	%r399, 0;
	mov.u32 	%r398, 6;
	mov.u32 	%r397, __cudart_i2opi_f;

BB71_11:
	.pragma "nounroll";
	ld.const.u32 	%r187, [%r397];
	// inline asm
	{
	mad.lo.cc.u32   %r185, %r187, %r5, %r399;
	madc.hi.u32     %r399, %r187, %r5,  0;
	}
	// inline asm
	st.local.u32 	[%r437], %r185;
	add.s32 	%r437, %r437, 4;
	add.s32 	%r397, %r397, 4;
	add.s32 	%r398, %r398, -1;
	setp.ne.s32	%p11, %r398, 0;
	@%p11 bra 	BB71_11;

	and.b32  	%r14, %r4, -2147483648;
	bfe.u32 	%r190, %r4, 23, 8;
	add.s32 	%r191, %r190, -128;
	shr.u32 	%r192, %r191, 5;
	mov.u32 	%r193, 4;
	sub.s32 	%r194, %r193, %r192;
	cvta.to.local.u32 	%r196, %r170;
	shl.b32 	%r197, %r194, 2;
	add.s32 	%r198, %r197, %r196;
	st.local.u32 	[%r196+24], %r399;
	bfe.u32 	%r15, %r4, 23, 5;
	ld.local.u32 	%r400, [%r198+8];
	ld.local.u32 	%r401, [%r198+4];
	setp.eq.s32	%p12, %r15, 0;
	@%p12 bra 	BB71_14;

	mov.u32 	%r199, 32;
	sub.s32 	%r200, %r199, %r15;
	shr.u32 	%r201, %r401, %r200;
	shl.b32 	%r202, %r400, %r15;
	add.s32 	%r400, %r201, %r202;
	add.s32 	%r391, %r198, 8;
	ld.local.u32 	%r203, [%r391+-8];
	shr.u32 	%r204, %r203, %r200;
	shl.b32 	%r205, %r401, %r15;
	add.s32 	%r401, %r204, %r205;

BB71_14:
	shr.u32 	%r206, %r401, 30;
	shl.b32 	%r207, %r400, 2;
	add.s32 	%r402, %r206, %r207;
	shl.b32 	%r24, %r401, 2;
	shr.u32 	%r208, %r402, 31;
	shr.u32 	%r209, %r400, 30;
	add.s32 	%r25, %r208, %r209;
	setp.eq.s32	%p13, %r208, 0;
	mov.u32 	%r403, %r14;
	mov.u32 	%r404, %r24;
	@%p13 bra 	BB71_16;

	not.b32 	%r210, %r402;
	neg.s32 	%r26, %r24;
	setp.eq.s32	%p14, %r24, 0;
	selp.u32	%r211, 1, 0, %p14;
	add.s32 	%r402, %r211, %r210;
	xor.b32  	%r28, %r14, -2147483648;
	mov.u32 	%r403, %r28;
	mov.u32 	%r404, %r26;

BB71_16:
	mov.u32 	%r30, %r403;
	neg.s32 	%r212, %r25;
	setp.eq.s32	%p15, %r14, 0;
	selp.b32	%r407, %r25, %r212, %p15;
	clz.b32 	%r406, %r402;
	setp.eq.s32	%p16, %r406, 0;
	shl.b32 	%r213, %r402, %r406;
	mov.u32 	%r214, 32;
	sub.s32 	%r215, %r214, %r406;
	shr.u32 	%r216, %r404, %r215;
	add.s32 	%r217, %r216, %r213;
	selp.b32	%r34, %r402, %r217, %p16;
	mov.u32 	%r218, -921707870;
	mul.hi.u32 	%r405, %r34, %r218;
	setp.lt.s32	%p17, %r405, 1;
	@%p17 bra 	BB71_18;

	mul.lo.s32 	%r219, %r34, -921707870;
	shr.u32 	%r220, %r219, 31;
	shl.b32 	%r221, %r405, 1;
	add.s32 	%r405, %r220, %r221;
	add.s32 	%r406, %r406, 1;

BB71_18:
	mov.u32 	%r222, 126;
	sub.s32 	%r223, %r222, %r406;
	shl.b32 	%r224, %r223, 23;
	add.s32 	%r225, %r405, 1;
	shr.u32 	%r226, %r225, 7;
	add.s32 	%r227, %r226, 1;
	shr.u32 	%r228, %r227, 1;
	add.s32 	%r229, %r228, %r224;
	or.b32  	%r230, %r229, %r30;
	mov.b32 	 %f318, %r230;

BB71_19:
	and.b32  	%r231, %r407, 3;
	cvt.rn.f32.s32	%f164, %r231;
	add.f32 	%f165, %f318, 0fC016CBE4;
	fma.rn.f32 	%f319, %f164, 0f3FC90FDB, %f165;
	abs.f32 	%f166, %f319;
	setp.neu.f32	%p18, %f166, 0f7F800000;
	@%p18 bra 	BB71_21;

	mov.f32 	%f167, 0f00000000;
	mul.rn.f32 	%f319, %f319, %f167;

BB71_21:
	mul.f32 	%f168, %f319, 0f3F22F983;
	cvt.rni.s32.f32	%r419, %f168;
	cvt.rn.f32.s32	%f169, %r419;
	neg.f32 	%f170, %f169;
	fma.rn.f32 	%f172, %f170, %f158, %f319;
	fma.rn.f32 	%f174, %f170, %f160, %f172;
	fma.rn.f32 	%f320, %f170, %f162, %f174;
	abs.f32 	%f176, %f319;
	setp.leu.f32	%p19, %f176, 0f47CE4780;
	@%p19 bra 	BB71_31;

	mov.b32 	 %r42, %f319;
	shr.u32 	%r43, %r42, 23;
	bfe.u32 	%r235, %r42, 23, 8;
	add.s32 	%r236, %r235, -128;
	shl.b32 	%r237, %r42, 8;
	or.b32  	%r44, %r237, -2147483648;
	shr.u32 	%r45, %r236, 5;
	cvta.to.local.u32 	%r410, %r170;
	mov.u32 	%r411, 0;
	mov.u32 	%r409, 6;
	mov.u32 	%r408, __cudart_i2opi_f;

BB71_23:
	.pragma "nounroll";
	ld.const.u32 	%r241, [%r408];
	// inline asm
	{
	mad.lo.cc.u32   %r239, %r241, %r44, %r411;
	madc.hi.u32     %r411, %r241, %r44,  0;
	}
	// inline asm
	st.local.u32 	[%r410], %r239;
	add.s32 	%r410, %r410, 4;
	add.s32 	%r408, %r408, 4;
	add.s32 	%r409, %r409, -1;
	setp.ne.s32	%p20, %r409, 0;
	@%p20 bra 	BB71_23;

	and.b32  	%r55, %r42, -2147483648;
	cvta.to.local.u32 	%r245, %r170;
	mov.u32 	%r246, 4;
	sub.s32 	%r247, %r246, %r45;
	shl.b32 	%r248, %r247, 2;
	add.s32 	%r249, %r248, %r245;
	st.local.u32 	[%r245+24], %r411;
	ld.local.u32 	%r412, [%r249+8];
	ld.local.u32 	%r413, [%r249+4];
	and.b32  	%r59, %r43, 31;
	setp.eq.s32	%p21, %r59, 0;
	@%p21 bra 	BB71_26;

	mov.u32 	%r250, 32;
	sub.s32 	%r251, %r250, %r59;
	shr.u32 	%r252, %r413, %r251;
	shl.b32 	%r253, %r412, %r59;
	add.s32 	%r412, %r252, %r253;
	add.s32 	%r392, %r249, 8;
	ld.local.u32 	%r254, [%r392+-8];
	shr.u32 	%r255, %r254, %r251;
	shl.b32 	%r256, %r413, %r59;
	add.s32 	%r413, %r255, %r256;

BB71_26:
	shr.u32 	%r257, %r413, 30;
	shl.b32 	%r258, %r412, 2;
	add.s32 	%r414, %r257, %r258;
	shl.b32 	%r65, %r413, 2;
	shr.u32 	%r259, %r414, 31;
	shr.u32 	%r260, %r412, 30;
	add.s32 	%r66, %r259, %r260;
	setp.eq.s32	%p22, %r259, 0;
	mov.u32 	%r415, %r55;
	mov.u32 	%r416, %r65;
	@%p22 bra 	BB71_28;

	not.b32 	%r261, %r414;
	neg.s32 	%r67, %r65;
	setp.eq.s32	%p23, %r65, 0;
	selp.u32	%r262, 1, 0, %p23;
	add.s32 	%r414, %r262, %r261;
	xor.b32  	%r69, %r55, -2147483648;
	mov.u32 	%r415, %r69;
	mov.u32 	%r416, %r67;

BB71_28:
	mov.u32 	%r71, %r415;
	neg.s32 	%r263, %r66;
	setp.eq.s32	%p24, %r55, 0;
	selp.b32	%r419, %r66, %r263, %p24;
	clz.b32 	%r418, %r414;
	setp.eq.s32	%p25, %r418, 0;
	shl.b32 	%r264, %r414, %r418;
	mov.u32 	%r265, 32;
	sub.s32 	%r266, %r265, %r418;
	shr.u32 	%r267, %r416, %r266;
	add.s32 	%r268, %r267, %r264;
	selp.b32	%r75, %r414, %r268, %p25;
	mov.u32 	%r269, -921707870;
	mul.hi.u32 	%r417, %r75, %r269;
	setp.lt.s32	%p26, %r417, 1;
	@%p26 bra 	BB71_30;

	mul.lo.s32 	%r270, %r75, -921707870;
	shr.u32 	%r271, %r270, 31;
	shl.b32 	%r272, %r417, 1;
	add.s32 	%r417, %r271, %r272;
	add.s32 	%r418, %r418, 1;

BB71_30:
	mov.u32 	%r273, 126;
	sub.s32 	%r274, %r273, %r418;
	shl.b32 	%r275, %r274, 23;
	add.s32 	%r276, %r417, 1;
	shr.u32 	%r277, %r276, 7;
	add.s32 	%r278, %r277, 1;
	shr.u32 	%r279, %r278, 1;
	add.s32 	%r280, %r279, %r275;
	or.b32  	%r281, %r280, %r71;
	mov.b32 	 %f320, %r281;

BB71_31:
	mul.rn.f32 	%f20, %f320, %f320;
	add.s32 	%r82, %r419, 1;
	and.b32  	%r83, %r82, 1;
	setp.eq.s32	%p27, %r83, 0;
	@%p27 bra 	BB71_33;

	mov.f32 	%f177, 0fBAB6061A;
	mov.f32 	%f178, 0f37CCF5CE;
	fma.rn.f32 	%f321, %f178, %f20, %f177;
	bra.uni 	BB71_34;

BB71_7:
	add.f32 	%f101, %f7, 0fC0753AAC;
	add.f32 	%f102, %f101, 0f33A5090F;
	mov.f32 	%f103, 0f2B81BF42;
	mov.f32 	%f104, 0f29AF3463;
	fma.rn.f32 	%f105, %f104, %f102, %f103;
	mov.f32 	%f106, 0fADE21EC1;
	fma.rn.f32 	%f107, %f105, %f102, %f106;
	mov.f32 	%f108, 0fAF5DDEFF;
	fma.rn.f32 	%f109, %f107, %f102, %f108;
	mov.f32 	%f110, 0f319B0C9D;
	fma.rn.f32 	%f111, %f109, %f102, %f110;
	mov.f32 	%f112, 0f32E81173;
	fma.rn.f32 	%f113, %f111, %f102, %f112;
	mov.f32 	%f114, 0fB50F8DC8;
	fma.rn.f32 	%f115, %f113, %f102, %f114;
	mov.f32 	%f116, 0fB61E653D;
	fma.rn.f32 	%f117, %f115, %f102, %f116;
	mov.f32 	%f118, 0f382CD9C5;
	fma.rn.f32 	%f119, %f117, %f102, %f118;
	mov.f32 	%f120, 0f38F9EB10;
	fma.rn.f32 	%f121, %f119, %f102, %f120;
	mov.f32 	%f122, 0fBAECEB9C;
	fma.rn.f32 	%f123, %f121, %f102, %f122;
	mov.f32 	%f124, 0fBB276FFD;
	fma.rn.f32 	%f125, %f123, %f102, %f124;
	mov.f32 	%f126, 0f3D073993;
	fma.rn.f32 	%f127, %f125, %f102, %f126;
	add.f32 	%f128, %f7, 0fC0E07FB0;
	add.f32 	%f129, %f128, 0f3444B8DB;
	mul.f32 	%f130, %f129, %f127;
	mul.f32 	%f131, %f102, %f130;
	mul.f32 	%f324, %f7, %f131;
	bra.uni 	BB71_42;

BB71_46:
	add.f32 	%f219, %f2, 0fC0ADBFF2;
	add.f32 	%f220, %f219, 0fB4687B03;
	mov.f32 	%f221, 0fB508A416;
	mov.f32 	%f222, 0f32BE57D0;
	fma.rn.f32 	%f223, %f222, %f220, %f221;
	mov.f32 	%f224, 0fB63F8A14;
	fma.rn.f32 	%f225, %f223, %f220, %f224;
	mov.f32 	%f226, 0f38427E02;
	fma.rn.f32 	%f227, %f225, %f220, %f226;
	mov.f32 	%f228, 0f3919BB1C;
	fma.rn.f32 	%f229, %f227, %f220, %f228;
	mov.f32 	%f230, 0fBB0DF1FD;
	fma.rn.f32 	%f231, %f229, %f220, %f230;
	mov.f32 	%f232, 0fBB885189;
	fma.rn.f32 	%f233, %f231, %f220, %f232;
	mov.f32 	%f234, 0f3D50AEC1;
	fma.rn.f32 	%f235, %f233, %f220, %f234;
	mov.f32 	%f236, 0f3D005CFC;
	fma.rn.f32 	%f237, %f235, %f220, %f236;
	mov.f32 	%f238, 0fBEAE3E2B;
	fma.rn.f32 	%f239, %f237, %f220, %f238;
	mul.f32 	%f331, %f220, %f239;
	bra.uni 	BB71_84;

BB71_48:
	add.f32 	%f240, %f2, 0fC109893D;
	add.f32 	%f241, %f240, 0fB4E6169B;
	mov.f32 	%f242, 0f3602902E;
	mov.f32 	%f243, 0f350CF383;
	fma.rn.f32 	%f244, %f243, %f241, %f242;
	mov.f32 	%f245, 0fB8375F71;
	fma.rn.f32 	%f246, %f244, %f241, %f245;
	mov.f32 	%f247, 0fB8D9FAA8;
	fma.rn.f32 	%f248, %f246, %f241, %f247;
	mov.f32 	%f249, 0f3B03D19A;
	fma.rn.f32 	%f250, %f248, %f241, %f249;
	mov.f32 	%f251, 0f3B1E736D;
	fma.rn.f32 	%f252, %f250, %f241, %f251;
	mov.f32 	%f253, 0fBD31CAE5;
	fma.rn.f32 	%f254, %f252, %f241, %f253;
	mov.f32 	%f255, 0fBC8159B6;
	fma.rn.f32 	%f256, %f254, %f241, %f255;
	mov.f32 	%f257, 0f3E8AFCCA;
	fma.rn.f32 	%f258, %f256, %f241, %f257;
	mul.f32 	%f331, %f241, %f258;
	bra.uni 	BB71_84;

BB71_33:
	mov.f32 	%f179, 0f3C08839E;
	mov.f32 	%f180, 0fB94CA1F9;
	fma.rn.f32 	%f321, %f180, %f20, %f179;

BB71_34:
	@%p27 bra 	BB71_36;

	mov.f32 	%f181, 0f3D2AAAA5;
	fma.rn.f32 	%f182, %f321, %f20, %f181;
	mov.f32 	%f183, 0fBF000000;
	fma.rn.f32 	%f322, %f182, %f20, %f183;
	bra.uni 	BB71_37;

BB71_36:
	mov.f32 	%f184, 0fBE2AAAA3;
	fma.rn.f32 	%f185, %f321, %f20, %f184;
	mov.f32 	%f186, 0f00000000;
	fma.rn.f32 	%f322, %f185, %f20, %f186;

BB71_37:
	fma.rn.f32 	%f323, %f322, %f320, %f320;
	@%p27 bra 	BB71_39;

	fma.rn.f32 	%f323, %f322, %f20, %f144;

BB71_39:
	and.b32  	%r282, %r82, 2;
	setp.eq.s32	%p30, %r282, 0;
	@%p30 bra 	BB71_41;

	mov.f32 	%f188, 0f00000000;
	mov.f32 	%f189, 0fBF800000;
	fma.rn.f32 	%f323, %f323, %f189, %f188;

BB71_41:
	mul.f32 	%f324, %f10, %f323;

BB71_42:
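	// Final combination: %f195 = -1/x, %f196 = ln(x)*<cosine-series
	// value> - 1/x, scaled by 2/pi (0f3F22F983) and folded with the
	// x*P(x^2) series from BB71_3 -- the shape of the Bessel Y1
	// small-argument formula (a hedged reading).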
	neg.f32 	%f190, %f324;
	setp.lt.f32	%p31, %f2, 0f00000000;
	selp.f32	%f191, %f190, %f324, %p31;
	mov.b32 	 %r283, %f2;
	and.b32  	%r284, %r283, -2147483648;
	mov.b32 	 %r285, %f191;
	and.b32  	%r286, %r285, 2147483647;
	or.b32  	%r287, %r286, %r284;
	mov.b32 	 %f192, %r287;
	setp.lt.f32	%p32, %f7, 0f0DA24260;
	selp.f32	%f193, %f192, %f191, %p32;
	mov.f32 	%f194, 0fBF800000;
	div.rn.f32 	%f195, %f194, %f2;
	fma.rn.f32 	%f196, %f317, %f193, %f195;
	mul.f32 	%f197, %f196, 0f3F22F983;
	fma.rn.f32 	%f331, %f2, %f3, %f197;

BB71_84:
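	// Negative inputs are out of domain: for %f1 < 0 the stored value
	// is forced to NaN via sqrt(-1.0) below.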
	setp.geu.f32	%p58, %f1, 0f00000000;
	@%p58 bra 	BB71_86;

	mov.f32 	%f316, 0fBF800000;
	sqrt.rn.f32 	%f331, %f316;

BB71_86:
	cvta.to.global.u32 	%r388, %r167;
	add.s32 	%r390, %r388, %r175;
	st.global.f32 	[%r390], %f331;

BB71_87:
	ret;

BB71_74:
	mov.f32 	%f304, 0f3C08839E;
	mov.f32 	%f305, 0fB94CA1F9;
	fma.rn.f32 	%f328, %f305, %f49, %f304;

BB71_75:
	@%p54 bra 	BB71_77;

	mov.f32 	%f306, 0f3D2AAAA5;
	fma.rn.f32 	%f307, %f328, %f49, %f306;
	mov.f32 	%f308, 0fBF000000;
	fma.rn.f32 	%f329, %f307, %f49, %f308;
	bra.uni 	BB71_78;

BB71_77:
	mov.f32 	%f309, 0fBE2AAAA3;
	fma.rn.f32 	%f310, %f328, %f49, %f309;
	mov.f32 	%f311, 0f00000000;
	fma.rn.f32 	%f329, %f310, %f49, %f311;

BB71_78:
	fma.rn.f32 	%f330, %f329, %f327, %f327;
	@%p54 bra 	BB71_80;

	fma.rn.f32 	%f330, %f329, %f49, %f269;

BB71_80:
	and.b32  	%r383, %r165, 2;
	setp.eq.s32	%p57, %r383, 0;
	@%p57 bra 	BB71_82;

	mov.f32 	%f313, 0f00000000;
	mov.f32 	%f314, 0fBF800000;
	fma.rn.f32 	%f330, %f330, %f314, %f313;

BB71_82:
	mul.f32 	%f331, %f39, %f330;
	bra.uni 	BB71_84;
}

	// .globl	vec_copysignf
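	// Hedged CUDA C reconstruction of this kernel, read off the PTX below
	// (not the shipped source): param 0 is the element count, param 1 the
	// output array, params 2 and 3 the inputs; the setp.ge.u32 test is the
	// usual bounds guard on the global thread id.
	//
	//   extern "C" __global__ void vec_copysignf(int n, float *result,
	//                                            float *x, float *y)
	//   {
	//       int i = blockIdx.x * blockDim.x + threadIdx.x;  // mad.lo.s32
	//       if (i < n)                                      // setp.ge.u32 guard
	//           result[i] = copysignf(x[i], y[i]);          // sign-bit and/or ops
	//   }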
.visible .entry vec_copysignf(
	.param .u32 vec_copysignf_param_0,
	.param .u32 vec_copysignf_param_1,
	.param .u32 vec_copysignf_param_2,
	.param .u32 vec_copysignf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .b32 	%r<21>;


	ld.param.u32 	%r5, [vec_copysignf_param_0];
	ld.param.u32 	%r2, [vec_copysignf_param_1];
	ld.param.u32 	%r3, [vec_copysignf_param_2];
	ld.param.u32 	%r4, [vec_copysignf_param_3];
	mov.u32 	%r6, %tid.x;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r6;
	setp.ge.u32	%p1, %r1, %r5;
	@%p1 bra 	BB72_2;

	cvta.to.global.u32 	%r9, %r3;
	shl.b32 	%r10, %r1, 2;
	add.s32 	%r11, %r9, %r10;
	cvta.to.global.u32 	%r12, %r4;
	add.s32 	%r13, %r12, %r10;
	ld.global.u32 	%r14, [%r13];
	and.b32  	%r15, %r14, -2147483648;
	ld.global.u32 	%r16, [%r11];
	and.b32  	%r17, %r16, 2147483647;
	or.b32  	%r18, %r15, %r17;
	cvta.to.global.u32 	%r19, %r2;
	add.s32 	%r20, %r19, %r10;
	st.global.u32 	[%r20], %r18;

BB72_2:
	ret;
}

	// .globl	vec_fdimf
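	// Hedged reading, same guarded element-wise pattern as vec_copysignf:
	//   extern "C" __global__ void vec_fdimf(int n, float *result, float *x, float *y) {
	//       int i = blockIdx.x * blockDim.x + threadIdx.x;
	//       if (i < n) result[i] = fdimf(x[i], y[i]);  // sub.f32, setp.gtu, selp
	//   }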
.visible .entry vec_fdimf(
	.param .u32 vec_fdimf_param_0,
	.param .u32 vec_fdimf_param_1,
	.param .u32 vec_fdimf_param_2,
	.param .u32 vec_fdimf_param_3
)
{
	.reg .pred 	%p<3>;
	.reg .f32 	%f<5>;
	.reg .b32 	%r<16>;


	ld.param.u32 	%r5, [vec_fdimf_param_0];
	ld.param.u32 	%r2, [vec_fdimf_param_1];
	ld.param.u32 	%r3, [vec_fdimf_param_2];
	ld.param.u32 	%r4, [vec_fdimf_param_3];
	mov.u32 	%r6, %tid.x;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r6;
	setp.ge.u32	%p1, %r1, %r5;
	@%p1 bra 	BB73_2;

	cvta.to.global.u32 	%r9, %r3;
	shl.b32 	%r10, %r1, 2;
	add.s32 	%r11, %r9, %r10;
	cvta.to.global.u32 	%r12, %r4;
	add.s32 	%r13, %r12, %r10;
	ld.global.f32 	%f1, [%r13];
	ld.global.f32 	%f2, [%r11];
	sub.f32 	%f3, %f2, %f1;
	setp.gtu.f32	%p2, %f2, %f1;
	selp.f32	%f4, %f3, 0f00000000, %p2;
	cvta.to.global.u32 	%r14, %r2;
	add.s32 	%r15, %r14, %r10;
	st.global.f32 	[%r15], %f4;

BB73_2:
	ret;
}

	// .globl	vec_fdividef
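	// Hedged reading of the PTX below:
	//   extern "C" __global__ void vec_fdividef(int n, float *result, float *x, float *y) {
	//       int i = blockIdx.x * blockDim.x + threadIdx.x;
	//       if (i < n) result[i] = x[i] / y[i];  // div.rn.f32: IEEE round-to-nearest
	//   }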
.visible .entry vec_fdividef(
	.param .u32 vec_fdividef_param_0,
	.param .u32 vec_fdividef_param_1,
	.param .u32 vec_fdividef_param_2,
	.param .u32 vec_fdividef_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<16>;


	ld.param.u32 	%r5, [vec_fdividef_param_0];
	ld.param.u32 	%r2, [vec_fdividef_param_1];
	ld.param.u32 	%r3, [vec_fdividef_param_2];
	ld.param.u32 	%r4, [vec_fdividef_param_3];
	mov.u32 	%r6, %tid.x;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r6;
	setp.ge.u32	%p1, %r1, %r5;
	@%p1 bra 	BB74_2;

	cvta.to.global.u32 	%r9, %r3;
	shl.b32 	%r10, %r1, 2;
	add.s32 	%r11, %r9, %r10;
	cvta.to.global.u32 	%r12, %r4;
	add.s32 	%r13, %r12, %r10;
	ld.global.f32 	%f1, [%r13];
	ld.global.f32 	%f2, [%r11];
	div.rn.f32 	%f3, %f2, %f1;
	cvta.to.global.u32 	%r14, %r2;
	add.s32 	%r15, %r14, %r10;
	st.global.f32 	[%r15], %f3;

BB74_2:
	ret;
}

	// .globl	vec_fmaxf
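	// Hedged reading of the PTX below:
	//   extern "C" __global__ void vec_fmaxf(int n, float *result, float *x, float *y) {
	//       int i = blockIdx.x * blockDim.x + threadIdx.x;
	//       if (i < n) result[i] = fmaxf(x[i], y[i]);  // single max.f32
	//   }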
.visible .entry vec_fmaxf(
	.param .u32 vec_fmaxf_param_0,
	.param .u32 vec_fmaxf_param_1,
	.param .u32 vec_fmaxf_param_2,
	.param .u32 vec_fmaxf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<16>;


	ld.param.u32 	%r5, [vec_fmaxf_param_0];
	ld.param.u32 	%r2, [vec_fmaxf_param_1];
	ld.param.u32 	%r3, [vec_fmaxf_param_2];
	ld.param.u32 	%r4, [vec_fmaxf_param_3];
	mov.u32 	%r6, %tid.x;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r6;
	setp.ge.u32	%p1, %r1, %r5;
	@%p1 bra 	BB75_2;

	cvta.to.global.u32 	%r9, %r3;
	shl.b32 	%r10, %r1, 2;
	add.s32 	%r11, %r9, %r10;
	cvta.to.global.u32 	%r12, %r4;
	add.s32 	%r13, %r12, %r10;
	ld.global.f32 	%f1, [%r13];
	ld.global.f32 	%f2, [%r11];
	max.f32 	%f3, %f2, %f1;
	cvta.to.global.u32 	%r14, %r2;
	add.s32 	%r15, %r14, %r10;
	st.global.f32 	[%r15], %f3;

BB75_2:
	ret;
}

	// .globl	vec_fminf
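	// Hedged reading of the PTX below:
	//   extern "C" __global__ void vec_fminf(int n, float *result, float *x, float *y) {
	//       int i = blockIdx.x * blockDim.x + threadIdx.x;
	//       if (i < n) result[i] = fminf(x[i], y[i]);  // single min.f32
	//   }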
.visible .entry vec_fminf(
	.param .u32 vec_fminf_param_0,
	.param .u32 vec_fminf_param_1,
	.param .u32 vec_fminf_param_2,
	.param .u32 vec_fminf_param_3
)
{
	.reg .pred 	%p<2>;
	.reg .f32 	%f<4>;
	.reg .b32 	%r<16>;


	ld.param.u32 	%r5, [vec_fminf_param_0];
	ld.param.u32 	%r2, [vec_fminf_param_1];
	ld.param.u32 	%r3, [vec_fminf_param_2];
	ld.param.u32 	%r4, [vec_fminf_param_3];
	mov.u32 	%r6, %tid.x;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r6;
	setp.ge.u32	%p1, %r1, %r5;
	@%p1 bra 	BB76_2;

	cvta.to.global.u32 	%r9, %r3;
	shl.b32 	%r10, %r1, 2;
	add.s32 	%r11, %r9, %r10;
	cvta.to.global.u32 	%r12, %r4;
	add.s32 	%r13, %r12, %r10;
	ld.global.f32 	%f1, [%r13];
	ld.global.f32 	%f2, [%r11];
	min.f32 	%f3, %f2, %f1;
	cvta.to.global.u32 	%r14, %r2;
	add.s32 	%r15, %r14, %r10;
	st.global.f32 	[%r15], %f3;

BB76_2:
	ret;
}

	// .globl	vec_fmodf
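	// Hedged reading: the body inlines fmodf as power-of-two scaling of
	// the divisor (the ex2.approx blocks) plus a repeated-subtraction
	// loop (BB77_11), then reattaches the sign of x:
	//   extern "C" __global__ void vec_fmodf(int n, float *result, float *x, float *y) {
	//       int i = blockIdx.x * blockDim.x + threadIdx.x;
	//       if (i < n) result[i] = fmodf(x[i], y[i]);
	//   }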
.visible .entry vec_fmodf(
	.param .u32 vec_fmodf_param_0,
	.param .u32 vec_fmodf_param_1,
	.param .u32 vec_fmodf_param_2,
	.param .u32 vec_fmodf_param_3
)
{
	.reg .pred 	%p<20>;
	.reg .f32 	%f<48>;
	.reg .b32 	%r<34>;


	ld.param.u32 	%r7, [vec_fmodf_param_0];
	ld.param.u32 	%r4, [vec_fmodf_param_1];
	ld.param.u32 	%r5, [vec_fmodf_param_2];
	ld.param.u32 	%r6, [vec_fmodf_param_3];
	mov.u32 	%r8, %tid.x;
	mov.u32 	%r9, %ntid.x;
	mov.u32 	%r10, %ctaid.x;
	mad.lo.s32 	%r1, %r9, %r10, %r8;
	setp.ge.u32	%p1, %r1, %r7;
	@%p1 bra 	BB77_15;

	cvta.to.global.u32 	%r11, %r5;
	shl.b32 	%r12, %r1, 2;
	add.s32 	%r13, %r11, %r12;
	cvta.to.global.u32 	%r14, %r6;
	add.s32 	%r15, %r14, %r12;
	ld.global.f32 	%f1, [%r13];
	abs.f32 	%f46, %f1;
	ld.global.f32 	%f3, [%r15];
	abs.f32 	%f4, %f3;
	setp.eq.f32	%p2, %f46, 0f7F800000;
	setp.eq.f32	%p3, %f4, 0f00000000;
	or.pred  	%p4, %p2, %p3;
	mov.f32 	%f47, 0f7FFFFFFF;
	@%p4 bra 	BB77_14;

	setp.ltu.f32	%p5, %f46, %f4;
	@%p5 bra 	BB77_13;
	bra.uni 	BB77_3;

BB77_13:
	setp.gtu.f32	%p18, %f4, 0f7F800000;
	add.f32 	%f41, %f1, %f3;
	selp.f32	%f42, %f41, %f1, %p18;
	add.f32 	%f43, %f1, %f42;
	setp.leu.f32	%p19, %f46, 0f00000000;
	selp.f32	%f47, %f43, %f42, %p19;
	bra.uni 	BB77_14;

BB77_3:
	lg2.approx.f32 	%f21, %f46;
	cvt.rzi.s32.f32	%r16, %f21;
	lg2.approx.f32 	%f22, %f4;
	cvt.rzi.s32.f32	%r17, %f22;
	sub.s32 	%r2, %r16, %r17;
	abs.f32 	%f5, %f4;
	setp.eq.f32	%p6, %f5, 0f00000000;
	setp.eq.f32	%p7, %f5, 0f7F800000;
	or.pred  	%p8, %p6, %p7;
	setp.eq.s32	%p9, %r16, %r17;
	or.pred  	%p10, %p8, %p9;
	@%p10 bra 	BB77_9;
	bra.uni 	BB77_4;

BB77_9:
	setp.leu.f32	%p13, %f5, 0f00000000;
	add.f32 	%f37, %f4, %f4;
	selp.f32	%f44, %f37, %f4, %p13;
	bra.uni 	BB77_10;

BB77_4:
	abs.s32 	%r3, %r2;
	setp.lt.s32	%p11, %r3, 126;
	@%p11 bra 	BB77_8;
	bra.uni 	BB77_5;

BB77_8:
	cvt.rn.f32.s32	%f36, %r2;
	// inline asm
	ex2.approx.ftz.f32 %f35,%f36;
	// inline asm
	mul.f32 	%f44, %f4, %f35;
	bra.uni 	BB77_10;

BB77_5:
	setp.lt.s32	%p12, %r3, 252;
	@%p12 bra 	BB77_7;
	bra.uni 	BB77_6;

BB77_7:
	shr.u32 	%r23, %r2, 31;
	add.s32 	%r24, %r2, %r23;
	shr.s32 	%r25, %r24, 1;
	cvt.rn.f32.s32	%f31, %r25;
	// inline asm
	ex2.approx.ftz.f32 %f30,%f31;
	// inline asm
	mul.f32 	%f34, %f4, %f30;
	sub.s32 	%r26, %r2, %r25;
	cvt.rn.f32.s32	%f33, %r26;
	// inline asm
	ex2.approx.ftz.f32 %f32,%f33;
	// inline asm
	mul.f32 	%f44, %f34, %f32;
	bra.uni 	BB77_10;

BB77_6:
	shr.s32 	%r18, %r2, 31;
	shr.u32 	%r19, %r18, 30;
	add.s32 	%r20, %r2, %r19;
	shr.s32 	%r21, %r20, 2;
	cvt.rn.f32.s32	%f24, %r21;
	// inline asm
	ex2.approx.ftz.f32 %f23,%f24;
	// inline asm
	mul.f32 	%f27, %f4, %f23;
	mul.f32 	%f28, %f23, %f27;
	mul.f32 	%f29, %f23, %f28;
	mad.lo.s32 	%r22, %r21, -3, %r2;
	cvt.rn.f32.s32	%f26, %r22;
	// inline asm
	ex2.approx.ftz.f32 %f25,%f26;
	// inline asm
	mul.f32 	%f44, %f25, %f29;

BB77_10:
	mul.f32 	%f38, %f46, 0f3F000000;
	setp.gtu.f32	%p14, %f44, %f38;
	add.f32 	%f39, %f44, %f44;
	selp.f32	%f45, %f44, %f39, %p14;
	setp.ltu.f32	%p15, %f45, %f4;
	@%p15 bra 	BB77_12;

BB77_11:
	sub.f32 	%f40, %f46, %f45;
	setp.ltu.f32	%p16, %f46, %f45;
	selp.f32	%f46, %f46, %f40, %p16;
	mul.f32 	%f45, %f45, 0f3F000000;
	setp.ge.f32	%p17, %f45, %f4;
	@%p17 bra 	BB77_11;

BB77_12:
	mov.b32 	 %r27, %f1;
	and.b32  	%r28, %r27, -2147483648;
	mov.b32 	 %r29, %f46;
	or.b32  	%r30, %r29, %r28;
	mov.b32 	 %f47, %r30;

BB77_14:
	cvta.to.global.u32 	%r31, %r4;
	add.s32 	%r33, %r31, %r12;
	st.global.f32 	[%r33], %f47;

BB77_15:
	ret;
}

	// .globl	vec_hypotf
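	// Hedged reading: hypotf with overflow-avoiding scaling -- both
	// operands are multiplied by a power of two derived from the larger
	// magnitude before the sqrt, then rescaled:
	//   extern "C" __global__ void vec_hypotf(int n, float *result, float *x, float *y) {
	//       int i = blockIdx.x * blockDim.x + threadIdx.x;
	//       if (i < n) result[i] = hypotf(x[i], y[i]);
	//   }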
.visible .entry vec_hypotf(
	.param .u32 vec_hypotf_param_0,
	.param .u32 vec_hypotf_param_1,
	.param .u32 vec_hypotf_param_2,
	.param .u32 vec_hypotf_param_3
)
{
	.reg .pred 	%p<4>;
	.reg .f32 	%f<17>;
	.reg .b32 	%r<24>;


	ld.param.u32 	%r5, [vec_hypotf_param_0];
	ld.param.u32 	%r2, [vec_hypotf_param_1];
	ld.param.u32 	%r3, [vec_hypotf_param_2];
	ld.param.u32 	%r4, [vec_hypotf_param_3];
	mov.u32 	%r6, %tid.x;
	mov.u32 	%r7, %ntid.x;
	mov.u32 	%r8, %ctaid.x;
	mad.lo.s32 	%r1, %r7, %r8, %r6;
	setp.ge.u32	%p1, %r1, %r5;
	@%p1 bra 	BB78_2;

	cvta.to.global.u32 	%r9, %r3;
	shl.b32 	%r10, %r1, 2;
	add.s32 	%r11, %r9, %r10;
	cvta.to.global.u32 	%r12, %r4;
	add.s32 	%r13, %r12, %r10;
	ld.global.f32 	%f1, [%r11];
	abs.f32 	%f2, %f1;
	mov.b32 	 %r14, %f2;
	ld.global.f32 	%f3, [%r13];
	abs.f32 	%f4, %f3;
	mov.b32 	 %r15, %f4;
	min.s32 	%r16, %r15, %r14;
	mov.b32 	 %f5, %r16;
	max.s32 	%r17, %r14, %r15;
	mov.b32 	 %f6, %r17;
	and.b32  	%r18, %r17, -33554432;
	mov.u32 	%r19, 2122317824;
	sub.s32 	%r20, %r19, %r18;
	mov.b32 	 %f7, %r20;
	mul.f32 	%f8, %f5, %f7;
	mul.f32 	%f9, %f6, %f7;
	mul.f32 	%f10, %f8, %f8;
	fma.rn.f32 	%f11, %f9, %f9, %f10;
	sqrt.rn.f32 	%f12, %f11;
	add.s32 	%r21, %r18, 8388608;
	mov.b32 	 %f13, %r21;
	mul.f32 	%f14, %f12, %f13;
	setp.eq.f32	%p2, %f5, 0f00000000;
	selp.f32	%f15, %f6, %f14, %p2;
	setp.eq.f32	%p3, %f5, 0f7F800000;
	selp.f32	%f16, 0f7F800000, %f15, %p3;
	cvta.to.global.u32 	%r22, %r2;
	add.s32 	%r23, %r22, %r10;
	st.global.f32 	[%r23], %f16;

BB78_2:
	ret;
}

	// .globl	vec_nextafterf
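	// Hedged reading: the selp chain in BB79_5 steps the integer
	// representation of x by +/-1 toward y, with the usual zero and NaN
	// special cases handled first:
	//   extern "C" __global__ void vec_nextafterf(int n, float *result, float *x, float *y) {
	//       int i = blockIdx.x * blockDim.x + threadIdx.x;
	//       if (i < n) result[i] = nextafterf(x[i], y[i]);
	//   }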
.visible .entry vec_nextafterf(
	.param .u32 vec_nextafterf_param_0,
	.param .u32 vec_nextafterf_param_1,
	.param .u32 vec_nextafterf_param_2,
	.param .u32 vec_nextafterf_param_3
)
{
	.reg .pred 	%p<14>;
	.reg .f32 	%f<11>;
	.reg .b32 	%r<30>;


	ld.param.u32 	%r7, [vec_nextafterf_param_0];
	ld.param.u32 	%r4, [vec_nextafterf_param_1];
	ld.param.u32 	%r5, [vec_nextafterf_param_2];
	ld.param.u32 	%r6, [vec_nextafterf_param_3];
	mov.u32 	%r8, %tid.x;
	mov.u32 	%r9, %ntid.x;
	mov.u32 	%r10, %ctaid.x;
	mad.lo.s32 	%r1, %r9, %r10, %r8;
	setp.ge.u32	%p1, %r1, %r7;
	@%p1 bra 	BB79_9;

	cvta.to.global.u32 	%r11, %r5;
	shl.b32 	%r12, %r1, 2;
	add.s32 	%r13, %r11, %r12;
	cvta.to.global.u32 	%r14, %r6;
	add.s32 	%r15, %r14, %r12;
	ld.global.f32 	%f1, [%r13];
	mov.b32 	 %r2, %f1;
	ld.global.f32 	%f10, [%r15];
	mov.b32 	 %r3, %f10;
	abs.f32 	%f7, %f1;
	setp.gtu.f32	%p2, %f7, 0f7F800000;
	@%p2 bra 	BB79_7;

	abs.f32 	%f8, %f10;
	setp.gtu.f32	%p3, %f8, 0f7F800000;
	@%p3 bra 	BB79_7;
	bra.uni 	BB79_3;

BB79_7:
	add.f32 	%f10, %f1, %f10;
	bra.uni 	BB79_8;

BB79_3:
	or.b32  	%r16, %r3, %r2;
	mov.b32 	 %f9, %r16;
	setp.eq.f32	%p4, %f9, 0f00000000;
	@%p4 bra 	BB79_8;

	setp.eq.f32	%p5, %f1, 0f00000000;
	@%p5 bra 	BB79_6;
	bra.uni 	BB79_5;

BB79_6:
	and.b32  	%r25, %r3, -2147483648;
	or.b32  	%r26, %r25, 1;
	mov.b32 	 %f10, %r26;
	bra.uni 	BB79_8;

BB79_5:
	setp.lt.f32	%p6, %f1, %f10;
	setp.lt.f32	%p7, %f1, 0f00000000;
	and.pred  	%p8, %p6, %p7;
	selp.s32	%r17, -1, 0, %p8;
	add.s32 	%r18, %r17, %r2;
	setp.gt.f32	%p9, %f1, 0f00000000;
	and.pred  	%p10, %p6, %p9;
	selp.u32	%r19, 1, 0, %p10;
	add.s32 	%r20, %r18, %r19;
	setp.gt.f32	%p11, %f1, %f10;
	and.pred  	%p12, %p11, %p7;
	selp.u32	%r21, 1, 0, %p12;
	add.s32 	%r22, %r20, %r21;
	and.pred  	%p13, %p11, %p9;
	selp.s32	%r23, -1, 0, %p13;
	add.s32 	%r24, %r22, %r23;
	mov.b32 	 %f10, %r24;

BB79_8:
	cvta.to.global.u32 	%r27, %r4;
	add.s32 	%r29, %r27, %r12;
	st.global.f32 	[%r29], %f10;

BB79_9:
	ret;
}

	// .globl	vec_powf
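	// Hedged reading: powf inlined as an extended-precision log2/exp2
	// evaluation (the paired mul.rn/fma.rn sequences) plus special cases
	// for x == 1, y == 0, negative bases, zeros and infinities:
	//   extern "C" __global__ void vec_powf(int n, float *result, float *x, float *y) {
	//       int i = blockIdx.x * blockDim.x + threadIdx.x;
	//       if (i < n) result[i] = powf(x[i], y[i]);
	//   }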
.visible .entry vec_powf(
	.param .u32 vec_powf_param_0,
	.param .u32 vec_powf_param_1,
	.param .u32 vec_powf_param_2,
	.param .u32 vec_powf_param_3
)
{
	.reg .pred 	%p<30>;
	.reg .f32 	%f<103>;
	.reg .b32 	%r<36>;


	ld.param.u32 	%r6, [vec_powf_param_0];
	ld.param.u32 	%r3, [vec_powf_param_1];
	ld.param.u32 	%r4, [vec_powf_param_2];
	ld.param.u32 	%r5, [vec_powf_param_3];
	mov.u32 	%r7, %tid.x;
	mov.u32 	%r8, %ntid.x;
	mov.u32 	%r9, %ctaid.x;
	mad.lo.s32 	%r1, %r8, %r9, %r7;
	setp.ge.u32	%p2, %r1, %r6;
	@%p2 bra 	BB80_15;

	cvta.to.global.u32 	%r10, %r4;
	cvta.to.global.u32 	%r2, %r3;
	shl.b32 	%r11, %r1, 2;
	add.s32 	%r12, %r10, %r11;
	cvta.to.global.u32 	%r13, %r5;
	add.s32 	%r14, %r13, %r11;
	ld.global.f32 	%f1, [%r14];
	mul.f32 	%f22, %f1, 0f3F000000;
	cvt.rzi.f32.f32	%f23, %f22;
	fma.rn.f32 	%f24, %f23, 0fC0000000, %f1;
	abs.f32 	%f2, %f24;
	ld.global.f32 	%f3, [%r12];
	abs.f32 	%f4, %f3;
	setp.lt.f32	%p3, %f4, 0f00800000;
	mul.f32 	%f25, %f4, 0f4B800000;
	selp.f32	%f26, 0fC3170000, 0fC2FE0000, %p3;
	selp.f32	%f27, %f25, %f4, %p3;
	mov.b32 	 %r15, %f27;
	and.b32  	%r16, %r15, 8388607;
	or.b32  	%r17, %r16, 1065353216;
	mov.b32 	 %f28, %r17;
	shr.u32 	%r18, %r15, 23;
	cvt.rn.f32.u32	%f29, %r18;
	add.f32 	%f30, %f26, %f29;
	setp.gt.f32	%p4, %f28, 0f3FB504F3;
	mul.f32 	%f31, %f28, 0f3F000000;
	add.f32 	%f32, %f30, 0f3F800000;
	selp.f32	%f33, %f31, %f28, %p4;
	selp.f32	%f34, %f32, %f30, %p4;
	add.f32 	%f35, %f33, 0fBF800000;
	add.f32 	%f19, %f33, 0f3F800000;
	// inline asm
	rcp.approx.ftz.f32 %f18,%f19;
	// inline asm
	add.f32 	%f36, %f35, %f35;
	mul.f32 	%f37, %f18, %f36;
	mul.f32 	%f38, %f37, %f37;
	mov.f32 	%f39, 0f3C4CAF63;
	mov.f32 	%f40, 0f3B18F0FE;
	fma.rn.f32 	%f41, %f40, %f38, %f39;
	mov.f32 	%f42, 0f3DAAAABD;
	fma.rn.f32 	%f43, %f41, %f38, %f42;
	mul.rn.f32 	%f44, %f43, %f38;
	mul.rn.f32 	%f45, %f44, %f37;
	sub.f32 	%f46, %f35, %f37;
	neg.f32 	%f47, %f37;
	add.f32 	%f48, %f46, %f46;
	fma.rn.f32 	%f49, %f47, %f35, %f48;
	mul.rn.f32 	%f50, %f18, %f49;
	add.f32 	%f51, %f45, %f37;
	sub.f32 	%f52, %f37, %f51;
	add.f32 	%f53, %f45, %f52;
	add.f32 	%f54, %f50, %f53;
	add.f32 	%f55, %f51, %f54;
	sub.f32 	%f56, %f51, %f55;
	add.f32 	%f57, %f54, %f56;
	mov.f32 	%f58, 0f3F317200;
	mul.rn.f32 	%f59, %f34, %f58;
	mov.f32 	%f60, 0f35BFBE8E;
	mul.rn.f32 	%f61, %f34, %f60;
	add.f32 	%f62, %f59, %f55;
	sub.f32 	%f63, %f59, %f62;
	add.f32 	%f64, %f55, %f63;
	add.f32 	%f65, %f57, %f64;
	add.f32 	%f66, %f61, %f65;
	add.f32 	%f67, %f62, %f66;
	sub.f32 	%f68, %f62, %f67;
	add.f32 	%f69, %f66, %f68;
	abs.f32 	%f5, %f1;
	setp.gt.f32	%p5, %f5, 0f77F684DF;
	mul.f32 	%f70, %f1, 0f39000000;
	selp.f32	%f71, %f70, %f1, %p5;
	mul.rn.f32 	%f72, %f71, %f67;
	neg.f32 	%f73, %f72;
	fma.rn.f32 	%f74, %f71, %f67, %f73;
	fma.rn.f32 	%f75, %f71, %f69, %f74;
	mov.f32 	%f76, 0f00000000;
	fma.rn.f32 	%f77, %f76, %f67, %f75;
	add.rn.f32 	%f78, %f72, %f77;
	neg.f32 	%f79, %f78;
	add.rn.f32 	%f80, %f72, %f79;
	add.rn.f32 	%f81, %f80, %f77;
	mov.b32 	 %r19, %f78;
	setp.eq.s32	%p6, %r19, 1118925336;
	add.s32 	%r20, %r19, -1;
	mov.b32 	 %f82, %r20;
	add.f32 	%f83, %f81, 0f37000000;
	selp.f32	%f84, %f82, %f78, %p6;
	selp.f32	%f6, %f83, %f81, %p6;
	mul.f32 	%f85, %f84, 0f3FB8AA3B;
	cvt.rzi.f32.f32	%f86, %f85;
	mov.f32 	%f87, 0fBF317200;
	fma.rn.f32 	%f88, %f86, %f87, %f84;
	mov.f32 	%f89, 0fB5BFBE8E;
	fma.rn.f32 	%f90, %f86, %f89, %f88;
	mul.f32 	%f21, %f90, 0f3FB8AA3B;
	// inline asm
	ex2.approx.ftz.f32 %f20,%f21;
	// inline asm
	add.f32 	%f91, %f86, 0f00000000;
	ex2.approx.f32 	%f92, %f91;
	mul.f32 	%f93, %f20, %f92;
	setp.lt.f32	%p7, %f84, 0fC2D20000;
	selp.f32	%f94, 0f00000000, %f93, %p7;
	setp.gt.f32	%p8, %f84, 0f42D20000;
	selp.f32	%f101, 0f7F800000, %f94, %p8;
	setp.eq.f32	%p9, %f101, 0f7F800000;
	@%p9 bra 	BB80_3;

	fma.rn.f32 	%f101, %f101, %f6, %f101;

BB80_3:
	setp.lt.f32	%p10, %f3, 0f00000000;
	setp.eq.f32	%p11, %f2, 0f3F800000;
	and.pred  	%p1, %p10, %p11;
	mov.b32 	 %r21, %f101;
	xor.b32  	%r22, %r21, -2147483648;
	mov.b32 	 %f95, %r22;
	selp.f32	%f102, %f95, %f101, %p1;
	setp.eq.f32	%p12, %f3, 0f00000000;
	@%p12 bra 	BB80_6;
	bra.uni 	BB80_4;

BB80_6:
	add.f32 	%f97, %f3, %f3;
	mov.b32 	 %r23, %f97;
	selp.b32	%r24, %r23, 0, %p11;
	or.b32  	%r25, %r24, 2139095040;
	setp.lt.f32	%p16, %f1, 0f00000000;
	selp.b32	%r26, %r25, %r24, %p16;
	mov.b32 	 %f102, %r26;
	bra.uni 	BB80_7;

BB80_4:
	setp.geu.f32	%p13, %f3, 0f00000000;
	@%p13 bra 	BB80_7;

	cvt.rzi.f32.f32	%f96, %f1;
	setp.neu.f32	%p14, %f96, %f1;
	selp.f32	%f102, 0f7FFFFFFF, %f102, %p14;

BB80_7:
	add.f32 	%f98, %f4, %f5;
	mov.b32 	 %r27, %f98;
	setp.lt.s32	%p17, %r27, 2139095040;
	@%p17 bra 	BB80_14;

	setp.gtu.f32	%p18, %f4, 0f7F800000;
	setp.gtu.f32	%p19, %f5, 0f7F800000;
	or.pred  	%p20, %p18, %p19;
	@%p20 bra 	BB80_13;
	bra.uni 	BB80_9;

BB80_13:
	add.f32 	%f102, %f3, %f1;
	bra.uni 	BB80_14;

BB80_9:
	setp.eq.f32	%p21, %f5, 0f7F800000;
	@%p21 bra 	BB80_12;
	bra.uni 	BB80_10;

BB80_12:
	setp.gt.f32	%p24, %f4, 0f3F800000;
	selp.b32	%r31, 2139095040, 0, %p24;
	xor.b32  	%r32, %r31, 2139095040;
	setp.lt.f32	%p25, %f1, 0f00000000;
	selp.b32	%r33, %r32, %r31, %p25;
	mov.b32 	 %f99, %r33;
	setp.eq.f32	%p26, %f3, 0fBF800000;
	selp.f32	%f102, 0f3F800000, %f99, %p26;
	bra.uni 	BB80_14;

BB80_10:
	setp.neu.f32	%p22, %f4, 0f7F800000;
	@%p22 bra 	BB80_14;

	setp.ltu.f32	%p23, %f1, 0f00000000;
	selp.b32	%r28, 0, 2139095040, %p23;
	or.b32  	%r29, %r28, -2147483648;
	selp.b32	%r30, %r29, %r28, %p1;
	mov.b32 	 %f102, %r30;

BB80_14:
	setp.eq.f32	%p27, %f1, 0f00000000;
	setp.eq.f32	%p28, %f3, 0f3F800000;
	or.pred  	%p29, %p28, %p27;
	selp.f32	%f100, 0f3F800000, %f102, %p29;
	add.s32 	%r35, %r2, %r11;
	st.global.f32 	[%r35], %f100;

BB80_15:
	ret;
}

	// .globl	vec_remainderf
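	// Hedged reading: like vec_fmodf, but the quotient is rounded to the
	// nearest integer, ties to even -- hence the closest-multiple
	// adjustment in BB81_15..BB81_17:
	//   extern "C" __global__ void vec_remainderf(int n, float *result, float *x, float *y) {
	//       int i = blockIdx.x * blockDim.x + threadIdx.x;
	//       if (i < n) result[i] = remainderf(x[i], y[i]);
	//   }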
.visible .entry vec_remainderf(
	.param .u32 vec_remainderf_param_0,
	.param .u32 vec_remainderf_param_1,
	.param .u32 vec_remainderf_param_2,
	.param .u32 vec_remainderf_param_3
)
{
	.reg .pred 	%p<29>;
	.reg .f32 	%f<52>;
	.reg .b32 	%r<39>;


	ld.param.u32 	%r9, [vec_remainderf_param_0];
	ld.param.u32 	%r6, [vec_remainderf_param_1];
	ld.param.u32 	%r7, [vec_remainderf_param_2];
	ld.param.u32 	%r8, [vec_remainderf_param_3];
	mov.u32 	%r10, %tid.x;
	mov.u32 	%r11, %ntid.x;
	mov.u32 	%r12, %ctaid.x;
	mad.lo.s32 	%r1, %r11, %r12, %r10;
	setp.ge.u32	%p3, %r1, %r9;
	@%p3 bra 	BB81_20;

	cvta.to.global.u32 	%r13, %r7;
	shl.b32 	%r14, %r1, 2;
	add.s32 	%r15, %r13, %r14;
	cvta.to.global.u32 	%r16, %r8;
	add.s32 	%r17, %r16, %r14;
	ld.global.f32 	%f1, [%r15];
	abs.f32 	%f2, %f1;
	ld.global.f32 	%f3, [%r17];
	abs.f32 	%f4, %f3;
	setp.gtu.f32	%p4, %f2, 0f7F800000;
	setp.gtu.f32	%p5, %f4, 0f7F800000;
	or.pred  	%p6, %p4, %p5;
	@%p6 bra 	BB81_18;
	bra.uni 	BB81_2;

BB81_18:
	add.f32 	%f51, %f1, %f3;
	bra.uni 	BB81_19;

BB81_2:
	setp.eq.f32	%p7, %f2, 0f7F800000;
	setp.eq.f32	%p8, %f4, 0f00000000;
	or.pred  	%p9, %p7, %p8;
	mov.f32 	%f51, 0f7FFFFFFF;
	@%p9 bra 	BB81_19;

	setp.ltu.f32	%p10, %f2, %f4;
	mov.u32 	%r38, 0;
	mov.f32 	%f49, %f2;
	@%p10 bra 	BB81_15;

	lg2.approx.f32 	%f22, %f2;
	cvt.rzi.s32.f32	%r19, %f22;
	lg2.approx.f32 	%f23, %f4;
	cvt.rzi.s32.f32	%r20, %f23;
	sub.s32 	%r2, %r19, %r20;
	abs.f32 	%f5, %f4;
	setp.eq.f32	%p11, %f5, 0f00000000;
	setp.eq.f32	%p12, %f5, 0f7F800000;
	or.pred  	%p13, %p11, %p12;
	setp.eq.s32	%p14, %r19, %r20;
	or.pred  	%p15, %p13, %p14;
	@%p15 bra 	BB81_10;
	bra.uni 	BB81_5;

BB81_10:
	setp.leu.f32	%p18, %f5, 0f00000000;
	add.f32 	%f38, %f4, %f4;
	selp.f32	%f44, %f38, %f4, %p18;
	bra.uni 	BB81_11;

BB81_5:
	abs.s32 	%r3, %r2;
	setp.lt.s32	%p16, %r3, 126;
	@%p16 bra 	BB81_9;
	bra.uni 	BB81_6;

BB81_9:
	cvt.rn.f32.s32	%f37, %r2;
	// inline asm
	ex2.approx.ftz.f32 %f36,%f37;
	// inline asm
	mul.f32 	%f44, %f4, %f36;
	bra.uni 	BB81_11;

BB81_6:
	setp.lt.s32	%p17, %r3, 252;
	@%p17 bra 	BB81_8;
	bra.uni 	BB81_7;

BB81_8:
	shr.u32 	%r26, %r2, 31;
	add.s32 	%r27, %r2, %r26;
	shr.s32 	%r28, %r27, 1;
	cvt.rn.f32.s32	%f32, %r28;
	// inline asm
	ex2.approx.ftz.f32 %f31,%f32;
	// inline asm
	mul.f32 	%f35, %f4, %f31;
	sub.s32 	%r29, %r2, %r28;
	cvt.rn.f32.s32	%f34, %r29;
	// inline asm
	ex2.approx.ftz.f32 %f33,%f34;
	// inline asm
	mul.f32 	%f44, %f35, %f33;
	bra.uni 	BB81_11;

BB81_7:
	shr.s32 	%r21, %r2, 31;
	shr.u32 	%r22, %r21, 30;
	add.s32 	%r23, %r2, %r22;
	shr.s32 	%r24, %r23, 2;
	cvt.rn.f32.s32	%f25, %r24;
	// inline asm
	ex2.approx.ftz.f32 %f24,%f25;
	// inline asm
	mul.f32 	%f28, %f4, %f24;
	mul.f32 	%f29, %f24, %f28;
	mul.f32 	%f30, %f24, %f29;
	mad.lo.s32 	%r25, %r24, -3, %r2;
	cvt.rn.f32.s32	%f27, %r25;
	// inline asm
	ex2.approx.ftz.f32 %f26,%f27;
	// inline asm
	mul.f32 	%f44, %f26, %f30;

BB81_11:
	mul.f32 	%f39, %f2, 0f3F000000;
	setp.gtu.f32	%p19, %f44, %f39;
	add.f32 	%f40, %f44, %f44;
	selp.f32	%f45, %f44, %f40, %p19;
	setp.ltu.f32	%p20, %f45, %f4;
	mov.f32 	%f46, %f2;
	mov.f32 	%f49, %f46;
	@%p20 bra 	BB81_15;

	mov.f32 	%f50, %f2;

BB81_13:
	mov.f32 	%f12, %f50;
	mov.f32 	%f13, %f45;
	sub.f32 	%f41, %f12, %f13;
	setp.ltu.f32	%p21, %f12, %f13;
	selp.f32	%f50, %f12, %f41, %p21;
	mul.f32 	%f45, %f13, 0f3F000000;
	setp.ge.f32	%p22, %f45, %f4;
	@%p22 bra 	BB81_13;

	setp.ge.f32	%p23, %f12, %f13;
	selp.u32	%r38, 1, 0, %p23;
	mov.f32 	%f49, %f50;

BB81_15:
	add.f32 	%f17, %f49, %f49;
	setp.gt.f32	%p25, %f17, %f4;
	mov.pred 	%p28, -1;
	@%p25 bra 	BB81_17;

	setp.eq.f32	%p26, %f17, %f4;
	setp.ne.s32	%p27, %r38, 0;
	and.pred  	%p28, %p26, %p27;

BB81_17:
	sub.f32 	%f42, %f49, %f4;
	selp.f32	%f43, %f42, %f49, %p28;
	mov.b32 	 %r31, %f1;
	and.b32  	%r32, %r31, -2147483648;
	mov.b32 	 %r33, %f43;
	xor.b32  	%r34, %r33, %r32;
	mov.b32 	 %f51, %r34;

BB81_19:
	cvta.to.global.u32 	%r35, %r6;
	add.s32 	%r37, %r35, %r14;
	st.global.f32 	[%r37], %f51;

BB81_20:
	ret;
}
