<?xml version="1.0" encoding="UTF-8"?><rss version="2.0"
	xmlns:media="http://search.yahoo.com/mrss/"
	xmlns:content="http://purl.org/rss/1.0/modules/content/"
	xmlns:wfw="http://wellformedweb.org/CommentAPI/"
	xmlns:dc="http://purl.org/dc/elements/1.1/"
	xmlns:atom="http://www.w3.org/2005/Atom"
	xmlns:sy="http://purl.org/rss/1.0/modules/syndication/"
	xmlns:slash="http://purl.org/rss/1.0/modules/slash/"
	>

<channel>
	<title>推理 - 四号程序员</title>
	<atom:link href="https://www.coder4.com/archives/tag/%E6%8E%A8%E7%90%86/feed" rel="self" type="application/rss+xml" />
	<link>https://www.coder4.com</link>
	<description>Keep It Simple and Stupid</description>
	<lastBuildDate>Mon, 05 May 2025 23:16:48 +0000</lastBuildDate>
	<language>zh-CN</language>
	<sy:updatePeriod>
	hourly	</sy:updatePeriod>
	<sy:updateFrequency>
	1	</sy:updateFrequency>
	<generator>https://wordpress.org/?v=6.8.3</generator>
	<item>
		<title>[转]rk3588使用npu进行模型转换和推理，加速AI应用落地</title>
		<link>https://www.coder4.com/archives/8208</link>
					<comments>https://www.coder4.com/archives/8208#respond</comments>
		
		<dc:creator><![CDATA[coder4]]></dc:creator>
		<pubDate>Wed, 23 Oct 2024 04:11:47 +0000</pubDate>
				<category><![CDATA[Linux]]></category>
		<category><![CDATA[机器学习]]></category>
		<category><![CDATA[npu]]></category>
		<category><![CDATA[rknn]]></category>
		<category><![CDATA[推理]]></category>
		<category><![CDATA[模型]]></category>
		<category><![CDATA[转换]]></category>
		<guid isPermaLink="false">https://www.coder4.com/?p=8208</guid>

					<description><![CDATA[转载自：《rk3588使用npu进行模型转换和推理，加速AI应用落地》 🍉零、引言 博主在瑞芯微RK3588的开发板上跑了deepsort跟踪算法，从IP相机中的server拉取rtsp视频流，但是fps只有1.2，和放PPT一样卡顿，无法投入实际应用。本来想使用tensorrt进行加速推理，但是前提需要cuda，rk的板子上都是Arm的手机gpu，没有Nvidia的cuda，所以这条路行不通。那么转过来，使用开发板自带的NPU进行加速推理，岂不是更加可行，而且它本身就是深度学习嵌入式板子，[......] 继续阅读]]></description>
		
					<wfw:commentRss>https://www.coder4.com/archives/8208/feed</wfw:commentRss>
			<slash:comments>0</slash:comments>
		
		
			</item>
		<item>
		<title>MobileNetV3的onnxruntime推理(Python)</title>
		<link>https://www.coder4.com/archives/8175</link>
					<comments>https://www.coder4.com/archives/8175#respond</comments>
		
		<dc:creator><![CDATA[coder4]]></dc:creator>
		<pubDate>Fri, 07 Jun 2024 11:25:01 +0000</pubDate>
				<category><![CDATA[Python]]></category>
		<category><![CDATA[onnx]]></category>
		<category><![CDATA[推理]]></category>
		<guid isPermaLink="false">https://www.coder4.com/?p=8175</guid>

					<description><![CDATA[import cv2 import numpy as np import onnxruntime as ort def load_model(model_path): """加载ONNX模型""" session = ort.InferenceSession(model_path) return session def preprocess_image(image_path): image = cv2.imread(image_path)[......] 继续阅读]]></description>
		
					<wfw:commentRss>https://www.coder4.com/archives/8175/feed</wfw:commentRss>
			<slash:comments>0</slash:comments>
		
		
			</item>
		<item>
		<title>MobileNet的onnxruntime推理(C++)</title>
		<link>https://www.coder4.com/archives/8174</link>
					<comments>https://www.coder4.com/archives/8174#respond</comments>
		
		<dc:creator><![CDATA[coder4]]></dc:creator>
		<pubDate>Fri, 07 Jun 2024 11:23:08 +0000</pubDate>
				<category><![CDATA[C && C++]]></category>
		<category><![CDATA[onnx]]></category>
		<category><![CDATA[推理]]></category>
		<guid isPermaLink="false">https://www.coder4.com/?p=8174</guid>

					<description><![CDATA[#include &#60;opencv2/opencv.hpp&#62; #include &#60;onnxruntime/onnxruntime_cxx_api.h&#62; #include &#60;vector&#62; #include &#60;iostream&#62; int main() { // load onnx model Ort::Env env(OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING, "test"[......] 继续阅读]]></description>
		
					<wfw:commentRss>https://www.coder4.com/archives/8174/feed</wfw:commentRss>
			<slash:comments>0</slash:comments>
		
		
			</item>
		<item>
		<title>MobileNet v3的Java推理</title>
		<link>https://www.coder4.com/archives/8170</link>
					<comments>https://www.coder4.com/archives/8170#respond</comments>
		
		<dc:creator><![CDATA[coder4]]></dc:creator>
		<pubDate>Thu, 16 May 2024 12:04:32 +0000</pubDate>
				<category><![CDATA[Linux]]></category>
		<category><![CDATA[mobilenet v3]]></category>
		<category><![CDATA[onnx]]></category>
		<category><![CDATA[图像分类]]></category>
		<category><![CDATA[推理]]></category>
		<guid isPermaLink="false">https://www.coder4.com/?p=8170</guid>

					<description><![CDATA[模型是MobileNet v3 small + 微调分类器，pyTorch训练后导出onnx模型 pom： &#60;dependencies&#62; &#60;dependency&#62; &#60;groupId&#62;com.microsoft.onnxruntime&#60;/groupId&#62; &#60;artifactId&#62;onnxruntime&#60;/artifactId&#62; &#60;version&#62;[......] 继续阅读]]></description>
		
					<wfw:commentRss>https://www.coder4.com/archives/8170/feed</wfw:commentRss>
			<slash:comments>0</slash:comments>
		
		
			</item>
		<item>
		<title>4红牌、4蓝牌推理</title>
		<link>https://www.coder4.com/archives/3322</link>
					<comments>https://www.coder4.com/archives/3322#respond</comments>
		
		<dc:creator><![CDATA[coder4]]></dc:creator>
		<pubDate>Sat, 02 Jun 2012 12:29:56 +0000</pubDate>
				<category><![CDATA[算法&数据结构]]></category>
		<category><![CDATA[推理]]></category>
		<category><![CDATA[红牌]]></category>
		<category><![CDATA[蓝牌]]></category>
		<guid isPermaLink="false">http://www.coder4.com/?p=3322</guid>

					<description><![CDATA[有4 张红色的牌和4 张蓝色的牌，主持人先拿任意两张，再分别在 A、B、C 三人额头上贴任意两张牌，A、 B、C 三人都可以看见其余两人额头上的牌，看完后让他们猜自己额头上是什么颜色的牌，A 说不知道，B 说不知道，C 说不知道，然后 A 说知道了。 请教如何推理，A 是怎么知道的。如果用程序，又怎么实现呢？ 以下推理很NB，转载自：http://topic.csdn.net/u/20100426/11/b47952ac-47f6-45bd-8874-6d36f8996870.htm[......] 继续阅读]]></description>
		
					<wfw:commentRss>https://www.coder4.com/archives/3322/feed</wfw:commentRss>
			<slash:comments>0</slash:comments>
		
		
			</item>
	</channel>
</rss>
