https://www.cnblogs.com/asmer-stone/p/5537111.html

#error : Xiron Platform Abstraction Layer - Win32 - Microsoft Visual Studio versions above 2010 (10.0) are not supported! (solution)

After setting up an OpenNI 1.5 environment with VS2013, the build fails with this error:

#ifndef RC_INVOKED
#if _MSC_VER < 1300 // Before MSVC7 (2003)
#error Xiron Platform Abstraction Layer - Win32 - Microsoft Visual Studio versions below 2003 (7.0) are not supported!
#endif
#if _MSC_VER > 1600 // After MSVC10 (2010)
#error Xiron Platform Abstraction Layer - Win32 - Microsoft Visual Studio versions above 2010 (10.0) are not supported!
#endif
#endif
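
The usual workaround is to relax the upper-bound check in the OpenNI headers so that newer compilers pass. A minimal sketch, assuming the check lives in the Win32 platform header of the OpenNI 1.5 sources (search the sources for the #error text); VS2013 reports _MSC_VER == 1800:

```cpp
// Sketch: raise the upper bound from 1600 (VS2010) to 1800 (VS2013).
#if _MSC_VER > 1800 // After MSVC12 (2013)
#error Xiron Platform Abstraction Layer - Win32 - Microsoft Visual Studio versions above 2013 (12.0) are not supported!
#endif
```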


```cpp
void CImageObj::Total_Variation(int iter, double dt, double epsilon, double lambda)
{
    int i, j;
    int nx = m_width, ny = m_height;
    double ep2 = epsilon * epsilon;

    double** I_t = NewDoubleMatrix(nx, ny);
    double** I_tmp = NewDoubleMatrix(nx, ny);
    for (i = 0; i < ny; i++)
        for (j = 0; j < nx; j++)
            I_t[i][j] = I_tmp[i][j] = (double)m_imgData[i][j];

    for (int t = 0; t < iter; t++)
    {
        for (i = 0; i < ny; i++)
        {
            for (j = 0; j < nx; j++)
            {
                // boundary handling: clamp neighbor indices to the image edge
                int iUp = i - 1, iDown = i + 1;
                int jLeft = j - 1, jRight = j + 1;
                if (0 == i) iUp = i;
                if (ny - 1 == i) iDown = i;
                if (0 == j) jLeft = j;
                if (nx - 1 == j) jRight = j;

                // central differences for the first and second derivatives
                double tmp_x = (I_t[i][jRight] - I_t[i][jLeft]) / 2.0;
                double tmp_y = (I_t[iDown][j] - I_t[iUp][j]) / 2.0;
                double tmp_xx = I_t[i][jRight] + I_t[i][jLeft] - 2 * I_t[i][j];
                double tmp_yy = I_t[iDown][j] + I_t[iUp][j] - 2 * I_t[i][j];
                double tmp_xy = (I_t[iDown][jRight] + I_t[iUp][jLeft] - I_t[iUp][jRight] - I_t[iDown][jLeft]) / 4.0;

                // curvature term plus the fidelity term lambda * (original - current)
                double tmp_num = tmp_yy * (tmp_x * tmp_x + ep2) + tmp_xx * (tmp_y * tmp_y + ep2) - 2 * tmp_x * tmp_y * tmp_xy;
                double tmp_den = pow(tmp_x * tmp_x + tmp_y * tmp_y + ep2, 1.5);

                I_tmp[i][j] += dt * (tmp_num / tmp_den + lambda * (m_imgData[i][j] - I_t[i][j]));
            }
        }  // one sweep over the image

        for (i = 0; i < ny; i++)
            for (j = 0; j < nx; j++)
                I_t[i][j] = I_tmp[i][j];

    } // end of iterations

    // write the result back to the image, clamped to [0, 255]
    for (i = 0; i < ny; i++)
        for (j = 0; j < nx; j++)
        {
            double tmp = I_t[i][j];
            tmp = max(0.0, min(tmp, 255.0));
            m_imgData[i][j] = (unsigned char)tmp;
        }

    DeleteDoubleMatrix(I_t, nx, ny);
    DeleteDoubleMatrix(I_tmp, nx, ny);
}
```
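
For reference, this loop is an explicit time-stepping of the regularized total-variation flow with a data-fidelity term; with central differences for $I_x, I_y, I_{xx}, I_{yy}, I_{xy}$ as computed above, each update implements

$$\frac{\partial I}{\partial t} \;=\; \frac{I_{yy}\,(I_x^2 + \epsilon^2) + I_{xx}\,(I_y^2 + \epsilon^2) - 2\,I_x I_y I_{xy}}{\left(I_x^2 + I_y^2 + \epsilon^2\right)^{3/2}} \;+\; \lambda\,(I^0 - I),$$

where $I^0$ is the original image (m_imgData), $\epsilon$ regularizes the gradient magnitude, and $dt$ is the time step.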
---------------------


1. Open a PDF file with Acrobat.

4. In the "Add Custom Page Size" dialog that pops up, add the new paper type as shown in the figure and save it.


• Prove that the final kinetic energy is less than the initial kinetic energy:

Here T denotes kinetic energy,

where subscript i denotes internal and e denotes external.

This is the work-energy theorem.

• Conservation of momentum:



Qt provides several functions that can put a thread to sleep:

void QThread::sleep ( unsigned long secs )   [static protected]

void QThread::msleep ( unsigned long msecs )   [static protected]

void QThread::usleep ( unsigned long usecs )   [static protected]

The units of sleep, msleep, and usleep are seconds, milliseconds, and microseconds, respectively.

1.    processEvents

```cpp
// Keep the UI responsive while waiting: spin the event loop until a deadline.
QTime dieTime = QTime::currentTime().addMSecs(250);   // e.g. wait ~250 ms
while (QTime::currentTime() < dieTime)
    QCoreApplication::processEvents(QEventLoop::AllEvents, 100);
```

2.        QWaitCondition

```cpp
// QWaitCondition::wait() takes a locked mutex and an optional timeout in ms.
QMutex mutex;
mutex.lock();

QWaitCondition waitCondition;
waitCondition.wait(&mutex, time);   // blocks for at most `time` milliseconds

mutex.unlock();
```

wait's timeout is given in milliseconds, but wait and sleep behave differently.

sleep() suspends the thread for a period of time. When the interval expires, the thread does not necessarily resume immediately, because at that moment another thread may be running and will not be preempted, unless

(a) the awakened thread has a higher priority, or

(b) the running thread blocks for some other reason.

wait() suspends the calling thread and puts the waited-on object into a waiting state until it is woken up or the timeout expires.

3.        Looking at the source of sleep: Qt's sleep implementations on Windows and *nix.

On Windows, the body of sleep() is:

```cpp
{
    ::Sleep(secs * 1000);   // Win32 Sleep() takes milliseconds, hence the conversion
}
```

So the unit of sleep() is seconds.

On *nix, the body of sleep() is:

```cpp
{
    struct timeval tv;
    gettimeofday(&tv, 0);

    struct timespec ti;
    ti.tv_sec = tv.tv_sec + secs;
    ti.tv_nsec = (tv.tv_usec * 1000);
    // ...
}
```
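
Because sleep, msleep, and usleep are protected static members of QThread in Qt 4, a common workaround is to expose them through a trivial subclass. A minimal sketch (the class name is ours):

```cpp
#include <QThread>

// Expose QThread's protected static sleep functions (a common Qt 4 idiom).
class SleepHelper : public QThread
{
public:
    static void sleep(unsigned long secs)   { QThread::sleep(secs); }
    static void msleep(unsigned long msecs) { QThread::msleep(msecs); }
    static void usleep(unsigned long usecs) { QThread::usleep(usecs); }
};

// Usage: SleepHelper::msleep(250);   // sleep the current thread for 250 ms
```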

---------------------

//www.digitgolf.com/news/golf/84.html

About Zhongtong Digital Golf

Beijing Zhongtong Digital Golf, relying on a flight-simulation laboratory for its technology, develops computer-simulation software and designs, implements, and services related projects. It was among the first high-tech companies in China to develop, produce, and sell golf simulator systems, and it is China's largest seller of golf simulators.

Drawing on Zhongtong's years of experience implementing simulation systems, EDH and Beijing Zhongtong Digital Golf have cooperated widely in China, jointly founding a digital-course production R&D center and a software R&D center. EDH has appointed Zhongtong Digital Golf as the general agent in the Chinese market for its world-leading FlightScope golf radar products, with full responsibility for their sales, installation, and after-sales service in China.

# FlightScope X2

The FlightScope X2 radar launch monitor, which Zhongtong imported from EDH, is billed as the world's most advanced golf radar and currently the only golf radar product built on military radar technology. It connects wirelessly, removing the need to be cabled to a desktop PC, and pairs with portable devices such as iPhone, iPad, iPod, and Android phones, making monitoring simpler and more convenient. The X2 covers indoor ball monitoring and also works outdoors; in recent years many professional players abroad have favored the X2 because it can track the ball and monitor the swing anywhere, at any time, outdoors.

The X2 can precisely track the ball's entire flight path and accurately measure backspin, sidespin, distance, club face angle, swing speed, club head speed, horizontal club path, vertical club path, swing plane, putter acceleration, club face height above the ground, and toe or heel impact position, among other data. For club fitting it can quickly identify club material and analyze swing data, making it a good helper for golf coaches, players, and club workshops.

With the new-generation X2 you can watch and measure your own swing, ball speed, club speed, and other data using just an iPhone or iPad, free of the constraint of a wired PC connection. The portable X2 radar brings you one step closer to precise measurement.

FlightScope X2 advantages:

1. Electric leveling
2. Commercial rechargeable NiMH battery
3. Battery life up to 10 hours
4. Wi-Fi wireless network connection
5. Bluetooth network connection
6. Can connect to displays such as TVs
7. Compatible with Android phones, iPad, iPhone, and other portable devices

How the 3D Doppler tracking radar works:

• An oscillator generates a microwave signal, which the antenna radiates.
• A moving object disturbs the signal and reflects energy, converting the motion into a measurable change.
• The phased-array antenna feeds the signal back for phase comparison.

Detection principle: the key to the success of FlightScope's 3D ball-tracking technology is the patented phased-array tracking technology used in the device, adopted from military radar. This means ball measurement and tracking data more precise than any comparable product. If you want analysis of your own golf swing and feedback after practice, a 3D Doppler tracking radar is your best choice.

A comprehensive launch monitor: the FlightScope X2 is more than a launch monitor; it represents a breakthrough for the product category, combining many features in one attractive product.

• As an indoor launch monitor, the X2 provides extensive, accurate data on both club and ball.
• The X2 is a fully 3D device, more capable than 2D systems.
• Outdoors, the X2 works as a long-range trajectory tracker, using the same phased-array radar technology used by many OEMs and golf organizations, at a comparatively low cost.
• Wi-Fi wireless network connection.
• Compatible with Android phones, iPad, iPhone, and other portable devices.

The X2 directly measures: ball spin rate, backspin and sidespin, vertical launch angle, ball height, carry, club head speed, horizontal and vertical club path, swing plane, club face angle, putter speed and acceleration, club face height above the ground, and toe or heel impact position, among other data. Using induction radar it can accurately distinguish backspin from sidespin. It sets up in a few seconds and calibrates in even less time. Averaging repeated measurements characterizes a player's results with a given putter; the device is sensitive enough for a coach or club fitter to determine per-minute changes in face angle or in putter head direction and acceleration. In practice mode it can measure your swing acceleration for calibration. Precise X2 monitoring makes practice more effective.

Recognition:

• Best new product, 2010 US PGA Fall Expo
• Official designated golf tracking product, 2011 US PGA Expo
• The only designated golf tracking device of the Pebble Beach Golf Academy
• Designated equipment of the Golf Discount store in St. Louis
• Official equipment of the 2012 BMW PGA Championship
• Most Titleist stores are equipped with the X2 to serve the world's best players and golf enthusiasts

Website of Zhongtong Digital Golf: //golf.digitgolf.com/
Weibo (industry news): //weibo.com/digitgolf
Sina blog: //blog.sina.com.cn/u/2659198553
Contact: sales 010-6267-0916, 010-6267-0523; customer service 010-6267-0663; email winni@digitgolf.com


With the above understanding, it is not hard to see that although Chinese and Western mathematics had very similar origins, both rooted in the rational treatment and generalization of problems encountered in everyday practice, Chinese mathematics kept extending its predecessors' research tradition: it remained grounded in intuitive phenomena and concrete examples, and in putting them to use.

It should be pointed out that the development of early modern Western mathematics (from the 16th century onward), like the Western scientific tradition generally, directly inherited the axiomatic method of research laid down by Euclid's Elements in ancient Greece. In fact, when we examine the beginnings of both early modern mathematics and physics, each rested on reliance upon empirical facts together with bold conjecture and imagination. On this point, the difference between China and the West is that the West was the first to explain special problems in general, abstract terms, convinced that all the world's phenomena could be unified in number. Moreover, Western thinkers were good at distilling "elegant properties" out of complex phenomena, and firmly believed that elegant, simple theories are the ultimate explanation of the world. Thus in the flourishing of mathematics and science from the early 16th century onward, this plain philosophical outlook shows through everywhere. Kepler, for example, an early astronomer and mathematical explorer, pointed out in his important work Harmonices Mundi the possibility of perfectly uniting astronomy with music, a union regarded as the harmony of the world. This plain epistemology was precisely the starting point of early modern Western science.

The second important question is the construction and deduction of a mathematical system. One point must be admitted: in the building and deduction of systems, Chinese and Western mathematics parted ways very early. Take the Nine Chapters on the Mathematical Art. In content, the core of ancient Chinese mathematical problems lay in explaining practical problems and reusing the solutions, so its chapters are classified by scenes of practical life: "fangtian" (field measurement), "sumi" (grain exchange), "cuifen" (proportional distribution), "shaoguang" (side lengths from areas and volumes), "shanggong" (construction works), and so on. Mathematically, however, the Nine Chapters not only handles a mass of complex problems but also contains important philosophical ideas (limits, infinitesimals, combinations, and the like). The most celebrated example is the Zu Geng principle: to decide that two solids have the same volume, one may use the rule that "if the cross-sectional areas are everywhere equal, the volumes cannot differ", and with this principle compute the volume of the "mouhe fanggai" (the solid common to two circular cylinders intersecting at right angles). The volume of that solid cannot be found by elementary mathematics; strictly speaking, only calculus solves it completely. In the argument we can see a rudimentary idea of integration, displaying the outstanding mathematical intuition of the ancient mathematicians. At the same time, the range of topics was remarkably elastic: from the viewpoint of modern mathematics, problems from elementary algebra, elementary number theory, and elementary geometry are all touched on, with important insights into understanding and solving them. Chapter 8, "Fangcheng", solves problems with the idea of systems of linear equations, and what it uses is, in Western terms, precisely Gaussian elimination. There is also the much-told Chinese remainder theorem, and the Gougu (Pythagorean) theorem, touching many core propositions of elementary mathematics. In deduction, however, what was given was mainly narrative explanation, not derivation and computation. In fact, in the Nine Chapters calculation is carried out only for concrete examples and numerical formulas, while matters of principle appear only as aids to understanding. Under such circumstances, mathematics advanced only through the unproved insight of a very few mathematicians, which is fatal to the development of the system itself.

In the West, early mathematical development was also bold and imaginative, but Western mathematicians did not stop at understanding: they expressed it in mathematical language built from relatively careful chains of logic. Mathematical activity first revived in the hands of artists: painting (the influence of perspective on projective geometry) and the development of music theory stimulated people's thinking. The work of 16th- and 17th-century mathematicians was often non-rigorous, at times without any guarantee from an axiomatic foundation; Euler's handling of many infinite series, for example, rested on naive intuitions, used without proof once the form suggested an idea. In this period the driving force of mathematics was, as with the ancient Chinese mathematicians, the researcher's intuition. The reason Western mathematics nevertheless developed explosively after a comparable period is twofold. First, describing mathematics with abstract symbols freed it from particular practical problems; symbols could be combined freely, complex things captured simply, imagination given full play, no longer tied to the concrete. Second, compared with ancient Chinese mathematics, Western mathematicians attached more importance to building chains of logic, so the passage from premise to conclusion was finer, laying solid groundwork for later research. The axiomatization and abstraction we now praise were not accomplished early, but drew the attention of more and more mathematicians from the 18th century onward. The birth of analysis was in fact the mathematicians' search for a finer chain of logic; it gave calculus a solid theoretical foundation and revealed why a large number of evident propositions are true, deepening the understanding of the calculus as a system. Meanwhile the flourishing of physics drove the development of computational technique, and applying calculus to actual research became common practice. After the 19th century, on the one hand calculus swept through almost all the older branches of elementary mathematics; on the other, the development of modern algebra provided abstract tools (such as group theory, born to explain the solvability of equations), so the branches that developed next became number theory, the theory of functions of a complex variable, and a geometry glowing with new light. In the 20th century both axiomatization and abstraction reached their peak: mathematicians realized that the branches of mathematics are intimately connected, and the development of topology, set theory, and abstract algebra wove the scattered researches and branches into an interconnected, unified framework, a true system. From this standpoint, the ancient Chinese mathematical tradition could never have evolved into such a system, and the reason lies not merely in a different epistemology but in a deeper problem.

I fully support the thesis, advanced in "The Re-creation of the World", that because Chinese culture never underwent a genuine cultural transplantation, the development of science in China was bound to fall short. First, the exchange and collision of sharply different cultures brings new inspiration to both civilizations. Second, the Western writing system is better suited to abstract thinking, whereas Chinese, with its powerful compositional capacity and fine intuitive clarity, never gave rise to a new symbolic system for describing mathematics, which made complex abstract computation and derivation difficult. In my view, however, the key question is why the Chinese and Western mathematical systems never collided. Historically, there was a large time lag between the two developments. Chinese mathematical research began early: by the third century CE there were already outstanding results (the Nine Chapters was first compiled around then, edited by Liu Hui). Greek mathematics, for all its brilliance, clearly never extended its influence as far as East Asia; it reached at most the Mesopotamian region and then passed into India, and that was already the eighth century CE. Under the Tang and Song, Chinese mathematics was highly developed, and the Nine Chapters gradually became East Asia's most important mathematical textbook; Europe in the same period was passing through the Middle Ages, dominated by the Church's monopoly on explaining the world. Only in the thirteenth and fourteenth centuries, when mathematical originals flowed back into Western Europe by way of India and the Arab regions, did Western mathematics begin to develop; but China was then under Mongol rule and its mathematics visibly stalled. After the fifteenth century, mathematics revived in Europe and entered a period of vigorous growth, while Chinese mathematics stayed lukewarm and grew increasingly one-sided; this period fixed the gap between Chinese and Western mathematics. Seen whole, the mismatch in timing was a strong obstacle to exchange: the two sides never stood at roughly comparable heights in the same era and so never formed a dialogue. Politically, ancient Chinese mathematics existed to serve the state, so research stressed utility and was selective; it had real means for solving practical problems but little enthusiasm for building systematic theory. In contrast with the West's metaphysical view of mathematics, Chinese mathematics was a craft "to be perfected for the times and sold to the emperor's house"; without political patronage, mathematics had no soil in which to grow. For just this reason, Chinese mathematicians were few, and mathematical culture did not spread at will; important, advanced computational techniques could not flow out among the people, so a mathematical upsurge across China as a whole was impossible. Likewise, although some important texts such as the Laozi and the Zhouyi made their way to the West, the mathematical wisdom of the East never spread to Europe. As for methods and tools, Chinese mathematics valued computation and practical results: the achievements of calendrical science rested precisely on superb computational technique. The symbol system those techniques rested on was advanced compared with that of any other ancient mathematical civilization, being simple and combinational; through induction and abbreviation (for example the concepts of bai, qian, wan, and yi for hundred, thousand, ten thousand, and hundred million, and compounds like baiwan and qianwan), very large numbers could be written conveniently and intuitively, a great help in the study of computational technique. So even when Western symbol and numeral systems entered China, the computational advantages of the native system meant the newcomers could not displace characters refined over a thousand years.

Today there are many comparative discussions of Chinese and Western science, many aimed at vindicating ancient Chinese science and technology and boosting national confidence. That is understandable, but the discussion should be conducted objectively and fairly. If the core position always rests on the difference between two cultural systems, so as to find some point of parity, I think it quite unnecessary. Whether it is the older claim that "given enough time, China too would have developed science to the same degree as the West", or the current claim that "Chinese science, broadly construed, is gezhi (the investigation of things) and the natural history of life", both talk around the problem. Leave aside whether the Western powers' forcing China open by arms is what stunted native Chinese science; even had the two sides never interfered with each other, the pace of development of mathematics, the foundational discipline of science, was simply not on the same level. Chinese mathematics developed cumulatively and linearly, steady and gradual, while Western mathematics developed explosively, like an exponential function, only ever faster: that is the vast difference. As for arguments from a broad conception of science, these mostly rise to the philosophical plane; we cannot simply breathe a sigh of relief at having found different lines of thought, as if discovering parity lets us rest easy. For Chinese people learning and doing science today, how to absorb the wisdom of the ancients matters greatly. That by no means implies abandoning scientific methodology; it means seeing the world from an angle different from the Western mechanistic one. A case worth learning from is the "Wu method" developed by the mathematician Wu Wenjun, who started from ancient algorithmic ideas and, by constructing programs, proved a large body of elementary and projective geometry, with remarkable results. In a modern science of sprawling, tangled branches, the sheer bulk of the system has in fact become an obstacle to further exploration; how to approach science from the holistic outlook of ancient China is a question that may well succeed and is extremely important. To some degree we should be glad that Chinese philosophical thought stands parallel to the West's: it may yet bring the world its most important enlightenment.


# Bernoulli Equation

The Bernoulli Equation can be considered to be a statement of the conservation of energy principle appropriate for flowing fluids. The qualitative behavior that is usually labeled with the term "Bernoulli effect" is the lowering of fluid pressure in regions where the flow velocity is increased. This lowering of pressure in a constriction of a flow path may seem counterintuitive, but seems less so when you consider pressure to be energy density. In the high velocity flow through the constriction, kinetic energy must increase at the expense of pressure energy.
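
In its standard form, the Bernoulli equation states that the sum of the pressure, kinetic, and potential energy densities is constant along a streamline:

$$P + \tfrac{1}{2}\rho v^2 + \rho g h = \text{const},$$

where $P$ is the static pressure, $\rho$ the fluid density, $v$ the flow speed, and $h$ the height.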

Steady-state flow caveat: While the Bernoulli equation is stated in terms of universally valid ideas like conservation of energy and the ideas of pressure, kinetic energy and potential energy, its application in the above form is limited to cases of steady flow. For flow through a tube, such flow can be visualized as laminar flow, which is still an idealization, but if the flow is to a good approximation laminar, then the kinetic energy of flow at any point of the fluid can be modeled and calculated. The kinetic energy per unit volume term in the equation is the one which requires strict constraints for the Bernoulli equation to apply - it basically is the assumption that all the kinetic energy of the fluid is contributing directly to the forward flow process of the fluid. That should make it evident that the existence of turbulence or any chaotic fluid motion would involve some kinetic energy which is not contributing to the advancement of the fluid through the tube.

It should also be said that while conservation of energy always applies, this form of parsing out that energy certainly does not describe how that energy is distributed under transient conditions. A good visualization of the Bernoulli effect is the flow through a constriction, but that neat picture does not describe the fluid when you first turn on the flow.

Another approximation involved in the statement of the Bernoulli equation above is the neglect of losses from fluid friction. Idealized laminar flow through a pipe can be modeled by Poiseuille's law, which does include viscous losses resulting in a lowering of the pressure as you progress along the pipe. The statement of the Bernoulli equation above would lead to the expectation that the pressure would return to the value P1 past the constriction since the radius returns to its original value. This is not the case because of the loss of some energy from the active flow process by friction into disordered molecular motion (thermal energy). More accurate modeling can be done by combining the Bernoulli equation with Poiseuille's law. A real example which might help visualize the process is the pressure monitoring of the flow through a constricted tube.
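
For reference, Poiseuille's law relates the pressure drop $\Delta P$ for idealized laminar flow through a pipe of radius $r$ and length $L$ to the volume flowrate $Q$ and the fluid viscosity $\mu$:

$$\Delta P = \frac{8 \mu L Q}{\pi r^4}.$$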


# Bernoulli Calculation

The calculation of the "real world" pressure in a constriction of a tube is difficult to do because of viscous losses, turbulence, and the assumptions which must be made about the velocity profile (which affect the calculated kinetic energy). The model calculation here assumes laminar flow (no turbulence), assumes that the distance from the larger diameter to the smaller is short enough that viscous losses can be neglected, and assumes that the velocity profile follows that of theoretical laminar flow. Specifically, this involves assuming that the effective flow velocity is one half of the maximum velocity, and that the average kinetic energy density is given by one third of the maximum kinetic energy density.
Now if you can swallow all those assumptions, you can model the flow in a tube given the volume flowrate (in cm3/s) and the fluid density ρ (in gm/cm3); on the original page these quantities are interactive input fields. For an inlet tube area A1 (radius r1), the geometry of flow leads to an effective fluid velocity v1. Since the Bernoulli equation includes the fluid potential energy as well, the height of the inlet tube is specified as h1. If the area of the tube is constricted to A2 (radius r2), then without any further assumptions the effective fluid velocity in the constriction must be v2. The height of the constricted tube is specified as h2.

The kinetic energy densities at the two locations in the tube can now be calculated, and the Bernoulli equation applied to constrain the process to conserve energy, thus giving a value for the pressure in the constriction. First, specify a pressure in the inlet tube:
Inlet pressure = P1 (in kPa, lb/in2, mmHg, or atmospheres).

The energy densities can now be calculated. The energy unit for the CGS units used is the erg.

Inlet tube energy densities: kinetic energy density, potential energy density, and pressure energy density (each in erg/cm3).
Constricted tube energy densities: kinetic energy density, potential energy density, and pressure energy density (each in erg/cm3).

The pressure energy density in the constricted tube can now finally be converted into more conventional pressure units to see the effect of the constricted flow on the fluid pressure:

Calculated pressure in constriction = P2 (in kPa, lb/in2, mmHg, or atmospheres).
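
A minimal sketch of this calculation in C++, ignoring the laminar-profile correction factors discussed above (all variable names and example values here are ours, not the page's):

```cpp
#include <cstdio>

int main()
{
    const double PI  = 3.14159265358979;
    const double rho = 1.0;        // fluid density, g/cm^3 (water)
    const double g   = 981.0;      // gravitational acceleration in CGS, cm/s^2
    const double Q   = 100.0;      // volume flowrate, cm^3/s
    const double r1  = 1.0;        // inlet radius, cm
    const double r2  = 0.5;        // constricted radius, cm
    const double h1  = 0.0, h2 = 0.0;   // tube heights, cm
    const double P1  = 1.01325e6;  // inlet pressure, dyne/cm^2 (about 1 atm)

    const double v1 = Q / (PI * r1 * r1);   // effective velocity in the inlet
    const double v2 = Q / (PI * r2 * r2);   // effective velocity in the constriction

    // Bernoulli: P + 0.5*rho*v^2 + rho*g*h is conserved along the flow,
    // so the constriction pressure follows from the inlet values.
    const double P2 = P1 + 0.5 * rho * (v1 * v1 - v2 * v2) + rho * g * (h1 - h2);

    std::printf("v1 = %.1f cm/s, v2 = %.1f cm/s\n", v1, v2);
    std::printf("P2 = %.0f dyne/cm^2 (%.2f kPa)\n", P2, P2 * 1e-4);
    return 0;
}
```

With a drastic enough reduction in r2, P2 drops below atmospheric pressure, which is the aspirator effect described below.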

This calculation can give some perspective on the energy involved in fluid flow, but its accuracy is always suspect because of the assumption of laminar flow. For typical inlet conditions, the energy density associated with the pressure will be dominant on the input side; after all, we live at the bottom of an atmospheric sea which contributes a large amount of pressure energy. If a drastic enough reduction in radius is used to yield a pressure in the constriction which is less than atmospheric pressure, there is almost certainly some turbulence involved in the flow into that constriction. Nevertheless, the calculation can show why we can get a significant amount of suction (pressure less than atmospheric) with an "aspirator" on a high pressure faucet. These devices consist of a metal tube of reducing radius with a side tube into the region of constricted radius for suction.


# Curve of a Baseball

A non-spinning baseball or a stationary baseball in an airstream exhibits symmetric flow. A baseball which is thrown with spin will curve because one side of the ball will experience a reduced pressure. This is commonly interpreted as an application of the Bernoulli principle and involves the viscosity of the air and the boundary layer of air at the surface of the ball.

 The roughness of the ball's surface and the laces on the ball are important! With a perfectly smooth ball you would not get enough interaction with the air.

There are some difficulties with this picture of the curving baseball. The Bernoulli equation cannot really be used to predict the amount of curve of the ball; the flow of the air is compressible, and you can't track the density changes to quantify the change in effective pressure. The experimental work of Watts and Ferrer with baseballs in a wind tunnel suggests another model which gives prominent attention to the spinning boundary layer of air around the baseball. On the side of the ball where the boundary layer is moving in the same direction as the free stream air speed, the boundary layer carries further around the ball before it separates into turbulent flow. On the side where the boundary layer is opposed by the free stream flow, it tends to separate prematurely. This gives a net deflection of the airstream in one direction behind the ball, and therefore a  reaction force on the ball in the opposite direction. This gives an effective force in the same direction indicated above.

Similar issues arise in the treatment of a spinning cylinder in an airstream, which has been shown to experience lift. This is the subject of the Kutta-Joukowski theorem. It is also invoked in the discussion of airfoil lift.


# Airfoil

The air across the top of a conventional airfoil experiences constricted flow lines and increased air speed relative to the wing. This causes a decrease in pressure on the top according to the Bernoulli equation and provides a lift force. Aerodynamicists (see Eastlake) use the Bernoulli model to correlate with pressure measurements made in wind tunnels, and assert that when pressure measurements are made at multiple locations around the airfoil and summed, they do agree reasonably with the observed lift.

Related figures: illustration of lift force and angle of attack; Bernoulli vs. Newton for airfoil lift; airfoil terminology.

Others appeal to a model based on Newton's laws and assert that the main lift comes as a result of the angle of attack. Part of the Newton's law model of the lift force involves attachment of the boundary layer of air on the top of the wing, with a resulting downwash of air behind the wing. If the wing gives the air a downward force, then by Newton's third law, the wing experiences a force in the opposite direction: a lift. While the "Bernoulli vs Newton" debate continues, Eastlake's position is that they are really equivalent, just different approaches to the same physical phenomenon. NASA has a nice aerodynamics site where these issues are discussed.

Increasing the angle of attack gives a larger lift from the upward component of pressure on the bottom of the wing. The lift force can be considered to be a  reaction force to the force exerted downward on the air by the wing.

At too high an angle of attack, turbulent flow increases the drag dramatically and will stall the aircraft.

A vapor trail over the wing helps visualize the air flow. Photo by Frank Starmer, used by permission.



//enjoy.phy.ntnu.edu.tw/demolab/phpBB/

1: Huang Fu-kun (graduate school), posted 2006-10-22 12:39:58:

2: Huang Fu-kun (graduate school), posted 2006-10-22 13:02:58 (reply to the above):

3: (a senior-high member), posted 2006-12-31 12:41:28 (reply to the above):

4: Li Wei (university, science and engineering), posted 2006-12-31 15:24:22 (reply to the above):

Quote: on 2006-12-31 12:41:28 the poster above wrote: So a telescope is actually two magnifying lenses! I had thought it was one convex and one concave lens. Good thing I found out in time.

5: (the same senior-high member), posted 2006-12-31 15:32:38 (reply to the above):

7: Huang Fu-kun (graduate school), posted 2012-11-06 19:45:40 (reply to the above):


## Qt Creator

Qt Creator directly supports introspection of all Qt Containers and QObject derived classes for Qt 4 and Qt 5. User defined types can be supported in addition, see the Qt Creator documentation for details.

## LLDB

There is an effort to introspect Qt types using LLDB at https://bitbucket.org/lukeworth/lldb-qt-formatters.

KDevelop ships formatter scripts in its LLDB plugin for Qt types that can be used directly in plain LLDB. https://unlimitedcodeworks.xyz/blog/2016/08/20/gsoc-kdevelop-lldb-final-report/#using-data-formatter-scripts-outside-kdevelop

## MS visual studio QString & QByteArray expansions

The new layout of QString in Qt 5 is hard to inspect using the debugger. The following code can be added to autoexp.dat (c:\program files(x86)\visual studio 9.0\common7\packages\debugger\autoexp.dat). You should add it to the [Visualizer] section, before the STL/ATL containers.

```
; Qt types
QStringData{
    preview ([(unsigned short*)$e.d + $e.offset,su])
    stringview ([(unsigned short*)$e.d + $e.offset,sub])
}
QString{
    preview ([$e.d])
}
QByteArrayData{
    preview ([(unsigned char*)$e.d + $e.offset,s])
    stringview ([(unsigned char*)$e.d + $e.offset,sb])
}
QByteArray{
    preview ([$e.d])
}
```

If all else fails you can always just add a watcher for

  (char*)str.d + str.d->offset,su

in the debugger, to see the contents of str.

## MS Visual Studio 2012

There is a new way to visualize native type, see //code.msdn.microsoft.com/Writing-type-visualizers-2eae77a2 for details.

So we can visualize QString and some other types using qt5.natvis file (save to file: %USERPROFILE%\Documents\Visual Studio 2012\Visualizers\qt5.natvis)

```xml
<?xml version="1.0" encoding="utf-8"?>
<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
  <Type Name="QString">
    <DisplayString>{(char*)d + d->offset,su}</DisplayString>
  </Type>
  <Type Name="QtPrivate::RefCount">
    <DisplayString>{atomic}</DisplayString>
  </Type>
  <Type Name="QBasicAtomicInteger&lt;int&gt;">
    <DisplayString>{_q_value}</DisplayString>
  </Type>
  <Type Name="QTypedArrayData&lt;*&gt;">
    <DisplayString>{{Count = {size}}}</DisplayString>
    <Expand>
      <Item Name="[size]">size</Item>
      <ArrayItems>
        <Size>size</Size>
        <ValuePointer>(iterator) ((char*)this + offset)</ValuePointer>
      </ArrayItems>
    </Expand>
  </Type>
  <Type Name="QByteArray">
    <DisplayString>{*d}</DisplayString>
  </Type>
  <!-- More Qt5 types... -->
</AutoVisualizer>
```

## MS Visual Studio 2013

The ".natvis" files introduced in MSVS2012 received some additional attention in MSVS2013:


https://docs.opencv.org/2.4/doc/tutorials/imgproc/imgtrans/remap/remap.html

## Goal

In this tutorial you will learn how to:

1. Use the OpenCV function remap to implement simple remapping routines.

## Theory

### What is remapping?

• It is the process of taking pixels from one place in the image and locating them in another position in a new image.

• To accomplish the mapping process, it might be necessary to do some interpolation for non-integer pixel locations, since there will not always be a one-to-one-pixel correspondence between source and destination images.

• We can express the remap for every pixel location $(x,y)$ as:

$$g(x,y) = f(h(x,y))$$

where $g$ is the remapped image, $f$ the source image and $h(x,y)$ is the mapping function that operates on $(x,y)$.

• Let’s think in a quick example. Imagine that we have an image $I$ and, say, we want to do a remap such that:

$$h(x,y) = (I.cols - x,\; y)$$

What would happen? It is easily seen that the image would flip in the $x$ direction. For instance, consider the input image:

observe how the red circle changes positions with respect to $x$ (considering $x$ the horizontal direction):

• In OpenCV, the function remap offers a simple remapping implementation.

## Code

1. What does this program do?
• Each second, apply 1 of 4 different remapping processes to the image and display them indefinitely in a window.
• Wait for the user to exit the program
2. The tutorial code is shown in the lines below. You can also download it from here
 #include "opencv2/highgui/highgui.hpp"  #include "opencv2/imgproc/imgproc.hpp"  #include <iostream>  #include <stdio.h>   using namespace cv;   /// Global variables  Mat src, dst;  Mat map_x, map_y;  char* remap_window = "Remap demo";  int ind = 0;   /// Function Headers  void update_map( void );   /**  * @function main  */  int main( int argc, char** argv )  {    /// Load the image    src = imread( argv[1], 1 );    /// Create dst, map_x and map_y with the same size as src:   dst.create( src.size(), src.type() );   map_x.create( src.size(), CV_32FC1 );   map_y.create( src.size(), CV_32FC1 );    /// Create window   namedWindow( remap_window, CV_WINDOW_AUTOSIZE );    /// Loop   while( true )   {     /// Each 1 sec. Press ESC to exit the program     int c = waitKey( 1000 );      if( (char)c == 27 )       { break; }      /// Update map_x & map_y. Then apply remap     update_map();     remap( src, dst, map_x, map_y, CV_INTER_LINEAR, BORDER_CONSTANT, Scalar(0,0, 0) );      /// Display results     imshow( remap_window, dst );   }   return 0;  }   /**  * @function update_map  * @brief Fill the map_x and map_y matrices with 4 types of mappings  */  void update_map( void )  {    ind = ind%4;     for( int j = 0; j < src.rows; j++ )    { for( int i = 0; i < src.cols; i++ )        {          switch( ind )          {            case 0:              if( i > src.cols*0.25 && i < src.cols*0.75 && j > src.rows*0.25 && j < src.rows*0.75 )                {                  map_x.at<float>(j,i) = 2*( i - src.cols*0.25 ) + 0.5 ;                  map_y.at<float>(j,i) = 2*( j - src.rows*0.25 ) + 0.5 ;                 }              else                { map_x.at<float>(j,i) = 0 ;                  map_y.at<float>(j,i) = 0 ;                }                  break;            case 1:                  map_x.at<float>(j,i) = i ;                  map_y.at<float>(j,i) = src.rows - j ;                  break;            case 2:                  map_x.at<float>(j,i) = src.cols - i ;                  map_y.at<float>(j,i) = j ;                  break;            case 3:                  map_x.at<float>(j,i) = src.cols - i ;                  map_y.at<float>(j,i) = src.rows - j ;                  break;          } // end of switch        }     }   ind++; }

## Explanation

1. Create some variables we will use:

```cpp
Mat src, dst;
Mat map_x, map_y;
char* remap_window = "Remap demo";
int ind = 0;
```

2. Load an image (the path is taken from the command line):

```cpp
src = imread( argv[1], 1 );
```
3. Create the destination image and the two mapping matrices (for x and y )

```cpp
dst.create( src.size(), src.type() );
map_x.create( src.size(), CV_32FC1 );
map_y.create( src.size(), CV_32FC1 );
```
4. Create a window to display results

```cpp
namedWindow( remap_window, CV_WINDOW_AUTOSIZE );
```
5. Establish a loop. Each 1000 ms we update our mapping matrices (mat_x and mat_y) and apply them to our source image:

```cpp
while( true )
{
  /// Each 1 sec. Press ESC to exit the program
  int c = waitKey( 1000 );

  if( (char)c == 27 )
    { break; }

  /// Update map_x & map_y. Then apply remap
  update_map();
  remap( src, dst, map_x, map_y, CV_INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0) );

  /// Display results
  imshow( remap_window, dst );
}
```

The function that applies the remapping is remap. We give the following arguments:

• src: Source image
• dst: Destination image of same size as src
• map_x: The mapping function in the x direction. It is equivalent to the first component of $h(x,y)$
• map_y: Same as above, but in y direction. Note that map_y and map_x are both of the same size as src
• CV_INTER_LINEAR: The type of interpolation to use for non-integer pixels. This is the default.
• BORDER_CONSTANT: Default

How do we update our mapping matrices mat_x and mat_y? Go on reading:

6. Updating the mapping matrices: We are going to perform 4 different mappings:

1. Reduce the picture to half its size and display it in the middle:

$$h(i,j) = \left( 2 i - \frac{src.cols}{2} + 0.5,\;\; 2 j - \frac{src.rows}{2} + 0.5 \right)$$

for all pairs $(i,j)$ such that: $\frac{src.cols}{4} < i < \frac{3 \cdot src.cols}{4}$ and $\frac{src.rows}{4} < j < \frac{3 \cdot src.rows}{4}$

2. Turn the image upside down: $h(i,j) = (i,\; src.rows - j)$

3. Reflect the image from left to right: $h(i,j) = (src.cols - i,\; j)$

4. Combination of b and c: $h(i,j) = (src.cols - i,\; src.rows - j)$

This is expressed in the following snippet. Here, map_x represents the first coordinate of h(i,j) and map_y the second coordinate.

```cpp
for( int j = 0; j < src.rows; j++ )
{ for( int i = 0; i < src.cols; i++ )
    {
      switch( ind )
      {
        case 0:
          if( i > src.cols*0.25 && i < src.cols*0.75 && j > src.rows*0.25 && j < src.rows*0.75 )
            {
              map_x.at<float>(j,i) = 2*( i - src.cols*0.25 ) + 0.5 ;
              map_y.at<float>(j,i) = 2*( j - src.rows*0.25 ) + 0.5 ;
            }
          else
            { map_x.at<float>(j,i) = 0 ;
              map_y.at<float>(j,i) = 0 ;
            }
          break;
        case 1:
          map_x.at<float>(j,i) = i ;
          map_y.at<float>(j,i) = src.rows - j ;
          break;
        case 2:
          map_x.at<float>(j,i) = src.cols - i ;
          map_y.at<float>(j,i) = j ;
          break;
        case 3:
          map_x.at<float>(j,i) = src.cols - i ;
          map_y.at<float>(j,i) = src.rows - j ;
          break;
      } // end of switch
    }
}
ind++;
```

## Result

1. After compiling the code above, you can execute it giving as argument an image path. For instance, by using the following image:

2. This is the result of reducing it to half the size and centering it:

3. Turning it upside down:

4. Reflecting it in the x direction:

5. Reflecting it in both directions:


https://github.com/opencv/opencv/issues/8762

• OpenCV => 3.2
• Operating System / Platform => Windows 64 Bit
• Compiler => Visual Studio 2015

Hi everyone, I understand that this forum is to report bugs and not to ask questions but I already posted about my problems in answers.opencv.org without any useful response. I need to resolve my problem very urgently since my final year project deadline is approaching soon.

I am currently working on my project which involves vehicle detection and tracking and estimating and optimizing a cuboid around the vehicle. For that I have accomplished detection and tracking of vehicles and I need to find the 3-D world coordinates of the image points of the edges of the bounding boxes of the vehicles and then estimate the world coordinates of the edges of the cuboid and the project it back to the image to display it.

So, I am new to computer vision and OpenCV, but to my knowledge, I just need 4 points on the image, need to know the world coordinates of those 4 points, and can use solvePnP in OpenCV to get the rotation and translation vectors (I already have the camera matrix and distortion coefficients). Then, I need to use Rodrigues to transform the rotation vector into a rotation matrix and then concatenate it with the translation vector to get my extrinsic matrix, and then multiply the camera matrix by the extrinsic matrix to get my projection matrix. Since my z coordinate is zero, I need to take off the third column from the projection matrix, which gives the homography matrix for converting the 2D image points to 3D world points. Now, I find the inverse of the homography matrix, which gives me the homography between the 3D world points and the 2D image points. After that I multiply the image points [x, y, 1]t with the inverse homography matrix to get [wX, wY, w]t, and then divide the entire vector by the scalar w to get [X, Y, 1], which gives me the X and Y values of the world coordinates.

My code looks like this:

#include "opencv2/opencv.hpp" #include <stdio.h> #include <iostream> #include <sstream> #include <math.h> #include <conio.h>  using namespace cv; using namespace std;  Mat cameraMatrix, distCoeffs, rotationVector, rotationMatrix, translationVector, extrinsicMatrix, projectionMatrix, homographyMatrix, inverseHomographyMatrix;   Point point; vector<Point2d> image_points; vector<Point3d> world_points;  int main() {     FileStorage fs1("intrinsics.yml", FileStorage::READ);     fs1["camera_matrix"] >> cameraMatrix;    cout << "Camera Matrix: " << cameraMatrix << endl << endl;     fs1["distortion_coefficients"] >> distCoeffs;    cout << "Distortion Coefficients: " << distCoeffs << endl << endl;          image_points.push_back(Point2d(275, 204));    image_points.push_back(Point2d(331, 204));    image_points.push_back(Point2d(331, 308));    image_points.push_back(Point2d(275, 308));     cout << "Image Points: " << image_points << endl << endl;     world_points.push_back(Point3d(0.0, 0.0, 0.0));    world_points.push_back(Point3d(1.775, 0.0, 0.0));    world_points.push_back(Point3d(1.775, 4.620, 0.0));    world_points.push_back(Point3d(0.0, 4.620, 0.0));     cout << "World Points: " << world_points << endl << endl;     solvePnP(world_points, image_points, cameraMatrix, distCoeffs, rotationVector, translationVector);    cout << "Rotation Vector: " << endl << rotationVector << endl << endl;    cout << "Translation Vector: " << endl << translationVector << endl << endl;     Rodrigues(rotationVector, rotationMatrix);    cout << "Rotation Matrix: " << endl << rotationMatrix << endl << endl;     hconcat(rotationMatrix, translationVector, extrinsicMatrix);    cout << "Extrinsic Matrix: " << endl << extrinsicMatrix << endl << endl;     projectionMatrix = cameraMatrix * extrinsicMatrix;    cout << "Projection Matrix: " << endl << projectionMatrix << endl << endl;     double p11 = projectionMatrix.at<double>(0, 0),    	p12 = projectionMatrix.at<double>(0, 1),    	p14 = projectionMatrix.at<double>(0, 3),    	p21 = projectionMatrix.at<double>(1, 0),    	p22 = projectionMatrix.at<double>(1, 1),    	p24 = projectionMatrix.at<double>(1, 3),    	p31 = projectionMatrix.at<double>(2, 0),    	p32 = projectionMatrix.at<double>(2, 1),    	p34 = projectionMatrix.at<double>(2, 3);      homographyMatrix = (Mat_<double>(3, 3) << p11, p12, p14, p21, p22, p24, p31, p32, p34);    cout << "Homography Matrix: " << endl << homographyMatrix << endl << endl;     inverseHomographyMatrix = homographyMatrix.inv();    cout << "Inverse Homography Matrix: " << endl << inverseHomographyMatrix << endl << endl;     Mat point2D = (Mat_<double>(3, 1) << image_points[0].x, image_points[0].y, 1);    cout << "First Image Point" << point2D << endl << endl;     Mat point3Dw = inverseHomographyMatrix*point2D;    cout << "Point 3D-W : " << point3Dw << endl << endl;     double w = point3Dw.at<double>(2, 0);    cout << "W: " << w << endl << endl;     Mat matPoint3D;    divide(w, point3Dw, matPoint3D);     cout << "Point 3D: " << matPoint3D << endl << endl;     _getch();    return 0; }

I have got the image coordinates of the four known world points and hard-coded them for simplification. image_points contains the image coordinates of the four points and world_points contains the world coordinates of the four points. I am considering the first world point as the origin (0, 0, 0) in the world axis and using the known distances to calculate the coordinates of the other points. Now after calculating the inverse homography matrix, I multiplied it with [image_points[0].x, image_points[0].y, 1]t, which is related to the world coordinate (0, 0, 0). Then I divided the result by the third component w to get [X, Y, 1]. But after printing out the values of X and Y, it turns out they are not 0, 0 respectively. What am I doing wrong?

The output of my code is like this:

```
Camera Matrix:
[517.0036881709533, 0, 320;
 0, 517.0036881709533, 212;
 0, 0, 1]

Distortion Coefficients:
[0.1128663679798094; -1.487790079922432; 0; 0; 2.300571896761067]

Image Points:
[275, 204; 331, 204; 331, 308; 275, 308]

World Points:
[0, 0, 0; 1.775, 0, 0; 1.775, 4.62, 0; 0, 4.62, 0]

Rotation Vector:
[0.661476468596541; -0.02794460022559267; 0.01206996342819649]

Translation Vector:
[-1.394495345140898; -0.2454153722672731; 15.47126945512652]

Rotation Matrix:
[0.9995533907649279, -0.02011656447351923, -0.02209848058392758;
 0.002297501163799448, 0.7890323093017149, -0.6143474069013439;
 0.02979497438726573, 0.6140222623910194, 0.7887261380159]

Extrinsic Matrix:
[0.9995533907649279, -0.02011656447351923, -0.02209848058392758, -1.394495345140898;
 0.002297501163799448, 0.7890323093017149, -0.6143474069013439, -0.2454153722672731;
 0.02979497438726573, 0.6140222623910194, 0.7887261380159, 15.47126945512652]

Projection Matrix:
[526.3071813531748, 186.086785938988, 240.9673682002232, 4229.846989065414;
 7.504351145361707, 538.1053336219271, -150.4099339268854, 3153.028471890794;
 0.02979497438726573, 0.6140222623910194, 0.7887261380159, 15.47126945512652]

Homography Matrix:
[526.3071813531748, 186.086785938988, 4229.846989065414;
 7.504351145361707, 538.1053336219271, 3153.028471890794;
 0.02979497438726573, 0.6140222623910194, 15.47126945512652]

Inverse Homography Matrix:
[0.001930136511648154, -8.512427241879318e-05, -0.5103513244724983;
 -6.693679705844383e-06, 0.00242178892313387, -0.4917279870709287;
 -3.451449134581896e-06, -9.595179260534558e-05, 0.08513443835773901]

First Image Point
[275; 204; 1]

Point 3D-W :
[0.003070864657310213; 0.0004761913292736786; 0.06461112415423849]

W: 0.0646111

Point 3D:
[21.04004290792539; 135.683117651025; 1]
```
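
Incidentally, the numbers above are consistent with the argument order of cv::divide: divide(w, point3Dw, matPoint3D) computes w / point3Dw elementwise. Normalizing the homogeneous vector the other way around (a sketch, using the values logged above):

```cpp
// Divide the vector by w, not w by the vector:
Mat point3D = point3Dw / point3Dw.at<double>(2, 0);
// With the values logged above this gives roughly [0.048; 0.007; 1],
// i.e. X and Y close to the expected world origin (0, 0).
```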

https://stackoverflow.com/questions/10163034/how-can-i-calculate-camera-position-by-comparing-two-photographs

## Background

I have taught this course several times (almost every semester). I am always fiddling around with the course content, so the material covered and the order of presentation changes from semester to semester. Below are the lecture notes from Fall 2007.

In addition to slides that I created, I borrowed heavily from other lecturers whose computer vision slides are on the web. I used to put an attribution at the bottom of each slide as to where and who it came from. However, that led to cluttered slides, and was distracting. So, I dropped that format. Instead, I'm telling you up-front that a lot of the slides in the lectures below did not originate from me. Here is a partial list of the main sources that I can remember: Octavia Camps, Forsyth and Ponce, David Jacobs, Steve Seitz, Chuck Dyer, Martial Hebert. If I forgot you, and you see your slides here, well... thanks. And drop me a line so I can add your name to the list.

By the same token, if you are putting together a computer vision course, and want to use some of my slides, go right ahead. You are welcome to them, since the main goal here is to improve the quality of computer vision education everywhere. To quote Thomas Jefferson: "He who receives an idea from me, receives instruction himself without lessening mine; as he who lights his taper at mine, receives light without darkening me. That ideas should freely spread from one to another over the globe, for the moral and mutual instruction of man, and improvement of his condition, seems to have been peculiarly and benevolently designed by nature, when she made them, like fire, expansible over all space, without lessening their density at any point, and like the air in which we breathe, move, and have our physical being, incapable of confinement or exclusive appropriation." Jefferson was one awesome dude.

## Fall 2007 Lecture Notes

Detailed List of Topics Covered in Fall 2007

Each lecture was posted as full slides and as a six-per-page handout.

• Lecture 01: Intro to Computer Vision
• Lecture 02: Intensity Surfaces and Gradients
• Lecture 03: Linear Operators and Convolution
• Lecture 04: Smoothing
• Lecture 05: Edge Detection
• Lecture 06: Corner Detection
• Lecture 07: Template Matching
• Lecture 08: Introduction to Stereo
• Lecture 09: Stereo Algorithms
• Lecture 10: Image Pyramids
• Lecture 11: LoG Edge and Blob Finding
• Lecture 12: Camera Projection (Extrinsics)
• Lecture 13: Camera Projection (Intrinsics)
• Lecture 14: Parameter Estimation; Image Warping
• Lecture 15: Robust Estimation: RANSAC
• Lecture 16: Planar Homographies
• Lecture 17: Stabilization and Mosaicing
• Lecture 18: Generalized Stereo
• Lecture 19: Essential and Fundamental Matrices
• Lecture 20: The 8-point algorithm
• Lecture 21: Stereo Reconstruction
• Lecture 22: Camera Motion Field
• Lecture 23: Optic Flow
• Lecture 24: Video Change Detection
• Lecture 25: Structure From Motion (SFM)
• Lecture 26: Color and Light
• Lecture 27: Application: Skin Color
• Lecture 28: Intro to Tracking
• Lecture 29: Video Tracking: Mean-shift
• Lecture 30: Video Tracking: Lucas-Kanade
• Lecture 31: Object Recognition: SIFT Keys
• Lecture 32: Object Recognition: PCA / Eigenfaces


//blog.sina.com.cn/s/blog_14d1511ee0102wxi4.html
Matlab2017a

The installation tutorial I tested myself is here; if you find it useful, please give the Baidu Jingyan guide I wrote a thumbs-up ^_^
//jingyan.baidu.com/article/ac6a9a5e0e67652b653eacc2.html

===================================================
Matlab2016b

Matlab2016a

Matlab2015a

Matlab2014a

Matlab2013a

Matlab2012a

Matlab2011a

Matlab2010a

Matlab2009a

Matlab2008a


using this as input (your own median-filtered image; I've just cropped it):

First I "normalize" the image: I just stretch the values so that the smallest value is 0 and the biggest value is 255, leading to this result (maybe some real contrast enhancement would be better):

after that I compute the threshold of that image with some fixed threshold (you might need to edit that and find a way to choose the threshold dynamically! a better contrast enhancement might help there)

from this image, I use some simple RANSAC circle detection (very similar to my answer in the linked semi-circle detection question), giving you this result as the best semi-circle:

```cpp
#include <opencv2/opencv.hpp>
#include <vector>
#include <cstdlib>

// forward declarations of the helpers defined below
float verifyCircle(cv::Mat dt, cv::Point2f center, float radius, std::vector<cv::Point2f>& inlierSet);
inline void getCircle(cv::Point2f& p1, cv::Point2f& p2, cv::Point2f& p3, cv::Point2f& center, float& radius);
std::vector<cv::Point2f> getPointPositions(cv::Mat binaryImage);

int main()
{
    // load the input image (the file name here is a placeholder)
    cv::Mat color = cv::imread("input.png");
    cv::Mat gray;

    // convert to grayscale
    cv::cvtColor(color, gray, CV_BGR2GRAY);

    // now map brightest pixel to 255 and smallest pixel val to 0. this is for easier finding of threshold
    double min, max;
    cv::minMaxLoc(gray, &min, &max);
    float sub = min;
    float mult = 255.0f / (float)(max - sub);
    cv::Mat normalized = gray - sub;
    normalized = mult * normalized;
    cv::imshow("normalized", normalized);

    //--------------------------------
    // now compute threshold
    // TODO: this might be a tricky task if noise differs...
    cv::Mat mask = normalized > 100;   // fixed threshold, the value is a placeholder
    //cv::threshold(normalized, mask, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);

    std::vector<cv::Point2f> edgePositions = getPointPositions(mask);

    // create distance transform to efficiently evaluate distance to nearest edge
    cv::Mat dt;
    cv::distanceTransform(255 - mask, dt, CV_DIST_L1, 3);

    //TODO: maybe seed random variable for real random numbers.

    cv::Point2f bestCircleCenter;
    float bestCircleRadius = 0;
    float bestCirclePercentage = 0;
    float minRadius = 50;   // TODO: ADJUST THIS PARAMETER TO YOUR NEEDS, otherwise smaller circles wont be detected or "small noise circles" will have a high percentage of completion

    //float minCirclePercentage = 0.2f;
    float minCirclePercentage = 0.05f;  // at least 5% of a circle must be present? maybe more...

    unsigned int maxNrOfIterations = edgePositions.size();   // TODO: adjust this parameter or include some real ransac criteria with inlier/outlier percentages to decide when to stop

    for (unsigned int its = 0; its < maxNrOfIterations; ++its)
    {
        //RANSAC: randomly choose 3 points and create a circle:
        //TODO: choose randomly but more intelligent,
        //so that it is more likely to choose three points of a circle.
        //For example if there are many small circles, it is unlikely to randomly choose 3 points of the same circle.
        unsigned int idx1 = rand() % edgePositions.size();
        unsigned int idx2 = rand() % edgePositions.size();
        unsigned int idx3 = rand() % edgePositions.size();

        // we need 3 different samples:
        if (idx1 == idx2) continue;
        if (idx1 == idx3) continue;
        if (idx3 == idx2) continue;

        // create circle from 3 points:
        cv::Point2f center;
        float radius;
        getCircle(edgePositions[idx1], edgePositions[idx2], edgePositions[idx3], center, radius);

        // inlier set unused at the moment but could be used to approximate a (more robust) circle from all inlier
        std::vector<cv::Point2f> inlierSet;

        //verify or falsify the circle by inlier counting:
        float cPerc = verifyCircle(dt, center, radius, inlierSet);

        // update best circle information if necessary
        // (the radius check puts the minRadius parameter declared above to use)
        if (cPerc >= bestCirclePercentage && radius >= minRadius)
        {
            bestCirclePercentage = cPerc;
            bestCircleRadius = radius;
            bestCircleCenter = center;
        }
    }

    // draw if good circle was found
    if (bestCirclePercentage >= minCirclePercentage)
        cv::circle(color, bestCircleCenter, bestCircleRadius, cv::Scalar(255, 255, 0), 2);

    cv::imshow("output", color);
    cv::waitKey(0);

    return 0;
}
```

float verifyCircle(cv::Mat dt, cv::Point2f center, float radius, std::vector<cv::Point2f> & inlierSet)
{
 unsigned int counter = 0;
 unsigned int inlier = 0;
 float minInlierDist = 2.0f;
 float maxInlierDistMax = 100.0f;
 float maxInlierDist = radius/25.0f;
 if(maxInlierDist<minInlierDist) maxInlierDist = minInlierDist;
 if(maxInlierDist>maxInlierDistMax) maxInlierDist = maxInlierDistMax;
 
 // choose samples along the circle and count inlier percentage
 for(float t =0; t<2*3.14159265359f; t+= 0.05f)
 {
     counter++;
     float cX = radius*cos(t) + center.x;
     float cY = radius*sin(t) + center.y;
 
     if(cX < dt.cols)
     if(cX >= 0)
     if(cY < dt.rows)
     if(cY >= 0)
     if(dt.at<float>(cY,cX) < maxInlierDist)
     {
        inlier++;
        inlierSet.push_back(cv::Point2f(cX,cY));
     }
 }
 
 return (float)inlier/float(counter);
}
 
 
inline void getCircle(cv::Point2f& p1,cv::Point2f& p2,cv::Point2f& p3, cv::Point2f& center, float& radius)
{
  float x1 = p1.x;
  float x2 = p2.x;
  float x3 = p3.x;
 
  float y1 = p1.y;
  float y2 = p2.y;
  float y3 = p3.y;
 
  // PLEASE CHECK FOR TYPOS IN THE FORMULA :)
  center.x = (x1*x1+y1*y1)*(y2-y3) + (x2*x2+y2*y2)*(y3-y1) + (x3*x3+y3*y3)*(y1-y2);
  center.x /= ( 2*(x1*(y2-y3) - y1*(x2-x3) + x2*y3 - x3*y2) );
 
  center.y = (x1*x1 + y1*y1)*(x3-x2) + (x2*x2+y2*y2)*(x1-x3) + (x3*x3 + y3*y3)*(x2-x1);
  center.y /= ( 2*(x1*(y2-y3) - y1*(x2-x3) + x2*y3 - x3*y2) );
 
  radius = sqrt((center.x-x1)*(center.x-x1) + (center.y-y1)*(center.y-y1));
}
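
For reference, the formula above is the standard circumcenter of three points $(x_1,y_1), (x_2,y_2), (x_3,y_3)$:

$$c_x = \frac{(x_1^2+y_1^2)(y_2-y_3) + (x_2^2+y_2^2)(y_3-y_1) + (x_3^2+y_3^2)(y_1-y_2)}{2\,[x_1(y_2-y_3) - y_1(x_2-x_3) + x_2 y_3 - x_3 y_2]},$$

$$c_y = \frac{(x_1^2+y_1^2)(x_3-x_2) + (x_2^2+y_2^2)(x_1-x_3) + (x_3^2+y_3^2)(x_2-x_1)}{2\,[x_1(y_2-y_3) - y_1(x_2-x_3) + x_2 y_3 - x_3 y_2]},$$

with radius $r = \sqrt{(c_x-x_1)^2 + (c_y-y_1)^2}$. The denominator vanishes when the three points are collinear, which the random sampling above tolerates by simply producing a useless candidate.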
 
 
 
std::vector<cv::Point2f> getPointPositions(cv::Mat binaryImage)
{
 std::vector<cv::Point2f> pointPositions;
 
 for(unsigned int y=0; y<binaryImage.rows; ++y)
 {
     //unsigned char* rowPtr = binaryImage.ptr<unsigned char>(y);
     for(unsigned int x=0; x<binaryImage.cols; ++x)
     {
         //if(rowPtr[x] > 0) pointPositions.push_back(cv::Point2i(x,y));
         if(binaryImage.at<unsigned char>(y,x) > 0) pointPositions.push_back(cv::Point2f(x,y));
     }
 }
 
 return pointPositions;
}


## Goals

• Learn to apply different geometric transformation to images like translation, rotation, affine transformation etc.
• You will see these functions: cv2.getPerspectiveTransform

## Transformations

OpenCV provides two transformation functions, cv2.warpAffine and cv2.warpPerspective, with which you can have all kinds of transformations. cv2.warpAffine takes a 2x3 transformation matrix while cv2.warpPerspective takes a 3x3 transformation matrix as input.

### Scaling

Scaling is just resizing of the image. OpenCV comes with a function cv2.resize() for this purpose. The size of the image can be specified manually, or you can specify the scaling factor. Different interpolation methods are used. Preferable interpolation methods are cv2.INTER_AREA for shrinking and cv2.INTER_CUBIC (slow) & cv2.INTER_LINEAR for zooming. By default, interpolation method used is cv2.INTER_LINEAR for all resizing purposes. You can resize an input image either of following methods:

```python
import cv2
import numpy as np

img = cv2.imread('messi5.jpg')

res = cv2.resize(img, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)

# OR

height, width = img.shape[:2]
res = cv2.resize(img, (2*width, 2*height), interpolation=cv2.INTER_CUBIC)
```

### Translation

Translation is the shifting of an object's location. If you know the shift in the (x,y) direction, let it be $(t_x, t_y)$, you can create the transformation matrix $M$ as follows:

$$M = \begin{bmatrix} 1 & 0 & t_x \\ 0 & 1 & t_y \end{bmatrix}$$

You can make it into a Numpy array of type np.float32 and pass it into the cv2.warpAffine() function. See the below example for a shift of (100,50):

```python
import cv2
import numpy as np

img = cv2.imread('messi5.jpg', 0)
rows, cols = img.shape

M = np.float32([[1, 0, 100], [0, 1, 50]])
dst = cv2.warpAffine(img, M, (cols, rows))

cv2.imshow('img', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
```

Warning

Third argument of the cv2.warpAffine() function is the size of the output image, which should be in the form of (width, height). Remember width = number of columns, and height = number of rows.

See the result below:

### Rotation

Rotation of an image for an angle $\theta$ is achieved by the transformation matrix of the form

$$M = \begin{bmatrix} \cos\theta & -\sin\theta \\ \sin\theta & \cos\theta \end{bmatrix}$$

But OpenCV provides scaled rotation with an adjustable center of rotation so that you can rotate at any location you prefer. The modified transformation matrix is given by

$$\begin{bmatrix} \alpha & \beta & (1-\alpha)\cdot center.x - \beta\cdot center.y \\ -\beta & \alpha & \beta\cdot center.x + (1-\alpha)\cdot center.y \end{bmatrix}$$

where:

$$\alpha = scale\cdot\cos\theta, \qquad \beta = scale\cdot\sin\theta$$

To find this transformation matrix, OpenCV provides a function, cv2.getRotationMatrix2D. Check below example which rotates the image by 90 degree with respect to center without any scaling.

```python
img = cv2.imread('messi5.jpg', 0)
rows, cols = img.shape

M = cv2.getRotationMatrix2D((cols/2, rows/2), 90, 1)
dst = cv2.warpAffine(img, M, (cols, rows))
```

See the result:

### Affine Transformation

In affine transformation, all parallel lines in the original image will still be parallel in the output image. To find the transformation matrix, we need three points from input image and their corresponding locations in output image. Then cv2.getAffineTransform will create a 2x3 matrix which is to be passed to cv2.warpAffine.

Check below example, and also look at the points I selected (which are marked in Green color):

```python
from matplotlib import pyplot as plt

img = cv2.imread('drawing.png')
rows, cols, ch = img.shape

pts1 = np.float32([[50, 50], [200, 50], [50, 200]])
pts2 = np.float32([[10, 100], [200, 50], [100, 250]])

M = cv2.getAffineTransform(pts1, pts2)

dst = cv2.warpAffine(img, M, (cols, rows))

plt.subplot(121), plt.imshow(img), plt.title('Input')
plt.subplot(122), plt.imshow(dst), plt.title('Output')
plt.show()
```

See the result:

### Perspective Transformation

For perspective transformation, you need a 3x3 transformation matrix. Straight lines will remain straight even after the transformation. To find this transformation matrix, you need 4 points on the input image and corresponding points on the output image. Among these 4 points, 3 of them should not be collinear. Then transformation matrix can be found by the function cv2.getPerspectiveTransform. Then apply cv2.warpPerspective with this 3x3 transformation matrix.

See the code below:

```python
img = cv2.imread('sudokusmall.png')
rows, cols, ch = img.shape

pts1 = np.float32([[56, 65], [368, 52], [28, 387], [389, 390]])
pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])

M = cv2.getPerspectiveTransform(pts1, pts2)

dst = cv2.warpPerspective(img, M, (300, 300))

plt.subplot(121), plt.imshow(img), plt.title('Input')
plt.subplot(122), plt.imshow(dst), plt.title('Output')
plt.show()
```

Result:



The smoothness of a ball’s surface — in addition to playing technique — is a critical factor.

It happens every four years: The World Cup begins and some of the world’s most skilled players carefully line up free kicks, take aim — and shoot way over the goal.

The players are all trying to bend the ball into a top corner of the goal, often over a wall of defensive players and away from the reach of a lunging goalkeeper. Yet when such shots go awry in the World Cup, a blame game usually sets in. Players, fans, and pundits all suggest that the new official tournament ball, introduced every four years, is the cause.

Many of the people saying that may be seeking excuses. And yet scholars do think that subtle variations among soccer balls affect how they fly. Specifically, researchers increasingly believe that one variable really does differentiate soccer balls: their surfaces. It is harder to control a smoother ball, such as the much-discussed “Jabulani” used at the 2010 World Cup. The new ball used at this year’s tournament in Brazil, the “Brazuca,” has seams that are over 50 percent longer, one factor that makes the ball less smooth and apparently more predictable in flight.

“The details of the flow of air around the ball are complicated, and in particular they depend on how rough the ball is,” says John Bush, a professor of applied mathematics at MIT and the author of a recently published article about the aerodynamics of soccer balls. “If the ball is perfectly smooth, it bends the wrong way.”

By the “wrong way,” Bush means that two otherwise similar balls struck precisely the same way, by the same player, can actually curve in opposite directions, depending on the surface of those balls. Sound surprising?

Magnus, meet Messi

It may, because the question of how a spinning ball curves in flight would seem to have a textbook answer: the Magnus Effect. This phenomenon was first described by Isaac Newton, who noticed that in tennis, topspin causes a ball to dip, while backspin flattens out its trajectory. A curveball in baseball is another example from sports: A pitcher throws the ball with especially tight topspin, or sidespin rotation, and the ball curves in the direction of the spin.

In soccer, the same thing usually occurs with free kicks, corner kicks, crosses from the wings, and other kinds of passes or shots: The player kicking the ball applies spin during contact, creating rotation that makes the ball curve. For a right-footed player, the “natural” technique is to brush toward the outside of the ball, creating a shot or pass with a right-to-left hook; a left-footed player’s “natural” shot will curl left-to-right.

So far, so intuitive: Soccer fans can probably conjure the image of stars like Lionel Messi, Andrea Pirlo, or Marta, a superstar of women’s soccer, doing this. But this kind of shot — the Brazilians call it the “chute de curva” — depends on a ball with some surface roughness. Without that, this classic piece of the soccer player’s arsenal goes away, as Bush points out in his article, “The Aerodynamics of the Beautiful Game,” from the volume “Sports Physics,” published by Les Editions de L’Ecole Polytechnique in France.

“The fact is that the Magnus Effect can change sign,” Bush says. “People don’t generally appreciate that fact.” Given an absolutely smooth ball, the direction of the curve may reverse: The same kicking motion will not produce a shot or pass curving in a right-to-left direction, but in a left-to-right direction.

In the animation that accompanied the original article, a player strikes two balls: one smooth, and one with an elastic band wrapped around its equator. Both balls are struck with his instep so as to impart a counterclockwise spin. However, the smooth ball bends in the opposite direction from the banded ball. The presence of the elastic band changes the boundary layer on the ball’s surface from “laminar” to “turbulent.” This is why all soccer balls have some surface roughness; otherwise, they would bend opposite to the direction suggested by the ball’s initial rotation. (Courtesy of the researchers.)

Why is this? Bush says it is due to the way the surface of the ball creates motion at the “boundary layer” between the spinning ball and the air. The rougher the ball, the easier it is to create the textbook version of the Magnus Effect, with a “positive” sign: The ball curves in the expected direction.

“The boundary layer can be laminar, which is smoothly flowing, or turbulent, in which case you have eddies,” Bush says. “The boundary layer is changing from laminar to turbulent at different spots according to how quickly the ball is spinning. Where that transition arises is influenced by the surface roughness, the stitching of the ball. If you change the patterning of the panels, the transition points move, and the pressure distribution changes.” The Magnus Effect can then have a “negative” sign.
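
To make that sign change concrete, here is a minimal trajectory sketch (my own illustration, not code from Bush's article): it models the Magnus force as F = S(ω × v) and simply flips the sign of the lift parameter S to stand in for the perfectly smooth ball. The kick function and all its numbers (S, mass, kick velocity, spin rate) are made-up illustrative values.

```python
import numpy as np

def kick(S, v0=(25.0, 0.0, 5.0), omega=(0.0, 0.0, 8.0), m=0.43, dt=0.001):
    """Fly a kicked ball under gravity plus a simple Magnus force S*(omega x v).

    S > 0 plays the role of the textbook ("positive") Magnus Effect;
    a negative S mimics the reversed curve of a very smooth ball.
    """
    pos = np.zeros(3)                   # start at the origin, z is up
    v = np.array(v0)
    w = np.array(omega)                 # spin about the vertical axis
    g = np.array([0.0, 0.0, -9.81])
    while pos[2] >= 0.0:                # integrate until the ball lands
        F = m * g + S * np.cross(w, v)  # gravity + Magnus force
        v = v + (F / m) * dt
        pos = pos + v * dt
    return pos

print("rough ball lateral drift: %+.2f m" % kick(S=+0.005)[1])
print("smooth ball lateral drift: %+.2f m" % kick(S=-0.005)[1])
```

Identical kick, identical spin; only the sign of the effective lift changes, and the lateral drift flips direction.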

From Brazil: The “dove without wings”

If the reversing of the Magnus Effect has largely eluded detection, of course, that is because soccer balls are not absolutely smooth — but they have been moving in that direction over the decades. While other sports, such as baseball and cricket, have strict rules about the stitching on the ball, soccer does not, and advances in technology have largely given balls sleeker, smoother designs — until the introduction of the Brazuca, at least.

There is actually a bit more to the story, however, since sometimes players will strike balls so as to give them very little spin — the equivalent of a knuckleball in baseball. In this case, the ball flutters unpredictably from side to side. Brazilians have a name for this: the “pombo sem asa,” or “dove without wings.”

In this case, Bush says, “The peculiar motion of a fluttering free kick arises because the points of boundary-layer transition are different on opposite sides of the ball.” Because the ball has no initial spin, the motion of the surrounding air has more of an effect on the ball’s flight: “A ball that’s knuckling … is moving in response to the pressure distribution, which is constantly changing.” Indeed, a free kick Pirlo took in Italy’s match against England on Saturday, which fooled the goalkeeper but hit the crossbar, demonstrated this kind of action.

Bush’s own interest in the subject arises from being a lifelong soccer player and fan — the kind who, sitting in his office, will summon up clips of the best free-kick takers he’s seen. These include Juninho Pernambucano, a Brazilian midfielder who played at the 2006 World Cup, and Sinisa Mihajlovic, a Serbian defender of the 1990s.

And Bush happily plays a clip of Brazilian fullback Roberto Carlos’ famous free kick from a 1997 match against France, where the player used the outside of his left foot — but deployed the “positive” Magnus Effect — to score on an outrageously bending free kick.

“That was by far the best free kick ever taken,” Bush says. Putting on his professor’s hat for a moment, he adds: “I think it’s important to encourage people to try to understand everything. Even in the most commonplace things, there is subtle and interesting physics.”

zmj 2017-10-10 17:32 发表评论

IN PART I of this post, I talked about the basics of projectile motion with no air resistance. Also in that post, I showed that (without air resistance) the angle to throw a ball for maximum range is 45 degrees. When throwing a football, however, there is some air resistance, which means that 45 degrees is not necessarily the angle for the greatest range. Well, can’t I just do the same thing as before? It turns out that it is a significantly different problem when air resistance is added. Without air resistance, the acceleration was constant. Not so now, my friend.

The problem is that air resistance depends on the velocity of the object. Search your feelings, you know this to be true. When you are driving (or riding) in a car and you stick your hand out the window, you can feel the air pushing against your hand. The faster the car moves, the greater this force. The air resistance force depends on:

• Velocity of the object. The typical model used for objects like a football would depend on the direction and the square of the magnitude of the velocity.
• The density of air.
• The cross sectional area of the object. Compare putting an open hand out the car window to a closed fist out the car window.
• Some air drag coefficient. Imagine a cone and a flat disk, both with the same radius (and thus same cross sectional area). These two objects would have different air resistances due to their shape; this is the coefficient of drag (also called other things, I am sure). All four of these factors combine into the standard formula shown right after this list.
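
Putting those four ingredients together gives the usual quadratic drag model (a standard textbook form, consistent with the assumptions listed further down; this note is mine, not the original post's):

$$
\vec{F}_{\text{air}} = -\tfrac{1}{2}\,\rho\, A\, C\, |\vec{v}|\,\vec{v}
$$

where ρ is the air density, A the cross-sectional area, and C the drag coefficient. The magnitude grows as the square of the speed, and the direction is opposite the velocity.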

So, since the air force depends on the velocity, it will not be a constant acceleration. Kinematic equations won’t really work. To easily solve this problem, I will use numerical methods. The basic idea in numerical calculations is to break the problem into a whole bunch of little steps. During these small steps, the velocity does not change much so that I can “pretend” like the acceleration is constant. Here is a diagram of the forces on the ball while in the air.

Before I go any further, I would like to say that there has been some “stuff” done on throwing a football before – and they probably do a better job than this post. Here are a few references (especially with more detailed discussion about the coefficient of drag for a spinning football):

And now for some assumptions:

• I hereby assume that the air resistance is proportional to the square of the magnitude of the velocity of the object.
• The orientation of the football is such that the coefficient of drag is constant. This may not actually be true. Imagine if the ball were thrown and spinning with the axis parallel to the ground. If the axis stayed parallel to the ground, for part of the motion the direction of motion would not be along the axis. Get it?
• Ignore aerodynamic lift effects.
• Mass of the ball is 0.42 kg.
• The density of air is 1.2 kg/m^3.
• The coefficient of drag for the football is 0.05 to 0.14.
• Typical initial speed of a thrown football is around 20 m/s.

And finally, here is the recipe for my numerical calculation (in vpython of course; a plain-Python sketch of the same steps follows the list):

• Set up initial conditions
• Set the angle of the throw
• Calculate the new position assuming a constant velocity.
• Calculate the new momentum (and thus velocity) assuming a constant force.
• Calculate the force (it changes when the velocity changes)
• Increase the time.
• Keep doing the above until the ball gets back to y=0 m.
• Change the angle and do all the above again.
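
Here is a rough plain-Python version of that recipe (my sketch, not the author's vpython program; the mass, air density, and drag coefficients come from the assumptions above, while the cross-sectional area A and the throw_range name are my own, so the exact numbers will differ a bit from the post's):

```python
import math

def throw_range(v0, angle_deg, C=0.07, m=0.42, rho=1.2, A=0.023, dt=0.001):
    """Euler-step a thrown ball with quadratic air drag; returns range (m)."""
    theta = math.radians(angle_deg)
    x, y = 0.0, 0.0
    vx, vy = v0 * math.cos(theta), v0 * math.sin(theta)
    while True:
        # new position, assuming constant velocity over the small step
        x += vx * dt
        y += vy * dt
        if y < 0.0:          # ball is back at (or below) y = 0 m
            return x
        # drag force: magnitude 0.5*rho*A*C*v^2, directed against the velocity
        v = math.hypot(vx, vy)
        Fx = -0.5 * rho * A * C * v * vx
        Fy = -0.5 * rho * A * C * v * vy - m * 9.81
        # new velocity, assuming constant force over the small step
        vx += (Fx / m) * dt
        vy += (Fy / m) * dt

# sweep the launch angle, as in the last step of the recipe
best = max(range(20, 51), key=lambda a: throw_range(20.0, a, C=0.14))
print(best, "degrees ->", round(throw_range(20.0, best, C=0.14), 1), "m")
```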

First, I ran the program with an initial velocity of 20 m/s. Here is the data:

At 35 degrees, this gives a distance of 23 meters (25 yards). This doesn’t seem right. I know a quarterback can throw farther than that. What if I change the coefficient to 0.05? Then the best angle is closer to 40 degrees and the ball goes 28 meters. Still seems low (think Doug Flutie). What about with no air resistance? Then it goes 41 meters (at 45 degrees). So, here is the Doug Flutie throw.

From the video, it looks like he threw the ball from the 36ish yard line to about the 2 yard line. This would be 62 yards (56.7 meters). I am going to assume a coefficient of 0.07 (randomly). So, what initial speed will get this far? If I put in an initial velocity of 33 m/s, the ball will go 55.7 meters at an angle of 35 degrees.
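
(If you want to poke at this throw with the sketch above, the call would be something like throw_range(33.0, 35.0, C=0.07); with my guessed cross-sectional area it should land in the general neighborhood of the author's 55.7 meters, though not exactly on it.)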

Really the thing that amazes me is that someone (not me) can throw a ball that far and essentially get it where they want it. Even if they are only sometimes successful, it is still amazing. How is it that humans can throw things somewhat accurately? We obviously do not do projectile motion calculations in our head – or maybe we do?

zmj 2017-09-24 13:32 发表评论


#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

int main()
{
    Mat srcImage(Size(600, 600), CV_8UC3, Scalar(0));

    RNG& rng = theRNG();

    char key;
    while (1)
    {
        // Generate some random points:
        // first pick a random total number of points,
        int g_nPointCount = rng.uniform(3, 30);
        // then generate random coordinates for each point.
        vector<Point> points;
        for (int i = 0; i < g_nPointCount; i++)
        {
            Point midPoint;

            midPoint.x = rng.uniform(srcImage.cols / 4, srcImage.cols * 3 / 4);
            midPoint.y = rng.uniform(srcImage.rows / 4, srcImage.rows * 3 / 4);

            points.push_back(midPoint);
        }

        // Draw the points that were just generated.
        for (int i = 0; i < g_nPointCount; i++)
        {
            circle(srcImage, points[i], 0, Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)), 3);
        }

        // Find the minimum enclosing circle of the random points.
        // (The radius variable and the minEnclosingCircle/circle calls were
        // dropped from the original listing; the lines below restore the
        // obvious intent.)
        Point2f center;
        float radius = 0.f;
        minEnclosingCircle(points, center, radius);

        // Draw the circle from the computed center and radius.
        circle(srcImage, Point(cvRound(center.x), cvRound(center.y)), cvRound(radius),
               Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)), 3);

        imshow("Image after drawing", srcImage);

        key = (char)waitKey();
        if (key == 27)   // ESC exits
            break;
        else
            srcImage = Scalar::all(0);   // clear the canvas and run again
    }

    return 0;
}

zmj 2017-09-14 16:05 发表评论