wheeledexperimenthelper.cpp
/********************************************************************************
 * FARSA Experiments Library *
 * Copyright (C) 2007-2012 *
 * Gianluca Massera <emmegian@yahoo.it> *
 * Stefano Nolfi <stefano.nolfi@istc.cnr.it> *
 * Tomassino Ferrauto <tomassino.ferrauto@istc.cnr.it> *
 * Onofrio Gigliotta <onofrio.gigliotta@istc.cnr.it> *
 * *
 * This program is free software; you can redistribute it and/or modify *
 * it under the terms of the GNU General Public License as published by *
 * the Free Software Foundation; either version 2 of the License, or *
 * (at your option) any later version. *
 * *
 * This program is distributed in the hope that it will be useful, *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
 * GNU General Public License for more details. *
 * *
 * You should have received a copy of the GNU General Public License *
 * along with this program; if not, write to the Free Software *
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *
 ********************************************************************************/

#include "wheeledexperimenthelper.h"
#include "logger.h"
#include "phybox.h"
#include "phycylinder.h"
#include "robots.h"
#include "arena.h"
#include "intervals.h"

namespace farsa {

// This anonymous namespace contains helper functions used in this file
namespace {
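	// Returns the global position of one of the four vertexes of the base of a wall (the face
	// lying on the plane). vertexID is taken modulo 4, so any unsigned value selects a corner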
	wVector computeWallVertex(PhyBox* wall, unsigned int vertexID)
	{
		const wVector centerOnPlane = wall->matrix().w_pos - wall->matrix().z_ax.scale(wall->sideZ() / 2.0);
		const wVector halfSide1Vector = wall->matrix().x_ax.scale(wall->sideX() / 2.0);
		const wVector halfSide2Vector = wall->matrix().y_ax.scale(wall->sideY() / 2.0);

		wVector vertex;
		switch (vertexID % 4) {
			case 0:
				vertex = centerOnPlane + halfSide1Vector + halfSide2Vector;
				break;
			case 1:
				vertex = centerOnPlane + halfSide1Vector - halfSide2Vector;
				break;
			case 2:
				vertex = centerOnPlane - halfSide1Vector + halfSide2Vector;
				break;
			case 3:
				vertex = centerOnPlane - halfSide1Vector - halfSide2Vector;
				break;
			default:
				break;
		}

		return vertex;
	}

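	// Returns the signed angle (in radians, in [-pi, pi]) between the X axis of mtr and the
	// vector v. The sign is positive if v lies counterclockwise from the X axis with respect
	// to the Z axis of mtr, negative otherwise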
	double getAngleWithXAxis(const wMatrix& mtr, const wVector& v)
	{
		FARSA_DEBUG_TEST_INVALID(mtr.x_ax.x) FARSA_DEBUG_TEST_INVALID(mtr.x_ax.y) FARSA_DEBUG_TEST_INVALID(mtr.x_ax.z)
		FARSA_DEBUG_TEST_INVALID(v.x) FARSA_DEBUG_TEST_INVALID(v.y) FARSA_DEBUG_TEST_INVALID(v.z)

		// Normalizing v
		const wVector vdir = v.scale(1.0 / v.norm()); FARSA_DEBUG_TEST_INVALID(vdir.x) FARSA_DEBUG_TEST_INVALID(vdir.y) FARSA_DEBUG_TEST_INVALID(vdir.z)

		// To get the (unsigned) angle we compute the acos of the dot product of the two vectors. We have
		// to clamp the dot product to [-1, 1] because numerical errors can push it slightly outside that range
		const double dotProduct = min(1.0, max(-1.0, mtr.x_ax % vdir)); FARSA_DEBUG_TEST_INVALID(dotProduct)
		const double unsignedAngle = acos(dotProduct); FARSA_DEBUG_TEST_INVALID(unsignedAngle)

		// Now choosing the right sign. To do this we first compute the cross product of the X axis and
		// the vector direction, then we check whether it points in the same direction as the Z axis or not
		const double s = mtr.z_ax % (mtr.x_ax * vdir); FARSA_DEBUG_TEST_INVALID(s)

		return (s < 0.0) ? -unsignedAngle : unsignedAngle;
	}

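	// Computes the portion of the linear view field of a camera (whose frame of reference is
	// cameraMtr) occupied by a circle of the given radius centered at objMtr.w_pos and lying
	// on the local YZ plane of objMtr (the base of a cylinder or the body of a wheeled robot).
	// The colored segments of the circle are taken from the iterator range [segmentsColorBegin,
	// segmentsColorEnd) and the resulting angular ranges in the camera frame of reference are
	// returned in rangesAndColors. distance is set to the distance of the circle from the
	// camera, or to -1.0 if the circle is not visible (e.g. farther than maxDistance)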
	template <class SegmentColorIt>
	void computeLinearViewFieldOccupiedRangeForCircle(const wMatrix& cameraMtr, const wMatrix& objMtr, double radius, const SegmentColorIt segmentsColorBegin, const SegmentColorIt segmentsColorEnd, QVector<PhyObject2DWrapper::AngularRangeAndColor>& rangesAndColors, double& distance, double maxDistance)
	{
		// Useful constant
		const real epsilon = 0.0001f;

		// We have to translate the camera to lie on the same plane of the cylinder base. We translate it along
		// its local up vector (Z axis) until it reaches the plane containing the base of the cylinder. Of course
		// this only works if the camera Z axis is not parallel to the plane with the base of the cylinder. In
		// that case all computations would be invalid, so we don't do anything
		wMatrix mtr = cameraMtr;
		if (fabs(mtr.z_ax % objMtr.x_ax) < epsilon) {
			distance = -1.0;
			return;
		}
		mtr.w_pos = mtr.w_pos + mtr.z_ax.scale((objMtr.w_pos.z - mtr.w_pos.z) / mtr.z_ax.z); FARSA_DEBUG_TEST_INVALID(mtr.w_pos.x) FARSA_DEBUG_TEST_INVALID(mtr.w_pos.y) FARSA_DEBUG_TEST_INVALID(mtr.w_pos.z) FARSA_DEBUG_TEST_INVALID(mtr.z_ax.x) FARSA_DEBUG_TEST_INVALID(mtr.z_ax.y) FARSA_DEBUG_TEST_INVALID(mtr.z_ax.z) FARSA_DEBUG_TEST_INVALID(objMtr.w_pos.x) FARSA_DEBUG_TEST_INVALID(objMtr.w_pos.y) FARSA_DEBUG_TEST_INVALID(objMtr.w_pos.z)

		// First of all we have to calculate the distance between the center of the camera and the center of the
		// cylinder. Here we also check that the camera is not inside the cylinder. The vector from the camera
		// to the center of the cylinder is computed in the object frame of reference because we need it in this
		// frame of reference later
		const wVector centerDir = objMtr.unrotateVector(mtr.w_pos - objMtr.w_pos);
		const double centerDistance = centerDir.norm();
		distance = centerDistance - radius;
		if ((distance < 0.0) || (distance > maxDistance)) {
			distance = -1.0;
			return;
		}

		// We also need the angles of the two tangent points to compute the ranges for the
		// various colors. The angles are relative to the cylinder center. The first thing we need
		// is the angle, at the center of the cylinder, between the direction towards the camera
		// and the radius through a tangent point
		const double halfSectorAngle = acos(radius / centerDistance); FARSA_DEBUG_TEST_INVALID(halfSectorAngle)
		// Now computing the starting and ending angle in the cylinder frame of reference. We need
		// the angle of the centerDir vector, then we only have to subtract halfSectorAngle to get
		// the starting angle. The end angle is then easy to obtain
		const real startAngleInCylinder = normalizeRad(atan2(centerDir.z, centerDir.y) - halfSectorAngle);
		const real endAngleInCylinder = normalizeRad(startAngleInCylinder + 2.0 * halfSectorAngle);

		// Clearing the vector that will contain segments and colors. It will first contain ranges in the
		// cylinder frame of reference, and then the angles will be converted to linear camera ranges
		rangesAndColors.clear();
		if ((segmentsColorBegin + 1) == segmentsColorEnd) {
			// There is only one color, we can simply add one element to the rangesAndColors vector
			rangesAndColors.append(PhyObject2DWrapper::AngularRangeAndColor(startAngleInCylinder, endAngleInCylinder, segmentsColorBegin->color));
		} else {
			// The interval modelling the visible arc, with which all other arcs are intersected. If
			// the arc crosses -pi/pi, the interval would go from -inf to end and from start to inf,
			// so we restrict it to [-pi, pi]
			const Intervals visibleArc = Intervals(SimpleInterval(startAngleInCylinder, endAngleInCylinder)) & SimpleInterval(-PI_GRECO, PI_GRECO);

			// This could perhaps be made a bit more efficient by checking whether we have already analyzed
			// all segments in the range, but that would only be necessary if we had objects with a lot
			// of segments
			for (SegmentColorIt it = segmentsColorBegin; it != segmentsColorEnd; ++it) {
				// Computing the intersection between the current segment and the visible range and adding it. We don't
				// need to intersect the current segment with [-pi, pi] as we did for visibleArc because the intersection
				// with the latter will remove all values below -pi or above pi
				const Intervals curVisibleSegmentArc = visibleArc & it->intervals;
				if (!curVisibleSegmentArc.isEmpty()) {
					for (Intervals::const_iterator intrvIt = curVisibleSegmentArc.begin(); intrvIt != curVisibleSegmentArc.end(); ++intrvIt) {
						rangesAndColors.append(PhyObject2DWrapper::AngularRangeAndColor(intrvIt->start, intrvIt->end, it->color));
					}
				}
			}
		}

		// Now converting angles to linear camera ranges
		for (int i = 0; i < rangesAndColors.size(); i++) {
			// Getting the points corresponding to the angles in the current range in the frame of reference
			// of the cylinder
			const wVector startPointCylinder(0.0, radius * cos(rangesAndColors[i].minAngle), radius * sin(rangesAndColors[i].minAngle)); FARSA_DEBUG_TEST_INVALID(startPointCylinder.x) FARSA_DEBUG_TEST_INVALID(startPointCylinder.y) FARSA_DEBUG_TEST_INVALID(startPointCylinder.z)
			const wVector endPointCylinder(0.0, radius * cos(rangesAndColors[i].maxAngle), radius * sin(rangesAndColors[i].maxAngle)); FARSA_DEBUG_TEST_INVALID(endPointCylinder.x) FARSA_DEBUG_TEST_INVALID(endPointCylinder.y) FARSA_DEBUG_TEST_INVALID(endPointCylinder.z)

			// Now computing the points in the global frame of reference
			const wVector startPoint = objMtr.transformVector(startPointCylinder); FARSA_DEBUG_TEST_INVALID(startPoint.x) FARSA_DEBUG_TEST_INVALID(startPoint.y) FARSA_DEBUG_TEST_INVALID(startPoint.z)
			const wVector endPoint = objMtr.transformVector(endPointCylinder); FARSA_DEBUG_TEST_INVALID(endPoint.x) FARSA_DEBUG_TEST_INVALID(endPoint.y) FARSA_DEBUG_TEST_INVALID(endPoint.z)

			// Now computing the angles in the linear camera. As we don't know which is the start angle and which
			// is the end angle, we rely on the fact that for cylinders the camera cannot see a portion greater
			// than PI_GRECO. We also check that the vectors to the start and end points are not zero; if they
			// are, the angle is computed with respect to the center of the cylinder
			const wVector firstAngleVector = startPoint - mtr.w_pos;
			const double firstAngleCamera = (fabs(firstAngleVector.norm()) < epsilon) ? getAngleWithXAxis(mtr, objMtr.w_pos - mtr.w_pos) : getAngleWithXAxis(mtr, firstAngleVector); FARSA_DEBUG_TEST_INVALID(firstAngleCamera)
			const wVector secondAngleVector = endPoint - mtr.w_pos;
			const double secondAngleCamera = (fabs(secondAngleVector.norm()) < epsilon) ? getAngleWithXAxis(mtr, objMtr.w_pos - mtr.w_pos) : getAngleWithXAxis(mtr, secondAngleVector); FARSA_DEBUG_TEST_INVALID(secondAngleCamera)
			if (firstAngleCamera > secondAngleCamera) {
				// Checking if they are on different sides of the -PI_GRECO/PI_GRECO boundary or on the same side. Here we exploit
				// the fact that the camera cannot see more than PI_GRECO of the cylinder
				if ((firstAngleCamera > (PI_GRECO / 2.0)) && (secondAngleCamera < (-PI_GRECO / 2.0))) {
					rangesAndColors[i].minAngle = firstAngleCamera;
					rangesAndColors[i].maxAngle = secondAngleCamera;
				} else {
					rangesAndColors[i].minAngle = secondAngleCamera;
					rangesAndColors[i].maxAngle = firstAngleCamera;
				}
			} else {
				// Checking if they are on different sides of the -PI_GRECO/PI_GRECO boundary or on the same side. Here we exploit
				// the fact that the camera cannot see more than PI_GRECO of the cylinder
				if ((firstAngleCamera < (-PI_GRECO / 2.0)) && (secondAngleCamera > (PI_GRECO / 2.0))) {
					rangesAndColors[i].minAngle = secondAngleCamera;
					rangesAndColors[i].maxAngle = firstAngleCamera;
				} else {
					rangesAndColors[i].minAngle = firstAngleCamera;
					rangesAndColors[i].maxAngle = secondAngleCamera;
				}
			}
		}
	}
// Old implementation of the function above, kept for a while for reference
// void computeLinearViewFieldOccupiedRangeForCircle(const wMatrix& cameraMtr, const wMatrix& objMtr, double radius, const QList<PhyCylinder::SegmentColor>& segmentsColor, QVector<PhyObject2DWrapper::AngularRangeAndColor>& rangesAndColors, double& distance, double maxDistance)
// {
// 	// We have to translate the camera to lie on the same plane of the cylinder base. We translate it along
// 	// its local upvector (Z axis) until it reaches the plane containing the base of the cylinder. Of course
// 	// this only works if the camera Z axis is not paraller to the plane with the base of the cylinder. In
// 	// that case all computations would be invalid, so we don't do anything
// 	wMatrix mtr = cameraMtr;
// 	if (fabs(mtr.z_ax % objMtr.x_ax) < 0.0001) {
// 		distance = -1.0;
// 		return;
// 	}
// 	mtr.w_pos = mtr.w_pos + mtr.z_ax.scale((objMtr.w_pos.z - mtr.w_pos.z) / mtr.z_ax.z);
//
// 	// First of all we have to calculate the distance between the center of the camera and the center of the
// 	// cylinder. Here we also check that the camera is not inside the cylinder. The vector from the camera
// 	// to the center of the cylinder is computed in the object frame of reference because we need it in this
// 	// frame of reference later
// 	const wVector centerDir = objMtr.unrotateVector(mtr.w_pos - objMtr.w_pos);
// 	const double centerDistance = centerDir.norm();
// 	distance = centerDistance - radius;
// 	if ((distance < 0.0) || (distance > maxDistance)) {
// 		distance = -1.0;
// 		return;
// 	}
//
// 	// We also need to compute the angles of the two tangent points to compute the ranges for
// 	// various colors. The angles are relative to the cylinder center. The first thing we need
// 	// is the angle between the vector from the camera to center of the cylinder and the radius
// 	// perpendicular to the tangent
// 	const double halfSectorAngle = acos(radius / centerDistance);
// 	// Now computing the starting and ending angle in the cylinder frame of reference. We need
// 	// the angle of the centerDir vector, then we only have to subtract halfSectorAngle to get
// 	// starting angle. The end angle is easy to obtain, then
// 	const real startAngleInCylinder = normalizeRad(atan2(centerDir.z, centerDir.y) - halfSectorAngle);
// 	const real endAngleInCylinder = normalizeRad(startAngleInCylinder + 2.0 * halfSectorAngle);
//
// 	// Clearing the vector that will contain segments and colors. It will first contain ranges in the
// 	// cylinder frame of reference, and then the angles will be converted to linear camera ranges
// 	rangesAndColors.clear();
// 	if (segmentsColor.size() == 1) {
// 		// There is only one color, we can simply add one element to the rangesAndColors vector
// 		rangesAndColors.append(PhyObject2DWrapper::AngularRangeAndColor(startAngleInCylinder, endAngleInCylinder, segmentsColor[0].color));
// 	} else {
// 		// The interval modelling the visible arc, with which all other arcs are intersected. If
// 		// the arc crosses -pi/pi, the interval would go from -inf to end and from start to inf,
// 		// so we restrict it to -pi, pi
// 		const Intervals visibleArc = Intervals(SimpleInterval(startAngleInCylinder, endAngleInCylinder)) & SimpleInterval(-PI_GRECO, PI_GRECO);
//
// 		// This could perhaps be made a bit more efficient by checking if the we already analyzed
// 		// all segments in the range, but it would be necessary only if we had objects with a lot
// 		// of segments
// 		for (int i = 0; i < segmentsColor.size(); i++) {
// 			// Computing the intersection between the current segment and the visible range and adding it. We don't
// 			// need to intersect the cur segment with [-pi, pi] as we did for visibleArc because the intersection
// 			// with the latter will remove all values below -pi or above pi
// 			const Intervals curVisibleSegmentArc = visibleArc & segmentsColor[i].intervals;
// 			if (!curVisibleSegmentArc.isEmpty()) {
// 				for (Intervals::const_iterator it = curVisibleSegmentArc.begin(); it != curVisibleSegmentArc.end(); ++it) {
// 					rangesAndColors.append(PhyObject2DWrapper::AngularRangeAndColor(it->start, it->end, segmentsColor[i].color));
// 				}
// 			}
// 		}
// 	}
//
// 	// Now converting angles to linear camera ranges
// 	for (int i = 0; i < rangesAndColors.size(); i++) {
// 		// Getting the points corresponding to the angles in the current range in the frame of reference
// 		// of the cylinder
// 		const wVector startPointCylinder(0.0, radius * cos(rangesAndColors[i].minAngle), radius * sin(rangesAndColors[i].minAngle));
// 		const wVector endPointCylinder(0.0, radius * cos(rangesAndColors[i].maxAngle), radius * sin(rangesAndColors[i].maxAngle));
//
// 		// Now computing the points in the global frame of reference
// 		const wVector startPoint = objMtr.transformVector(startPointCylinder);
// 		const wVector endPoint = objMtr.transformVector(endPointCylinder);
//
// 		// Now computing the angles in the linear camera. As we don't know which is the start angle and which the end angle, we
// 		// rely on the fact that for cylinders the camera cannot see a portion greater than PI_GRECO
// 		const double firstAngleCamera = getAngleWithXAxis(mtr, startPoint - mtr.w_pos);
// 		const double secondAngleCamera = getAngleWithXAxis(mtr, endPoint - mtr.w_pos);
// 		if (firstAngleCamera > secondAngleCamera) {
// 			// Checking if they are on different sides of the -PI_GRECO/PI_GRECO boundary or on the same side. Here we exploit
// 			// the fact that the camera cannot see more than PI_GRECO of the cylinder
// 			if ((firstAngleCamera > (PI_GRECO / 2.0)) && (secondAngleCamera < (-PI_GRECO / 2.0))) {
// 				rangesAndColors[i].minAngle = firstAngleCamera;
// 				rangesAndColors[i].maxAngle = secondAngleCamera;
// 			} else {
// 				rangesAndColors[i].minAngle = secondAngleCamera;
// 				rangesAndColors[i].maxAngle = firstAngleCamera;
// 			}
// 		} else {
// 			// Checking if they are on different sides of the -PI_GRECO/PI_GRECO boundary or on the same side. Here we exploit
// 			// the fact that the camera cannot see more than PI_GRECO of the cylinder
// 			if ((firstAngleCamera < (-PI_GRECO / 2.0)) && (secondAngleCamera > (PI_GRECO / 2.0))) {
// 				rangesAndColors[i].minAngle = secondAngleCamera;
// 				rangesAndColors[i].maxAngle = firstAngleCamera;
// 			} else {
// 				rangesAndColors[i].minAngle = firstAngleCamera;
// 				rangesAndColors[i].maxAngle = secondAngleCamera;
// 			}
// 		}
// 	}
// }

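	// Computes the distance and the relative orientation between a robot (modelled as a circle
	// of radius robotRadius centered at robotPosition, with heading robotOrientation) and a
	// cylinder of the given radius centered at position. Both radii are subtracted from the
	// center-to-center distance; the angle is relative to the robot heading (in radians)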
	void computeDistanceAndOrientationFromRobotToCylinder(wVector robotPosition, double robotOrientation, double robotRadius, double radius, wVector position, double& distance, double& angle)
	{
		// Setting to 0.0 the z coordinate of positions
		robotPosition.z = 0.0;
		position.z = 0.0;

		// Computing the distance. We have to remove both the robot radius and the object radius
		distance = (position - robotPosition).norm() - robotRadius - radius;

		// Now computing the angle between the robot and the object
		angle = atan2(position.y - robotPosition.y, position.x - robotPosition.x) - robotOrientation;
	}
}

PhyObject2DWrapper::PhyObject2DWrapper(Arena* arena) :
	m_arena(arena),
	m_previousMatrix()
{
	// Nothing to do here
}

PhyObject2DWrapper::~PhyObject2DWrapper()
{
	// Nothing to do here
}

WObject* PhyObject2DWrapper::wObject()
{
	return phyObject();
}

const WObject* PhyObject2DWrapper::wObject() const
{
	return phyObject();
}

void PhyObject2DWrapper::setStatic(bool s)
{
	if (phyObject() != NULL) {
		phyObject()->setStatic(s);
	}
}

bool PhyObject2DWrapper::getStatic() const
{
	if (phyObject() != NULL) {
		return phyObject()->getStatic();
	} else {
		return true;
	}
}

void PhyObject2DWrapper::setKinematic(bool b, bool c)
{
	if (phyObject() != NULL) {
		phyObject()->setKinematic(b, c);
	}
}

bool PhyObject2DWrapper::getKinematic() const
{
	if (phyObject() != NULL) {
		return phyObject()->getKinematic();
	} else {
		return true;
	}
}

void PhyObject2DWrapper::setPosition(wVector pos)
{
	setPosition(pos.x, pos.y);
}

wVector PhyObject2DWrapper::position() const
{
	return wObject()->matrix().w_pos;
}

void PhyObject2DWrapper::setTexture(QString textureName)
{
	wObject()->setTexture(textureName);
}

QString PhyObject2DWrapper::texture() const
{
	return wObject()->texture();
}

void PhyObject2DWrapper::setColor(QColor color)
{
	wObject()->setColor(color);
}

QColor PhyObject2DWrapper::color() const
{
	return wObject()->color();
}

void PhyObject2DWrapper::setUseColorTextureOfOwner(bool b)
{
	wObject()->setUseColorTextureOfOwner(b);
}

bool PhyObject2DWrapper::useColorTextureOfOwner() const
{
	return wObject()->useColorTextureOfOwner();
}

Box2DWrapper::Box2DWrapper(Arena* arena, PhyBox* box, Type type) :
	PhyObject2DWrapper(arena),
	m_box(box),
	m_vertexes(QVector<wVector>() << computeWallVertex(m_box, 0) << computeWallVertex(m_box, 1) << computeWallVertex(m_box, 2) << computeWallVertex(m_box, 3)),
	m_centerOnPlane(m_box->matrix().w_pos - m_box->matrix().z_ax.scale(m_box->sideZ() / 2.0)),
	m_type(((type != Plane) && (type != Wall) && (type != RectangularTargetArea)) ? Box : type)
{
	if (m_type == RectangularTargetArea) {
		m_box->setKinematic(true, false);
	} else if (m_type != Box) {
		m_box->setStatic(true);
	}
}

Box2DWrapper::~Box2DWrapper()
{
	// Nothing to do here
}

PhyBox* Box2DWrapper::phyObject()
{
	return m_box;
}

const PhyBox* Box2DWrapper::phyObject() const
{
	return m_box;
}

Box2DWrapper::Type Box2DWrapper::type() const
{
	return m_type;
}

void Box2DWrapper::setStatic(bool s)
{
	// Only Boxes can be made non-static
	if (m_type != Box) {
		return;
	}

	phyObject()->setStatic(s);
}

void Box2DWrapper::setKinematic(bool b, bool c)
{
	// RectangularTargetAreas must always remain kinematic
	if (m_type != RectangularTargetArea) {
		phyObject()->setKinematic(b, c);
	}
}

void Box2DWrapper::setPosition(real x, real y)
{
	// Planes and Walls cannot be moved
	if ((m_type == Plane) || (m_type == Wall)) {
		return;
	}

	wVector pos = phyObject()->matrix().w_pos;

	pos.x = x;
	pos.y = y;

	phyObject()->setPosition(pos);

	// We also have to recompute the vertexes and the center
	for (unsigned int i = 0; i < 4; i++) {
		m_vertexes[i] = computeWallVertex(m_box, i);
	}
	m_centerOnPlane = m_box->matrix().w_pos - m_box->matrix().z_ax.scale(m_box->sideZ() / 2.0);
}

void Box2DWrapper::computeLinearViewFieldOccupiedRange(const wMatrix& cameraMtr, QVector<AngularRangeAndColor>& rangesAndColors, double& distance, double maxDistance) const
{
	// Here, for the moment, we only have one color, so we can compute a single range

	// If this is a Plane or a RectangularTargetArea, we simply return a negative distance (they are not visible with
	// a linear camera)
	if ((m_type == Plane) || (m_type == RectangularTargetArea)) {
		distance = -1.0;
		return;
	}

	// We have to translate the camera to lie on the same plane as the vertexes. We translate it along
	// its local up vector (Z axis) until it reaches the plane containing the base of the wall. Of course
	// this only works if the camera Z axis is not parallel to the plane with the base of the wall. In
	// that case all computations would be invalid, so we don't do anything
	wMatrix mtr = cameraMtr;
	if (fabs(mtr.z_ax % m_box->matrix().z_ax) < 0.0001) {
		distance = -1.0;
		return;
	}
	mtr.w_pos = mtr.w_pos + mtr.z_ax.scale((m_centerOnPlane.z - mtr.w_pos.z) / mtr.z_ax.z);

	// First of all computing the angle for every vertex
	QVector<double> angles(4);

	for (int i = 0; i < 4; i++) {
		// Computing the vector giving the direction to the vertex
		const wVector vdir = m_vertexes[i] - mtr.w_pos;

		// Now computing the angle
		angles[i] = getAngleWithXAxis(mtr, vdir);
	}

	// Now finding the min and max angles (their indexes). We have to take into account the fact that the
	// angle with the minimum value could be the upper limit and vice versa, because the object could be
	// behind the camera. However we know that, as the camera is outside the wall, the maximum possible
	// angular sector of the view field occupied by the wall is 180°. This means that the angular distance
	// of each vertex from the center of the wall must also be less than 180°. So, if we compute this
	// distance and get a value greater than 180°, we have to take (360° - computed_angular_distance)
	// and swap min with max.
	const wVector centerDir = m_centerOnPlane - mtr.w_pos;
	const double centerAngle = getAngleWithXAxis(mtr, centerDir);
	int minAngleID = 0;
	int maxAngleID = 0;

	// These two are the angular distances of the current min and max angles from the center. Their initial
	// value is the lowest possible
	double minDelta = 0.0;
	double maxDelta = 0.0;

	for (int i = 0; i < 4; i++) {
		const double curDelta = fabs(angles[i] - centerAngle);

		// Checking if the vertex and the center are behind the camera
		if (curDelta > PI_GRECO) {
			const double actualDelta = (2.0 * PI_GRECO) - curDelta;
			if (angles[i] > centerAngle) {
				// This is a candidate minimum angle
				if (actualDelta > minDelta) {
					minAngleID = i;
					minDelta = actualDelta;
				}
			} else {
				// This is a candidate maximum angle
				if (actualDelta > maxDelta) {
					maxAngleID = i;
					maxDelta = actualDelta;
				}
			}
		} else {
			if (angles[i] < centerAngle) {
				// This is a candidate minimum angle
				if (curDelta > minDelta) {
					minAngleID = i;
					minDelta = curDelta;
				}
			} else {
				// This is a candidate maximum angle
				if (curDelta > maxDelta) {
					maxAngleID = i;
					maxDelta = curDelta;
				}
			}
		}
	}

	// Filling the minAngle and maxAngle parameters
	rangesAndColors.clear();
	rangesAndColors.append(AngularRangeAndColor(angles[minAngleID], angles[maxAngleID], color()));

#if defined(__GNUC__) && defined(DEVELOPER_WARNINGS)
	#warning THIS WAY OF COMPUTING THE DISTANCE IS WRONG (AND I DO NOT UNDERSTAND ITS PURPOSE), BUT IT IS THE ONE USED IN EVOROBOT, SO FOR THE MOMENT WE USE IT (ALSO BECAUSE USING THE DISTANCE FOR OCCLUSION IS NOT CORRECT ANYWAY)
#endif
	// Now computing the distance. This way of calculating the distance is plainly wrong (and I can't
	// see why it is written this way), but it is the method used by Evorobot, so for the moment we
	// keep using it (moreover, using the distance for occlusion is not correct anyway)
	distance = ((mtr.w_pos - m_vertexes[minAngleID]).norm() + (mtr.w_pos - m_vertexes[maxAngleID]).norm()) / 2.0;

	// Checking that the distance is less than the maximum one
	if (distance > maxDistance) {
		distance = -1.0;
	}
}

bool Box2DWrapper::computeDistanceAndOrientationFromRobot(const WheeledRobot2DWrapper& robot, double& distance, double& angle) const
{
#if defined(__GNUC__) && defined(DEVELOPER_WARNINGS)
	#warning THE COMPUTATIONS FOR DISTANCE AND ORIENTATION IN EVOROBOT ARE STRANGE, HERE I TRIED TO DO SOMETHING THAT SEEMS SENSIBLE TO ME...
#endif
	// Only doing computations for walls and boxes
	if ((m_type != Wall) && (m_type != Box)) {
		return false;
	}

	// Taking the robot position and setting z to lie on the same plane as the vertexes
	const wVector robotPosition(robot.position().x, robot.position().y, m_vertexes[0].z);

	// Now computing the robot position in the box frame of reference
	const wVector relRobotPosition = m_box->matrix().untransformVector(robotPosition);

	// Now we can find the point of the rectangle that is nearest to the robot position. As we work in the box
	// frame of reference, the vertexes are easy to compute. They are (discarding z):
	// (+m_box->sideX() / 2.0, +m_box->sideY() / 2.0)
	// (+m_box->sideX() / 2.0, -m_box->sideY() / 2.0)
	// (-m_box->sideX() / 2.0, +m_box->sideY() / 2.0)
	// (-m_box->sideX() / 2.0, -m_box->sideY() / 2.0)
	// Finding the nearest point is just a matter of clamping x and y separately.
	real nearestX;
	if (relRobotPosition.x < -m_box->sideX() / 2.0) {
		nearestX = -m_box->sideX() / 2.0;
	} else if (relRobotPosition.x > +m_box->sideX() / 2.0) {
		nearestX = +m_box->sideX() / 2.0;
	} else {
		nearestX = relRobotPosition.x;
	}
	real nearestY;
	if (relRobotPosition.y < -m_box->sideY() / 2.0) {
		nearestY = -m_box->sideY() / 2.0;
	} else if (relRobotPosition.y > +m_box->sideY() / 2.0) {
		nearestY = +m_box->sideY() / 2.0;
	} else {
		nearestY = relRobotPosition.y;
	}

	// Although the distance is independent of the frame of reference, we convert the nearest point to the global
	// frame of reference because we only have the robot orientation in that frame
	const wVector nearestPoint = m_box->matrix().transformVector(wVector(nearestX, nearestY, relRobotPosition.z));

	// Now we can easily compute the distance and orientation. For the distance we have to remove the robot radius
	distance = (nearestPoint - robotPosition).norm() - robot.getRadius();
	const real robotOrientation = (dynamic_cast<const RobotOnPlane*>(robot.wObject()))->orientation(m_arena->getPlane());
	angle = atan2(nearestPoint.y - robotPosition.y, nearestPoint.x - robotPosition.x) - robotOrientation;

	return true;
}

Cylinder2DWrapper::Cylinder2DWrapper(Arena* arena, PhyCylinder* cylinder, Type type) :
	PhyObject2DWrapper(arena),
	m_cylinder(cylinder),
	m_type(((type != SmallCylinder) && (type != BigCylinder) && (type != CircularTargetArea)) ? Cylinder : type)
{
	if (m_type == CircularTargetArea) {
		m_cylinder->setKinematic(true, false);
	}
}

Cylinder2DWrapper::~Cylinder2DWrapper()
{
	// Nothing to do here
}

PhyCylinder* Cylinder2DWrapper::phyObject()
{
	return m_cylinder;
}

const PhyCylinder* Cylinder2DWrapper::phyObject() const
{
	return m_cylinder;
}

void Cylinder2DWrapper::setStatic(bool s)
{
	// CircularTargetArea cannot be made non-static
	if (m_type == CircularTargetArea) {
		return;
	}

	phyObject()->setStatic(s);
}

void Cylinder2DWrapper::setKinematic(bool b, bool c)
{
	// CircularTargetArea cannot be made non-kinematic
	if (m_type == CircularTargetArea) {
		return;
	}

	phyObject()->setKinematic(b, c);
}

Cylinder2DWrapper::Type Cylinder2DWrapper::type() const
{
	return m_type;
}

void Cylinder2DWrapper::setPosition(real x, real y)
{
	wVector pos = phyObject()->matrix().w_pos;

	pos.x = x;
	pos.y = y;

	phyObject()->setPosition(pos);
}

void Cylinder2DWrapper::computeLinearViewFieldOccupiedRange(const wMatrix& cameraMtr, QVector<AngularRangeAndColor>& rangesAndColors, double& distance, double maxDistance) const
{
	// If this is a CircularTargetArea, we simply return a negative distance (it is not visible with
	// a linear camera)
	if (m_type == CircularTargetArea) {
		distance = -1.0;
		return;
	}

	// Getting the vector with the cylinder colors
	computeLinearViewFieldOccupiedRangeForCircle(cameraMtr, m_cylinder->matrix(), m_cylinder->radius(), m_cylinder->segmentsColor().constBegin(), m_cylinder->segmentsColor().constEnd(), rangesAndColors, distance, maxDistance);
}

bool Cylinder2DWrapper::computeDistanceAndOrientationFromRobot(const WheeledRobot2DWrapper& robot, double& distance, double& angle) const
{
	// If this is a CircularTargetArea, we simply return false
	if (m_type == CircularTargetArea) {
		return false;
	}

	const double robotOrientation = (dynamic_cast<const RobotOnPlane*>(robot.wObject()))->orientation(m_arena->getPlane());
	computeDistanceAndOrientationFromRobotToCylinder(robot.position(), robotOrientation, robot.getRadius(), m_cylinder->radius(), m_cylinder->matrix().w_pos, distance, angle);

	return true;
}


Sphere2DWrapper::Sphere2DWrapper(Arena* arena, PhySphere* sphere, Type /*type*/) :
	PhyObject2DWrapper(arena),
	m_sphere(sphere),
	m_type(LightBulb) // For the moment the type can only be LightBulb
{
	m_sphere->setKinematic(true);
}

Sphere2DWrapper::~Sphere2DWrapper()
{
	// Nothing to do here
}

PhySphere* Sphere2DWrapper::phyObject()
{
	return m_sphere;
}

const PhySphere* Sphere2DWrapper::phyObject() const
{
	return m_sphere;
}

void Sphere2DWrapper::setStatic(bool /*s*/)
{
	// The sphere is always static, this does nothing
}

void Sphere2DWrapper::setKinematic(bool /*b*/, bool /*c*/)
{
	// The sphere is always kinematic, this does nothing
}

Sphere2DWrapper::Type Sphere2DWrapper::type() const
{
	return m_type;
}

void Sphere2DWrapper::setPosition(real x, real y)
{
	wVector pos = phyObject()->matrix().w_pos;

	pos.x = x;
	pos.y = y;

	phyObject()->setPosition(pos);
}

void Sphere2DWrapper::computeLinearViewFieldOccupiedRange(const wMatrix& /*cameraMtr*/, QVector<AngularRangeAndColor>& /*rangesAndColors*/, double& distance, double /*maxDistance*/) const
{
	// Light bulbs are not seen by the linear camera
	distance = -1.0;
}

bool Sphere2DWrapper::computeDistanceAndOrientationFromRobot(const WheeledRobot2DWrapper& robot, double& distance, double& angle) const
{
	wVector robotPosition = robot.position();
	robotPosition.z = 0.0;
	wVector position = m_sphere->matrix().w_pos;
	position.z = 0.0;

	distance = (position - robotPosition).norm() - robot.getRadius() - m_sphere->radius();

	const double robotOrientation = (dynamic_cast<const RobotOnPlane*>(robot.wObject()))->orientation(m_arena->getPlane());
	angle = atan2(position.y - robotPosition.y, position.x - robotPosition.x) - robotOrientation;

	return true;
}

WheeledRobot2DWrapper::WheeledRobot2DWrapper(Arena* arena, RobotOnPlane* robot, double height, double radius) :
	PhyObject2DWrapper(arena),
	m_robot(robot),
	m_height(height),
	m_radius(radius)
{
}

WheeledRobot2DWrapper::~WheeledRobot2DWrapper()
{
}

RobotOnPlane* WheeledRobot2DWrapper::robotOnPlane()
{
	return m_robot;
}

const RobotOnPlane* WheeledRobot2DWrapper::robotOnPlane() const
{
	return m_robot;
}

WObject* WheeledRobot2DWrapper::wObject()
{
	return dynamic_cast<WObject*>(m_robot);
}

const WObject* WheeledRobot2DWrapper::wObject() const
{
	return dynamic_cast<const WObject*>(m_robot);
}

PhyObject* WheeledRobot2DWrapper::phyObject()
{
	return NULL;
}

const PhyObject* WheeledRobot2DWrapper::phyObject() const
{
	return NULL;
}

WheeledRobot2DWrapper::Type WheeledRobot2DWrapper::type() const
{
	return WheeledRobot;
}

void WheeledRobot2DWrapper::setPosition(real x, real y)
{
	wVector pos = wObject()->matrix().w_pos;

	pos.x = x;
	pos.y = y;

	wObject()->setPosition(pos);
}

void WheeledRobot2DWrapper::computeLinearViewFieldOccupiedRange(const wMatrix& cameraMtr, QVector<AngularRangeAndColor>& rangesAndColors, double& distance, double maxDistance) const
{
	const wMatrix mtr = wObject()->matrix().rotateAround(wObject()->matrix().y_ax, wObject()->matrix().w_pos, -PI_GRECO / 2.0);

	// Getting the vector with the robot colors. For the moment only the MarXbot and the e-puck
	// expose a list of colored segments
#if defined(__GNUC__) && defined(DEVELOPER_WARNINGS)
	#warning THIS STUFF IS UGLY, THE CODE FOR SIMULATIONS WITH WHEELED ROBOTS IS GETTING TANGLED...
#endif
	PhyMarXbot* marxbot = dynamic_cast<PhyMarXbot*>(m_robot);
	if (marxbot == NULL) {
		PhyEpuck* epuck = dynamic_cast<PhyEpuck*>(m_robot);

		if (epuck == NULL) {
			// This cast should never fail!
			WObject* r = dynamic_cast<WObject*>(m_robot);
			PhyCylinder::SegmentColor s(SimpleInterval(-PI_GRECO, PI_GRECO), r->color());
			computeLinearViewFieldOccupiedRangeForCircle(cameraMtr, mtr, m_radius, &s, (&s + 1), rangesAndColors, distance, maxDistance);
		} else {
			computeLinearViewFieldOccupiedRangeForCircle(cameraMtr, mtr, m_radius, epuck->segmentsColor().constBegin(), epuck->segmentsColor().constEnd(), rangesAndColors, distance, maxDistance);
		}
	} else {
		computeLinearViewFieldOccupiedRangeForCircle(cameraMtr, mtr, m_radius, marxbot->segmentsColor().constBegin(), marxbot->segmentsColor().constEnd(), rangesAndColors, distance, maxDistance);
	}
}

bool WheeledRobot2DWrapper::computeDistanceAndOrientationFromRobot(const WheeledRobot2DWrapper& robot, double& distance, double& angle) const
{
	if (this == &robot) {
		return false;
	}

	const double robotOrientation = (dynamic_cast<const RobotOnPlane*>(robot.wObject()))->orientation(m_arena->getPlane());
	computeDistanceAndOrientationFromRobotToCylinder(robot.position(), robotOrientation, robot.getRadius(), m_radius, position(), distance, angle);

	return true;
}

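// Returns the global position corresponding to the (x, y) coordinates on the given plane: the
// returned point lies on the upper face of the box wrapped by the plane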
wVector positionOnPlane(const Box2DWrapper* plane, real x, real y)
{
	wVector pos = plane->position();

	pos.x = x;
	pos.y = y;
	pos.z += plane->phyObject()->sideZ() / 2.0f;

	return pos;
}

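// Modifies mtr so that it keeps its original position but takes the orientation of the given
// plane rotated by angle (in radians) around the plane Z axis (i.e. its normal)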
void orientationOnPlane(const Box2DWrapper* plane, real angle, wMatrix& mtr)
{
	wMatrix rotatedMtr = plane->phyObject()->matrix();

	// Now rotating the matrix around the Z axis
	rotatedMtr = rotatedMtr.rotateAround(rotatedMtr.z_ax, rotatedMtr.w_pos, angle);

	// Setting the position of the rotated matrix to be the same as the original one
	rotatedMtr.w_pos = mtr.w_pos;

	// Now overwriting the matrix
	mtr = rotatedMtr;
}

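// Returns the signed angle (in radians) between the X axis of mtr1 and the X axis of mtr2,
// measured on the XY plane of mtr1 (the X axis of mtr2 is first projected onto that plane)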
real angleBetweenXAxes(const wMatrix& mtr1, const wMatrix& mtr2)
{
	// Taking the two X axes. We can take the X axis of mtr1 as is, while we need to project the X axis
	// of mtr2 onto the XY plane of mtr1. To do so we simply project the X axis of mtr2 on the Z axis of
	// mtr1 and subtract this vector from the original X axis of mtr2
	const wVector& x1 = mtr1.x_ax;
	const wVector x2 = mtr2.x_ax - mtr1.z_ax.scale(mtr2.x_ax % mtr1.z_ax);

	// Now normalizing both axes
	const wVector normX1 = x1.scale(1.0 / x1.norm());
	const wVector normX2 = x2.scale(1.0 / x2.norm());

	// To get the (unsigned) angle we compute the acos of the dot product of the two vectors. As in
	// getAngleWithXAxis() above, the dot product is clamped to [-1, 1] to guard against numerical errors
	const double unsignedAngle = acos(min(1.0, max(-1.0, normX1 % normX2)));

	// Now choosing the right sign. To do this we first compute the cross product of the two X axes and
	// then we check whether it points in the same direction as the Z axis of the first matrix or not
	const double s = mtr1.z_ax % (normX1 * normX2);
	return (s < 0.0) ? -unsignedAngle : unsignedAngle;
}

} // end namespace farsa