Return to Main Page
GeSHi © 2004, Nigel McNie.
  1. /*
  2.  * Copyright (c) 2006 Thomas Stout
  3.  * All rights reserved.
  4.  */
  5.  
  6. /**
  7.  * @author Thomas Stout <tstout@mtu.edu>
  8.  */
  9.  
//Tuning constants -- the trailing "//n" comments record the original
//(full-scale) values, reduced here for testing
#define MAX_REQS 2 //10 -- max requests tracked concurrently per node
#define MAX_WAITSIZE 10 //20 -- max child nodes awaited per request
#define MAX_QUEUE 5 //20 -- depth of each outgoing packet queue
#define HIST_TABLE 5 //20 -- samples kept per sensor history table

//Packet types carried in msgType
#define PKT_REQ 1 //aggregation request
#define PKT_RESP 2 //"I will answer" acknowledgement to a request
#define PKT_DATA 3 //data reply carrying (partial) results
#define SEND_COUNT 3 //each queued packet is broadcast this many times
#define CLOCK_RES 100 //main timer period in milliseconds
#define SAMPLE_PERIOD 500 //500 millisecond sampling interval

//Function Definitions -- aggregation applied to a requested field
#define F_MIN 0
#define F_MAX 1
#define F_AVERAGE 2
#define F_SUM 3
#define F_ALL 4 //no aggregation: forward every node's raw value upstream

//Field Definitions -- which sensor/value a request field refers to
#define D_TEMP 10
#define D_LIGHT 11
#define D_ACCELX 12
#define D_ACCELY 13
#define D_MIC 14
#define D_VOLTAGE 15
#define D_NODEID 16 //pseudo-field: this node's address
#define D_PARENTID 17 //pseudo-field: the request parent's address

//Conditional Definitions -- comparison operators in a request's condition list
#define C_EQ 20
#define C_LT 21
#define C_GT 22
#define C_LTEQ 23
#define C_GTEQ 24
#define C_NTEQ 25
#define C_FLD 0x80 //If this bit is set, the immediate value is another field Id, not an actual number
//Conditional relationships -- structural tokens of the encoded boolean expression
#define C_START 26 //opens a condition group (may nest)
#define C_STOP 27 //closes a condition group
#define C_AND 28
#define C_OR 29
  51.  
//Wire format of an aggregation request packet (PKT_REQ).
typedef struct
{
    int16_t msgType;         //PKT_REQ
    uint16_t sourceAddr;     //node that forwarded this request (our parent)
    uint16_t requestId;      //unique id used to deduplicate/match replies
    uint8_t fieldLength; //number of fields
    uint8_t condLength; //number of conditionals
    uint8_t field_cond[255]; //Fields and conditional data
} Request_msg;
  61.  
//Wire format of a response packet (PKT_RESP): tells the parent node
//"I accepted your request and will send data later".
typedef struct
{
    int16_t msgType;     //PKT_RESP
    uint16_t sourceAddr; //responding node
    uint16_t destAddr;   //the parent the response is addressed to
    uint16_t requestId;  //which request is being acknowledged
} Response_msg;
  69.  
//Wire format of a data reply packet (PKT_DATA) carrying aggregated results.
typedef struct
{
    int16_t msgType;     //PKT_DATA
    uint16_t sourceAddr; //node sending the data
    uint16_t destAddr;   //parent node the data is addressed to
    uint16_t requestId;  //which request this data answers
    bool valid; //If the node ended up not fitting the conditionals, valid is set to FALSE
    uint32_t nodes;      //bitmap of node ids whose data is folded into this reply
    uint8_t dataLength;  //bytes used in data[]
    uint8_t data[255]; //the data that was requested (little-endian uint16 per field)
} DataReply_msg;
  81.  
//In-RAM bookkeeping for one request this node has accepted and is servicing.
typedef struct
{
    bool active;               //slot in use
    bool valid; //Valid if the data should be sent as valid=TRUE
    uint16_t parentAddr;       //node to send our aggregated reply to
    uint16_t requestId;
    uint8_t fieldLength;       //bytes of field data in field_cond[]
    uint8_t condLength;        //bytes of conditional data following the fields
    uint8_t field_cond[255];   //copy of the request's fields + conditionals
    uint16_t partialResults[20]; //running aggregate, one entry per field
    uint32_t partialNodes;     //bitmap of nodes already folded into partialResults
    int16_t waitingOn[MAX_WAITSIZE]; //child addrs still owing data (-1 = empty slot)
    uint64_t respondTime;      //local time (ms) at which we give up waiting and reply
} Request;
  96.  
//ProjectP: in-network query/aggregation service for a TinyOS mote.
//Receives Request packets, acknowledges them with Response packets, samples
//local sensors on a timer, merges children's DataReply packets, and forwards
//aggregated results to the requesting parent.
module ProjectP {
    provides interface StdControl;       //init/start/stop lifecycle
    uses interface Timer;                //CLOCK_RES tick: timeouts + send pump
    uses interface Timer as SampleTimer; //SAMPLE_PERIOD tick: starts sensor chain
    uses interface Leds;                 //debug display
    uses interface SendMsg;              //radio transmit
    uses interface ReceiveMsg;           //radio receive
    uses interface Random;
    uses interface ADC as AccelX;
    uses interface ADC as AccelY;
    uses interface ADC as Photo;
    uses interface ADC as Mic;
    uses interface ADC as Temp;
    uses interface ADC as Voltage;
}
  112. implementation {
    TOS_Msg m_msg;              //single shared radio transmit buffer
    uint64_t m_latestTime = 0;  //local clock in ms, advanced by Timer.fired
    bool m_isSendingData;       //TRUE while a SendMsg.send is outstanding
    uint16_t nextSend;          //which queue sends next (PKT_REQ/RESP/DATA round-robin)
    int sendIndex;              //queue index of the packet currently in flight
    int sendCount;              //times the current head packet has been transmitted
    Request curReqs[MAX_REQS];        //requests this node is actively servicing
    Request_msg sendReqs[MAX_QUEUE];  //outgoing request-forward queue (msgType -1 = empty)
    Response_msg sendResp[MAX_QUEUE]; //outgoing response queue
    DataReply_msg sendData[MAX_QUEUE]; //outgoing data-reply queue
    //Sensor histories, newest sample at index 0. Written from async ADC
    //events, so every access is wrapped in an atomic block.
    uint16_t histMic[HIST_TABLE];
    uint16_t histTemp[HIST_TABLE];
    uint16_t histLight[HIST_TABLE];
    uint16_t histAccelX[HIST_TABLE];
    uint16_t histAccelY[HIST_TABLE];
    uint16_t histVoltage[HIST_TABLE];
  129.  
  130. command result_t StdControl.init()
  131. {
  132. int i;
  133. call Random.init();
  134. for (i = 0; i < MAX_QUEUE; i++)
  135. {
  136. sendReqs[i].msgType = -1; //msgType of -1 means "empty"
  137. sendResp[i].msgType = -1; //Or, don't send this packet
  138. sendData[i].msgType = -1; //if not -1, then send the packet
  139. }
  140. for (i = 0; i < MAX_REQS; i++)
  141. {
  142. curReqs[i].active = FALSE;
  143. curReqs[i].requestId = 0;
  144. }
  145. atomic
  146. {
  147. for (i = 0; i < HIST_TABLE; i++)
  148. {
  149. histMic[i] = 0;
  150. histTemp[i] = 0;
  151. histLight[i] = 0;
  152. histAccelX[i] = 0;
  153. histAccelY[i] = 0;
  154. histVoltage[i] = 0;
  155. }
  156. }
  157.  
  158. m_isSendingData = FALSE;
  159. nextSend = PKT_REQ;
  160. sendCount = 0;
  161. return SUCCESS;
  162. }
  163.  
    //Start the two periodic timers: the main tick (timeouts + send pump) and
    //the sensor sampling tick.
    command result_t StdControl.start()
    {
        call Timer.start( TIMER_REPEAT, CLOCK_RES );
        call SampleTimer.start( TIMER_REPEAT, SAMPLE_PERIOD );
        return SUCCESS;
    }
  170.  
    //Stop both periodic timers; queued packets and request state are retained.
    command result_t StdControl.stop()
    {
        call Timer.stop();
        call SampleTimer.stop();
        return SUCCESS;
    }
  177.  
    //Radio transmit completion. Each head-of-queue packet is broadcast
    //SEND_COUNT times for reliability; after the final transmission the packet
    //at sendIndex is removed by shifting the rest of its queue down one slot,
    //and nextSend rotates to the next queue (REQ -> RESP -> DATA -> REQ).
    event result_t SendMsg.sendDone(TOS_MsgPtr msg, result_t result)
    {
        int i, j;
        sendCount = sendCount + 1;
        if (nextSend == PKT_REQ)
        {
            if (sendCount == SEND_COUNT)
            {
                //Retire the just-sent request and compact the queue.
                sendReqs[sendIndex].msgType = -1;
                for (i = sendIndex; i < MAX_QUEUE - 1; i++)
                {
                    sendReqs[i].msgType = sendReqs[i + 1].msgType;
                    sendReqs[i].sourceAddr = sendReqs[i + 1].sourceAddr;
                    sendReqs[i].requestId = sendReqs[i + 1].requestId;
                    sendReqs[i].fieldLength = sendReqs[i + 1].fieldLength;
                    sendReqs[i].condLength = sendReqs[i + 1].condLength;
                    //Copy only the bytes actually used by fields + conditionals.
                    for (j = 0; j < sendReqs[i].fieldLength + sendReqs[i].condLength; j++)
                    {
                        sendReqs[i].field_cond[j] = sendReqs[i + 1].field_cond[j];
                    }
                }
                sendReqs[MAX_QUEUE - 1].msgType = -1;
                nextSend = PKT_RESP;
                sendCount = 0;
            }
        }
        else if (nextSend == PKT_RESP)
        {
            if (sendCount == SEND_COUNT)
            {
                //Retire the just-sent response and compact the queue.
                sendResp[sendIndex].msgType = -1;
                for (i = sendIndex; i < MAX_QUEUE - 1; i++)
                {
                    sendResp[i].msgType = sendResp[i + 1].msgType;
                    sendResp[i].sourceAddr = sendResp[i + 1].sourceAddr;
                    sendResp[i].destAddr = sendResp[i + 1].destAddr;
                    sendResp[i].requestId = sendResp[i + 1].requestId;
                }
                sendResp[MAX_QUEUE - 1].msgType = -1;
                nextSend = PKT_DATA;
                sendCount = 0;
            }
        }
        else
        {
            if (sendCount == SEND_COUNT)
            {
                //Retire the just-sent data reply and compact the queue.
                sendData[sendIndex].msgType = -1;
                for (i = sendIndex; i < MAX_QUEUE - 1; i++)
                {
                    sendData[i].msgType = sendData[i + 1].msgType;
                    sendData[i].sourceAddr = sendData[i + 1].sourceAddr;
                    sendData[i].destAddr = sendData[i + 1].destAddr;
                    sendData[i].requestId = sendData[i + 1].requestId;
                    sendData[i].valid = sendData[i + 1].valid;
                    sendData[i].nodes = sendData[i + 1].nodes;
                    sendData[i].dataLength = sendData[i + 1].dataLength;
                    for (j = 0; j < sendData[i].dataLength;j++)
                    {
                        sendData[i].data[j] = sendData[i + 1].data[j];
                    }
                }
                sendData[MAX_QUEUE - 1].msgType = -1;
                nextSend = PKT_REQ;
                sendCount = 0;
            }
        }
        //Radio is free again; Timer.fired may start the next transmission.
        m_isSendingData = FALSE;
        return SUCCESS;
    }
  248.  
  249. int findReqById(uint16_t requestId)
  250. {
  251. int i;
  252. i = -1;
  253. //Find out which request this Id is for
  254. for (i = 0; i < MAX_REQS; i++)
  255. {
  256. if (curReqs[i].requestId == requestId && curReqs[i].active)
  257. {
  258. return i;
  259. }
  260. }
  261. return i;
  262. }
  263.  
  264. bool isNodeListed(uint16_t nodeId, uint32_t nodeList)
  265. {
  266. return (((1 << nodeId) && nodeList) > 0);
  267. }
  268.  
  269. int getFreeReqIndex()
  270. {
  271. int i;
  272. for (i = 0; i < MAX_QUEUE; i++)
  273. {
  274. if (sendReqs[i].msgType == -1)
  275. {
  276. return i;
  277. }
  278. }
  279. return -1;
  280. }
  281.  
  282. int getFreeRespIndex()
  283. {
  284. int i;
  285. for (i = 0; i < MAX_QUEUE; i++)
  286. {
  287. if (sendResp[i].msgType == -1)
  288. {
  289. return i;
  290. }
  291. }
  292. return -1;
  293. }
  294.  
  295. int getFreeDataIndex()
  296. {
  297. int i;
  298. for (i = 0; i < MAX_QUEUE; i++)
  299. {
  300. if (sendData[i].msgType == -1)
  301. {
  302. return i;
  303. }
  304. }
  305. return -1;
  306. }
  307.  
  308. int countNodes(int n)
  309. {
  310. int i;
  311. int toRet;
  312. toRet = 0;
  313. for (i = 0; i < 32; i++)
  314. {
  315. if ((n & (1 << i)) > 0)
  316. {
  317. toRet = toRet + 1;
  318. }
  319. }
  320. return toRet;
  321. }
  322.  
    //Merge a child's DataReply into the running aggregate of the matching
    //request: updates req->partialResults[] per each field's aggregation
    //function and records the contributing nodes in req->partialNodes.
    //For F_ALL requests the child's data is instead forwarded verbatim toward
    //the parent. Duplicate contributions (overlapping node bitmaps) are ignored.
    void processData(Request* req, DataReply_msg* data)
    {
        int i, j;
        int numFields;    //number of (field, function) pairs in the request
        int newNodeCount; //nodes represented by the incoming reply
        int oldNodeCount; //nodes already folded into the partial results
        int temp;

        if ((data->nodes & req->partialNodes) > 0)
        {
            //Somehow, there is a duplicate node, so abort
            return;
        }

        numFields = req->fieldLength / 2; //1-byte field, 1-byte function
        newNodeCount = countNodes(data->nodes);
        oldNodeCount = countNodes(req->partialNodes);

        for (i = 0; i < numFields; i++)
        {
            if (req->field_cond[(i * 2) + 1] == F_ALL)
            {
                //All data requested for one field means all data MUST be requested for all fields
                //So, we need to just forward this data on to our parent
                temp = getFreeDataIndex();
                if (temp == -1)
                {
                    //No queue space for this packet, gotta bail...
                    return;
                }
                sendData[temp].msgType = PKT_DATA;
                sendData[temp].sourceAddr = TOS_LOCAL_ADDRESS;
                sendData[temp].destAddr = req->parentAddr;
                sendData[temp].requestId = data->requestId;
                sendData[temp].valid = TRUE;
                sendData[temp].nodes = data->nodes;
                sendData[temp].dataLength = data->dataLength;
                for (j = 0; j < data->dataLength; j++)
                {
                    sendData[temp].data[j] = data->data[j];
                }
                return; //All data is forwarded, no processing to do
            }
            else if (req->field_cond[(i * 2) + 1] == F_MIN)
            {
                //Find MINIMUM of requested field.
                //Field values travel as little-endian byte pairs; they are
                //reassembled byte-wise rather than via the (commented-out)
                //pointer cast, which would rely on alignment/endianness.
                //if (oldNodeCount == 0 || *(uint16_t*)&data->data[(i * 2)] < req->partialResults[i])
                if (oldNodeCount == 0 || (data->data[(i * 2)] | (data->data[(i * 2) + 1] << 8)) < req->partialResults[i])
                {
                    //req->partialResults[i] = *(uint16_t*)&data->data[(i * 2)];
                    req->partialResults[i] = (data->data[(i * 2)] | (data->data[(i * 2) + 1] << 8));
                }
            }
            else if (req->field_cond[(i * 2) + 1] == F_MAX)
            {
                //Find MAXIMUM of requested field
                //if (oldNodeCount == 0 || *(uint16_t*)&data->data[(i * 2)] > req->partialResults[i])
                if (oldNodeCount == 0 || (data->data[(i * 2)] | (data->data[(i * 2) + 1] << 8)) > req->partialResults[i])
                {
                    //req->partialResults[i] = *(uint16_t*)&data->data[(i * 2)];
                    req->partialResults[i] = (data->data[(i * 2)] | (data->data[(i * 2) + 1] << 8));
                }
            }
            else if (req->field_cond[(i * 2) + 1] == F_AVERAGE)
            {
                //Find AVERAGE of requested fields: weighted mean of the old
                //partial average (oldNodeCount nodes) and the incoming average
                //(newNodeCount nodes), computed in 32 bits to avoid overflow.
                //req->partialResults[i] = (uint16_t)((uint32_t)((uint32_t)((uint32_t)req->partialResults[i] * oldNodeCount) + (uint32_t)((uint32_t)(*(uint16_t*)&data->data[(i * 2)]) * newNodeCount)) / (oldNodeCount + newNodeCount));
                req->partialResults[i] = (uint16_t)((uint32_t)((uint32_t)((uint32_t)req->partialResults[i] * oldNodeCount) + (uint32_t)((uint32_t)(data->data[(i * 2)] | (data->data[(i * 2) + 1] << 8)) * newNodeCount)) / (oldNodeCount + newNodeCount));
            }
            else if (req->field_cond[(i * 2) + 1] == F_SUM)
            {
                //Find SUM of requested fields
                //req->partialResults[i] = req->partialResults[i] + *(uint16_t*)&data->data[(i * 2)];
                req->partialResults[i] = req->partialResults[i] + (data->data[(i * 2)] | (data->data[(i * 2) + 1] << 8));
            }
        }
        //Update which nodes we have results for
        req->partialNodes = req->partialNodes | data->nodes;
    }
  402.  
    //Return the hist-th most recent sample (0 = newest) for the given field
    //Id, or the node/parent address for the ID pseudo-fields. Unknown sensor
    //Ids yield 0. Atomic because the history tables are written from async
    //ADC dataReady events.
    uint16_t getSensorValue(uint8_t sensor, int hist, Request* req)
    {
        atomic
        {
            if (sensor == D_TEMP)
            {
                return histTemp[hist];
            }
            else if (sensor == D_LIGHT)
            {
                return histLight[hist];
            }
            else if (sensor == D_ACCELX)
            {
                return histAccelX[hist];
            }
            else if (sensor == D_ACCELY)
            {
                return histAccelY[hist];
            }
            else if (sensor == D_MIC)
            {
                return histMic[hist];
            }
            else if (sensor == D_VOLTAGE)
            {
                return histVoltage[hist];
            }
            else if (sensor == D_NODEID)
            {
                return TOS_LOCAL_ADDRESS;
            }
            else if (sensor == D_PARENTID)
            {
                return req->parentAddr;
            }
        }
        return 0; //unrecognized sensor Id
    }
  442.  
    //checkConditions evaluates a conditional expression between a START and STOP tag
    //It calls itself recursively to evaluate sub-expression START/STOP tags
    //Ex. A * (B + C) will be encoded as "START A AND START B OR START C STOP STOP STOP"
    //All conditionals are broken into individual groups as well
    //Ex. A * B * C will be encoded as "START A AND START B AND C STOP STOP STOP"
    //Ex. ((A + (B)) * (C + (D)) encodes as "START START A OR START B STOP STOP AND START C OR START D STOP STOP STOP"
    //
    //A leaf comparison occupies 4 bytes: field Id, operator (C_FLD bit set =>
    //compare against another field), then a 2-byte little-endian immediate
    //(or the other field Id in byte 3). Malformed input defaults to TRUE.
    bool checkConditions(uint8_t* cond, Request* req)
    {
        int i;
        int nextIndex; //index of the group that follows the current operand
        int count;     //nesting depth while skipping over a sub-expression
        bool toRet;
        bool temp;     //truth value of the current operand
        uint16_t val1;
        uint16_t val2;
        toRet = TRUE; //Assume the conditions are met until we find out otherwise

        if (cond[0] != C_START)
        {
            //Something is wrong with the conditional expression
            return toRet;
        }

        //Terminate condition: we have reached the STOP tag
        if (cond[1] == C_STOP)
        {
            return toRet;
        }

        //See if this is a comparison or another nested group
        if (cond[1] == C_START)
        {
            //Evaluate the nested group, then skip past it to find the
            //connective (AND/OR/STOP) and the start of the next operand.
            temp = checkConditions(&cond[1], req);
            count = 0;
            i = 2;
            while(cond[i] != C_STOP || count > 0)
            {
                if (cond[i] == C_START)
                {
                    count = count + 1;
                    i = i + 1;
                }
                else if (cond[i] == C_STOP)
                {
                    count = count - 1;
                    i = i + 1;
                }
                else if (cond[i] == C_AND || cond[i] == C_OR)
                {
                    i = i + 1;
                }
                else
                {
                    //The current group is a condition, skip over it
                    i = i + 4;
                }
                if (i == 255)
                {
                    //Prevent bad data causing an infinite loop
                    break;
                }
            }
            //Advance to the START of the next operand group.
            while (cond[i] != C_START)
            {
                i = i + 1;
                if (i == 255)
                {
                    //Prevent an infinite loop...
                    break;
                }
            }
            nextIndex = i;
        }
        else
        {
            //Location of the next condition
            nextIndex = 6;

            //Find first value for comparison
            val1 = getSensorValue(cond[1], 0, req);

            //Find Second Value For Comparison
            if ((cond[2] & C_FLD) > 0)
            {
                //This is a field vs. field condition
                val2 = getSensorValue(cond[3], 0, req);
                call Leds.set(0x00); //debug: clear LEDs
            }
            else
            {
                //This is a field vs. value condition (little-endian immediate)
                //val2 = *(uint16_t*)&cond[3];
                val2 = (cond[3] | (cond[4] << 8));
                call Leds.set((val2 >> ((TOS_LOCAL_ADDRESS - 1) * 3)) & 0x07); //debug display
            }

            //Perform comparison (mask off the C_FLD flag bit first)
            if ((cond[2] & ~C_FLD) == C_EQ)
            {
                temp = (val1 == val2);
            }
            else if ((cond[2] & ~C_FLD) == C_LT)
            {
                temp = (val1 < val2);
            }
            else if ((cond[2] & ~C_FLD) == C_GT)
            {
                temp = (val1 > val2);
            }
            else if ((cond[2] & ~C_FLD) == C_LTEQ)
            {
                temp = (val1 <= val2);
            }
            else if ((cond[2] & ~C_FLD) == C_GTEQ)
            {
                temp = (val1 >= val2);
            }
            else if((cond[2] & ~C_FLD) == C_NTEQ)
            {
                temp = (val1 != val2);
            }
            else
            {
                temp = TRUE; //Unrecognized comparison
            }
        }

        //See if this is the end, or if there are more conditions
        //(the connective immediately precedes the next group's START)
        if (cond[nextIndex - 1] == C_STOP)
        {
            toRet = temp;
        }
        else if (cond[nextIndex - 1] == C_AND)
        {
            toRet = temp && checkConditions(&cond[nextIndex], req);
        }
        else if (cond[nextIndex - 1] == C_OR)
        {
            toRet = temp || checkConditions(&cond[nextIndex], req);
        }

        //All conditions are checked, return the results
        return toRet;
    }
  587.  
  588. void doDataReply(Request* req)
  589. {
  590. int i;
  591. int numFields;
  592. bool condsMet;
  593. int nodeCount;
  594. int temp;
  595. uint16_t sensorData;
  596.  
  597. numFields = req->fieldLength / 2; //1-byte field, 1-byte function
  598. nodeCount = countNodes(req->partialNodes);
  599.  
  600. //Check the conditions for this request
  601. condsMet = checkConditions(&req->field_cond[req->fieldLength], req);
  602. if (condsMet)
  603. {
  604. for (i = 0; i < numFields; i++)
  605. {
  606. sensorData = getSensorValue(req->field_cond[(i * 2)], 0, req);
  607. if (req->field_cond[(i * 2) + 1] == F_ALL)
  608. {
  609. //All data requested
  610. req->partialResults[i] = sensorData;
  611. }
  612. else if (req->field_cond[(i * 2) + 1] == F_MIN)
  613. {
  614. if (nodeCount == 0 || sensorData < req->partialResults[i])
  615. {
  616. req->partialResults[i] = sensorData;
  617. }
  618. }
  619. else if (req->field_cond[(i * 2) + 1] == F_MAX)
  620. {
  621. if (nodeCount == 0 || sensorData > req->partialResults[i])
  622. {
  623. req->partialResults[i] = sensorData;
  624. }
  625. }
  626. else if (req->field_cond[(i * 2) + 1] == F_AVERAGE)
  627. {
  628. req->partialResults[i] = (uint16_t)((uint32_t)((uint32_t)(nodeCount * (uint32_t)req->partialResults[i]) + (uint32_t)sensorData) / (nodeCount + 1));
  629. }
  630. else if (req->field_cond[(i * 2) + 1] == F_SUM)
  631. {
  632. req->partialResults[i] = req->partialResults[i] + sensorData;
  633. }
  634. }
  635. }
  636. else
  637. {
  638. if (req->field_cond[1] == F_ALL)
  639. {
  640. req->valid = FALSE; //Sent a data-less response to say no data is coming
  641. }
  642. }
  643. req->partialNodes = req->partialNodes | (1 << TOS_LOCAL_ADDRESS);
  644.  
  645. //Send out the data packet!
  646. temp = getFreeDataIndex();
  647. sendData[temp].msgType = PKT_DATA;
  648. sendData[temp].sourceAddr = TOS_LOCAL_ADDRESS;
  649. sendData[temp].destAddr = req->parentAddr;
  650. sendData[temp].requestId = req->requestId;
  651. sendData[temp].valid = req->valid;
  652. sendData[temp].nodes = req->partialNodes;
  653. sendData[temp].dataLength = numFields * 2;
  654. for (i = 0; i < numFields; i++)
  655. {
  656. sendData[temp].data[(i * 2)] = req->partialResults[i] & 0xFF;
  657. sendData[temp].data[(i * 2) + 1] = (req->partialResults[i] >> 8) & 0xFF;
  658. }
  659. }
  660.  
    //Main CLOCK_RES tick: advances the local clock, replies to any request
    //whose wait deadline has passed, and -- if the radio is idle -- pumps one
    //packet from the current round-robin queue (REQ -> RESP -> DATA). Only
    //the first pending packet of the current queue is sent per tick; an empty
    //queue rotates nextSend so other queues are not starved.
    event result_t Timer.fired()
    {
        int i, j;
        bool sent;
        m_latestTime = m_latestTime + CLOCK_RES;

        //See if there are any timers that have expired
        for (i = 0; i < MAX_REQS; i++)
        {
            if (curReqs[i].active)
            {
                if (m_latestTime >= curReqs[i].respondTime)
                {
                    //This request has had a timeout expire, send what data we have
                    doDataReply(&curReqs[i]);
                    curReqs[i].active = FALSE; //Since we replied, this request is no longer active
                }
            }
        }

        //See if we can send out some data from our send queues...
        if (!m_isSendingData)
        {
            sent = FALSE;
            if (nextSend == PKT_REQ)
            {
                //Note: the if-body (and its break) only runs for the first
                //pending packet; the loop otherwise scans the whole queue.
                for (i = 0; i < MAX_QUEUE; i++)
                if (sendReqs[i].msgType != -1)
                {
                    //Copy the queued request into the shared radio buffer
                    Request_msg* body = (Request_msg*)m_msg.data;
                    body->msgType = sendReqs[i].msgType;
                    body->sourceAddr = sendReqs[i].sourceAddr;
                    body->requestId = sendReqs[i].requestId;
                    body->fieldLength = sendReqs[i].fieldLength;
                    body->condLength = sendReqs[i].condLength;
                    for (j = 0; j < sendReqs[i].fieldLength + sendReqs[i].condLength; j++)
                    {
                        body->field_cond[j] = sendReqs[i].field_cond[j];
                    }
                    //Trim the unused tail of field_cond[] from the send length
                    if ( call SendMsg.send(TOS_BCAST_ADDR,sizeof(Request_msg) - (255 - (sendReqs[i].fieldLength + sendReqs[i].condLength)),&m_msg) == SUCCESS )
                    {
                        m_isSendingData = TRUE;
                        sendIndex = i;
                        sent = TRUE;
                        dbg(DBG_USR1, "Node %i broadcasting request %i\n", (int)TOS_LOCAL_ADDRESS, (int)sendReqs[i].requestId);
                    }
                    break;
                }
                if (!sent)
                {
                    nextSend = PKT_RESP; //queue empty or send failed: rotate
                }
            }
            else if (nextSend == PKT_RESP)
            {
                for (i = 0; i < MAX_QUEUE; i++)
                if (sendResp[i].msgType != -1)
                {
                    Response_msg* body = (Response_msg*)m_msg.data;
                    body->msgType = sendResp[i].msgType;
                    body->sourceAddr = sendResp[i].sourceAddr;
                    body->destAddr = sendResp[i].destAddr;
                    body->requestId = sendResp[i].requestId;
                    if (call SendMsg.send(TOS_BCAST_ADDR,sizeof(Response_msg),&m_msg) == SUCCESS)
                    {
                        m_isSendingData = TRUE;
                        sendIndex = i;
                        sent = TRUE;
                        dbg(DBG_USR1, "Node %i responding to Node %i request %i\n", (int)TOS_LOCAL_ADDRESS, (int)sendResp[i].destAddr, (int)sendResp[i].requestId);
                    }
                    break;
                }
                if (!sent)
                {
                    nextSend = PKT_DATA;
                }
            }
            else if (nextSend == PKT_DATA)
            {
                for (i = 0; i < MAX_QUEUE; i++)
                if (sendData[i].msgType != -1)
                {
                    DataReply_msg* body = (DataReply_msg*)m_msg.data;
                    body->msgType = sendData[i].msgType;
                    body->sourceAddr = sendData[i].sourceAddr;
                    body->destAddr = sendData[i].destAddr;
                    body->requestId = sendData[i].requestId;
                    body->valid = sendData[i].valid;
                    body->nodes = sendData[i].nodes;
                    body->dataLength = sendData[i].dataLength;
                    dbg(DBG_USR1, "Node %i data for req %i:\n", (int)TOS_LOCAL_ADDRESS, (int)sendData[i].requestId);
                    //Copy results as little-endian uint16 byte pairs
                    for (j = 0; j < sendData[i].dataLength / 2; j++)
                    {
                        body->data[(j * 2)] = sendData[i].data[(j * 2)];
                        body->data[(j * 2) + 1] = sendData[i].data[(j * 2) + 1];
                        //dbg(DBG_USR1, " Field%i: %i\n", j, *(uint16_t*)&(sendData[i].data[j * 2]));
                        dbg(DBG_USR1, " Field%i: %i\n", j, (sendData[i].data[(j * 2)] | (sendData[i].data[(j * 2) + 1] << 8)));
                    }
                    //Trim the unused tail of data[] from the send length
                    if (call SendMsg.send(TOS_BCAST_ADDR,sizeof(DataReply_msg) - (255 - sendData[i].dataLength),&m_msg) == SUCCESS)
                    {
                        m_isSendingData = TRUE;
                        sendIndex = i;
                        sent = TRUE;
                        dbg(DBG_USR1, "Node %i sending data to Node %i request %i\n", (int)TOS_LOCAL_ADDRESS, (int)sendData[i].destAddr, (int)sendData[i].requestId);
                    }
                    break;
                }
                if (!sent)
                {
                    nextSend = PKT_REQ;
                }
            }
        }
        return SUCCESS;
    }
  776.  
    //Radio receive dispatcher. The payload is aliased as all three packet
    //layouts; msgType (first field of each) selects the real one.
    //PKT_REQ:  accept a new request (dedup by requestId), queue a Response to
    //          the parent and re-broadcast the Request to our children.
    //PKT_RESP: a child promised data -- add it to the request's wait list and
    //          extend the deadline.
    //PKT_DATA: fold a child's data into the partial aggregate; when no child
    //          remains outstanding, emit our own aggregated reply.
    event TOS_MsgPtr ReceiveMsg.receive( TOS_MsgPtr msg )
    {
        int i, j;
        int temp;
        bool found;
        Response_msg* body = (Response_msg*)msg->data; //used only for msgType
        Request_msg* req = (Request_msg*)msg->data;
        Response_msg* resp = (Response_msg*)msg->data;
        DataReply_msg* data = (DataReply_msg*)msg->data;

        if (body->msgType == PKT_REQ)
        {
            //a request
            found = FALSE;
            for (i = 0; i < MAX_REQS; i++)
            {
                if (curReqs[i].requestId == req->requestId)
                {
                    found = TRUE;
                }
            }
            if (found)
            {
                //We already did this request
                return msg;
            }
            //If we have space, add this request to our queue
            for (i = 0; i < MAX_REQS; i++)
            {
                if (!curReqs[i].active)
                {
                    curReqs[i].active = TRUE;
                    curReqs[i].valid = TRUE;
                    curReqs[i].parentAddr = req->sourceAddr;
                    curReqs[i].requestId = req->requestId;
                    curReqs[i].fieldLength = req->fieldLength;
                    curReqs[i].condLength = req->condLength;
                    for (temp = 0; temp < req->fieldLength + req->condLength; temp++)
                    {
                        curReqs[i].field_cond[temp] = req->field_cond[temp];
                    }
                    //20 == capacity of partialResults[]
                    for (temp = 0; temp < 20; temp++)
                    {
                        curReqs[i].partialResults[temp] = 0;
                    }
                    curReqs[i].partialNodes = 0;
                    curReqs[i].respondTime = m_latestTime + 3000; //wait 3 seconds before assuming there are no downstream nodes
                    for (j = 0; j < MAX_WAITSIZE; j++)
                    {
                        curReqs[i].waitingOn[j] = -1;
                    }
                    //Reply to parent
                    temp = getFreeRespIndex();
                    if (temp == -1)
                    {
                        //Response Transmit buffer is full
                        return msg;
                    }
                    sendResp[temp].msgType = PKT_RESP;
                    sendResp[temp].requestId = req->requestId;
                    sendResp[temp].sourceAddr = TOS_LOCAL_ADDRESS;
                    sendResp[temp].destAddr = req->sourceAddr;
                    //Forward this request on to other nodes
                    temp = getFreeReqIndex();
                    if (temp == -1)
                    {
                        //Request Transmit buffer is full
                        return msg;
                    }
                    sendReqs[temp].msgType = PKT_REQ;
                    sendReqs[temp].sourceAddr = TOS_LOCAL_ADDRESS;
                    sendReqs[temp].requestId = req->requestId;
                    sendReqs[temp].fieldLength = req->fieldLength;
                    sendReqs[temp].condLength = req->condLength;
                    //Reusing i is safe here: we return before the outer loop resumes
                    for (i = 0; i < req->fieldLength + req->condLength; i++)
                    {
                        sendReqs[temp].field_cond[i] = req->field_cond[i];
                    }
                    dbg(DBG_USR1, "Node %i accepts request %i from Node %i\n", (int)TOS_LOCAL_ADDRESS, (int)sendReqs[temp].requestId, (int)req->sourceAddr);
                    return msg;
                }
            }
        }
        else if (body->msgType == PKT_RESP && resp->destAddr == TOS_LOCAL_ADDRESS)
        {
            //a response
            //First, find out which request this response is for
            temp = findReqById(resp->requestId);

            //If this is a response to an active request, put this replying node in the wait list
            if (temp == -1)
            {
                return msg;
            }
            found = FALSE;
            //Make sure we aren't already waiting on this node
            for (i = 0; i < MAX_WAITSIZE; i++)
            {
                if (curReqs[temp].waitingOn[i] == resp->sourceAddr)
                    found = TRUE;
            }
            //If not, put this node on the waiting list
            if (!found)
            {
                for (i = 0; i < MAX_WAITSIZE; i++)
                {
                    if (curReqs[temp].waitingOn[i] == -1)
                    {
                        curReqs[temp].waitingOn[i] = resp->sourceAddr;
                        curReqs[temp].respondTime = (uint64_t)m_latestTime + (uint64_t)60000; //wait 60 seconds for data before timing out
                        dbg(DBG_USR1, "Node %i waiting for Node %i request %i\n", (int)TOS_LOCAL_ADDRESS, (int)resp->sourceAddr, (int)resp->requestId);
                        return msg;
                    }
                }
            }
        }
        else if (body->msgType == PKT_DATA && data->destAddr == TOS_LOCAL_ADDRESS)
        {
            //some data
            //First, find out which request this data is for
            temp = findReqById(data->requestId);

            //Check if we were expecting this data
            if (temp == -1)
            {
                return msg;
            }
            found = FALSE;
            for (i = 0; i < MAX_WAITSIZE; i++)
            {
                if (curReqs[temp].waitingOn[i] == data->sourceAddr)
                {
                    //If this is data for the child node, then this is the last data coming
                    //from this child node for this request, if not there will be more
                    if (isNodeListed(data->sourceAddr, data->nodes))
                        curReqs[temp].waitingOn[i] = -1;
                    found = TRUE;
                    break;
                }
            }
            if (!found)
            {
                //We weren't expecting this data
                return msg;
            }

            dbg(DBG_USR1, "Node %i got data for Node %i request %i\n", (int)TOS_LOCAL_ADDRESS, (int)data->sourceAddr, (int)data->requestId);

            //Process this data (if we should)
            if (data->valid)
            {
                processData(&curReqs[temp], data);
            }
            found = FALSE; //Here, found is if we find a node we're still waiting on
            for (i = 0; i < MAX_WAITSIZE; i++)
            {
                if (curReqs[temp].waitingOn[i] != -1)
                {
                    found = TRUE;
                }
            }
            if (!found)
            {
                //We're not waiting on any more data
                //Add our data to the current request and send our Data Reply
                doDataReply(&curReqs[temp]);
                curReqs[temp].active = FALSE; //Since we replied, this request is no longer active
            }
        }
        return msg;
    }
  948.  
    //Sampling tick: kicks off the sensor chain. Each dataReady handler
    //requests the next sensor (AccelX -> AccelY -> Photo -> Mic -> Temp ->
    //Voltage), so one tick samples all six.
    event result_t SampleTimer.fired()
    {
        call AccelX.getData();
        return SUCCESS;
    }
  954.  
  955. async event result_t AccelX.dataReady(uint16_t data)
  956. {
  957. int i;
  958. atomic
  959. {
  960. for (i = HIST_TABLE; i > 0; i--)
  961. {
  962. histAccelX[i] = histAccelX[i - 1];
  963. }
  964. histAccelX[0] = data;
  965. }
  966. call AccelY.getData();
  967. return SUCCESS;
  968. }
  969.  
  970. async event result_t AccelY.dataReady(uint16_t data)
  971. {
  972. int i;
  973. atomic
  974. {
  975. for (i = HIST_TABLE; i > 0; i--)
  976. {
  977. histAccelY[i] = histAccelY[i - 1];
  978. }
  979. histAccelY[0] = data;
  980. }
  981. call Photo.getData();
  982. return SUCCESS;
  983. }
  984.  
  985. async event result_t Photo.dataReady(uint16_t data)
  986. {
  987. int i;
  988. atomic
  989. {
  990. for (i = HIST_TABLE; i > 0; i--)
  991. {
  992. histLight[i] = histLight[i - 1];
  993. }
  994. histLight[0] = data;
  995. }
  996. call Mic.getData();
  997. return SUCCESS;
  998. }
  999.  
  1000. async event result_t Mic.dataReady(uint16_t data)
  1001. {
  1002. int i;
  1003. atomic
  1004. {
  1005. for (i = HIST_TABLE; i > 0; i--)
  1006. {
  1007. histMic[i] = histMic[i - 1];
  1008. }
  1009. histMic[0] = data;
  1010. }
  1011. call Temp.getData();
  1012. return SUCCESS;
  1013. }
  1014.  
  1015. async event result_t Temp.dataReady(uint16_t data)
  1016. {
  1017. int i;
  1018. atomic
  1019. {
  1020. for (i = HIST_TABLE; i > 0; i--)
  1021. {
  1022. histTemp[i] = histTemp[i - 1];
  1023. }
  1024. histTemp[0] = data;
  1025. }
  1026. call Voltage.getData();
  1027. return SUCCESS;
  1028. }
  1029.  
  1030. async event result_t Voltage.dataReady(uint16_t data)
  1031. {
  1032. int i;
  1033. atomic
  1034. {
  1035. for (i = HIST_TABLE; i > 0; i--)
  1036. {
  1037. histVoltage[i] = histVoltage[i - 1];
  1038. }
  1039. histVoltage[0] = data;
  1040. }
  1041. return SUCCESS;
  1042. }
  1043. }
  1044.  
  1045.  
Parsed in 0.142 seconds, using GeSHi 1.0.7.20