{"text": "//\n// Copyright Jason Rice 2017\n// Distributed under the Boost Software License, Version 1.0.\n// (See accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n//\n#ifndef NBDL_WEBSOCKET_DETAIL_SEND_HANDSHAKE_RESPONSE_HPP\n#define NBDL_WEBSOCKET_DETAIL_SEND_HANDSHAKE_RESPONSE_HPP\n\n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\nnamespace nbdl::websocket::detail\n{\n namespace asio = boost::asio;\n using asio::ip::tcp;\n using namespace std::string_view_literals;\n using std::string_view;\n\n inline std::string generate_accept_token(std::string const& key)\n {\n namespace sha1 = nbdl::detail::beast_sha1;\n using nbdl::util::base64_encode;\n\n constexpr string_view guid = \"258EAFA5-E914-47DA-95CA-C5AB0DC85B11\"sv;\n std::array digest{};\n sha1::sha1_context ctx{};\n\n sha1::init(ctx);\n sha1::update(ctx, key.data(), key.length());\n sha1::update(ctx, guid.data(), guid.size());\n sha1::finish(ctx, digest.data());\n\n return base64_encode(digest);\n }\n\n struct send_handshake_response_fn\n {\n template \n auto operator()(Resolver& resolver, tcp::socket& socket, handshake_info_t const& handshake_info)\n {\n auto const& [websocket_key, cookies] = handshake_info;\n accept_token = generate_accept_token(websocket_key);\n\n const Buffers buffers{{\n asio::buffer(response_start)\n , asio::buffer(accept_token)\n , asio::buffer(response_end)\n }};\n\n asio::async_write(socket, buffers, [&, cookies = cookies](std::error_code error_code, std::size_t)\n {\n if (error_code)\n {\n resolver.reject(error_code);\n }\n else\n {\n resolver.resolve(socket, detail::get_auth_token(cookies));\n }\n });\n }\n\n private:\n\n using Buffers = std::array;\n\n static constexpr string_view response_start = string_view(\n \"HTTP/1.1 101 Switching Protocols\"\n \"\\r\\n\"\n \"Upgrade: websocket\"\n \"\\r\\n\"\n \"Connection: Upgrade\"\n \"\\r\\n\"\n \"Sec-WebSocket-Accept: \"\n );\n static constexpr string_view 
response_end = string_view(\"\\r\\n\\r\\n\");\n\n std::string accept_token{};\n };\n\n constexpr auto send_handshake_response = [] { return nbdl::promise(send_handshake_response_fn{}); };\n}\n\n#endif\n", "meta": {"hexsha": "098626231263d533bdbb27b7e348473b47714bea", "size": 2553, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/nbdl/websocket/detail/send_handshake_response.hpp", "max_stars_repo_name": "ricejasonf/nbdl", "max_stars_repo_head_hexsha": "ae63717c96ab2c36107bc17b2b00115f96e9d649", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 47.0, "max_stars_repo_stars_event_min_datetime": "2016-06-20T01:41:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-16T10:53:27.000Z", "max_issues_repo_path": "include/nbdl/websocket/detail/send_handshake_response.hpp", "max_issues_repo_name": "ricejasonf/nbdl", "max_issues_repo_head_hexsha": "ae63717c96ab2c36107bc17b2b00115f96e9d649", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 21.0, "max_issues_repo_issues_event_min_datetime": "2015-11-12T23:05:47.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-17T19:01:40.000Z", "max_forks_repo_path": "include/nbdl/websocket/detail/send_handshake_response.hpp", "max_forks_repo_name": "ricejasonf/nbdl", "max_forks_repo_head_hexsha": "ae63717c96ab2c36107bc17b2b00115f96e9d649", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 6.0, "max_forks_repo_forks_event_min_datetime": "2015-11-12T21:23:29.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-09T17:54:25.000Z", "avg_line_length": 27.4516129032, "max_line_length": 104, "alphanum_fraction": 0.6811594203, "num_tokens": 651, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.6039318337259583, "lm_q2_score": 0.3311197462295937, "lm_q1q2_score": 0.1999737555233125}} {"text": "//\n// statistics.hpp\n// ndnrtc\n//\n// Copyright 2013 Regents of the University of California\n// For licensing details see the LICENSE file.\n//\n// Author: Peter Gusev \n// Created: 8/21/13\n//\n\n#ifndef ndnrtc_statistics_h\n#define ndnrtc_statistics_h\n\n#include \n#include \n#include \n#include \n#include \n\n#include \n\nnamespace ndnrtc {\n namespace statistics {\n enum class Indicator {\n // general\n Timestamp, // NDN-RTC timestamp when statistics were captured\n \n // consumer\n // buffer\n AcquiredNum, // PlaybackQueue\n AcquiredKeyNum, // PlaybackQueue\n DroppedNum, // Buffer\n DroppedKeyNum, // Buffer\n AssembledNum, // Buffer\n AssembledKeyNum, // Buffer\n RecoveredNum, // VideoPlayout\n RecoveredKeyNum, // VideoPlayout\n RescuedNum, // VideoPlayout\n RescuedKeyNum, // VideoPlayout\n IncompleteNum, // Buffer\n IncompleteKeyNum, // Buffer\n BufferTargetSize, // RemoteStreamImpl\n BufferPlayableSize, // PlaybackQueue\n BufferReservedSize, // PlaybackQueue\n CurrentProducerFramerate, // BufferControl\n VerifySuccess, // SampleValidator\n VerifyFailure, // SampleValidator\n LatencyControlStable, // LatencyControl\n LatencyControlCommand, // LatencyControl\n FrameFetchAvgDelta, // Buffer\n FrameFetchAvgKey, // Buffer\n \n // playout\n LastPlayedNo, // VideoPlayout\n LastPlayedDeltaNo, // VideoPlayout\n LastPlayedKeyNo, // VideoPlayout\n PlayedNum, // VideoPlayout\n PlayedKeyNum, // VideoPlayout\n SkippedNum, // VideoPlayout\n LatencyEstimated,\n \n // pipeliner\n SegmentsDeltaAvgNum, // SampleEstimator\n SegmentsKeyAvgNum, // SampleEstimator\n SegmentsDeltaParityAvgNum, // SampleEstimator\n SegmentsKeyParityAvgNum, // SampleEstimator\n RtxNum,\n RebufferingsNum, // PipelineControlStateMachine\n RequestedNum, // Pipeliner\n RequestedKeyNum, // Pipeliner\n DW, // InterestControl\n W, // InterestControl\n SegmentsReceivedNum, // 
SegmentController\n TimeoutsNum, // SegmentController\n NacksNum, // SegmentController\n AppNackNum, // SegmentController\n Darr, // LatencyControl\n BytesReceived, // SegmentController\n RawBytesReceived, // SegmentController\n State, // PipelineControlStateMachine\n DoubleRtFrames, // Pipeliner\n DoubleRtFramesKey, // Pipeliner\n \n // DRD estimator\n DrdOriginalEstimation, // BufferControl\n DrdCachedEstimation, // BufferControl\n \n // interest queue\n QueueSize, // InterestQueue\n InterestsSentNum, // InterestQueue\n \n // producer\n //media thread\n BytesPublished,\n RawBytesPublished,\n PublishedSegmentsNum,\n ProcessedNum,\n PublishedNum,\n PublishedKeyNum,\n InterestsReceivedNum,\n SignNum,\n \n // encoder\n // DroppedNum, // borrowed from buffer (above)\n EncodedNum,\n \n // capturer\n CapturedNum\n };\n \n class StatisticsStorage {\n public:\n typedef std::map StatRepo;\n static const std::map IndicatorNames;\n static const std::map IndicatorKeywords;\n \n static StatisticsStorage*\n createConsumerStatistics()\n { return new StatisticsStorage(StatisticsStorage::ConsumerStatRepo); }\n \n static StatisticsStorage*\n createProducerStatistics()\n { return new StatisticsStorage(StatisticsStorage::ProducerStatRepo); }\n \n StatisticsStorage(const StatisticsStorage& statisticsStorage):\n inidicatorNames_(StatisticsStorage::IndicatorNames),\n indicators_(statisticsStorage.getIndicators()){}\n ~StatisticsStorage(){}\n \n // may throw an exception if indicator is not present in the repo\n void\n updateIndicator(const statistics::Indicator& indicator,\n const double& value) throw(std::out_of_range);\n \n StatRepo\n getIndicators() const;\n \n StatisticsStorage&\n operator=(const StatisticsStorage& other)\n {\n indicators_ = other.getIndicators();\n return *this;\n }\n \n double&\n operator[](const statistics::Indicator& indicator)\n { return indicators_.at(indicator); }\n \n friend std::ostream& operator<<(std::ostream& os,\n const StatisticsStorage& storage)\n {\n 
for (auto& it:storage.indicators_)\n {\n try {\n os << std::fixed\n << storage.inidicatorNames_.at(it.first) << \"\\t\"\n << std::setprecision(2) << it.second << std::endl;\n }\n catch (...) {\n }\n }\n \n return os;\n }\n private:\n StatisticsStorage(const StatRepo& indicators):indicators_(indicators){}\n \n const std::map inidicatorNames_;\n static const StatRepo ConsumerStatRepo;\n static const StatRepo ProducerStatRepo;\n StatRepo indicators_;\n };\n\n class StatObject {\n public:\n StatObject(const boost::shared_ptr& statStorage):statStorage_(statStorage){}\n \n virtual ~StatObject(){}\n \n protected:\n boost::shared_ptr statStorage_;\n };\n };\n}\n\n#endif\n", "meta": {"hexsha": "754958823f7169517d45aee4e9706274912b35e0", "size": 7778, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "cpp/include/statistics.hpp", "max_stars_repo_name": "luckiday/ndnrtc", "max_stars_repo_head_hexsha": "ea224ce8d9f01d164925448c7424cf0f0caa4b07", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cpp/include/statistics.hpp", "max_issues_repo_name": "luckiday/ndnrtc", "max_issues_repo_head_hexsha": "ea224ce8d9f01d164925448c7424cf0f0caa4b07", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cpp/include/statistics.hpp", "max_forks_repo_name": "luckiday/ndnrtc", "max_forks_repo_head_hexsha": "ea224ce8d9f01d164925448c7424cf0f0caa4b07", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.3723404255, "max_line_length": 107, "alphanum_fraction": 0.4456158395, "num_tokens": 1283, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.519521321952093, "lm_q2_score": 0.3849121444839335, "lm_q1q2_score": 0.19997006613770815}} {"text": "#pragma once\r\n\r\n#include \r\n#include \r\n#include \r\n\r\nnamespace dynaman {\r\n\tclass arfModelLinearBase\r\n\t{\r\n\tpublic:\r\n\t\t~arfModelLinearBase();\r\n\t\tvirtual Eigen::MatrixXf arf(const Eigen::MatrixXf& posRel, const Eigen::MatrixXf& eulerAnglesAUTD) = 0;\r\n\t\tvirtual Eigen::MatrixXf arf(const Eigen::MatrixXf& posRel, const std::vector& rots) = 0;\r\n\t};\r\n\r\n\tclass arfModelTabular : public arfModelLinearBase\r\n\t{\r\n\tprivate:\r\n\t\tEigen::VectorXf m_tableDistance;\r\n\t\tEigen::VectorXf m_tableAngle;\r\n\t\tEigen::MatrixXf m_tableForce;\r\n\r\n\t\tEigen::MatrixXf arfFromDirections(const Eigen::MatrixXf& posRel, const Eigen::MatrixXf& directionsAutd);\r\n\r\n\tpublic:\r\n\t\tarfModelTabular(const Eigen::VectorXf& tableDistance, const Eigen::VectorXf& tableAngle, const Eigen::MatrixXf& tableForce);\r\n\t\tEigen::MatrixXf arf(const Eigen::MatrixXf& posRel, const Eigen::MatrixXf& eulerAnglesAutd) override;\r\n\t\tEigen::MatrixXf arf(const Eigen::MatrixXf& posRel, const std::vector& rots) override;\r\n\t};\r\n\r\n\tclass arfModelFocusSphereR100 : public arfModelTabular\r\n\t{\r\n\tpublic:\r\n\t\tarfModelFocusSphereR100();\r\n\r\n\tprivate:\r\n\t\tEigen::VectorXf tableDistances() const;\r\n\t\tEigen::VectorXf tableAngles() const;\r\n\t\tEigen::MatrixXf tableForces() const;\r\n\t};\r\n\r\n\tclass arfModelFocusSphereR50 : public arfModelTabular {\r\n\tpublic:\r\n\t\tarfModelFocusSphereR50();\r\n\r\n\tprivate:\r\n\t\tEigen::VectorXf tableDistances() const;\r\n\t\tEigen::VectorXf tableAngles() const;\r\n\t\tEigen::MatrixXf tableForces() const;\r\n\t};\r\n}\r\n", "meta": {"hexsha": "8321aa61615608c2ed447219a5123ecdb768a947", "size": 1512, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "inc/arfModel.hpp", "max_stars_repo_name": "shinolab/dynamic-manipulation", "max_stars_repo_head_hexsha": 
"d43bae688cecf87e15605ed6a9dbc80a782d72fc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "inc/arfModel.hpp", "max_issues_repo_name": "shinolab/dynamic-manipulation", "max_issues_repo_head_hexsha": "d43bae688cecf87e15605ed6a9dbc80a782d72fc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inc/arfModel.hpp", "max_forks_repo_name": "shinolab/dynamic-manipulation", "max_forks_repo_head_hexsha": "d43bae688cecf87e15605ed6a9dbc80a782d72fc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0769230769, "max_line_length": 127, "alphanum_fraction": 0.7321428571, "num_tokens": 446, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.519521321952093, "lm_q2_score": 0.3849121444839335, "lm_q1q2_score": 0.19997006613770815}} {"text": "#include \n#include \n#include \n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n\n#include \n#include \n// PCL specific includes\n#include \n#include \n#include \n\nros::Publisher pub;\n\nvoid\ncloud_cb (const sensor_msgs::PointCloud2ConstPtr& input)\n{\n // Convert the sensor_msgs/PointCloud2 data to pcl/PointCloud\n pcl::PointCloud ::Ptr cloud (new pcl::PointCloud );\n pcl::fromROSMsg (*input, *cloud);\n\n\n std::cout << \"test: \" << cloud->points[4] << std::endl;\n std::cout << \"test2: \" << cloud->points[4].rgb << cloud->points[4].r << cloud->points[4].g << cloud->points[4].b << std::endl;\n\n pcl::search::Search ::Ptr tree (new pcl::search::KdTree);\n\n pcl::PointCloud ::Ptr cloud_filtered (new pcl::PointCloud );\n // Perform the actual filtering\n pcl::VoxelGrid sor;\n sor.setInputCloud (cloud);\n float voxel_size = 0.04;\n sor.setMinimumPointsNumberPerVoxel(3);\n sor.setLeafSize (voxel_size, voxel_size, voxel_size);\n sor.filter (*cloud_filtered);\n\n pcl::IndicesPtr indices (new std::vector );\n pcl::PassThrough pass;\n pass.setInputCloud (cloud_filtered);\n pass.setFilterFieldName (\"z\");\n pass.setFilterLimits (0.0, 5.0);\n pass.filter (*indices);\n\n pcl::RegionGrowingRGB reg;\n reg.setInputCloud (cloud_filtered);\n reg.setIndices (indices);\n reg.setSearchMethod (tree);\n reg.setDistanceThreshold (0.06);\n reg.setPointColorThreshold (6);\n reg.setRegionColorThreshold (2);\n reg.setMinClusterSize (60);\n reg.setMaxClusterSize (5000);\n\n std::vector clusters;\n reg.extract (clusters);\n\n pcl::PointCloud ::Ptr colored_cloud = reg.getColoredCloud ();\n\n sensor_msgs::PointCloud2 output;\n\n pcl::toROSMsg(*colored_cloud,output);\n output.header.frame_id = input->header.frame_id;\n\n // Publish the data\n pub.publish (output);\n}\n\nint\nmain (int argc, 
char** argv)\n{\n // Initialize ROS\n ros::init (argc, argv, \"segment_rgb\");\n ros::NodeHandle nh;\n\n // Create a ROS subscriber for the input point cloud\n ros::Subscriber sub = nh.subscribe (\"/camera/depth/color/points\", 1, cloud_cb);\n\n // Create a ROS publisher for the output point cloud\n pub = nh.advertise (\"output_rgb\", 1);\n\n // Spin\n ros::spin ();\n}\n\n\n\n/*\ntypedef pcl::PointCloud PointCloud;\n\nvoid callback(const PointCloud::ConstPtr& msg)\n{\n printf (\"Cloud: width = %d, height = %d\\n\", msg->width, msg->height);\n\n pcl::PointCloud ::Ptr cloud (new pcl::PointCloud );\n// BOOST_FOREACH (cloud, msg->points);\n\n// pcl::visualization::CloudViewer viewer (\"Cluster viewer\");\n// viewer.showCloud (cloud);\n\n// pcl::search::Search ::Ptr tree (new pcl::search::KdTree);\n\n// pcl::IndicesPtr indices (new std::vector );\n// pcl::PassThrough pass;\n// pass.setInputCloud (cloud);\n// pass.setFilterFieldName (\"z\");\n// pass.setFilterLimits (0.0, 5.0);\n// pass.filter (*indices);\n\n// pcl::RegionGrowingRGB reg;\n// reg.setInputCloud (cloud);\n// reg.setIndices (indices);\n// reg.setSearchMethod (tree);\n// reg.setDistanceThreshold (10);\n// reg.setPointColorThreshold (6);\n// reg.setRegionColorThreshold (5);\n// reg.setMinClusterSize (600);\n\n// std::vector clusters;\n// reg.extract (clusters);\n\n// pcl::PointCloud ::Ptr colored_cloud = reg.getColoredCloud ();\n// pcl::visualization::CloudViewer viewer (\"Cluster viewer\");\n// viewer.showCloud (colored_cloud);\n\n}\n\nint main(int argc, char** argv)\n{\n ros::init(argc, argv, \"sub_pcl\");\n ros::NodeHandle nh;\n ros::Subscriber sub = nh.subscribe(\"points2\", 1, callback);\n ros::spin();\n}\n*/\n", "meta": {"hexsha": "fdd080facf1e868de9fd048e113c79e866b00d41", "size": 4272, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "camera/src/segment_rgb.cpp", "max_stars_repo_name": "miriamschmelzer/SegSlam", "max_stars_repo_head_hexsha": "4b4f8226636a0b3e04b2dad0be244681548fd44a", 
"max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2022-02-11T13:55:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T13:55:52.000Z", "max_issues_repo_path": "camera/src/segment_rgb.cpp", "max_issues_repo_name": "miriamschmelzer/SegSlam", "max_issues_repo_head_hexsha": "4b4f8226636a0b3e04b2dad0be244681548fd44a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "camera/src/segment_rgb.cpp", "max_forks_repo_name": "miriamschmelzer/SegSlam", "max_forks_repo_head_hexsha": "4b4f8226636a0b3e04b2dad0be244681548fd44a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8648648649, "max_line_length": 128, "alphanum_fraction": 0.7052902622, "num_tokens": 1163, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.519521321952093, "lm_q2_score": 0.3849121444839335, "lm_q1q2_score": 0.19997006613770815}} {"text": "#include \n#include \n#include \"patternsearch.h\"\n#include \n#include \"main.h\"\n#include \n#include \n#include \"util.h\"\n\nnamespace patternsearch\n{\n\t#define PSUEDORANDOM_DATA_SIZE 30 //2^30 = 1GB\n #define PSUEDORANDOM_DATA_CHUNK_SIZE 6 //2^6 = 64 bytes //must be same as SHA512_DIGEST_LENGTH 64\n #define L2CACHE_TARGET 12 // 2^12 = 4096 bytes\n #define AES_ITERATIONS 15\n\n\t// useful constants\n uint32_t psuedoRandomDataSize=(1< > *results, boost::mutex *mtx){\n\t\t//Allocate temporary memory\n\t\tunsigned char *cacheMemoryOperatingData;\n\t\tunsigned char *cacheMemoryOperatingData2;\t\n\t\tcacheMemoryOperatingData=new unsigned char[cacheMemorySize+16];\n\t\tcacheMemoryOperatingData2=new unsigned char[cacheMemorySize];\n\t\n\t\t//Create references to data as 32 bit arrays\n\t\tuint32_t* cacheMemoryOperatingData32 = (uint32_t*)cacheMemoryOperatingData;\n\t\tuint32_t* cacheMemoryOperatingData322 = (uint32_t*)cacheMemoryOperatingData2;\n //uint32_t* mainMemoryPsuedoRandomData32 = (uint32_t*)mainMemoryPsuedoRandomData;\n\t\t\n\t\t//Search for pattern in psuedorandom data\n\t\t\n\t\tunsigned char key[32] = {0};\n\t\tunsigned char iv[AES_BLOCK_SIZE];\n\t\tint outlen1, outlen2;\n unsigned int useEVP = GetArg(\"-useevp\", 1);\n\t\t\n\t\t//Iterate over the data\n\t\tint searchNumber=comparisonSize/totalThreads;\n\t\tint startLoc=threadNumber*searchNumber;\n\t\tfor(uint32_t k=startLoc;k > pattern_search( uint256 midHash, char *mainMemoryPsuedoRandomData, int totalThreads){\n\n boost::this_thread::disable_interruption di;\n\n\t\tstd::vector< std::pair > results;\n\t\t\n //clock_t t1 = clock();\n\t\tboost::thread_group* sha512Threads = new boost::thread_group();\n\t\tchar *threadsComplete;\n\t\tthreadsComplete=new char[totalThreads];\n\t\tfor (int i = 0; i < totalThreads; i++){\n\t\t\tsha512Threads->create_thread(boost::bind(&SHA512Filler, 
mainMemoryPsuedoRandomData, i,totalThreads,midHash));\n\t\t}\n\t\t//Wait for all threads to complete\n\t\tsha512Threads->join_all();\n\t\t\n //clock_t t2 = clock();\n //LogPrintf(\"create sha512 data %d\\n\",((double)t2-(double)t1)/CLOCKS_PER_SEC);\n\n\t\tboost::mutex mtx;\n\t\tboost::thread_group* aesThreads = new boost::thread_group();\n\t\tthreadsComplete=new char[totalThreads];\n\t\tfor (int i = 0; i < totalThreads; i++){\n\t\t\taesThreads->create_thread(boost::bind(&aesSearch, mainMemoryPsuedoRandomData, i,totalThreads,&results, &mtx));\n\t\t}\n\t\t//Wait for all threads to complete\n\t\taesThreads->join_all();\n\n //clock_t t3 = clock();\n //LogPrintf(\"aes search %d\\n\",((double)t3-(double)t2)/CLOCKS_PER_SEC);\n\n\t\tdelete aesThreads;\n\t\tdelete sha512Threads;\n boost::this_thread::restore_interruption ri(di);\n\t\treturn results;\n\t}\n\t\n\t\n\t\n bool pattern_verify( uint256 midHash, uint32_t a, uint32_t b ){\n\t\t//return false;\n\t\t\n\t\tclock_t t1 = clock();\n\t\t\n\t\t//Basic check\n if( a >= comparisonSize ) return false;\n\t\t\n\t\t//Allocate memory required\n\t\tunsigned char *cacheMemoryOperatingData;\n\t\tunsigned char *cacheMemoryOperatingData2;\t\n\t\tcacheMemoryOperatingData=new unsigned char[cacheMemorySize+16];\n\t\tcacheMemoryOperatingData2=new unsigned char[cacheMemorySize];\n\t\tuint32_t* cacheMemoryOperatingData32 = (uint32_t*)cacheMemoryOperatingData;\n\t\tuint32_t* cacheMemoryOperatingData322 = (uint32_t*)cacheMemoryOperatingData2;\n\t\t\n\t\tunsigned char hash_tmp[sizeof(midHash)];\n\t\tmemcpy((char*)&hash_tmp[0], (char*)&midHash, sizeof(midHash) );\n\t\tuint32_t* index = (uint32_t*)hash_tmp;\n\t\t\n\t\tuint32_t startLocation=a*cacheMemorySize/chunkSize;\n\t\tuint32_t finishLocation=startLocation+(cacheMemorySize/chunkSize);\n\t\t\t\n //copy data to first l2 cache\n\t\tfor( uint32_t i = startLocation; i < finishLocation; i++){\n\t\t\t*index = i;\n\t\t\tSHA512((unsigned char*)hash_tmp, sizeof(hash_tmp), (unsigned 
char*)&(cacheMemoryOperatingData[(i-startLocation)*chunkSize]));\n\t\t}\n\t\t\n unsigned int useEVP = GetArg(\"-useevp\", 1);\n\n //allow override for AESNI testing\n /*if(midHash==0){\n useEVP=0;\n }else if(midHash==1){\n useEVP=1;\n }*/\n\n unsigned char key[32] = {0};\n\t\tunsigned char iv[AES_BLOCK_SIZE];\n\t\tint outlen1, outlen2;\n\t\t\n\t\t//memset(cacheMemoryOperatingData2,0,cacheMemorySize);\n\t\tfor(int j=0;j\n#include \n#include \n#include \n#include \"llvm/Support/raw_ostream.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"helper.h\"\n#include \"klee/ExecutionState.h\"\n#include \"Executor.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"TeleScopeHandler.h\"\n\nusing namespace klee;\nusing namespace std;\nusing namespace llvm;\n\n//==============================================\n//============= TeleScopeHandler ===============\n//==============================================\n\nTeleScopeHandler::TeleScopeHandler(int n)\n{\n numLoop = n;\n}\n\nint TeleScopeHandler::ProcessAllPaths(vector states)\n{\n LOG(LOG_MASK_TS, \"Telescoping done, final result is %g\",\n TeleScopeData::finalResult / numLoop);\n return 0;\n}\n\nint TeleScopeHandler::ts_init_handler(ExecutionState *state,\n int small_thres, int real_thres)\n{\n state->tsData.ts_init(state, small_thres, real_thres);\n return 0;\n}\n\nint TeleScopeHandler::ts_pkt_handler(ExecutionState *state,\n int pktID, int cur_reg)\n{\n assert(state == state->tsData.getState());\n state->tsData.ts_pkt(pktID, cur_reg);\n return 0;\n}\n\nint TeleScopeHandler::ts_handler(ExecutionState *state, int cur_reg)\n{\n assert(state == state->tsData.getState());\n state->tsData.ts_final(cur_reg);\n return 0;\n}\n\nint TeleScopeHandler::ts_cmin_pkt_handler(ExecutionState *state,\n int pktID)\n{\n assert(state == state->tsData.getState());\n int cmin_max = state->cmin->getMaxValue();\n state->tsData.ts_pkt(pktID, cmin_max);\n return 0;\n}\n\nint 
TeleScopeHandler::ts_cmin_handler(ExecutionState *state)\n{\n assert(state == state->tsData.getState());\n int cmin_max = state->cmin->getMaxValue();\n state->tsData.ts_final(cmin_max);\n return 0;\n}\n\n\n//==============================================\n//============= TeleScopeData ==================\n//==============================================\n\ndouble TeleScopeData::finalResult = 0.0;\n\nTeleScopeData::TeleScopeData():\n reg(0),\n smallThres(0),\n realThres(0),\n pktProbs(0)\n{}\n\nTeleScopeData::TeleScopeData(const TeleScopeData &_tsData):\n state(_tsData.state),\n reg(_tsData.reg),\n smallThres(_tsData.smallThres),\n realThres(_tsData.realThres),\n perPktProbs(_tsData.perPktProbs),\n pktProbs(_tsData.pktProbs),\n pktRegs(_tsData.pktRegs)\n{\n\n}\n\nvoid TeleScopeData::dump()\n{\n uint32_t i;\n LOG(LOG_MASK_TS, \"state %d has %ld pkt probs\", state->id, perPktProbs.size());\n for (i = 0; i < pktProbs.size(); i ++) {\n LOG(LOG_MASK_TS, \"pkt[%d] prob=%g\", i, perPktProbs[i])\n }\n\n LOG(LOG_MASK_TS, \"state %d has %ld pkt regs\", state->id, pktRegs.size());\n for (i = 0; i < pktRegs.size(); i ++) {\n LOG(LOG_MASK_TS, \"pkt[%d] reg=%d\", i, pktRegs[i])\n }\n}\n\nint TeleScopeData::ts_init(ExecutionState *s, int small_thres, int real_thres)\n{\n LOG(LOG_MASK_TS, \"Initing TsData, small thres=%d, real thres=%d\",\n small_thres, real_thres);\n state = s;\n smallThres = small_thres;\n realThres = real_thres;\n return 0;\n}\n\nint TeleScopeData::ts_pkt(int pktID, int cur_reg)\n{\n //LOG(LOG_MASK_TS, \"Updating TsData for pkt=%d, pathProb=%g, reg: %d->%d\",\n // pktID, state->getPathProb(), reg, cur_reg);\n\n if (int(pktRegs.size()) != pktID) {\n WARN(\"pktregs size=%ld, pktID=%d\\n\", pktRegs.size(), pktID);\n assert(0);\n }\n assert(int(pktRegs.size()) == pktID);\n assert(int(perPktProbs.size()) == pktID);\n assert(int(pktProbs.size()) == pktID);\n\n reg = cur_reg;\n\n // TODO; Update per-packet constraints and probs\n double pathProb = 
state->getPathProb();\n pktProbs.push_back(pathProb);\n if (pktID == 0) {\n perPktProbs.push_back(pathProb);\n } else {\n double lastProb = pktProbs[pktID - 1];\n perPktProbs.push_back(pathProb / lastProb);\n }\n pktRegs.push_back(cur_reg);\n\n return 0;\n}\n\nbool AlmostEqual2sComplement(float A, float B, int maxUlps)\n{\n // Make sure maxUlps is non-negative and small enough that the\n // default NAN won't compare as equal to anything.\n assert(maxUlps > 0 && maxUlps < 4 * 1024 * 1024);\n int aInt = *(int*)&A;\n // Make aInt lexicographically ordered as a twos-complement int\n if (aInt < 0)\n aInt = 0x80000000 - aInt;\n // Make bInt lexicographically ordered as a twos-complement int\n int bInt = *(int*)&B;\n if (bInt < 0)\n bInt = 0x80000000 - bInt;\n int intDiff = abs(aInt - bInt);\n if (intDiff <= maxUlps)\n return true;\n return false;\n}\n\nstatic int checkVectorSame(vector v)\n{\n int ret = 0;\n double first = v[0];\n for (auto val: v) {\n if (!AlmostEqual2sComplement(val, first, 1)) {\n ret = 1;\n }\n }\n if (ret == 0) {\n return 0;\n }\n\n assert(ret == 1);\n double second = v[1];\n for (uint32_t i = 2; i < v.size(); i ++) {\n if (!AlmostEqual2sComplement(v[i], second, 1)) {\n ret = 2;\n }\n }\n return ret;\n}\n\nTeleScopeData::PeriodicityType TeleScopeData::ts_periodicity()\n{\n // TODO: for now, we only distinguish two types by comparing\n // TODO: Also check per-packet constraint for periodicity\n\n int ret = checkVectorSame(perPktProbs);\n if (ret == 0) {\n return REPEAT_ALL;\n } else if (ret == 1) {\n return REPEAT_FROM_SECOND;\n } else {\n return NO_PERIODICITY;\n }\n}\n\nint TeleScopeData::ts_final(int cur_reg)\n{\n assert(reg == cur_reg);\n //LOG(LOG_MASK_TS, \"Telescoping state %d with reg=%d, smallThres=%d, real=%d\",\n // state->id, reg, smallThres, realThres);\n\n // check if this state is telescopable\n if (reg == smallThres) {\n LOG(LOG_MASK_TS, \"state %d (cur_reg = smallThres = %d) \"\n \"seems telescopable\", state->id, reg);\n dump();\n\n 
// infer the final prob result\n double result = 0.0;\n TeleScopeData::PeriodicityType ptype = ts_periodicity();\n\n if (ptype == NO_PERIODICITY) {\n LOG(LOG_MASK_TS, \"state %d turns out not telescopable\", state->id);\n } else {\n if (ptype == REPEAT_ALL) {\n result = pow(perPktProbs[0], realThres);\n } else if (ptype == REPEAT_FROM_SECOND) {\n result = perPktProbs[0] * pow(perPktProbs[1], realThres-1);\n }\n LOG(LOG_MASK_TS, \"telescoping state %d inferred prob of reg==%d is %g\",\n state->id, realThres, result);\n finalResult += result;\n }\n } else {\n //LOG(LOG_MASK_TS, \"state %d (cur_reg=%d smallThres=%d) \"\n // \"seems not telescopable\", state->id, cur_reg, smallThres);\n }\n\n return 0;\n}\n", "meta": {"hexsha": "28e6f733d816b25b575c8eac2557a139099cc607", "size": 6741, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "lib/Core/TeleScopeHandler.cpp", "max_stars_repo_name": "qiaokang92/P4wn", "max_stars_repo_head_hexsha": "cd2418de2dff238f67508898e3bfdf2aae1889a4", "max_stars_repo_licenses": ["NCSA"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2020-12-26T07:18:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-11T23:46:13.000Z", "max_issues_repo_path": "lib/Core/TeleScopeHandler.cpp", "max_issues_repo_name": "qiaokang92/P4wn", "max_issues_repo_head_hexsha": "cd2418de2dff238f67508898e3bfdf2aae1889a4", "max_issues_repo_licenses": ["NCSA"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2021-05-25T03:54:19.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-25T04:22:29.000Z", "max_forks_repo_path": "lib/Core/TeleScopeHandler.cpp", "max_forks_repo_name": "qiaokang92/P4wn", "max_forks_repo_head_hexsha": "cd2418de2dff238f67508898e3bfdf2aae1889a4", "max_forks_repo_licenses": ["NCSA"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.75, "max_line_length": 81, "alphanum_fraction": 0.6105919003, 
"num_tokens": 1927, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.6297745935070806, "lm_q2_score": 0.3174262655876759, "lm_q1q2_score": 0.1999069973789492}} {"text": "//==============================================================================\n// Copyright 2003 - 2011 LASMEA UMR 6602 CNRS/Univ. Clermont II\n// Copyright 2009 - 2011 LRI UMR 8623 CNRS/Univ Paris Sud XI\n//\n// Distributed under the Boost Software License, Version 1.0.\n// See accompanying file LICENSE.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt\n//==============================================================================\n#ifndef BOOST_SIMD_PREDICATES_FUNCTIONS_SIMD_SSE_SSE2_IS_EQZ_HPP_INCLUDED\n#define BOOST_SIMD_PREDICATES_FUNCTIONS_SIMD_SSE_SSE2_IS_EQZ_HPP_INCLUDED\n#ifdef BOOST_SIMD_HAS_SSE2_SUPPORT\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace boost { namespace simd { namespace ext\n{\n BOOST_DISPATCH_IMPLEMENT ( is_eqz_\n , boost::simd::tag::sse2_\n , (A0)\n , ((simd_ < int64_\n , boost::simd::tag::sse_\n >\n ))\n )\n {\n typedef typename meta::as_logical::type result_type;\n\n BOOST_SIMD_FUNCTOR_CALL(1)\n {\n typedef typename dispatch::meta::downgrade::type base;\n\n const base tmp1 = boost::simd::bitwise_cast(is_eqz(boost::simd::bitwise_cast(a0)));\n const base tmp2 = details::shuffle<1,0,3,2>(tmp1);\n return boost::simd::bitwise_cast(b_and(tmp1, tmp2));\n }\n };\n} } }\n\n#endif\n#endif\n", "meta": {"hexsha": "c810834b6d372d17429e85c506476c688486df5e", "size": 1958, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "modules/boost/simd/base/include/boost/simd/predicates/functions/simd/sse/sse2/is_eqz.hpp", "max_stars_repo_name": "psiha/nt2", "max_stars_repo_head_hexsha": "5e829807f6b57b339ca1be918a6b60a2507c54d0", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 34.0, "max_stars_repo_stars_event_min_datetime": "2017-05-19T18:10:17.000Z", "max_stars_repo_stars_event_max_datetime": 
"2022-01-04T02:18:13.000Z", "max_issues_repo_path": "modules/boost/simd/base/include/boost/simd/predicates/functions/simd/sse/sse2/is_eqz.hpp", "max_issues_repo_name": "psiha/nt2", "max_issues_repo_head_hexsha": "5e829807f6b57b339ca1be918a6b60a2507c54d0", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules/boost/simd/base/include/boost/simd/predicates/functions/simd/sse/sse2/is_eqz.hpp", "max_forks_repo_name": "psiha/nt2", "max_forks_repo_head_hexsha": "5e829807f6b57b339ca1be918a6b60a2507c54d0", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2017-12-02T12:59:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-31T12:46:14.000Z", "avg_line_length": 42.5652173913, "max_line_length": 101, "alphanum_fraction": 0.5515832482, "num_tokens": 431, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO\n\n", "lm_q1_score": 0.5506073802837477, "lm_q2_score": 0.36296921241058616, "lm_q1q2_score": 0.199853527169048}} {"text": "/* methods.cpp */\n\n#include \"linalg.h\"\n#include \"methods.h\"\n#include \"structs.h\"\n\n#include \n#include \n#include \n#include \n#include \n\nextern \"C\"\n{\n void _ETS_IK(PyObject *ets, int n, double *q, double *Tep, double *ret)\n {\n // double E;\n double *Te = (double *)PyMem_RawCalloc(16, sizeof(double));\n double *e = (double *)PyMem_RawCalloc(6, sizeof(double));\n\n double *a = (double *)PyMem_RawCalloc(6, sizeof(double));\n // a[0] = 1.0;\n // a[1] = 4.0;\n // a[2] = 2.0;\n // a[3] = 5.0;\n // a[4] = 3.0;\n // a[5] = 6.0;\n a[0] = 1.0;\n a[1] = 2.0;\n a[2] = 3.0;\n a[3] = 4.0;\n a[4] = 5.0;\n a[5] = 6.0;\n double *b = (double *)PyMem_RawCalloc(12, sizeof(double));\n b[0] = 11.0;\n b[1] = 15.0;\n b[2] = 19.0;\n b[3] = 12.0;\n b[4] = 16.0;\n b[5] = 20.0;\n b[6] = 13.0;\n b[7] = 17.0;\n b[8] = 21.0;\n b[9] = 14.0;\n b[10] = 18.0;\n b[11] = 22.0;\n // b[0] = 11.0;\n // b[1] = 12.0;\n // b[2] = 13.0;\n // b[3] = 14.0;\n // b[4] = 15.0;\n // b[5] = 16.0;\n // b[6] = 17.0;\n // b[7] = 18.0;\n // b[8] = 19.0;\n // b[9] = 20.0;\n // b[10] = 21.0;\n // b[11] = 22.0;\n\n // double *U = (double *)PyMem_RawCalloc(16, sizeof(double));\n // double *invU = (double *)PyMem_RawCalloc(16, sizeof(double));\n // double *temp = (double *)PyMem_RawCalloc(16, sizeof(double));\n // double *ret = (double *)PyMem_RawCalloc(16, sizeof(double));\n // Py_ssize_t m;\n int arrived = 0, iter = 0;\n\n while (arrived == 0 && iter < 500)\n {\n // Current pose Te\n // _ETS_fkine(ets, q, (double *)NULL, NULL, Te);\n\n // Angle axis error e\n _angle_axis(Te, Tep, e);\n\n // Squared error E\n // E = 0.5 * e @ We @ e\n // E = 0.5 * (e[0] * e[0] + e[1] * e[1] + e[2] * e[2] + e[3] * e[3] + e[4] * e[4] + e[5] * e[5]);\n }\n\n // _ETS_fkine(ets, q, (double *)NULL, NULL, Te);\n\n // for (int i = 0; i < 2; i++)\n // {\n // for (int j = 0; j < 4; j++)\n // {\n // ret[i 
* 4 + j] = 0.0;\n // }\n // }\n\n // _mult_T(2, 3, 0, a, 3, 4, 0, b, ret);\n // _mult_T(3, 2, 1, a, 3, 4, 0, b, ret);\n // _mult_T(3, 2, 1, a, 4, 3, 1, b, ret);\n // _mult_T(2, 3, 0, a, 4, 3, 1, b, ret);\n\n // int j = 0;\n }\n\n void _ETS_hessian(int n, MapMatrixJc &J, MapMatrixHr &H)\n {\n for (int j = 0; j < n; j++)\n {\n for (int i = j; i < n; i++)\n {\n H.block<3, 1>(j * 6, i) = J.block<3, 1>(3, j).cross(J.block<3, 1>(0, i));\n H.block<3, 1>(j * 6 + 3, i) = J.block<3, 1>(3, j).cross(J.block<3, 1>(3, i));\n\n if (i != j)\n {\n H.block<3, 1>(i * 6, j) = H.block<3, 1>(j * 6, i);\n H.block<3, 1>(i * 6 + 3, j) = Eigen::Vector3d::Zero();\n }\n }\n }\n }\n\n void _ETS_jacob0(ETS *ets, double *q, double *tool, MapMatrixJc &eJ)\n {\n // ET *et;\n // double T[16];\n // MapMatrix4dc eT(T);\n // Matrix4dc U;\n // Matrix4dc invU;\n // Matrix4dc temp;\n // Matrix4dc ret;\n\n // int j = 0;\n\n // U = Eigen::Matrix4d::Identity();\n\n // // Get the forward kinematics into T\n // _ETS_fkine(ets, q, (double *)NULL, tool, eT);\n\n // for (int i = 0; i < ets->m; i++)\n // {\n // et = ets->ets[i];\n\n // if (et->isjoint)\n // {\n // _ET_T(et, &ret(0), q[et->jindex]);\n // temp = U * ret;\n // U = temp;\n\n // if (i == ets->m - 1 && tool != NULL)\n // {\n // MapMatrix4dc e_tool(tool);\n // temp = U * e_tool;\n // U = temp;\n // }\n\n // _inv(&U(0), &invU(0));\n // temp = invU * eT;\n\n // if (et->axis == 0)\n // {\n // eJ(Eigen::seq(0, 2), j) = U(Eigen::seq(0, 2), 2) * temp(1, 3) - U(Eigen::seq(0, 2), 1) * temp(2, 3);\n\n // eJ(Eigen::seq(3, 5), j) = U(Eigen::seq(0, 2), 0);\n // }\n // else if (et->axis == 1)\n // {\n // eJ(Eigen::seq(0, 2), j) = U(Eigen::seq(0, 2), 0) * temp(2, 3) - U(Eigen::seq(0, 2), 2) * temp(0, 3);\n // eJ(Eigen::seq(3, 5), j) = U(Eigen::seq(0, 2), 1);\n // }\n // else if (et->axis == 2)\n // {\n // eJ(Eigen::seq(0, 2), j) = U(Eigen::seq(0, 2), 1) * temp(0, 3) - U(Eigen::seq(0, 2), 0) * temp(1, 3);\n // eJ(Eigen::seq(3, 5), j) = U(Eigen::seq(0, 2), 2);\n // }\n 
// else if (et->axis == 3)\n // {\n // eJ(Eigen::seq(0, 2), j) = U(Eigen::seq(0, 2), 0);\n // eJ(Eigen::seq(3, 5), j) = Eigen::Vector3d::Zero();\n // }\n // else if (et->axis == 4)\n // {\n // eJ(Eigen::seq(0, 2), j) = U(Eigen::seq(0, 2), 1);\n // eJ(Eigen::seq(3, 5), j) = Eigen::Vector3d::Zero();\n // }\n // else if (et->axis == 5)\n // {\n // eJ(Eigen::seq(0, 2), j) = U(Eigen::seq(0, 2), 2);\n // eJ(Eigen::seq(3, 5), j) = Eigen::Vector3d::Zero();\n // }\n // j++;\n // }\n // else\n // {\n // _ET_T(et, &ret(0), q[et->jindex]);\n // temp = U * ret;\n // U = temp;\n // }\n // }\n\n ET *et;\n Eigen::Matrix tJ(6, ets->n);\n double T[16];\n MapMatrix4dc eT(T);\n Matrix4dc U = Eigen::Matrix4d::Identity();\n Matrix4dc invU;\n Matrix4dc temp;\n Matrix4dc ret;\n int j = ets->n - 1;\n\n if (tool != NULL)\n {\n Matrix4dc e_tool(tool);\n temp = e_tool * U;\n U = temp;\n }\n\n for (int i = ets->m - 1; i >= 0; i--)\n {\n et = ets->ets[i];\n\n if (et->isjoint)\n {\n if (et->axis == 0)\n {\n tJ(Eigen::seq(0, 2), j) = U(2, Eigen::seq(0, 2)) * U(1, 3) - U(1, Eigen::seq(0, 2)) * U(2, 3);\n tJ(Eigen::seq(3, 5), j) = U(0, Eigen::seq(0, 2));\n }\n else if (et->axis == 1)\n {\n tJ(Eigen::seq(0, 2), j) = U(0, Eigen::seq(0, 2)) * U(2, 3) - U(2, Eigen::seq(0, 2)) * U(0, 3);\n tJ(Eigen::seq(3, 5), j) = U(1, Eigen::seq(0, 2));\n }\n else if (et->axis == 2)\n {\n tJ(Eigen::seq(0, 2), j) = U(1, Eigen::seq(0, 2)) * U(0, 3) - U(0, Eigen::seq(0, 2)) * U(1, 3);\n tJ(Eigen::seq(3, 5), j) = U(2, Eigen::seq(0, 2));\n }\n else if (et->axis == 3)\n {\n tJ(Eigen::seq(0, 2), j) = U(0, Eigen::seq(0, 2));\n tJ(Eigen::seq(3, 5), j) = Eigen::Vector3d::Zero();\n }\n else if (et->axis == 4)\n {\n tJ(Eigen::seq(0, 2), j) = U(1, Eigen::seq(0, 2));\n tJ(Eigen::seq(3, 5), j) = Eigen::Vector3d::Zero();\n }\n else if (et->axis == 5)\n {\n tJ(Eigen::seq(0, 2), j) = U(2, Eigen::seq(0, 2));\n tJ(Eigen::seq(3, 5), j) = Eigen::Vector3d::Zero();\n }\n\n _ET_T(et, &ret(0), q[et->jindex]);\n temp = ret * U;\n U = temp;\n 
j--;\n }\n else\n {\n _ET_T(et, &ret(0), q[et->jindex]);\n temp = ret * U;\n U = temp;\n }\n }\n\n Eigen::Matrix ev;\n ev.topLeftCorner<3, 3>() = U.topLeftCorner<3, 3>();\n ev.topRightCorner<3, 3>() = Eigen::Matrix3d::Zero();\n ev.bottomLeftCorner<3, 3>() = Eigen::Matrix3d::Zero();\n ev.bottomRightCorner<3, 3>() = U.topLeftCorner<3, 3>();\n eJ = ev * tJ;\n }\n\n void _ETS_jacobe(ETS *ets, double *q, double *tool, MapMatrixJc &eJ)\n {\n ET *et;\n double T[16];\n MapMatrix4dc eT(T);\n Matrix4dc U = Eigen::Matrix4d::Identity();\n Matrix4dc invU;\n Matrix4dc temp;\n Matrix4dc ret;\n int j = ets->n - 1;\n\n if (tool != NULL)\n {\n Matrix4dc e_tool(tool);\n temp = e_tool * U;\n U = temp;\n }\n\n for (int i = ets->m - 1; i >= 0; i--)\n {\n et = ets->ets[i];\n\n if (et->isjoint)\n {\n if (et->axis == 0)\n {\n eJ(Eigen::seq(0, 2), j) = U(2, Eigen::seq(0, 2)) * U(1, 3) - U(1, Eigen::seq(0, 2)) * U(2, 3);\n eJ(Eigen::seq(3, 5), j) = U(0, Eigen::seq(0, 2));\n }\n else if (et->axis == 1)\n {\n eJ(Eigen::seq(0, 2), j) = U(0, Eigen::seq(0, 2)) * U(2, 3) - U(2, Eigen::seq(0, 2)) * U(0, 3);\n eJ(Eigen::seq(3, 5), j) = U(1, Eigen::seq(0, 2));\n }\n else if (et->axis == 2)\n {\n eJ(Eigen::seq(0, 2), j) = U(1, Eigen::seq(0, 2)) * U(0, 3) - U(0, Eigen::seq(0, 2)) * U(1, 3);\n eJ(Eigen::seq(3, 5), j) = U(2, Eigen::seq(0, 2));\n }\n else if (et->axis == 3)\n {\n eJ(Eigen::seq(0, 2), j) = U(0, Eigen::seq(0, 2));\n eJ(Eigen::seq(3, 5), j) = Eigen::Vector3d::Zero();\n }\n else if (et->axis == 4)\n {\n eJ(Eigen::seq(0, 2), j) = U(1, Eigen::seq(0, 2));\n eJ(Eigen::seq(3, 5), j) = Eigen::Vector3d::Zero();\n }\n else if (et->axis == 5)\n {\n eJ(Eigen::seq(0, 2), j) = U(2, Eigen::seq(0, 2));\n eJ(Eigen::seq(3, 5), j) = Eigen::Vector3d::Zero();\n }\n\n _ET_T(et, &ret(0), q[et->jindex]);\n temp = ret * U;\n U = temp;\n j--;\n }\n else\n {\n _ET_T(et, &ret(0), q[et->jindex]);\n temp = ret * U;\n U = temp;\n }\n }\n }\n\n void _ETS_fkine(ETS *ets, double *q, double *base, double *tool, MapMatrix4dc 
&e_ret)\n {\n ET *et;\n Matrix4dc temp;\n Matrix4dc current;\n\n if (base != NULL)\n {\n MapMatrix4dc e_base(base);\n current = e_base;\n }\n else\n {\n current = Eigen::Matrix4d::Identity();\n }\n\n for (int i = 0; i < ets->m; i++)\n {\n et = ets->ets[i];\n\n _ET_T(et, &e_ret(0), q[et->jindex]);\n temp = current * e_ret;\n current = temp;\n }\n\n if (tool != NULL)\n {\n MapMatrix4dc e_tool(tool);\n e_ret = current * e_tool;\n }\n else\n {\n e_ret = current;\n }\n }\n\n void _ET_T(ET *et, double *ret, double eta)\n {\n // Check if static and return static transform\n if (!et->isjoint)\n {\n _copy(et->T, ret);\n return;\n }\n\n if (et->isflip)\n {\n eta = -eta;\n }\n\n // Calculate ET trasform based on eta\n et->op(ret, eta);\n }\n\n} /* extern \"C\" */", "meta": {"hexsha": "0d9b8d0ac106b747241d41383ab500c9fcae6e17", "size": 11994, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "roboticstoolbox/core/methods.cpp", "max_stars_repo_name": "Russ76/robotics-toolbox-python", "max_stars_repo_head_hexsha": "4b3e82a6522757ffde1f83aef8d05b3ad475e9de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "roboticstoolbox/core/methods.cpp", "max_issues_repo_name": "Russ76/robotics-toolbox-python", "max_issues_repo_head_hexsha": "4b3e82a6522757ffde1f83aef8d05b3ad475e9de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "roboticstoolbox/core/methods.cpp", "max_forks_repo_name": "Russ76/robotics-toolbox-python", "max_forks_repo_head_hexsha": "4b3e82a6522757ffde1f83aef8d05b3ad475e9de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5969387755, "max_line_length": 123, 
"alphanum_fraction": 0.3490078373, "num_tokens": 4130, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.5506073655352404, "lm_q2_score": 0.3629691917376783, "lm_q1q2_score": 0.19985351043313856}} {"text": "#include \n#include \n#include \n#include \n#include \"objects_detection_lib.hpp\"\n\n#include \n#include \n#include \n\n#include \n#include \n\n#include \n\nusing namespace boost::filesystem;\nusing namespace cv;\nusing namespace std;\n\nDEFINE_string(config, \"\", \"configuration\");\nDEFINE_string(model, \"\", \"model\");\nDEFINE_string(input, \"\", \"input\");\nDEFINE_string(output, \"\", \"output\");\nDEFINE_int32(realtime, 1, \"realtime\");\nDEFINE_int32(batchSize, 8, \"batch size\");\nDEFINE_int32(width, 640, \"width\");\nDEFINE_int32(height, 480, \"height\");\nDEFINE_double(threshold, 0.5, \"threshold\");\nint main(int argc, char* argv[]) {\n gflags::ParseCommandLineFlags(&argc, &argv, true);\n \n // Initialize torch and load model\n sol::state lua;\n lua.open_libraries();\n\n lua.script(\"require 'torch'; require 'cunn'; require 'cudnn'; require 'image';\");\n lua.script(\"torch.setdefaulttensortype('torch.FloatTensor');\");\n //lua.script(\"cudnn.benchmark = true;\");\n lua.script(\"cudnn.fastest = true;\");\n\n lua.script(\"m = torch.load('\" + FLAGS_model + \"'):cuda();\");\n lua.script(\"m:evaluate();\");\n\n stringstream ss; ss << \"input = torch.FloatTensor(\" << FLAGS_batchSize << \", 3, 224, 224);\";\n lua.script(ss.str());\n\n THFloatTensor *input = lua[\"input\"];\n float *data = input->storage->data;\n\n // warm up\n auto start = chrono::high_resolution_clock::now();\n for (int i=0; i < 100; i++) {\n lua.script(\"result = m:forward(input:cuda()):float();\");\n }\n\n // Initialize region proposal \n objects_detection::init_objects_detection(FLAGS_config, false /*use_ground_plane*/, false /*use_stixels*/);\n\n // Set up input and start \n VideoCapture cap;\n deque files;\n map timestamped_files;\n if (FLAGS_input == 
\"\") {\n cap.open(1);\n cap.set(CV_CAP_PROP_FRAME_WIDTH, FLAGS_width);\n cap.set(CV_CAP_PROP_FRAME_HEIGHT, FLAGS_height);\n } else {\n path p(FLAGS_input);\n assert(exists(p));\n if (is_regular_file(p)) {\n files.push_back(p);\n } else {\n path log = p / \"log.txt\";\n if (FLAGS_realtime && exists(log)) {\n ifstream in(log.string(), fstream::in);\n int frame; double time;\n while(in >> frame >> time) {\n stringstream ss; ss << setw(4) << setfill('0') << frame+1 << \".png\";\n timestamped_files[time] = FLAGS_input + \"/\" + ss.str();\n }\n } else {\n FLAGS_realtime = 0;\n copy(directory_iterator(p), directory_iterator(), back_inserter(files));\n sort(files.begin(), files.end());\n }\n }\n }\n \n VideoWriter outputVideo, proposalVideo;\n if (FLAGS_output == \"\") {\n namedWindow(\"Proposals\");\n namedWindow(\"Output\");\n } else {\n mkdir(FLAGS_output.c_str(), 0777);\n outputVideo.open((path(FLAGS_output) / \"output.avi\").string(), CV_FOURCC('M', 'J', 'P', 'G'), 30, Size(FLAGS_width, FLAGS_height));\n proposalVideo.open((path(FLAGS_output) / \"proposal.avi\").string(), CV_FOURCC('M', 'J', 'P', 'G'), 30, Size(FLAGS_width, FLAGS_height));\n }\n\n double cur_timestamp = 0;\n double video_timestamp = 0;\n int num_proposals = 0;\n int num_frames = 0;\n while(true) {\n path filename;\n double timestamp;\n Mat image; \n if (FLAGS_input == \"\") {\n timestamp = 0;\n cap >> image;\n } else if (timestamped_files.size() > 0) {\n timestamp = cur_timestamp;\n auto it = --timestamped_files.upper_bound(timestamp);\n filename = it->second;\n cout << filename << endl;\n image = imread(filename.string(), CV_LOAD_IMAGE_COLOR);\n if (++it == timestamped_files.end()) timestamped_files.clear();\n } else {\n if (files.empty()) break;\n timestamp = timestamp + 1/30.;\n filename = files.front();\n image = imread(filename.string(), CV_LOAD_IMAGE_COLOR);\n files.pop_front(); \n }\n if (image.rows != FLAGS_height || image.cols != FLAGS_width) {\n resize(image, image, Size(FLAGS_width, 
FLAGS_height));\n }\n\n cout << timestamp << \" processing \" << filename << endl;\n num_frames++;\n auto start = chrono::high_resolution_clock::now();\n\n objects_detection::input_image_const_view_t input_view =\n boost::gil::interleaved_view(image.cols, image.rows,\n reinterpret_cast(image.data), static_cast(image.step));\n\n objects_detection::set_monocular_image(input_view);\n objects_detection::compute();\n\n std::vector detections = objects_detection::get_detections();\n cout << detections.size() << \" regions detected.\" << endl;\n num_proposals += detections.size();\n \n Mat proposals = image.clone();\n \n int detected = 0;\n for (int i = 0; i < detections.size(); i += FLAGS_batchSize) {\n vector bboxes;\n for (int j = 0, e = min(FLAGS_batchSize, (int)detections.size()-i); j < e; j++) {\n auto d = detections[i+j];\n int l = max(0, (int)d.bounding_box.min_corner().x()), t = max(0, (int)d.bounding_box.min_corner().y());\n Rect r(l, t, min(FLAGS_width-1, (int)d.bounding_box.max_corner().x()) - l + 1, min(FLAGS_height-1, (int)d.bounding_box.max_corner().y()) - t + 1);\n rectangle(proposals, r, Scalar(0, 0, 255));\n bboxes.push_back(r);\n\n Mat patch;\n image(r).convertTo(patch, CV_32FC3, 1/255.0);\n resize(patch, patch, Size(224, 224));\n\n float *D = (float*)patch.data;\n int step = 3*patch.cols;\n for (int r = 0, R = patch.rows; r < R; r++) {\n for (int c = 0, C = patch.cols; c < C; c++) {\n data[j*3*224*224 + r*224 + c] = (D[step*r + 3*c+2] - 0.485) / 0.229;\n data[j*3*224*224 + 224*224 + r*224 + c] = (D[step*r + 3*c+1] - 0.456) / 0.224;\n data[j*3*224*224 + 2*224*224 + r*224 + c] = (D[step*r + 3*c] - 0.406) / 0.225;\n }\n }\n }\n\n lua.script(\"result = m:forward(input:cuda()):float();\");\n THFloatTensor *result = lua[\"result\"];\n for (int j = 0, e = min(FLAGS_batchSize, (int)detections.size()-i); j < e; j++) {\n float neg = exp(result->storage->data[j*2]), pos = exp(result->storage->data[j*2+1]);\n float p = pos / (pos + neg);\n if (p >= 
FLAGS_threshold) {\n detected++;\n rectangle(image, bboxes[j], Scalar(0, 0, 255 * p));\n }\n }\n }\n cout << detected << \" pedestrians detected.\" << endl;\n\n auto elapsed = chrono::duration(chrono::high_resolution_clock::now() - start).count();\n cur_timestamp = cur_timestamp + elapsed;\n\n if (FLAGS_output == \"\") {\n imshow(\"Proposals\", proposals);\n imshow(\"Output\", image);\n waitKey(1);\n } else {\n do {\n outputVideo << image;\n proposalVideo << proposals;\n video_timestamp += 1/30.; \n } while (FLAGS_realtime && video_timestamp < cur_timestamp);\n }\n }\n cout << \"Done! Time/frame: \" << cur_timestamp * 1000 / num_frames << \"ms. #Proposals/frame: \" << num_proposals * 1.0 / num_frames << endl;\n if (FLAGS_output == \"\") waitKey(0);\n return 0;\n}\n", "meta": {"hexsha": "84d575864cac25c6b44c5e2a22c14d899286539f", "size": 7141, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tx1/forward.cpp", "max_stars_repo_name": "noranart/Pedestrian-Detection-in-TX1", "max_stars_repo_head_hexsha": "7c31c38338fa9d01db25690e30d04ae9bd6a2f74", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12.0, "max_stars_repo_stars_event_min_datetime": "2017-02-27T19:11:12.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-09T13:46:58.000Z", "max_issues_repo_path": "tx1/forward.cpp", "max_issues_repo_name": "noranart/Pedestrian-Detection-in-TX1", "max_issues_repo_head_hexsha": "7c31c38338fa9d01db25690e30d04ae9bd6a2f74", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tx1/forward.cpp", "max_forks_repo_name": "noranart/Pedestrian-Detection-in-TX1", "max_forks_repo_head_hexsha": "7c31c38338fa9d01db25690e30d04ae9bd6a2f74", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6.0, "max_forks_repo_forks_event_min_datetime": "2017-06-26T23:32:00.000Z", "max_forks_repo_forks_event_max_datetime": "2019-01-04T01:14:14.000Z", 
"avg_line_length": 35.3514851485, "max_line_length": 154, "alphanum_fraction": 0.6095784904, "num_tokens": 1952, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.6224593312018545, "lm_q2_score": 0.320821300824607, "lm_q1q2_score": 0.19969821234659385}} {"text": "#include \n#include \n#include \n#include \n#include \n\nusing namespace std;\nusing namespace boost;\nusing namespace fp_core::physics;\n\n// ---------------------------------------------------------------------------\n// Test Class\n// ---------------------------------------------------------------------------\n\nclass PhysicsBoxTest : public FpCoreTest {\nprotected:\n string mPath;\n\n virtual void SetUp() {\n\tinitEnvironment();\n }\n};\n\nTEST_F(PhysicsBoxTest, Constructor) {\n\tPhysicsBox box(300,400);\n\tboost::shared_ptr shape = static_pointer_cast(box.getBox2dShape(1.0f));\n\tASSERT_EQ(4, shape->GetVertexCount());\n\tASSERT_EQ(300, shape->GetVertex(1).x - shape->GetVertex(0).x);\n\tASSERT_EQ(400, shape->GetVertex(2).y - shape->GetVertex(0).y);\n}\n", "meta": {"hexsha": "6b25a54bba1ac8249271461c93859e47253eedd2", "size": 878, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "fp_core/src/test/src/Physics/PhysicsBox.cpp", "max_stars_repo_name": "submain/fruitpunch", "max_stars_repo_head_hexsha": "31773128238830d3d335c1915877dc0db56836cd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fp_core/src/test/src/Physics/PhysicsBox.cpp", "max_issues_repo_name": "submain/fruitpunch", "max_issues_repo_head_hexsha": "31773128238830d3d335c1915877dc0db56836cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fp_core/src/test/src/Physics/PhysicsBox.cpp", "max_forks_repo_name": "submain/fruitpunch", 
"max_forks_repo_head_hexsha": "31773128238830d3d335c1915877dc0db56836cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-08-14T02:51:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-14T02:51:47.000Z", "avg_line_length": 28.3225806452, "max_line_length": 104, "alphanum_fraction": 0.6127562642, "num_tokens": 205, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO\n\n", "lm_q1_score": 0.5039061705290806, "lm_q2_score": 0.3960681662740417, "lm_q1q2_score": 0.19958119293562748}} {"text": "// Software License for MTL\n// \n// Copyright (c) 2007 The Trustees of Indiana University. \n// 2008 Dresden University of Technology and the Trustees of Indiana University.\n// 2010 SimuNova UG (haftungsbeschr\u00e4nkt), www.simunova.com. \n// All rights reserved.\n// Authors: Peter Gottschling and Andrew Lumsdaine\n// \n// This file is part of the Matrix Template Library\n// \n// See also license.mtl.txt in the distribution.\n\n#ifndef MTL_POW_INCLUDE\n#define MTL_POW_INCLUDE\n\n#include \n\nnamespace mtl {\n \n namespace vec {\n\n /// Raise Vector \\a v to power \\a exp\n template \n pow_by_view pow(const Vector& v, const Exponent& exp)\n {\n return pow_by_view(v, exp);\n }\n \n } // namespace vec\n\n} // namespace mtl\n\n#endif // MTL_POW_INCLUDE\n", "meta": {"hexsha": "4fd672ba7ccbb7c2de4c1a1977c934d68d1eb8a4", "size": 924, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "boost/numeric/mtl/operation/pow.hpp", "max_stars_repo_name": "lit-uriy/mtl4-mirror", "max_stars_repo_head_hexsha": "37cf7c2847165d3537cbc3400cb5fde6f80e3d8b", "max_stars_repo_licenses": ["MTLL"], "max_stars_count": 24.0, "max_stars_repo_stars_event_min_datetime": "2019-03-26T15:25:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T10:00:45.000Z", "max_issues_repo_path": "boost/numeric/mtl/operation/pow.hpp", "max_issues_repo_name": "lit-uriy/mtl4-mirror", "max_issues_repo_head_hexsha": 
"37cf7c2847165d3537cbc3400cb5fde6f80e3d8b", "max_issues_repo_licenses": ["MTLL"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2020-04-17T12:35:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-03T15:46:25.000Z", "max_forks_repo_path": "boost/numeric/mtl/operation/pow.hpp", "max_forks_repo_name": "lit-uriy/mtl4-mirror", "max_forks_repo_head_hexsha": "37cf7c2847165d3537cbc3400cb5fde6f80e3d8b", "max_forks_repo_licenses": ["MTLL"], "max_forks_count": 10.0, "max_forks_repo_forks_event_min_datetime": "2019-12-01T13:40:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T08:39:54.000Z", "avg_line_length": 27.1764705882, "max_line_length": 94, "alphanum_fraction": 0.6645021645, "num_tokens": 229, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO\n\n", "lm_q1_score": 0.5039061705290805, "lm_q2_score": 0.39606816627404173, "lm_q1q2_score": 0.19958119293562748}} {"text": "#ifndef KGRAPH_VALUE_TYPE\n#define KGRAPH_VALUE_TYPE float\n#endif\n\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \"kgraph.h\"\n#include \"kgraph-data.h\"\n\nusing namespace boost::timer;\nusing namespace std;\nusing namespace boost;\nusing namespace kgraph;\nnamespace po = boost::program_options; \n\n\ntypedef KGRAPH_VALUE_TYPE value_type;\n\nint main(int argc, char *argv[]) {\n string data_path;\n string output_path;\n KGraph::IndexParams params;\n unsigned D;\n unsigned skip;\n unsigned gap;\n unsigned synthetic;\n float noise;\n\n bool lshkit = true;\n\n po::options_description desc_visible(\"General options\");\n desc_visible.add_options()\n (\"help,h\", \"produce help message.\")\n (\"version,v\", \"print version information.\")\n (\"data\", po::value(&data_path), \"input path\")\n (\"output\", po::value(&output_path), \"output path\")\n (\",K\", po::value(¶ms.K)->default_value(default_K), \"number of nearest neighbor\")\n 
(\"controls,C\", po::value(¶ms.controls)->default_value(default_controls), \"number of control pounsigneds\")\n ;\n\n po::options_description desc_hidden(\"Expert options\");\n desc_hidden.add_options()\n (\"iterations,I\", po::value(¶ms.iterations)->default_value(default_iterations), \"\")\n (\",S\", po::value(¶ms.S)->default_value(default_S), \"\")\n (\",R\", po::value(¶ms.R)->default_value(default_R), \"\")\n (\",L\", po::value(¶ms.L)->default_value(default_L), \"\")\n (\"delta\", po::value(¶ms.delta)->default_value(default_delta), \"\")\n (\"recall\", po::value(¶ms.recall)->default_value(default_recall), \"\")\n (\"prune\", po::value(¶ms.prune)->default_value(default_prune), \"\")\n (\"noise\", po::value(&noise)->default_value(0), \"noise\")\n (\"seed\", po::value(¶ms.seed)->default_value(default_seed), \"\")\n (\"dim,D\", po::value(&D), \"dimension, see format\")\n (\"skip\", po::value(&skip)->default_value(0), \"see format\")\n (\"gap\", po::value(&gap)->default_value(0), \"see format\")\n (\"raw\", \"read raw binary file, need to specify D.\")\n (\"synthetic\", po::value(&synthetic)->default_value(0), \"generate synthetic data, for performance evaluation only, specify number of points\")\n ;\n\n po::options_description desc(\"Allowed options\");\n desc.add(desc_visible).add(desc_hidden);\n\n po::positional_options_description p;\n p.add(\"data\", 1);\n p.add(\"output\", 1);\n\n po::variables_map vm;\n po::store(po::command_line_parser(argc, argv).options(desc).positional(p).run(), vm);\n po::notify(vm);\n\n if (vm.count(\"raw\") == 1) {\n lshkit = false;\n }\n\n if (vm.count(\"version\")) {\n cout << \"KGraph version \" << KGraph::version() << endl;\n return 0;\n }\n\n if (vm.count(\"help\")\n || (synthetic && (vm.count(\"dim\") == 0 || vm.count(\"data\")))\n || (!synthetic && (vm.count(\"data\") == 0 || (vm.count(\"dim\") == 0 && !lshkit)))) {\n cout << \"Usage: index [OTHER OPTIONS]... 
INPUT [OUTPUT]\" << endl;\n cout << desc_visible << endl;\n cout << desc_hidden << endl;\n return 0;\n }\n\n if (params.S == 0) {\n params.S = params.K;\n }\n\n if (lshkit && (synthetic == 0)) { // read dimension information from the data file\n static const unsigned LSHKIT_HEADER = 3;\n ifstream is(data_path.c_str(), ios::binary);\n unsigned header[LSHKIT_HEADER]; /* entry size, row, col */\n is.read((char *)header, sizeof header);\n BOOST_VERIFY(is);\n BOOST_VERIFY(header[0] == sizeof(value_type));\n is.close();\n D = header[2];\n skip = LSHKIT_HEADER * sizeof(unsigned);\n gap = 0;\n }\n\n Matrix data;\n if (synthetic) {\n if (!std::is_floating_point::value) {\n throw runtime_error(\"synthetic data not implemented for non-floating-point values.\");\n }\n data.resize(synthetic, D);\n cerr << \"Generating synthetic data...\" << endl;\n default_random_engine rng(params.seed);\n uniform_real_distribution distribution(-1.0, 1.0);\n data.zero(); // important to do that\n for (unsigned i = 0; i < synthetic; ++i) {\n value_type *row = data[i];\n for (unsigned j = 0; j < D; ++j) {\n row[j] = distribution(rng);\n }\n }\n }\n else {\n data.load(data_path, D, skip, gap);\n }\n if (noise != 0) {\n if (!std::is_floating_point::value) {\n throw runtime_error(\"noise injection not implemented for non-floating-point value.\");\n }\n tr1::ranlux64_base_01 rng;\n double sum = 0, sum2 = 0;\n for (unsigned i = 0; i < data.size(); ++i) {\n for (unsigned j = 0; j < data.dim(); ++j) {\n value_type v = data[i][j];\n sum += v;\n sum2 += v * v;\n }\n }\n double total = double(data.size()) * data.dim();\n double avg2 = sum2 / total, avg = sum / total;\n double dev = sqrt(avg2 - avg * avg);\n cerr << \"Adding Gaussian noise w/ \" << noise << \"x sigma(\" << dev << \")...\" << endl;\n boost::normal_distribution gaussian(0, noise * dev);\n for (unsigned i = 0; i < data.size(); ++i) {\n for (unsigned j = 0; j < data.dim(); ++j) {\n data[i][j] += gaussian(rng);\n }\n }\n }\n\n MatrixOracle 
oracle(data);\n KGraph::IndexInfo info;\n KGraph *kgraph = KGraph::create(); //(oracle, params, &info);\n {\n auto_cpu_timer timer;\n kgraph->build(oracle, params, output_path.c_str(), &info);\n cerr << info.stop_condition << endl;\n }\n\n if (output_path.size()) {\n \n //Note that we modify the index save procedure to reduce index size \n //kgraph->save(output_path.c_str());\n }\n\n \n \n delete kgraph;\n\n return 0;\n}\n", "meta": {"hexsha": "a984deab85ce581a40c3e9e17583974a960205e4", "size": 6180, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "algorithms/KGraph/src/kgraph_index.cpp", "max_stars_repo_name": "sourabhpoddar404/nns_benchmark", "max_stars_repo_head_hexsha": "44cdd81ab984c87c2246a0464a7ac93321c58815", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 150.0, "max_stars_repo_stars_event_min_datetime": "2016-06-03T16:39:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T05:32:56.000Z", "max_issues_repo_path": "algorithms/KGraph/src/kgraph_index.cpp", "max_issues_repo_name": "sourabhpoddar404/nns_benchmark", "max_issues_repo_head_hexsha": "44cdd81ab984c87c2246a0464a7ac93321c58815", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 7.0, "max_issues_repo_issues_event_min_datetime": "2016-06-03T13:43:21.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-28T07:42:02.000Z", "max_forks_repo_path": "algorithms/KGraph/src/kgraph_index.cpp", "max_forks_repo_name": "sourabhpoddar404/nns_benchmark", "max_forks_repo_head_hexsha": "44cdd81ab984c87c2246a0464a7ac93321c58815", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 42.0, "max_forks_repo_forks_event_min_datetime": "2016-05-18T05:53:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-27T19:57:52.000Z", "avg_line_length": 33.4054054054, "max_line_length": 144, "alphanum_fraction": 0.5920711974, "num_tokens": 1564, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.5039061705290805, "lm_q2_score": 0.3960681662740417, "lm_q1q2_score": 0.19958119293562745}} {"text": "// graph-tool -- a general graph modification and manipulation thingy\n//\n// Copyright (C) 2006-2015 Tiago de Paula Peixoto \n//\n// This program is free software; you can redistribute it and/or\n// modify it under the terms of the GNU General Public License\n// as published by the Free Software Foundation; either version 3\n// of the License, or (at your option) any later version.\n//\n// This program is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU General Public License for more details.\n//\n// You should have received a copy of the GNU General Public License\n// along with this program. If not, see .\n\n#include \"graph.hh\"\n#include \"graph_filtering.hh\"\n\n#include \n\nusing namespace graph_tool;\nusing namespace boost;\n\nstruct check_iso\n{\n\n template \n void operator()(Graph1& g1, Graph2* g2, InvMap cinv_map1, InvMap cinv_map2,\n int64_t max_inv, IsoMap map, VertexIndexMap index1,\n VertexIndexMap index2, bool& result) const\n {\n auto inv_map1 = cinv_map1.get_unchecked(num_vertices(g1));\n auto inv_map2 = cinv_map2.get_unchecked(num_vertices(*g2));\n\n vinv_t vinv1(inv_map1, max_inv);\n vinv_t vinv2(inv_map2, max_inv);\n\n result = isomorphism(g1, *g2,\n isomorphism_map(map.get_unchecked(num_vertices(g1))).\n vertex_invariant1(vinv1).\n vertex_invariant2(vinv2).\n vertex_index1_map(index1).\n vertex_index2_map(index2));\n }\n\n template \n struct vinv_t\n {\n vinv_t(Prop& prop, int64_t max)\n : _prop(prop), _max(max) {}\n Prop& _prop;\n int64_t _max;\n\n template \n int64_t operator()(Vertex v) const\n {\n return _prop[v];\n };\n\n int64_t max() const { return _max; }\n\n typedef int64_t result_type;\n typedef size_t argument_type;\n };\n};\n\nstruct directed_graph_view_pointers:\n 
mpl::transform >::type {};\n\nstruct undirected_graph_view_pointers:\n mpl::transform >::type {};\n\ntypedef property_map_types::apply >::type\n vertex_props_t;\n\nbool check_isomorphism(GraphInterface& gi1, GraphInterface& gi2,\n boost::any ainv_map1, boost::any ainv_map2,\n int64_t max_inv, boost::any aiso_map)\n{\n bool result;\n\n typedef property_map_type::apply::type\n iso_map_t;\n auto iso_map = any_cast(aiso_map);\n\n typedef property_map_type::apply::type\n inv_map_t;\n auto inv_map1 = any_cast(ainv_map1);\n auto inv_map2 = any_cast(ainv_map2);\n\n if (gi1.GetDirected() != gi2.GetDirected())\n return false;\n if (gi1.GetDirected())\n {\n run_action()\n (gi1, std::bind(check_iso(),\n placeholders::_1, placeholders::_2,\n inv_map1, inv_map2, max_inv, iso_map,\n gi1.GetVertexIndex(),\n gi2.GetVertexIndex(), std::ref(result)),\n directed_graph_view_pointers())\n (gi2.GetGraphView());\n }\n else\n {\n run_action()\n (gi1, std::bind(check_iso(),\n placeholders::_1, placeholders::_2,\n inv_map1, inv_map2, max_inv, iso_map,\n gi1.GetVertexIndex(),\n gi2.GetVertexIndex(), std::ref(result)),\n undirected_graph_view_pointers())\n (gi2.GetGraphView());\n }\n\n return result;\n}\n", "meta": {"hexsha": "4412be9a632088941bd67d475e6c60195e83aa06", "size": 4497, "ext": "cc", "lang": "C++", "max_stars_repo_path": "graph-tool/src/graph/topology/graph_isomorphism.cc", "max_stars_repo_name": "johankaito/fufuka", "max_stars_repo_head_hexsha": "32a96ecf98ce305c2206c38443e58fdec88c788d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2015-08-04T19:41:53.000Z", "max_stars_repo_stars_event_max_datetime": "2015-08-04T19:41:53.000Z", "max_issues_repo_path": "graph-tool/src/graph/topology/graph_isomorphism.cc", "max_issues_repo_name": "johankaito/fufuka", "max_issues_repo_head_hexsha": "32a96ecf98ce305c2206c38443e58fdec88c788d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, 
"max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graph-tool/src/graph/topology/graph_isomorphism.cc", "max_forks_repo_name": "johankaito/fufuka", "max_forks_repo_head_hexsha": "32a96ecf98ce305c2206c38443e58fdec88c788d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4094488189, "max_line_length": 82, "alphanum_fraction": 0.5981765622, "num_tokens": 1020, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.5039061705290805, "lm_q2_score": 0.3960681662740417, "lm_q1q2_score": 0.19958119293562745}} {"text": "#include \n#include \"svm_test_input.hpp\"\n#include \"../src/svm_impl/classifiers_generator.h\"\n#include \"../src/svm_impl/regressions_generator.h\"\n#include \"../src/svm_impl/regression_params.hpp\"\n#include \"../src/svm_impl/classifier_params.hpp\"\n#include \"../src/svm_impl/svm_model.h\"\n#include \n#include \n#include \n\nnamespace \n{\n\nconst mlmodels::class_data labels = { 0, 0, 1, 1, 0, 2, 1, 2, 1, 0 };\n\nconst mlmodels::training_data dataf = make_multi_array(\n { { 100.f, 10.f }, \n { 150.f, 10.f }, \n { 600.f, 200.f }, \n { 600.f, 10.f }, \n { 10.f, 100.f }, \n { 455.f, 10.f }, \n { 345.f, 255.f }, \n { 10.f, 501.f }, \n { 401.f, 255.f }, \n { 30.f, 150.f } \n }\n);\n\n} // end of namespace\n\nBOOST_AUTO_TEST_CASE(test_invalid_data_for_model)\n{\n using namespace mlmodels;\n auto model = svm::classifier::create_model(svm::classifier::c_rbf_train{}, \n training_data{}, class_data{}\n );\n BOOST_TEST((model.get() == nullptr), \"we are expecting to fail in model generation as we have invalid data for the model\");\n // while it should be the same - test for regression type as well\n model = svm::regression::create_model(svm::regression::epsilon_linear_train{},\n training_data{}, class_data{}\n );\n 
BOOST_TEST((model.get() == nullptr), \"we are expecting to fail in model generation as we have invalid data for the model\");\n}\n\nBOOST_AUTO_TEST_CASE(test_train_model)\n{\n using namespace mlmodels;\n try {\n auto model = svm::classifier::create_model(svm::classifier::nu_poly_train{}, train_data, train_labels);\n BOOST_TEST((model.get() != nullptr), \"we are expecting to successfully pass training with this - if not then we have an anexpected issue here\");\n } catch (const std::exception& e) {\n BOOST_TEST(false, \"failed to create model: \"<::max());\n auto ret2 = svm::test(model, dataf);\n BOOST_TEST(ret2.empty());\n model = svm::regression::create_model(svm::regression::epsilon_sig_train{},\n training_data{}, class_data{}\n );\n BOOST_TEST((model.get() == nullptr), \"we are expecting to fail in model generation as we have invalid data for the model\");\n ret = svm::predict(model, labels);\n BOOST_TEST(ret == std::numeric_limits::max());\n ret2 = svm::test(model, dataf);\n BOOST_TEST(ret2.empty());\n}\n\nBOOST_AUTO_TEST_CASE(test_invalid_model_train_args)\n{\n using namespace mlmodels;\n // by passing labels and data with the a different number of elements\n // want to see if this is not accepted\n try {\n auto model = svm::regression::create_model(svm::regression::nu_sig_train{}, dataf, train_labels);\n BOOST_TEST((model.get() == nullptr), \"we should get invalid model with these values\");\n } catch (const std::exception&) {\n BOOST_TEST(false, \"we should get an empty model\");\n } catch (...) 
{\n BOOST_TEST(false, \"we should not get a starndard exception, got some other exception\");\n }\n}\n\nBOOST_AUTO_TEST_CASE(test_invalid_prediction_data_for_model)\n{\n using namespace mlmodels;\n // make sure that if we are passing wrong number of features we would fail to predict\n try {\n auto model = svm::classifier::create_model(svm::classifier::c_linear_train{}, train_data, train_labels);\n BOOST_TEST_REQUIRE((model.get() != nullptr), \"failed to train the model!!\");\n // now try to run prediction and test, and see that it would fail since we are passing the wrong number of features\n auto ret = svm::test(model, dataf);\n BOOST_TEST(ret.empty(), \"the args to the model test is wrong - wrong number of features, should return empty\"); \n auto ret2 = svm::predict(model, labels);\n BOOST_TEST((ret2 == std::numeric_limits::max()), \"the args to predict are wrong, we pass the wrong number of features\");\n } catch (...) {\n BOOST_TEST(false, \"we should not get any exception here\");\n }\n}\nBOOST_AUTO_TEST_CASE(test_prediction_is_valid_return_value)\n{\n using namespace mlmodels;\n // do the same for regression model\n try {\n //auto model = svm::regression::create_model(svm::regression::epsilon_linear_train{}, dataf, labels);\n auto model = svm::classifier::create_model(svm::classifier::c_linear_train{}, dataf, labels);\n BOOST_TEST_REQUIRE((model.get() != nullptr), \"failed to train the model!!\");\n // pass a row of valid size for prediction\n const class_data row{11.f, 110.f};\n auto ret = svm::test(model, dataf);\n BOOST_TEST(not ret.empty(), \"the args to the model test is - good, should not return empty\"); \n auto ret2 = svm::predict(model, row);\n BOOST_TEST((ret2 != std::numeric_limits::max()), \"the args to predict are good, we pass good number of features\");\n } catch (...) 
{\n BOOST_TEST(false, \"we should not get any exception here\");\n }\n return;\n // in this test we would make sure that if we are passing a valid arguments we are getting\n // valid results - by valid it means that they are not out of bound, but we are not checking \n // if this is a correct prediction mathematically\n try {\n auto model = svm::classifier::create_model(svm::classifier::c_linear_train{}, train_data, train_labels);\n BOOST_TEST_REQUIRE((model.get() != nullptr), \"failed to train the model!!\");\n // pass a row of valid size for prediction\n const class_data row{0.0416667, -1,-0.333333, -0.283019, -0.260274, 1, 1, 0.343511, 1, -1, -1, -0.333333, -1};\n auto ret = svm::test(model, test_data);\n BOOST_TEST(not ret.empty(), \"the args to the model test is - good, should not return empty\"); \n auto ret2 = svm::predict(model, row);\n BOOST_TEST((ret2 != std::numeric_limits::max()), \"the args to predict are good, we pass good number of features\");\n } catch (...) {\n BOOST_TEST(false, \"we should not get any exception here\");\n }\n \n}\n\n", "meta": {"hexsha": "c002886ed68b022ca51ee222b10b835e3fc086b1", "size": 7407, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/libs/ml_models/ut/test_svm_impl.cpp", "max_stars_repo_name": "boazsade/machine_learinig_models", "max_stars_repo_head_hexsha": "eb1f9eda0e4e25a6d028b25682dfb20628a20624", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/libs/ml_models/ut/test_svm_impl.cpp", "max_issues_repo_name": "boazsade/machine_learinig_models", "max_issues_repo_head_hexsha": "eb1f9eda0e4e25a6d028b25682dfb20628a20624", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/libs/ml_models/ut/test_svm_impl.cpp", "max_forks_repo_name": 
"boazsade/machine_learinig_models", "max_forks_repo_head_hexsha": "eb1f9eda0e4e25a6d028b25682dfb20628a20624", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.0062111801, "max_line_length": 152, "alphanum_fraction": 0.6484406642, "num_tokens": 1793, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO\n\n", "lm_q1_score": 0.5039061705290805, "lm_q2_score": 0.3960681662740417, "lm_q1q2_score": 0.19958119293562745}} {"text": "#include \"dvs_hot_pixel_filter/utils.h\"\n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\nDECLARE_bool(no_stats);\n\nnamespace dvs_hot_pixel_filter {\nnamespace utils {\n\nconst std::string OUTPUT_FOLDER = \"./stats/\";\n\nbool parse_arguments(int argc, char* argv[],\n std::string* path_to_input_rosbag)\n{\n if(argc < 2)\n {\n std::cerr << \"Error: not enough input arguments.\\n\"\n \"Usage:\\n\\trosrun dvs_hot_pixel_filter hot_pixel_filter path_to_bag.bag\\n\\n\"\n \"Additional (optional) command-line flags include:\\n\"\n \"\\t--n_hot_pix=\\n\"\n \"\\t--n_std=\\n\"\n \"\\t--no_stats (do not save stats on disk)\"<< std::endl;\n return false;\n }\n\n *path_to_input_rosbag = std::string(argv[1]);\n std::cout << \"Input bag: \" << *path_to_input_rosbag << std::endl;\n return true;\n}\n\nvoid write_histogram_image(const std::string filename,\n const cv::Mat& histogram,\n const std::vector& hot_pixels)\n{\n cv::Mat display_image;\n\n if (!hot_pixels.empty())\n {\n cv::Vec3b colour = cv::Vec3b(255, 0, 0);\n cv::Mat local_hist;\n histogram.copyTo(local_hist);\n // create mask\n cv::Mat mask = cv::Mat::zeros(histogram.size(), CV_8UC1);\n for (auto point : hot_pixels)\n {\n mask.at(point) = 1;\n }\n double max;\n cv::minMaxLoc(local_hist, nullptr, &max);\n local_hist.setTo(max, mask);\n cv::normalize(local_hist, display_image, 0, 255, cv::NORM_MINMAX, CV_8UC1);\n 
cv::applyColorMap(display_image, display_image, cv::COLORMAP_HOT);\n display_image.setTo(colour, mask);\n }\n else\n {\n cv::normalize(histogram, display_image, 0, 255, cv::NORM_MINMAX, CV_8UC1);\n cv::applyColorMap(display_image, display_image, cv::COLORMAP_HOT);\n }\n\n cv::Mat large_image;\n cv::resize(display_image, large_image, cv::Size(), 3, 3, cv::INTER_NEAREST);\n cv::imwrite(filename, large_image);\n}\n\nstd::string extract_bag_name(const std::string fullname)\n{\n int pos = 0;\n int len = fullname.length();\n // go from the back to the first forward- or back-slash.\n for (int i = len; i > 0; i--)\n {\n if (fullname[i] == '/' || fullname[i] == '\\\\')\n {\n pos = i + 1;\n break;\n }\n }\n int count = 4;\n // now go from there to the first '.'\n for (int i = 0; i < len; i++)\n {\n if (fullname[pos + i] == '.')\n {\n count = i;\n break;\n }\n }\n std::string bag_name = fullname.substr(pos, count);\n return bag_name;\n}\n\nvoid build_histograms(rosbag::View& view,\n topic_mats& histograms)\n{\n std::cout << \"Building event count histogram(s)...\" << std::endl;\n\n std::vector seen_topics;\n for(const rosbag::MessageInstance& m : view)\n {\n if(m.getDataType() == \"dvs_msgs/EventArray\")\n {\n const std::string topic_name = m.getTopic();\n // pointer to the message\n dvs_msgs::EventArrayConstPtr s = m.instantiate();\n const cv::Size msg_size = cv::Size(s->width, s->height);\n\n cv::Mat& histogram = histograms[topic_name];\n\n // initialise event_count_histogram if we haven't seen the topic yet\n if ( !contains(topic_name, seen_topics) )\n {\n histogram = cv::Mat::zeros(msg_size, CV_64FC1);\n seen_topics.push_back(topic_name);\n std::cout << \"added \" << topic_name << \" to seen_topics\" << std::endl;\n }\n\n if (msg_size != histogram.size())\n {\n std::cerr << \"Error: a new event message in \" << topic_name <<\n \" does not match the existing topic histogram size.\\n message: \" <<\n msg_size << \"\\t histogram: \" << histogram.size() << std::endl;\n 
return;\n }\n\n for(auto e : s->events)\n {\n // accumulate events without discrimination\n histogram.at(e.y, e.x)++;\n }\n }\n }\n\n std::cout << \"...done!\" << std::endl;\n}\n\nvoid detect_hot_pixels(const topic_mats& histograms_by_topic,\n const double& num_std_devs,\n const int num_hot_pixels,\n topic_points& hot_pixels_by_topic)\n{\n for(const auto& topic : histograms_by_topic)\n {\n const std::string topic_name = topic.first;\n const cv::Mat& histogram = topic.second;\n std::vector& hot_pixels = hot_pixels_by_topic[topic_name];\n if (num_hot_pixels == -1)\n {\n // auto-detect hot pixels\n double threshold;\n dvs_hot_pixel_filter::utils::find_threshold(\n histogram, num_std_devs, threshold);\n dvs_hot_pixel_filter::utils::hot_pixels_by_threshold(\n histogram, threshold, hot_pixels);\n }\n else\n {\n // user-specified number of hot pixels\n dvs_hot_pixel_filter::utils::hot_pixels_by_ranking(\n histogram, num_hot_pixels, hot_pixels);\n }\n }\n}\n\nvoid hot_pixels_by_threshold(const cv::Mat& histogram,\n const double& threshold,\n std::vector& hot_pixels)\n{\n for (int y = 0; y < histogram.rows; y++)\n {\n for (int x = 0; x < histogram.cols; x++)\n {\n if (histogram.at(y, x) > threshold)\n {\n hot_pixels.push_back(cv::Point(x, y));\n }\n }\n }\n}\nvoid hot_pixels_by_ranking(const cv::Mat& histogram,\n const double& num_hot_pixels,\n std::vector& hot_pixels)\n{\n cv::Mat local_hist;\n histogram.copyTo(local_hist);\n\n for (int i = 0; i < num_hot_pixels; i++)\n {\n double max;\n cv::Point maxLoc;\n cv::minMaxLoc(local_hist, nullptr, &max, nullptr, &maxLoc);\n\n hot_pixels.push_back(maxLoc);\n local_hist.at(maxLoc) = 0;\n }\n}\n\nvoid find_threshold(const cv::Mat& histogram,\n const double num_std_devs,\n double& threshold)\n{\n cv::Scalar mean_Scalar, stdDev_Scalar;\n cv::meanStdDev(histogram, mean_Scalar, stdDev_Scalar, histogram > 0);\n\n const double mean = mean_Scalar[0];\n const double stdDev = stdDev_Scalar[0];\n threshold = mean + 
num_std_devs*stdDev;\n}\n\nvoid write_all_msgs(rosbag::View& view,\n topic_points& hot_pixels_by_topic,\n rosbag::Bag& output_bag)\n{\n constexpr int log_every_n_messages = 10000;\n const uint32_t num_messages = view.size();\n uint32_t message_index = 0;\n std::cout << \"Writing...\" << std::endl;\n // write the new rosbag without hot pixels by iterating over all messages\n for(rosbag::MessageInstance const m : view)\n {\n write_msg(\n m, hot_pixels_by_topic, output_bag);\n if(message_index++ % log_every_n_messages == 0)\n {\n std::cout << \"Message: \" << message_index << \" / \" << num_messages << std::endl;\n }\n }\n\n std::cout << \"Message: \" << num_messages << \" / \" << num_messages << std::endl;\n std::cout << \"...done!\" << std::endl;\n}\n\nvoid write_event_msg(const std::string topic_name,\n const dvs_msgs::EventArrayConstPtr event_array_ptr,\n const std::vector& hot_pixels,\n rosbag::Bag& output_bag)\n{\n std::vector events;\n for(auto e : event_array_ptr->events)\n {\n if (!contains(cv::Point(e.x, e.y), hot_pixels))\n {\n events.push_back(e);\n }\n }\n\n if (events.size() > 0)\n {\n // Write new event array message to output rosbag\n\n dvs_msgs::EventArray event_array_msg;\n event_array_msg.events = events;\n event_array_msg.width = event_array_ptr->width;\n event_array_msg.height = event_array_ptr->height;\n event_array_msg.header.stamp = events.back().ts;\n\n output_bag.write(topic_name, event_array_msg.header.stamp, event_array_msg);\n }\n}\n\nvoid write_msg(const rosbag::MessageInstance& m,\n topic_points& hot_pixels_topic,\n rosbag::Bag& output_bag)\n{\n if(m.getDataType() == \"dvs_msgs/EventArray\")\n {\n const std::string topic_name = m.getTopic();\n std::vector& hot_pixels = hot_pixels_topic[topic_name];\n dvs_msgs::EventArrayConstPtr event_array_ptr = m.instantiate();\n write_event_msg(topic_name, event_array_ptr, hot_pixels, output_bag);\n\n }\n else if(m.getDataType() == \"sensor_msgs/Image\")\n {\n sensor_msgs::ImageConstPtr img_msg = 
m.instantiate();\n output_bag.write(m.getTopic(), img_msg->header.stamp, m);\n }\n else if(m.getDataType() == \"sensor_msgs/Imu\")\n {\n sensor_msgs::ImuConstPtr imu_msg = m.instantiate();\n output_bag.write(m.getTopic(), imu_msg->header.stamp, m);\n }\n else\n {\n output_bag.write(m.getTopic(), m.getTime(), m);\n }\n}\n\nstd::string usable_filename(const std::string filename_in)\n{\n std::string filename = filename_in;\n std::replace( filename.begin(), filename.end(), '/', '_'); // replace all '/' to '_'\n std::replace( filename.begin(), filename.end(), '\\\\', '_'); // replace all '\\' to '_'\n return filename;\n}\n\nvoid write_hot_pixels(const std::string filename,\n const std::vector& hot_pixels)\n{\n std::ofstream hot_pixels_file;\n hot_pixels_file.open(filename);\n // the important part\n for (const auto& point : hot_pixels)\n {\n hot_pixels_file << point.x << \", \" << point.y << \"\\n\";\n }\n hot_pixels_file.close();\n}\n\nvoid save_stats(const std::string bag_name,\n const std::string topic_name,\n const cv::Mat& histogram,\n const std::vector& hot_pixels,\n const bool one_topic)\n{\n cv::Mat histogram_after;\n histogram.copyTo(histogram_after);\n for (auto point : hot_pixels)\n {\n histogram_after.at(point) = 0;\n }\n\n const double num_events = cv::sum(histogram)[0];\n const double num_events_after = cv::sum(histogram_after)[0];\n const double percent_events_discarded = (1 - num_events_after/num_events)*100;\n\n std::cout << std::setprecision(4) << topic_name << \"\\t\" << num_events <<\n \"\\t\" << hot_pixels.size() << \"\\t\\t0\\t(before)\" << std::endl;\n\n std::cout << std::setprecision(4) << topic_name << \"\\t\" << num_events_after <<\n \"\\t0\\t\\t\" << percent_events_discarded << \"\\t(after)\" << std::endl;\n\n if (!FLAGS_no_stats)\n {\n // save images\n std::string dstDir = OUTPUT_FOLDER + bag_name + \"/\";\n if (!one_topic)\n {\n dstDir += usable_filename(topic_name) + \"/\";\n }\n boost::filesystem::create_directories(dstDir); // create if 
needed\n std::string fname_b = dstDir + \"hist_before.png\";\n std::string fname_a = dstDir + \"hist_after.png\";\n std::string fname_hp = dstDir + \"hot_pixels.txt\";\n\n write_histogram_image(fname_b, histogram);\n write_histogram_image(fname_a, histogram_after, hot_pixels);\n write_hot_pixels(fname_hp, hot_pixels);\n }\n}\n\n} // namespace utils\n} // namespace dvs_hot_pixel_filter\n", "meta": {"hexsha": "b44c487b41c6529e99e2cc7654cb1ac5c9ab77a3", "size": 10907, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "dvs_hot_pixel_filter/src/utils.cpp", "max_stars_repo_name": "Tobias-Fischer/dvs_tools", "max_stars_repo_head_hexsha": "10d88232f6376c4b95941baa26358929a6356a87", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18.0, "max_stars_repo_stars_event_min_datetime": "2019-05-20T12:53:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-26T19:19:21.000Z", "max_issues_repo_path": "dvs_hot_pixel_filter/src/utils.cpp", "max_issues_repo_name": "Tobias-Fischer/dvs_tools", "max_issues_repo_head_hexsha": "10d88232f6376c4b95941baa26358929a6356a87", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dvs_hot_pixel_filter/src/utils.cpp", "max_forks_repo_name": "Tobias-Fischer/dvs_tools", "max_forks_repo_head_hexsha": "10d88232f6376c4b95941baa26358929a6356a87", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6.0, "max_forks_repo_forks_event_min_datetime": "2020-04-15T16:41:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-29T00:22:04.000Z", "avg_line_length": 30.2132963989, "max_line_length": 89, "alphanum_fraction": 0.6262033556, "num_tokens": 2830, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.5039061705290805, "lm_q2_score": 0.3960681662740417, "lm_q1q2_score": 0.19958119293562745}} {"text": "//\n// main.cpp\n// ~~~~~~~~\n//\n// Copyright (c) 2003-2012 Christopher M. 
Kohlhoff (chris at kohlhoff dot com)\n//\n// Distributed under the Boost Software License, Version 1.0. (See accompanying\n// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"server.hpp\"\n\nusing namespace http::server ;\nusing namespace std ;\n\nclass TileHandler: public request_handler {\npublic:\n void handle_request(const request& req, reply& rep) {\n\n boost::smatch m ;\n boost::regex_match(req.path_, m, rx_) ;\n\n int zoom = stoi(m.str(1)) ;\n int tx = stoi(m.str(2)) ;\n int ty = stoi(m.str(3)) ;\n string extension = m.str(4) ;\n\n cout << tx << ' ' << ty << ' ' << zoom << endl ;\n }\n\n static boost::regex rx_ ;\n};\n\nboost::regex TileHandler::rx_(R\"(/map/[^/]+/tiles/[^/]+/(\\d+)/(\\d+)/(\\d+)\\.([^/]+))\") ;\n\nclass Factory: public request_handler_factory {\npublic:\n Factory() = default ;\n\n std::shared_ptr create(const request &req) {\n boost::smatch m ;\n if ( boost::regex_match(req.path_, m, TileHandler::rx_) )\n return std::shared_ptr(new TileHandler()) ;\n else\n return nullptr ;\n }\n};\n\nint main(int argc, char* argv[])\n{\n try\n {\n // Check command line arguments.\n if (argc != 5)\n {\n std::cerr << \"Usage: http_server
\\n\";\n std::cerr << \" For IPv4, try:\\n\";\n std::cerr << \" receiver 0.0.0.0 80 1 .\\n\";\n std::cerr << \" For IPv6, try:\\n\";\n std::cerr << \" receiver 0::0 80 1 .\\n\";\n return 1;\n }\n\n // Initialise the server.\n std::size_t num_threads = boost::lexical_cast(argv[3]);\n http::server::server s(make_shared(), argv[1], argv[2], num_threads);\n\n // Run the server until stopped.\n s.run();\n }\n catch (std::exception& e)\n {\n std::cerr << \"exception: \" << e.what() << \"\\n\";\n }\n\n return 0;\n}\n", "meta": {"hexsha": "b0851980af7d75bcaf5df0f857f9c721a2dc5535", "size": 2119, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/main.cpp", "max_stars_repo_name": "malasiot/wsrv", "max_stars_repo_head_hexsha": "b61344ecb6e528cbe6e7f8348d2df466a3920a42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12.0, "max_stars_repo_stars_event_min_datetime": "2017-05-11T21:44:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-30T08:35:56.000Z", "max_issues_repo_path": "src/server/main.cpp", "max_issues_repo_name": "malasiot/mftools", "max_issues_repo_head_hexsha": "1edeec673110cdd5fa904ff2ff88289b6c3ec324", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8.0, "max_issues_repo_issues_event_min_datetime": "2016-10-27T10:10:05.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-07T21:27:02.000Z", "max_forks_repo_path": "src/server/main.cpp", "max_forks_repo_name": "malasiot/mftools", "max_forks_repo_head_hexsha": "1edeec673110cdd5fa904ff2ff88289b6c3ec324", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2017-10-17T08:18:58.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-12T11:44:23.000Z", "avg_line_length": 25.2261904762, "max_line_length": 87, "alphanum_fraction": 0.5832940066, "num_tokens": 606, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.5039061705290805, "lm_q2_score": 0.3960681662740416, "lm_q1q2_score": 0.19958119293562743}} {"text": "////////////////////////////////////////////////////////////////////////////////\n// Copyright 2019 FZI Research Center for Information Technology\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are met:\n//\n// 1. Redistributions of source code must retain the above copyright notice,\n// this list of conditions and the following disclaimer.\n//\n// 2. Redistributions in binary form must reproduce the above copyright notice,\n// this list of conditions and the following disclaimer in the documentation\n// and/or other materials provided with the distribution.\n//\n// 3. Neither the name of the copyright holder nor the names of its\n// contributors may be used to endorse or promote products derived from this\n// software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n// POSSIBILITY OF SUCH DAMAGE.\n////////////////////////////////////////////////////////////////////////////////\n\n//-----------------------------------------------------------------------------\n/*!\\file DampedLeastSquaresSolver.cpp\n *\n * \\author Stefan Scherzinger \n * \\date 2020/03/27\n *\n */\n//-----------------------------------------------------------------------------\n\n// this package\n#include \n\n// Pluginlib\n#include \n\n// other\n#include \n\n/**\n * \\class cartesian_controller_base::DampedLeastSquaresSolver \n *\n * Users may explicitly specify this solver with \\a \"damped_least_squares\" as \\a\n * ik_solver in their controllers.yaml configuration file for each controller:\n *\n * \\code{.yaml}\n * :\n * type: \"\"\n * ik_solver: \"damped_least_squares\"\n * ...\n *\n * solver:\n * ...\n * damped_least_squares:\n * alpha: 0.5\n * \\endcode\n *\n */\nPLUGINLIB_EXPORT_CLASS(cartesian_controller_base::DampedLeastSquaresSolver, cartesian_controller_base::IKSolver)\n\n\n\n\n\nnamespace cartesian_controller_base{\n\n DampedLeastSquaresSolver::DampedLeastSquaresSolver()\n : m_alpha(0.01)\n {\n }\n\n DampedLeastSquaresSolver::~DampedLeastSquaresSolver(){}\n\n trajectory_msgs::JointTrajectoryPoint DampedLeastSquaresSolver::getJointControlCmds(\n ros::Duration period,\n const ctrl::Vector6D& net_force)\n {\n // Compute joint jacobian\n m_jnt_jacobian_solver->JntToJac(m_current_positions,m_jnt_jacobian);\n\n // Compute joint velocities according 
to:\n // \\f$ \\dot{q} = ( J^T J + \\alpha^2 I )^{-1} J^T f \\f$\n ctrl::MatrixND identity;\n identity.setIdentity(m_number_joints, m_number_joints);\n\n m_current_velocities.data =\n (m_jnt_jacobian.data.transpose() * m_jnt_jacobian.data\n + m_alpha * m_alpha * identity).inverse() * m_jnt_jacobian.data.transpose() * net_force;\n\n // Integrate once, starting with zero motion\n m_current_positions.data = m_last_positions.data + 0.5 * m_current_velocities.data * period.toSec();\n\n // Make sure positions stay in allowed margins\n applyJointLimits();\n\n // Apply results\n trajectory_msgs::JointTrajectoryPoint control_cmd;\n for (int i = 0; i < m_number_joints; ++i)\n {\n control_cmd.positions.push_back(m_current_positions(i));\n control_cmd.velocities.push_back(m_current_velocities(i));\n\n // Accelerations should be left empty. Those values will be interpreted\n // by most hardware joint drivers as max. tolerated values. As a\n // consequence, the robot will move very slowly.\n }\n control_cmd.time_from_start = period; // valid for this duration\n\n return control_cmd;\n }\n\n bool DampedLeastSquaresSolver::init(ros::NodeHandle& nh,\n const KDL::Chain& chain,\n const KDL::JntArray& upper_pos_limits,\n const KDL::JntArray& lower_pos_limits)\n {\n IKSolver::init(nh, chain, upper_pos_limits, lower_pos_limits);\n\n m_jnt_jacobian_solver.reset(new KDL::ChainJntToJacSolver(m_chain));\n m_jnt_jacobian.resize(m_number_joints);\n\n // Connect dynamic reconfigure and overwrite the default values with values\n // on the parameter server. 
This is done automatically if parameters with\n // the according names exist.\n m_callback_type = boost::bind(\n &DampedLeastSquaresSolver::dynamicReconfigureCallback, this, _1, _2);\n\n m_dyn_conf_server.reset(\n new dynamic_reconfigure::Server(\n ros::NodeHandle(nh.getNamespace() + \"/solver/damped_least_squares\")));\n m_dyn_conf_server->setCallback(m_callback_type);\n return true;\n }\n\n void DampedLeastSquaresSolver::dynamicReconfigureCallback(IKConfig& config, uint32_t level)\n {\n m_alpha = config.alpha;\n }\n\n} // namespace\n", "meta": {"hexsha": "66a6b5321963265b1c504330bdc6a1eb9809c44f", "size": 5588, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "cartesian_controller_base/src/DampedLeastSquaresSolver.cpp", "max_stars_repo_name": "graziegrazie/cartesian_controllers", "max_stars_repo_head_hexsha": "40156bbbd45de17f0e03e9007863d087295c318f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 137.0, "max_stars_repo_stars_event_min_datetime": "2019-11-01T07:14:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T08:14:52.000Z", "max_issues_repo_path": "cartesian_controller_base/src/DampedLeastSquaresSolver.cpp", "max_issues_repo_name": "graziegrazie/cartesian_controllers", "max_issues_repo_head_hexsha": "40156bbbd45de17f0e03e9007863d087295c318f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 39.0, "max_issues_repo_issues_event_min_datetime": "2020-05-14T20:40:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:17:50.000Z", "max_forks_repo_path": "cartesian_controller_base/src/DampedLeastSquaresSolver.cpp", "max_forks_repo_name": "graziegrazie/cartesian_controllers", "max_forks_repo_head_hexsha": "40156bbbd45de17f0e03e9007863d087295c318f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 37.0, "max_forks_repo_forks_event_min_datetime": "2019-11-01T07:05:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T08:26:35.000Z", "avg_line_length": 37.2533333333, 
"max_line_length": 112, "alphanum_fraction": 0.6791338583, "num_tokens": 1224, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.596433160611502, "lm_q2_score": 0.33458944788835565, "lm_q1q2_score": 0.1995602419113094}} {"text": "#ifndef __SUPERREACTIONNETWORK_CPP_\n#define __SUPERREACTIONNETWORK_CPP_\n\n#include \n#include //for json_reader\n#include //for write_xml\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include /* log */\n\n#include \"../../../include/tools/misc/misc_template.h\"\n#include \"../../../include/reactionNetwork/superReactionNetwork/superReactionNetwork.h\"\n\n//infinitesimal dt\n#define INFINITESIMAL_DT 1.0E-14\n\nnamespace reactionNetwork_sr {\n\n\tsuperReactionNetwork::superReactionNetwork(std::vector uncertainties, std::size_t random_seed_for_this_core, std::string cwd_in)\n\t{\n\n\t\t//current working directory\n\t\tthis->cwd = cwd_in;\n\n\t\t//read configuration file \"setting.json\"\n\t\tboost::property_tree::read_json(this->cwd + std::string(\"/input/setting.json\"), rnk_pt, std::locale());\n\t\tread_init_config();\n\n\t\t//random seed for this core\n\t\tthis->random_seed_for_this_core = static_cast(random_seed_for_this_core);\n\n\t\trand = new random_sr::random(this->random_seed_for_this_core);\n\n\t\tstd::vector edgeVector; std::vector edgePro; std::vector vertex_info;\n\t\tread_chem_out_spe_for_network_info(edgeVector, edgePro, vertex_info);\n\n\t\tset_species_initial_concentration();\n\n\t\t//initialize graph\n\t\tinitGraph(edgeVector, edgePro);\n\t\tupdate_vertex_info(vertex_info);\n\n\t\tthis->follow_hypothesized_atom = this->check_hypothesized_atom();\n\t\tthis->condense_chatterings = this->check_condense_chatterings();\n\n\t\t//read reaction constraint or species constraint information from file, then set whether apply pathway constraint\n\t\tthis->update_pathway_constraint_from_file_rnk();\n\t\tthis->apply_pathway_constraint = 
this->check_apply_pathway_constraint();\n\n\t\t//update super atom info\n\t\tthis->update_super_atom_info(rnk_pt.get(\"pathway.super_atom\"));\n\t\tif (this->follow_hypothesized_atom) {\n\t\t\tthis->read_atom_scheme();\n\t\t\tthis->update_hypothesized_atom_info(rnk_pt.get(\"pathway.atom_followed\"));\n\t\t}\n\t\tset_spe_out_reaction_info();\n\t\tset_reaction_out_spe_info();\n\t\tset_out_spe_index_branching_ratio_map_map_with_constraint();\n\n\t\t//set terminal species\n\t\tset_terminal_spe();\n\n\t\tinitiate_M_matrix();\n\t\tinitiate_R_matrix();\n\n\t\tset_is_reaction_rate_nonzero_from_setting_file();\n\n\t\t//print();\n\t\t//print_network();\n\t\t//std::cout << \"test\" << std::endl;\n\t\t//print_initial_spe_label_json();\n\n\t}\n\n\tsuperReactionNetwork::~superReactionNetwork()\n\t{\n\t\tdelete rand;\n\t}\n\n\t////Read initial configuration file named \"setting.cfg\"\n\tbool superReactionNetwork::read_init_config()\n\t{\n\t\tset_min_time(this->rnk_pt.get(\"time.min_time\"));\n\t\tset_max_time(this->rnk_pt.get(\"time.max_time\"));\n\t\tset_sys_min_time(this->rnk_pt.get(\"time.sys_min_time\"));\n\t\tset_absolute_end_t(this->rnk_pt.get(\"pathway.end_t\") * this->rnk_pt.get(\"time.tau\"));\n\n\t\treturn true;\n\t}\n\n\tvoid superReactionNetwork::read_chem_out_spe_for_network_info(const std::string & cwd, std::vector& element_v, std::vector &species_network_v, std::vector &reaction_network_v, rsp::spe_name_index_map_t & spe_name_index_map, std::vector& edgeVector, std::vector& edgePro, std::vector& vertex_info, bool w2f)\n\t{\n\t\t/*\n\t\t* read species information\n\t\t*/\n\t\tstd::vector species_v;\n\t\t//read element, species and reaction information\n\t\trsp::relationshipParser::read_chem_out_ele_spe(element_v, species_v, spe_name_index_map, cwd + \"/input/chem.out\");\n\n\t\tfor (std::size_t i = 0; i < species_v.size(); ++i) {/*for1*/\n\t\t\trsp::spe_info_base species_network_temp;\n\t\t\tspecies_network_temp.prob_max = 
species_v[i].prob_max;\n\t\t\tspecies_network_temp.prob_min = species_v[i].prob_min;\n\t\t\tspecies_network_temp.reaction_k_index_s_coef_v = species_v[i].reaction_k_index_s_coef_v;\n\t\t\tspecies_network_temp.spe_component = species_v[i].spe_component;\n\t\t\tspecies_network_temp.spe_conc = species_v[i].spe_conc;\n\t\t\tspecies_network_temp.spe_index = species_v[i].spe_index;\n\t\t\tspecies_network_temp.spe_name = species_v[i].spe_name;\n\t\t\tspecies_network_temp.survival_probability = species_v[i].survival_probability;\n\n\t\t\tspecies_network_v.push_back(species_network_temp);\n\t\t}/*for1*/\n\n\t\t //update vetex information, here our vertex just store the index of vextex in species space, look the difinition of VertexProperties_graph\n\t\tvertex_info.resize(species_network_v.size());\n\t\tfor (std::size_t i = 0; i < species_network_v.size(); ++i) {\n\t\t\tvertex_info[i].vertex = species_network_v[i].spe_index;\n\t\t}\n\n\n\t\t/*\n\t\t* read reaction information\n\t\t*/\n\t\tstd::vector reaction_v;\n\t\t//include duplicated reactions\n\t\trsp::relationshipParser::read_chem_out_reaction(species_v, reaction_v, spe_name_index_map, cwd + \"/input/chem.out\");\n\t\trsp::relationshipParser::set_reaction_net_reactant_product(reaction_v);\n\t\t//reactionNetwork and chemkin index lookup table, notice chemkin index is Fortran index style\n\t\trsp::reactionNetwork_chemkin_index_map_t reactionNetwork_chemkin_index_map;\n\t\trsp::relationshipParser::read_reactionNetwork_chemkin_index_map(reactionNetwork_chemkin_index_map, cwd + \"/input/chem.out\");\n\n\n\t\trsp::index_int_t edge_counter = 0;\n\t\t//construct A->B transition\n\t\tEdgeProperties_graph edgePro_tmp;\n\n\t\tfor (rsp::reactionNetwork_chemkin_index_map_t::const_iterator itr = reactionNetwork_chemkin_index_map.begin(); itr != reactionNetwork_chemkin_index_map.end(); ++itr) {\n\t\t\trsp::reaction_info_base reaction_info_base_temp;\n\t\t\t//just need the first one if have multiple duplicated 
reactions\n\t\t\t//std::cout << itr->first << \"\\t-->\\t\";\n\t\t\t//std::cout << itr->second[0] << \"\\t\";\n\t\t\t//need to convert Fortran style index into C++ style index\n\t\t\trsp::index_int_t reaction_v_ind = static_cast(abs(itr->second[0])) - 1;\n\t\t\t//std::cout << reaction_v[reaction_v_ind].reaction_name << \"\\t\";\n\t\t\t//std::cout << reaction_v[reaction_v_ind].reaction_direction << \"\\n\";\n\n\t\t\t//forward reaction\n\t\t\tif (itr->second[0] > 0) {\n\t\t\t\treaction_info_base_temp.reaction_direction = rsp::forward;\n\n\t\t\t\t//edge vector\n\t\t\t\tfor (std::size_t i = 0; i < reaction_v[reaction_v_ind].net_reactant.size(); ++i) {/*for i*/\n\t\t\t\t\tfor (std::size_t j = 0; j < reaction_v[reaction_v_ind].net_product.size(); ++j) {/*for j*/\n\t\t\t\t\t\t//std::cout << \"[\" << reaction_v[reaction_v_ind].net_reactant[i].first << \",\" << reaction_v[reaction_v_ind].net_product[j].first << \"]\" << \"\\t\";\n\t\t\t\t\t\tedgeVector.push_back(std::make_pair(reaction_v[reaction_v_ind].net_reactant[i].first, reaction_v[reaction_v_ind].net_product[j].first));\n\t\t\t\t\t\tedgePro_tmp.edge_index = edge_counter; ++edge_counter;\n\t\t\t\t\t\tedgePro_tmp.reaction_index = itr->first;\n\t\t\t\t\t\tedgePro_tmp.s_coef_reactant = reaction_v[reaction_v_ind].net_reactant[i].second;\n\t\t\t\t\t\tedgePro_tmp.s_coef_product = reaction_v[reaction_v_ind].net_product[j].second;\n\n\t\t\t\t\t\tedgePro.push_back(edgePro_tmp);\n\n\t\t\t\t\t}/*for j*/\n\t\t\t\t}/*for i*/\n\t\t\t\t //std::cout << std::endl;\n\t\t\t}/*if*/\n\t\t\t //backward reaction\n\t\t\telse if (itr->second[0] < 0) {\n\t\t\t\treaction_info_base_temp.reaction_direction = rsp::backward;\n\t\t\t\t//edge vector\n\t\t\t\tfor (std::size_t i = 0; i < reaction_v[reaction_v_ind].net_product.size(); ++i) {/*for i*/\n\t\t\t\t\tfor (std::size_t j = 0; j < reaction_v[reaction_v_ind].net_reactant.size(); ++j) {/*for j*/\n\t\t\t\t\t\t//std::cout << \"[\" << reaction_v[reaction_v_ind].net_product[i].first << \",\" << 
reaction_v[reaction_v_ind].net_reactant[j].first << \"]\" << \"\\t\";\n\t\t\t\t\t\tedgeVector.push_back(std::make_pair(reaction_v[reaction_v_ind].net_product[i].first, reaction_v[reaction_v_ind].net_reactant[j].first));\n\t\t\t\t\t\tedgePro_tmp.edge_index = edge_counter; ++edge_counter;\n\t\t\t\t\t\tedgePro_tmp.reaction_index = itr->first;\n\t\t\t\t\t\tedgePro_tmp.s_coef_reactant = reaction_v[reaction_v_ind].net_product[i].second;\n\t\t\t\t\t\tedgePro_tmp.s_coef_product = reaction_v[reaction_v_ind].net_reactant[j].second;\n\n\t\t\t\t\t\tedgePro.push_back(edgePro_tmp);\n\n\t\t\t\t\t}/*for j*/\n\n\t\t\t\t}/*for i*/\n\n\n\t\t\t}/*else if*/\n\t\t\treaction_info_base_temp.reaction_name = reaction_v[reaction_v_ind].reaction_name;\n\t\t\treaction_network_v.push_back(reaction_info_base_temp);\n\n\t\t}/*for*/\n\n\t\tif (w2f == true)\n\t\t{\n\t\t\trsp::relationshipParser::spe_information_s2f(species_v, cwd + std::string(\"/input/species_labelling.csv\"));\n\t\t\trsp::relationshipParser::spe_information_s2json(species_v, cwd + std::string(\"/input/species_information.json\"));\n\n\t\t\trsp::relationshipParser::reaction_information_s2f(species_v, reaction_v, reactionNetwork_chemkin_index_map, cwd + std::string(\"/input/reaction_labelling.csv\"));\n\t\t\trsp::relationshipParser::reaction_information_s2json(species_v, reaction_v, reactionNetwork_chemkin_index_map, cwd + std::string(\"/input/reaction_information.json\"));\n\t\t}\n\t}\n\n\tvoid superReactionNetwork::read_chem_out_spe_for_network_info(std::vector &edgeVector, std::vector &edgePro, std::vector& vertex_info)\n\t{\n\n\t\tread_chem_out_spe_for_network_info(this->cwd, this->element_v, this->species_network_v, this->reaction_network_v, this->spe_name_index_map, edgeVector, edgePro, vertex_info);\n\t}\n\n\tvoid superReactionNetwork::update_super_atom_info(std::string super_atom)\n\t{\n\t\tthis->super_atom = super_atom;\n\t\tfor (std::size_t i = 0; i < this->species_network_v.size(); ++i) 
{\n\t\t\tthis->species_network_v[i].spe_component[super_atom] = 0;\n\n\t\t\tfor (auto x : this->element_v)\n\t\t\t\tthis->species_network_v[i].spe_component[super_atom] += this->species_network_v[i].spe_component[x.ele_name];\n\t\t}\n\n\t}\n\n\tvoid superReactionNetwork::read_atom_scheme()\n\t{\n\t\tboost::property_tree::read_json(this->cwd + std::string(\"/input/atom_scheme.json\"), this->rnk_atom_scheme, std::locale());\n\t}\n\n\tbool superReactionNetwork::check_hypothesized_atom()\n\t{\n\t\tif (rnk_pt.get(\"pathway.atom_followed\") == rnk_pt.get(\"pathway.super_atom\"))\n\t\t\treturn false;\n\t\tfor (auto x : this->element_v) {\n\t\t\tif (rnk_pt.get(\"pathway.atom_followed\") == x.ele_name)\n\t\t\t\treturn false;\n\t\t}\n\t\treturn true;\n\t}\n\n\tvoid superReactionNetwork::update_hypothesized_atom_info(std::string hypothesized_atom)\n\t{\n\t\tauto ha_dict = this->rnk_atom_scheme.get_child(hypothesized_atom);\n\n\t\tfor (std::size_t i = 0; i < this->species_network_v.size(); ++i) {\n\t\t\tthis->species_network_v[i].spe_component[hypothesized_atom] = 0;\n\t\t}\n\n\t\tfor (auto key1 : ha_dict) {\n\t\t\tstd::string spe_name = boost::lexical_cast(key1.first);\n\t\t\trsp::index_int_t spe_idx = this->spe_name_index_map[spe_name];\n\t\t\trsp::index_int_t coef = boost::lexical_cast(key1.second.get_value());\n\t\t\tthis->species_network_v[spe_idx].spe_component[hypothesized_atom] = coef;\n\t\t}\n\t}\n\n\tbool superReactionNetwork::check_condense_chatterings()\n\t{\n\t\tif (this->rnk_pt.get(\"network.condense_chatterings\") == \"yes\") {\n\t\t\tstd::cout << \"\\ncondense chatterings.\\n\";\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\n\tvoid superReactionNetwork::update_pathway_constraint_from_file_rnk()\n\t{\n\t\t// check reaction constraint\n\t\tfor (auto key1 : this->rnk_pt.get_child(\"pathway.species_sink_reaction_constraint\")) {\n\n\t\t\trsp::index_int_t species_idx = boost::lexical_cast(std::stoi(key1.first));\n\t\t\tstd::unordered_set< rsp::index_int_t > 
reaction_set;\n\t\t\tfor (auto key2 : key1.second) {\n\t\t\t\trsp::index_int_t x = key2.second.get_value();\n\t\t\t\t//std::cout << x;\n\t\t\t\treaction_set.insert(x);\n\t\t\t}\n\n\t\t\tthis->sp_pathway_constarint_rnk->species_sink_reaction_set_map[species_idx] = reaction_set;\n\t\t}\n\n\t\tif (this->sp_pathway_constarint_rnk->species_sink_reaction_set_map.size() > 0)\n\t\t\tthis->sp_pathway_constarint_rnk->species_sink_through_reaction_constraint = true;\n\t\telse\n\t\t\tthis->sp_pathway_constarint_rnk->species_sink_through_reaction_constraint = false;\n\n\t\t// check species constraint\n\t\tfor (auto key1 : this->rnk_pt.get_child(\"pathway.reaction_out_species_constraint\")) {\n\n\t\t\trsp::index_int_t reaction_idx = boost::lexical_cast(std::stoi(key1.first));\n\t\t\tstd::unordered_set< rsp::index_int_t > species_set;\n\t\t\tfor (auto key2 : key1.second) {\n\t\t\t\trsp::index_int_t x = key2.second.get_value();\n\t\t\t\t//std::cout << x;\n\t\t\t\tspecies_set.insert(x);\n\t\t\t}\n\n\t\t\tthis->sp_pathway_constarint_rnk->reaction_out_species_set_map[reaction_idx] = species_set;\n\t\t}\n\t\tif (this->sp_pathway_constarint_rnk->reaction_out_species_set_map.size() > 0)\n\t\t\tthis->sp_pathway_constarint_rnk->reaction_out_species_constraint = true;\n\t\telse\n\t\t\tthis->sp_pathway_constarint_rnk->reaction_out_species_constraint = false;\n\n\t\t// general not allowed out species constraint\n\t\tfor (auto key1 : this->rnk_pt.get_child(\"pathway.not_allowed_out_species\"))\n\t\t{\n\t\t\tthis->sp_pathway_constarint_rnk->not_allowed_out_species_set.insert(key1.second.get_value());\n\t\t}\n\t\tif (this->sp_pathway_constarint_rnk->not_allowed_out_species_set.size() > 0)\n\t\t\tthis->sp_pathway_constarint_rnk->not_allowed_out_species_constraint = true;\n\t\telse\n\t\t\tthis->sp_pathway_constarint_rnk->not_allowed_out_species_constraint = false;\n\n\t\t// must react species set\n\t\tfor (auto key1 : 
this->rnk_pt.get_child(\"pathway.must_react_species\"))\n\t\t{\n\t\t\tthis->sp_pathway_constarint_rnk->must_react_species_set.insert(key1.second.get_value());\n\t\t}\n\n\t}\n\n\tbool superReactionNetwork::check_apply_pathway_constraint()\n\t{\n\t\t// two conditions, 1) pathway.apply_pathway_constraint set to be yes,\n\t\t// 2) either \"reaction_constraint\" is set or \"species_constraint\" is et\n\t\tif (this->rnk_pt.get(\"pathway.apply_pathway_constraint\") == std::string(\"yes\")) {\n\t\t\tif (this->sp_pathway_constarint_rnk->species_sink_through_reaction_constraint == true ||\n\t\t\t\tthis->sp_pathway_constarint_rnk->reaction_out_species_constraint == true ||\n\t\t\t\tthis->sp_pathway_constarint_rnk->not_allowed_out_species_constraint == true)\n\t\t\t\treturn true;\n\t\t}\n\n\t\treturn false;\n\t}\n\n\tvoid superReactionNetwork::set_species_initial_concentration()\n\t{\n\t\t//read with json_parser as property_tree\n\t\tfor (auto key1 : this->rnk_pt.get_child(\"chem_init.species_index_concentration\")) {\n\t\t\tthis->species_network_v[boost::lexical_cast(key1.first)].spe_conc =\n\t\t\t\tkey1.second.get_value()*this->rnk_pt.get(\"SOHR_init.massConservationFactor\");\n\t\t}\n\n\t\tif (this->rnk_pt.get(\"propagator.normalize_initial_concentration\") == \"yes\") {\n\t\t\t//renormalization\n\t\t\tdouble total_conc = 0.0;\n\t\t\tfor (std::size_t i = 0; i < this->species_network_v.size(); ++i) {\n\t\t\t\ttotal_conc += this->species_network_v[i].spe_conc;\n\t\t\t}//for\n\n\t\t\tfor (std::size_t i = 0; i < this->species_network_v.size(); ++i) {\n\t\t\t\tthis->species_network_v[i].spe_conc /= total_conc;\n\t\t\t}//for\n\t\t}//if\n\n\t}//set_initial_concentration\n\n\trsp::my_time_t superReactionNetwork::get_max_time() const\n\t{\n\t\treturn this->max_time;\n\t}\n\n\trsp::my_time_t superReactionNetwork::return_tau() const\n\t{\n\t\treturn this->rnk_pt.get(\"time.tau\");\n\t}\n\n\trsp::index_int_t superReactionNetwork::return_initial_spe() const\n\t{\n\t\treturn 
this->rnk_pt.get(\"pathway.init_spe\");\n\t}\n\n\tvoid superReactionNetwork::set_terminal_spe()\n\t{\n\t\t//86 and 89 are terminal species, they transform to each other very fast\n\t\tstd::set terminal_spe_index;\n\n\t\tfor (auto key1 : this->rnk_pt.get_child(\"pathway.terminal_species\")) {\n\t\t\t//std::cout<()<());\n\t\t}\n\n\t\t//search network also, if a species has no out reaction or out species\n\t\t//it shall be a terminal species\n\t\t//under current atom scheme, followed a species\n\t\tstd::string atom_followed = this->rnk_pt.get(\"pathway.atom_followed\");\n\n\t\tfor (auto x : this->species_network_v) {\n\t\t\tif (x.reaction_k_index_s_coef_v.size() == 0) {\n\t\t\t\tterminal_spe_index.insert(x.spe_index);\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tbool no_out_spe = true;\n\t\t\t// search all out reactions\n\t\t\tfor (auto y : x.reaction_k_index_s_coef_v) {\n\t\t\t\tauto rxn_idx = y.first;\n\n\t\t\t\t// search all out species\n\t\t\t\tfor (auto s : reaction_network_v[rxn_idx].out_spe_index_branching_ratio_map_map_with_constraint.at(atom_followed)) {\n\t\t\t\t\tif (s.second > 0) {\n\t\t\t\t\t\tno_out_spe = false;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (no_out_spe == false) {\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (no_out_spe == true) {\n\t\t\t\tterminal_spe_index.insert(x.spe_index);\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t}\n\n\t\tthis->terminal_species = terminal_spe_index;\n\t}\n\n\n\tvoid superReactionNetwork::initGraph(const vector& edgeVector, const std::vector& edgePro)\n\t{\n\t\tfor (std::size_t i = 0; i < edgeVector.size(); ++i)\n\t\t{\n\t\t\tAddEdge(edgeVector[i].first, edgeVector[i].second, edgePro[i]);\n\t\t}//for\n\n\t\t//update edge index\n\t\trsp::index_int_t count = 0;\n\t\tfor (edge_range_t er = getEdges(); er.first != er.second; ++er.first)\n\t\t{\n\t\t\tedge_index_to_edge_iterator.push_back(er.first);\n\t\t\tproperties(*er.first).edge_index = count++;\n\t\t}//for\n\t\tnum_edges = get_num_edges();\n\t\tnum_vertices = 
get_num_vertices();\n\t\t//edge_index_map=get(edge_index, graph);\n\t}\n\n\tvoid superReactionNetwork::update_vertex_info(const std::vector& vertex_info) {\n\t\tvertex_range_t vp;\n\t\tint count = 0;\n\t\tfor (vp = vertices(graph); vp.first != vp.second; ++vp.first, ++count)\n\t\t{\n\t\t\tproperties(*vp.first).vertex = vertex_info[count].vertex;\n\t\t\t//std::cout << properties(*vp.first).vertex << std::endl;\n\t\t}\n\t}\n\n\tvoid superReactionNetwork::set_spe_out_reaction_info()\n\t{\n\t\tmt::vector_sr reaction_index_s_coef_v;\n\t\tfor (rsp::index_int_t i = 0; i < (rsp::index_int_t)this->species_network_v.size(); ++i) {\n\t\t\t//terminal species\n\t\t\tif (std::find(this->terminal_species.begin(), this->terminal_species.end(), i) != this->terminal_species.end())\n\t\t\t\tcontinue;\n\t\t\treaction_index_s_coef_v.clear();\n\t\t\tsearch_for_out_reaction(i, reaction_index_s_coef_v);\n\t\t\tthis->species_network_v[i].reaction_k_index_s_coef_v = reaction_index_s_coef_v;\n\t\t}\n\n\t}\n\n\t/*\n\t * search out edge reactions of a species, record the reactant stoichoimetric coefficient\n\t */\n\tvoid superReactionNetwork::search_for_out_reaction(vertex_t vertex, mt::vector_sr & reaction_index_s_coef_v) {\n\t\tif (vertex >= (vertex_t)getVertexCount())\n\t\t\treturn;\n\t\tfor (out_edge_range_t itr = getOutEdges(vertex); itr.first != itr.second; ++itr.first) {\n\t\t\treaction_index_s_coef_v.insert_sr(std::make_pair(properties(*itr.first).reaction_index, properties(*itr.first).s_coef_reactant));\n\t\t}\n\n\t}\n\n\tbool superReactionNetwork::search_for_out_spe(rsp::index_int_t reaction_index, std::vector& out_spe_index_weight_v, std::string atom_followed)\n\t{\n\t\tmt::vector_sr out_spe_index_weight_v_tmp;\n\n\t\tboost::property_map::type vertex_id = get(vertex_index, graph);\n\t\tstd::pair vp;\n\t\tfor (edge_range_t er = getEdges(); er.first != er.second; ++er.first) {//for\n\t\t\t//if found\n\t\t\tif (properties(*er.first).reaction_index == reaction_index) 
{//if\n\t\t\t\tstd::size_t spe_index = get(vertex_id, target(*er.first, graph));\n\t\t\t\t//ignore duplicate elements\n\t\t\t\tout_spe_index_weight_v_tmp.insert_sr(\n\t\t\t\t\tstd::make_pair(spe_index,\n\t\t\t\t\t\t/*spe index*/properties(*er.first).s_coef_product* this->species_network_v[spe_index].spe_component[atom_followed] /*weight*/)\n\t\t\t\t);\n\n\t\t\t}//if\n\t\t}//for\n\n\t\tout_spe_index_weight_v = out_spe_index_weight_v_tmp;\n\n\t\treturn true;\n\t}\n\n\tbool superReactionNetwork::set_reaction_out_spe_info(std::string atom_followed)\n\t{\n\t\tstd::vector out_spe_index_weight_v_tmp;\n\t\tfor (rsp::index_int_t i = 0; i < (rsp::index_int_t)this->reaction_network_v.size(); ++i) {\n\t\t\tout_spe_index_weight_v_tmp.clear();\n\t\t\tsearch_for_out_spe(i, out_spe_index_weight_v_tmp, atom_followed);\n\t\t\tthis->reaction_network_v[i].out_spe_index_weight_v_map[atom_followed] = out_spe_index_weight_v_tmp;\n\t\t}\n\n\t\treturn true;\n\t}\n\n\tvoid superReactionNetwork::set_reaction_out_spe_info()\n\t{\n\t\tfor (auto x : this->element_v)\n\t\t\tthis->set_reaction_out_spe_info(x.ele_name);\n\t\t//super atom\n\t\tthis->set_reaction_out_spe_info(rnk_pt.get(\"pathway.super_atom\"));\n\t\tif (this->follow_hypothesized_atom) {\n\t\t\tthis->set_reaction_out_spe_info(rnk_pt.get(\"pathway.atom_followed\"));\n\t\t}\n\t}\n\n\tvoid superReactionNetwork::set_out_spe_index_branching_ratio_map_map_with_constraint(std::string atom_followed)\n\t{\n\n\t\tfor (std::size_t r_index = 0; r_index < this->reaction_network_v.size(); ++r_index) {\n\t\t\t// remember to check constraint first\n\n\t\t\tdouble prob_total = 0.0;\n\t\t\t////calcualte out spe total weight for a reaction\n\t\t\t//for (std::size_t i = 0; i < reaction_network_v[r_index].out_spe_index_weight_v_map[atom_followed].size(); ++i) {\n\t\t\t//\tprob_total += reaction_network_v[r_index].out_spe_index_weight_v_map[atom_followed][i].second;\n\t\t\t//}\n\n\t\t\tfor (auto s_i_w : 
reaction_network_v[r_index].out_spe_index_weight_v_map[atom_followed]) {\n\t\t\t\tauto s_idx = s_i_w.first;\n\t\t\t\tauto w = s_i_w.second;\n\n\t\t\t\t// not allowed out species constraint\n\t\t\t\tif (this->sp_pathway_constarint_rnk->not_allowed_out_species_constraint == true &&\n\t\t\t\t\tthis->sp_pathway_constarint_rnk->not_allowed_out_species_set.count(s_idx) > 0)\n\t\t\t\t\tcontinue;\n\t\t\t\t// reaction out species constraint\n\t\t\t\tif (this->sp_pathway_constarint_rnk->reaction_out_species_constraint == true &&\n\t\t\t\t\tthis->sp_pathway_constarint_rnk->reaction_out_species_set_map.count(r_index) > 0 &&\n\t\t\t\t\tthis->sp_pathway_constarint_rnk->reaction_out_species_set_map.at(r_index).count(s_idx) == 0)\n\t\t\t\t\tcontinue;\n\n\t\t\t\t//otherwise, take this out species into account\n\t\t\t\tprob_total += w;\n\t\t\t}\n\n\t\t\tdouble reverse_p_t = 0.0;\n\t\t\tif (prob_total > 0.0)\n\t\t\t\treverse_p_t = 1.0 / prob_total;\n\t\t\t//calculate the fraction\n\t\t\tfor (auto s_i_w : reaction_network_v[r_index].out_spe_index_weight_v_map[atom_followed]) {\n\t\t\t\tauto s_idx = s_i_w.first;\n\t\t\t\tauto w = s_i_w.second;\n\n\t\t\t\t// not allowed out species constraint\n\t\t\t\tif (this->sp_pathway_constarint_rnk->not_allowed_out_species_constraint == true &&\n\t\t\t\t\tthis->sp_pathway_constarint_rnk->not_allowed_out_species_set.count(s_idx) > 0) {\n\t\t\t\t\treaction_network_v[r_index].out_spe_index_branching_ratio_map_map_with_constraint[atom_followed][s_idx] = 0;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\t// reaction out species constraint\n\t\t\t\tif (this->sp_pathway_constarint_rnk->reaction_out_species_constraint == true &&\n\t\t\t\t\tthis->sp_pathway_constarint_rnk->reaction_out_species_set_map.count(r_index) > 0 &&\n\t\t\t\t\tthis->sp_pathway_constarint_rnk->reaction_out_species_set_map.at(r_index).count(s_idx) == 0)\n\t\t\t\t{\n\t\t\t\t\treaction_network_v[r_index].out_spe_index_branching_ratio_map_map_with_constraint[atom_followed][s_idx] = 
0;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\t//otherwise, take this out species into account\n\t\t\t\treaction_network_v[r_index].out_spe_index_branching_ratio_map_map_with_constraint[atom_followed][s_idx] = w * reverse_p_t;\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tvoid superReactionNetwork::set_out_spe_index_branching_ratio_map_map_with_constraint()\n\t{\n\t\tfor (auto x : this->element_v)\n\t\t\tthis->set_out_spe_index_branching_ratio_map_map_with_constraint(x.ele_name);\n\t\t//super atom\n\t\tthis->set_out_spe_index_branching_ratio_map_map_with_constraint(rnk_pt.get(\"pathway.super_atom\"));\n\t\tif (this->follow_hypothesized_atom) {\n\t\t\tthis->set_out_spe_index_branching_ratio_map_map_with_constraint(rnk_pt.get(\"pathway.atom_followed\"));\n\t\t}\n\t}\n\n\n\tvoid superReactionNetwork::print_network(std::string filename)\n\t{\n\t\tstd::ofstream fn((this->cwd + filename).c_str());\n\t\tfn << \"Id,Label,Size\\n\";\n\t\tfor (std::size_t i = 0; i < this->species_network_v.size(); ++i) {\n\t\t\t//\t\tfn<cwd + std::string(\"/output/edge.csv\")).c_str());\n\t\tfe << \"Source,Target,Label,Weight\\n\";\n\n\t\t//this->update_reaction_rate(target_time);\n\t\tstd::pair vp;\n\t\tboost::property_map::type vertex_id = get(vertex_index, graph);\n\t\tfor (edge_range_t er = getEdges(); er.first != er.second; ++er.first) {\n\t\t\tfe << get(vertex_id, source(*er.first, graph)) << \",\" << get(vertex_id, target(*er.first, graph)) << \",\";\n\t\t\tfe << properties(*er.first).edge_index << \",\" << this->reaction_network_v[properties(*er.first).reaction_index].reaction_rate << std::endl;\n\t\t}\n\n\t\tfe.clear(); fe.close();\n\t}\n\n\tvoid superReactionNetwork::print()\n\t{\n\n\t\t////test random number generator\n\t\t//std::ofstream fout((this->cwd + std::string(\"/output/random.csv\")).c_str());\n\t\t//for (size_t i = 0; i < 1000; ++i) {\n\t\t//\tfout << std::setprecision(10) << this->rand->random01() << std::endl;\n\t\t//}\n\t\t//fout.clear(); fout.close();\n\n\t\t////\tfor(size_t i=0; 
i<10; ++i){\n\t\t////\t\tstd::cout<first << \"\\tindex:\\t\" << itr->second << std::endl;\n\t\t//}\n\n\n\n\t\t////species info\n\t\t//for (size_t i = 0; i < species_network_v.size(); ++i) {\n\t\t//\tstd::cout << species_network_v[i].spe_name << \"\\n\";\n\t\t//\tfor (rsp::spe_component_t::const_iterator itr = species_network_v[i].spe_component.begin(); itr != species_network_v[i].spe_component.end(); ++itr)\n\t\t//\t\tstd::cout << \"\\t-->\" << itr->first << \"\\t\" << itr->second << std::endl;\n\t\t//}\n\n\n\t\t//std::cout << \"\\nnum_vertices: \" << num_vertices << std::endl;\n\t\t//std::cout << \"num_edges: \" << num_edges << std::endl;\n\n\t\t//boost::property_map::type vertex_id = get(vertex_index, graph);\n\n\t\t//std::cout << \"\\nvertices(g) = \\n\";\n\t\t//std::pair vp;\n\t\t//for (vp = vertices(graph); vp.first != vp.second; ++vp.first) {//for\n\t\t//\tstd::cout << get(vertex_id, *vp.first) << \" \";\n\t\t//\tstd::cout << properties(*vp.first).vertex << std::endl;\n\t\t//}//for\n\t\t//std::cout << std::endl;\n\t\t////vertices properties\n\n\n\t\t//std::cout << \"edges(g) = \\n\";\n\n\t\t//for (edge_range_t er = getEdges(); er.first != er.second; ++er.first) {\n\t\t//\tstd::cout << \"(\" << get(vertex_id, source(*er.first, graph))\n\t\t//\t\t<< \",\" << get(vertex_id, target(*er.first, graph)) << \") \";\n\t\t//\tstd::cout << properties(*er.first).edge_index << \" \" << properties(*er.first).reaction_index <<\n\t\t//\t\t\" \" << properties(*er.first).s_coef_reactant << \" \" << properties(*er.first).s_coef_product << std::endl;\n\t\t//}\n\n\t\t//for (edge_range_t er = getEdges(); er.first != er.second; ++er.first) {\n\t\t//\tstd::cout << \"(\" << species_network_v[get(vertex_id, source(*er.first, graph))].spe_name\n\t\t//\t\t<< \",\" << species_network_v[get(vertex_id, target(*er.first, graph))].spe_name << \") \";\n\t\t//\tstd::cout << properties(*er.first).edge_index << \" \" << properties(*er.first).reaction_index <<\n\t\t//\t\t\" \" << 
properties(*er.first).s_coef_reactant << \" \" << properties(*er.first).s_coef_product << std::endl;\n\t\t//}\n\n\n\t\t/*\n\t\t * generate Mathematica Format graph input file\n\t\t * DirectedEdge[\"A\", \"B\"]\n\t\t */\n\n\t\t //\tfor (edge_range_t er=getEdges(); er.first!=er.second; ++er.first){\n\t\t //\t\tstd::cout << \"DirectedEdge[\\\"\" <edge_index_to_edge_iterator[edge_index_t];\n\t\t//std::cout << \"(\" << get(vertex_id, source(*iter_e, graph))\n\t\t//\t<< \",\" << get(vertex_id, target(*iter_e, graph)) << \") \";\n\t\t//std::cout << properties(*iter_e).edge_index << std::endl;\n\n\n\n\t\t////species out reaction info\n\t\t//for (std::size_t i = 0; i < species_network_v.size(); ++i) {\n\t\t//\tstd::cout << i << \"\\t\" << species_network_v[i].reaction_k_index_s_coef_v.size() << std::endl;\n\t\t//}\n\n\t\t////reaction out spe info\n\t\t//std::string atom_followed(\"H\");\n\t\t//for (std::size_t i = 0; i < reaction_network_v.size(); ++i) {\n\t\t//\tstd::cout << i << \"\\t\" << reaction_network_v[i].out_spe_index_weight_v_map[atom_followed].size() << std::endl;\n\t\t//}\n\n\t\t////std::cout << \"haha:\\t\" << std::endl;\n\t\t//for (std::size_t i = 0; i < species_network_v[2].reaction_k_index_s_coef_v.size(); ++i) {\n\t\t//\tstd::cout << species_network_v[2].reaction_k_index_s_coef_v[i].first << \"\\t\" << species_network_v[2].reaction_k_index_s_coef_v[i].second << std::endl;\n\t\t//}\n\n\n\t\t////reaction rates test\n\t\t//std::cout << \"reaction rates:\\n\";\n\t\t//for (auto x : reaction_network_v) {\n\t\t//\tstd::cout << x.reaction_rate << std::endl;\n\t\t//}\n\n\t\t//for (size_t i = 0; i < this->species_network_v.size(); i++)\n\t\t//{\n\t\t//\tstd::cout << i << \"\\t\" << this->species_network_v[i].spe_name << \", \";\n\t\t//\tfor (auto x : this->species_network_v[i].reaction_k_index_s_coef_v)\n\t\t//\t\tstd::cout << x.first << \"\\t\" << x.second << \", \";\n\t\t//\tstd::cout << \"\\n\";\n\t\t//}\n\n\t\trsp::reactionNetwork_chemkin_index_map_t 
reactionNetwork_chemkin_index_map;
		rsp::relationshipParser::read_reactionNetwork_chemkin_index_map(reactionNetwork_chemkin_index_map, this->cwd + "/input/chem.out");

		//NOTE(review): template arguments (e.g. std::vector<double>) appear to have been
		//stripped from this listing by extraction -- verify against the repository source.

		//follow atom "O"; dump, for every ordered species pair (i, j) with i < j,
		//the single-reaction paths connecting them, as CHEMKIN-style reaction labels
		std::string atom_followed("O");
		std::ofstream fout((this->cwd + std::string("/output/species_reaction_index_helper_") + atom_followed + std::string(".csv")).c_str());

		for (size_t i = 0; i < this->species_network_v.size(); i++)
		{
			fout << this->species_network_v[i].spe_name << "\n";
			for (size_t j = 0; j < this->species_network_v.size(); j++)
			{
				if (i < j) {
					//one-step paths from species i to species j following atom_followed
					auto p = this->get_R_matrix_element(atom_followed, i, j);
					if (p.size() > 0) {
						fout << "\t-->" << this->species_network_v[j].spe_name << "\n\t\t";
						for (size_t k = 0; k < p.size(); k++)
						{
							//this is edge index, one-step path, a path with only one reaction
							//edge index to reaction index
							auto iter_e = edge_index_to_edge_iterator[p[k][0]];
							auto reaction_index = this->properties(*iter_e).reaction_index;
							//fout << this->reaction_network_v[reaction_index].reaction_name;

							//map internal reaction index to the CHEMKIN index used in the paper;
							//sign encodes direction (negative = reverse reaction, printed with "*")
							auto reaction_index_in_paper = reactionNetwork_chemkin_index_map[reaction_index].front();

							if (reaction_index_in_paper > 0)
								fout << "R" << abs(reaction_index_in_paper) - 1;
							else
								/*fout << "R" << -1 * (abs(reaction_index_in_paper) - 1);*/
								fout << "R" << abs(reaction_index_in_paper) - 1 << "*";

							if (k != p.size() - 1)
								fout << ",";
							else
								fout << "\n";
						}
					}
				}
			}
		}

		fout.clear(); fout.close();


	}

	//Write one JSON-like object per species with name, structure, atom counts and
	//empty placeholder fields (moiety entries left blank for manual completion).
	//NOTE(review): the emitted text is not strictly valid JSON (trailing commas after
	//component entries, unquoted duplicate "" keys) -- confirm downstream consumers
	//tolerate this before changing the format.
	void superReactionNetwork::print_initial_spe_label_json(std::string filename) const
	{
		std::ofstream fout((cwd + filename).c_str());
		fout << "{\n";

		for (std::size_t i = 0; i < species_network_v.size(); ++i)
		{
			std::size_t NofAtoms = 0;
			std::size_t TypesofAtoms = 0;
			fout << "\"" << i << "\":\n" << "{";
			fout << "\"name\":" << "\"" << species_network_v[i].spe_name << "\",\n";
			fout << "\"structure\":" << "\"" << species_network_v[i].spe_name << "\",\n";

			//total atom count and number of distinct element types in this species
			for (rsp::spe_component_t::const_iterator itr = species_network_v[i].spe_component.begin(); itr != species_network_v[i].spe_component.end(); ++itr) {
				NofAtoms += itr->second;
				if (itr->second != 0)
					TypesofAtoms += 1;
			}
			fout << "\"TotalNofAtoms\":" << "\"" << NofAtoms << "\",\n";
			fout << "\"TypesofAtoms\":" << "\"" << TypesofAtoms << "\",\n";

			//per-element counts, skipping elements not present
			for (rsp::spe_component_t::const_iterator itr = species_network_v[i].spe_component.begin(); itr != species_network_v[i].spe_component.end(); ++itr) {
				if (itr->second != 0)
					fout << "\"" << itr->first << "\"" << ":" << "\"" << itr->second << "\",\n";
			}

			fout << "\"TypesofChemicalMoiety\":" << "\"" << "\",\n";

			//one empty placeholder entry per atom
			for (std::size_t j = 0; j < NofAtoms; ++j) {
				fout << "\"\":\"\"";
				if (j != NofAtoms - 1)
					fout << ",\n";
			}
			fout << std::endl << "}";
			if (i != species_network_v.size() - 1)
				fout << ",";
			fout << "\n";
		}

		fout << "}\n";
		fout.close();

	}


	//Randomly pick the next (outgoing) reaction of curr_spe, weighted by
	//s_coef_product * reaction_rate for each candidate reaction.
	//Assumes update_reaction_rate() has been called for the current time.
	rsp::index_int_t superReactionNetwork::spe_random_pick_next_reaction(vertex_t curr_spe)
	{
		//probability vector
		std::vector prob(this->species_network_v[curr_spe].reaction_k_index_s_coef_v.size());
		for (std::size_t i = 0; i < prob.size(); ++i) {
			prob[i] = this->species_network_v[curr_spe].reaction_k_index_s_coef_v[i].second* //s_coef_product
				this->reaction_network_v[this->species_network_v[curr_spe].reaction_k_index_s_coef_v[i].first].reaction_rate;
		}

		return this->species_network_v[curr_spe].reaction_k_index_s_coef_v[
			rand->return_index_randomly_given_probability_vector(prob)
		].first;
	}

	//Randomly pick the next (product) species of a given reaction, weighted by the
	//constraint-aware branching ratio for the followed atom.
	vertex_t superReactionNetwork::reaction_random_pick_next_spe(rsp::index_int_t reaction_index, std::string atom_followed)
	{
		//probability vector
		std::vector prob(this->reaction_network_v[reaction_index].out_spe_index_weight_v_map.at(atom_followed).size());
		for (std::size_t i = 0; i < this->reaction_network_v[reaction_index].out_spe_index_weight_v_map.at(atom_followed).size(); ++i) {
			//prob[i] = this->reaction_network_v[reaction_index].out_spe_index_weight_v_map[atom_followed][i].second;

			auto s_i = this->reaction_network_v[reaction_index].out_spe_index_weight_v_map.at(atom_followed)[i].first;
			prob[i] = this->reaction_network_v[reaction_index].out_spe_index_branching_ratio_map_map_with_constraint.at(atom_followed).at(s_i);
		}

		return this->reaction_network_v[reaction_index].out_spe_index_weight_v_map[atom_followed][
			rand->return_index_randomly_given_probability_vector(prob)
		].first;
	}

	//Randomly pick the next species directly (species-to-species view, reactions
	//summed out): weight each candidate by its species-to-species branching ratio.
	//Passes reaction_time = -1.0 and update_reaction_rate = false, so rates are
	//taken as currently stored in the network.
	vertex_t reactionNetwork_sr::superReactionNetwork::spe_random_pick_next_spe(rsp::index_int_t curr_spe, std::string atom_followed)
	{
		auto spe_rxn_c1_c2_map = this->sp_all_species_group_rnk->out_species_rxns.at(curr_spe);

		std::vector prob(spe_rxn_c1_c2_map.size(), 0.0);
		std::vector spe_index(spe_rxn_c1_c2_map.size(), 0);

		size_t i = 0;
		for (auto s_rxn_c1_c2 : spe_rxn_c1_c2_map)
		{
			auto next_spe = s_rxn_c1_c2.first;
			spe_index[i] = next_spe;
			prob[i] = spe_spe_branching_ratio(s_rxn_c1_c2.second, -1.0, curr_spe, next_spe, atom_followed, false);

			++i;
		}

		return spe_index[rand->return_index_randomly_given_probability_vector(prob)];
	}


	void 
superReactionNetwork::split_atom_followed_and_pathway(std::string str_in, std::string &atom_followed, std::string &pathway) const\n\t{\n\t\tatom_followed.clear();\n\t\tpathway.clear();\n\n\t\t//find first S, record position\n\t\tauto found = str_in.find(std::string(\"S\"));\n\t\tatom_followed = str_in.substr(0, found);\n\t\t//std::cout<< atom_followed << std::endl;\n\t\tpathway = str_in.substr(found);\n\t\t//std::cout << pathway << std::endl;\n\n\t\t//return true;\n\t}\n\n\tbool superReactionNetwork::parse_pathway_to_vector(std::string pathway_in, std::vector& spe_vec, std::vector& reaction_vec) const\n\t{\n\t\tspe_vec.resize(0);\n\t\treaction_vec.resize(0);\n\n\t\tconst char* pattern1 = \"(S\\\\d+(?:R[-]?\\\\d+)?)\";\n\t\tboost::regex re1(pattern1);\n\n\t\tboost::sregex_iterator it1(pathway_in.begin(), pathway_in.end(), re1);\n\t\tboost::sregex_iterator end1;\n\t\tstd::vector reaction_spe;\n\t\tfor (; it1 != end1; ++it1) {\n\t\t\treaction_spe.push_back(it1->str());\n\t\t}\n\n\t\tconst char* pattern2 = \"S(\\\\d+)(?:R(-?\\\\d+))?\";\n\t\tboost::regex re2(pattern2);\n\n\t\tfor (size_t i = 0; i < reaction_spe.size(); i++)\n\t\t{\n\t\t\tstd::vector rxn_s_idx_str;\n\n\t\t\tboost::smatch result2;\n\t\t\tif (boost::regex_search(reaction_spe[i], result2, re2)) {\n\t\t\t\tfor (std::size_t mi = 1; mi < result2.size(); ++mi) {\n\t\t\t\t\tstring s_rxn_idx(result2[mi].first, result2[mi].second);\n\t\t\t\t\trxn_s_idx_str.push_back(s_rxn_idx);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tspe_vec.push_back(boost::lexical_cast(rxn_s_idx_str[0]));\n\n\t\t\tif (rxn_s_idx_str[1] != std::string(\"\"))\n\t\t\t\treaction_vec.push_back(boost::lexical_cast(rxn_s_idx_str[1]));\n\t\t\telse {\n\t\t\t\t//not the last species\n\t\t\t\tif (i != reaction_spe.size() - 1)\n\t\t\t\t\treaction_vec.push_back(INT_MAX);\n\t\t\t}\n\n\n\t\t}\n\n\t\treturn true;\n\t}\n\n\tint reactionNetwork_sr::superReactionNetwork::get_number_of_elements() const\n\t{\n\t\treturn this->element_v.size();\n\t}\n\n\tvoid 
reactionNetwork_sr::superReactionNetwork::set_reaction_rate(vertex_t i, double reaction_rate)
	{
		//Overwrite the stored rate of reaction i.
		this->reaction_network_v[i].reaction_rate = reaction_rate;
	}

	//Mark reactions listed under "pathway.non_zero_rate_reaction" in the settings
	//property tree as having a nonzero rate.
	//NOTE(review): template argument of get_value() appears stripped by extraction.
	void reactionNetwork_sr::superReactionNetwork::set_is_reaction_rate_nonzero_from_setting_file()
	{
		for (auto key : this->rnk_pt.get_child("pathway.non_zero_rate_reaction")) {
			this->reaction_network_v[key.second.get_value()].is_reaction_rate_nonzero = true;
		}
	}

	//Flag every reaction's nonzero-rate status by evaluating rates at half the
	//pathway end time (tau) from the previous iteration.
	void reactionNetwork_sr::superReactionNetwork::set_is_reaction_rate_nonzero_from_previous_iteration()
	{
		//time, half of pathway end time
		double time = 0.5 * this->rnk_pt.get("time.tau");
		this->update_reaction_rate(time);
		for (std::size_t i = 0; i < this->reaction_network_v.size(); ++i) {
			if (this->reaction_network_v[i].reaction_rate > 0)
				this->reaction_network_v[i].is_reaction_rate_nonzero = true;
			else
				this->reaction_network_v[i].is_reaction_rate_nonzero = false;
		}
	}

	//Probability that curr_spe reacts within [init_time, end_time].
	//Under an active pathway constraint, species on the must-react list are
	//assumed to react with probability 1; otherwise the cached prob_max is used.
	double superReactionNetwork::prob_spe_will_react_in_a_time_range(double init_time, double end_time, size_t curr_spe)
	{
		//pathway constraint case and current species is on the list
		//in this case, we are making a assumption, that current species must react
		if (this->apply_pathway_constraint == true && this->sp_pathway_constarint_rnk->must_react_species_set.count(curr_spe) > 0) {
			return 1.0;
		}

		// ############################################################################
		// don't apply_pathway_constraint case or current speices don't have to react
		// ############################################################################

		////set pathway end time
		//set_pathway_end_time(pathway_end_time);
		set_spe_prob_max_at_a_time(init_time, end_time, curr_spe);
		return species_network_v[curr_spe].prob_max;

	}

	//Branching ratio of leaving curr_spe via next_reaction: the flux
	//s_coef_product * reaction_rate of next_reaction divided by the total outgoing
	//flux of curr_spe. When a pathway sink-reaction constraint is active for
	//curr_spe, only reactions on the constraint list contribute to the total.
	double superReactionNetwork::spe_out_by_a_reaction_branching_ratio(rsp::index_int_t curr_spe, rsp::index_int_t next_reaction)
	{

		//pathway constraint case and current species is on the list
		if (this->apply_pathway_constraint == true && this->sp_pathway_constarint_rnk->species_sink_reaction_set_map.count(curr_spe) > 0) {
			if (this->sp_pathway_constarint_rnk->species_sink_through_reaction_constraint == false)
				return 0.0;

			//current reaction not on the list
			if (this->sp_pathway_constarint_rnk->species_sink_reaction_set_map.at(curr_spe).count(next_reaction) == 0)
				return 0.0;

			// calculate this actually
			double prob_total = 0.0, prob_target_reaction = 0.0;
			for (std::size_t i = 0; i < this->species_network_v[curr_spe].reaction_k_index_s_coef_v.size(); ++i) {//for
				auto r_idx = this->species_network_v[curr_spe].reaction_k_index_s_coef_v[i].first;
				//found next reaction
				if (r_idx == next_reaction) {
					prob_target_reaction = this->species_network_v[curr_spe].reaction_k_index_s_coef_v[i].second* //s_coef_product
						this->reaction_network_v[r_idx].reaction_rate; //reaction rate
					prob_total += prob_target_reaction;
				}
				//not found next reaction
				else {
					// not a candidate reaction
					if (this->sp_pathway_constarint_rnk->species_sink_reaction_set_map.at(curr_spe).count(r_idx) == 0)
						continue;
					prob_total += this->species_network_v[curr_spe].reaction_k_index_s_coef_v[i].second* //s_coef_product
						this->reaction_network_v[r_idx].reaction_rate; //reaction rate
				}
			}//for

			double reaction_branching_ratio;
			//it prob_total ==0.0, it must because I set it to be zero artificially
			//it depends
			if (prob_total == 0.0) {
				reaction_branching_ratio = 1.0;
			}
			else {
				reaction_branching_ratio = prob_target_reaction / prob_total;
			}

			return reaction_branching_ratio;

		} // apply_pathway_constraint and current species is on the list


		// ############################################################################
		// don't apply_pathway_constraint case or current speices is not on the list
		// ############################################################################

		//probability
		double prob_total = 0.0, prob_target_reaction = 0.0;
		for (std::size_t i = 0; i < this->species_network_v[curr_spe].reaction_k_index_s_coef_v.size(); ++i) {//for
			auto r_idx = this->species_network_v[curr_spe].reaction_k_index_s_coef_v[i].first;
			//found next reaction
			if (r_idx == next_reaction) {
				prob_target_reaction = this->species_network_v[curr_spe].reaction_k_index_s_coef_v[i].second* //s_coef_product
					this->reaction_network_v[r_idx].reaction_rate; //reaction rate
				prob_total += prob_target_reaction;
			}
			//not found next reaction
			else {
				prob_total += this->species_network_v[curr_spe].reaction_k_index_s_coef_v[i].second* //s_coef_product
					this->reaction_network_v[r_idx].reaction_rate; //reaction rate
			}
		}//for

		double reaction_branching_ratio;
		//it prob_total ==0.0, it must because I set it to be zero artificially
		//it depends
		if (prob_total == 0.0) {
			reaction_branching_ratio = 1.0;
		}
		else {
			reaction_branching_ratio = prob_target_reaction / prob_total;
		}

		return reaction_branching_ratio;
	}

	//Return (reaction branching ratio, species branching ratio) as a pair for the
	//step curr_spe --next_reaction--> next_spe, optionally refreshing rates first.
	//NOTE(review): the std::pair template arguments appear stripped by extraction
	//(presumably std::pair<double, double>).
	std::pair superReactionNetwork::reaction_spe_branching_ratio_separately(double reaction_time, rsp::index_int_t curr_spe, rsp::index_int_t next_reaction, rsp::index_int_t next_spe, std::string atom_followed, bool update_reaction_rate)
	{
		//update rate in the reaction network
		if (update_reaction_rate == true)
			this->update_reaction_rate(reaction_time, curr_spe);

		double reaction_branching_ratio = spe_out_by_a_reaction_branching_ratio(curr_spe, next_reaction);

		double 
spe_branching_ratio = 0.0;
		//next species found
		//species branching ratio defaults to 0.0 when next_spe is not a product of
		//next_reaction for the followed atom
		if (this->reaction_network_v[next_reaction].out_spe_index_branching_ratio_map_map_with_constraint[atom_followed].count(next_spe) > 0)
			spe_branching_ratio = this->reaction_network_v[next_reaction].out_spe_index_branching_ratio_map_map_with_constraint[atom_followed].at(next_spe);

		return std::make_pair(reaction_branching_ratio, spe_branching_ratio);
	}

	//Combined branching ratio = (reaction branching ratio) * (species branching
	//ratio) for the step curr_spe --next_reaction--> next_spe.
	double reactionNetwork_sr::superReactionNetwork::reaction_spe_branching_ratio(double reaction_time, rsp::index_int_t curr_spe, rsp::index_int_t next_reaction, rsp::index_int_t next_spe, std::string atom_followed, bool update_reaction_rate)
	{
		//reaction branching ratio
		double rbr = 0.0;
		//species branching ratio
		double sbr = 0.0;
		std::tie(rbr, sbr) = reaction_spe_branching_ratio_separately(reaction_time, curr_spe, next_reaction, next_spe, atom_followed, update_reaction_rate);

		return rbr * sbr;
	}

	//Species-to-species branching ratio: sum of the combined branching ratios over
	//all reactions (rxn_c1_c2_vec) that connect curr_spe to next_spe.
	double reactionNetwork_sr::superReactionNetwork::spe_spe_branching_ratio(const std::vector& rxn_c1_c2_vec,
		double reaction_time, rsp::index_int_t curr_spe, rsp::index_int_t next_spe, std::string atom_followed, bool update_reaction_rate)
	{
		double ratio_tmp = 0.0;
		for (auto rxn_c1_c2 : rxn_c1_c2_vec) {
			auto reaction_index = rxn_c1_c2.r_idx;
			//whether to update reaction rates, is deferred to sub-routine to decice
			ratio_tmp += reaction_spe_branching_ratio(reaction_time, curr_spe, reaction_index, next_spe, atom_followed, update_reaction_rate);
		}
		return ratio_tmp;
	}

	//One simulation "step" starting inside a chattering (fast-equilibrating) group:
	//(1) pick the species the trajectory leaves the group from, (2) pick its next
	//reaction and product species. Appends R/S tokens to curr_pathway unless
	//condense_chatterings suppresses the internal step.
	when_where_t superReactionNetwork::chattering_group_move_one_step(int chattering_group_id, double time, std::string & curr_pathway, std::string atom_followed)
	{
		//totally condense chattering, for A<=>B, make new species Z, adding up all internal possiblities
		when_where_t when_where;

		//actually move two steps, (1) from one chattering species to another chattering species
		//(2) from chattering species to the outside
		/*step 1*/
		auto next_vertex1 = this->inside_chattering_group_random_pick_next_spe(chattering_group_id, time);

		//if this->condense_chatterings == true 
		//Just don't record chattering group as a reaction, don't record the internal species
		if (this->condense_chatterings == false) {
			curr_pathway += "R";
			//negative reaction index represent chattering group number
			//since there is no -1 * 0, which means, to the first chattering_group_id 0, negative 0 is still 0,
			//negative sign will not show on pathway string, here we make it to be -1*(chattering_group_id+1)
			curr_pathway += boost::lexical_cast(-1 * (chattering_group_id + rsp::INDICATOR));

			curr_pathway += "S";
			curr_pathway += boost::lexical_cast(next_vertex1);
			/*step 1*/
		}

		/*step 2*/
		//update rate in the reaction network
		update_reaction_rate(time, next_vertex1);

		rsp::index_int_t next_reaction_index2 = spe_random_pick_next_reaction(next_vertex1);
		//random pick next spe
		vertex_t next_vertex2 = reaction_random_pick_next_spe(next_reaction_index2, atom_followed);

		curr_pathway += "R";
		curr_pathway += boost::lexical_cast(next_reaction_index2);

		curr_pathway += "S";
		curr_pathway += boost::lexical_cast(next_vertex2);

		when_where.first = time;
		when_where.second = next_vertex2;
		/*step 2*/

		return when_where;
	}

	//Advance the Monte-Carlo trajectory by one step from curr_spe at the given
	//time: sample the reaction time by importance sampling, then pick the next
	//reaction and species (or delegate to the chattering-group step). Returns
	//(new time, new species); if time exceeds absolute_end_t the species is
	//unchanged and the pathway string is not extended.
	when_where_t superReactionNetwork::pathway_move_one_step(double time, vertex_t curr_spe, std::string & curr_pathway, std::string atom_followed)
	{
		//Monte-Carlo simulation
		//generate the random number u_1 between 0 and 1.0
		//(u_1 == 1.0 is rejected so the inverse-CDF lookup below stays finite)
		double u_1 = 0.0;
		do {
			u_1 = rand->random01();
		} while (u_1 == 1.0);
		when_where_t when_where(0.0, curr_spe);


		int chattering_group_id = this->species_network_v[curr_spe].chattering_group_id;
		//none chattering case
		if (chattering_group_id == -1) {
			time = reaction_time_from_importance_sampling_without_cutoff(time, curr_spe, u_1);
			if (time > this->absolute_end_t) {
				//if curr_vertex is a terminal species, should return here
				when_where.first = time;
				return when_where;
			}

			//update rate in the reaction network
			update_reaction_rate(time, curr_spe);
			rsp::index_int_t next_reaction_index = spe_random_pick_next_reaction(curr_spe);
			//random pick next spe
			vertex_t next_vertex = reaction_random_pick_next_spe(next_reaction_index, atom_followed);

			curr_pathway += "R";
			curr_pathway += boost::lexical_cast(next_reaction_index);

			curr_pathway += "S";
			curr_pathway += boost::lexical_cast(next_vertex);

			when_where.first = time;
			when_where.second = next_vertex;

			return when_where;
		}
		//chattering case
		//if it is chattering, and it is the first time reach chattering group, "move one step"
		//is actually move two steps, add reaction "G_{group index}"
		else {
			//calculate time from total drc of chattering species
			time = chattering_group_reaction_time_from_importance_sampling_without_cutoff(time, chattering_group_id, u_1);

			//time out of range, stop and return
			if (time > this->absolute_end_t) {
				when_where.first = time;
				return when_where;
			}

			when_where = this->chattering_group_move_one_step(chattering_group_id, time, curr_pathway, atom_followed);

			return when_where;
		}


	}

	//Simulate one full pathway from init_spe over [init_time, end_time]; returns
	//the pathway string "S<i>R<j>S<k>..." built by repeated one-step moves.
	std::string superReactionNetwork::pathway_sim_once(double init_time, double end_time, vertex_t init_spe, std::string atom_followed)
	{
		//set the pathway end time
		set_absolute_end_t(end_time);
		std::string curr_pathway;
		when_where_t when_where(init_time, init_spe);

		//initial species
		curr_pathway += "S";
		curr_pathway += boost::lexical_cast(init_spe);

		while (when_where.first < absolute_end_t) {
			when_where = pathway_move_one_step(when_where.first, when_where.second, curr_pathway, atom_followed);
		}

		return curr_pathway;
	}

	//Species-level analogue of chattering_group_move_one_step: step 2 picks the
	//next species directly (no reaction token is recorded, only "S<idx>").
	when_where_t superReactionNetwork::species_chattering_group_move_one_step(int chattering_group_id, double time, std::string & curr_pathway, std::string atom_followed)
	{
		//totally condense chattering, for A<=>B, make new species Z, adding up all internal possiblities
		when_where_t when_where;

		//actually move two steps, (1) from one chattering species to another chattering species
		//(2) from chattering species to the outside
		/*step 1*/
		auto next_vertex1 = this->inside_chattering_group_random_pick_next_spe(chattering_group_id, time);

		//if this->condense_chatterings == true 
		//Just don't record chattering group as a reaction, don't record the internal species
		if (this->condense_chatterings == false) {
			curr_pathway += "R";
			//negative reaction index represent chattering group number
			//since there is no -1 * 0, which means, to the first chattering_group_id 0, negative 0 is still 0,
			//negative sign will not show on pathway string, here we make it to be -1*(chattering_group_id+1)
			curr_pathway += boost::lexical_cast(-1 * (chattering_group_id + rsp::INDICATOR));

			curr_pathway += "S";
			curr_pathway += boost::lexical_cast(next_vertex1);
			/*step 1*/
		}

		/*step 2*/
		//update rate in the reaction network
		update_reaction_rate(time, next_vertex1);

		//random pick next spe
		vertex_t next_vertex2 = spe_random_pick_next_spe(next_vertex1, atom_followed);

		curr_pathway += "S";
		curr_pathway += boost::lexical_cast(next_vertex2);

		when_where.first = time;
		when_where.second = next_vertex2;
		/*step 2*/

		return when_where;
	}

	when_where_t 
reactionNetwork_sr::superReactionNetwork::species_pathway_move_one_step(double time, vertex_t curr_spe, std::string & curr_pathway, std::string atom_followed)
	{
		//Species-level analogue of pathway_move_one_step: records only "S<idx>"
		//tokens (no reaction tokens) and picks the next species directly.
		//Monte-Carlo simulation
		//generate the random number u_1 between 0 and 1.0
		double u_1 = 0.0;
		do {
			u_1 = rand->random01();
		} while (u_1 == 1.0);
		when_where_t when_where(0.0, curr_spe);


		int chattering_group_id = this->species_network_v[curr_spe].chattering_group_id;
		//none chattering case
		if (chattering_group_id == -1) {
			time = reaction_time_from_importance_sampling_without_cutoff(time, curr_spe, u_1);
			if (time > this->absolute_end_t) {
				//if curr_vertex is a terminal species, should return here
				when_where.first = time;
				return when_where;
			}

			//update rate in the reaction network
			update_reaction_rate(time, curr_spe);
			//random pick next spe
			vertex_t next_vertex = spe_random_pick_next_spe(curr_spe, atom_followed);

			curr_pathway += "S";
			curr_pathway += boost::lexical_cast(next_vertex);

			when_where.first = time;
			when_where.second = next_vertex;

			return when_where;
		}
		//chattering case
		//if it is chattering, and it is the first time reach chattering group, "move one step"
		//is actually move two steps, add reaction "G_{group index}"
		else {
			//calculate time from total drc of chattering species
			time = chattering_group_reaction_time_from_importance_sampling_without_cutoff(time, chattering_group_id, u_1);

			//time out of range, stop and return
			if (time > this->absolute_end_t) {
				when_where.first = time;
				return when_where;
			}

			when_where = this->species_chattering_group_move_one_step(chattering_group_id, time, curr_pathway, atom_followed);

			return when_where;
		}

	}

	//Simulate one full species-level pathway from init_spe over
	//[init_time, end_time]; returns "S<i>S<j>..." (reactions omitted).
	std::string reactionNetwork_sr::superReactionNetwork::species_pathway_sim_once(double init_time, double end_time, vertex_t init_spe, std::string atom_followed)
	{
		//set the pathway end time
		set_absolute_end_t(end_time);
		std::string curr_pathway;
		when_where_t when_where(init_time, init_spe);

		//initial species
		curr_pathway += "S";
		curr_pathway += boost::lexical_cast(init_spe);

		while (when_where.first < absolute_end_t) {
			when_where = species_pathway_move_one_step(when_where.first, when_where.second, curr_pathway, atom_followed);
		}

		return curr_pathway;
	}

	//One step of pathway-probability evaluation: sample a reaction time (advancing
	//when_time in place) and return the (reaction, species) branching-ratio pair
	//for the prescribed step. Returns (0.0, 0.0) when when_time is already at the
	//end-time boundary.
	//NOTE(review): std::pair template arguments appear stripped by extraction.
	std::pair superReactionNetwork::pathway_prob_sim_move_one_step(double &when_time, vertex_t curr_spe, rsp::index_int_t next_reaction, vertex_t next_spe, std::string atom_followed)
	{
		if (when_time >= (absolute_end_t - INFINITESIMAL_DT)) {
			//return std::make_pair(1.0, 1.0);
			return std::make_pair(0.0, 0.0);
		}

		this->set_spe_prob_max_at_a_time(when_time, absolute_end_t, curr_spe);

		//sample u_1 uniformly in [0, prob_max] so the inverted reaction time stays
		//inside the window
		double u_1;
		if (species_network_v[curr_spe].prob_max > 0.0) {
			u_1 = rand->random_min_max(0, species_network_v[curr_spe].prob_max);
		}
		else {
			u_1 = 0.0;
			//u_1 = INFINITESIMAL_DT;
		}

		when_time = reaction_time_from_importance_sampling(when_time, curr_spe, u_1);

		//pathway_prob *= reaction_spe_branching_ratio(when_time, curr_spe, next_reaction, next_spe, atom_followed);
		return reaction_spe_branching_ratio_separately(when_time, curr_spe, next_reaction, next_spe, atom_followed);

	}

	//Probability contribution of a chattering-group segment of a prescribed
	//pathway. Multiplies pathway_prob in place, advances when_time and the token
	//index i (by 1 if chatterings are condensed, else by 2). Returns false when the
	//contribution is zero / the boundary was hit, signalling the caller to abort.
	bool superReactionNetwork::chattering_group_pathway_prob_sim_move_one_step(int chattering_group_id, const std::vector &spe_vec, const std::vector &reaction_vec, std::size_t &i, double &when_time, const double end_time, double & pathway_prob, std::string atom_followed, bool spe_branching)
	{
		//add time delay first, regenerate random number, inverse to get exact time, get steady state time first
		//then calculate steady state ratios
		double chattering_group_prob = prob_chattering_group_will_react_in_a_time_range(when_time, end_time, chattering_group_id);
		pathway_prob *= chattering_group_prob;

		//avoid problems around boundary
		if (when_time < (absolute_end_t - INFINITESIMAL_DT)) {
			double u_1 = 1.0;
			if (chattering_group_prob > 0.0) {
				u_1 = rand->random_min_max(0, chattering_group_prob);
			}
			else {
				u_1 = 0.0;
				//u_1 = INFINITESIMAL_DT;
			}

			when_time = chattering_group_reaction_time_from_importance_sampling_without_cutoff(when_time, chattering_group_id, u_1);

			if (this->condense_chatterings == true) {
				/*step 1*/
				//based on drc at this time, calculate probability going out by that direction
				auto drc_prob_unnormalized = this->chattering_group_probability_vector(chattering_group_id, when_time);
				double drc_prob_sum = std::accumulate(drc_prob_unnormalized.begin(), drc_prob_unnormalized.end(), 0.0);
				//make sure there is at least one direction out, there is no, dead end, return 0.0 probability
				if (drc_prob_sum <= 0.0) {
					pathway_prob = 0.0;
					return false;
				}


				double chattering_group_total_prob = 0.0;
				//don't know the species leaving the chattering group, gonna search
				for (auto s_i : this->sp_chattering_rnk->species_chattering_group_mat[chattering_group_id]) {
					//fot species s_i, search wether it has desired reaction as a sink reaction
					for (auto r_coef : this->species_network_v[s_i].reaction_k_index_s_coef_v) {
						if (r_coef.first == reaction_vec[i]) {
							//weight = P(leave group via s_i) * reaction branching ratio
							//(* species branching ratio when spe_branching is set)
							auto tmp = drc_prob_unnormalized[this->sp_chattering_rnk->spe_idx_2_chattering_group_id_idx[s_i].second] / drc_prob_sum;
							//NOTE(review): called with 5 args; update_reaction_rate presumably
							//has a default in the declaration -- confirm in the header
							auto r_s_br = reaction_spe_branching_ratio_separately(when_time, s_i, reaction_vec[i], spe_vec[i + 1], atom_followed);
							tmp *= r_s_br.first;
							if (spe_branching == true)
								tmp *= r_s_br.second;
							chattering_group_total_prob += tmp;
						}
					}

				}

				pathway_prob *= chattering_group_total_prob;
				//move one step in this case
				i += 1;
			}
			else {
				/*step 1*/
				//based on drc at this time, calculate probability going out by that direction
				auto drc_prob_unnormalized = this->chattering_group_probability_vector(chattering_group_id, when_time);
				double drc_prob_sum = std::accumulate(drc_prob_unnormalized.begin(), drc_prob_unnormalized.end(), 0.0);
				//make sure there is at least one direction out, there is no, dead end, return 0.0 probability
				if (drc_prob_sum <= 0.0)
					return false;
				//notice out species is spe_vec[i + 1], next_species1
				pathway_prob *= drc_prob_unnormalized[this->sp_chattering_rnk->spe_idx_2_chattering_group_id_idx[spe_vec[i + 1]].second] / drc_prob_sum;
				/*step 1*/
				/*step 2*/
				auto r_s_br = reaction_spe_branching_ratio_separately(when_time, spe_vec[i + 1], reaction_vec[i + 1], spe_vec[i + 2], atom_followed);
				pathway_prob *= r_s_br.first;
				if (spe_branching == true)
					pathway_prob *= r_s_br.second;
				/*step 2*/
				//move two steps actually
				i += 2;
			}

			return true;
		}//boundary time problem
		else {
			// gotta to change i
			if (this->condense_chatterings == true) {
				//move one step actually
				i += 1;
			}
			else {
				//move two steps instead
				i += 2;
			}
			return false;
		}
	}

	double superReactionNetwork::pathway_prob_input_pathway_sim_once(double const init_time, const double end_time, const std::vector &spe_vec, const std::vector &reaction_vec, std::string atom_followed, bool spe_branching, bool 
terminal_sp)
	{
		//Estimate (one Monte-Carlo realization of) the probability of a prescribed
		//pathway (spe_vec / reaction_vec) over [init_time, end_time]. spe_branching
		//toggles inclusion of species branching ratios; terminal_sp toggles the
		//final (1 - prob_max) survival factor of the last species.
		//set pathway end time
		set_absolute_end_t(end_time);

		//basically, we assume there must be a reaction at the beginning, so should multiply be the 1-P_min(tau=0|t;S^{0})
		double pathway_prob = 1.0;
		//save one step reaction branching ratio and species branching ratio
		double r_br = 1.0, s_br = 1.0;
		double when_time = init_time;

		//start from the first reaction
		for (size_t i = 0; i < spe_vec.size() - 1;)
		{
			int chattering_group_id = this->species_network_v[spe_vec[i]].chattering_group_id;

			//none-chattering reaction
			if (chattering_group_id == -1) {
				pathway_prob *= prob_spe_will_react_in_a_time_range(when_time, end_time, spe_vec[i]);
				std::tie(r_br, s_br) = pathway_prob_sim_move_one_step(when_time, spe_vec[i], reaction_vec[i], spe_vec[i + 1], atom_followed);

				pathway_prob *= r_br;

				if (spe_branching == true)
					pathway_prob *= s_br;

				//move one step
				++i;
			}
			//chattering reaction, chattering case
			else {
				//the helper advances i and when_time and scales pathway_prob in place
				auto good_chattering_prob = this->chattering_group_pathway_prob_sim_move_one_step(chattering_group_id, spe_vec, reaction_vec, i, when_time, end_time, pathway_prob, atom_followed, spe_branching);
				if (!good_chattering_prob)
					return 0.0;
			}//if chattering case

		}

		//got to multiply by P_min or says (1-P_max)
		set_spe_prob_max_at_a_time(when_time, end_time, spe_vec.back());

		if (terminal_sp == true)
			pathway_prob *= (1 - species_network_v[spe_vec.back()].prob_max);

		return pathway_prob;
	}


	//Species-level analogue of pathway_prob_sim_move_one_step: advances when_time
	//by importance sampling and returns the species-to-species branching ratio for
	//curr_spe -> next_spe (0.0 at the end-time boundary).
	double reactionNetwork_sr::superReactionNetwork::species_pathway_prob_sim_move_one_step(double &when_time, vertex_t curr_spe, vertex_t next_spe, std::string atom_followed)
	{
		if (when_time >= (absolute_end_t - INFINITESIMAL_DT)) {
			//return 1.0;
			return 0.0;
		}

		this->set_spe_prob_max_at_a_time(when_time, absolute_end_t, curr_spe);

		double u_1;
		if (species_network_v[curr_spe].prob_max > 0.0) {
			u_1 = rand->random_min_max(0, species_network_v[curr_spe].prob_max);
		}
		else {
			u_1 = 0.0;
			//u_1 = INFINITESIMAL_DT;
		}

		when_time = reaction_time_from_importance_sampling(when_time, curr_spe, u_1);

		//pathway_prob *= spe_spe_branching_ratio(this->sp_all_species_group_rnk->out_species_rxns.at(curr_spe).at(next_spe),
		//	when_time, curr_spe, next_spe, atom_followed, true);

		return spe_spe_branching_ratio(this->sp_all_species_group_rnk->out_species_rxns.at(curr_spe).at(next_spe),
			when_time, curr_spe, next_spe, atom_followed, true);
	}

	//Species-level analogue of chattering_group_pathway_prob_sim_move_one_step:
	//scales pathway_prob in place, advances when_time and the token index i
	//(by 1 if chatterings are condensed, else by 2); false signals abort.
	bool superReactionNetwork::species_chattering_group_pathway_prob_sim_move_one_step(int chattering_group_id, const std::vector& spe_vec, std::size_t & i, double & when_time, const double end_time, double & pathway_prob, std::string atom_followed)
	{
		//add time delay first, regenerate random number, inverse to get exact time, get steady state time first
		//then calculate steady state ratios
		double chattering_group_prob = prob_chattering_group_will_react_in_a_time_range(when_time, end_time, chattering_group_id);
		pathway_prob *= chattering_group_prob;

		//avoid problems around boundary
		//NOTE(review): this guard uses end_time while the reaction-level variant at
		//the same spot uses absolute_end_t -- confirm the asymmetry is intentional
		if (when_time < (end_time - INFINITESIMAL_DT)) {
			double u_1 = 1.0;
			if (chattering_group_prob > 0.0) {
				u_1 = rand->random_min_max(0, chattering_group_prob);
			}
			else {
				u_1 = 0.0;
				//u_1 = INFINITESIMAL_DT;
			}

			when_time = chattering_group_reaction_time_from_importance_sampling_without_cutoff(when_time, chattering_group_id, u_1);

			if (this->condense_chatterings == true) {
				/*step 1*/
				//based on drc at this time, calculate probability going out by that direction
				auto drc_prob_unnormalized = this->chattering_group_probability_vector(chattering_group_id, when_time);
				double drc_prob_sum = std::accumulate(drc_prob_unnormalized.begin(), drc_prob_unnormalized.end(), 0.0);
				//make sure there is at least one direction out, there is no, dead end, return 0.0 probability
				if (drc_prob_sum <= 0.0) {
					pathway_prob = 0.0;
					return false;
				}


				double chattering_group_total_prob = 0.0;
				//don't know the species leaving the chattering group, gonna search
				for (auto s_i : this->sp_chattering_rnk->species_chattering_group_mat[chattering_group_id]) {
					//fot species s_i, search wether it has desired species as a sink species
					if (this->sp_all_species_group_rnk->out_species_rxns.at(s_i).count(spe_vec[i + 1]) >= 1) {
						auto tmp = drc_prob_unnormalized[this->sp_chattering_rnk->spe_idx_2_chattering_group_id_idx[s_i].second] / drc_prob_sum;
						tmp *= spe_spe_branching_ratio(this->sp_all_species_group_rnk->out_species_rxns.at(s_i).at(spe_vec[i + 1]),
							when_time, s_i, spe_vec[i + 1], atom_followed, true);
						chattering_group_total_prob += tmp;
					}
				}

				pathway_prob *= chattering_group_total_prob;
				//move one step in this case
				i += 1;
			}
			else {
				/*step 1*/
				//based on drc at this time, calculate probability going out by that direction
				auto drc_prob_unnormalized = this->chattering_group_probability_vector(chattering_group_id, when_time);
				double drc_prob_sum = std::accumulate(drc_prob_unnormalized.begin(), drc_prob_unnormalized.end(), 0.0);
				//make sure there is at least one direction out, there is no, dead end, return 0.0 probability
				if (drc_prob_sum <= 0.0)
					return false;
				//notice out species is spe_vec[i + 1], next_species1
				pathway_prob *= drc_prob_unnormalized[this->sp_chattering_rnk->spe_idx_2_chattering_group_id_idx[spe_vec[i + 1]].second] / 
drc_prob_sum;\n\t\t\t\t/*step 1*/\n\n\t\t\t\t/*step 2*/\n\t\t\t\tpathway_prob *= spe_spe_branching_ratio(this->sp_all_species_group_rnk->out_species_rxns.at(spe_vec[i + 1]).at(spe_vec[i + 2]),\n\t\t\t\t\twhen_time, spe_vec[i + 1], spe_vec[i + 2], atom_followed, true);\n\t\t\t\t/*step 2*/\n\t\t\t\t//move two steps actually\n\t\t\t\ti += 2;\n\t\t\t}\n\n\t\t\treturn true;\n\t\t}//boundary time problem\n\t\telse {\n\t\t\t// gotta to change i\n\t\t\tif (this->condense_chatterings == true) {\n\t\t\t\t//move one step actually\n\t\t\t\ti += 1;\n\t\t\t}\n\t\t\telse {\n\t\t\t\t//move two steps instead\n\t\t\t\ti += 2;\n\t\t\t}\n\t\t\treturn false;\n\t\t}\n\t}\n\n\tdouble reactionNetwork_sr::superReactionNetwork::species_pathway_prob_input_pathway_sim_once(const double init_time, const double end_time, const std::vector& spe_vec, const std::vector& reaction_vec, std::string atom_followed)\n\t{\n\t\t//set pathway end time\n\t\tset_absolute_end_t(end_time);\n\n\t\t//basically, we assume there must be a reaction at the beginning, so should multiply be the 1-P_min(tau=0|t;S^{0})\n\t\tdouble pathway_prob = 1.0;\n\t\tdouble when_time = init_time;\n\n\t\t//start from the first reaction\n\t\tfor (size_t i = 0; i < spe_vec.size() - 1;)\n\t\t{\n\t\t\tint chattering_group_id = this->species_network_v[spe_vec[i]].chattering_group_id;\n\n\t\t\t//none-chattering reaction\n\t\t\tif (chattering_group_id == -1) {\n\t\t\t\tpathway_prob *= prob_spe_will_react_in_a_time_range(when_time, end_time, spe_vec[i]);\n\t\t\t\tpathway_prob *= species_pathway_prob_sim_move_one_step(when_time, spe_vec[i], spe_vec[i + 1], atom_followed);\n\t\t\t\t//move one step\n\t\t\t\t++i;\n\t\t\t}\n\t\t\t//chattering reaction, chattering case\n\t\t\telse {\n\t\t\t\tauto good_chattering_group_prob = this->species_chattering_group_pathway_prob_sim_move_one_step(chattering_group_id, spe_vec, i, when_time, end_time, pathway_prob, atom_followed);\n\t\t\t\tif (!good_chattering_group_prob)\n\t\t\t\t\treturn 0.0;\n\t\t\t}//if 
    // Advances the simulated arrival time ("AT") by one reaction step for
    // species curr_spe, sampling the reaction time by importance sampling.
    // Returns when_time unchanged if we are already within INFINITESIMAL_DT
    // of the absolute end time (boundary guard).
    double superReactionNetwork::pathway_AT_sim_move_one_step(double when_time, vertex_t curr_spe)
    {
        //boundary guard: no step possible this close to the end time
        if (when_time >= (absolute_end_t - INFINITESIMAL_DT)) {
            return when_time;
        }

        this->set_spe_prob_max_at_a_time(when_time, absolute_end_t, curr_spe);

        //draw a uniform random number in [0, prob_max]; degenerate prob_max -> 0.0
        double u_1;
        if (species_network_v[curr_spe].prob_max > 0.0) {
            u_1 = rand->random_min_max(0, species_network_v[curr_spe].prob_max);
        }
        else {
            u_1 = 0.0;
            //u_1 = INFINITESIMAL_DT;
        }

        //invert the sampled probability into a reaction time
        when_time = reaction_time_from_importance_sampling(when_time, curr_spe, u_1);

        return when_time;
    }

    // Simulates one trajectory along the given pathway and returns the final
    // arrival time. Negative entries in reaction_vec mark chattering steps,
    // which are handled via the chattering-group sampling below.
    double superReactionNetwork::pathway_AT_input_pathway_sim_once(const double init_time, const double end_time, const std::vector& spe_vec, const std::vector& reaction_vec)
    {
        //set pathway end time
        set_absolute_end_t(end_time);

        //basically, we assume there must be a reaction at the beginning, so should multiply be the 1-P_min(tau=0|t;S^{0})
        double when_time = init_time;

        //start from the first reaction
        for (size_t i = 0; i < reaction_vec.size();)
        {
            //none-chattering reaction
            if (reaction_vec[i] >= 0) {
                when_time = pathway_AT_sim_move_one_step(when_time, spe_vec[i]);
                //move one step
                ++i;
            }
            //chattering reaction, chattering case
            else {
                int chattering_group_id = this->species_network_v[spe_vec[i]].chattering_group_id;

                //add time delay first, regenerate random number, inverse to get exact time, get steady state time first
                //then calculate steady state ratios
                double chattering_group_prob = prob_chattering_group_will_react_in_a_time_range(when_time, end_time, chattering_group_id);

                //avoid problems around boundary
                if (when_time < (absolute_end_t - INFINITESIMAL_DT)) {
                    double u_1 = 1.0;
                    if (chattering_group_prob > 0.0) {
                        u_1 = rand->random_min_max(0, chattering_group_prob);
                    }
                    else {
                        u_1 = 0.0;
                        //u_1 = INFINITESIMAL_DT;
                    }

                    when_time = chattering_group_reaction_time_from_importance_sampling_without_cutoff(when_time, chattering_group_id, u_1);
                }//boundary time problem

                //condensed chattering consumes one pathway slot, otherwise two
                if (this->condense_chatterings == true) {
                    //move one step actually
                    i += 1;
                }
                else {
                    //move two steps instead
                    i += 2;
                }

            }//if chattering case
        }

        return when_time;
    }
    // Same trajectory simulation as pathway_AT_input_pathway_sim_once, but
    // returns the arrival time MINUS the time of the first executed step
    // ("IT" — presumably the initiation time; the code records when_time at
    // the first successful step and subtracts it at the end).
    double superReactionNetwork::pathway_AT_no_IT_input_pathway_sim_once(const double init_time, const double end_time, const std::vector& spe_vec, const std::vector& reaction_vec)
    {
        //set pathway end time
        set_absolute_end_t(end_time);

        //basically, we assume there must be a reaction at the beginning, so should multiply be the 1-P_min(tau=0|t;S^{0})
        double when_time = init_time;

        //IT = time of the first step; is_IT latches once it has been recorded
        bool is_IT = false;
        double IT = 0.0;

        //start from the first reaction
        for (size_t i = 0; i < reaction_vec.size();)
        {
            //none-chattering reaction
            if (reaction_vec[i] >= 0) {
                when_time = pathway_AT_sim_move_one_step(when_time, spe_vec[i]);

                //record the initiation time on the first step only
                if (is_IT == false) {
                    is_IT = true;
                    IT = when_time;
                }

                //move one step
                ++i;
            }
            //chattering reaction, chattering case
            else {
                int chattering_group_id = this->species_network_v[spe_vec[i]].chattering_group_id;

                //add time delay first, regenerate random number, inverse to get exact time, get steady state time first
                //then calculate steady state ratios
                double chattering_group_prob = prob_chattering_group_will_react_in_a_time_range(when_time, end_time, chattering_group_id);

                //avoid problems around boundary
                if (when_time < (absolute_end_t - INFINITESIMAL_DT)) {
                    double u_1 = 1.0;
                    if (chattering_group_prob > 0.0) {
                        u_1 = rand->random_min_max(0, chattering_group_prob);
                    }
                    else {
                        u_1 = 0.0;
                        //u_1 = INFINITESIMAL_DT;
                    }

                    when_time = chattering_group_reaction_time_from_importance_sampling_without_cutoff(when_time, chattering_group_id, u_1);

                    //record the initiation time on the first step only
                    if (is_IT == false) {
                        is_IT = true;
                        IT = when_time;
                    }

                }//boundary time problem

                //condensed chattering consumes one pathway slot, otherwise two
                if (this->condense_chatterings == true) {
                    //move one step actually
                    i += 1;
                }
                else {
                    //move two steps instead
                    i += 2;
                }

            }//if chattering case

        }

        //arrival time excluding the initiation time
        return when_time - IT;
    }
    // Trajectory simulation returning BOTH the arrival time and the survival
    // probability ("SP") of the terminal species, i.e. the probability
    // (1 - prob_max) that spe_vec.back() does not react before end_time.
    std::pair superReactionNetwork::pathway_AT_with_SP_input_pathway_sim_once(const double init_time, const double end_time, const std::vector& spe_vec, const std::vector& reaction_vec)
    {
        //set pathway end time
        set_absolute_end_t(end_time);

        //basically, we assume there must be a reaction at the beginning, so should multiply be the 1-P_min(tau=0|t;S^{0})
        double when_time = init_time;

        //start from the first reaction
        for (size_t i = 0; i < reaction_vec.size();)
        {
            //none-chattering reaction
            if (reaction_vec[i] >= 0) {
                when_time = pathway_AT_sim_move_one_step(when_time, spe_vec[i]);
                //move one step
                ++i;
            }
            //chattering reaction, chattering case
            else {
                int chattering_group_id = this->species_network_v[spe_vec[i]].chattering_group_id;

                //add time delay first, regenerate random number, inverse to get exact time, get steady state time first
                //then calculate steady state ratios
                double chattering_group_prob = prob_chattering_group_will_react_in_a_time_range(when_time, end_time, chattering_group_id);

                //avoid problems around boundary
                if (when_time < (absolute_end_t - INFINITESIMAL_DT)) {
                    double u_1 = 1.0;
                    if (chattering_group_prob > 0.0) {
                        u_1 = rand->random_min_max(0, chattering_group_prob);
                    }
                    else {
                        u_1 = 0.0;
                        //u_1 = INFINITESIMAL_DT;
                    }

                    when_time = chattering_group_reaction_time_from_importance_sampling_without_cutoff(when_time, chattering_group_id, u_1);
                }//boundary time problem

                //condensed chattering consumes one pathway slot, otherwise two
                if (this->condense_chatterings == true) {
                    //move one step actually
                    i += 1;
                }
                else {
                    //move two steps instead
                    i += 2;
                }

            }//if chattering case
        }


        //got to multiply by P_min or says (1-P_max)
        set_spe_prob_max_at_a_time(when_time, end_time, spe_vec.back());

        //(arrival time, survival probability of the terminal species)
        return std::make_pair(when_time, 1 - species_network_v[spe_vec.back()].prob_max);
    }
first\n\t\t\t\t//then calculate steady state ratios\n\t\t\t\tdouble chattering_group_prob = prob_chattering_group_will_react_in_a_time_range(when_time, end_time, chattering_group_id);\n\n\t\t\t\t//avoid problems around boundary\n\t\t\t\tif (when_time < (absolute_end_t - INFINITESIMAL_DT)) {\n\t\t\t\t\tdouble u_1 = 1.0;\n\t\t\t\t\tif (chattering_group_prob > 0.0) {\n\t\t\t\t\t\tu_1 = rand->random_min_max(0, chattering_group_prob);\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tu_1 = 0.0;\n\t\t\t\t\t\t//u_1 = INFINITESIMAL_DT;\n\t\t\t\t\t}\n\n\t\t\t\t\twhen_time = chattering_group_reaction_time_from_importance_sampling_without_cutoff(when_time, chattering_group_id, u_1);\n\t\t\t\t}//boundary time problem\n\n\t\t\t\tif (this->condense_chatterings == true) {\n\t\t\t\t\t//move one step actually\n\t\t\t\t\ti += 1;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\t//move two steps instead\n\t\t\t\t\ti += 2;\n\t\t\t\t}\n\n\t\t\t}//if chattering case\n\t\t}\n\n\n\t\t//got to multiply by P_min or says (1-P_max)\n\t\tset_spe_prob_max_at_a_time(when_time, end_time, spe_vec.back());\n\n\t\treturn std::make_pair(when_time, 1 - species_network_v[spe_vec.back()].prob_max);\n\t}\n\n\tvoid reactionNetwork_sr::superReactionNetwork::initiate_M_matrix(std::string atom_followed)\n\t{\n\t\t//resize and initialization\n\t\tthis->atom_M_matrix[atom_followed].resize(this->species_network_v.size());\n\t\tfor (std::size_t i = 0; i < this->atom_M_matrix[atom_followed].size(); ++i)\n\t\t\tthis->atom_M_matrix[atom_followed][i].resize(this->species_network_v.size());\n\t\tfor (std::size_t i = 0; i < this->atom_M_matrix[atom_followed].size(); ++i)\n\t\t\tfor (std::size_t j = 0; j < this->atom_M_matrix[atom_followed][i].size(); ++j)\n\t\t\t\tthis->atom_M_matrix[atom_followed][i][j] = 0;\n\n\t\t//actually build M-matrix\n\t\tfor (std::size_t i = 0; i < this->atom_M_matrix[atom_followed].size(); ++i) {\n\t\t\tfor (std::size_t j = 0; j < this->species_network_v[i].reaction_k_index_s_coef_v.size(); ++j) 
    // Builds the M-matrix for every element plus the configured "super atom".
    void reactionNetwork_sr::superReactionNetwork::initiate_M_matrix()
    {
        for (auto x : this->element_v)
            this->initiate_M_matrix(x.ele_name);

        this->initiate_M_matrix(rnk_pt.get("pathway.super_atom"));
    }

    // Dumps the M-matrix for one atom to stdout, one species per row,
    // tab-separated, prefixed by the row index and species name.
    void reactionNetwork_sr::superReactionNetwork::print_M_matrix(std::string atom_followed)
    {
        for (std::size_t i = 0; i < this->atom_M_matrix[atom_followed].size(); ++i) {
            std::cout << i << ",\t" << this->species_network_v[i].spe_name << ",\t";
            for (std::size_t j = 0; j < this->atom_M_matrix[atom_followed][i].size(); ++j) {
                std::cout << this->atom_M_matrix[atom_followed][i][j] << "\t";
            }
            std::cout << "\n";
        }

    }

    // Returns a copy of the M-matrix for the given atom.
    // NOTE(review): map operator[] will insert an empty matrix if the atom was
    // never initialized — callers are expected to call initiate_M_matrix first.
    matrix_sr::size_t_matrix_t reactionNetwork_sr::superReactionNetwork::return_M_matrix(std::string atom_followed)
    {
        return this->atom_M_matrix[atom_followed];
    }

    // v1 R-matrix construction: like the M-matrix, but each (i, j) entry
    // stores the list of single-reaction paths {reaction index} that carry
    // the followed atom from species i to species j.
    void reactionNetwork_sr::superReactionNetwork::initiate_R_matrix_v1(std::string atom_followed)
    {
        //resize and initialization
        this->atom_R_matrix[atom_followed].resize(this->species_network_v.size());
        for (std::size_t i = 0; i < this->atom_R_matrix[atom_followed].size(); ++i)
            this->atom_R_matrix[atom_followed][i].resize(this->species_network_v.size());
        for (std::size_t i = 0; i < this->atom_R_matrix[atom_followed].size(); ++i)
            for (std::size_t j = 0; j < this->atom_R_matrix[atom_followed][i].size(); ++j)
                this->atom_R_matrix[atom_followed][i][j] = {};

        //actually build R-matrix
        for (std::size_t i = 0; i < this->atom_R_matrix[atom_followed].size(); ++i) {
            for (std::size_t j = 0; j < this->species_network_v[i].reaction_k_index_s_coef_v.size(); ++j) {
                for (std::size_t k = 0; k < this->reaction_network_v[this->species_network_v[i].reaction_k_index_s_coef_v[j].first].out_spe_index_weight_v_map[atom_followed].size(); ++k) {
                    //both contains atom followed
                    if (this->species_network_v[i].spe_component[atom_followed] != 0 && this->species_network_v[this->reaction_network_v[this->species_network_v[i].reaction_k_index_s_coef_v[j].first].out_spe_index_weight_v_map[atom_followed][k].first].spe_component[atom_followed] != 0) {
                        this->atom_R_matrix[atom_followed][i][this->reaction_network_v[this->species_network_v[i].reaction_k_index_s_coef_v[j].first].out_spe_index_weight_v_map[atom_followed][k].first].push_back({ this->species_network_v[i].reaction_k_index_s_coef_v[j].first });
                    }
                }
            }
        }

    }
    // v2 R-matrix construction: instead of walking species' reaction lists
    // (v1), iterate the graph's edges directly and record each edge index as a
    // single-step path from its source to its target species, provided both
    // endpoints contain the followed atom.
    void reactionNetwork_sr::superReactionNetwork::initiate_R_matrix_v2(std::string atom_followed)
    {
        //resize and initialization
        this->atom_R_matrix[atom_followed].resize(this->species_network_v.size());
        for (std::size_t i = 0; i < this->atom_R_matrix[atom_followed].size(); ++i)
            this->atom_R_matrix[atom_followed][i].resize(this->species_network_v.size());
        for (std::size_t i = 0; i < this->atom_R_matrix[atom_followed].size(); ++i)
            for (std::size_t j = 0; j < this->atom_R_matrix[atom_followed][i].size(); ++j)
                this->atom_R_matrix[atom_followed][i][j] = {};

        //iterate over all edges
        edge_iter iter_beg, iter_end;
        boost::tie(iter_beg, iter_end) = getEdges();
        //source and target index
        std::size_t s_i, t_i;
        for (; iter_beg != iter_end; ++iter_beg) {
            s_i = boost::source(*iter_beg, this->graph);
            t_i = boost::target(*iter_beg, this->graph);

            //record the edge as a length-1 path only if both endpoints carry the atom
            if (species_network_v[s_i].spe_component[atom_followed] != 0 && species_network_v[t_i].spe_component[atom_followed] != 0)
                this->atom_R_matrix[atom_followed][s_i][t_i].push_back({ properties(*iter_beg).edge_index });
        }

    }
    // Builds the R-matrix for every element plus the configured "super atom",
    // using the v2 (edge-based) construction.
    void reactionNetwork_sr::superReactionNetwork::initiate_R_matrix()
    {
        for (auto x : this->element_v)
            //this->initiate_R_matrix_v1(x.ele_name);
            this->initiate_R_matrix_v2(x.ele_name);

        this->initiate_R_matrix_v2(this->rnk_pt.get("pathway.super_atom"));
    }

    // Returns a copy of the R-matrix for the given atom.
    // NOTE(review): map operator[] inserts an empty matrix for an unknown
    // atom — callers are expected to call initiate_R_matrix first.
    matrix_sr::path_R_matrix_t reactionNetwork_sr::superReactionNetwork::return_R_matrix(std::string atom_followed)
    {
        return this->atom_R_matrix[atom_followed];
    }

    // Dumps the R-matrix for one atom to stdout: for each (i, j) pair prints
    // the species-name pair followed by every edge index in every stored path.
    void reactionNetwork_sr::superReactionNetwork::print_R_matrix(std::string atom_followed)
    {
        for (std::size_t i = 0; i < this->atom_R_matrix[atom_followed].size(); ++i) {
            std::cout << i << ",\t" << this->species_network_v[i].spe_name << ",\t";
            for (std::size_t j = 0; j < this->atom_R_matrix[atom_followed][i].size(); ++j) {
                //std::cout << "(" << i << "," << j << ")\t";
                std::cout << "(" << this->species_network_v[i].spe_name << "," << this->species_network_v[j].spe_name << ")\t";
                //skip empty cells (nothing to print after the name pair)
                if (this->atom_R_matrix[atom_followed][i][j].size() == 0) {
                    continue;
                }
                for (std::size_t k = 0; k < this->atom_R_matrix[atom_followed][i][j].size(); ++k) {
                    for (std::size_t l = 0; l < this->atom_R_matrix[atom_followed][i][j][k].size(); ++l)
                        std::cout << this->atom_R_matrix[atom_followed][i][j][k][l] << "\t";
                }
            }
            std::cout << std::endl;
        }
    }
    // Single M-matrix entry accessor (no bounds/key checks).
    std::size_t reactionNetwork_sr::superReactionNetwork::get_M_matrix_element(std::string atom_followed, std::size_t i, std::size_t j)
    {
        return this->atom_M_matrix[atom_followed][i][j];
    }

    // Single R-matrix entry accessor (no bounds/key checks); returns a copy.
    matrix_sr::path_R_matrix_element_t reactionNetwork_sr::superReactionNetwork::get_R_matrix_element(std::string atom_followed, std::size_t i, std::size_t j)
    {
        return this->atom_R_matrix[atom_followed][i][j];
    }

    // Encodes a path (a sequence of edge indices) as a string of the form
    // "S<src>R<rxn>S<spe>R<rxn>S<spe>...": the source species of the first
    // edge, then for every edge its reaction index and target species.
    // Returns "" for an empty path.
    std::string reactionNetwork_sr::superReactionNetwork::R_matrix_path_representation_to_string(matrix_sr::path_t p)
    {
        std::string path_t;
        if (p.size() > 0) {
            //seed with the source species of the first edge
            edge_iter iter_e = edge_index_to_edge_iterator[p[0]];
            path_t += std::string("S") + boost::lexical_cast(boost::source(*iter_e, this->graph));

            for (std::size_t i = 0; i < p.size(); ++i) {
                iter_e = edge_index_to_edge_iterator[p[i]];
                path_t += std::string("R") + boost::lexical_cast(properties(*iter_e).reaction_index);
                path_t += std::string("S") + boost::lexical_cast(boost::target(*iter_e, this->graph));
            }
        }
        return path_t;
    }

    // NOTE(review): despite its name, this returns TRUE when the path
    // contains NO zero-rate reactions (all rates nonzero) and FALSE if any
    // reaction's rate is zero or the path is empty. The name reads inverted
    // relative to the behavior; callers appear to rely on the current
    // semantics, so only flagging it here — confirm before renaming/flipping.
    bool reactionNetwork_sr::superReactionNetwork::contains_zero_reaction_rate_reactions(matrix_sr::path_t p)
    {
        //arrow guard
        if (p.size() == 0)
            return false;
        for (auto x : p) {
            edge_iter iter_e = edge_index_to_edge_iterator[x];
            if (reaction_network_v[properties(*iter_e).reaction_index].is_reaction_rate_nonzero == false)
                return false;
        }
        return true;
    }

    // Stringifies every non-empty path stored in R-matrix cell (i, j).
    // For an empty matrix, returns the single trivial path "S<i>".
    std::vector reactionNetwork_sr::superReactionNetwork::get_path_string_element_i_j(const matrix_sr::path_R_matrix_t &pRm, std::size_t i, std::size_t j)
    {
        if (pRm.size() == 0)
            return std::vector(1, std::string("S") + boost::lexical_cast(i));

        matrix_sr::path_R_matrix_element_t p = pRm[i][j];
        std::vector vs;

        for (auto x : p) {
            std::string ps = R_matrix_path_representation_to_string(x);
            if (!ps.empty())
                vs.push_back(ps);
        }
        return vs;
    }
    // Stringifies the paths in R-matrix cell (i, j), keeping only the topN
    // most probable ones (ranked by calculate_path_weight_based_on_path_probability)
    // and pruning the matrix cell accordingly to bound memory growth.
    // The multimap is ordered descending (std::greater), so crbegin() is the
    // currently-smallest kept probability and std::prev(end()) is its entry.
    std::vector reactionNetwork_sr::superReactionNetwork::get_path_string_update_matrix_element_i_j_topN(matrix_sr::path_R_matrix_t &pRm, const std::size_t i, const std::size_t j,
        const std::string atom_followed, const std::size_t topN, const double start_time, const double end_time)
    {
        if (pRm.size() == 0)
            return std::vector(1, std::string("S") + boost::lexical_cast(i));

        matrix_sr::path_R_matrix_element_t p = pRm[i][j];
        matrix_sr::path_R_matrix_element_t p_new;
        std::multimap, std::greater > prob_path_map;

        //if there is less than topN path, do nothing
        //if there are more than topN path, delete the unimportant ones
        for (std::size_t k = 0; k < p.size(); ++k) {
            std::string ps = R_matrix_path_representation_to_string(p[k]);
            double prob = calculate_path_weight_based_on_path_probability(ps, atom_followed, start_time, end_time);

            if (prob_path_map.size() < topN) {
                prob_path_map.insert(std::make_pair(prob, std::make_pair(ps, k)));
            }
            else
            {
                //below the smallest kept probability: discard
                if (prob <= prob_path_map.crbegin()->first)
                    continue;
                else {
                    //evict the smallest kept entry, admit the new one
                    prob_path_map.erase(std::prev(prob_path_map.end()));
                    prob_path_map.insert(std::make_pair(prob, std::make_pair(ps, k)));
                }
            //NOTE(review): the text between here and the loop below appears
            //truncated in this copy (closing braces, the rebuild of p_new
            //from prob_path_map / pRm[i][j] update, and the declaration of
            //`vs` are missing) — restore from the upstream source.
            } //if vs;
        for (auto x : p_new) {
            std::string ps = R_matrix_path_representation_to_string(x);
            if (!ps.empty())
                vs.push_back(ps);
        }

        return vs;
    }

    // Writes each string in vs to `filename`, one per line.
    void reactionNetwork_sr::superReactionNetwork::path_string_vector_s2f(std::vector vs, std::string filename)
    {
        std::ofstream fout(filename.c_str());
        for (auto x : vs)
            fout << x << std::endl;

        fout.close(); fout.clear();
    }

    // Enumerates candidate paths of length 0..n starting from every species
    // with an initial concentration (read from the config tree), deduplicates
    // them, and writes the set to `filename`.
    void reactionNetwork_sr::superReactionNetwork::heuristic_path_string_vector_s2f(std::string atom_followed, std::size_t n, std::string filename)
    {
        std::unordered_set us;
        for (std::size_t k = 0; k <= n; ++k) {
            //R^k: cell (i, j) holds all k-step paths from i to j
            auto pRmn = matrix_sr::matrix_power(this->atom_R_matrix[atom_followed], k);
            for (auto key : this->rnk_pt.get_child("chem_init.species_index_concentration")) {
                std::size_t si = boost::lexical_cast(key.first);
                //doesn't contain atom_followed
                if (species_network_v[si].spe_component.at(atom_followed) == 0)
                    continue;
                if (k == 0) {
                    //length-0 path is the source species itself
                    us.insert(std::string("S") + boost::lexical_cast(si));
                    //std::cout << std::string("S") + boost::lexical_cast(si) << std::endl;
                    continue;
                }
                for (std::size_t sj = 0; sj < this->species_network_v.size(); ++sj) {
                    auto vs = this->get_path_string_update_matrix_element_i_j_topN(pRmn, si, sj);
                    for (auto s : vs)
                        us.insert(s);
                    //std::cout << s << std::endl;
                }
            }

        }

        //save to file
        std::ofstream fout(filename.c_str());
        for (auto s : us)
            fout << s << std::endl;
        fout.close(); fout.clear();
    }
us;\n\t\tfor (std::size_t k = 0; k <= n; ++k) {\n\t\t\tauto pRmn = matrix_sr::matrix_power(this->atom_R_matrix[atom_followed], k);\n\t\t\tfor (auto key : this->rnk_pt.get_child(\"chem_init.species_index_concentration\")) {\n\t\t\t\tstd::size_t si = boost::lexical_cast(key.first);\n\t\t\t\t//doesn't contain atom_followed\n\t\t\t\tif (species_network_v[si].spe_component.at(atom_followed) == 0)\n\t\t\t\t\tcontinue;\n\t\t\t\tif (k == 0) {\n\t\t\t\t\tus.insert(std::string(\"S\") + boost::lexical_cast(si));\n\t\t\t\t\t//std::cout << std::string(\"S\") + boost::lexical_cast(si) << std::endl;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\tfor (std::size_t sj = 0; sj < this->species_network_v.size(); ++sj) {\n\t\t\t\t\tauto vs = this->get_path_string_update_matrix_element_i_j_topN(pRmn, si, sj);\n\t\t\t\t\tfor (auto s : vs)\n\t\t\t\t\t\tus.insert(s);\n\t\t\t\t\t//std::cout << s << std::endl;\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t\t//save to file\n\t\tstd::ofstream fout(filename.c_str());\n\t\tfor (auto s : us)\n\t\t\tfout << s << std::endl;\n\t\tfout.close(); fout.clear();\n\t}\n\n\tvoid superReactionNetwork::heuristic_path_string_vector_si_sj_n_s2f(std::string atom_followed, std::size_t si, std::size_t sj, std::size_t n, std::string filename)\n\t{\n\t\tstd::unordered_set us;\n\t\tfor (std::size_t k = 0; k <= n; ++k) {\n\t\t\tauto pRmn = matrix_sr::matrix_power(this->atom_R_matrix[atom_followed], k);\n\n\t\t\tif (k == 0) {\n\t\t\t\tus.insert(std::string(\"S\") + boost::lexical_cast(si));\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tauto vs = this->get_path_string_update_matrix_element_i_j_topN(pRmn, si, sj);\n\t\t\tfor (auto s : vs)\n\t\t\t\tus.insert(s);\n\t\t}\n\n\t\t//save to file\n\t\tstd::ofstream fout(filename.c_str());\n\t\tfor (auto s : us)\n\t\t\tfout << s << std::endl;\n\t\tfout.close(); fout.clear();\n\t}\n\n\tstd::set reactionNetwork_sr::superReactionNetwork::heuristic_path_string_vector_s2m(std::string atom_followed, std::size_t n)\n\t{\n\t\tstd::set us;\n\t\tfor (std::size_t k = 
    // Enumerates paths of length 0..n and keeps, per terminal species sj, the
    // topN paths with the SMALLEST length weight (shortest paths): the map is
    // ascending, crbegin() is the largest kept weight, and larger candidates
    // are rejected. Builds R^k incrementally (R^k = R^(k-1) * R).
    // Returns the kept paths, each prefixed with the followed atom's name.
    std::set reactionNetwork_sr::superReactionNetwork::heuristic_path_string_vector_sorted_based_on_path_length(std::string atom_followed, std::size_t n, std::size_t topN)
    {
        //one weight->path map per terminal species
        std::vector > p_map_v(this->species_network_v.size());

        matrix_sr::path_R_matrix_t pRmn;
        for (std::size_t k = 0; k <= n; ++k) {
            //auto pRmn = matrix_sr::matrix_power(this->atom_R_matrix[atom_followed], k);
            if (k == 0)
                pRmn = matrix_sr::path_R_matrix_t();
            else if (k == 1)
                pRmn = this->atom_R_matrix[atom_followed];
            else
                pRmn = matrix_sr::matrix_multiplication(pRmn, this->atom_R_matrix[atom_followed]);

            for (auto key : this->rnk_pt.get_child("chem_init.species_index_concentration")) {
                std::size_t si = boost::lexical_cast(key.first);
                //doesn't contain atom_followed
                if (species_network_v[si].spe_component.at(atom_followed) == 0)
                    continue;
                if (k == 0) {
                    //length-0 path is the source species itself
                    std::string path_name = std::string("S") + boost::lexical_cast(si);
                    p_map_v[si].insert(std::make_pair(calculate_path_weight_path_length(path_name), path_name));
                    continue;
                }
                for (std::size_t sj = 0; sj < this->species_network_v.size(); ++sj) {
                    auto vs = this->get_path_string_update_matrix_element_i_j_topN(pRmn, si, sj);
                    for (auto s : vs) {
                        double prob = calculate_path_weight_path_length(s);
                        if (p_map_v[sj].size() < topN) {
                            p_map_v[sj].insert(std::make_pair(prob, s));
                        }
                        else
                        {
                            //longer than the worst kept path: discard
                            if (prob >= p_map_v[sj].crbegin()->first)
                                continue;
                            else {
                                //evict the longest kept path, admit the new one
                                p_map_v[sj].erase(std::prev(p_map_v[sj].end()));
                                p_map_v[sj].insert(std::make_pair(prob, s));
                            }
                        //NOTE(review): text appears truncated here in this
                        //copy — the closing braces of the loops and the
                        //declaration of `us` are missing; restore upstream.
                        } //if us;
        for (auto pmv : p_map_v)
            for (auto ps : pmv)
                //add atom followed to the beginning of path
                us.insert(atom_followed + ps.second);

        return us;
    }
    // Enumerates paths of length 0..n and keeps, per terminal species sj, the
    // topN paths with the LARGEST simulated probability (the multimap is
    // descending via std::greater, so crbegin() is the smallest kept value
    // and smaller candidates are rejected). Builds R^k incrementally.
    // end_time_ratio scales the configured "time.tau" to set the simulation
    // end time. Returns the kept paths prefixed with the atom name.
    std::set reactionNetwork_sr::superReactionNetwork::heuristic_path_string_vector_sorted_based_on_path_prob(std::string atom_followed, std::size_t n, std::size_t topN, double end_time_ratio)
    {
        //one prob->path map per terminal species
        std::vector > > prob_path_map_v(this->species_network_v.size());
        matrix_sr::path_R_matrix_t pRmn;
        for (std::size_t k = 0; k <= n; ++k) {
            //auto pRmn = matrix_sr::matrix_power(this->atom_R_matrix[atom_followed], k);
            if (k == 0)
                pRmn = matrix_sr::path_R_matrix_t();
            else if (k == 1)
                pRmn = this->atom_R_matrix[atom_followed];
            else
                pRmn = matrix_sr::matrix_multiplication(pRmn, this->atom_R_matrix[atom_followed]);

            auto species_with_initial_concentration = return_species_index_with_initial_concentration();
            auto species_without_initial_concentration = return_species_index_without_initial_concentration();

            //species with initial concentration
            for (auto si : species_with_initial_concentration) {
                //doesn't contain atom_followed
                if (species_network_v[si].spe_component.at(atom_followed) == 0)
                    continue;
                if (k == 0) {
                    //length-0 path is the source species itself
                    std::string path_name = std::string("S") + boost::lexical_cast(si);
                    prob_path_map_v[si].insert(std::make_pair(calculate_path_weight_based_on_path_probability(path_name, atom_followed, 0.0, end_time_ratio*this->rnk_pt.get("time.tau")), path_name));
                    continue;
                }
                //in the mean time, we should change the matrix element so that it doesn't contain too many elements
                //become too big-->lots of memory
                for (std::size_t sj = 0; sj < this->species_network_v.size(); ++sj) {
                    //be a little cautious, a little open, 10*topN
                    auto vs = this->get_path_string_update_matrix_element_i_j_topN(pRmn, si, sj, atom_followed, 10 * topN, 0.0, end_time_ratio*this->rnk_pt.get("time.tau"));
                    for (auto s : vs) {
                        double prob = calculate_path_weight_based_on_path_probability(s, atom_followed, 0.0, end_time_ratio*this->rnk_pt.get("time.tau"));
                        if (prob_path_map_v[sj].size() < topN) {
                            prob_path_map_v[sj].insert(std::make_pair(prob, s));
                        }
                        else
                        {
                            //less probable than the worst kept path: discard
                            if (prob <= prob_path_map_v[sj].crbegin()->first)
                                continue;
                            else {
                                //evict the least probable kept path, admit the new one
                                prob_path_map_v[sj].erase(std::prev(prob_path_map_v[sj].end()));
                                prob_path_map_v[sj].insert(std::make_pair(prob, s));
                            }
                        //NOTE(review): text appears truncated here in this
                        //copy — closing braces and (apparently) a second loop
                        //over species without initial concentration are
                        //missing; restore from the upstream source.
                        } //if species_network_v.size(); ++sj)
                    //be a little cautious, a little open, 10*topN
                    this->get_path_string_update_matrix_element_i_j_topN(pRmn, si, sj, atom_followed, 10 * topN, 0.0, end_time_ratio*this->rnk_pt.get("time.tau"));
            }

        }
        std::set us;
        for (auto pmv : prob_path_map_v)
            for (auto ps : pmv)
                //add atom followed to the beginning of path
                us.insert(atom_followed + ps.second);

        return us;
    }
of path\n\t\t\t\tus.insert(atom_followed + ps.second);\n\n\t\treturn us;\n\t}\n\n\tdouble reactionNetwork_sr::superReactionNetwork::calculate_path_weight_path_length(std::string path)\n\t{\n\t\treturn (double)(std::count(path.begin(), path.end(), 'S'));\n\t}\n\n\tdouble reactionNetwork_sr::superReactionNetwork::calculate_path_weight_based_on_path_probability(std::string path, std::string atom_followed, double start_time, double end_time)\n\t{\n\t\tstd::vector spe_vec; std::vector reaction_vec;\n\t\tdouble prob = 0.0;\n\t\tthis->parse_pathway_to_vector(path, spe_vec, reaction_vec);\n\t\tprob = pathway_prob_input_pathway_sim_once(start_time, end_time, spe_vec, reaction_vec, atom_followed);\n\n\t\t//take the initial concentration of initial species into account\n\t\tif (this->species_network_v[spe_vec[0]].spe_conc != 0)\n\t\t\tprob *= this->species_network_v[spe_vec[0]].spe_conc;\n\n\t\tprob *= this->species_network_v[spe_vec[0]].spe_component[atom_followed];\n\t\tprob /= this->species_network_v[spe_vec.back()].spe_component[atom_followed];\n\n\t\treturn prob;\n\t}\n\n\tstd::size_t reactionNetwork_sr::superReactionNetwork::heuristic_path_string_vector_by_stage_number_path_length_all_elements(const std::size_t stage_n, std::string filename, std::size_t topN)\n\t{\n\t\tassert(stage_n >= 0);\n\t\t//fetch path length first\n\t\tstd::vector path_n_v;\n\t\tfor (auto key : this->rnk_pt.get_child(\"pathway.max_path_length\"))\n\t\t\tpath_n_v.push_back(key.second.get_value());\n\n\t\tstd::size_t path_n;\n\t\t//if iteration n is less than path_n_v lenght, fetch by index, otherwise take the last element\n\t\tif (stage_n < path_n_v.size())\n\t\t\tpath_n = path_n_v[stage_n];\n\t\telse\n\t\t\tpath_n = path_n_v.back();\n\n\t\tif (stage_n == 0)\n\t\t\tthis->set_is_reaction_rate_nonzero_from_setting_file();\n\t\telse\n\t\t\tthis->set_is_reaction_rate_nonzero_from_previous_iteration();\n\n\t\tstd::vector > all_path_2;\n\n\t\tfor (auto x : this->element_v) 
    // Stage-driven heuristic path generation ranked by path PROBABILITY, over
    // all chemical elements; merges the per-element sets, writes them to
    // `filename`, and returns the number of elements processed.
    std::size_t reactionNetwork_sr::superReactionNetwork::heuristic_path_string_vector_by_stage_number_path_prob_all_elements(const std::size_t stage_n, std::string filename, std::size_t topN, double end_time_ratio)
    {
        //NOTE(review): stage_n is unsigned, so this assert is always true (no-op)
        assert(stage_n >= 0);
        //fetch path length first
        std::vector path_n_v;
        for (auto key : this->rnk_pt.get_child("pathway.max_path_length"))
            path_n_v.push_back(key.second.get_value());

        std::size_t path_n;
        //if iteration n is less than path_n_v length, fetch by index, otherwise take the last element
        if (stage_n < path_n_v.size())
            path_n = path_n_v[stage_n];
        else
            path_n = path_n_v.back();

        //stage 0 seeds reaction-rate flags from settings, later stages from
        //the previous iteration's results
        if (stage_n == 0)
            this->set_is_reaction_rate_nonzero_from_setting_file();
        else
            this->set_is_reaction_rate_nonzero_from_previous_iteration();

        std::vector > all_path_2;

        for (auto x : this->element_v) {
            all_path_2.push_back(this->heuristic_path_string_vector_sorted_based_on_path_prob(x.ele_name, path_n, topN, end_time_ratio));
        }

        //merge the per-element sets
        set all_path;
        for (auto us : all_path_2)
            for (auto s : us)
                all_path.insert(s);

        //save to file
        std::ofstream fout(filename.c_str());
        for (auto x : all_path)
            fout << x << std::endl;
        fout.close(); fout.clear();

        return this->element_v.size();
    }

    // Same as ..._path_prob_all_elements, but returns the merged path list in
    // `path_all_v` ("s2m" = set to memory) instead of writing a file.
    std::size_t reactionNetwork_sr::superReactionNetwork::heuristic_path_string_vector_by_stage_number_path_prob_all_elements_s2m(const std::size_t stage_n, std::vector &path_all_v, std::size_t topN, double end_time_ratio)
    {
        path_all_v.resize(0);

        //NOTE(review): stage_n is unsigned, so this assert is always true (no-op)
        assert(stage_n >= 0);
        //fetch path length first
        std::vector path_n_v;
        for (auto key : this->rnk_pt.get_child("pathway.max_path_length"))
            path_n_v.push_back(key.second.get_value());

        std::size_t path_n;
        //if iteration n is less than path_n_v length, fetch by index, otherwise take the last element
        if (stage_n < path_n_v.size())
            path_n = path_n_v[stage_n];
        else
            path_n = path_n_v.back();

        //stage 0 seeds reaction-rate flags from settings, later stages from
        //the previous iteration's results
        if (stage_n == 0)
            this->set_is_reaction_rate_nonzero_from_setting_file();
        else
            this->set_is_reaction_rate_nonzero_from_previous_iteration();

        std::vector > all_path_2;

        for (auto x : this->element_v) {
            all_path_2.push_back(this->heuristic_path_string_vector_sorted_based_on_path_prob(x.ele_name, path_n, topN, end_time_ratio));
        }

        //merge the per-element sets, then copy to the output vector
        set all_path;
        for (auto us : all_path_2)
            for (auto s : us)
                all_path.insert(s);
        for (auto p : all_path)
            path_all_v.push_back(p);

        return this->element_v.size();

    }
double end_time_ratio)\n\t{\n\t\tpath_all_v.resize(0);\n\n\t\tassert(stage_n >= 0);\n\t\t//fetch path length first\n\t\tstd::vector path_n_v;\n\t\tfor (auto key : this->rnk_pt.get_child(\"pathway.max_path_length\"))\n\t\t\tpath_n_v.push_back(key.second.get_value());\n\n\t\tstd::size_t path_n;\n\t\t//if iteration n is less than path_n_v lenght, fetch by index, otherwise take the last element\n\t\tif (stage_n < path_n_v.size())\n\t\t\tpath_n = path_n_v[stage_n];\n\t\telse\n\t\t\tpath_n = path_n_v.back();\n\n\t\tif (stage_n == 0)\n\t\t\tthis->set_is_reaction_rate_nonzero_from_setting_file();\n\t\telse\n\t\t\tthis->set_is_reaction_rate_nonzero_from_previous_iteration();\n\n\t\tstd::vector > all_path_2;\n\n\t\tfor (auto x : this->element_v) {\n\t\t\tall_path_2.push_back(this->heuristic_path_string_vector_sorted_based_on_path_prob(x.ele_name, path_n, topN, end_time_ratio));\n\t\t}\n\n\t\tset all_path;\n\t\tfor (auto us : all_path_2)\n\t\t\tfor (auto s : us)\n\t\t\t\tall_path.insert(s);\n\t\tfor (auto p : all_path)\n\t\t\tpath_all_v.push_back(p);\n\n\t\treturn this->element_v.size();\n\n\t}\n\n\n\tstd::size_t reactionNetwork_sr::superReactionNetwork::heuristic_path_string_vector_by_stage_number_path_prob_super_element(const std::size_t stage_n, std::string filename, std::size_t topN, double end_time_ratio)\n\t{\n\t\tassert(stage_n >= 0);\n\t\t//fetch path length first\n\t\tstd::vector path_n_v;\n\t\tfor (auto key : this->rnk_pt.get_child(\"pathway.max_path_length\"))\n\t\t\tpath_n_v.push_back(key.second.get_value());\n\n\t\tstd::size_t path_n;\n\t\t//if iteration n is less than path_n_v lenght, fetch by index, otherwise take the last element\n\t\tif (stage_n < path_n_v.size())\n\t\t\tpath_n = path_n_v[stage_n];\n\t\telse\n\t\t\tpath_n = path_n_v.back();\n\n\t\tif (stage_n == 0)\n\t\t\tthis->set_is_reaction_rate_nonzero_from_setting_file();\n\t\telse\n\t\t\tthis->set_is_reaction_rate_nonzero_from_previous_iteration();\n\n\t\tauto us = 
this->heuristic_path_string_vector_sorted_based_on_path_prob(this->rnk_pt.get(\"pathway.super_atom\"), path_n, topN, end_time_ratio);\n\n\t\t//save to file\n\t\tstd::ofstream fout(filename.c_str());\n\t\tfor (auto x : us)\n\t\t\tfout << x << std::endl;\n\t\tfout.close(); fout.clear();\n\n\t\treturn 1;\n\t}\n\n\tstd::set reactionNetwork_sr::superReactionNetwork::return_species_index_with_initial_concentration() const\n\t{\n\t\tstd::set species_with_initial_concentration;\n\t\tfor (auto key : this->rnk_pt.get_child(\"chem_init.species_index_concentration\")) {\n\t\t\tspecies_with_initial_concentration.insert(boost::lexical_cast(key.first));\n\t\t}\n\t\treturn species_with_initial_concentration;\n\t}\n\n\tstd::set reactionNetwork_sr::superReactionNetwork::return_species_index_without_initial_concentration() const\n\t{\n\t\tauto species_with_initial_concentration = return_species_index_with_initial_concentration();\n\t\tstd::set species_without_initial_concentration;\n\n\t\tfor (std::size_t i = 0; i < this->species_network_v.size(); ++i) {\n\t\t\t//not in species_with_initial_concentration\n\t\t\tif (species_with_initial_concentration.count(i) == 0)\n\t\t\t\tspecies_without_initial_concentration.insert(i);\n\t\t}\n\t\treturn species_without_initial_concentration;\n\t}\n\n\tstd::set > reactionNetwork_sr::superReactionNetwork::return_species_index_and_initial_concentration() const\n\t{\n\t\tstd::set > species_concentration;\n\n\t\tfor (auto key : this->rnk_pt.get_child(\"chem_init.species_index_concentration\")) {\n\t\t\tspecies_concentration.emplace(boost::lexical_cast(key.first), key.second.get_value());\n\t\t}\n\t\treturn species_concentration;\n\t}\n\n\n\tvoid reactionNetwork_sr::superReactionNetwork::generate_path_by_running_monte_carlo_trajectory_s2m(std::vector &statistics_v, std::size_t Ntrajectory, std::string atom_followed, double end_time_ratio)\n\t{\n\t\t//std::vector statistics_v(this->species_network_v.size());\n\n\t\tauto species_with_initial_concentration 
= return_species_index_with_initial_concentration();\n\n\t\t//species with initial concentration, initial concentration is not zero\n\t\tfor (auto si : species_with_initial_concentration) {\n\t\t\t//doesn't contain atom_followed\n\t\t\tif (species_network_v[si].spe_component.at(atom_followed) == 0)\n\t\t\t\tcontinue;\n\n\t\t\t//contain atom_followed\n\t\t\tstd::string str_tmp;\n\t\t\tfor (std::size_t ti = 0; ti < Ntrajectory; ++ti) {\n\t\t\t\tstr_tmp = this->pathway_sim_once(0.0, end_time_ratio*this->rnk_pt.get(\"time.tau\"), si, atom_followed);\n\t\t\t\t//put the atom followed in front\n\t\t\t\tstatistics_v[si].insert_pathway_stat(atom_followed + str_tmp);\n\t\t\t} //for\n\t\t} //for\n\n\t} //generate_path_by_running_monte_carlo_trajectory_s2m\n\n\tstd::size_t reactionNetwork_sr::superReactionNetwork::generate_path_by_running_monte_carlo_trajectory_all_elements_s2m(std::vector& statistics_v, std::size_t Ntrajectory, double end_time_ratio)\n\t{\n\t\tfor (auto x : this->element_v) {\n\t\t\tgenerate_path_by_running_monte_carlo_trajectory_s2m(statistics_v, Ntrajectory, x.ele_name, end_time_ratio);\n\t\t}\n\t\treturn this->element_v.size();\n\t}\n\n\tstd::vector reactionNetwork_sr::superReactionNetwork::return_element_vecotr() const\n\t{\n\t\treturn this->element_v;\n\t}\n\n\n\n}/*namespace reactionNetwork_sr*/\n\n#endif\n", "meta": {"hexsha": "e579b5ab429a0553974669a5154443fcba09cde0", "size": 98148, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/reactionNetwork/superReactionNetwork/superReactionNetwork.cpp", "max_stars_repo_name": "AdamPI314/SOHR", "max_stars_repo_head_hexsha": "eec472ec98c69ce58d8dee1bc5bfc4a2bf9063c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2017-08-11T23:29:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-22T18:13:50.000Z", "max_issues_repo_path": "src/reactionNetwork/superReactionNetwork/superReactionNetwork.cpp", "max_issues_repo_name": "AdamPI314/SOHR", 
"max_issues_repo_head_hexsha": "eec472ec98c69ce58d8dee1bc5bfc4a2bf9063c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/reactionNetwork/superReactionNetwork/superReactionNetwork.cpp", "max_forks_repo_name": "AdamPI314/SOHR", "max_forks_repo_head_hexsha": "eec472ec98c69ce58d8dee1bc5bfc4a2bf9063c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2643274854, "max_line_length": 429, "alphanum_fraction": 0.7144516445, "num_tokens": 28139, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.5389832206876841, "lm_q2_score": 0.37022540649291935, "lm_q1q2_score": 0.1995452819719607}} {"text": "#include \"Wad.h\"\r\n\r\n#include \r\n\r\n#include \r\n#include \r\n#include \r\n#include \r\n\r\n#include \"IO.h\"\r\n\r\nnamespace {\r\n\tconst auto sqrt2 = std::sqrt(2.0);\r\n\r\n\tvoid ApplyAlphaSections(Image& pTex) {\r\n\t\tstd::vector pRGBTexture(pTex.width * pTex.height * 4, 0x00);\r\n\r\n\t\t// Color pRGBTexture totally blue\r\n\t\tfor (int i = 0; i < pTex.height * pTex.width; i++)\r\n\t\t\tpRGBTexture[i * 4 + 2] = 255;\r\n\r\n\t\tfor (int y = 0; y < pTex.height; y++) {\r\n\t\t\tfor (int x = 0; x < pTex.width; x++) {\r\n\t\t\t\tint index = y * pTex.width + x;\r\n\r\n\t\t\t\tif ((pTex.data[index * 4] == 0) && (pTex.data[index * 4 + 1] == 0) && (pTex.data[index * 4 + 2] == 255)) {\r\n\t\t\t\t\t// Blue color signifies a transparent portion of the texture. 
zero alpha for blending and\r\n\t\t\t\t\t// to get rid of blue edges choose the average color of the nearest non blue pixels\r\n\r\n\t\t\t\t\t//First set pixel black and transparent\r\n\t\t\t\t\tpTex.data[index * 4 + 2] = 0;\r\n\t\t\t\t\tpTex.data[index * 4 + 3] = 0;\r\n\r\n\t\t\t\t\tint count = 0;\r\n\t\t\t\t\tunsigned int RGBColorSum[3] = {0, 0, 0};\r\n\r\n\t\t\t\t\t//left above pixel\r\n\t\t\t\t\tif ((x > 0) && (y > 0)) {\r\n\t\t\t\t\t\tint iPixel = ((y - 1) * pTex.width + (x - 1)) * 4;\r\n\t\t\t\t\t\tif (!((pTex.data[iPixel] == 0) && (pTex.data[iPixel + 1] == 0) && (pTex.data[iPixel + 2] == 255))) {\r\n\t\t\t\t\t\t\tRGBColorSum[0] += (unsigned int)((float)pTex.data[iPixel + 0] * sqrt2);\r\n\t\t\t\t\t\t\tRGBColorSum[1] += (unsigned int)((float)pTex.data[iPixel + 1] * sqrt2);\r\n\t\t\t\t\t\t\tRGBColorSum[2] += (unsigned int)((float)pTex.data[iPixel + 2] * sqrt2);\r\n\t\t\t\t\t\t\tcount++;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\t//above pixel\r\n\t\t\t\t\tif ((x >= 0) && (y > 0)) {\r\n\t\t\t\t\t\tint iPixel = ((y - 1) * pTex.width + x) * 4;\r\n\t\t\t\t\t\tif (!((pTex.data[iPixel] == 0) && (pTex.data[iPixel + 1] == 0) && (pTex.data[iPixel + 2] == 255))) {\r\n\t\t\t\t\t\t\tRGBColorSum[0] += pTex.data[iPixel];\r\n\t\t\t\t\t\t\tRGBColorSum[1] += pTex.data[iPixel + 1];\r\n\t\t\t\t\t\t\tRGBColorSum[2] += pTex.data[iPixel + 2];\r\n\t\t\t\t\t\t\tcount++;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\t//right above pixel\r\n\t\t\t\t\tif ((x < pTex.width - 1) && (y > 0)) {\r\n\t\t\t\t\t\tint iPixel = ((y - 1) * pTex.width + (x + 1)) * 4;\r\n\t\t\t\t\t\tif (!((pTex.data[iPixel] == 0) && (pTex.data[iPixel + 1] == 0) && (pTex.data[iPixel + 2] == 255))) {\r\n\t\t\t\t\t\t\tRGBColorSum[0] += (unsigned int)((float)pTex.data[iPixel + 0] * sqrt2);\r\n\t\t\t\t\t\t\tRGBColorSum[1] += (unsigned int)((float)pTex.data[iPixel + 1] * sqrt2);\r\n\t\t\t\t\t\t\tRGBColorSum[2] += (unsigned int)((float)pTex.data[iPixel + 2] * 
sqrt2);\r\n\t\t\t\t\t\t\tcount++;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\t//left pixel\r\n\t\t\t\t\tif (x > 0) {\r\n\t\t\t\t\t\tint iPixel = (y * pTex.width + (x - 1)) * 4;\r\n\t\t\t\t\t\tif (!((pTex.data[iPixel] == 0) && (pTex.data[iPixel + 1] == 0) && (pTex.data[iPixel + 2] == 255))) {\r\n\t\t\t\t\t\t\tRGBColorSum[0] += pTex.data[iPixel];\r\n\t\t\t\t\t\t\tRGBColorSum[1] += pTex.data[iPixel + 1];\r\n\t\t\t\t\t\t\tRGBColorSum[2] += pTex.data[iPixel + 2];\r\n\t\t\t\t\t\t\tcount++;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\t//right pixel\r\n\t\t\t\t\tif (x < pTex.width - 1) {\r\n\t\t\t\t\t\tint iPixel = (y * pTex.width + (x + 1)) * 4;\r\n\t\t\t\t\t\tif (!((pTex.data[iPixel] == 0) && (pTex.data[iPixel + 1] == 0) && (pTex.data[iPixel + 2] == 255))) {\r\n\t\t\t\t\t\t\tRGBColorSum[0] += pTex.data[iPixel];\r\n\t\t\t\t\t\t\tRGBColorSum[1] += pTex.data[iPixel + 1];\r\n\t\t\t\t\t\t\tRGBColorSum[2] += pTex.data[iPixel + 2];\r\n\t\t\t\t\t\t\tcount++;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\t//left underneath pixel\r\n\t\t\t\t\tif ((x > 0) && (y < pTex.height - 1)) {\r\n\t\t\t\t\t\tint iPixel = ((y + 1) * pTex.width + (x - 1)) * 4;\r\n\t\t\t\t\t\tif (!((pTex.data[iPixel] == 0) && (pTex.data[iPixel + 1] == 0) && (pTex.data[iPixel + 2] == 255))) {\r\n\t\t\t\t\t\t\tRGBColorSum[0] += (unsigned int)((float)pTex.data[iPixel + 0] * sqrt2);\r\n\t\t\t\t\t\t\tRGBColorSum[1] += (unsigned int)((float)pTex.data[iPixel + 1] * sqrt2);\r\n\t\t\t\t\t\t\tRGBColorSum[2] += (unsigned int)((float)pTex.data[iPixel + 2] * sqrt2);\r\n\t\t\t\t\t\t\tcount++;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\t//underneath pixel\r\n\t\t\t\t\tif ((x >= 0) && (y < pTex.height - 1)) {\r\n\t\t\t\t\t\tint iPixel = ((y + 1) * pTex.width + x) * 4;\r\n\t\t\t\t\t\tif (!((pTex.data[iPixel] == 0) && (pTex.data[iPixel + 1] == 0) && (pTex.data[iPixel + 2] == 255))) {\r\n\t\t\t\t\t\t\tRGBColorSum[0] += pTex.data[iPixel];\r\n\t\t\t\t\t\t\tRGBColorSum[1] += pTex.data[iPixel + 
1];\r\n\t\t\t\t\t\t\tRGBColorSum[2] += pTex.data[iPixel + 2];\r\n\t\t\t\t\t\t\tcount++;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\t//right underneath pixel\r\n\t\t\t\t\tif ((x < pTex.width - 1) && (y < pTex.height - 1)) {\r\n\t\t\t\t\t\tint iPixel = ((y + 1) * pTex.width + (x + 1)) * 4;\r\n\t\t\t\t\t\tif (!((pTex.data[iPixel] == 0) && (pTex.data[iPixel + 1] == 0) && (pTex.data[iPixel + 2] == 255))) {\r\n\t\t\t\t\t\t\tRGBColorSum[0] += (unsigned int)((float)pTex.data[iPixel + 0] * sqrt2);\r\n\t\t\t\t\t\t\tRGBColorSum[1] += (unsigned int)((float)pTex.data[iPixel + 1] * sqrt2);\r\n\t\t\t\t\t\t\tRGBColorSum[2] += (unsigned int)((float)pTex.data[iPixel + 2] * sqrt2);\r\n\t\t\t\t\t\t\tcount++;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\tif (count > 0) {\r\n\t\t\t\t\t\tRGBColorSum[0] /= count;\r\n\t\t\t\t\t\tRGBColorSum[1] /= count;\r\n\t\t\t\t\t\tRGBColorSum[2] /= count;\r\n\r\n\t\t\t\t\t\tpRGBTexture[index * 4 + 0] = RGBColorSum[0];\r\n\t\t\t\t\t\tpRGBTexture[index * 4 + 1] = RGBColorSum[1];\r\n\t\t\t\t\t\tpRGBTexture[index * 4 + 2] = RGBColorSum[2];\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\t//Merge pTex and pRGBTexture\r\n\t\tfor (int y = 0; y < pTex.height; y++) {\r\n\t\t\tfor (int x = 0; x < pTex.width; x++) {\r\n\t\t\t\tint index = y * pTex.width + x;\r\n\r\n\t\t\t\tif ((pRGBTexture[index * 4] != 0) || (pRGBTexture[index * 4 + 1] != 0) || (pRGBTexture[index * 4 + 2] != 255) || (pRGBTexture[index * 4 + 3] != 0))\r\n\t\t\t\t\tmemcpy(&pTex.data[index * 4], &pRGBTexture[index * 4], sizeof(unsigned char) * 4);\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\tauto texNameLess(const char* a, const char* b) {\r\n#ifdef WIN32\r\n\t\treturn _stricmp(a, b) < 0;\r\n#else\r\n\t\treturn strcasecmp(a, b) < 0;\r\n#endif\r\n\t}\r\n\r\n\tauto texNameEqual(const char* a, const char* b) {\r\n#ifdef WIN32\r\n\t\treturn _stricmp(a, b) == 0;\r\n#else\r\n\t\treturn strcasecmp(a, b) == 0;\r\n#endif\r\n\t}\r\n}\r\n\r\nWad::Wad(const fs::path& path)\r\n\t: wadFile(path, 
std::ios::binary) {\r\n\tif (!wadFile)\r\n\t\tthrow std::ios::failure(\"Failed to open file \" + path.string() + \" for reading\");\r\n\twadFile.exceptions(std::ios::badbit | std::ios::failbit);\r\n\tLoadDirectory();\r\n}\r\n\r\nauto Wad::loadTexture(const char* name) -> std::optional {\r\n\tauto rawTex = GetTexture(name);\r\n\tif (rawTex.empty())\r\n\t\treturn {};\r\n\r\n\tMipmapTexture tex;\r\n\tCreateMipTexture(rawTex, tex);\r\n\treturn tex;\r\n}\r\n\r\nauto Wad::LoadDecalTexture(const char* name) -> std::optional {\r\n\tauto rawTex = GetTexture(name);\r\n\tif (rawTex.empty())\r\n\t\treturn {};\r\n\r\n\tMipmapTexture tex;\r\n\tCreateDecalTexture(rawTex, tex);\r\n\treturn tex;\r\n}\r\n\r\nvoid Wad::LoadDirectory() {\r\n\tauto header = read(wadFile);\r\n\r\n\t// check magic\r\n\tif (header.magic[0] != 'W' || header.magic[1] != 'A' || header.magic[2] != 'D' || (header.magic[3] != '2' && header.magic[3] != '3'))\r\n\t\tthrow std::ios::failure(\"Unknown WAD magic number: \" + std::string(header.magic, 4));\r\n\r\n\t// read and sort directory\r\n\tdirEntries.resize(header.nDir);\r\n\twadFile.seekg(header.dirOffset);\r\n\treadVector(wadFile, dirEntries);\r\n\r\n\tstd::sort(begin(dirEntries), end(dirEntries), [](const WadDirEntry& a, const WadDirEntry& b) {\r\n\t\treturn texNameLess(a.name, b.name);\r\n\t});\r\n}\r\n\r\nauto Wad::GetTexture(const char* name) -> std::vector {\r\n\tconst auto it = std::lower_bound(begin(dirEntries), end(dirEntries), name, [](const WadDirEntry& e, const char* name) {\r\n\t\treturn texNameLess(e.name, name);\r\n\t});\r\n\r\n\tif (it == end(dirEntries) || !texNameEqual(it->name, name))\r\n\t\treturn {};\r\n\r\n\t// we can only handle uncompressed formats\r\n\tif (it->compressed)\r\n\t\tthrow std::runtime_error(\"WAD texture cannot be loaded. 
Cannot read compressed items\");\r\n\r\n\twadFile.seekg(it->nFilePos);\r\n\treturn readVector(wadFile, it->nSize);\r\n}\r\n\r\nvoid Wad::CreateMipTexture(const std::vector& rawTexture, MipmapTexture& mipTex) {\r\n\tconst auto* rawMipTex = (bsp30::MipTex*)rawTexture.data();\r\n\r\n\tauto width = rawMipTex->width;\r\n\tauto height = rawMipTex->height;\r\n\tconst auto palOffset = rawMipTex->offsets[3] + (width / 8) * (height / 8) + 2;\r\n\tconst auto* palette = rawTexture.data() + palOffset;\r\n\r\n\tfor (int level = 0; level < bsp30::MIPLEVELS; level++) {\r\n\t\tconst auto* pixel = &(rawTexture[rawMipTex->offsets[level]]);\r\n\r\n\t\tauto& img = mipTex.Img[level];\r\n\t\timg.channels = 4;\r\n\t\timg.width = width;\r\n\t\timg.height = height;\r\n\t\timg.data.resize(width * height * 4);\r\n\r\n\t\tfor (int i = 0; i < height * width; i++) {\r\n\t\t\tint palIndex = pixel[i] * 3;\r\n\r\n\t\t\timg.data[i * 4 + 0] = palette[palIndex + 0];\r\n\t\t\timg.data[i * 4 + 1] = palette[palIndex + 1];\r\n\t\t\timg.data[i * 4 + 2] = palette[palIndex + 2];\r\n\t\t\timg.data[i * 4 + 3] = 255;\r\n\t\t}\r\n\r\n\t\tApplyAlphaSections(mipTex.Img[level]);\r\n\r\n\t\twidth /= 2;\r\n\t\theight /= 2;\r\n\t}\r\n}\r\n\r\nvoid Wad::CreateDecalTexture(const std::vector& rawTexture, MipmapTexture& mipTex) {\r\n\tconst auto* rawMipTex = (bsp30::MipTex*)rawTexture.data();\r\n\r\n\tauto width = rawMipTex->width;\r\n\tauto height = rawMipTex->height;\r\n\tconst auto palOffset = rawMipTex->offsets[3] + (width / 8) * (height / 8) + 2;\r\n\tconst auto* palette = rawTexture.data() + palOffset;\r\n\tconst auto* color = palette + 255 * 3;\r\n\r\n\tfor (int level = 0; level < bsp30::MIPLEVELS; level++) {\r\n\t\tconst auto* pixel = &(rawTexture[rawMipTex->offsets[level]]);\r\n\r\n\t\tauto& img = mipTex.Img[level];\r\n\t\timg.channels = 4;\r\n\t\timg.width = width;\r\n\t\timg.height = height;\r\n\t\timg.data.resize(width * height * 4);\r\n\r\n\t\tfor (int i = 0; i < height * width; i++) {\r\n\t\t\tint palIndex = 
pixel[i] * 3;\r\n\r\n\t\t\timg.data[i * 4 + 0] = color[0];\r\n\t\t\timg.data[i * 4 + 1] = color[1];\r\n\t\t\timg.data[i * 4 + 2] = color[2];\r\n\t\t\timg.data[i * 4 + 3] = 255 - palette[palIndex];\r\n\t\t}\r\n\r\n\t\tApplyAlphaSections(mipTex.Img[level]);\r\n\r\n\t\twidth /= 2;\r\n\t\theight /= 2;\r\n\t}\r\n}\r\n", "meta": {"hexsha": "1b12b656bfe6d473b058bf6faeb4ae0a09d3c98d", "size": 9574, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/Wad.cpp", "max_stars_repo_name": "bernhardmgruber/hlbsp", "max_stars_repo_head_hexsha": "a2144818fb6e747409dcd93cab97cea7f055dbfd", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2020-04-16T19:00:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-06T04:12:20.000Z", "max_issues_repo_path": "src/Wad.cpp", "max_issues_repo_name": "bernhardmgruber/hlbsp", "max_issues_repo_head_hexsha": "a2144818fb6e747409dcd93cab97cea7f055dbfd", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Wad.cpp", "max_forks_repo_name": "bernhardmgruber/hlbsp", "max_forks_repo_head_hexsha": "a2144818fb6e747409dcd93cab97cea7f055dbfd", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6757679181, "max_line_length": 152, "alphanum_fraction": 0.5592228953, "num_tokens": 3233, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.5389832206876841, "lm_q2_score": 0.3702254064929193, "lm_q1q2_score": 0.19954528197196067}} {"text": "/*\n\nCandyPoker\nhttps://github.com/sweeterthancandy/CandyPoker\n\nMIT License\n\nCopyright (c) 2019 Gerry Candy\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n*/\n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n\n#include \"app/pretty_printer.h\"\n#include \"ps/base/algorithm.h\"\n#include \"ps/base/board_combination_iterator.h\"\n#include \"ps/base/cards.h\"\n#include \"ps/base/cards.h\"\n#include \"ps/base/frontend.h\"\n#include \"ps/base/holdem_board_decl.h\"\n#include \"ps/base/rank_hasher.h\"\n#include \"ps/base/suit_hasher.h\"\n#include \"ps/base/tree.h\"\n\n#include \"ps/detail/tree_printer.h\"\n\n#include \"ps/eval/class_cache.h\"\n#include \"ps/eval/pass_mask_eval.h\"\n#include \"ps/eval/instruction.h\"\n\n#include \"ps/support/config.h\"\n#include \"ps/support/index_sequence.h\"\n\n#include \n\n#include \n\n#include \n\n#include \"ps/support/command.h\"\n\n#include \n#include \"ps/eval/holdem_class_vector_cache.h\"\n\n\n/*\n * 2 player\n \n player ev\n =========\n \n ---+---------------\n f_ | 0\n pf | sb+bb\n pp | 2*S*Ev - (S-sb)\n\n ---+---------------\n pf | 0\n pp | 2*S*Ev - (S-bb)\n \n\n total value\n ===========\n\n\n ---+---------------\n f_ | -sb\n pf | bb\n pp | 2*S*Ev - S\n \n ---+---------------\n pf | -sb\n pp | 2*S*Ev - S\n\n\n\n\n\n 2 players\n\n P1 P2\n -- (fold) --> f\n -- (push) --> (fold) --> pf\n -- (push) --> (call) --> pp\n \n 3 players\n P1 P2 P3\n -- (fold) --> (fold) ff\n -- (fold) --> (push) --> (fold) fpf\n -- (fold) --> (push) --> (call) fpp\n -- (push) --> (fold) --> (fold) pff\n -- (push) --> (fold) --> (call) pfp\n -- (push) --> (call) --> (fold) ppf\n -- (push) --> (call) --> (call) ppp\n\n\n\n f(2) -> g({pp,pf,fp,ff}) -> {pp,pf,fp,ff} \\ { perm : all actions fold before last player }\n \\------- always size 2 (n, as f*f, f*p -----/\n\n => |f(2)| = |{pp,pf,fp,ff}| - |{fp,ff}| \n 
=> |f(3)| = |{ppp,ppf,pfp,pff,fpp,fpf,ffp,fff}| - |{ffp,fff}| = 2 ^ 3 - 2 = 6\n\n f(3) = {ppp,ppf,pfp,pff,fpp,fpf,ff}\n \n q(0) = {ppp,ppf,pfp,pff},{fpp,fpf,ff}\n ^ ^ ^ ^ ^ ^ ^\n\n q(1) = {ppp,ppf},{pfp,pff},{fpp,fpf},{ff}\n _^ _^ _^ _^ _^ _^ _^\n \n q(2) = {ppp},{ppf},{pfp},{pff},{fpp},{fpf },{ff}\n __^ __^ __^ __^ __^ __^\n\n\n\n */\n\nnamespace ps {\n\n inline Eigen::VectorXd choose_push_fold(Eigen::VectorXd const& push, Eigen::VectorXd const& fold){\n Eigen::VectorXd result(169);\n result.fill(.0);\n for(holdem_class_id id=0;id!=169;++id){\n if( push(id) >= fold(id) ){\n result(id) = 1.0;\n }\n }\n return result;\n }\n inline Eigen::VectorXd clamp(Eigen::VectorXd s){\n for(holdem_class_id idx=0;idx!=169;++idx){\n s(idx) = ( s(idx) < .5 ? .0 : 1. );\n }\n return s;\n }\n\n \n\n \n\n \n struct eval_tree_node;\n\n struct gt_context{\n gt_context(size_t num_players, double eff, double sb, double bb)\n :num_players_{num_players},\n eff_(eff),\n sb_(sb),\n bb_(bb)\n {}\n size_t num_players()const{ return num_players_; }\n double eff()const{ return eff_; }\n double sb()const{ return sb_; }\n double bb()const{ return bb_; }\n\n gt_context& use_game_tree(std::shared_ptr gt){\n gt_ = gt;\n return *this;\n }\n gt_context& use_cache(class_cache const& cc){\n cc_ = &cc;\n return *this;\n }\n \n std::shared_ptr root()const{\n return gt_;\n }\n class_cache const* cc()const{\n return cc_;\n }\n \n friend std::ostream& operator<<(std::ostream& ostr, gt_context const& self){\n ostr << \"eff_ = \" << self.eff_;\n ostr << \", sb_ = \" << self.sb_;\n ostr << \", bb_ = \" << self.bb_;\n return ostr;\n }\n private:\n size_t num_players_;\n double eff_;\n double sb_;\n double bb_;\n\n std::shared_ptr gt_;\n class_cache const* cc_;\n };\n\n double eval_prob_from_key(std::string const& key, Eigen::VectorXd const& vec){\n enum{ Debug = 0 };\n static std::unordered_map reg_alloc = {\n // Player\n { \"\" , 0 }, // 0\n { \"p\" , 1 }, // 1\n { \"f\" , 2 }, // 1\n { \"pp\", 3 }, // 2\n { 
\"pf\", 4 }, // 2\n { \"fp\", 5 } // 2\n };\n double result = 1.0;\n std::string sub;\n std::stringstream dbg;\n for(size_t idx=0;idx!=key.size();++idx){\n if( reg_alloc.count(sub) == 0 ){\n throw std::domain_error(\"bad\");\n }\n auto reg = reg_alloc[sub];\n switch(key[idx]){\n case 'p':\n case 'P':\n {\n dbg << \"P<\" << reg << \",\" << ( vec[reg] ) << \">\";\n result *= vec[reg];\n sub += 'p';\n break;\n }\n case 'f':\n case 'F':\n {\n dbg << \"F<\" << reg << \",\" << ( 1 - vec[reg] )<<\">\";\n result *= ( 1 - vec[reg] );\n sub += 'f';\n break;\n }}\n }\n if( Debug ) std::cout << dbg.str() << \"\\n\";\n return result;\n }\n void __eval_prob_from_key_test(){\n Eigen::VectorXd v(6);\n double a = 0.2; // \n double b = 0.3; // p\n double c = 0.5; // f\n double d = 0.7; // pp\n double e = 0.11; // pf\n double f = 0.13; // fp\n \n double A = ( 1.0 - a);\n double B = ( 1.0 - b);\n double C = ( 1.0 - c);\n double D = ( 1.0 - d);\n double E = ( 1.0 - e);\n double F = ( 1.0 - f);\n\n v[0] = a;\n v[1] = b;\n v[2] = c;\n v[3] = d;\n v[4] = e;\n v[5] = f;\n\n auto check = [](std::string const& expr_s, auto expr,\n std::string const& exp_s, auto exp){\n double epsilon = 1e-3;\n if( ! 
(std::fabs(expr - exp) < epsilon ) ){\n std::stringstream sstr;\n sstr << std::fixed;\n sstr << expr_s << \"=\" << expr << \", \";\n sstr << exp_s << \"=\" << exp;\n throw std::domain_error(sstr.str());\n }\n };\n #define check(expr,exp) check(#expr, expr, #exp, exp)\n check( eval_prob_from_key(\"p\" , v), a );\n check( eval_prob_from_key(\"f\" , v), A );\n check( eval_prob_from_key(\"pp\" , v), a*b );\n check( eval_prob_from_key(\"pf\" , v), a*B );\n check( eval_prob_from_key(\"fp\" , v), A*c );\n check( eval_prob_from_key(\"ff\" , v), A*C );\n \n check( eval_prob_from_key(\"ppp\" , v), a*b*d );\n check( eval_prob_from_key(\"pfp\" , v), a*B*e );\n check( eval_prob_from_key(\"fpp\" , v), A*c*f );\n //check( eval_prob_from_key(\"ffp\" , v), A*C );\n check( eval_prob_from_key(\"ppf\" , v), a*b*D );\n check( eval_prob_from_key(\"pff\" , v), a*B*E );\n check( eval_prob_from_key(\"fpf\" , v), A*c*F );\n //check( eval_prob_from_key(\"fff\" , v), A*C );\n\n \n #undef check\n\n }\n static int __eval_prob_from_key_test_mem = ( __eval_prob_from_key_test(), 0 );\n\n\n\n struct eval_tree_node{\n /*\n \\param[out] out A vector of size ctx.num_players(), which will be \n used to hold the probability weight result\n \n \\param[in] ctx A gt_context for sb,bb,eff etc\n\n \\param[in] vec A holdem class vector, representing the combination\n of hand deals between the players. 
For any N-player\n game, any hand deal can be represented of some \n N-tuple of holdem class ids.\n\n \\param[in] s A probability realization of the strategy vector.\n This is equivalent to\n P(c=c0)P(c=c1|x)P(c=c2|xx)...,\n ie each \n */\n virtual void evaluate(Eigen::VectorXd& out,\n gt_context const& ctx,\n holdem_class_vector const& vec,\n Eigen::VectorXd const& s)=0;\n virtual void display(std::ostream& ostr = std::cout)const=0;\n };\n struct eval_tree_node_static : eval_tree_node{\n explicit eval_tree_node_static(std::string const& key, Eigen::VectorXd vec):\n key_{key}, vec_{vec}\n {}\n\n virtual void evaluate(Eigen::VectorXd& out,\n gt_context const& ctx,\n holdem_class_vector const& vec,\n Eigen::VectorXd const& s)override\n {\n auto p = eval_prob_from_key(key_, s);\n //std::cout << \"--\" << key_ << \" => \" << p << \"\\n\";\n for(size_t idx=0;idx!=vec_.size();++idx){\n out[idx] += vec_[idx] * p;\n }\n //out += vec_ * p;\n out[vec.size()] += p;\n }\n virtual void display(std::ostream& ostr = std::cout)const override{\n ostr << \"Static{\" << key_\n << \", \" << vector_to_string(vec_) << \"\\n\";\n }\n private:\n std::string key_;\n Eigen::VectorXd vec_;\n };\n\n #if 0\n struct eval_tree_node_eval : eval_tree_node{\n template\n explicit\n eval_tree_node_eval(Args&&...)\n {\n v_mask_.resize(2);\n v_mask_.fill(1);\n }\n virtual void evaluate(Eigen::VectorXd& out,\n double p,\n gt_context const& ctx,\n holdem_class_vector const& vec,\n Eigen::VectorXd const& s)override\n {\n auto q = factor(s);\n p *= q;\n if( std::fabs(p) < 0.001 )\n return;\n\n auto ev = ctx.cc()->LookupVector(vec);\n\n auto equity_vec = ( v_mask_.size() * ev - v_mask_ ) * ctx.eff() * p;\n\n out += equity_vec;\n out[vec.size()] += p;\n }\n virtual void display(std::ostream& ostr = std::cout)const override{\n ostr << \"Eval{\" << vector_to_string(v_mask_) << \"\\n\";\n }\n private:\n Eigen::VectorXd v_mask_;\n };\n #endif\n\n struct eval_tree_node_eval : eval_tree_node{\n enum{ Debug = 0 
};\n explicit\n eval_tree_node_eval(std::string const& key,\n std::vector perm,\n Eigen::VectorXd const& dead_money,\n Eigen::VectorXd const& active)\n :key_(key), perm_{perm}, dead_money_{dead_money}, active_{active}\n ,pot_amt_{active_.sum() + dead_money_.sum()}\n {\n\n delta_proto_.resize(dead_money_.size()+1);\n delta_proto_.fill(0);\n for(size_t idx=0;idx!=active_.size();++idx){\n delta_proto_[idx] -= active_[idx];\n delta_proto_[idx] -= dead_money_[idx];\n }\n \n if( Debug ){\n std::cout << \"perm => \" << detail::to_string(perm) << \"\\n\"; // __CandyPrint__(cxx-print-scalar,perm)\n std::cout << \"dead_money_ => \" << vector_to_string(dead_money_) << \"\\n\"; // __CandyPrint__(cxx-print-scalar,dead_money_)\n std::cout << \"active_ => \" << vector_to_string(active_) << \"\\n\"; // __CandyPrint__(cxx-print-scalar,active_)\n std::cout << \"delta_proto_ => \" << vector_to_string(delta_proto_) << \"\\n\"; // __CandyPrint__(cxx-print-scalar,delta_proto_)\n std::cout << \"pot_amt_ => \" << pot_amt_ << \"\\n\"; // __CandyPrint__(cxx-print-scalar,pot_amt_)\n }\n\n }\n virtual void evaluate(Eigen::VectorXd& out,\n gt_context const& ctx,\n holdem_class_vector const& cv,\n Eigen::VectorXd const& s)override\n {\n auto p = eval_prob_from_key(key_, s);\n\n //std::cout << \"--\" << key_ << \" => \" << p << \"\\n\";\n\n // short circuit for optimization purposes\n if( std::fabs(p) < 0.001 )\n return;\n\n holdem_class_vector tmp;\n for(auto _ : perm_ ){\n tmp.push_back(cv[_]);\n }\n \n auto ev = ctx.cc()->LookupVector(tmp);\n \n\n Eigen::VectorXd delta = delta_proto_;\n\n size_t ev_idx = 0;\n for( auto _ :perm_ ){\n delta[_] += pot_amt_ * ev[ev_idx];\n ++ev_idx;\n }\n\n //std::cout << \"key=\" << key_ << \",p\" << p << \", cv=\" << cv << \", delta=\" << vector_to_string(delta) << \"\\n\"; // __CandyPrint__(cxx-print-scalar,vector_to_string(delta))\n delta *= p;\n\n\n out += delta;\n // for checking\n out[ctx.num_players()] += p;\n }\n virtual void display(std::ostream& ostr = 
std::cout)const override{\n ostr << \"Eval{\" << key_ \n << \", \" << detail::to_string(perm_)\n << \", \" << vector_to_string(dead_money_)\n << \", \" << vector_to_string(active_) \n << \"}\\n\";\n }\n private:\n std::string key_;\n std::vector perm_;\n Eigen::VectorXd dead_money_;\n Eigen::VectorXd active_;\n Eigen::VectorXd delta_proto_;\n double pot_amt_;\n };\n\n struct eval_tree_non_terminal\n : public eval_tree_node\n , public std::vector >\n {\n virtual void evaluate(Eigen::VectorXd& out,\n gt_context const& ctx,\n holdem_class_vector const& vec,\n Eigen::VectorXd const& s)override\n {\n //std::cout << \"Begin{}\\n\";\n for(auto& ptr : *this){\n ptr->evaluate(out, ctx, vec, s);\n }\n //std::cout << \"End{}\\n\";\n }\n virtual void display(std::ostream& ostr = std::cout)const override{\n ostr << \"Begin{}\\n\";\n for(auto const& ptr : *this){\n ptr->display(ostr);\n }\n ostr << \"End{}\\n\";\n }\n };\n \n struct hu_eval_tree_flat : eval_tree_non_terminal{\n explicit\n hu_eval_tree_flat( gt_context const& ctx){\n Eigen::VectorXd v_f_{2};\n v_f_(0) = -ctx.sb();\n v_f_(1) = ctx.sb();\n auto n_f_ = std::make_shared(\"f\", v_f_);\n push_back(n_f_);\n\n Eigen::VectorXd v_pf{2};\n v_pf(0) = ctx.bb();\n v_pf(1) = -ctx.bb();\n auto n_pf = std::make_shared(\"pf\", v_pf);\n push_back(n_pf);\n\n Eigen::VectorXd dead_money = Eigen::VectorXd::Zero(2);\n Eigen::VectorXd active{2};\n active[0] = ctx.eff();\n active[1] = ctx.eff();\n auto n_pp = std::make_shared(\"pp\", std::vector{0,1}, dead_money, active);\n //auto n_pp = std::make_shared();\n push_back(n_pp);\n }\n };\n\n #if 0\n struct hu_eval_tree : eval_tree_non_terminal{\n explicit\n hu_eval_tree( gt_context const& ctx){\n\n\n Eigen::VectorXd v_f_{2};\n v_f_(0) = -ctx.sb();\n v_f_(1) = ctx.sb();\n auto n_f_ = std::make_shared(v_f_);\n n_f_->not_times(0);\n push_back(n_f_);\n\n auto n_p_ = std::make_shared();\n n_p_->times(0);\n\n Eigen::VectorXd v_pf{2};\n v_pf(0) = ctx.bb();\n v_pf(1) = -ctx.bb();\n auto n_pf = 
std::make_shared(v_pf);\n n_pf->not_times(1);\n n_p_->push_back(n_pf);\n\n Eigen::VectorXd dead_money = Eigen::VectorXd::Zero(2);\n Eigen::VectorXd active{2};\n active[0] = ctx.eff();\n active[1] = ctx.eff();\n auto n_pp = std::make_shared(std::vector{0,1}, dead_money, active);\n //auto n_pp = std::make_shared();\n n_pp->times(1);\n n_p_->push_back(n_pp);\n\n push_back(n_p_);\n\n }\n };\n #endif\n\n struct three_way_eval_tree : eval_tree_non_terminal{\n explicit\n three_way_eval_tree( gt_context const& ctx){\n\n // p p p \n\n size_t num_players = 3;\n \n\n Eigen::VectorXd stacks{num_players};\n for(size_t idx=0;idx!=num_players;++idx){\n stacks[idx] = ctx.eff();\n }\n\n\n Eigen::VectorXd v_blinds{num_players};\n v_blinds.fill(0.0);\n v_blinds[1] = ctx.sb();\n v_blinds[2] = ctx.bb();\n\n auto make_static = [&](std::string const& key, size_t target){\n Eigen::VectorXd sv = -v_blinds;\n sv[target] += v_blinds.sum();\n auto ptr = std::make_shared(key, sv);\n return ptr;\n };\n\n for(unsigned long long mask = ( 1 << num_players ); mask != 0;){\n --mask;\n std::bitset<32> bs = {mask};\n\n\n std::string key;\n std::vector perm;\n Eigen::VectorXd dead_money = Eigen::VectorXd::Zero(3);\n Eigen::VectorXd active = Eigen::VectorXd::Zero(3);\n\n for(size_t idx=0;idx!= num_players;++idx){\n if( bs.test(idx) ){\n active[idx] = stacks[idx];\n perm.push_back(idx);\n key += \"p\";\n } else{\n dead_money[idx] = v_blinds[idx];\n key += \"f\";\n }\n }\n\n if( bs.count() == 0 )\n continue;\n if( bs.count() == 1 && bs.test(num_players-1) ){\n // walk\n std::string degenerate_key(num_players-1, 'f');\n auto walk = make_static(degenerate_key, num_players-1);\n push_back(walk);\n } else if( bs.count() == 1 ){\n // steal \n auto steal = make_static(key, perm[0]);\n push_back(steal);\n } else { \n // push call\n\n auto allin = std::make_shared(key, perm, dead_money, active);\n push_back(allin);\n }\n \n }\n }\n };\n \n\n // returns a vector each players hand value\n Eigen::VectorXd 
combination_value(gt_context const& ctx,\n holdem_class_vector const& vec,\n Eigen::VectorXd const& s){\n Eigen::VectorXd result{vec.size()+1};\n result.fill(.0);\n ctx.root()->evaluate(result, ctx, vec,s);\n //std::cout << \"result[vec.size()] => \" << result[vec.size()] << \"\\n\"; // __CandyPrint__(cxx-print-scalar,result[vec.size()])\n\n return result;\n }\n\n /*\n \\param[in] idx The index of the strategy vector, for example\n for hu index 0 is for sb to push, whilst index\n 1 is for bb to call a push (given the action p),\n ie \n Index |Player| key | Given | P\n ------+------+-------+-------+------\n 0 | 0 | p | | P(p)\n 1 | 1 | pp | p | P(p|p)\n\n For three player this canonical mapping doesn't\n apply, we have\n \n Index |Player| Key | Given | P\n ------+------+-------+-------+------\n 0 | 0 | p | | P(p)\n 1 | 1 | pp | p | P(p|p)\n 2 | 1 | fp | f | P(p|f)\n 3 | 2 | ppp | pp | P(p|pp)\n 4 | 2 | pfp | pf | P(p|pf)\n 5 | 2 | fpp | fp | P(p|fp)\n */\n Eigen::VectorXd unilateral_detail(gt_context const& ctx,\n size_t idx,\n std::vector const& S)\n {\n Eigen::VectorXd result(169);\n result.fill(.0);\n \n Eigen::VectorXd s(S.size());\n\n auto player_idx = [](auto id){\n switch(id){\n case 0:\n return 0;\n case 1:\n case 2:\n return 1;\n case 3:\n case 4:\n case 5:\n return 2;\n }\n PS_UNREACHABLE();\n };\n\n if( ctx.num_players() == 3 ){\n for(auto const& _ : *Memory_ThreePlayerClassVector){\n auto const& cv = _.cv;\n // create a view of the vector, nothing fancy\n //\n // The strategy vector is of size 2,6,etc, each a vector of size 169\n // for a realization, we want to take \n for(size_t j=0;j!=s.size();++j){\n s[j] = S[j][cv[player_idx(j)]];\n }\n auto meta_result = combination_value(ctx, cv, s);\n result(cv[player_idx(idx)]) += _.prob * meta_result[player_idx(idx)];\n }\n } else {\n for(holdem_class_perm_iterator iter(ctx.num_players()),end;iter!=end;++iter){\n\n auto const& cv = *iter;\n auto p = cv.prob();\n // create a view of the vector, nothing fancy\n 
for(size_t idx=0;idx!=s.size();++idx){\n s[idx] = S[idx][cv[idx]];\n }\n auto meta_result = combination_value(ctx, cv, s);\n result(cv[idx]) += p * meta_result[idx];\n }\n }\n\n return result;\n }\n \n Eigen::VectorXd unilateral_maximal_exploitable(gt_context const& ctx, size_t idx, std::vector const& S)\n {\n\n enum{ Dp = 4 };\n enum{ Debug = 0};\n static Eigen::VectorXd fold_s = Eigen::VectorXd::Zero(169);\n static Eigen::VectorXd push_s = Eigen::VectorXd::Ones(169);\n if(Debug) std::cout << \"============== idx = \" << idx << \" =====================\\n\";\n auto copy = S;\n copy[idx] = push_s;\n auto push = unilateral_detail(ctx, idx, copy);\n if(Debug) pretty_print_strat(push, Dp);\n\n copy[idx] = fold_s;\n auto fold = unilateral_detail(ctx, idx, copy);\n if(Debug) pretty_print_strat(fold, Dp);\n\n return choose_push_fold(push, fold);\n }\n\n\n\n\n\n \n\n struct solver{\n virtual std::vector step(gt_context const& ctx,\n std::vector const& state)=0;\n };\n struct maximal_exploitable_solver_uniform : solver{\n explicit maximal_exploitable_solver_uniform(double factor = 0.05):factor_{factor}{}\n virtual std::vector step(gt_context const& ctx,\n std::vector const& state)override\n {\n std::vector result(state.size());\n \n //for(size_t idx=0;idx!=state.size();++idx){\n for(size_t idx=state.size();idx!=0;){\n --idx;\n\n auto counter = unilateral_maximal_exploitable(ctx,idx, state);\n result[idx] = state[idx] * ( 1.0 - factor_ ) + counter * factor_;\n }\n return result;\n }\n private:\n double factor_;\n };\n struct maximal_exploitable_solver_uniform_mt : solver{\n explicit maximal_exploitable_solver_uniform_mt(double factor = 0.05):factor_{factor}{}\n virtual std::vector step(gt_context const& ctx,\n std::vector const& state)override\n {\n boost::timer::auto_cpu_timer at;\n using result_t = std::future >;\n std::vector tmp;\n for(size_t idx=0;idx!=state.size();++idx){\n auto fut = std::async(std::launch::async, [idx,&ctx,&state,this](){\n return 
std::make_tuple(idx,unilateral_maximal_exploitable(ctx,idx, state));\n });\n tmp.emplace_back(std::move(fut));\n }\n std::cout << \"tmp.size() => \" << tmp.size() << \"\\n\"; // __CandyPrint__(cxx-print-scalar,tmp.size())\n std::vector result(state.size());\n for(auto& _ : tmp){\n auto ret = _.get();\n auto idx = std::get<0>(ret);\n auto const& counter = std::get<1>(ret);\n result[idx] = state[idx] * ( 1.0 - factor_ ) + counter * factor_;\n }\n return result;\n }\n private:\n double factor_;\n };\n\n\n struct cond_single_strategy_lp1{\n using state_t = std::vector;\n cond_single_strategy_lp1(size_t idx, double epsilon)\n :idx_(idx),\n epsilon_(epsilon)\n {}\n bool operator()(state_t const& from, state_t const& to)const{\n auto d = from[idx_] - to[idx_];\n auto norm = d.lpNorm<1>();\n auto cond = ( norm < epsilon_ );\n std::cout << \"norm => \" << norm << \"\\n\"; // __CandyPrint__(cxx-print-scalar,norm)\n return cond;\n }\n private:\n size_t idx_;\n double epsilon_;\n };\n\n struct make_solver{\n \n enum{ DefaultMaxIter = 400 };\n\n using state_t = std::vector;\n using step_observer_t = std::function;\n using stoppage_condition_t = std::function;\n\n explicit make_solver(gt_context const& ctx){\n ctx_ = &ctx;\n }\n make_solver& use_solver(std::shared_ptr s){\n solver_ = s;\n return *this;\n }\n make_solver& max_steps(size_t n){\n max_steps_ = n;\n return *this;\n }\n make_solver& init_state(std::vector const& s0){\n state0_ = s0;\n return *this;\n }\n make_solver& observer(step_observer_t obs){\n obs_.push_back(obs);\n return *this;\n }\n make_solver& stoppage_condition(stoppage_condition_t cond){\n stop_cond_ = cond;\n return *this;\n }\n std::vector run(){\n\n BOOST_ASSERT(ctx_ );\n BOOST_ASSERT(solver_ );\n BOOST_ASSERT(state0_.size() );\n BOOST_ASSERT( stop_cond_ );\n\n std::vector state = state0_;\n for(auto& _ : obs_){\n _(state);\n }\n\n for(size_t idx=0;idxstep(*ctx_, state);\n\n if( stop_cond_(state, next) ){\n state[0] = clamp(state[0]);\n state[1] = 
clamp(state[1]);\n return state;\n }\n state = next;\n for(auto& _ : obs_){\n _(state);\n }\n\n }\n\n std::vector result;\n result.push_back(Eigen::VectorXd::Zero(169));\n result.push_back(Eigen::VectorXd::Zero(169));\n\n BOOST_LOG_TRIVIAL(warning) << \"Failed to converge solve ctx = \" << *ctx_;\n return result;\n }\n private:\n gt_context const* ctx_;\n std::shared_ptr solver_;\n std::vector state0_;\n std::vector obs_;\n stoppage_condition_t stop_cond_;\n size_t max_steps_{DefaultMaxIter};\n };\n\n\n\n\n\n\nstruct HeadUpSolverCmd : Command{\n explicit\n HeadUpSolverCmd(std::vector const& args):args_{args}{}\n virtual int Execute()override{\n class_cache cc;\n\t\n std::string cache_name{\".cc.bin\"};\n try{\n cc.load(cache_name);\n }catch(std::exception const& e){\n std::cerr << \"Failed to load (\" << e.what() << \")\\n\";\n throw;\n }\n\n\n size_t num_players = 2;\n\n // create a vector of num_players of zero vectors\n std::vector state0( ( num_players == 2 ? 2 : 6 ) , Eigen::VectorXd::Zero(169));\n for(auto& _ : state0){\n _.fill(0.5);\n }\n \n\n using result_t = std::future > >;\n std::vector tmp;\n\n gt_context gtctx(num_players, 10, .5, 1.);\n\n #if 0\n hu_eval_tree{gtctx}.display();\n hu_eval_tree_flat{gtctx}.display();\n three_way_eval_tree{gtctx}.display();\n #endif\n\n\n auto solve = [&](auto num_players, auto eff){\n gt_context gtctx(num_players, eff, .5, 1.);\n std::shared_ptr gt;\n switch(num_players){\n case 2:\n gt = std::make_shared(gtctx);\n break;\n case 3:\n gt = std::make_shared(gtctx);\n break;\n default:\n BOOST_THROW_EXCEPTION(std::domain_error(\"unsupported\"));\n }\n gt->display();\n gtctx.use_game_tree(gt);\n\n gtctx.use_cache(cc);\n auto result = make_solver(gtctx)\n .use_solver(std::make_shared())\n .stoppage_condition(cond_single_strategy_lp1(0, 0.1))\n .init_state(state0)\n #if 1\n .observer([](auto const& vec){\n static std::vector v = { \"\" , \"p\" , \"f\" , \"pp\", \"pf\", \"fp\" };\n for(size_t idx=0;idx!=vec.size();++idx){\n 
std::cout << \"--------\" << v[idx] << \"-------------\\n\";\n pretty_print_strat(vec[idx], 2);\n }\n })\n #endif\n .run();\n return result;\n };\n\n auto enque = [&](double eff){\n tmp.push_back(std::async([&,num_players, eff](){\n auto result = solve(num_players, eff);\n return std::make_tuple(eff, result);\n }));\n };\n #if 0\n for(double eff = 5.0;eff <= 50.0;eff+=1){\n enque(eff);\n }\n #else\n enque(10);\n #endif\n\n #if 1\n Eigen::VectorXd s0(169);\n s0.fill(.0);\n Eigen::VectorXd s1(169);\n s1.fill(.0);\n for(auto& _ : tmp){\n auto aux = _.get();\n auto eff = std::get<0>(aux);\n auto const& vec = std::get<1>(aux);\n for(size_t idx=0;idx!=169;++idx){\n s0(idx) = std::max(s0(idx), eff*vec[0](idx));\n s1(idx) = std::max(s1(idx), eff*vec[1](idx));\n }\n }\n \n pretty_print_strat(s0, 1);\n pretty_print_strat(s1, 1);\n\n\n\n\n #if 0\n auto order_cards = [](auto const& strat){\n struct HandAux{\n HandAux(size_t id_, double level_)\n :id(id_),\n level(level_),\n decl{&holdem_hand_decl::get(id)}\n {}\n size_t id;\n double level;\n holdem_hand_decl const* decl;\n double cum_{.0};\n };\n std::vector aux;\n for(size_t idx=0;idx!=strat.size();++idx){\n aux.emplace_back(idx, strat[idx]);\n }\n // first sort by level\n std::sort( aux.begin(), aux.end(), [](auto const& l, auto const& r){\n return l.level > r.level;\n });\n holdem_hand_vector result;\n for(auto const& _ : aux){\n result.push_back(_.id);\n }\n \n\n return result;\n };\n #endif\n\n\n #endif\n\n #if 0\n for(auto& _ : tmp){\n auto aux = _.get();\n auto const& vec = std::get<1>(aux);\n for(auto const& s : vec ){\n pretty_print_strat(s, 2);\n }\n }\n #endif\n\n\n\n return EXIT_SUCCESS;\n }\nprivate:\n std::vector const& args_;\n};\nstatic TrivialCommandDecl HeadsUpSolverCmdDecl{\"heads-up-solver\"};\n \n} // end namespace ps\n", "meta": {"hexsha": "71364aa8d61dee31b88585e37a05fc853f3caadb", "size": 43683, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Trash/cmd_heads_up_solver.cpp", "max_stars_repo_name": 
"sweeterthancandy/CandyPoker", "max_stars_repo_head_hexsha": "53dfcc92402492739a2300847aeb298d389d546a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9.0, "max_stars_repo_stars_event_min_datetime": "2019-10-31T12:57:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-16T10:41:18.000Z", "max_issues_repo_path": "Trash/cmd_heads_up_solver.cpp", "max_issues_repo_name": "sweeterthancandy/CandyPoker", "max_issues_repo_head_hexsha": "53dfcc92402492739a2300847aeb298d389d546a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Trash/cmd_heads_up_solver.cpp", "max_forks_repo_name": "sweeterthancandy/CandyPoker", "max_forks_repo_head_hexsha": "53dfcc92402492739a2300847aeb298d389d546a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-04-01T06:05:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-01T06:05:52.000Z", "avg_line_length": 41.2492917847, "max_line_length": 193, "alphanum_fraction": 0.3745164023, "num_tokens": 8350, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.5389832206876841, "lm_q2_score": 0.37022539954425293, "lm_q1q2_score": 0.19954527822674611}} {"text": "//\n// Created by dragos on 31.10.2018.\n//\n\n#include \"cegis.h\"\n#include \"cfg_algos.h\"\n#include \n#include \n#include \n#include \n\nstd::pair cegis::deduce(z3::solver &slv) {\n const auto &asserts = slv.assertions();\n return {false, sigma.ctx().bool_val(true)};\n}\n\nz3::expr cegis::run() {\n auto occs = occurences(sigma);\n auto typed = typed_occurences(sigma);\n std::set non_consts;\n for (const auto &p : occs) {\n if (p.first.find(\"pre_call\") == std::string::npos) {\n non_consts.emplace(p.first);\n }\n }\n z3::solver slv(sigma.ctx());\n auto nsigma = !sigma;\n auto &ctx = sigma.ctx();\n auto expand_bools = sigma;\n while (true) {\n if (slv.check() != z3::check_result::sat) {\n return sigma.ctx().bool_val(false);\n }\n slv.push();\n slv.add(nsigma);\n auto cr2 = slv.check();\n if (cr2 == z3::check_result::sat) {\n auto cex = slv.get_model();\n slv.pop();\n auto deduced = deduce(slv);\n auto res = deduced.first;\n auto C = deduced.second;\n if (res) {\n return C;\n } else {\n auto sz = cex.size();\n z3::expr_vector rep(ctx), with(ctx);\n std::set found;\n for (unsigned i = 0; i != sz; ++i) {\n auto fd = cex.get_const_decl(i);\n auto v = cex.get_const_interp(fd);\n if (fd.name().str().find(\"pre_call\") == std::string::npos) {\n found.emplace(fd.name().str());\n // means input variable\n rep.push_back(fd());\n with.push_back(v);\n }\n }\n std::set unfound;\n std::set_difference(non_consts.begin(), non_consts.end(), found.begin(),\n found.end(), std::inserter(unfound, unfound.end()));\n for (const auto &uf : unfound) {\n auto tp = typed.find(uf)->second;\n rep.push_back(ctx.constant(uf.c_str(), tp));\n if (tp.is_bool()) {\n with.push_back(ctx.bool_val(true));\n } else {\n with.push_back(ctx.num_val(0, tp));\n }\n }\n slv.add(sigma.substitute(rep, with).simplify());\n }\n } else {\n slv.pop();\n return 
z3::mk_and(slv.assertions());\n }\n }\n}\n\nz3::expr cegis::replace_constants(const z3::expr &old) {\n if (old.is_app()) {\n if (old.decl().decl_kind() == Z3_OP_FALSE ||\n old.decl().decl_kind() == Z3_OP_TRUE) {\n return ctx().bool_const(next().c_str());\n }\n if (old.decl().decl_kind() == Z3_OP_DT_CONSTRUCTOR) {\n return ctx().constant(next().c_str(), old.get_sort());\n }\n if (old.is_const()) {\n if (old.decl().decl_kind() == Z3_OP_UNINTERPRETED) {\n return old;\n } else if (old.decl().decl_kind() == Z3_OP_BNUM) {\n return ctx().constant(next().c_str(), old.get_sort());\n }\n }\n auto num = old.num_args();\n z3::expr_vector quoi(ctx());\n for (unsigned i = 0; i != num; ++i) {\n auto arg = old.arg(i);\n quoi.push_back(replace_constants(arg));\n }\n return old.decl()(quoi);\n } else if (old.is_numeral()) {\n return ctx().constant(next().c_str(), old.get_sort());\n }\n\n return old;\n}\n\nstd::unordered_map>\nunconstrained(const z3::expr &e, const std::map &occs) {\n std::unordered_map> ucterms;\n std::function &)> lam;\n lam = [&](const z3::expr &e, std::vector &bounds) -> void {\n if (e.is_app()) {\n if (e.is_const() && e.decl().decl_kind() == Z3_OP_UNINTERPRETED)\n return;\n if (e.decl().decl_kind() == Z3_OP_DT_CONSTRUCTOR)\n return;\n if (e.decl().decl_kind() == Z3_OP_BNUM)\n return;\n std::vector vars;\n std::vector nonvars;\n for (unsigned i = 0; i != e.num_args(); ++i) {\n if ((e.arg(i).is_const() &&\n e.arg(i).decl().decl_kind() == Z3_OP_UNINTERPRETED) ||\n e.arg(i).is_var()) {\n vars.push_back(e.arg(i));\n } else {\n nonvars.push_back(e.arg(i));\n }\n }\n bool uc = true;\n std::unordered_set involved;\n for (auto &v : vars) {\n std::string declname;\n if (v.is_const()) {\n declname = v.decl().name().str();\n } else if (v.is_var()) {\n declname = bounds[bounds.size() - 1 - Z3_get_index_value(v.ctx(), v)];\n }\n auto I = occs.find(declname);\n if (I == occs.end() || I->second != 1) {\n uc = false;\n } else {\n involved.emplace(declname);\n }\n }\n if 
(e.is_eq() || e.decl().decl_kind() == Z3_OP_BADD) {\n if (involved.size() == 1) {\n ucterms[e] = std::move(involved);\n } else {\n for (auto &ex : nonvars) {\n lam(ex, bounds);\n }\n }\n } else {\n if (nonvars.empty()) {\n if (uc) {\n ucterms[e] = std::move(involved);\n }\n } else {\n for (auto &ex : nonvars) {\n lam(ex, bounds);\n }\n }\n }\n } else if (e.is_quantifier()) {\n auto nrs = Z3_get_quantifier_num_bound(e.ctx(), e);\n for (unsigned i = 0; i != nrs; ++i) {\n z3::symbol nm(e.ctx(), Z3_get_quantifier_bound_name(e.ctx(), e, i));\n bounds.push_back(nm.str());\n }\n lam(e.body(), bounds);\n for (unsigned i = 0; i != nrs; ++i)\n bounds.pop_back();\n } else if (e.is_var()) {\n return;\n }\n };\n std::vector bounds;\n lam(e, bounds);\n return ucterms;\n};\n\nz3::expr remove_unconstrained(const z3::expr &expr,\n std::map &occurs) {\n if (!expr.is_quantifier())\n return expr;\n auto nr = Z3_get_quantifier_num_bound(expr.ctx(), expr);\n std::vector sorts;\n std::vector names;\n std::unordered_set remove;\n for (unsigned i = 0; i != nr; ++i) {\n auto qname = Z3_get_quantifier_bound_name(expr.ctx(), expr, i);\n z3::symbol symbol(expr.ctx(), qname);\n names.push_back(symbol.str());\n auto qsort = Z3_get_quantifier_bound_sort(expr.ctx(), expr, i);\n z3::sort srt(expr.ctx(), qsort);\n sorts.push_back(srt);\n }\n z3::expr_vector subs(expr.ctx());\n for (unsigned i = 0; i != nr; ++i) {\n subs.push_back(\n expr.ctx().constant(names[nr - 1 - i].c_str(), sorts[nr - 1 - i]));\n }\n auto qfexpr = expr;\n qfexpr = qfexpr.body().substitute(subs);\n for (unsigned i = 0; i != nr; ++i) {\n subs[i] = expr.ctx().constant(names[i].c_str(), sorts[i]);\n }\n\n BUG_CHECK(!qfexpr.is_quantifier(),\n \"quantifier found even though not expecting one %1%\", qfexpr);\n\n auto crt = qfexpr;\n cegis c(expr);\n bool change = true;\n auto crtoccurences = occurs;\n std::unordered_set interesting;\n std::transform(\n occurs.begin(), occurs.end(),\n std::inserter(interesting, interesting.begin()),\n 
[](const std::pair &x) { return x.first; });\n do {\n LOG4(\"step: \" << crt);\n change = false;\n auto ucd = unconstrained(crt, crtoccurences);\n for (auto &uc : ucd) {\n LOG4(\"uc: \" << uc.first << Z3_get_ast_id(uc.first.ctx(), uc.first));\n auto fresh = z3::expr(expr.ctx(), Z3_mk_fresh_const(expr.ctx(), \"v__\",\n uc.first.get_sort()));\n auto fname = fresh.decl().name().str();\n change = true;\n z3::expr_vector src(uc.first.ctx());\n src.push_back(uc.first);\n z3::expr_vector dst(uc.first.ctx());\n dst.push_back(fresh);\n crt = crt.substitute(src, dst).simplify();\n LOG4(\"after substitution:\" << crt << \" \");\n interesting.emplace(fname);\n remove.insert(uc.second.begin(), uc.second.end());\n for (auto &rm : uc.second)\n LOG4(\"removing \" << rm);\n names.push_back(fname);\n sorts.push_back(fresh.get_sort());\n subs.push_back(fresh);\n }\n if (change) {\n auto nocs = c.occurences(crt);\n crtoccurences.clear();\n for (auto &n : nocs) {\n if (interesting.count(n.first))\n crtoccurences.emplace(n);\n }\n }\n } while (change);\n\n z3::expr_vector qs(expr.ctx());\n for (unsigned i = 0; i != names.size(); ++i) {\n if (!remove.count(names[i])) {\n qs.push_back(subs[i]);\n }\n }\n return z3::forall(qs, crt);\n}\n\nstd::map occurences(const z3::expr &expr,\n std::vector &bounds) {\n if (expr.is_const()) {\n if (expr.decl().decl_kind() == Z3_OP_UNINTERPRETED) {\n return {{expr.decl().name().str(), 1}};\n }\n return {};\n } else if (expr.is_var()) {\n auto idx = Z3_get_index_value(expr.ctx(), expr);\n return {{bounds[bounds.size() - idx - 1], 1}};\n }\n std::map res;\n if (expr.is_app()) {\n auto num = expr.num_args();\n for (unsigned i = 0; i != num; ++i) {\n auto arg = expr.arg(i);\n auto evd = occurences(arg, bounds);\n if (res.empty()) {\n res = std::move(evd);\n } else {\n for (const auto &pr : evd) {\n auto rib = res.emplace(pr);\n if (!rib.second) {\n rib.first->second += pr.second;\n }\n }\n }\n }\n } else if (expr.is_quantifier()) {\n auto body = 
expr.body();\n auto nrs = Z3_get_quantifier_num_bound(expr.ctx(), expr);\n for (unsigned i = 0; i != nrs; ++i) {\n z3::symbol nm(expr.ctx(),\n Z3_get_quantifier_bound_name(expr.ctx(), expr, i));\n bounds.push_back(nm.str());\n }\n auto occs = occurences(body, bounds);\n for (unsigned i = 0; i != nrs; ++i)\n bounds.pop_back();\n return std::move(occs);\n }\n return res;\n}\n\nstd::map cegis::occurences(const z3::expr &expr) {\n std::vector bounds;\n return ::occurences(expr, bounds);\n}\n\nstd::map typed_occurences(const z3::expr &expr) {\n if (expr.is_const()) {\n if (expr.decl().decl_kind() == Z3_OP_UNINTERPRETED) {\n return {{expr.decl().name().str(), expr.get_sort()}};\n }\n return {};\n }\n std::map res;\n if (expr.is_app()) {\n auto num = expr.num_args();\n for (unsigned i = 0; i != num; ++i) {\n auto arg = expr.arg(i);\n auto evd = typed_occurences(arg);\n if (res.empty()) {\n res = std::move(evd);\n } else {\n for (const auto &pr : evd) {\n auto rib = res.emplace(pr);\n if (!rib.second) {\n rib.first->second = pr.second;\n }\n }\n }\n }\n }\n return res;\n}\n\nstd::vector cegis::atoms(const z3::expr &expr,\n const std::string &what) {\n if (expr.get_sort().is_bool() && expr.is_app()) {\n auto decl = expr.decl();\n if (decl.decl_kind() == Z3_OP_AND || decl.decl_kind() == Z3_OP_OR ||\n decl.decl_kind() == Z3_OP_NOT || decl.decl_kind() == Z3_OP_IMPLIES) {\n // nothing to do here , will be handled below\n } else {\n auto occs = occurences(expr);\n if (occs.count(what)) {\n return {expr};\n }\n }\n }\n std::vector res;\n if (expr.is_app()) {\n auto num = expr.num_args();\n for (unsigned i = 0; i != num; ++i) {\n auto arg = expr.arg(i);\n auto evd = atoms(arg, what);\n if (res.empty()) {\n res = std::move(evd);\n } else {\n res.insert(res.end(), evd.begin(), evd.end());\n }\n }\n }\n return res;\n}\n\nbool is_unconstrained(const z3::expr &src,\n const std::map &occs) {\n if (src.is_app()) {\n if (src.is_bool()) {\n if (src.is_app() && src.decl().decl_kind() == 
Z3_OP_TRUE)\n return false;\n if (src.is_app() && src.decl().decl_kind() == Z3_OP_FALSE)\n return false;\n const auto &decl = src.decl();\n if (decl.decl_kind() == Z3_OP_EQ) {\n auto arg1 = src.arg(0);\n auto arg2 = src.arg(1);\n if (is_unconstrained(arg1, occs) || is_unconstrained(arg2, occs)) {\n return true;\n } else {\n if (arg1.is_app() && arg1.decl().decl_kind() == Z3_OP_BAND &&\n arg2.is_app() && arg2.decl().decl_kind() == Z3_OP_BAND) {\n auto arg11 = arg1.arg(0);\n auto arg12 = arg1.arg(1);\n auto arg21 = arg2.arg(0);\n auto arg22 = arg2.arg(1);\n if (z3::eq(arg12, arg22) && (is_unconstrained(arg11, occs) ||\n is_unconstrained(arg21, occs))) {\n return true;\n }\n } else if (arg1.is_app() && arg1.decl().decl_kind() == Z3_OP_BNOT &&\n arg2.is_app() && arg2.decl().decl_kind() == Z3_OP_BNOT) {\n // the bv rewriter does this for some yet undisclosed reasons\n auto arg11 = arg1.arg(0);\n auto arg21 = arg2.arg(0);\n if (arg11.is_app() && arg11.decl().decl_kind() == Z3_OP_BOR &&\n arg21.is_app() && arg21.decl().decl_kind() == Z3_OP_BOR) {\n if (arg11.num_args() == 2 && 2 == arg21.num_args()) {\n auto arg111 = arg11.arg(0);\n auto arg112 = arg11.arg(1);\n auto arg211 = arg21.arg(0);\n auto arg212 = arg21.arg(1);\n if (arg111.is_app() &&\n arg111.decl().decl_kind() == Z3_OP_BNOT &&\n arg112.is_app() &&\n arg112.decl().decl_kind() == Z3_OP_BNOT &&\n arg211.is_app() &&\n arg211.decl().decl_kind() == Z3_OP_BNOT &&\n arg212.is_app() &&\n arg212.decl().decl_kind() == Z3_OP_BNOT) {\n if (z3::eq(arg212.arg(0), arg112.arg(0)) &&\n (is_unconstrained(arg111, occs) ||\n is_unconstrained(arg211, occs))) {\n return true;\n }\n }\n }\n }\n }\n }\n }\n }\n if (src.is_app() && src.decl().decl_kind() == Z3_OP_BNUM)\n return false;\n if (src.is_const() && src.decl().decl_kind() == Z3_OP_UNINTERPRETED)\n return occs.find(src.decl().name().str())->second == 1;\n auto num = src.num_args();\n bool uncnstrd = true;\n for (unsigned i = 0; i != num; ++i) {\n if 
(!is_unconstrained(src.arg(i), occs)) {\n uncnstrd = false;\n break;\n }\n }\n if (uncnstrd)\n return true;\n }\n return false;\n}\n\nbool any_occurence(const std::string &ct, const z3::expr &target) {\n if (target.is_app()) {\n if (target.decl().decl_kind() == Z3_OP_TRUE ||\n target.decl().decl_kind() == Z3_OP_FALSE ||\n target.decl().decl_kind() == Z3_OP_BNUM ||\n target.decl().decl_kind() == Z3_OP_DT_CONSTRUCTOR)\n return false;\n if (target.is_const())\n return target.decl().name().str() == ct;\n for (unsigned i = 0; i != target.num_args(); ++i)\n if (any_occurence(ct, target.arg(i)))\n return true;\n }\n return false;\n}\n\nbool any_occurence(const z3::func_decl &ct, const z3::expr &target) {\n return any_occurence(ct.name().str(), target);\n}\n\nz3::expr_vector generate_necessary_abstraction(\n z3::solver *pslv, z3::solver *pdual_slv,\n std::unordered_set &ctrld,\n const std::unordered_set &may_control) {\n z3::context *context = &pslv->ctx();\n auto cr = pslv->check();\n z3::expr_vector control_blocks(*context);\n unsigned int nr_push = 0;\n while (cr == z3::check_result::sat) {\n auto model = pslv->get_model();\n z3::expr_vector assumptions(*context);\n for (const auto &e : ctrld) {\n auto ev = model.eval(e);\n switch (ev.bool_value()) {\n case Z3_L_TRUE:\n assumptions.push_back(e);\n break;\n case Z3_L_FALSE:\n assumptions.push_back(!e);\n default:\n break;\n }\n }\n std::stringstream ss;\n for (unsigned i = 0; i != assumptions.size(); ++i) {\n ss << assumptions[i] << ';';\n }\n LOG4(\"assumptions: \" << ss.str());\n auto dual_res = pdual_slv->check(assumptions);\n switch (dual_res) {\n case z3::check_result::unsat: {\n auto uc = pdual_slv->unsat_core();\n std::stringstream ss;\n for (unsigned i = 0; i != uc.size(); ++i) {\n ss << uc[i] << ';';\n }\n LOG4(\"core: \" << ss.str());\n control_blocks.push_back(!z3::mk_and(uc));\n nr_push++;\n pslv->push();\n pslv->add(!z3::mk_and(uc));\n break;\n }\n case z3::check_result::sat: {\n LOG4(\"can't do with current 
abstraction, need to refine\");\n bool changed = false;\n pdual_slv->push();\n for (unsigned i = 0; i != assumptions.size(); ++i) {\n pdual_slv->add(assumptions[i]);\n }\n z3::expr_vector old_assumptions(pdual_slv->ctx());\n for (unsigned i = 0; i != assumptions.size(); ++i) {\n old_assumptions.push_back(assumptions[i]);\n }\n assumptions.resize(0);\n for (auto &m : may_control) {\n if (ctrld.find(m) == ctrld.end()) {\n changed = true;\n auto ev = model.eval(m);\n switch (ev.bool_value()) {\n case Z3_L_TRUE:\n assumptions.push_back(m);\n break;\n case Z3_L_FALSE:\n assumptions.push_back(!m);\n default:\n break;\n }\n }\n }\n z3::expr_vector core(pdual_slv->ctx());\n if (!changed) {\n LOG3(\"can't control this behavior\");\n } else {\n auto cr = pdual_slv->check(assumptions);\n if (cr == z3::check_result::sat) {\n core = assumptions;\n LOG3(\"can't control this behavior\");\n } else {\n core = pdual_slv->unsat_core();\n for (unsigned i = 0, e = core.size(); i != e; ++i) {\n auto a = core[i];\n if (a.decl().decl_kind() == Z3_OP_NOT) {\n ctrld.emplace(a.arg(0));\n } else {\n ctrld.emplace(a);\n }\n }\n for (unsigned i = 0, e = old_assumptions.size(); i != e; ++i) {\n core.push_back(old_assumptions[i]);\n }\n control_blocks.push_back(!z3::mk_and(core));\n }\n }\n pdual_slv->pop();\n nr_push++;\n pslv->push();\n pslv->add(!z3::mk_and(core));\n break;\n }\n default:\n BUG(\"???\");\n }\n cr = pslv->check();\n }\n if (nr_push)\n pslv->pop(nr_push);\n return control_blocks;\n}\n\nz3::expr_vector generate_necessary_abstraction_2(\n z3::expr &rho_a, z3::solver *pslv, z3::solver *pdual_slv,\n std::unordered_set &ctrld,\n const std::unordered_set &may_control) {\n z3::context *context = &pslv->ctx();\n z3::solver rho_solver(*context);\n rho_solver.add(rho_a);\n auto cr = pslv->check();\n z3::expr_vector control_blocks(*context);\n unsigned int nr_push = 0;\n while (cr == z3::check_result::sat) {\n auto model = pslv->get_model();\n auto rhoeval = 
model.eval(rho_a).bool_value();\n if (rhoeval == Z3_L_FALSE) {\n auto ct_sz = model.num_consts();\n z3::expr_vector kill_this(*context);\n for (unsigned i = 0; i != ct_sz; ++i) {\n auto cd = model.get_const_decl(i);\n auto ci = model.get_const_interp(cd);\n kill_this.push_back(cd() == ci);\n }\n BUG_CHECK(rho_solver.check(kill_this) == z3::check_result::unsat,\n \"rho solver should be unsat in this model\");\n nr_push++;\n pslv->push();\n pslv->add(!z3::mk_and(rho_solver.unsat_core()));\n } else {\n z3::expr_vector assumptions(*context);\n for (const auto &e : ctrld) {\n auto ev = model.eval(e);\n switch (ev.bool_value()) {\n case Z3_L_TRUE:\n assumptions.push_back(e);\n break;\n case Z3_L_FALSE:\n assumptions.push_back(!e);\n default:\n break;\n }\n }\n std::stringstream ss;\n for (unsigned i = 0; i != assumptions.size(); ++i) {\n ss << assumptions[i] << ';';\n }\n LOG4(\"assumptions: \" << ss.str());\n auto dual_res = pdual_slv->check(assumptions);\n switch (dual_res) {\n case z3::check_result::unsat: {\n auto uc = pdual_slv->unsat_core();\n std::stringstream ss;\n for (unsigned i = 0; i != uc.size(); ++i) {\n ss << uc[i] << ';';\n }\n LOG4(\"core: \" << ss.str());\n control_blocks.push_back(!z3::mk_and(uc));\n nr_push++;\n pslv->push();\n pslv->add(!z3::mk_and(uc));\n break;\n }\n case z3::check_result::sat: {\n LOG4(\"can't do with current abstraction, need to refine\");\n bool changed = false;\n pdual_slv->push();\n for (unsigned i = 0; i != assumptions.size(); ++i) {\n pdual_slv->add(assumptions[i]);\n }\n z3::expr_vector old_assumptions(pdual_slv->ctx());\n for (unsigned i = 0; i != assumptions.size(); ++i) {\n old_assumptions.push_back(assumptions[i]);\n }\n assumptions.resize(0);\n for (auto &m : may_control) {\n if (ctrld.find(m) == ctrld.end()) {\n changed = true;\n auto ev = model.eval(m);\n switch (ev.bool_value()) {\n case Z3_L_TRUE:\n assumptions.push_back(m);\n break;\n case Z3_L_FALSE:\n assumptions.push_back(!m);\n default:\n break;\n }\n }\n }\n 
z3::expr_vector core(pdual_slv->ctx());\n if (!changed) {\n LOG3(\"can't control this behavior\");\n } else {\n auto cr = pdual_slv->check(assumptions);\n if (cr == z3::check_result::sat) {\n core = assumptions;\n LOG3(\"can't control this behavior\");\n } else {\n core = pdual_slv->unsat_core();\n for (unsigned i = 0, e = core.size(); i != e; ++i) {\n auto a = core[i];\n if (a.decl().decl_kind() == Z3_OP_NOT) {\n ctrld.emplace(a.arg(0));\n } else {\n ctrld.emplace(a);\n }\n }\n for (unsigned i = 0, e = old_assumptions.size(); i != e; ++i) {\n core.push_back(old_assumptions[i]);\n }\n control_blocks.push_back(!z3::mk_and(core));\n }\n }\n pdual_slv->pop();\n nr_push++;\n pslv->push();\n pslv->add(!z3::mk_and(core));\n } break;\n default:\n BUG(\"???\");\n }\n }\n cr = pslv->check();\n }\n if (nr_push)\n pslv->pop(nr_push);\n return control_blocks;\n}\n\nz3::expr_vector\ngenerate_necessary_abstraction(z3::solver *pslv, z3::solver *pdual_slv,\n std::unordered_set &ctrld) {\n std::unordered_set may_control;\n return generate_necessary_abstraction(pslv, pdual_slv, ctrld, may_control);\n}\n\nz3::expr merge_or(const z3::expr &e1, const z3::expr &e2) {\n if (e1.is_app() && e1.decl().decl_kind() == Z3_OP_AND && e2.is_app() &&\n e2.decl().decl_kind() == Z3_OP_AND) {\n auto nargs1 = e1.num_args();\n auto nargs2 = e2.num_args();\n unsigned int idx = 0;\n z3::expr_vector evec(e1.ctx());\n while (idx < nargs1 && idx < nargs2) {\n auto arg1 = e1.arg(idx);\n auto arg2 = e2.arg(idx);\n if (!z3::eq(arg1, arg2)) {\n break;\n }\n evec.push_back(arg1);\n ++idx;\n }\n z3::expr conj1 = e1.ctx().bool_val(true);\n for (unsigned i = idx; i < nargs1; ++i)\n conj1 = conj1 && e1.arg(i);\n z3::expr conj2 = e2.ctx().bool_val(true);\n for (unsigned i = idx; i < nargs2; ++i)\n conj2 = conj2 && e2.arg(i);\n evec.push_back(conj1 || conj2);\n return z3::mk_and(evec);\n }\n return e1 || e2;\n}\n\ntemplate void toFixPoint(T &obj, Fun f) {\n bool change = true;\n while (change) {\n change = f(obj);\n 
}\n};\n\nvoid packet_solver(z3::expr_vector &assertions, packet_theory &pt) {\n z3::solver slv(assertions.ctx());\n auto dumpAssertions = [](std::ostream &os, const z3::expr_vector &assertions,\n std::string n) {\n os << n << \":===============\\n\";\n for (unsigned i = 0, nr = assertions.size(); i != nr; ++i) {\n os << assertions[i] << '\\n';\n }\n os << \"===============\";\n };\n auto assertionString = [&dumpAssertions](const z3::expr_vector &assertions,\n std::string n) {\n std::stringstream ss;\n dumpAssertions(ss, assertions, n);\n return ss.str();\n };\n\n auto replacefun = [&](z3::expr_vector &src, z3::expr_vector &dst,\n z3::expr_vector &cp) -> bool {\n bool change = false;\n for (unsigned i = 0, nr = assertions.size(); i != nr; ++i) {\n auto e = assertions[i];\n auto newe = e.substitute(src, dst).simplify();\n if (!z3::eq(e, newe)) {\n change = true;\n LOG4(\"replace: \" << e << \" -> \" << newe);\n }\n cp.push_back(newe);\n }\n return change;\n };\n\n auto replaceEqualities = [&](z3::expr_vector &assertions) {\n std::unordered_map replace;\n z3::expr_vector cp(assertions.ctx());\n z3::expr_vector src(assertions.ctx());\n z3::expr_vector dst(assertions.ctx());\n auto nr = assertions.size();\n for (unsigned i = 0; i != nr; ++i) {\n auto e = assertions[i];\n if (e.is_eq()) {\n auto e1 = e.arg(0);\n auto e2 = e.arg(1);\n if (z3::eq(e1.get_sort(), pt.packetSort)) {\n if (pt.isConst(e1) && pt.isConst(e2)) {\n auto replace1 = replace.count(e1) != 0;\n auto replace2 = replace.count(e2) != 0;\n z3::expr what(assertions.ctx());\n z3::expr with(assertions.ctx());\n if (!replace1 || !replace2) {\n if (!replace1) {\n what = e1;\n with = e2;\n } else if (!replace2) {\n what = e2;\n with = e1;\n }\n auto I = replace.find(with);\n src.push_back(what);\n if (I != replace.end()) {\n replace.emplace(what, I->second);\n dst.push_back(I->second);\n } else {\n replace.emplace(what, with);\n dst.push_back(with);\n }\n }\n }\n }\n }\n }\n auto c = replacefun(src, dst, cp);\n 
assertions = cp;\n return c;\n };\n toFixPoint(assertions, replaceEqualities);\n LOG4(assertionString(assertions, \"fold equalities\"));\n // push equalities inwards\n auto pushEqualities = [&](z3::expr_vector &assertions) {\n bool change = false;\n auto nr = assertions.size();\n z3::expr_vector cp(assertions.ctx());\n z3::expr_vector src(assertions.ctx());\n z3::expr_vector dst(assertions.ctx());\n std::unordered_map replace;\n for (unsigned i = 0; i != nr; ++i) {\n auto e = assertions[i];\n if (e.is_eq()) {\n auto e1 = e.arg(0);\n auto e2 = e.arg(1);\n if (pt.isPacket(e1)) {\n auto e1const = pt.isConst(e1);\n auto e2const = pt.isConst(e2);\n z3::expr rep(e1.ctx());\n z3::expr with(e1.ctx());\n if (e1const != e2const) {\n if (e1const) {\n rep = e1;\n with = e2;\n } else {\n rep = e2;\n with = e1;\n }\n replace.emplace(rep, with);\n } else if (e1const) {\n BUG(\"expecting (= p constructor), got %1%\", e);\n }\n }\n }\n }\n for (auto &rep : replace) {\n src.push_back(rep.first);\n dst.push_back(rep.second);\n }\n for (unsigned i = 0; i != nr; ++i) {\n auto e = assertions[i];\n auto newe = e.substitute(src, dst).simplify();\n if (!z3::eq(newe, e)) {\n change = true;\n }\n cp.push_back(newe);\n }\n assertions = cp;\n return change;\n };\n toFixPoint(assertions, pushEqualities);\n LOG4(assertionString(assertions, \"push equalities\"));\n auto eliminatePrependPrepend = [&](z3::expr_vector &assertions) {\n z3::expr_vector src(assertions.ctx());\n z3::expr_vector dst(assertions.ctx());\n auto nr = assertions.size();\n for (unsigned i = 0; i != nr; ++i) {\n auto e = assertions[i];\n recurse(e,\n [&](const z3::expr &ex) {\n if (pt.isPacket(ex) && pt.isPrepend(ex)) {\n auto e1 = ex.arg(1);\n if (pt.isPrepend(e1)) {\n auto p = ex.arg(0);\n auto c = e1.arg(0);\n auto d = e1.arg(1);\n src.push_back(ex);\n dst.push_back(pt.prepend(pt.prepend(p, c), d));\n }\n }\n return true;\n },\n [](const z3::expr &) {});\n }\n z3::expr_vector cp(assertions.ctx());\n auto c = replacefun(src, 
dst, cp);\n assertions = cp;\n return c;\n };\n toFixPoint(assertions, eliminatePrependPrepend);\n LOG4(assertionString(assertions, \"trans\"));\n // post re-write checks:\n auto emitHandling = [&](z3::expr_vector &assertions) {\n bool change = false;\n std::unordered_map replace;\n for (unsigned i = 0, e = assertions.size(); i != e; ++i) {\n if (assertions[i].is_eq()) {\n auto e1 = assertions[i].arg(0);\n auto e2 = assertions[i].arg(1);\n if (pt.isPacket(e1)) {\n if (pt.isPrepend(e1) && pt.isPrepend(e2)) {\n auto e12 = e1.arg(1);\n auto e22 = e2.arg(1);\n auto ex12 = pt.isEmit(e12.decl());\n auto ex22 = pt.isEmit(e22.decl());\n if (ex12 && ex22) {\n if (*ex12 == *ex22) {\n replace.emplace(assertions[i], e12.arg(0) == e22.arg(0) &&\n e1.arg(0) == e2.arg(0));\n assertions.push_back(e12.arg(0) == e22.arg(0));\n assertions.push_back(e1.arg(0) == e2.arg(0));\n } else {\n unsigned N = 0;\n unsigned M = 0;\n z3::expr x(assertions.ctx());\n z3::expr y(assertions.ctx());\n z3::expr p1(assertions.ctx());\n z3::expr p2(assertions.ctx());\n if (*ex12 < *ex22) {\n N = *ex22;\n M = *ex12;\n p1 = e2.arg(0);\n p2 = e1.arg(0);\n x = e22.arg(0);\n y = e12.arg(0);\n } else {\n N = *ex12;\n M = *ex22;\n p1 = e1.arg(0);\n p2 = e2.arg(0);\n x = e12.arg(0);\n y = e22.arg(0);\n }\n auto p2_ = z3::to_expr(\n assertions.ctx(),\n Z3_mk_fresh_const(assertions.ctx(), \"pack\", pt.packetSort));\n auto x2_ = z3::to_expr(\n assertions.ctx(),\n Z3_mk_fresh_const(assertions.ctx(), \"x\",\n assertions.ctx().bv_sort(N - M)));\n assertions.push_back(y == x.extract(N - 1, N - M));\n assertions.push_back(p2 ==\n pt.prepend(p2_, pt.emit(N - M)(x2_)));\n assertions.push_back(x2_ == x.extract(N - M - 1, 0));\n assertions.push_back(p2_ == p1);\n }\n change = true;\n Z3_ast_vector_set(assertions.ctx(), assertions, i,\n assertions.ctx().bool_val(true));\n }\n }\n }\n }\n }\n return change;\n };\n toFixPoint(assertions, emitHandling);\n LOG4(assertionString(assertions, \"emitHandling\"));\n}\n\nz3::expr 
cegis::generate_forall(const z3::expr &src,\n const std::map &occs) {\n auto &ctx = src.ctx();\n if (is_unconstrained(src, occs)) {\n std::cerr << \"yup, unconstrained statement... \" << src << '\\n';\n return ctx.bool_val(true);\n } else {\n std::cerr << \"not unconstrained statement... \" << src << '\\n';\n return ctx.bool_val(false);\n }\n z3::expr_vector rep(ctx), with(ctx);\n auto typed = typed_occurences(src);\n for (auto &occ : occs) {\n if (occ.second == 1) {\n with.push_back(ctx.constant((occ.first + \"__\").c_str(),\n typed.find(occ.first)->second));\n rep.push_back(\n ctx.constant(occ.first.c_str(), typed.find(occ.first)->second));\n }\n }\n auto ebody = z3::expr(src).substitute(rep, with);\n auto existvars = with;\n rep = z3::expr_vector(ctx);\n with = z3::expr_vector(ctx);\n for (auto &occ : occs) {\n if (occ.second > 1) {\n with.push_back(ctx.constant((occ.first + \"__\").c_str(),\n typed.find(occ.first)->second));\n rep.push_back(\n ctx.constant(occ.first.c_str(), typed.find(occ.first)->second));\n }\n }\n auto body = ebody.substitute(rep, with);\n with.push_back(ctx.constant(\"value____\", src.get_sort()));\n return z3::forall(with, z3::exists(existvars, with.back() == body));\n}\n\nz3::expr cegis::simplify(z3::expr sigma) {\n z3::context &ctx = sigma.ctx();\n auto m = occurences(sigma);\n std::map level_1_unconstrained;\n std::copy_if(\n m.begin(), m.end(),\n std::inserter(level_1_unconstrained, level_1_unconstrained.end()),\n [&](const std::pair &pr) {\n // level of quantification == 2 && number of occurences == 1 =>\n // need to just check if I can simply slash all formulas\n return pr.first.find(\"pre_call\") == std::string::npos && pr.second == 1;\n });\n z3::expr_vector rep(ctx), with(ctx);\n auto tmp_nr = 0;\n for (const auto &p : level_1_unconstrained) {\n auto exprs = atoms(sigma, p.first);\n if (!exprs.empty()) {\n for (const auto &e : exprs) {\n auto eoccs = occurences(e);\n try {\n std::map all_occs;\n for (const auto &p : m) {\n if 
(p.first.find(\"pre_call\") != std::string::npos &&\n eoccs.count(p.first)) {\n all_occs.emplace(p.first, 2);\n } else {\n if (eoccs.count(p.first)) {\n all_occs.emplace(p);\n }\n }\n }\n auto fall = is_unconstrained(e, all_occs);\n if (fall) {\n rep.push_back(e);\n std::string tmp = \"tmp\";\n tmp += std::to_string(tmp_nr++);\n with.push_back(ctx.constant(tmp.c_str(), e.get_sort()));\n }\n } catch (z3::exception &e) {\n std::cerr << e << '\\n';\n throw e;\n }\n }\n }\n }\n return sigma.substitute(rep, with).simplify();\n}\n\nnamespace analysis {\nvoid get_extra_controls(z3::solver &context,\n const std::unordered_set &hints,\n const std::string &check_against,\n z3::expr_vector &evec) {\n LOG4(\"starting inference loop for \" << check_against);\n std::unordered_set all_living_things, known;\n auto nr_assertions = context.assertions().size();\n auto assertions = context.assertions();\n for (unsigned i = 0; i != nr_assertions; ++i) {\n auto flat_context = assertions[i];\n get_atoms(flat_context, all_living_things,\n [&](const std::string &nm) { return check_against == nm; });\n }\n for (auto &hint : hints) {\n get_atoms(hint, all_living_things,\n [&](const std::string &nm) { return check_against == nm; });\n }\n for (auto &e : all_living_things) {\n context.push();\n context.add(!e);\n if (context.check() == z3::check_result::unsat) {\n // nothing\n if (e.decl().decl_kind() != Z3_OP_EQ) {\n known.emplace(e.ctx().bool_val(true));\n } else {\n auto e0 = e.arg(0);\n auto e1 = e.arg(1);\n if (e0.decl().decl_kind() == Z3_OP_UNINTERPRETED &&\n check_against == e0.decl().name().str()) {\n known.emplace(e1);\n } else if (e1.decl().decl_kind() == Z3_OP_UNINTERPRETED &&\n check_against == e1.decl().name().str()) {\n known.emplace(e0);\n }\n }\n } else {\n context.pop();\n context.push();\n context.add(e);\n if (context.check() == z3::check_result::unsat) {\n if (e.decl().decl_kind() == Z3_OP_EQ) {\n auto e0 = e.arg(0);\n auto e1 = e.arg(1);\n if (e0.is_bool()) {\n 
known.emplace(!e1);\n }\n } else {\n known.emplace(e.ctx().bool_val(false));\n }\n }\n }\n context.pop();\n }\n for (const auto &exp : known) {\n auto e = controls_expr(exp);\n LOG4(\"inference computed \" << e);\n evec.push_back(e);\n }\n}\n\nbool get_extra_control_bt(\n z3::solver &context, z3::solver &ctx2,\n const std::set &may_control,\n const std::unordered_set &hints, std::set &processed,\n std::map &known_inferences,\n z3::expr_vector &do_add) {\n z3::context &ctx = context.ctx();\n bool is_changed = true;\n unsigned int nr_pushes = 0;\n while (ctx2.check() == z3::check_result::sat) {\n is_changed = false;\n auto model = ctx2.get_model();\n auto model_size = model.num_consts();\n std::unordered_set uncontrolled;\n z3::expr_vector block(ctx);\n z3::expr_vector exprs(ctx);\n for (unsigned i = 0; i != model_size; ++i) {\n auto ct = model.get_const_decl(i);\n if (ct.name().str().find(\"controls_\") != std::string::npos &&\n ct.range().is_bool()) {\n auto in = model.get_const_interp(ct);\n switch (in.bool_value()) {\n case Z3_L_FALSE:\n block.push_back(!ct());\n if (processed.find(ct.name().str().substr(9)) == processed.end()) {\n uncontrolled.emplace(ct.name().str().substr(9));\n auto I = known_inferences.find(ct.name().str().substr(9));\n if (I != known_inferences.end()) {\n for (unsigned idx = 0, e = I->second.size(); idx != e; ++idx) {\n exprs.push_back(I->second[idx]);\n }\n } else {\n z3::expr_vector new_vec(ctx);\n get_extra_controls(context, hints, ct.name().str().substr(9),\n new_vec);\n known_inferences.emplace(ct.name().str().substr(9), new_vec)\n .first;\n for (unsigned idx = 0, e = new_vec.size(); idx != e; ++idx) {\n exprs.push_back(new_vec[idx]);\n }\n }\n }\n break;\n case Z3_L_TRUE:\n block.push_back(ct());\n case Z3_L_UNDEF:\n break;\n }\n }\n }\n unsigned sz = exprs.size();\n // add consequences\n for (unsigned i = 0; i != sz; ++i) {\n auto e = exprs[i];\n ctx2.push();\n ctx2.add(e);\n auto cr = ctx2.check();\n ctx2.pop();\n if (cr == 
z3::check_result::sat) {\n is_changed = true;\n ctx2.push();\n ++nr_pushes;\n ctx2.add(!e);\n }\n }\n if (!is_changed) {\n z3::expr_vector assumptions(ctx);\n for (const auto &mc : may_control) {\n assumptions.push_back(control_var(ctx, mc));\n }\n auto cr = ctx2.check(assumptions);\n if (cr != z3::check_result::unsat) {\n LOG4(\"oh, snap...\");\n return false;\n } else {\n for (unsigned i = 0; i != model_size; ++i) {\n auto ct = model.get_const_decl(i);\n if (ct.name().str().find(\"controls_\") != std::string::npos &&\n ct.range().is_bool() &&\n may_control.count(ct.name().str().substr(9))) {\n auto v = model.get_const_interp(ct).bool_value();\n if (v == Z3_L_FALSE) {\n do_add.push_back(ct());\n }\n }\n }\n }\n } else {\n auto old_size = do_add.size();\n processed.insert(uncontrolled.begin(), uncontrolled.end());\n if (!get_extra_control_bt(context, ctx2, may_control, hints, processed,\n known_inferences, do_add))\n return false;\n for (auto n : uncontrolled) {\n processed.erase(n);\n }\n if (nr_pushes) {\n ctx2.pop(nr_pushes);\n nr_pushes = 0;\n }\n if (do_add.size() != old_size) {\n for (auto i = old_size; i != do_add.size(); ++i)\n ctx2.add(do_add[i]);\n }\n }\n ctx2.add(!z3::mk_and(block));\n block.resize(0);\n }\n return true;\n}\n\nz3::expr controls_expr(const z3::expr &need_to) {\n std::set must_control;\n recurse(need_to,\n [&](const z3::expr &e) {\n if (e.is_app() && e.num_args() == 0 &&\n e.decl().decl_kind() == Z3_OP_UNINTERPRETED) {\n must_control.emplace(e.decl().name().str());\n return false;\n } else {\n return true;\n }\n },\n [](const z3::expr &) {});\n z3::expr_vector evec(need_to.ctx());\n for (const auto &s : must_control) {\n evec.push_back(control_var(need_to.ctx(), s));\n }\n return z3::mk_and(evec);\n}\n\nz3::expr control_var(z3::context &ctx, const std::string &s) {\n std::stringstream ss;\n ss << \"controls_\" << s;\n return ctx.bool_const(ss.str().c_str());\n}\n\nbool get_extra_control(z3::solver &context, z3::expr &must_control,\n const 
std::set &controls,\n const std::set &may_control,\n const std::unordered_set &hints,\n z3::expr_vector &do_add) {\n z3::solver ctx2(context.ctx());\n for (const auto &ctrl : controls) {\n ctx2.add(control_var(ctx2.ctx(), ctrl));\n }\n for (unsigned i = 0, e = do_add.size(); i != e; ++i) {\n ctx2.add(do_add[i]);\n }\n auto old_sz = do_add.size();\n LOG4(\"must control \" << must_control);\n ctx2.add(!controls_expr(must_control));\n std::set visited;\n std::map known_inferences;\n auto rest = get_extra_control_bt(context, ctx2, may_control, hints, visited,\n known_inferences, do_add);\n LOG4(\"result \" << must_control);\n for (unsigned i = old_sz; i != do_add.size(); ++i) {\n LOG4(\"means \" << do_add[i]);\n }\n return rest;\n}\n\nstd::unordered_set get_atoms(const z3::expr &flat_context) {\n std::unordered_set atoms;\n get_atoms(flat_context, atoms, [](const std::string &) { return true; });\n return atoms;\n}\n}\n\nunsigned packet_solver_::termid(const z3::expr &e) {\n auto EMI = index.emplace(e, index.size());\n if (EMI.second)\n revindex.emplace_back(e);\n return EMI.first->second;\n}\n\nvoid packet_solver_::add(z3::expr e) {\n // auto nnfe = to_nnf(e);\n // auto atoms = analysis::get_atoms(nnfe);\n // for (auto &at : atoms) {\n // bool isth = false;\n // recurse(at, [&](const z3::expr &ex) {\n // if (pt.isPacket(ex)) {\n // auto before = revindex.size();\n // auto nr = termid(ex);\n // if (nr == before) {\n // // new term found\n // if (!pt.isConst(ex))\n // free_terms.emplace(nr);\n // }\n // isth = true;\n // }\n // return true;\n // });\n // if (isth) {\n // theory_atoms.emplace(at);\n // if (at.is_eq()) {\n // auto e0 = at.arg(0);\n // auto e1 = at.arg(1);\n // if (!pt.isConst(e0)) {\n // if (pt.isConst(e1)) {\n // std::swap(e0, e1);\n // } else {\n // BUG(\"can't handle arbitrary equations, only between var to term \"\n // \"%1%\",\n // at);\n // }\n // }\n // free_terms.erase(termid(e1));\n // }\n // }\n // }\n // for (auto tid : free_terms) {\n // // 
make up new variables for terms which have no variables\n // // implicitly attached. i.e. modelEmit_XXX\n // auto oldterm = revindex[tid];\n // if (pt.isZero(oldterm)) continue;\n // BUG_CHECK(pt.isEmit(oldterm.decl()), \"new terms must be emits, but %1% found\", oldterm);\n // auto tdumb = z3::to_expr(ctx(), Z3_mk_fresh_const(ctx(), \"pack\", pt.packetSort));\n // dumb_vars.emplace(tid, tdumb);\n // }\n // unsigned int crt_polarity = 0;\n // recurse(nnfe, [&crt_polarity](const z3::expr &e) {\n // if (e.is_not()) crt_polarity++;\n // return true;\n // }, [this, &crt_polarity](const z3::expr &e) {\n // if (e.is_not()) {\n // crt_polarity--;\n // return;\n // }\n // if (theory_atoms.count(e)) {\n // auto EMI = theory_atoms_polarity.emplace(e, 1 << (crt_polarity % 2));\n // if (!EMI.second) {\n // EMI.first->second |= (1 << (crt_polarity % 2));\n // }\n // }\n // });\n // for (const auto &pol : theory_atoms_polarity) {\n // if (pol.second != 1) {\n // BUG(\"bipolar atom %1% %2%\", pol.first, pol.second);\n // }\n // }\n // s.add(e.substitute(src, dst));\n s.add(e);\n}\n\nanalysis::node_t node_from_exp(const z3::expr &e) {\n analysis::node_t n;\n return n.clone(e.hash());\n}\n\nz3::check_result packet_solver_::check() {\n START(solve);\n auto cr = s.check();\n END(solve);\n START(refine);\n while (cr == z3::check_result::sat) {\n auto model = s.get_model();\n for (const auto &that : theory_atoms) {\n auto evd = model.eval(that);\n auto bv = evd.bool_value();\n if (bv != Z3_L_UNDEF) {\n auto b = (bv == Z3_L_TRUE);\n if (that.is_eq()) {\n if (!b) {\n std::ofstream dump(\"dump_wrong.smt\");\n dump << s << '\\n';\n dump << \"(check-sat)\";\n dump.close();\n BUG(\"can't handle neq in %1%\", that);\n }\n } else {\n BUG(\"can't handle anything other than eq in %1%\", that);\n }\n }\n }\n cr = s.check();\n }\n END(refine);\n auto dref = DURATION(refine);\n auto d = DURATION(solve);\n\n std::cerr << \"check result:\" << cr << \" #assertions:\" << s.assertions().size()\n << \" 
time:\" << d << \"ms, refined in \" << dref << \"ms\\n\";\n return cr;\n}\n\nz3::check_result packet_solver_::check(z3::expr_vector &evec) {\n START(solve);\n auto cr = s.check(evec);\n END(solve);\n auto d = DURATION(solve);\n std::cerr << \"check assumptions result:\" << cr\n << \" #assertions:\" << s.assertions().size()\n << \" #assumptions:\" << evec.size() << \" time:\" << d << \"ms\\n\";\n return cr;\n}\n\nz3::expr_vector packet_solver_::unsat_core() const {\n START(unsatCore);\n auto r = s.unsat_core();\n END(unsatCore);\n auto d = DURATION(unsatCore);\n std::cerr << \"unsat core result:\" << r.size() << \" time:\" << d << \" ms\\n\";\n return r;\n}\n\nz3::model packet_solver_::get_model() const {\n START(getModel);\n auto m = s.get_model();\n END(getModel);\n auto d = DURATION(getModel);\n std::cerr << \"get model time:\" << d << \"ms\\n\";\n return m;\n}\n\nvoid packet_solver_::pop(unsigned int n) { s.pop(n); }\n\nvoid packet_solver_::push() { s.push(); }\n\nstd::ostream &operator<<(std::ostream &os, const packet_solver_ &ps) {\n return os << ps.s;\n}\n\nz3::expr remove_packets(const z3::expr &expr, packet_theory &pt) {\n if (!expr.is_quantifier()) {\n return expr;\n }\n auto &ctx = expr.ctx();\n std::vector> sorted;\n\n std::vector occurences;\n auto numbounds = Z3_get_quantifier_num_bound(expr.ctx(), expr);\n Z3_symbol syms[numbounds];\n Z3_sort sorts[numbounds];\n for (unsigned i = 0; i != numbounds; ++i) {\n auto sm = Z3_get_quantifier_bound_name(ctx, expr, i);\n auto str = Z3_get_symbol_string(ctx, sm);\n auto srt = z3::to_sort(ctx, Z3_get_quantifier_bound_sort(ctx, expr, i));\n sorted.emplace_back(str, srt);\n syms[i] = sm;\n sorts[i] = srt;\n }\n auto body = expr.body();\n z3::expr_vector src(ctx);\n z3::expr_vector dst(ctx);\n recurse(body, [&](const z3::expr &e) {\n if (e.is_and() || e.is_or() || e.is_implies() || e.is_ite() || e.is_not())\n return true;\n if (e.is_eq()) {\n if (pt.isPacket(e.arg(0))) {\n src.push_back(e);\n 
dst.push_back(ctx.bool_val(true));\n }\n return false;\n }\n return true;\n });\n body = body.substitute(src, dst);\n return z3::to_expr(ctx,\n Z3_mk_quantifier(ctx, Z3_is_quantifier_forall(ctx, expr),\n Z3_get_quantifier_weight(ctx, expr), 0,\n nullptr, numbounds, sorts, syms, body));\n}\n\nz3::expr chunkify(const z3::expr &expr) {\n if (!expr.is_quantifier()) {\n return expr;\n }\n auto &ctx = expr.ctx();\n std::vector> sorted;\n std::vector occurences;\n auto numbounds = Z3_get_quantifier_num_bound(expr.ctx(), expr);\n for (unsigned i = 0; i != numbounds; ++i) {\n auto sm = Z3_get_quantifier_bound_name(ctx, expr, i);\n auto str = Z3_get_symbol_string(ctx, sm);\n auto srt = z3::to_sort(ctx, Z3_get_quantifier_bound_sort(ctx, expr, i));\n sorted.emplace_back(str, srt);\n }\n occurences.resize(numbounds, 0);\n auto body = expr.body();\n recurse(body, [](const z3::expr &e) {\n if (e.is_quantifier())\n BUG(\"no nested quantifiers allowed %1%\", e);\n return true;\n });\n recurse(body, [&](const z3::expr &e) {\n if (e.is_var()) {\n auto idx = Z3_get_index_value(e.ctx(), e);\n occurences[numbounds - idx - 1]++;\n }\n return true;\n });\n auto body_ = body;\n std::vector remove;\n std::vector add;\n for (unsigned j = 0; j != numbounds; ++j) {\n auto occs = occurences[j];\n if (occs > 1 && sorted[j].second.is_bv()) {\n std::unordered_set terms;\n LOG4(sorted[j].first << \" occurs #\" << occs);\n recurse(body, [&](const z3::expr &e) {\n if (e.is_app()) {\n for (unsigned i = 0; i != e.num_args(); ++i) {\n if (e.arg(i).is_var()) {\n auto idx = numbounds - Z3_get_index_value(e.ctx(), e.arg(i)) - 1;\n if (idx == j) {\n LOG4(\"in term:\" << e);\n terms.emplace(e);\n }\n }\n }\n }\n return true;\n });\n std::set intervals({0, sorted[j].second.bv_size()});\n std::map, z3::expr> extracts;\n for (auto &term : terms) {\n if (term.decl().decl_kind() == Z3_OP_EXTRACT) {\n auto lo = Z3_get_decl_int_parameter(ctx, term.decl(), 1);\n auto hi = Z3_get_decl_int_parameter(ctx, term.decl(), 
0);\n intervals.emplace(lo);\n intervals.emplace(hi + 1);\n extracts.emplace(std::make_pair(lo, hi + 1), term);\n }\n }\n if (intervals.size() <= 2)\n continue;\n auto I = intervals.begin();\n auto crt = *I;\n ++I;\n std::map> cover;\n for (; I != intervals.end(); ++I) {\n auto Zi = z3::to_expr(\n ctx, Z3_mk_fresh_const(ctx, \"x\", ctx.bv_sort(*I - crt)));\n cover.emplace(crt, std::make_pair(*I, Zi));\n crt = *I;\n }\n if (cover.size() == 1)\n continue;\n z3::expr_vector src(ctx);\n z3::expr_vector dst(ctx);\n for (auto &ex : extracts) {\n auto c = cover.find(ex.first.first)->second;\n z3::expr_vector evec(ctx);\n while (c.first <= ex.first.second) {\n evec.push_back(c.second);\n c = cover.find(c.first)->second;\n }\n z3::expr pleaserep(ctx);\n if (evec.size() == 1)\n pleaserep = evec.back();\n else\n pleaserep = z3::concat(evec);\n src.push_back(ex.second);\n dst.push_back(pleaserep);\n }\n body_ = body_.substitute(src, dst);\n z3::expr_vector cov(ctx);\n for (auto &x : cover) {\n cov.push_back(x.second.second);\n add.push_back(x.second.second);\n }\n z3::expr cove = z3::concat(cov);\n recurse(body, [&](const z3::expr &e) {\n if (e.is_var()) {\n auto idx = numbounds - Z3_get_index_value(e.ctx(), e) - 1;\n if (idx == j) {\n src.push_back(e);\n dst.push_back(cove);\n }\n }\n return true;\n });\n body_ = body_.substitute(src, dst);\n remove.push_back(j);\n }\n }\n\n std::vector freshes;\n z3::expr_vector src(ctx);\n z3::expr_vector dst(ctx);\n\n for (auto &p : sorted) {\n freshes.push_back(\n z3::to_expr(ctx, Z3_mk_fresh_const(ctx, p.first.c_str(), p.second)));\n }\n recurse(body, [&](const z3::expr &ex) {\n if (ex.is_var()) {\n auto idx = numbounds - Z3_get_index_value(ex.ctx(), ex) - 1;\n src.push_back(ex);\n dst.push_back(freshes[idx]);\n }\n return true;\n });\n body_ = body_.substitute(src, dst);\n z3::expr_vector newbounds(ctx);\n for (unsigned i = 0; i != numbounds; ++i) {\n if (!std::binary_search(remove.begin(), remove.end(), i)) {\n 
newbounds.push_back(freshes[i]);\n }\n }\n for (const auto &ex : add) {\n newbounds.push_back(ex);\n }\n return z3::forall(newbounds, body_);\n}\n\npacket_solver_::packet_solver_(z3::solver &s, packet_theory &pt)\n : s(s), pt(pt) {\n s.set(\"macro_finder\", true);\n termid(pt.zero());\n}\n\nvoid packet_solver_::makeAxioms() {\n // saturate extracts\n auto made = pt.make_axioms();\n for (unsigned i = 0, e = made.size(); i != e; ++i) {\n s.add(made[i]);\n }\n // bool saturated = false;\n // while (!saturated) {\n // auto oldsize = pt.packetExtracts.size();\n // std::set alsoadd;\n // for (auto &pex : pt.packetExtracts) {\n // for (auto &pem : pt.packetEmits) {\n // auto N = pex.first;\n // auto M = pem.first;\n // while (N > M) {\n // (void)alsoadd.emplace(N - M);\n // N -= M;\n // }\n // }\n // }\n // for (auto als : alsoadd) {\n // pt.extract(als);\n // }\n // saturated = oldsize == pt.packetExtracts.size();\n // }\n // LOG4(\"#packetExtracts:\" << pt.packetExtracts.size());\n // {\n // auto observation = [](const z3::expr &e) { return e; };\n // auto pack = ctx().constant(\"p\", pt.packetSort);\n // auto c = ctx().constant(\"c\", pt.packetSort);\n // auto d = ctx().constant(\"d\", pt.packetSort);\n // auto ast = new Z3_ast[1];\n // ast[0] = observation(pt.prepend(pack, pt.zero()));\n // auto ppat = new Z3_pattern[1];\n // ppat[0] = Z3_mk_pattern(ctx(), 1, ast);\n // Z3_inc_ref(ctx(), Z3_pattern_to_ast(ctx(), ppat[0]));\n // BUG_CHECK(ctx().check_error() == Z3_OK, \"not ok \");\n //\n // auto ppack = new Z3_app[1];\n // ppack[0] = pack;\n // // s.add(z3::to_expr(\n // // ctx(), Z3_mk_forall_const(ctx(), 0, 1, ppack, 1, ppat,\n // // observation(pt.prepend(pack,\n // pt.zero()))\n // // ==\n // // observation(pack))));\n // LOG4(\"made forall\");\n // ast[0] = observation(pt.prepend(pt.zero(), pack));\n // ppat[0] = Z3_mk_pattern(ctx(), 1, ast);\n // Z3_inc_ref(ctx(), Z3_pattern_to_ast(ctx(), ppat[0]));\n // BUG_CHECK(ctx().check_error() == Z3_OK, \"not ok \");\n // 
// s.add(z3::to_expr(\n // // ctx(), Z3_mk_forall_const(ctx(), 0, 1, ppack, 1, ppat,\n // // observation(pt.prepend(pt.zero(),\n // pack))\n // // ==\n // // observation(pack))));\n // LOG4(\"made forall 2\");\n // ast[0] = observation(pt.prepend(pack, pt.prepend(c, d)));\n // ppat[0] = Z3_mk_pattern(ctx(), 1, ast);\n // Z3_inc_ref(ctx(), Z3_pattern_to_ast(ctx(), ppat[0]));\n //\n // auto apps = new Z3_app[3];\n // apps[0] = pack;\n // apps[1] = c;\n // apps[2] = d;\n // // s.add(z3::to_expr(\n // // ctx(), Z3_mk_forall_const(\n // // ctx(), 0, 3, apps, 1, ppat,\n // // observation(pt.prepend(pack, pt.prepend(c, d))) ==\n // // observation(pt.prepend(pt.prepend(pack, c),\n // d)))));\n //\n // for (auto &pem : pt.packetEmits) {\n // auto X = ctx().bv_const(\"x\", pem.first);\n // s.add(z3::forall(X, pt.length(pt.emit(pem.first)(X)) ==\n // ctx().num_val(pem.first, ctx().int_sort())));\n // }\n // s.add(z3::forall(pack, pt.length(pack) >= 0));\n // s.add(z3::forall(\n // pack, z3::implies(pt.length(pack) == ctx().num_val(0,\n // ctx().int_sort()),\n // pack == pt.zero())));\n // s.add(pt.length(pt.zero()) == ctx().num_val(0, ctx().int_sort()));\n //\n // // for (auto &pem : pt.packetEmits) {\n // // auto X = ctx().bv_const(\"x\", pem.first);\n // // s.add(z3::forall(X, pt.emit(pem.first)(X) != pt.zero()));\n // // }\n // // {\n // // auto c = ctx().constant(\"c\", pt.packetSort);\n // // auto d = ctx().constant(\"d\", pt.packetSort);\n // // z3::expr_vector expr_vector(ctx());\n // // expr_vector.push_back(c);\n // // expr_vector.push_back(d);\n // // s.add(z3::forall(expr_vector,\n // // lenfun(pt.prepend(c, d)) == lenfun(c) +\n // // lenfun(d)));\n // // }\n // }\n}\npacket_theory::packet_theory(z3::context &context)\n : context(context), packetSort(context), bsort(context.bv_sort(1)),\n zero(context), prepend(context), length(context), constructor(context),\n projections(context) {\n // packetSort = ctx().seq_sort(bsort);\n packetSort = 
ctx().uninterpreted_sort(\"packet\");\n zero = ctx().function(\"modelZero\", 0, nullptr, packetSort);\n length = ctx().function(\"length\", packetSort, ctx().int_sort());\n // const char *names[2] = {\"length\", \"arr\"};\n // z3::sort sorts[2] = {ctx().int_sort(), ctx().bv_sort(4096)};\n // constructor = ctx().tuple_sort(\"packet\", 2, names, sorts, projections);\n // packetSort = constructor.range();\n // z3::sort_vector emp(context);\n // zero = ctx().function(\"modelZero\", emp, packetSort);\n prepend = ctx().function(\"modelPrepend\", packetSort, packetSort, packetSort);\n // length = projections[0];\n}\n\nnamespace z3 {\nz3::expr forall(z3::expr_vector &xs, const z3::expr &b,\n z3::expr_vector &patterns) {\n array vars(xs);\n array pats(patterns.size());\n for (unsigned i = 0; i != patterns.size(); ++i) {\n array asts(1);\n asts[0] = patterns[i];\n pats[i] = Z3_mk_pattern(b.ctx(), 1, asts.ptr());\n Z3_inc_ref(b.ctx(), Z3_pattern_to_ast(b.ctx(), pats[i]));\n }\n Z3_ast r = Z3_mk_forall_const(b.ctx(), 0, vars.size(), vars.ptr(),\n pats.size(), pats.ptr(), b);\n b.check_error();\n return expr(b.ctx(), r);\n}\nz3::expr forall(const z3::expr &x1, const z3::expr &b,\n z3::expr_vector &patterns) {\n z3::expr_vector evec(b.ctx());\n evec.push_back(x1);\n return forall(evec, b, patterns);\n}\n}\n\nz3::expr_vector packet_theory::make_axioms() {\n z3::expr_vector axes(ctx());\n // for (auto &pem : packetEmits) {\n // auto x = ctx().bv_const(\"x\", pem.first);\n // auto p = ctx().constant(\"p\", packetSort);\n // z3::expr_vector bounds(ctx());\n // bounds.push_back(x);\n // bounds.push_back(p);\n // axes.push_back(\n // z3::forall(bounds,\n // pem.second(p, x) ==\n // z3::concat(p, reverse(pem.first)(x)).extract(4095,\n // 0)));\n // }\n for (auto &padv : packetAdvances) {\n auto p = ctx().constant(\"p\", packetSort);\n axes.push_back(\n z3::forall(p, padv.second(p) ==\n z3::zext(p.extract(4095, padv.first), padv.first)));\n }\n\n for (auto &pex : packetExtracts) {\n 
auto p = ctx().constant(\"p\", packetSort);\n axes.push_back(z3::forall(p, pex.second(p) == p.extract(pex.first - 1, 0)));\n }\n for (auto &r : rotates) {\n auto x = ctx().bv_const(\"x\", r.first);\n\n z3::expr_vector concd(ctx());\n for (unsigned i = 0; i != r.first; ++i) {\n concd.push_back(x.extract(i, i));\n }\n axes.push_back(z3::forall(x, r.second(x) == z3::concat(concd)));\n // z3::expr_vector patterns(ctx());\n // patterns.push_back(r.second(r.second(x)));\n // axes.push_back(z3::forall(x, r.second(r.second(x)) == x, patterns));\n }\n axes.push_back(zero() == ctx().bv_val(0, 4096));\n return axes;\n}\n", "meta": {"hexsha": "708c922a7232eb2fcbd60ac2a854f846f09c1729", "size": 60186, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "backends/analysis/cegis.cpp", "max_stars_repo_name": "shellqiqi/bf4", "max_stars_repo_head_hexsha": "6c99c8f5b0dc61cf2cb7602c9f13ada7b651703f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-03-02T12:15:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-02T12:15:41.000Z", "max_issues_repo_path": "backends/analysis/cegis.cpp", "max_issues_repo_name": "shellqiqi/bf4", "max_issues_repo_head_hexsha": "6c99c8f5b0dc61cf2cb7602c9f13ada7b651703f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "backends/analysis/cegis.cpp", "max_forks_repo_name": "shellqiqi/bf4", "max_forks_repo_head_hexsha": "6c99c8f5b0dc61cf2cb7602c9f13ada7b651703f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1056105611, "max_line_length": 95, "alphanum_fraction": 0.5205695677, "num_tokens": 16903, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.5389832206876841, "lm_q2_score": 0.37022539259558657, "lm_q1q2_score": 0.19954527448153153}} {"text": "/*\n Copyright (c) 2018-2019 Nokia.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*/\n\n/*\n * This source code is part of the near-RT RIC (RAN Intelligent Controller)\n * platform project (RICP).\n*/\n\n#include \"config.h\"\n#include \"private/error.hpp\"\n#include \"private/abort.hpp\"\n#include \"private/asyncstorageimpl.hpp\"\n#include \"private/configurationreader.hpp\"\n#include \"private/asyncdummystorage.hpp\"\n#include \"private/engine.hpp\"\n#include \"private/logger.hpp\"\n#if HAVE_REDIS\n#include \"private/redis/asyncredisstorage.hpp\"\n#endif\n\n#include \n#include \n\nusing namespace shareddatalayer;\nusing namespace shareddatalayer::redis;\n\nnamespace\n{\n std::shared_ptr asyncDatabaseDiscoveryCreator(std::shared_ptr engine,\n const std::string& ns,\n const DatabaseConfiguration& databaseConfiguration,\n const boost::optional& addressIndex,\n std::shared_ptr logger)\n {\n return AsyncDatabaseDiscovery::create(engine,\n ns,\n databaseConfiguration,\n addressIndex,\n logger);\n }\n\n std::uint32_t crc32(const std::string& s)\n {\n boost::crc_32_type result;\n result.process_bytes(s.data(), s.size());\n return result.checksum();\n }\n\n std::uint32_t getClusterHashIndex(const std::string& s, const size_t count)\n {\n return crc32(s)%count;\n }\n}\n\nAsyncStorageImpl::AsyncStorageImpl(std::shared_ptr engine,\n const boost::optional& 
pId,\n std::shared_ptr logger):\n engine(engine),\n databaseConfiguration(std::make_shared()),\n namespaceConfigurations(std::make_shared()),\n publisherId(pId),\n logger(logger),\n asyncDatabaseDiscoveryCreator(::asyncDatabaseDiscoveryCreator)\n{\n ConfigurationReader configurationReader(logger);\n configurationReader.readDatabaseConfiguration(std::ref(*databaseConfiguration));\n configurationReader.readNamespaceConfigurations(std::ref(*namespaceConfigurations));\n}\n\n// Meant for UT usage\nAsyncStorageImpl::AsyncStorageImpl(std::shared_ptr engine,\n const boost::optional& pId,\n std::shared_ptr databaseConfiguration,\n std::shared_ptr namespaceConfigurations,\n std::shared_ptr logger,\n const AsyncDatabaseDiscoveryCreator& asyncDatabaseDiscoveryCreator):\n engine(engine),\n databaseConfiguration(databaseConfiguration),\n namespaceConfigurations(namespaceConfigurations),\n publisherId(pId),\n logger(logger),\n asyncDatabaseDiscoveryCreator(asyncDatabaseDiscoveryCreator)\n{\n}\n\nvoid AsyncStorageImpl::setAsyncRedisStorageHandlersForCluster(const std::string& ns)\n{\n static auto serverCount = databaseConfiguration->getServerAddresses().size();\n for (std::size_t addrIndex = 0; addrIndex < serverCount; addrIndex++)\n {\n auto redisHandler = std::make_shared(engine,\n asyncDatabaseDiscoveryCreator(\n engine,\n ns,\n std::ref(*databaseConfiguration),\n addrIndex,\n logger),\n publisherId,\n namespaceConfigurations,\n logger);\n asyncStorages.push_back(redisHandler);\n }\n}\n\nvoid AsyncStorageImpl::setAsyncRedisStorageHandlers(const std::string& ns)\n{\n if (DatabaseConfiguration::DbType::SDL_STANDALONE_CLUSTER == databaseConfiguration->getDbType() ||\n DatabaseConfiguration::DbType::SDL_SENTINEL_CLUSTER == databaseConfiguration->getDbType())\n {\n setAsyncRedisStorageHandlersForCluster(ns);\n return;\n }\n auto redisHandler = std::make_shared(engine,\n asyncDatabaseDiscoveryCreator(\n engine,\n ns,\n std::ref(*databaseConfiguration),\n boost::none,\n logger),\n 
publisherId,\n namespaceConfigurations,\n logger);\n asyncStorages.push_back(redisHandler);\n}\n\nAsyncStorage& AsyncStorageImpl::getAsyncRedisStorageHandler(const std::string& ns)\n{\n std::size_t handlerIndex{0};\n if (DatabaseConfiguration::DbType::SDL_STANDALONE_CLUSTER == databaseConfiguration->getDbType() ||\n DatabaseConfiguration::DbType::SDL_SENTINEL_CLUSTER == databaseConfiguration->getDbType())\n handlerIndex = getClusterHashIndex(ns, databaseConfiguration->getServerAddresses().size());\n return *asyncStorages.at(handlerIndex);\n}\n\nAsyncStorage& AsyncStorageImpl::getRedisHandler(const std::string& ns)\n{\n#if HAVE_REDIS\n if (asyncStorages.empty())\n setAsyncRedisStorageHandlers(ns);\n\n return getAsyncRedisStorageHandler(ns);\n#else\n logger->error() << \"Redis operations cannot be performed, Redis not enabled\";\n SHAREDDATALAYER_ABORT(\"Invalid configuration.\");\n#endif\n}\n\nAsyncStorage& AsyncStorageImpl::getDummyHandler()\n{\n static AsyncDummyStorage dummyHandler{engine};\n return dummyHandler;\n}\n\nAsyncStorage& AsyncStorageImpl::getOperationHandler(const std::string& ns)\n{\n if (namespaceConfigurations->isDbBackendUseEnabled(ns))\n return getRedisHandler(ns);\n\n return getDummyHandler();\n}\n\nint AsyncStorageImpl::fd() const\n{\n return engine->fd();\n}\n\nvoid AsyncStorageImpl::handleEvents()\n{\n engine->handleEvents();\n}\n\nvoid AsyncStorageImpl::waitReadyAsync(const Namespace& ns,\n const ReadyAck& readyAck)\n{\n getOperationHandler(ns).waitReadyAsync(ns, readyAck);\n}\n\nvoid AsyncStorageImpl::setAsync(const Namespace& ns,\n const DataMap& dataMap,\n const ModifyAck& modifyAck)\n{\n getOperationHandler(ns).setAsync(ns, dataMap, modifyAck);\n}\n\nvoid AsyncStorageImpl::setIfAsync(const Namespace& ns,\n const Key& key,\n const Data& oldData,\n const Data& newData,\n const ModifyIfAck& modifyIfAck)\n{\n getOperationHandler(ns).setIfAsync(ns, key, oldData, newData, modifyIfAck);\n}\n\nvoid AsyncStorageImpl::removeIfAsync(const 
Namespace& ns,\n const Key& key,\n const Data& data,\n const ModifyIfAck& modifyIfAck)\n{\n getOperationHandler(ns).removeIfAsync(ns, key, data, modifyIfAck);\n}\n\nvoid AsyncStorageImpl::setIfNotExistsAsync(const Namespace& ns,\n const Key& key,\n const Data& data,\n const ModifyIfAck& modifyIfAck)\n{\n getOperationHandler(ns).setIfNotExistsAsync(ns, key, data, modifyIfAck);\n}\n\nvoid AsyncStorageImpl::getAsync(const Namespace& ns,\n const Keys& keys,\n const GetAck& getAck)\n{\n getOperationHandler(ns).getAsync(ns, keys, getAck);\n}\n\nvoid AsyncStorageImpl::removeAsync(const Namespace& ns,\n const Keys& keys,\n const ModifyAck& modifyAck)\n{\n getOperationHandler(ns).removeAsync(ns, keys, modifyAck);\n}\n\nvoid AsyncStorageImpl::findKeysAsync(const Namespace& ns,\n const std::string& keyPrefix,\n const FindKeysAck& findKeysAck)\n{\n getOperationHandler(ns).findKeysAsync(ns, keyPrefix, findKeysAck);\n}\n\nvoid AsyncStorageImpl::listKeys(const Namespace& ns,\n const std::string& pattern,\n const FindKeysAck& findKeysAck)\n{\n getOperationHandler(ns).listKeys(ns, pattern, findKeysAck);\n}\n\nvoid AsyncStorageImpl::removeAllAsync(const Namespace& ns,\n const ModifyAck& modifyAck)\n{\n getOperationHandler(ns).removeAllAsync(ns, modifyAck);\n}\n", "meta": {"hexsha": "0d6d683ab5019acaf1fb105ce94fef31504e4738", "size": 10179, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/asyncstorageimpl.cpp", "max_stars_repo_name": "o-ran-sc/ric-plt-sdl", "max_stars_repo_head_hexsha": "782df7475cbe2f823042f731f1cd877eb525b228", "max_stars_repo_licenses": ["Apache-2.0", "CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/asyncstorageimpl.cpp", "max_issues_repo_name": "o-ran-sc/ric-plt-sdl", "max_issues_repo_head_hexsha": "782df7475cbe2f823042f731f1cd877eb525b228", "max_issues_repo_licenses": ["Apache-2.0", "CC-BY-4.0"], "max_issues_count": null, 
"max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/asyncstorageimpl.cpp", "max_forks_repo_name": "o-ran-sc/ric-plt-sdl", "max_forks_repo_head_hexsha": "782df7475cbe2f823042f731f1cd877eb525b228", "max_forks_repo_licenses": ["Apache-2.0", "CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.76171875, "max_line_length": 129, "alphanum_fraction": 0.5540819334, "num_tokens": 1718, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.5389832206876841, "lm_q2_score": 0.37022539259558657, "lm_q1q2_score": 0.19954527448153153}} {"text": "#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\n#include \n#include \n#include \n\n#define PORT 65001\n#define MAX_COUNT 100\n#define DB_WRITE 10\n#define PEAK 700\n#define NOT_PEAK 500\n#define MIN_INTERVAL 300\n#define PI 3.14159265\n\nusing namespace std;\n\nclass tms_ss_whs1\n{\nprivate:\n\tros::NodeHandle nh;\n\tros::Publisher db_pub;\n\tint sock;\npublic:\n\tfloat temp=0;\n\tint rate=0,p_rate=0;\n\tint msec=0,p_msec=0;\n\tint hakei[MAX_COUNT] = {0};\n\tint count=0;\n\tdouble roll,pitch;\n\tint db_count=0;\n\tint last_peak_time=-1;\n\tvoid spin()\n\t{\n\t\twhile(ros::ok()){\n\t\t\tint rcvmsg[3];\n\t\t\tint n = recv(sock,rcvmsg,sizeof(rcvmsg),0);\n\t\t\tif(n<1){\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tp_msec=msec;\n\t\t\tmsec = (rcvmsg[0]>>16)&0xffff;\n\t\t\thakei[count] = (rcvmsg[0]&0xffff);\n\t\t\ttemp = ((rcvmsg[1]>>16)&0xffff)*0.01;\n\t\t\tfloat acc_x = (((short)(rcvmsg[1]&0xffff)<<16)>>16)*0.01;\n\t\t\tfloat acc_y = (rcvmsg[2]>>16)*0.01;\n\t\t\tfloat acc_z = (((short)(rcvmsg[2]&0xffff)<<16)>>16)*0.01;\n\n\t\t\tif(msec==p_msec) msec+=8;\n\t\t\telse if(msec==(p_msec-8)) msec+=16;\n\t\t\telse if(msec==(p_msec-16)) msec+=24;\n\t\t\telse if(msec==(p_msec-24)) 
msec+=32;\n\n\t\t\tdouble G = sqrt(acc_x*acc_x+acc_y*acc_y+acc_z*acc_z);\n\t\t\tif(acc_y != 0){\n\t\t\t\troll = asin(-acc_x/G);\n\t\t\t\tpitch = atan(acc_z/acc_y);\n\t\t\t}\n\n\t\t\tROS_INFO(\"msec:%d hakei:%d rate:%d\",msec,hakei[count],rate);\n\n\t\t\tint interval = msec-last_peak_time;\n\t\t\tif(interval<-MIN_INTERVAL) interval+=60000;\n\n\t\t\tif(hakei[count]>PEAK&&interval>MIN_INTERVAL){\n\t\t\t\tif(last_peak_time==-1)\n\t\t\t\tlast_peak_time = msec;\n\t\t\t\telse{\n\t\t\t\t\tp_rate = rate;\n\t\t\t\t\trate = (int)(1000.0 / (double)interval * 60.0);\n\t\t\t\t\tif(rate<30) rate = 0;\n\t\t\t\t\telse if(rate>200) rate = p_rate;\n\t\t\t\t\tlast_peak_time = msec;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcount++;\n\t\t\tdb_count++;\n\n\t\t\tif(db_count>=DB_WRITE){\n\t\t\t\tdb_count=0;\n\t\t\t\tdb_write(1);\n\t\t\t}\n\n\t\t\tif(count>=MAX_COUNT){\n\t\t\t\tcount=0;\n\t\t\t}\n\t\t\tros::spinOnce();\n\t\t}\n\t\tdb_write(0);\n\t\tclose(sock);\n\t}\n\tvoid db_write(int state)\n\t{\n\t\tchar data[500];\n\t\tchar buf[8];\n\t\tconst char *c1 = \"{\";\n\t\tstrcpy(data,c1);\n\t\tstrcat(data,\"\\\"temp\\\":\");\n\t\tsprintf(buf,\"%.2f\",temp);\n\t\tstrcat(data,buf);\n\t\tstrcat(data,\", \\\"rate\\\":\");\n\t\tsprintf(buf,\"%d\",rate);\n\t\tstrcat(data,buf);\n\t\tstrcat(data,\", \\\"wave\\\":[\");\n\t\tfor(int i=0;i=MAX_COUNT) ? 
(count+i-MAX_COUNT) : count+i;\n\t\t\tsprintf(buf,\"%d\",hakei[j]);\n\t\t\tstrcat(data,buf);\n\t\t\tif(i!=MAX_COUNT-1) strcat(data,\",\");\n\t\t}\n\t\tstrcat(data,\"]\");\n\t\tstrcat(data,\"}\");\n\n\t\tros::Time now = ros::Time::now() + ros::Duration(9*60*60); // GMT +9\n\t\ttms_msg_db::TmsdbStamped db_msg;\n\n\t\tstd::string frame_id(\"/world\");\n\t\tdb_msg.header.frame_id = frame_id;\n\t\tdb_msg.header.stamp = now;\n\t\tdb_msg.tmsdb.clear();\n\t\ttms_msg_db::Tmsdb tmpData;\n\n\t\ttmpData.time = boost::posix_time::to_iso_extended_string(now.toBoost());\n\t\ttmpData.name = \"whs1_mybeat\";\n\t\ttmpData.id = 3021;\n\t\ttmpData.place = 5001;\n\t\ttmpData.sensor = 3021;\n\t\ttmpData.state = state;\n\t\ttmpData.rr\t\t\t= roll;\n\t\ttmpData.rp \t\t\t= pitch;\n\t\ttmpData.ry\t\t\t= 0;\n\n\t\ttmpData.note=data;\n\t\tdb_msg.tmsdb.push_back(tmpData);\n\t\tdb_pub.publish(db_msg);\n\t}\n\ttms_ss_whs1()\n\t{\n\t\tdb_pub=nh.advertise (\"tms_db_data\", 1000);\n\t\tsock = socket(AF_INET,SOCK_DGRAM,0);\n\t\tstruct sockaddr_in s_address;\n\t\ts_address.sin_family=AF_INET;\n\t\ts_address.sin_addr.s_addr=INADDR_ANY;\n\t\ts_address.sin_port=htons(PORT);\n\t\tconst int on = 1;\n\t\tsetsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));\n setsockopt(sock, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on));\n\t\tbind(sock,(struct sockaddr *)&s_address,sizeof(s_address));\n\t\tint val=1;\n\t\tioctl(sock,FIONBIO,&val);\n\t\tROS_INFO(\"tms_ss_whs1 ready...\");\n\t}\n};\n\nint main(int argc, char **argv)\n{\n\tros::init(argc,argv,\"tms_ss_whs1\");\n\ttms_ss_whs1 whs1;\n\twhs1.spin();\n\treturn 0;\n}\n", "meta": {"hexsha": "7219367441b6bce44f11d5be65044bb061137881", "size": 3965, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tms_ss/tms_ss_whs1/src/main.cpp", "max_stars_repo_name": "robotpilot/ros_tms", "max_stars_repo_head_hexsha": "3d6b6579e89aa9cb216cd3cb6157fabc553c18f1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 54.0, "max_stars_repo_stars_event_min_datetime": 
"2015-01-06T06:58:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-02T07:49:37.000Z", "max_issues_repo_path": "tms_ss/tms_ss_whs1/src/main.cpp", "max_issues_repo_name": "robotpilot/ros_tms", "max_issues_repo_head_hexsha": "3d6b6579e89aa9cb216cd3cb6157fabc553c18f1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 114.0, "max_issues_repo_issues_event_min_datetime": "2015-01-07T06:42:21.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-12T05:54:04.000Z", "max_forks_repo_path": "tms_ss/tms_ss_whs1/src/main.cpp", "max_forks_repo_name": "robotpilot/ros_tms", "max_forks_repo_head_hexsha": "3d6b6579e89aa9cb216cd3cb6157fabc553c18f1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 24.0, "max_forks_repo_forks_event_min_datetime": "2015-03-27T08:35:59.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-08T13:05:31.000Z", "avg_line_length": 23.0523255814, "max_line_length": 77, "alphanum_fraction": 0.6433795712, "num_tokens": 1351, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.538983220687684, "lm_q2_score": 0.3702253925955866, "lm_q1q2_score": 0.1995452744815315}} {"text": "#include \n#include \"Scene_polyhedron_selection_item.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \"triangulate_primitive.h\"\n\n#ifdef USE_SURFACE_MESH\ntypedef Scene_surface_mesh_item Scene_face_graph_item;\n#else\ntypedef Scene_polyhedron_item Scene_face_graph_item;\n#endif\n\ntypedef Scene_face_graph_item::Face_graph Face_graph;\ntypedef boost::property_map::type VPmap;\ntypedef boost::property_map::const_type constVPmap;\n\ntypedef Scene_face_graph_item::Vertex_selection_map Vertex_selection_map;\n\ntypedef boost::graph_traits::vertex_descriptor fg_vertex_descriptor;\ntypedef boost::graph_traits::face_descriptor fg_face_descriptor;\ntypedef boost::graph_traits::edge_descriptor fg_edge_descriptor;\ntypedef boost::graph_traits::halfedge_descriptor fg_halfedge_descriptor;\n\nstruct Scene_polyhedron_selection_item_priv{\n\n typedef Scene_facegraph_item_k_ring_selection::Active_handle Active_handle;\n typedef boost::unordered_set Selection_set_vertex;\n typedef boost::unordered_set Selection_set_facet;\n typedef boost::unordered_set Selection_set_edge;\n struct vertex_on_path\n {\n fg_vertex_descriptor vertex;\n bool is_constrained;\n };\n\n Scene_polyhedron_selection_item_priv(Scene_polyhedron_selection_item* parent):\n item(parent)\n {\n }\n\n void initializeBuffers(CGAL::Three::Viewer_interface *viewer) const;\n void initialize_temp_buffers(CGAL::Three::Viewer_interface *viewer) const;\n void initialize_HL_buffers(CGAL::Three::Viewer_interface *viewer) const;\n void computeElements() const;\n void compute_any_elements(std::vector &p_facets, std::vector &p_lines, std::vector &p_points, std::vector &p_normals,\n const Selection_set_vertex& p_sel_vertex, const Selection_set_facet &p_sel_facet, 
const Selection_set_edge &p_sel_edges) const;\n void compute_temp_elements() const;\n void compute_HL_elements() const;\n void triangulate_facet(fg_face_descriptor, Kernel::Vector_3 normal,\n std::vector &p_facets,std::vector &p_normals) const;\n void tempInstructions(QString s1, QString s2);\n\n void computeAndDisplayPath();\n void addVertexToPath(fg_vertex_descriptor, vertex_on_path &);\n\n enum VAOs{\n Facets = 0,\n TempFacets,\n Edges,\n TempEdges,\n Points,\n TempPoints,\n FixedPoints,\n HLPoints,\n HLEdges,\n HLFacets,\n NumberOfVaos\n };\n enum VBOs{\n VertexFacets = 0,\n NormalFacets,\n VertexEdges,\n VertexPoints,\n VertexTempFacets,\n NormalTempFacets,\n VertexTempEdges,\n VertexTempPoints,\n VertexFixedPoints,\n ColorFixedPoints,\n VertexHLPoints,\n VertexHLEdges,\n VertexHLFacets,\n NormalHLFacets,\n NumberOfVbos\n };\n\n QList path;\n QList constrained_vertices;\n bool is_path_selecting;\n bool poly_need_update;\n mutable bool are_temp_buffers_filled;\n //Specifies Selection/edition mode\n bool first_selected;\n int operation_mode;\n QString m_temp_instructs;\n bool is_treated;\n fg_vertex_descriptor to_split_vh;\n fg_face_descriptor to_split_fh;\n fg_edge_descriptor to_join_ed;\n Active_handle::Type original_sel_mode;\n //Only needed for the triangulation\n Face_graph* poly;\n CGAL::Unique_hash_map face_normals_map;\n CGAL::Unique_hash_map vertex_normals_map;\n boost::associative_property_map< CGAL::Unique_hash_map >\n nf_pmap;\n boost::associative_property_map< CGAL::Unique_hash_map >\n nv_pmap;\n Scene_face_graph_item::ManipulatedFrame *manipulated_frame;\n bool ready_to_move;\n\n Vertex_selection_map vertex_selection_map()\n {\n return item->poly_item->vertex_selection_map();\n }\n\n Face_graph* polyhedron() { return poly; }\n const Face_graph* polyhedron()const { return poly; }\n\n bool canAddFace(fg_halfedge_descriptor hc, Scene_polyhedron_selection_item::fg_halfedge_descriptor t);\n bool 
canAddFaceAndVertex(Scene_polyhedron_selection_item::fg_halfedge_descriptor hc, Scene_polyhedron_selection_item::fg_halfedge_descriptor t);\n\n mutable std::vector positions_facets;\n mutable std::vector normals;\n mutable std::vector positions_lines;\n mutable std::vector positions_points;\n mutable std::size_t nb_facets;\n mutable std::size_t nb_points;\n mutable std::size_t nb_lines;\n\n mutable std::vector positions_temp_facets;\n mutable std::vector positions_fixed_points;\n mutable std::vector color_fixed_points;\n mutable std::vector temp_normals;\n mutable std::vector positions_temp_lines;\n mutable std::vector positions_temp_points;\n mutable std::vector positions_HL_facets;\n mutable std::vector HL_normals;\n mutable std::vector positions_HL_lines;\n mutable std::vector positions_HL_points;\n\n mutable std::size_t nb_temp_facets;\n mutable std::size_t nb_temp_points;\n mutable std::size_t nb_temp_lines;\n mutable std::size_t nb_fixed_points;\n\n mutable QOpenGLShaderProgram *program;\n mutable bool are_HL_buffers_filled;\n Scene_polyhedron_selection_item* item;\n};\n\n\nvoid Scene_polyhedron_selection_item_priv::initializeBuffers(CGAL::Three::Viewer_interface *viewer)const\n{\n //vao containing the data for the facets\n {\n program = item->getShaderProgram(Scene_polyhedron_selection_item::PROGRAM_WITH_LIGHT, viewer);\n program->bind();\n\n item->vaos[Facets]->bind();\n item->buffers[VertexFacets].bind();\n item->buffers[VertexFacets].allocate(positions_facets.data(),\n static_cast(positions_facets.size()*sizeof(float)));\n program->enableAttributeArray(\"vertex\");\n program->setAttributeBuffer(\"vertex\",GL_FLOAT,0,3);\n item->buffers[VertexFacets].release();\n\n\n\n item->buffers[NormalFacets].bind();\n item->buffers[NormalFacets].allocate(normals.data(),\n static_cast(normals.size()*sizeof(float)));\n program->enableAttributeArray(\"normals\");\n program->setAttributeBuffer(\"normals\",GL_FLOAT,0,3);\n item->buffers[NormalFacets].release();\n\n 
item->vaos[Facets]->release();\n program->release();\n\n }\n //vao containing the data for the lines\n {\n program = item->getShaderProgram(Scene_polyhedron_selection_item::PROGRAM_NO_SELECTION, viewer);\n program->bind();\n item->vaos[Edges]->bind();\n\n item->buffers[VertexEdges].bind();\n item->buffers[VertexEdges].allocate(positions_lines.data(),\n static_cast(positions_lines.size()*sizeof(float)));\n program->enableAttributeArray(\"vertex\");\n program->setAttributeBuffer(\"vertex\",GL_FLOAT,0,3);\n item->buffers[VertexEdges].release();\n\n program->release();\n\n item->vaos[Edges]->release();\n\n }\n //vao containing the data for the points\n {\n program = item->getShaderProgram(Scene_polyhedron_selection_item::PROGRAM_NO_SELECTION, viewer);\n program->bind();\n item->vaos[Points]->bind();\n\n item->buffers[VertexPoints].bind();\n item->buffers[VertexPoints].allocate(positions_points.data(),\n static_cast(positions_points.size()*sizeof(float)));\n program->enableAttributeArray(\"vertex\");\n program->setAttributeBuffer(\"vertex\",GL_FLOAT,0,3);\n item->buffers[VertexPoints].release();\n program->release();\n\n item->vaos[Points]->release();\n }\n\n nb_facets = positions_facets.size();\n positions_facets.resize(0);\n std::vector(positions_facets).swap(positions_facets);\n\n normals.resize(0);\n std::vector(normals).swap(normals);\n\n nb_lines = positions_lines.size();\n positions_lines.resize(0);\n std::vector(positions_lines).swap(positions_lines);\n\n nb_points = positions_points.size();\n positions_points.resize(0);\n std::vector(positions_points).swap(positions_points);\n item->are_buffers_filled = true;\n}\n\nvoid Scene_polyhedron_selection_item_priv::initialize_temp_buffers(CGAL::Three::Viewer_interface *viewer)const\n{\n //vao containing the data for the temp facets\n {\n program = item->getShaderProgram(Scene_polyhedron_selection_item::PROGRAM_WITH_LIGHT, viewer);\n program->bind();\n\n item->vaos[TempFacets]->bind();\n 
item->buffers[VertexTempFacets].bind();\n item->buffers[VertexTempFacets].allocate(positions_temp_facets.data(),\n static_cast(positions_temp_facets.size()*sizeof(float)));\n program->enableAttributeArray(\"vertex\");\n program->setAttributeBuffer(\"vertex\",GL_FLOAT,0,3);\n item->buffers[VertexTempFacets].release();\n\n\n\n item->buffers[NormalTempFacets].bind();\n item->buffers[NormalTempFacets].allocate(temp_normals.data(),\n static_cast(temp_normals.size()*sizeof(float)));\n program->enableAttributeArray(\"normals\");\n program->setAttributeBuffer(\"normals\",GL_FLOAT,0,3);\n item->buffers[NormalTempFacets].release();\n\n item->vaos[TempFacets]->release();\n program->release();\n }\n //vao containing the data for the temp lines\n {\n program = item->getShaderProgram(Scene_polyhedron_selection_item::PROGRAM_NO_SELECTION, viewer);\n program->bind();\n item->vaos[TempEdges]->bind();\n\n item->buffers[VertexTempEdges].bind();\n item->buffers[VertexTempEdges].allocate(positions_temp_lines.data(),\n static_cast(positions_temp_lines.size()*sizeof(float)));\n program->enableAttributeArray(\"vertex\");\n program->setAttributeBuffer(\"vertex\",GL_FLOAT,0,3);\n item->buffers[VertexTempEdges].release();\n\n program->release();\n\n item->vaos[TempEdges]->release();\n\n }\n //vaos containing the data for the temp points\n {\n program = item->getShaderProgram(Scene_polyhedron_selection_item::PROGRAM_NO_SELECTION, viewer);\n program->bind();\n item->vaos[TempPoints]->bind();\n\n item->buffers[VertexTempPoints].bind();\n item->buffers[VertexTempPoints].allocate(positions_temp_points.data(),\n static_cast(positions_temp_points.size()*sizeof(float)));\n program->enableAttributeArray(\"vertex\");\n program->setAttributeBuffer(\"vertex\",GL_FLOAT,0,3);\n item->buffers[VertexTempPoints].release();\n item->vaos[TempPoints]->release();\n\n item->vaos[FixedPoints]->bind();\n\n item->buffers[VertexFixedPoints].bind();\n 
item->buffers[VertexFixedPoints].allocate(positions_fixed_points.data(),\n static_cast(positions_fixed_points.size()*sizeof(float)));\n program->enableAttributeArray(\"vertex\");\n program->setAttributeBuffer(\"vertex\",GL_FLOAT,0,3);\n item->buffers[VertexFixedPoints].release();\n item->buffers[ColorFixedPoints].bind();\n item->buffers[ColorFixedPoints].allocate(color_fixed_points.data(),\n static_cast(color_fixed_points.size()*sizeof(float)));\n program->enableAttributeArray(\"colors\");\n program->setAttributeBuffer(\"colors\",GL_FLOAT,0,3);\n item->buffers[ColorFixedPoints].release();\n item->vaos[FixedPoints]->release();\n\n program->release();\n }\n nb_temp_facets = positions_temp_facets.size();\n positions_temp_facets.resize(0);\n std::vector(positions_temp_facets).swap(positions_temp_facets);\n\n temp_normals.resize(0);\n std::vector(temp_normals).swap(temp_normals);\n\n nb_temp_lines = positions_temp_lines.size();\n positions_temp_lines.resize(0);\n std::vector(positions_temp_lines).swap(positions_temp_lines);\n\n nb_temp_points = positions_temp_points.size();\n positions_temp_points.resize(0);\n std::vector(positions_temp_points).swap(positions_temp_points);\n\n nb_fixed_points = positions_fixed_points.size();\n positions_fixed_points.resize(0);\n std::vector(positions_fixed_points).swap(positions_fixed_points);\n are_temp_buffers_filled = true;\n}\nvoid Scene_polyhedron_selection_item_priv::initialize_HL_buffers(CGAL::Three::Viewer_interface *viewer)const\n{\n //vao containing the data for the temp facets\n {\n program = item->getShaderProgram(Scene_polyhedron_selection_item::PROGRAM_WITH_LIGHT, viewer);\n program->bind();\n\n item->vaos[HLFacets]->bind();\n item->buffers[VertexHLFacets].bind();\n item->buffers[VertexHLFacets].allocate(positions_HL_facets.data(),\n static_cast(positions_HL_facets.size()*sizeof(float)));\n program->enableAttributeArray(\"vertex\");\n program->setAttributeBuffer(\"vertex\",GL_FLOAT,0,3);\n 
item->buffers[VertexHLFacets].release();\n\n\n item->buffers[NormalHLFacets].bind();\n item->buffers[NormalHLFacets].allocate(HL_normals.data(),\n static_cast(HL_normals.size()*sizeof(float)));\n program->enableAttributeArray(\"normals\");\n program->setAttributeBuffer(\"normals\",GL_FLOAT,0,3);\n item->buffers[NormalHLFacets].release();\n\n item->vaos[HLFacets]->release();\n program->release();\n\n }\n //vao containing the data for the temp lines\n {\n program = item->getShaderProgram(Scene_polyhedron_selection_item::PROGRAM_NO_SELECTION, viewer);\n program->bind();\n item->vaos[HLEdges]->bind();\n\n item->buffers[VertexHLEdges].bind();\n item->buffers[VertexHLEdges].allocate(positions_HL_lines.data(),\n static_cast(positions_HL_lines.size()*sizeof(float)));\n program->enableAttributeArray(\"vertex\");\n program->setAttributeBuffer(\"vertex\",GL_FLOAT,0,3);\n item->buffers[VertexHLEdges].release();\n\n program->release();\n\n item->vaos[HLEdges]->release();\n\n }\n //vao containing the data for the temp points\n {\n program = item->getShaderProgram(Scene_polyhedron_selection_item::PROGRAM_NO_SELECTION, viewer);\n program->bind();\n item->vaos[HLPoints]->bind();\n\n item->buffers[VertexHLPoints].bind();\n item->buffers[VertexHLPoints].allocate(positions_HL_points.data(),\n static_cast(positions_HL_points.size()*sizeof(float)));\n program->enableAttributeArray(\"vertex\");\n program->setAttributeBuffer(\"vertex\",GL_FLOAT,0,3);\n item->buffers[VertexHLPoints].release();\n\n program->release();\n\n item->vaos[HLPoints]->release();\n }\n are_HL_buffers_filled = true;\n}\ntemplate\nvoid push_back_xyz(const TypeWithXYZ& t,\n ContainerWithPushBack& vector)\n{\n vector.push_back(t.x());\n vector.push_back(t.y());\n vector.push_back(t.z());\n}\n\ntypedef Kernel Traits;\n\n//Make sure all the facets are triangles\ntypedef Traits::Point_3\t Point_3;\ntypedef Traits::Point_3\t Point;\ntypedef Traits::Vector_3\t 
Vector;\n\nvoid\nScene_polyhedron_selection_item_priv::triangulate_facet(fg_face_descriptor fit,const Vector normal,\n std::vector &p_facets,std::vector &p_normals ) const\n{\n typedef FacetTriangulator FT;\n double diagonal;\n if(item->poly_item->diagonalBbox() != std::numeric_limits::infinity())\n diagonal = item->poly_item->diagonalBbox();\n else\n diagonal = 0.0;\n FT triangulation(fit,normal,poly,diagonal);\n //iterates on the internal faces to add the vertices to the positions\n //and the normals to the appropriate vectors\n for(FT::CDT::Finite_faces_iterator\n ffit = triangulation.cdt->finite_faces_begin(),\n end = triangulation.cdt->finite_faces_end();\n ffit != end; ++ffit)\n {\n if(ffit->info().is_external)\n continue;\n\n push_back_xyz(ffit->vertex(0)->point(), p_facets);\n push_back_xyz(ffit->vertex(1)->point(), p_facets);\n push_back_xyz(ffit->vertex(2)->point(), p_facets);\n\n push_back_xyz(normal, p_normals);\n push_back_xyz(normal, p_normals);\n push_back_xyz(normal, p_normals);\n }\n}\n\n\nvoid Scene_polyhedron_selection_item_priv::compute_any_elements(std::vector& p_facets, std::vector& p_lines, std::vector& p_points, std::vector& p_normals,\n const Selection_set_vertex& p_sel_vertices, const Selection_set_facet& p_sel_facets, const Selection_set_edge& p_sel_edges)const\n{\n const qglviewer::Vec offset = static_cast(QGLViewer::QGLViewerPool().first())->offset();\n p_facets.clear();\n p_lines.clear();\n p_points.clear();\n p_normals.clear();\n //The facet\n\n if(!poly)\n return;\n\n VPmap vpm = get(CGAL::vertex_point,*poly);\n for(Selection_set_facet::iterator\n it = p_sel_facets.begin(),\n end = p_sel_facets.end();\n it != end; it++)\n {\n fg_face_descriptor f = (*it);\n if (f == boost::graph_traits::null_face())\n continue;\n Vector nf = get(nf_pmap, f);\n if(is_triangle(halfedge(f,*poly),*poly))\n {\n p_normals.push_back(nf.x());\n p_normals.push_back(nf.y());\n p_normals.push_back(nf.z());\n\n p_normals.push_back(nf.x());\n 
p_normals.push_back(nf.y());\n p_normals.push_back(nf.z());\n\n p_normals.push_back(nf.x());\n p_normals.push_back(nf.y());\n p_normals.push_back(nf.z());\n\n\n BOOST_FOREACH(fg_halfedge_descriptor he, halfedges_around_face(halfedge(f,*polyhedron()), *polyhedron()))\n {\n const Point& p = get(vpm,target(he,*poly));\n p_facets.push_back(p.x()+offset.x);\n p_facets.push_back(p.y()+offset.y);\n p_facets.push_back(p.z()+offset.z);\n }\n }\n else if (is_quad(halfedge(f,*poly), *poly))\n {\n Kernel::Vector_3 v_offset(offset.x, offset.y, offset.z);\n Vector nf = get(nf_pmap, f);\n {\n //1st half-quad\n const Point& p0 = get(vpm,target(halfedge(f,*poly),*poly));\n const Point& p1 = get(vpm,target(next(halfedge(f,*poly),*poly),*poly));\n const Point& p2 = get(vpm,target(next(next(halfedge(f,*poly),*poly),*poly),*poly));\n\n push_back_xyz(p0+v_offset, p_facets);\n push_back_xyz(p1+v_offset, p_facets);\n push_back_xyz(p2+v_offset, p_facets);\n\n push_back_xyz(nf, p_normals);\n push_back_xyz(nf, p_normals);\n push_back_xyz(nf, p_normals);\n }\n {\n //2nd half-quad\n const Point& p0 = get(vpm, target(next(next(halfedge(f,*poly),*poly),*poly),*poly));\n const Point& p1 = get(vpm, target(prev(halfedge(f,*poly),*poly),*poly));\n const Point& p2 = get(vpm, target(halfedge(f,*poly),*poly));\n\n push_back_xyz(p0+v_offset, p_facets);\n push_back_xyz(p1+v_offset, p_facets);\n push_back_xyz(p2+v_offset, p_facets);\n\n push_back_xyz(nf, p_normals);\n push_back_xyz(nf, p_normals);\n push_back_xyz(nf, p_normals);\n }\n }\n else\n {\n triangulate_facet(f, nf, p_facets, p_normals);\n }\n }\n\n //The Lines\n {\n\n for(Selection_set_edge::iterator it = p_sel_edges.begin(); it != p_sel_edges.end(); ++it) {\n const Point& a = get(vpm, target(halfedge(*it,*poly),*poly));\n const Point& b = get(vpm, target(opposite((halfedge(*it,*poly)),*poly),*poly));\n p_lines.push_back(a.x()+offset.x);\n p_lines.push_back(a.y()+offset.y);\n p_lines.push_back(a.z()+offset.z);\n\n 
p_lines.push_back(b.x()+offset.x);\n p_lines.push_back(b.y()+offset.y);\n p_lines.push_back(b.z()+offset.z);\n }\n\n }\n //The points\n {\n for(Selection_set_vertex::iterator\n it = p_sel_vertices.begin(),\n end = p_sel_vertices.end();\n it != end; ++it)\n {\n const Point& p = get(vpm, *it);\n p_points.push_back(p.x()+offset.x);\n p_points.push_back(p.y()+offset.y);\n p_points.push_back(p.z()+offset.z);\n }\n }\n}\nvoid Scene_polyhedron_selection_item_priv::computeElements()const\n{\n QApplication::setOverrideCursor(Qt::WaitCursor);\n compute_any_elements(positions_facets, positions_lines, positions_points, normals,\n item->selected_vertices, item->selected_facets, item->selected_edges);\n QApplication::restoreOverrideCursor();\n}\nvoid Scene_polyhedron_selection_item_priv::compute_temp_elements()const\n{\n QApplication::setOverrideCursor(Qt::WaitCursor);\n compute_any_elements(positions_temp_facets, positions_temp_lines, positions_temp_points, temp_normals,\n item->temp_selected_vertices, item->temp_selected_facets, item->temp_selected_edges);\n //The fixed points\n {\n const qglviewer::Vec offset = static_cast(QGLViewer::QGLViewerPool().first())->offset();\n color_fixed_points.clear();\n positions_fixed_points.clear();\n int i=0;\n\n constVPmap vpm = get(CGAL::vertex_point,*polyhedron());\n\n for(Scene_polyhedron_selection_item::Selection_set_vertex::iterator\n it = item->fixed_vertices.begin(),\n end = item->fixed_vertices.end();\n it != end; ++it)\n {\n const Point& p = get(vpm,*it);\n positions_fixed_points.push_back(p.x()+offset.x);\n positions_fixed_points.push_back(p.y()+offset.y);\n positions_fixed_points.push_back(p.z()+offset.z);\n\n if(*it == constrained_vertices.first()|| *it == constrained_vertices.last())\n {\n color_fixed_points.push_back(0.0);\n color_fixed_points.push_back(0.0);\n color_fixed_points.push_back(1.0);\n }\n else\n {\n color_fixed_points.push_back(1.0);\n color_fixed_points.push_back(0.0);\n color_fixed_points.push_back(0.0);\n }\n 
i++;\n }\n }\n QApplication::restoreOverrideCursor();\n}\n\nvoid Scene_polyhedron_selection_item_priv::compute_HL_elements()const\n{\n QApplication::setOverrideCursor(Qt::WaitCursor);\n compute_any_elements(positions_HL_facets, positions_HL_lines, positions_HL_points, HL_normals,\n item->HL_selected_vertices, item->HL_selected_facets, item->HL_selected_edges);\n QApplication::restoreOverrideCursor();\n}\n\nvoid Scene_polyhedron_selection_item::draw(CGAL::Three::Viewer_interface* viewer) const\n{\n GLfloat offset_factor;\n GLfloat offset_units;\n if(!d->are_HL_buffers_filled)\n {\n d->compute_HL_elements();\n d->initialize_HL_buffers(viewer);\n }\n\n viewer->glGetFloatv(GL_POLYGON_OFFSET_FACTOR, &offset_factor);\n viewer->glGetFloatv(GL_POLYGON_OFFSET_UNITS, &offset_units);\n glPolygonOffset(0.5f, 0.9f);\n vaos[Scene_polyhedron_selection_item_priv::HLFacets]->bind();\n d->program = getShaderProgram(PROGRAM_WITH_LIGHT);\n attribBuffers(viewer,PROGRAM_WITH_LIGHT);\n d->program->bind();\n d->program->setAttributeValue(\"colors\",QColor(255,153,51));\n viewer->glDrawArrays(GL_TRIANGLES, 0, static_cast(d->positions_HL_facets.size())/3);\n d->program->release();\n vaos[Scene_polyhedron_selection_item_priv::HLFacets]->release();\n\n if(!d->are_temp_buffers_filled)\n {\n d->compute_temp_elements();\n d->initialize_temp_buffers(viewer);\n }\n vaos[Scene_polyhedron_selection_item_priv::TempFacets]->bind();\n d->program = getShaderProgram(PROGRAM_WITH_LIGHT);\n attribBuffers(viewer,PROGRAM_WITH_LIGHT);\n d->program->bind();\n d->program->setAttributeValue(\"colors\",QColor(0,255,0));\n viewer->glDrawArrays(GL_TRIANGLES, 0, static_cast(d->nb_temp_facets/3));\n d->program->release();\n vaos[Scene_polyhedron_selection_item_priv::TempFacets]->release();\n if(!are_buffers_filled)\n {\n d->computeElements();\n d->initializeBuffers(viewer);\n }\n\n vaos[Scene_polyhedron_selection_item_priv::TempFacets]->bind();\n d->program = getShaderProgram(PROGRAM_WITH_LIGHT);\n 
attribBuffers(viewer,PROGRAM_WITH_LIGHT);\n\n d->program->bind();\n d->program->setAttributeValue(\"colors\",QColor(0,255,0));\n viewer->glDrawArrays(GL_TRIANGLES, 0, static_cast(d->nb_temp_facets/3));\n d->program->release();\n vaos[Scene_polyhedron_selection_item_priv::TempFacets]->release();\n\n if(!are_buffers_filled)\n {\n d->computeElements();\n d->initializeBuffers(viewer);\n }\n vaos[Scene_polyhedron_selection_item_priv::Facets]->bind();\n d->program = getShaderProgram(PROGRAM_WITH_LIGHT);\n attribBuffers(viewer,PROGRAM_WITH_LIGHT);\n d->program->bind();\n d->program->setAttributeValue(\"colors\",this->color());\n viewer->glDrawArrays(GL_TRIANGLES, 0, static_cast(d->nb_facets/3));\n d->program->release();\n vaos[Scene_polyhedron_selection_item_priv::Facets]->release();\n\n glEnable(GL_POLYGON_OFFSET_LINE);\n viewer->glPolygonMode(GL_FRONT_AND_BACK,GL_LINE);\n glPolygonOffset(0.0f, 1.5f);\n drawEdges(viewer);\n glDisable(GL_POLYGON_OFFSET_LINE);\n viewer->glPolygonMode(GL_FRONT_AND_BACK,GL_POINT);\n glPolygonOffset(offset_factor, offset_units);\n drawPoints(viewer);\n viewer->glPolygonMode(GL_FRONT_AND_BACK,GL_FILL);\n}\n\nvoid Scene_polyhedron_selection_item::drawEdges(CGAL::Three::Viewer_interface* viewer) const\n{\n\n viewer->glLineWidth(3.f);\n\n if(!d->are_HL_buffers_filled)\n {\n d->compute_HL_elements();\n d->initialize_HL_buffers(viewer);\n }\n\n vaos[Scene_polyhedron_selection_item_priv::HLEdges]->bind();\n d->program = getShaderProgram(PROGRAM_NO_SELECTION);\n attribBuffers(viewer,PROGRAM_NO_SELECTION);\n d->program->bind();\n\n d->program->setAttributeValue(\"colors\",QColor(255,153,51));\n viewer->glDrawArrays(GL_LINES, 0, static_cast(d->positions_HL_lines.size()/3));\n d->program->release();\n vaos[Scene_polyhedron_selection_item_priv::HLEdges]->release();\n\n if(!d->are_temp_buffers_filled)\n {\n d->compute_temp_elements();\n d->initialize_temp_buffers(viewer);\n }\n\n vaos[Scene_polyhedron_selection_item_priv::TempEdges]->bind();\n d->program 
= getShaderProgram(PROGRAM_NO_SELECTION);\n attribBuffers(viewer,PROGRAM_NO_SELECTION);\n d->program->bind();\n\n d->program->setAttributeValue(\"colors\",QColor(0,200,0));\n viewer->glDrawArrays(GL_LINES, 0, static_cast(d->nb_temp_lines/3));\n d->program->release();\n vaos[Scene_polyhedron_selection_item_priv::TempEdges]->release();\n viewer->glLineWidth(3.0f);\n if(!are_buffers_filled)\n {\n d->computeElements();\n d->initializeBuffers(viewer);\n }\n\n vaos[Scene_polyhedron_selection_item_priv::Edges]->bind();\n d->program = getShaderProgram(PROGRAM_NO_SELECTION);\n attribBuffers(viewer,PROGRAM_NO_SELECTION);\n d->program->bind();\n\n d->program->setAttributeValue(\"colors\",QColor(255,\n color().blue()/2,\n color().green()/2));\n viewer->glDrawArrays(GL_LINES, 0, static_cast(d->nb_lines/3));\n d->program->release();\n vaos[Scene_polyhedron_selection_item_priv::Edges]->release();\n\n\n viewer->glLineWidth(1.f);\n}\n\nvoid Scene_polyhedron_selection_item::drawPoints(CGAL::Three::Viewer_interface* viewer) const\n{\n viewer->glPointSize(5.5f);\n\n if(!d->are_HL_buffers_filled)\n {\n d->compute_HL_elements();\n d->initialize_HL_buffers(viewer);\n }\n vaos[Scene_polyhedron_selection_item_priv::HLPoints]->bind();\n d->program = getShaderProgram(PROGRAM_NO_SELECTION);\n attribBuffers(viewer,PROGRAM_NO_SELECTION);\n d->program->bind();\n d->program->setAttributeValue(\"colors\",QColor(255,153,51));\n viewer->glDrawArrays(GL_POINTS, 0, static_cast(d->positions_HL_points.size()/3));\n d->program->release();\n vaos[Scene_polyhedron_selection_item_priv::HLPoints]->release();\n\n if(!d->are_temp_buffers_filled)\n {\n d->compute_temp_elements();\n d->initialize_temp_buffers(viewer);\n }\n viewer->glPointSize(5.5f);\n\n vaos[Scene_polyhedron_selection_item_priv::TempPoints]->bind();\n d->program = getShaderProgram(PROGRAM_NO_SELECTION);\n attribBuffers(viewer,PROGRAM_NO_SELECTION);\n d->program->bind();\n d->program->setAttributeValue(\"colors\",QColor(0,50,0));\n 
viewer->glDrawArrays(GL_POINTS, 0, static_cast(d->nb_temp_points/3));\n vaos[Scene_polyhedron_selection_item_priv::TempPoints]->release();\n vaos[Scene_polyhedron_selection_item_priv::FixedPoints]->bind();\n viewer->glDrawArrays(GL_POINTS, 0, static_cast(d->nb_fixed_points/3));\n d->program->release();\n vaos[Scene_polyhedron_selection_item_priv::FixedPoints]->release();\n if(!are_buffers_filled)\n {\n d->computeElements();\n d->initializeBuffers(viewer);\n }\n vaos[Scene_polyhedron_selection_item_priv::Points]->bind();\n d->program = getShaderProgram(PROGRAM_NO_SELECTION);\n attribBuffers(viewer,PROGRAM_NO_SELECTION);\n d->program->bind();\n d->program->setAttributeValue(\"colors\",QColor(255,\n (std::min)(color().blue()+color().red(), 255),\n (std::min)(color().green()+color().red(), 255)));\n viewer->glDrawArrays(GL_POINTS, 0, static_cast(d->nb_points/3));\n d->program->release();\n vaos[Points]->release();\n\n viewer->glPointSize(1.f);\n}\n\n\nvoid Scene_polyhedron_selection_item::inverse_selection()\n{\n switch(k_ring_selector.active_handle_type)\n {\n case Active_handle::VERTEX:\n {\n Selection_set_vertex temp_select = selected_vertices;\n select_all();\n Q_FOREACH(fg_vertex_descriptor vh, temp_select)\n {\n selected_vertices.erase(vh);\n }\n break;\n }\n case Active_handle::EDGE:\n {\n Selection_set_edge temp_select = selected_edges;\n select_all();\n Q_FOREACH(fg_edge_descriptor ed , temp_select)\n selected_edges.erase(ed);\n break;\n }\n default:\n {\n Selection_set_facet temp_select = selected_facets;\n select_all();\n Q_FOREACH(fg_face_descriptor fh, temp_select)\n selected_facets.erase(fh);\n break;\n }\n }\n invalidateOpenGLBuffers();\n QGLViewer* v = *QGLViewer::QGLViewerPool().begin();\n v->update();\n}\n\nvoid Scene_polyhedron_selection_item::set_operation_mode(int mode)\n{\n k_ring_selector.setEditMode(true);\n Q_EMIT updateInstructions(QString(\"SHIFT + left click to apply operation.\"));\n switch(mode)\n {\n case -2:\n 
set_active_handle_type(d->original_sel_mode);\n Q_EMIT updateInstructions(\"Select two vertices to create the path between them. (1/2)\");\n break;\n case -1:\n //restore original selection_type\n set_active_handle_type(d->original_sel_mode);\n clearHL();\n k_ring_selector.setEditMode(false);\n break;\n //Join vertex\n case 0:\n Q_EMIT updateInstructions(\"Select the edge with extremities you want to join.\");\n //set the selection type to Edge\n set_active_handle_type(static_cast(2));\n break;\n //Split vertex\n case 1:\n Q_EMIT updateInstructions(\"Select the vertex you want to split. (1/3)\");\n //set the selection type to Vertex\n set_active_handle_type(static_cast(0));\n break;\n //Split edge\n case 2:\n Q_EMIT updateInstructions(\"Select the edge you want to split.\");\n //set the selection type to Edge\n set_active_handle_type(static_cast(2));\n break;\n //Join face\n case 3:\n Q_EMIT updateInstructions(\"Select the edge separating the faces you want to join.\");\n //set the selection type to Edge\n set_active_handle_type(static_cast(2));\n break;\n //Split face\n case 4:\n Q_EMIT updateInstructions(\"Select the facet you want to split (degree >= 4). 
(1/3)\");\n //set the selection type to Facet\n set_active_handle_type(static_cast(1));\n break;\n //Collapse edge\n case 5:\n Q_EMIT updateInstructions(\"Select the edge you want to collapse.\");\n //set the selection type to Edge\n set_active_handle_type(static_cast(2));\n break;\n //Flip edge\n case 6:\n Q_EMIT updateInstructions(\"Select the edge you want to flip.\");\n //set the selection type to Edge\n set_active_handle_type(static_cast(2));\n break;\n //Add center vertex\n case 7:\n Q_EMIT updateInstructions(\"Select a facet.\");\n //set the selection type to Facet\n set_active_handle_type(static_cast(1));\n break;\n //Remove center vertex\n case 8:\n Q_EMIT updateInstructions(\"Select the vertex you want to remove.\");\n //set the selection type to vertex\n set_active_handle_type(static_cast(0));\n break;\n //Add vertex and face to border\n case 9:\n Q_EMIT updateInstructions(\"Select a border edge. (1/2)\");\n //set the selection type to Edge\n set_active_handle_type(static_cast(2));\n break;\n //Add face to border\n case 10:\n Q_EMIT updateInstructions(\"Select a border edge. (1/2)\");\n //set the selection type to Edge\n set_active_handle_type(static_cast(2));\n break;\n case 11:\n Q_EMIT updateInstructions(\"Select a vertex. 
(1/2)\");\n //set the selection type to Edge\n set_active_handle_type(static_cast(0));\n break;\n default:\n break;\n }\n d->operation_mode = mode;\n}\ntemplate\nbool Scene_polyhedron_selection_item::treat_classic_selection(const HandleRange& selection)\n{\n typedef typename HandleRange::value_type HandleType;\n Selection_traits tr(this);\n bool any_change = false;\n if(is_insert) {\n BOOST_FOREACH(HandleType h, selection)\n any_change |= tr.container().insert(h).second;\n }\n else{\n BOOST_FOREACH(HandleType h, selection)\n any_change |= (tr.container().erase(h)!=0);\n }\n if(any_change) { invalidateOpenGLBuffers(); Q_EMIT itemChanged(); }\n return any_change;\n}\n\nbool Scene_polyhedron_selection_item::treat_selection(const std::set& selection)\n{\n VPmap vpm = get(CGAL::vertex_point, *polyhedron());\n if(!d->is_treated)\n {\n fg_vertex_descriptor vh = *selection.begin();\n Selection_traits tr(this);\n switch(d->operation_mode)\n {\n //classic selection\n case -2:\n case -1:\n {\n if(!d->is_path_selecting)\n {\n return treat_classic_selection(selection);\n }\n else\n {\n if(is_insert)\n {\n selectPath(*selection.begin());\n invalidateOpenGLBuffers();\n Q_EMIT itemChanged();\n }\n }\n return false;\n break;\n }\n //Split vertex\n case 1:\n {\n //save VH\n d->to_split_vh = vh;\n temp_selected_vertices.insert(d->to_split_vh);\n //set to select facet\n set_active_handle_type(static_cast(1));\n invalidateOpenGLBuffers();\n Q_EMIT updateInstructions(\"Select first facet. 
(2/3)\");\n break;\n }\n //Split face\n case 4:\n {\n static fg_vertex_descriptor s;\n static fg_halfedge_descriptor h1,h2;\n static bool found_h1(false), found_h2(false);\n if(!d->first_selected)\n {\n //Is the vertex on the face ?\n BOOST_FOREACH(fg_halfedge_descriptor hafc, halfedges_around_face(halfedge(d->to_split_fh,*polyhedron()), *polyhedron()))\n {\n if(target(hafc,*polyhedron())==vh)\n {\n h1 = hafc;\n s = vh;\n found_h1 = true;\n break;\n }\n }\n if(!found_h1)\n {\n d->tempInstructions(\"Vertex not selected : The vertex is not on the face.\",\n \"Select the first vertex. (2/3)\");\n }\n else\n {\n d->first_selected = true;\n temp_selected_vertices.insert(s);\n invalidateOpenGLBuffers();\n Q_EMIT updateInstructions(\"Select the second vertex (3/3)\");\n }\n }\n else\n {\n bool is_same(false), are_next(false);\n for(int i=0; i<1; i++) //seems useless but allow the use of break.\n {\n //Is the vertex on the face ?\n BOOST_FOREACH(fg_halfedge_descriptor hafc, halfedges_around_face(halfedge(d->to_split_fh,*polyhedron()), *polyhedron()))\n if(target(hafc,*polyhedron())==vh)\n {\n h2 = hafc;\n found_h2 = true;\n break;\n }\n if(!found_h2)\n {\n break;\n }\n //Are they different ?\n if(h1 == h2)\n {\n is_same = true;\n break;\n }\n is_same = false;\n //Are they directly following each other?\n if(next(h1, *polyhedron()) == h2 ||\n next(h2, *polyhedron()) == h1)\n {\n are_next = true;\n break;\n }\n are_next = false;\n }\n if(!found_h2)\n d->tempInstructions(\"Vertex not selected : The vertex is not on the face.\",\n \"Select the second vertex (3/3).\");\n else if(is_same)\n d->tempInstructions(\"Vertex not selected : The vertices must be different.\",\n \"Select the second vertex (3/3).\");\n else if(are_next)\n d->tempInstructions(\"Vertex not selected : The vertices must not directly follow each other.\",\n \"Select the second vertex (3/3).\");\n else\n {\n CGAL::Euler::split_face(h1,h2, *polyhedron());\n d->first_selected = false;\n 
temp_selected_vertices.clear();\n temp_selected_facets.clear();\n compute_normal_maps();\n invalidateOpenGLBuffers();\n //reset selection type to Facet\n set_active_handle_type(static_cast(1));\n d->tempInstructions(\"Face split.\",\n \"Select a facet (1/3).\");\n polyhedron_item()->invalidateOpenGLBuffers();\n }\n }\n break;\n }\n //Remove center vertex\n case 8:\n {\n bool has_hole = false;\n BOOST_FOREACH(fg_halfedge_descriptor hc, halfedges_around_target(vh,*polyhedron()))\n {\n if(is_border(hc,*polyhedron()))\n {\n has_hole = true;\n break;\n }\n }\n if(!has_hole)\n {\n CGAL::Euler::remove_center_vertex(halfedge(vh,*polyhedron()),*polyhedron());\n compute_normal_maps();\n polyhedron_item()->invalidateOpenGLBuffers();\n }\n else\n {\n d->tempInstructions(\"Vertex not selected : There must be no hole incident to the selection.\",\n \"Select the vertex you want to remove.\");\n }\n break;\n }\n case 11:\n QGLViewer* viewer = *QGLViewer::QGLViewerPool().begin();\n const qglviewer::Vec offset = static_cast(viewer)->offset();\n if(viewer->manipulatedFrame() != d->manipulated_frame)\n {\n temp_selected_vertices.insert(vh);\n k_ring_selector.setEditMode(false);\n const Point_3& p = get(vpm,vh);\n d->manipulated_frame->setPosition(p.x()+offset.x, p.y()+offset.y, p.z()+offset.z);\n viewer->setManipulatedFrame(d->manipulated_frame);\n connect(d->manipulated_frame, SIGNAL(modified()), this, SLOT(updateTick()));\n invalidateOpenGLBuffers();\n Q_EMIT updateInstructions(\"Ctrl+Right-click to move the point. \\nHit Ctrl+Z to leave the selection. 
(2/2)\");\n }\n else\n {\n temp_selected_vertices.clear();\n temp_selected_vertices.insert(vh);\n const Point_3& p = get(vpm,vh);\n d->manipulated_frame->setPosition(p.x()+offset.x, p.y()+offset.y, p.z()+offset.z);\n invalidateOpenGLBuffers();\n }\n break;\n }\n }\n d->is_treated = true;\n //Keeps the item from trying to draw primitive that has just been deleted.\n clearHL();\n return false;\n}\n\n//returns true if halfedge's facet's degree >= degree\n/*\nstd::size_t facet_degree(fg_halfedge_descriptor h, const Face_graph& polyhedron)\n{\n return degree(h,polyhedron);\n}\n*/\nbool Scene_polyhedron_selection_item:: treat_selection(const std::set& selection)\n{\n VPmap vpm = get(CGAL::vertex_point, *polyhedron());\n fg_edge_descriptor ed = *selection.begin();\n if(!d->is_treated)\n {\n Selection_traits tr(this);\n switch(d->operation_mode)\n {\n //classic selection\n case -1:\n {\n return treat_classic_selection(selection);\n break;\n }\n //Join vertex\n case 0:\n if(boost::distance(CGAL::halfedges_around_face(halfedge(ed, *polyhedron()), *polyhedron())) < 4\n ||\n boost::distance(CGAL::halfedges_around_face(opposite(halfedge(ed, *polyhedron()),*polyhedron()),*polyhedron()))< 4)\n {\n d->tempInstructions(\"Edge not selected: the incident facets must have a degree of at least 4.\",\n \"Select the edge with extremities you want to join.\");\n }\n else\n {\n fg_halfedge_descriptor targt = halfedge(ed, *polyhedron());\n Point S,T;\n S = get(vpm, source(targt, *polyhedron()));\n T = get(vpm, target(targt, *polyhedron()));\n put(vpm, target(CGAL::Euler::join_vertex(targt,*polyhedron()),*polyhedron()), Point(0.5*(S.x()+T.x()), 0.5*(S.y()+T.y()), 0.5*(S.z()+T.z())));\n d->tempInstructions(\"Vertices joined.\",\n \"Select the edge with extremities you want to join.\");\n compute_normal_maps();\n invalidateOpenGLBuffers();\n polyhedron_item()->invalidateOpenGLBuffers();\n }\n break;\n //Split edge\n case 2:\n {\n\n Point_3 a(get(vpm,target(halfedge(ed, 
*polyhedron()),*polyhedron()))),\n b(get(vpm,target(opposite(halfedge(ed, *polyhedron()),*polyhedron()),*polyhedron())));\n fg_halfedge_descriptor hhandle = CGAL::Euler::split_edge(halfedge(ed, *polyhedron()),*polyhedron());\n Point_3 p((b.x()+a.x())/2.0, (b.y()+a.y())/2.0,(b.z()+a.z())/2.0);\n\n put(vpm, target(hhandle,*polyhedron()), p);\n invalidateOpenGLBuffers();\n poly_item->invalidateOpenGLBuffers();\n compute_normal_maps();\n d->tempInstructions(\"Edge splitted.\",\n \"Select the edge you want to split.\");\n break;\n }\n //Join face\n case 3:\n if(out_degree(source(halfedge(ed,*polyhedron()),*polyhedron()),*polyhedron())<3 ||\n out_degree(target(halfedge(ed,*polyhedron()),*polyhedron()),*polyhedron())<3)\n d->tempInstructions(\"Faces not joined : the two ends of the edge must have a degree of at least 3.\",\n \"Select the edge separating the faces you want to join.\");\n else\n {\n CGAL::Euler::join_face(halfedge(ed, *polyhedron()), *polyhedron());\n compute_normal_maps();\n poly_item->invalidateOpenGLBuffers();\n }\n break;\n //Collapse edge\n case 5:\n if(!is_triangle_mesh(*polyhedron()))\n {\n d->tempInstructions(\"Edge not collapsed : the graph must be triangulated.\",\n \"Select the edge you want to collapse.\");\n }\n else if(!CGAL::Euler::does_satisfy_link_condition(ed, *polyhedron()))\n {\n d->tempInstructions(\"Edge not collapsed : link condition not satidfied.\",\n \"Select the edge you want to collapse.\");\n }\n else\n {\n fg_halfedge_descriptor targt = halfedge(ed, *polyhedron());\n Point S,T;\n S = get(vpm, source(targt, *polyhedron()));\n T = get(vpm, target(targt, *polyhedron()));\n\n put(vpm, CGAL::Euler::collapse_edge(ed, *polyhedron()), Point(0.5*(S.x()+T.x()), 0.5*(S.y()+T.y()), 0.5*(S.z()+T.z())));\n compute_normal_maps();\n polyhedron_item()->invalidateOpenGLBuffers();\n\n d->tempInstructions(\"Edge collapsed.\",\n \"Select the edge you want to collapse.\");\n }\n break;\n //Flip edge\n case 6:\n\n //check preconditions\n 
if(boost::distance(CGAL::halfedges_around_face(halfedge(ed, *polyhedron()),*polyhedron())) == 3 \n && \n boost::distance(CGAL::halfedges_around_face(opposite(halfedge(ed, *polyhedron()),*polyhedron()),*polyhedron())) == 3)\n {\n CGAL::Euler::flip_edge(halfedge(ed, *polyhedron()), *polyhedron());\n polyhedron_item()->invalidateOpenGLBuffers();\n compute_normal_maps();\n }\n else\n {\n d->tempInstructions(\"Edge not selected : incident facets must be triangles.\",\n \"Select the edge you want to flip.\");\n }\n\n break;\n //Add vertex and face to border\n case 9:\n {\n static fg_halfedge_descriptor t;\n if(!d->first_selected)\n {\n bool found = false;\n fg_halfedge_descriptor hc = halfedge(ed, *polyhedron());\n if(is_border(hc,*polyhedron()))\n {\n t = hc;\n found = true;\n }\n else if(is_border(opposite(hc,*polyhedron()),*polyhedron()))\n {\n t = opposite(hc,*polyhedron());\n found = true;\n }\n if(found)\n {\n d->first_selected = true;\n temp_selected_edges.insert(edge(t, *polyhedron()));\n temp_selected_vertices.insert(target(t,*polyhedron()));\n invalidateOpenGLBuffers();\n Q_EMIT updateInstructions(\"Select second edge. (2/2)\");\n }\n else\n {\n d->tempInstructions(\"Edge not selected : no border found.\",\n \"Select a border edge. (1/2)\");\n }\n }\n else\n {\n fg_halfedge_descriptor hc = halfedge(ed, *polyhedron());\n if(d->canAddFaceAndVertex(hc, t))\n {\n d->first_selected = false;\n\n\n temp_selected_edges.clear();\n temp_selected_vertices.clear();\n compute_normal_maps();\n invalidateOpenGLBuffers();\n polyhedron_item()->invalidateOpenGLBuffers();\n d->tempInstructions(\"Face and vertex added.\",\n \"Select a border edge. 
(1/2)\");\n }\n }\n break;\n }\n //Add face to border\n case 10:\n {\n static fg_halfedge_descriptor t;\n if(!d->first_selected)\n {\n bool found = false;\n fg_halfedge_descriptor hc = halfedge(ed, *polyhedron());\n if(is_border(hc,*polyhedron()))\n {\n t = hc;\n found = true;\n }\n else if(is_border(opposite(hc,*polyhedron()),*polyhedron()))\n {\n t = opposite(hc,*polyhedron());\n found = true;\n }\n if(found)\n {\n d->first_selected = true;\n temp_selected_edges.insert(edge(t, *polyhedron()));\n temp_selected_vertices.insert(target(t,*polyhedron()));\n invalidateOpenGLBuffers();\n Q_EMIT updateInstructions(\"Select second edge. (2/2)\");\n set_active_handle_type(static_cast(2));\n }\n else\n {\n d->tempInstructions(\"Edge not selected : no border found.\",\n \"Select a border edge. (1/2)\");\n }\n }\n else\n {\n fg_halfedge_descriptor hc = halfedge(ed, *polyhedron());\n if(d->canAddFace(hc, t))\n {\n d->first_selected = false;\n temp_selected_vertices.clear();\n temp_selected_edges.clear();\n compute_normal_maps();\n invalidateOpenGLBuffers();\n polyhedron_item()->invalidateOpenGLBuffers();\n d->tempInstructions(\"Face added.\",\n \"Select a border edge. 
(1/2)\");\n }\n }\n break;\n }\n }\n }\n d->is_treated = true;\n //Keeps the item from trying to draw primitive that has just been deleted.\n clearHL();\n return false;\n}\n\nbool Scene_polyhedron_selection_item::treat_selection(const std::vector& selection)\n{\n return treat_classic_selection(selection);\n}\n\nbool Scene_polyhedron_selection_item::treat_selection(const std::set& selection)\n{\n VPmap vpm = get(CGAL::vertex_point,*polyhedron());\n if(!d->is_treated)\n {\n fg_face_descriptor fh = *selection.begin();\n Selection_traits tr(this);\n switch(d->operation_mode)\n {\n //classic selection\n case -1:\n {\n return treat_classic_selection(selection);\n break;\n }\n //Split vertex\n case 1:\n {\n static fg_halfedge_descriptor h1;\n //stores first fh and emit change label\n if(!d->first_selected)\n {\n bool found = false;\n //test preco\n BOOST_FOREACH(fg_halfedge_descriptor hafc, halfedges_around_face(halfedge(fh,*polyhedron()),*polyhedron()))\n {\n if(target(hafc,*polyhedron())==d->to_split_vh)\n {\n h1 = hafc;\n found = true;\n break;\n }\n }\n if(found)\n {\n d->first_selected = true;\n temp_selected_facets.insert(fh);\n invalidateOpenGLBuffers();\n Q_EMIT updateInstructions(\"Select the second facet. (3/3)\");\n }\n else\n d->tempInstructions(\"Facet not selected : no valid halfedge\",\n \"Select first facet. 
(2/3)\");\n }\n //call the function with point and facets.\n else\n {\n //get the right halfedges\n fg_halfedge_descriptor h2;\n bool found = false;\n BOOST_FOREACH(fg_halfedge_descriptor hafc, halfedges_around_face(halfedge(fh,*polyhedron()),*polyhedron()))\n {\n if(target(hafc,*polyhedron())==d->to_split_vh)\n {\n h2 = hafc;\n found = true;\n break;\n }\n }\n\n if(found &&(h1 != h2))\n {\n fg_halfedge_descriptor hhandle = CGAL::Euler::split_vertex(h1,h2,*polyhedron());\n\n temp_selected_facets.clear();\n Point_3 p1t = get(vpm, target(h1,*polyhedron()));\n Point_3 p1s = get(vpm, target(opposite(h1,*polyhedron()),*polyhedron()));\n double x = p1t.x() + 0.01 * (p1s.x() - p1t.x());\n double y = p1t.y() + 0.01 * (p1s.y() - p1t.y());\n double z = p1t.z() + 0.01 * (p1s.z() - p1t.z());\n put(vpm, target(opposite(hhandle,*polyhedron()),*polyhedron()), Point_3(x,y,z));;\n d->first_selected = false;\n temp_selected_vertices.clear();\n compute_normal_maps();\n invalidateOpenGLBuffers();\n //reset selection mode\n set_active_handle_type(static_cast(0));\n poly_item->invalidateOpenGLBuffers();\n d->tempInstructions(\"Vertex splitted.\", \"Select the vertex you want splitted. (1/3)\");\n }\n else if(h1 == h2)\n {\n d->tempInstructions(\"Facet not selected : same as the first.\", \"Select the second facet. (3/3)\");\n }\n else\n {\n d->tempInstructions(\"Facet not selected : no valid halfedge.\", \"Select the second facet. (3/3)\");\n }\n }\n break;\n }\n //Split face\n case 4:\n if(is_triangle(halfedge(fh,*d->poly), *d->poly))\n {\n d->tempInstructions(\"Facet not selected : Facet must not be a triangle.\",\n \"Select the facet you want to split (degree >= 4). (1/3)\");\n }\n else\n {\n d->to_split_fh = fh;\n temp_selected_facets.insert(d->to_split_fh);\n compute_normal_maps();\n invalidateOpenGLBuffers();\n //set to select vertex\n set_active_handle_type(static_cast(0));\n Q_EMIT updateInstructions(\"Select first vertex. 
(2/3)\");\n }\n break;\n //Add center vertex\n case 7:\n if(is_border(halfedge(fh,*polyhedron()),*polyhedron()))\n {\n d->tempInstructions(\"Facet not selected : Facet must not be null.\",\n \"Select a Facet. (1/3)\");\n }\n else\n {\n double x(0), y(0), z(0);\n int total(0);\n\n BOOST_FOREACH(fg_halfedge_descriptor hafc, halfedges_around_face(halfedge(fh,*polyhedron()),*polyhedron()))\n {\n fg_vertex_descriptor vd = target(hafc,*polyhedron());\n Point_3& p = get(vpm,vd);\n x+= p.x(); y+=p.y(); z+=p.z();\n total++;\n }\n fg_halfedge_descriptor hhandle = CGAL::Euler::add_center_vertex(halfedge(fh,*polyhedron()), *polyhedron());\n if(total !=0)\n put(vpm, target(hhandle,*polyhedron()), Point_3(x/(double)total, y/(double)total, z/(double)total));\n compute_normal_maps();\n poly_item->invalidateOpenGLBuffers();\n\n }\n break;\n }\n }\n d->is_treated = true;\n //Keeps the item from trying to draw primitive that has just been deleted.\n clearHL();\n return false;\n}\n\nvoid Scene_polyhedron_selection_item_priv::tempInstructions(QString s1, QString s2)\n{\n m_temp_instructs = s2;\n Q_EMIT item->updateInstructions(QString(\"%1\").arg(s1));\n QTimer timer;\n timer.singleShot(5500, item, SLOT(emitTempInstruct()));\n}\nvoid Scene_polyhedron_selection_item::emitTempInstruct()\n{\n Q_EMIT updateInstructions(QString(\"%1\").arg(d->m_temp_instructs));\n}\n\n/// An exception used while catching a throw that stops Dijkstra's algorithm\n/// once the shortest path to a target has been found.\nclass Dijkstra_end_exception : public std::exception\n{\n const char* what() const throw ()\n {\n return \"Dijkstra shortest path: reached the target vertex.\";\n }\n};\n\n/// Visitor to stop Dijkstra's algorithm once the given target turns 'BLACK',\n/// that is when the target has been examined through all its incident edges and\n/// the shortest path is thus known.\nclass Stop_at_target_Dijkstra_visitor : boost::default_dijkstra_visitor\n{\n fg_vertex_descriptor destination_vd;\n\npublic:\n 
Stop_at_target_Dijkstra_visitor(fg_vertex_descriptor destination_vd)\n : destination_vd(destination_vd)\n { }\n\n void initialize_vertex(const fg_vertex_descriptor& /*s*/, const Face_graph& /*mesh*/) const { }\n void examine_vertex(const fg_vertex_descriptor& /*s*/, const Face_graph& /*mesh*/) const { }\n void examine_edge(const fg_edge_descriptor& /*e*/, const Face_graph& /*mesh*/) const { }\n void edge_relaxed(const fg_edge_descriptor& /*e*/, const Face_graph& /*mesh*/) const { }\n void discover_vertex(const fg_vertex_descriptor& /*s*/, const Face_graph& /*mesh*/) const { }\n void edge_not_relaxed(const fg_edge_descriptor& /*e*/, const Face_graph& /*mesh*/) const { }\n void finish_vertex(const fg_vertex_descriptor &vd, const Face_graph& /* mesh*/) const\n {\n if(vd == destination_vd)\n throw Dijkstra_end_exception();\n }\n};\n\nvoid Scene_polyhedron_selection_item_priv::computeAndDisplayPath()\n{\n item->temp_selected_edges.clear();\n path.clear();\n\n typedef boost::unordered_map Pred_umap;\n typedef boost::associative_property_map Pred_pmap;\n\n Pred_umap predecessor;\n Pred_pmap pred_pmap(predecessor);\n\n vertex_on_path vop;\n QList::iterator it;\n for(it = constrained_vertices.begin(); it!=constrained_vertices.end()-1; ++it)\n {\n fg_vertex_descriptor t(*it), s(*(it+1));\n Stop_at_target_Dijkstra_visitor vis(t);\n\n try\n {\n boost::dijkstra_shortest_paths(*item->polyhedron(), s,\n boost::predecessor_map(pred_pmap).visitor(vis));\n }\n catch (const std::exception& e)\n {\n std::cout << e.what() << std::endl;\n }\n\n // Walk back from target to source and collect vertices along the way\n do\n {\n vop.vertex = t;\n if(constrained_vertices.contains(t))\n {\n vop.is_constrained = true;\n }\n else\n vop.is_constrained = false;\n path.append(vop);\n t = get(pred_pmap, t);\n }\n while(t != s);\n }\n\n // Add the last vertex\n vop.vertex = constrained_vertices.last();\n vop.is_constrained = true;\n path.append(vop);\n\n // Display path\n QList::iterator path_it;\n 
for(path_it = path.begin(); path_it!=path.end()-1; ++path_it)\n {\n std::pair h = halfedge((path_it+1)->vertex,path_it->vertex,*item->polyhedron());\n if(h.second)\n item->temp_selected_edges.insert(edge(h.first, *item->polyhedron()));\n }\n}\n\nvoid Scene_polyhedron_selection_item_priv::addVertexToPath(fg_vertex_descriptor vh, vertex_on_path &first)\n{\n vertex_on_path source;\n source.vertex = vh;\n source.is_constrained = true;\n path.append(source);\n first = source;\n}\nvoid Scene_polyhedron_selection_item::selectPath(fg_vertex_descriptor vh)\n{\n\n bool replace = !temp_selected_edges.empty();\n static Scene_polyhedron_selection_item_priv::vertex_on_path first;\n if(!d->first_selected)\n {\n //if the path doesnt exist, add the vertex as the source of the path.\n if(!replace)\n {\n d->addVertexToPath(vh, first);\n }\n //if the path exists, get the vertex_on_path corresponding to the selected vertex.\n else\n {\n //The first vertex of the path can not be moved, but you can close your path on it to make a loop.\n bool alone = true;\n QList::iterator it;\n for(it = d->path.begin(); it!=d->path.end(); ++it)\n {\n if(it->vertex == vh&& it!=d->path.begin())\n alone = false;\n }\n if(d->path.begin()->vertex == vh )\n if(alone)\n {\n d->constrained_vertices.append(vh); //if the path loops, the indexOf may be invalid, hence the check.\n //Display the new path\n d->computeAndDisplayPath();\n d->first_selected = false;\n d->constrained_vertices.clear();\n fixed_vertices.clear();\n for(it = d->path.begin(); it!=d->path.end(); ++it)\n {\n if(it->is_constrained )\n {\n d->constrained_vertices.append(it->vertex);\n fixed_vertices.insert(it->vertex);\n }\n }\n\n return;\n }\n bool found = false;\n Q_FOREACH(Scene_polyhedron_selection_item_priv::vertex_on_path vop, d->path)\n {\n if(vop.vertex == vh)\n {\n first = vop;\n found = true;\n break;\n }\n }\n if(!found)//add new end_point;\n {\n d->constrained_vertices.append(vh);\n //Display the new path\n 
d->computeAndDisplayPath();\n d->first_selected = false;\n d->constrained_vertices.clear();\n fixed_vertices.clear();\n for(it = d->path.begin(); it!=d->path.end(); ++it)\n {\n if(it->is_constrained )\n {\n d->constrained_vertices.append(it->vertex);\n fixed_vertices.insert(it->vertex);\n }\n }\n\n return;\n }\n }\n temp_selected_vertices.insert(vh);\n d->first_selected = true;\n }\n else\n {\n if(!replace)\n {\n d->constrained_vertices.append(vh);\n temp_selected_vertices.erase(first.vertex);\n\n updateInstructions(\"You can select a vertex on the green path to move it. \"\n \"If you do so, it will become a red fixed point. \"\n \"The path will be recomputed to go through that point. \"\n \"Click on 'Add to selection' to validate the selection. (2/2)\");\n }\n else\n {\n bool is_same(false), alone(true);\n if( (vh == d->constrained_vertices.first() && first.vertex == d->constrained_vertices.last())\n || (vh == d->constrained_vertices.last() && first.vertex == d->constrained_vertices.first()))\n\n {\n is_same = true;\n }\n if(first.vertex == d->path.begin()->vertex)\n alone =false;\n bool is_last = true;\n //find the previous constrained vertex on path\n Scene_polyhedron_selection_item_priv::vertex_on_path closest = d->path.last();\n QList::iterator it;\n int index = 0;\n int closest_index = 0;\n //get first's index\n for(it = d->path.begin(); it!=d->path.end(); ++it)\n {\n bool end_of_path_is_prio = true;//makes the end of the path prioritary over the other points when there is a conflict\n if(first.vertex == (d->path.end()-1)->vertex)\n if(it != d->path.end()-1)\n end_of_path_is_prio = false;\n //makes the end of the path prioritary over the other points when there is a conflict\n if(it->vertex == first.vertex &&\n !(it == d->path.begin())&&// makes the begining of the path impossible to move\n end_of_path_is_prio)\n {\n if(it!=d->path.end()-1 &&! 
is_same )\n {\n d->constrained_vertices.removeAll(it->vertex);\n if(!alone)\n d->constrained_vertices.prepend(it->vertex);\n }\n d->path.erase(it);\n break;\n }\n if(it->is_constrained)\n closest_index++;\n index++;\n }\n //get first constrained vertex following first in path\n for(it = d->path.begin() + index; it!=d->path.end(); ++it)\n {\n if(it->is_constrained )\n {\n is_last = false;\n closest = *it;\n break;\n }\n }\n //mark the new vertex as constrained before closest.\n temp_selected_vertices.erase(first.vertex);\n //check if the vertex is contained several times in the path\n if(!is_last)\n {\n d->constrained_vertices.insert(closest_index, vh);//cannot really use indexOf in case a fixed_point is used several times\n }\n else\n d->constrained_vertices.replace(d->constrained_vertices.size()-1, vh);\n\n\n }\n //Display the new path\n d->computeAndDisplayPath();\n d->first_selected = false;\n }\n //update constrained_vertices\n d->constrained_vertices.clear();\n fixed_vertices.clear();\n QList::iterator it;\n for(it = d->path.begin(); it!=d->path.end(); ++it)\n {\n if(it->is_constrained )\n {\n d->constrained_vertices.append(it->vertex);\n fixed_vertices.insert(it->vertex);\n }\n }\n}\n\n\nvoid Scene_polyhedron_selection_item::on_Ctrlz_pressed()\n{\n d->path.clear();\n d->constrained_vertices.clear();\n fixed_vertices.clear();\n validateMoveVertex();\n d->first_selected = false;\n temp_selected_vertices.clear();\n temp_selected_edges.clear();\n temp_selected_facets.clear();\n d->are_temp_buffers_filled = false;\n set_operation_mode(d->operation_mode);\n Q_EMIT itemChanged();\n}\n\nScene_polyhedron_selection_item::Scene_polyhedron_selection_item()\n : Scene_polyhedron_item_decorator(NULL, false)\n{\n d = new Scene_polyhedron_selection_item_priv(this);\n d->original_sel_mode = static_cast(0);\n d->operation_mode = -1;\n for(int i=0; icreate();\n }\n\n for(int i=0; inb_facets = 0;\n d->nb_points = 0;\n d->nb_lines = 0;\n this->setColor(QColor(87,87,87));\n 
d->first_selected = false;\n d->is_treated = false;\n d->poly_need_update = false;\n d->are_temp_buffers_filled = false;\n d->poly = NULL;\n d->ready_to_move = false;\n}\n\nScene_polyhedron_selection_item::Scene_polyhedron_selection_item(Scene_face_graph_item* poly_item, QMainWindow* mw)\n : Scene_polyhedron_item_decorator(NULL, false)\n{\n d = new Scene_polyhedron_selection_item_priv(this);\n d->original_sel_mode = static_cast(0);\n d->operation_mode = -1;\n d->nb_facets = 0;\n d->nb_points = 0;\n d->nb_lines = 0;\n\n for(int i=0; icreate();\n }\n\n for(int i=0; ipoly = NULL;\n init(poly_item, mw);\n this->setColor(QColor(87,87,87));\n invalidateOpenGLBuffers();\n compute_normal_maps();\n d->first_selected = false;\n d->is_treated = false;\n d->poly_need_update = false;\n d->ready_to_move = false;\n\n}\n\nScene_polyhedron_selection_item::~Scene_polyhedron_selection_item()\n{\n delete d;\n QGLViewer* v = *QGLViewer::QGLViewerPool().begin();\n CGAL::Three::Viewer_interface* viewer = dynamic_cast(v);\n viewer->setBindingSelect();\n}\n\nvoid Scene_polyhedron_selection_item::setPathSelection(bool b) {\n k_ring_selector.setEditMode(b);\n d->is_path_selecting = b;\n if(d->is_path_selecting){\n int ind = 0;\n boost::property_map::type vsm =\n get(CGAL::vertex_selection,*polyhedron());\n BOOST_FOREACH(fg_vertex_descriptor vd, vertices(*polyhedron())){\n put(vsm,vd, ind++);\n }\n }\n}\n\nvoid Scene_polyhedron_selection_item::update_poly()\n{\n if(d->poly_need_update)\n poly_item->invalidateOpenGLBuffers();\n}\n\nvoid Scene_polyhedron_selection_item::resetIsTreated() { d->is_treated = false;}\n\nvoid Scene_polyhedron_selection_item::invalidateOpenGLBuffers() {\n\n // do not use decorator function, which calls changed on poly_item which cause deletion of AABB\n // poly_item->invalidateOpenGLBuffers();\n are_buffers_filled = false;\n d->are_temp_buffers_filled = false;\n d->poly = polyhedron();\n compute_bbox();\n}\n\nvoid 
Scene_polyhedron_selection_item::add_to_selection()\n{\n Q_FOREACH(fg_edge_descriptor ed, temp_selected_edges)\n {\n selected_edges.insert(ed);\n temp_selected_edges.erase(ed);\n }\n on_Ctrlz_pressed();\n invalidateOpenGLBuffers();\n QGLViewer* v = *QGLViewer::QGLViewerPool().begin();\n v->update();\n d->tempInstructions(\"Path added to selection.\",\n \"Select two vertices to create the path between them. (1/2)\");\n}\n\nvoid Scene_polyhedron_selection_item::save_handleType()\n{\n d->original_sel_mode = get_active_handle_type();\n}\nvoid Scene_polyhedron_selection_item::compute_normal_maps()\n{\n\n d->face_normals_map.clear();\n d->vertex_normals_map.clear();\n d->nf_pmap = boost::associative_property_map< CGAL::Unique_hash_map >(d->face_normals_map);\n d->nv_pmap = boost::associative_property_map< CGAL::Unique_hash_map >(d->vertex_normals_map);\n PMP::compute_normals(*d->poly, d->nv_pmap, d->nf_pmap);\n}\n\nvoid Scene_polyhedron_selection_item::updateTick()\n{\n d->ready_to_move = true;\n QTimer::singleShot(0,this,SLOT(moveVertex()));\n}\n\n\nvoid Scene_polyhedron_selection_item::moveVertex()\n{\n if(d->ready_to_move)\n {\n const qglviewer::Vec offset = static_cast(QGLViewer::QGLViewerPool().first())->offset();\n fg_vertex_descriptor vh = *temp_selected_vertices.begin();\n\n VPmap vpm = get(CGAL::vertex_point,*polyhedron());\n put(vpm, vh, Point_3(d->manipulated_frame->position().x-offset.x,\n d->manipulated_frame->position().y-offset.y,\n d->manipulated_frame->position().z-offset.z));\n invalidateOpenGLBuffers();\n poly_item->invalidateOpenGLBuffers();\n d->ready_to_move = false;\n }\n}\n\nvoid Scene_polyhedron_selection_item::validateMoveVertex()\n{\n temp_selected_vertices.clear();\n QGLViewer* viewer = *QGLViewer::QGLViewerPool().begin();\n k_ring_selector.setEditMode(true);\n viewer->setManipulatedFrame(NULL);\n invalidateOpenGLBuffers();\n Q_EMIT updateInstructions(\"Select a vertex. 
(1/2)\");\n}\n\n\nbool Scene_polyhedron_selection_item_priv::canAddFace(fg_halfedge_descriptor hc, fg_halfedge_descriptor t)\n{\n bool found(false), is_border_h(false);\n\n //if the selected halfedge is not a border, stop and signal it.\n if(is_border(hc,*polyhedron()))\n is_border_h = true;\n else if(is_border(opposite(hc,*polyhedron()),*polyhedron()))\n {\n hc = opposite(hc,*polyhedron());\n is_border_h = true;\n }\n if(!is_border_h)\n {\n tempInstructions(\"Edge not selected : no shared border found.\",\n \"Select the second edge. (2/2)\");\n return false;\n }\n //if the halfedges are the same, stop and signal it.\n if(hc == t)\n {\n tempInstructions(\"Edge not selected : halfedges must be different.\",\n \"Select the second edge. (2/2)\");\n return false;\n }\n //if the halfedges are adjacent, stop and signal it.\n if(next(t, *item->polyhedron()) == hc || next(hc, *item->polyhedron()) == t)\n {\n tempInstructions(\"Edge not selected : halfedges must not be adjacent.\",\n \"Select the second edge. (2/2)\");\n return false;\n }\n\n //if the halfedges are not on the same border, stop and signal it.\n fg_halfedge_descriptor iterator = next(t, *item->polyhedron());\n while(iterator != t)\n {\n if(iterator == hc)\n {\n found = true;\n fg_halfedge_descriptor res =\n CGAL::Euler::add_face_to_border(t,hc, *item->polyhedron());\n\n if(CGAL::is_degenerate_triangle_face(res, *item->polyhedron(), get(CGAL::vertex_point, *item->polyhedron()), Kernel()))\n {\n CGAL::Euler::remove_face(res, *item->polyhedron());\n tempInstructions(\"Edge not selected : resulting facet is degenerated.\",\n \"Select the second edge. (2/2)\");\n return false;\n }\n break;\n }\n iterator = next(iterator, *item->polyhedron());\n }\n if(!found)\n {\n tempInstructions(\"Edge not selected : no shared border found.\",\n \"Select the second edge. 
(2/2)\");\n return false;\n }\n return true;\n}\n\nbool Scene_polyhedron_selection_item_priv::canAddFaceAndVertex(fg_halfedge_descriptor hc, fg_halfedge_descriptor t)\n{\n bool found(false), is_border_h(false);\n\n //if the selected halfedge is not a border, stop and signal it.\n if(is_border(hc,*polyhedron()))\n is_border_h = true;\n else if(is_border(opposite(hc,*polyhedron()),*polyhedron()))\n {\n hc = opposite(hc,*polyhedron());\n is_border_h = true;\n }\n if(!is_border_h)\n {\n tempInstructions(\"Edge not selected : no shared border found.\",\n \"Select the second edge. (2/2)\");\n return false;\n }\n //if the halfedges are the same, stop and signal it.\n if(hc == t)\n {\n tempInstructions(\"Edge not selected : halfedges must be different.\",\n \"Select the second edge. (2/2)\");\n return false;\n }\n\n //if the halfedges are not on the same border, stop and signal it.\n fg_halfedge_descriptor iterator = next(t, *item->polyhedron());\n while(iterator != t)\n {\n if(iterator == hc)\n {\n found = true;\n CGAL::Euler::add_vertex_and_face_to_border(hc,t, *item->polyhedron());\n break;\n }\n iterator = next(iterator, *item->polyhedron());\n }\n if(!found)\n {\n tempInstructions(\"Edge not selected : no shared border found.\",\n \"Select the second edge. 
(2/2)\");\n return false;\n }\n return true;\n}\n\nvoid Scene_polyhedron_selection_item::clearHL()\n{\n HL_selected_edges.clear();\n HL_selected_facets.clear();\n HL_selected_vertices.clear();\n d->are_HL_buffers_filled = false;\n Q_EMIT itemChanged();\n}\nvoid Scene_polyhedron_selection_item::selected_HL(const std::set& m)\n{\n HL_selected_edges.clear();\n HL_selected_facets.clear();\n HL_selected_vertices.clear();\n HL_selected_vertices.insert(*m.begin());\n\n d->are_HL_buffers_filled = false;\n Q_EMIT itemChanged();\n}\n\nvoid Scene_polyhedron_selection_item::selected_HL(const std::set& m)\n{\n HL_selected_edges.clear();\n HL_selected_facets.clear();\n HL_selected_vertices.clear();\n HL_selected_facets.insert(*m.begin());\n d->are_HL_buffers_filled = false;\n Q_EMIT itemChanged();\n}\n\nvoid Scene_polyhedron_selection_item::selected_HL(const std::set& m)\n{\n HL_selected_edges.clear();\n HL_selected_facets.clear();\n HL_selected_vertices.clear();\n HL_selected_edges.insert(*m.begin());\n d->are_HL_buffers_filled = false;\n Q_EMIT itemChanged();\n}\n\nvoid Scene_polyhedron_selection_item::init(Scene_face_graph_item* poly_item, QMainWindow* mw)\n{\n this->poly_item = poly_item;\n d->poly =poly_item->polyhedron();\n connect(poly_item, SIGNAL(item_is_about_to_be_changed()), this, SLOT(poly_item_changed()));\n //parameters type must be of the same name here and there, so they must be hardcoded.\n connect(&k_ring_selector, SIGNAL(selected(const std::set&)), this,\n SLOT(selected(const std::set&)));\n\n connect(&k_ring_selector, SIGNAL(selected(const std::set&)), this,\n SLOT(selected(const std::set&)));\n\n connect(&k_ring_selector, SIGNAL(selected(const std::set&)), this,\n SLOT(selected(const std::set&)));\n\n connect(&k_ring_selector, SIGNAL(selected_HL(const std::set&)), this,\n SLOT(selected_HL(const std::set&)));\n\n connect(&k_ring_selector, SIGNAL(selected_HL(const std::set&)), this,\n SLOT(selected_HL(const std::set&)));\n\n connect(&k_ring_selector, 
SIGNAL(selected_HL(const std::set&)), this,\n SLOT(selected_HL(const std::set&)));\n connect(&k_ring_selector, SIGNAL(clearHL()), this,\n SLOT(clearHL()));\n connect(poly_item, SIGNAL(selection_done()), this, SLOT(update_poly()));\n connect(&k_ring_selector, SIGNAL(endSelection()), this,SLOT(endSelection()));\n connect(&k_ring_selector, SIGNAL(toogle_insert(bool)), this,SLOT(toggle_insert(bool)));\n connect(&k_ring_selector,SIGNAL(isCurrentlySelected(Scene_facegraph_item_k_ring_selection*)), this, SIGNAL(isCurrentlySelected(Scene_facegraph_item_k_ring_selection*)));\n k_ring_selector.init(poly_item, mw, Active_handle::VERTEX, -1);\n connect(&k_ring_selector, SIGNAL(resetIsTreated()), this, SLOT(resetIsTreated()));\n QGLViewer* viewer = *QGLViewer::QGLViewerPool().begin();\n d->manipulated_frame = new ManipulatedFrame();\n viewer->installEventFilter(this);\n mw->installEventFilter(this);\n}\n\nvoid Scene_polyhedron_selection_item::select_all_NT()\n{\n BOOST_FOREACH(fg_face_descriptor fd, faces(*polyhedron())){\n if(! 
is_triangle(halfedge(fd,*polyhedron()), *polyhedron()))\n selected_facets.insert(fd);\n }\n invalidateOpenGLBuffers();\n Q_EMIT itemChanged();\n}\n\nvoid Scene_polyhedron_selection_item::selection_changed(bool b)\n{\n QGLViewer* v = *QGLViewer::QGLViewerPool().begin();\n CGAL::Three::Viewer_interface* viewer = dynamic_cast(v);\n if(!viewer)\n return;\n\n if(!b)\n {\n viewer->setBindingSelect();\n }\n else\n {\n viewer->setNoBinding();\n }\n}\n\nvoid Scene_polyhedron_selection_item::printPrimitiveId(QPoint p, CGAL::Three::Viewer_interface* viewer)\n{\n d->item->polyhedron_item()->printPrimitiveId(p, viewer);\n}\nbool Scene_polyhedron_selection_item::printVertexIds(CGAL::Three::Viewer_interface* viewer) const\n{\n return d->item->polyhedron_item()->printVertexIds(viewer);\n return false;\n}\nbool Scene_polyhedron_selection_item::printEdgeIds(CGAL::Three::Viewer_interface* viewer) const\n{\n d->item->polyhedron_item()->printEdgeIds(viewer);\n return false;\n}\nbool Scene_polyhedron_selection_item::printFaceIds(CGAL::Three::Viewer_interface* viewer) const\n{\n return d->item->polyhedron_item()->printFaceIds(viewer);\n return false;\n}\nvoid Scene_polyhedron_selection_item::printAllIds(CGAL::Three::Viewer_interface* viewer)\n{\n d->item->polyhedron_item()->printAllIds(viewer);\n}\nbool Scene_polyhedron_selection_item::testDisplayId(double x, double y, double z, CGAL::Three::Viewer_interface* viewer)const\n{\n return d->item->polyhedron_item()->testDisplayId(x, y, z, viewer);\n return false;\n}\n\nbool Scene_polyhedron_selection_item::shouldDisplayIds(CGAL::Three::Scene_item *current_item) const\n{\n return d->item->polyhedron_item() == current_item;\n return false;\n}\n", "meta": {"hexsha": "5f982baab6a60fc03f5cc0eed2b854ffd9dff6a4", "size": 76465, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "ext/libigl/external/cgal/src/CGAL_Project/demo/Polyhedron/Scene_polyhedron_selection_item.cpp", "max_stars_repo_name": "liminchen/OptCuts", "max_stars_repo_head_hexsha": 
"cb85b06ece3a6d1279863e26b5fd17a5abb0834d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 187.0, "max_stars_repo_stars_event_min_datetime": "2019-01-23T04:07:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T03:44:58.000Z", "max_issues_repo_path": "ext/libigl/external/cgal/src/CGAL_Project/demo/Polyhedron/Scene_polyhedron_selection_item.cpp", "max_issues_repo_name": "xiaoxie5002/OptCuts", "max_issues_repo_head_hexsha": "1f4168fc867f47face85fcfa3a572be98232786f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8.0, "max_issues_repo_issues_event_min_datetime": "2019-03-22T13:27:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-18T13:23:23.000Z", "max_forks_repo_path": "ext/libigl/external/cgal/src/CGAL_Project/demo/Polyhedron/Scene_polyhedron_selection_item.cpp", "max_forks_repo_name": "xiaoxie5002/OptCuts", "max_forks_repo_head_hexsha": "1f4168fc867f47face85fcfa3a572be98232786f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 34.0, "max_forks_repo_forks_event_min_datetime": "2019-02-13T01:11:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T03:29:40.000Z", "avg_line_length": 34.0601336303, "max_line_length": 187, "alphanum_fraction": 0.6589289217, "num_tokens": 18739, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.5389832058771036, "lm_q2_score": 0.37022539259558657, "lm_q1q2_score": 0.19954526899827854}} {"text": "//\n// main.cpp\n// sthlm\n//\n// Created by Jilin Zhang on 2/14/17.\n// Copyright \u00a9 2017 Jilin Zhang. 
All rights reserved.\n//\n\n\n\n#include \n#include \n#include \n#include \"unordered_map\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n//#include \"omp.h\"\n#include \"kseq.h\"\n#include \n\nKSEQ_INIT(gzFile,gzread);\n\n//\nstruct Unitscore{\n long count;\n double discore;\n double monoscore;\n};\n\nstd::unordered_map kstore;\n\n\n//g++ -std=c++11 main.cpp -o rmap\n//g++ -std=c++11 main.cpp -o rmap -lboost_program_options -lz\n/*\n basemap[] works by storing a very small array that maps a base to\n its complement, by dereferencing the array with the ASCII char's\n decimal value as the index\n (int) 'A' = 65;\n (int) 'C' = 67;\n (int) 'G' = 71;\n (int) 'T' = 84;\n (int) 'a' = 97;\n (int) 'c' = 99;\n (int) 'g' = 103;\n (int) 't' = 116;\n (int) 'N' = 78;\n (int) 'U' = 85;\n (int) 'u' = 117;\n for example: basemap['A'] => basemap[65] => 'T' etc.\n */\nstatic const char basemap[255] =\n{\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 0 - 9 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 10 - 19 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 20 - 29 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 30 - 39 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 40 - 49 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 50 - 59 */\n '\\0', '\\0', '\\0', '\\0', '\\0', 'U', '\\0', 'G', '\\0', '\\0', /* 60 - 69 */\n '\\0', 'C', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', 'N', '\\0', /* 70 - 79 */\n '\\0', '\\0', '\\0', '\\0', 'A', 'A', '\\0', '\\0', '\\0', '\\0', /* 80 - 89 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', 'u', '\\0', 'g', /* 90 - 99 */\n '\\0', '\\0', '\\0', 'c', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 100 - 109 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', 'a', 'a', '\\0', '\\0', /* 110 - 119 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', 
'\\0', '\\0', '\\0', /* 120 - 129 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 130 - 139 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 140 - 149 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 150 - 159 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 160 - 169 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 170 - 179 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 180 - 189 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 190 - 199 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 200 - 209 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 210 - 219 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 220 - 229 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 230 - 239 */\n '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', '\\0', /* 240 - 249 */\n '\\0', '\\0', '\\0', '\\0', '\\0' /* 250 - 254 */\n};\n\nvoid fetch_matrix(std::unordered_map > *map, std::string str, int ind, double count);\nvoid show_matrix(std::unordered_map > *x);\nvoid kmerizing (std::string *id, std::string* seq, int kmer, int gap, std::string* name, std::unordered_map > *map);\ndouble monoscoring(std::string subseq, std::unordered_map > *map, int gap_len=0, int gap_pos=0);\ndouble discoring(std::string subseq, std::unordered_map > *map, int gap_len=0, int gap_pos=0);\ninline double singleStringScore(double x, std::string* s, long p, std::unordered_map > *m);\ninline double pairStringScore(double x, std::string* s, long sp, std::unordered_map > *m);\nvoid Matrixfile(const char*x, std::unordered_map > *y, double pseudo_count);\nint listParser(const char* list_name, std::string* names, int* length, std::unordered_map >* map,double pcount);\nstd::string 
seq_revcomp(std::string str);\nvoid GenomeMap(std::string *id, std::string *seq, long monoLen, std::string *mname, std::unordered_map >* map );\nvoid hashOut();\nvoid unCenterCorr();\n\n\n\ntemplate\nstd::ostream& operator<<(std::ostream& os, const std::vector& v){\n copy(v.begin(), v.end(), std::ostream_iterator(os, \" \"));\n return os;\n}\n\nint main(int argc, char * argv[]) {\n \n std::unordered_map > matrixmap[100];\n int motif_len[100];\n std::string names_motif[100];\n double pseudo_count=0.00001;\n int motif_counts=0;\n char list_name[100],fasta_name[100];\n int kmer_length, gap_length;\n bool kmerflag=false ,outflag=false,corrflag=false, revcomflag=false;\n \n \n boost::program_options::options_description desc(\"Allowed options:\");\n desc.add_options()\n (\"help\", \"produce help message\")\n (\"fasta\", boost::program_options::value(), \"input sequence file\")\n (\"list\", boost::program_options::value (), \"specify a list of motifs\")\n (\"motif\", boost::program_options::value(), \"specify one motif file\")\n (\"kmer,k\", boost::program_options::value(&kmer_length)->default_value(8), \"specify the kmer length\")\n (\"gap,g\", boost::program_options::value(&gap_length)->default_value(0), \"specify the gap length\")\n (\"kmerflag,f\", boost::program_options::bool_switch(&kmerflag), \"flag to switch to the kmer counting-score procedure\")\n (\"revcomp,r\",boost::program_options::bool_switch(&revcomflag),\"reverse compliment sequence\")\n (\"output,o\",boost::program_options::bool_switch(&outflag),\"output the kmer count-score\")\n (\"corr,c\",boost::program_options::bool_switch(&corrflag),\"output the correlation score (un centered cosine)\")\n ;\n boost::program_options::variables_map vm;\n boost::program_options::positional_options_description p;\n p.add(\"fasta\",-1);\n boost::program_options::store(boost::program_options::command_line_parser(argc,argv).options(desc).positional(p).run(), vm);\n boost::program_options::notify(vm);\n \n if 
(vm.count(\"list\"))\n {\n const char* temx=vm[\"list\"].as().c_str();\n motif_counts= listParser(temx,names_motif, motif_len, matrixmap, pseudo_count);\n }\n if (vm.count(\"motif\")) {\n const char* temname =vm[\"motif\"].as().c_str();\n motif_counts=1;\n strcpy(list_name,temname);\n names_motif[0]=(std::string) list_name;\n Matrixfile(temname, matrixmap, pseudo_count);\n }\n if (vm.count(\"fasta\"))\n {\n const char * fastaname=vm[\"fasta\"].as().c_str();\n motif_len[0]=matrixmap[0].at(\"A\").size();\n strcpy(fasta_name, fastaname );\n }\n if (vm.count(\"help\")) {\n std::cout << \"Usage:\" << argv[0] << \" [options]\\n\";\n std::cout << desc;\n return 0;\n }\n\n \n\n// Load fasta sequence one by one;\n gzFile fp;\n kseq_t *seq;\n int l;\n fp=gzopen(fasta_name, \"r\");\n seq=kseq_init(fp);\n while((l=kseq_read(seq)) >=0){\n std::string seqnametmp, seqtmp;\n seqnametmp= &seq->name.s[0];\n seqtmp= &seq->seq.s[0];\n if(revcomflag)\n seqtmp=seq_revcomp(seqtmp);\n \n for(int i=0;i < motif_counts ; i++){\n if(kmerflag)\n kmerizing(&seqnametmp, &seqtmp,kmer_length,gap_length,&names_motif[i], &matrixmap[i]);\n else\n GenomeMap(&seqnametmp, &seqtmp,motif_len[i],&names_motif[i], &matrixmap[i]);\n }\n }\n kseq_destroy(seq);\n gzclose(fp);\n if(kmerflag && outflag)\n hashOut();\n if(kmerflag && corrflag)\n unCenterCorr();\n\n return 0;\n}\n\n//output the kmer count-score table\nvoid hashOut(){\n for(auto it=kstore.begin(); it !=kstore.end(); it++)\n std::cout<first << \"\\t\" << it->second.count<< \"\\t\" << it->second.discore <first << \"\\t\" << it->second.count<< \"\\t\" << it->second.discore << \"\\t\" << it->second.monoscore <second.count * it->second.count;\n sumy+=it->second.discore * it->second.discore ;\n sumz+=it->second.monoscore * it->second.monoscore ;\n sumxy+= it->second.count * it->second.discore;\n sumxz+= it->second.count * it->second.monoscore;\n }\n// std::cout << sumxy/sqrt(sumx *sumy)<< \"\\t\" << sumxz/sqrt(sumx *sumz) << std::endl;\n std::cout << 
sumxy/sqrt(sumx *sumy)<< std::endl;\n}\n\n// function to score all the locus in the sequence\nvoid GenomeMap(std::string *id, std::string *seq, long monoLen, std::string *mname, std::unordered_map >* map ){\n unsigned long slen =seq->size();\n \n std::transform(seq->begin(), seq->end(), seq->begin(),::toupper);\n std::replace(seq->begin(),seq->end(),'T','U');\n \n for(long i=0; i < slen - monoLen +1 ; i++){\n std::string subseq=seq->substr(i,monoLen);\n if(subseq.find(\"N\") != std::string::npos)\n continue;\n double mscore=0,dscore=0,rc_mscore=0,rc_dscore=0;\n std::string rc_subseq=seq_revcomp(subseq);\n \n mscore=singleStringScore(mscore, &subseq, 0, map);\n dscore= pairStringScore(dscore, &subseq,0, map);\n rc_mscore=singleStringScore(rc_mscore, &rc_subseq, 0,map);\n rc_dscore=pairStringScore(rc_dscore,&rc_subseq,0,map);\n std::cout << *id << \"\\t\"<< i << \"\\t\" << std::setprecision(5) < >* map,double pcount){\n std::fstream motif_list;\n char motif_file_name[100];\n motif_list.open(list_name);\n int motif_line=0;\n \n while( motif_list.getline( motif_file_name,100) ){\n Matrixfile(motif_file_name, &map[motif_line], pcount);\n// std::cout << motif_file_name << std::endl;\n length[motif_line]=(int) (map[motif_line])[\"A\"].size();\n names[motif_line]=(std::string) motif_file_name;\n ++motif_line;\n }\n// std::cout << motif_line << \" motifs loaded \\n\"< > *y, double pseudo_count){\n std::fstream file;\n file.open(x);\n \n while(!file.is_open()){\n std::cout << \"Something wrong with the filename\" << std::endl;\n std::exit(EXIT_FAILURE);\n }\n int line_count=0;\n std::string iss;\n while(getline(file, iss)){\n line_count++;\n if(line_count <=2){\n continue;\n }\n else if(line_count >=3 and line_count <=18){\n fetch_matrix(y, iss, 1, pseudo_count);\n }\n else if(line_count >=19 && line_count<=22){\n fetch_matrix(y, iss, 2, pseudo_count);\n }\n }\n}\n\n//show the matrix and output or the screen\nvoid show_matrix(std::unordered_map > *x){\n for(auto 
it=(*x).begin(); it !=(*x).end(); it++){\n std::cout << it->first << \"\\t\";\n std::vector tem= it->second;\n for( int i=0; i< tem.size(); ++i)\n std::cout << \"\\t\" << tem[i];\n std::cout << std::endl;\n }\n}\nstd::string seq_revcomp(std::string str){\n std::string rc_seq;\n for(int i=str.size()-1; i>=0; --i){\n rc_seq += (char) basemap[(int)str[i]];\n }\n return rc_seq;\n}\n\n//store the matrix into map table\nvoid fetch_matrix(std::unordered_map > *map, std::string str, int ind, double count){\n std::vector matrix;\n std::stringstream ms(str);\n std::string units;\n while(ms >> units){\n matrix.push_back(units);\n }\n unsigned long matrix_length_raw=matrix.size();\n std::string base_name= matrix.back();\n long matrix_length_new;\n if(ind ==1){\n base_name=base_name.substr(0,2);\n matrix_length_new=matrix_length_raw-2;\n }\n else if(ind==2){\n base_name=base_name.substr(14,1);\n matrix_length_new=matrix_length_raw-1;\n }\n double new_matrix[matrix_length_new];\n std::vector hallo (new_matrix,new_matrix+ matrix_length_new);\n for(int i=0; i< matrix_length_new; i++){\n std::string::size_type sz;\n hallo[i]= std::stod(matrix[i], &sz);\n if(ind ==1)\n hallo[i] = std::log2((hallo[i]+ count)/(0.0625 + count));\n else\n hallo[i] = std::log2((hallo[i]+ count)/(0.25+ count));\n }\n map->insert({base_name,hallo});\n// return map;\n}\n\n//function to generate the kmer count-score table\nvoid kmerizing (std::string* id, std::string* seq, int kmer, int gap, std::string* name, std::unordered_map > *map){\n std::transform(seq->begin(), seq->end(), seq->begin(),::toupper);\n std::replace(seq->begin(),seq->end(),'T','U');\n unsigned long seq_length=seq->size();\n int raw_kseq_len=kmer+gap;\n\n for(unsigned long i=0; i < seq_length-raw_kseq_len +1; i++){\n std::string subseq=seq->substr(i,raw_kseq_len);\n if(subseq.find(\"N\") != std::string::npos)\n continue;\n \n if(gap >0){\n for (int ii=1; ii > *map, int gap_len, int gap_pos){\n long monoLen=(*map)[\"A\"].size();\n 
if(gap_len >0)\n subseq.replace(gap_pos,gap_len, gap_len, '-' );\n \n long kseq_prime_len= subseq.size();\n long cycle=kseq_prime_len+ monoLen;\n std::string valid_seq;\n double maximum_score=-1000;\n for(int p=0; p < cycle -1 ; p++){\n long pos;\n double score=0;\n if(p = kseq_prime_len){\n valid_seq=subseq.substr(kseq_prime_len-(p+1),p+1);\n pos=0;\n }\n else if(monoLen < kseq_prime_len){\n pos=0;\n if(p =kseq_prime_len -1){\n if(monoLen >=kseq_prime_len){\n pos=p+1-kseq_prime_len;\n if(p maximum_score)?score:maximum_score;\n }\n return maximum_score ;\n}\n// The scoring function for the dependency matrix\ndouble discoring(std::string subseq, std::unordered_map > *map, int gap_len, int gap_pos){\n if(gap_len >0)\n subseq.replace(gap_pos,gap_len,gap_len,'-');\n long monoLen=(*map)[\"A\"].size();\n long kseq_prime_len=subseq.size();\n double maximum_score=-1000;\n if(monoLen %2 ==0){ //matrix length is even\n for(int p=0; p < monoLen + kseq_prime_len -1; p++){\n std::string subs;\n double score=0;\n long pos;\n if( p< monoLen/2){\n if( p < kseq_prime_len-1){\n subs=subseq.substr(kseq_prime_len-1-p,p+1); //for kmers longer than the 1/2 matrix length\n pos=0;\n }\n else if(p >=kseq_prime_len-1){ //for kmers shorter than the 1/2 marix length\n subs=subseq;\n pos=p-kseq_prime_len+1;\n }\n score= singleStringScore(score, &subs, pos, map);\n maximum_score=(score > maximum_score)?score:maximum_score;\n }\n else if(p >= monoLen/2 && (p - monoLen/2 < kseq_prime_len -1)){\n std::string left_seq; // leftover from the kmer which has been cut for dinucleotide matrix\n long single_pos;\n if(kseq_prime_len <= monoLen && kseq_prime_len >= 0.5 * monoLen){\n if(2*(p-(monoLen/2 -1)) <=kseq_prime_len){\n subs=subseq.substr(kseq_prime_len-2*(p-(monoLen/2 -1)),2*(p-(monoLen/2 -1)));\n single_pos=p+1-subs.size();\n if(p+1 >= kseq_prime_len){\n pos=p+1-kseq_prime_len;\n left_seq=subseq.substr(0,kseq_prime_len-2*(p-(monoLen/2-1))); //if($kseq_prime_len-2*($p-($mono_len/2 -1)) > 0);\n }\n 
else{\n pos=0;\n left_seq=subseq.substr(kseq_prime_len-(p+1),p+1-2*(p-(monoLen/2-1)));\n }\n }\n else{\n subs=subseq.substr(0,2*(kseq_prime_len-(p+1-(monoLen-1)/2 )+1) );\n pos=p-kseq_prime_len+1+subs.size();\n single_pos=p+1-kseq_prime_len;\n if(p >= monoLen ){\n std::string left_seq_pre=subseq.substr(2*(kseq_prime_len-(p-(monoLen/2-1))));\n left_seq=left_seq_pre.substr(0,kseq_prime_len-(p+1-monoLen)-subs.size());\n }\n else\n left_seq=subseq.substr(2*(kseq_prime_len-(p-(monoLen/2 -1))));\n }\n// std::cout << subseq<< \"\\t\" << subs << \"\\t\" << single_pos << \"\\t\" << pos << std::endl;\n }\n else if(kseq_prime_len > monoLen){\n std::string left_seq_pre;\n if(p < monoLen-1){\n subs=subseq.substr(kseq_prime_len-2*(p-(monoLen/2 -1)),2*(p-(monoLen/2 -1)));\n if(2*(p-(monoLen/2 -1)) = monoLen-1 && p <= kseq_prime_len-1){\n subs=subseq.substr(kseq_prime_len-(p+1),monoLen);\n pos=0;\n single_pos=0;\n \n }\n else if(p > kseq_prime_len -1){\n subs=subseq.substr(0,2*(kseq_prime_len-(p-(monoLen/2 -1))));\n left_seq_pre=subseq.substr(2*(kseq_prime_len-(p-(monoLen/2 -1))));\n left_seq=left_seq_pre.substr(0, left_seq_pre.size()-(p+1-monoLen));\n pos=p-kseq_prime_len+1+subs.size();\n single_pos=p+1-kseq_prime_len;\n }\n// std::cout << subseq<< \"\\t\" << subs << \"\\t\" << single_pos << \"\\t\" << pos << std::endl;\n }\n else if(kseq_prime_len < 0.5 * monoLen){\n if(2*(p-(monoLen/2 -1)) <=kseq_prime_len){\n subs=subseq.substr(kseq_prime_len-2*(p-(monoLen/2 -1)),2*(p-(monoLen/2 -1)));\n pos=p+1-kseq_prime_len;\n left_seq=subseq.substr(0,kseq_prime_len-2*(p-(monoLen/2 -1)));\n single_pos=p+1-subs.size();\n\n }\n else{\n subs=subseq.substr(0,2*(kseq_prime_len-(p-(monoLen/2 -1))));\n pos=p-kseq_prime_len+1+subs.size();\n left_seq=subseq.substr(2*(kseq_prime_len-(p-(monoLen/2 -1))));\n single_pos=p+1-kseq_prime_len;\n }\n }\n\n score=pairStringScore(score,&subs,single_pos,map);\n// std::cout << subseq<< \"\\t\" << subs << \"\\t\" << single_pos << \"\\t\" << pos << std::endl;\n\n 
if(!left_seq.empty())\n score= singleStringScore(score, &left_seq, pos, map);\n maximum_score=(score > maximum_score)?score:maximum_score;\n }\n else if(p >= monoLen/2 + kseq_prime_len -1){\n if(p = monoLen){\n subs=subseq.substr(0,kseq_prime_len-(p-monoLen)-1);\n pos=p+1-kseq_prime_len;\n }\n score= singleStringScore(score, &subs, pos, map);\n maximum_score=(score > maximum_score)?score:maximum_score;\n }\n }\n }\n else{\n for(int p=0; p < monoLen + kseq_prime_len -1; p++){\n std::string subs;\n double score=0;\n long pos;\n if( p < (monoLen-1)/2){\n if(p < kseq_prime_len-1){\n subs=subseq.substr(kseq_prime_len-1-p,p+1); // for kmers longer than the 1/2 matrix length\n pos=0;\n }\n else if(p >=kseq_prime_len-1){ //for kmers shorter than the 1/2marix length\n subs=subseq;\n pos=p-kseq_prime_len+1;\n }\n score= singleStringScore(score, &subs, pos, map);\n maximum_score=(score > maximum_score)?score:maximum_score;\n }\n else if(p >= (monoLen-1)/2 && (p - (monoLen-1)/2 <= kseq_prime_len -1)){\n std::string left_seq;\n long single_pos;\n if(kseq_prime_len <= monoLen && kseq_prime_len >= 0.5 * (monoLen-1)){\n if(2*(p-(monoLen-1)/2 ) <=kseq_prime_len-1){\n subs=subseq.substr(kseq_prime_len-2*(p+1-(monoLen-1)/2)+1,2*(p+1-(monoLen-1)/2)-1);\n single_pos=p+1-subs.size();\n pos=0;\n if(p+1 >= kseq_prime_len){\n pos=p+1-kseq_prime_len;\n left_seq=subseq.substr(0,kseq_prime_len-2*(p-(monoLen-1)/2)-1); //if($kseq_prime_len-2*($ p-($mono_len/2 -1)) > 0);\n }\n else{\n pos=0;\n left_seq=subseq.substr(kseq_prime_len-(p+1),p+1-2*(p-(monoLen-1)/2)-1);\n }\n }\n else{\n subs=subseq.substr(0,2*(kseq_prime_len-(p+1-(monoLen-1)/2 ))+1);\n pos= p-kseq_prime_len+1+subs.size();\n single_pos=p+1-kseq_prime_len;\n if(p >= monoLen){\n std::string left_seq_pre=subseq.substr(2*(kseq_prime_len-(p+1-(monoLen-1)/2))+1);\n left_seq=left_seq_pre.substr(0,kseq_prime_len-(p+1-monoLen)-subs.size());\n }\n else{\n left_seq=subseq.substr(2*(kseq_prime_len-(p+1-(monoLen-1)/2))+1);\n }\n }\n }\n else if( 
kseq_prime_len > monoLen){\n std::string left_seq_pre;\n if(p < monoLen-1){\n subs=subseq.substr(kseq_prime_len-2*(p+1-(monoLen-1)/2)+1,2*(p+1-(monoLen-1)/2)+1);\n if(2*(p-(monoLen/2 -1)) < kseq_prime_len)\n left_seq_pre=subseq.substr(0,kseq_prime_len-2*(p+1-(monoLen-1)/2)+1);\n left_seq=left_seq_pre.substr(kseq_prime_len - (p+1));\n pos=0;\n single_pos=p+1-subs.size();\n }\n else if( p >= monoLen-1 && p <= kseq_prime_len-1){\n subs=subseq.substr(kseq_prime_len-(p+1),monoLen);\n pos=0;\n single_pos=0;\n }\n else if(p > kseq_prime_len -1){\n subs=subseq.substr(0,2*(kseq_prime_len-(p+1-(monoLen-1)/2 ))+1);\n left_seq_pre=subseq.substr(2*(kseq_prime_len-(p+1-(monoLen-1)/2 ))+1);\n left_seq=left_seq_pre.substr(0, left_seq_pre.size()-(p+1-monoLen));\n pos=p-kseq_prime_len+1+subs.size();\n single_pos=p+1-kseq_prime_len;\n }\n }\n else if(kseq_prime_len < 0.5 * (monoLen-1)){\n if(2*(p-(monoLen-1)/2 ) <=kseq_prime_len-1){\n subs=subseq.substr(kseq_prime_len-2*(p+1-(monoLen-1)/2)+1,2*(p+1-(monoLen-1)/2)+1);\n pos=p+1-kseq_prime_len;\n left_seq=subseq.substr(0,kseq_prime_len-2*(p-(monoLen-1)/2)-1);\n single_pos=p+1-subs.size();\n }\n else{\n subs=subseq.substr(0,2*(kseq_prime_len-(p+1-(monoLen-1)/2))+1);\n pos= p-kseq_prime_len+1+ subs.size();\n left_seq=subseq.substr(2*(kseq_prime_len-(p+1-(monoLen-1)/2))+1);\n single_pos=p+1-kseq_prime_len;\n }\n }\n score=pairStringScore(score,&subs,single_pos,map);\n if(!left_seq.empty())\n score= singleStringScore(score, &left_seq, pos, map);\n maximum_score=(score > maximum_score)?score:maximum_score;\n }\n else if(p >= (monoLen-1)/2 +kseq_prime_len -1){\n if(p = monoLen){\n subs=subseq.substr(0,kseq_prime_len-(p-monoLen)-1);\n pos=p+1-kseq_prime_len;\n }\n score= singleStringScore(score, &subs, pos, map);\n maximum_score=(score > maximum_score )?score:maximum_score;\n }\n }\n }\n return maximum_score;\n}\n//sub-routines for the dangling sequence\ninline double singleStringScore(double x, std::string* s, long p, std::unordered_map > 
*m){\n for(int j=0; jsize(); j++){\n std::string letter=s->substr(j,1);\n if(letter == \"-\")\n continue;\n x+= (*m)[letter][p+j];\n }\n return x;\n}\n//sub-routines for the paired sequence\ninline double pairStringScore(double x, std::string * s, long sp, std::unordered_map > *m){\n long subs_len=s->size();\n long diLen=(*m)[\"AA\"].size();\n int half_subs_len;\n if(subs_len %2==0)\n half_subs_len=0.5* subs_len ;\n else\n half_subs_len=0.5* (subs_len-1) ;\n \n for (int j=0; j <= half_subs_len -1 ; j++){\n std::string letterA=s->substr(j,1);\n std::string letterB=s->substr(subs_len-j-1,1);\n std::string letter;\n\n if(letterA ==\"-\" && letterB==\"-\")\n continue;\n else if(letterA ==\"-\" ||letterB==\"-\"){\n long spos;\n if(letterB==\"-\"){\n spos=sp +j ;\n letter=letterA;\n }//#letterA is not gap so use the postion from the subs' beginning\n else if(letterA==\"-\"){\n spos=sp+(subs_len-1)-j; //#B is not gap, so from the end of subs\n letter=letterB;\n }\n x+=(*m)[letter][spos];\n }\n else{\n letter=letterA+letterB;\n// std::cout << *s << \"\\t\" << letter << \"\\t\" << diLen-half_subs_len+j<< std::endl;\n x+=(*m)[letter][diLen-half_subs_len+j];\n }\n }\n return x;\n}\n\n\n\n", "meta": {"hexsha": "f6c00cf8b848a561d8ada4ce1375ca87d7befdb3", "size": 30345, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "main.cpp", "max_stars_repo_name": "zhjilin/rmap", "max_stars_repo_head_hexsha": "44dd7e336303181c53733a3fa30cdd274c08fa5d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2020-07-27T06:12:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-06T22:14:40.000Z", "max_issues_repo_path": "main.cpp", "max_issues_repo_name": "zhjilin/rmap", "max_issues_repo_head_hexsha": "44dd7e336303181c53733a3fa30cdd274c08fa5d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": 
"main.cpp", "max_forks_repo_name": "zhjilin/rmap", "max_forks_repo_head_hexsha": "44dd7e336303181c53733a3fa30cdd274c08fa5d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.1036931818, "max_line_length": 184, "alphanum_fraction": 0.5074971165, "num_tokens": 8549, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.538983220687684, "lm_q2_score": 0.37022537869825406, "lm_q1q2_score": 0.19954526699110245}} {"text": "/*\n\nCopyright (c) 2005-2021, University of Oxford.\nAll rights reserved.\n\nUniversity of Oxford means the Chancellor, Masters and Scholars of the\nUniversity of Oxford, having an administrative office at Wellington\nSquare, Oxford OX1 2JD, UK.\n\nThis file is part of Chaste.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n * Neither the name of the University of Oxford nor the names of its\n contributors may be used to endorse or promote products derived from this\n software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE\nGOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\nHOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\nLIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\nOF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n*/\n\n\n#ifndef EXTENDEDBIDOMAINTISSUE_HPP_\n#define EXTENDEDBIDOMAINTISSUE_HPP_\n\n#include \"ChasteSerialization.hpp\"\n#include \n\n#include \n#include \"UblasMatrixInclude.hpp\"\n\n#include \"AbstractStimulusFunction.hpp\"\n#include \"AbstractStimulusFactory.hpp\"\n#include \"AbstractConductivityTensors.hpp\"\n\n#include \"AbstractCardiacTissue.hpp\"\n\n/**\n * Class that provides functionalities to specify a tissue within the context of the extended bidomain framework.\n *\n * The extended bidomain equations are of the form:\n *\n * - div ( sigma_i1 grad Phi_1 ) + Am1*Cm1*d(phi_1)/dt - Am2*Cm2*d(phi_e)/dt + Am1*I_ion1 - Am1*I_stim1 + Amgap*G_gap*(phi_1 - phi_2)\n * - div ( sigma_i2 grad Phi_2 ) + Am2*Cm2*d(phi_2)/dt - Am2*Cm2*d(phi_e)/dt + Am2*I_ion2 - Am2*I_stim2 - Amgap*G_gap*(phi_1 - phi_2)\n * div ( sigma_e grad Phi_e ) + div ( sigma_i1 grad Phi_1 ) + div ( sigma_i2 grad Phi_2 ) = I_stim\n *\n * The unknowns are:\n *\n * - Phi_1 (intracellular potential of the first cell).\n * - Phi_2 (intracellular potential of the second cell).\n * - Phi_e (extracellular potential).\n *\n * Am1, Am2 and Amgap are surface-to-volume ratios for first cell, second cell and gap junction. 
User can set their values.\n * Cm1 and cm2 are capaciatnce values of first and second cell respectively\n * sigma_i1 and sigma_i2 are intracellular conductivity tensors of first and second cell respectively\n * sigma_e is the conductivity tensor for the extracellular space\n * G_gap is the conductance (in ms/cm2) of the gap junction channel.\n *\n *\n *\n */\ntemplate \nclass ExtendedBidomainTissue : public virtual AbstractCardiacTissue\n{\nprivate:\n friend class TestExtendedBidomainTissue; // for testing.\n\n /** Needed for serialization. */\n friend class boost::serialization::access;\n /**\n * Archive the member variables.\n *\n * @param archive\n * @param version\n */\n template\n void serialize(Archive & archive, const unsigned int version)\n {\n archive & boost::serialization::base_object >(*this);\n // Conductivity tensors are dealt with by HeartConfig, and the caches get regenerated.\n\n archive & mAmFirstCell;\n archive & mAmSecondCell;\n archive & mAmGap;\n archive & mCmFirstCell;\n archive & mCmSecondCell;\n archive & mGGap;\n archive & mUserSuppliedExtracellularStimulus;\n }\n\n /** Intracellular conductivity tensors for the second cell.*/\n AbstractConductivityTensors *mpIntracellularConductivityTensorsSecondCell;\n\n /**\n * Stores the values of the conductivities for the second cell. Accessible via get and set methods. The problem class will set it\n * This variable is a convenient interface for other classes. It is used to fill in mpIntracellularConductivityTensorsSecondCell.\n */\n c_vector mIntracellularConductivitiesSecondCell;\n\n\n /** Extracellular conductivity tensors. 
*/\n AbstractConductivityTensors *mpExtracellularConductivityTensors;\n\n /**\n * Cache containing all the stimulus currents for each node,\n * replicated over all processes.\n */\n ReplicatableVector mExtracellularStimulusCacheReplicated;\n\n /**\n * Cache containing all the stimulus currents for each node,\n * replicated over all processes.\n */\n ReplicatableVector mGgapCacheReplicated;\n\n /**\n * Cache containing all the ionic currents for each node for the seconed cell,\n * replicated over all processes.\n */\n ReplicatableVector mIionicCacheReplicatedSecondCell;\n\n /**\n * Cache containing all the stimulus currents for each node for the second cell,\n * replicated over all processes.\n */\n ReplicatableVector mIntracellularStimulusCacheReplicatedSecondCell;\n\n /** The vector of cells (the second one). Distributed. */\n std::vector< AbstractCardiacCellInterface* > mCellsDistributedSecondCell;\n\n /** The vector of stimuli for the extracellular stimulus. Distributed. */\n std::vector > mExtracellularStimuliDistributed;\n\n /** The vector of gap junction conductances. Distributed*/\n std::vector mGgapDistributed;\n\n /**the Am for the first cell, set by the problem class and picked up by the assembler*/\n double mAmFirstCell;\n /**the Am for the second cell, set by the problem class and picked up by the assembler*/\n double mAmSecondCell;\n /**the Am for the gap junction, set by the problem class and picked up by the assembler*/\n double mAmGap;\n /**the Cm for the first cell, set by the problem class and picked up by the assembler*/\n double mCmFirstCell;\n /**the Cm for the second cell, set by the problem class and picked up by the assembler*/\n double mCmSecondCell;\n /**the conductance of the gap junction, in mS/cm2. Set by the problem class and picked up by the assembler*/\n double mGGap;\n\n /**\n * Whether the extracellular stimulus that is passed in was supplied by the user or not\n * (it could be the default zero implementation). 
Initialise to false (user did not pass in anything).\n */\n bool mUserSuppliedExtracellularStimulus;\n\n /**\n * Convenience method for extracellular conductivity tensors creation\n */\n void CreateExtracellularConductivityTensors();\n\n /**\n * The parent class AbstractCardiacTissue has a method UpdateCaches that updates some caches of general use.\n * This method updates more caches that are specific to extended bidomain problems, namely:\n *\n * - Iionic and intracellular stimulus for the second cell\n * - Extracellular stimulus\n * - Gap junction conductivities (Ggap)\n *\n * It is typically called right after the UpdateCaches method in the parent class.\n *\n * @param globalIndex global index of the entry to update\n * @param localIndex local index of the entry to update\n * @param nextTime the next PDE time point, at which to evaluate the stimulus current\n */\n void UpdateAdditionalCaches(unsigned globalIndex, unsigned localIndex, double nextTime);\n\n /**\n * The parent class AbstractCardiacTissue has a method ReplicateCaches that replicates some caches of general use.\n * This method replicates more caches that are specific to extended bidomain problems, namely:\n *\n * - Iionic and intracellular stimulus for the second cell\n * - Extracellular stimulus\n * - Gap junction conductivities (Ggap)\n *\n * It is typically called right after the ReplicateCaches method in the parent class.\n */\n void ReplicateAdditionalCaches();\n\n /** vector of regions for Ggap heterogeneities*/\n std::vector > > mGgapHeterogeneityRegions;\n /**values of heterogeneous Ggaps corresponding to mGgapHeterogeneityRegions. 
This has the same size as mGgapHeterogeneityRegions*/\n std::vector mGgapValues;\n\npublic:\n\n /**\n * Constructor sets up extracellular conductivity tensors.\n * @param pCellFactory factory to pass on to the base class constructor\n * @param pCellFactorySecondCell factory to pass on to the base class constructor for the second cell\n * @param pExtracellularStimulusFactory factory for creating extracellular stimuli\n */\n ExtendedBidomainTissue(AbstractCardiacCellFactory* pCellFactory, AbstractCardiacCellFactory* pCellFactorySecondCell, AbstractStimulusFactory* pExtracellularStimulusFactory);\n\n /**\n * Archiving constructor\n * @param rCellsDistributed local cell models (recovered from archive)\n * @param rSecondCellsDistributed local cell models for second cells (recovered from archive)\n * @param rExtraStimuliDistributed local extracellular stimuli (recovered from archive)\n * @param rGgapsDistributed distributed Ggaps (recovered from archive)\n * @param pMesh a pointer to the AbstractTetrahedral mesh (recovered from archive).\n * @param intracellularConductivitiesSecondCell a vector with the orthotropic conductivities for the second cell (this is needed because the second cell values may not be taken from HeartConfig as the the ones for the first cell are).\n */\n ExtendedBidomainTissue(std::vector & rCellsDistributed,\n std::vector & rSecondCellsDistributed,\n std::vector > & rExtraStimuliDistributed,\n std::vector& rGgapsDistributed,\n AbstractTetrahedralMesh* pMesh,\n c_vector intracellularConductivitiesSecondCell);\n\n /**\n * Destructor\n */\n virtual ~ExtendedBidomainTissue();\n\n /**\n * Sets the value of the conductivities for the second cell.\n *\n * @param conductivities the conductivities to be set.\n */\n void SetIntracellularConductivitiesSecondCell(c_vector conductivities);\n\n /**\n * @return a pointer to the second cell\n *\n * @param globalIndex the global index in the mesh\n */\n AbstractCardiacCellInterface* GetCardiacSecondCell( unsigned 
globalIndex );\n\n\n /**\n * @return a pointer to the extracellular stimulus. Useful for testing\n *\n * @param globalIndex the global index in the mesh\n */\n boost::shared_ptr GetExtracellularStimulus( unsigned globalIndex );\n\n /**\n * @return a reference to the vector of distributed cells (second cell). Needed for archiving.\n */\n const std::vector& rGetSecondCellsDistributed() const;\n\n /**\n * @return a reference to the vector of distributed values of Ggaps. Needed for archiving.\n */\n const std::vector& rGetGapsDistributed() const;\n\n\n /**\n * @return a reference to the vector of distributed extracellular stimuli. Needed for archiving.\n */\n const std::vector >& rGetExtracellularStimulusDistributed() const;\n\n\n /**\n * @return the intracellular conductivities of the second cell\n */\n c_vector GetIntracellularConductivitiesSecondCell() const;\n\n /**\n * Integrate the cell ODEs and update ionic current etc for each of the\n * cells, between the two times provided. This is a re-implementation from the version in the base class.\n *\n * @param existingSolution the current voltage solution vector\n * @param time the current simulation time\n * @param nextTime when to simulate the cells until\n * @param updateVoltage (unused here)\n */\n virtual void SolveCellSystems(Vec existingSolution, double time, double nextTime, bool updateVoltage = false);\n\n /**\n * Convenience method for intracellular conductivity tensors creation for the second cell\n */\n void CreateIntracellularConductivityTensorSecondCell();\n\n /**\n * Set the values of mCellHeterogeneityRegions and mGgapValues for the heterogeneities of Ggap.\n *\n * @param rGgapHeterogeneityRegions a vector of (pointers to) heterogeneity regions for gap junctions\n * @param rGgapValues a vector (of the same size as rGgapHeterogeneityRegions) with the respective values of Ggap for every region.\n */\n void SetGgapHeterogeneities ( std::vector > > & rGgapHeterogeneityRegions, std::vector rGgapValues);\n\n 
/**\n * Create the pattern of Ggap across the mesh based upon mCellHeterogeneityRegions, mGgapValues and mGgap. This will fill in mGgapDistributed.\n * It will set mGgap everywhere except in the areas mCellHeterogeneityRegions[i] where it will put mGgapValues[i] instead.\n * If mCellHeterogeneityRegions (and mGgapValues) are empty, mGgap will be set everywhere.\n */\n void CreateGGapConductivities();\n\n /**\n * @return the extracellular conductivity tensor for the given element\n * @param elementIndex index of the element of interest\n */\n const c_matrix& rGetExtracellularConductivityTensor(unsigned elementIndex);\n\n /**\n * @return the intracellular conductivity tensor for the given element for tehs econd cell\n * @param elementIndex index of the element of interest\n */\n const c_matrix& rGetIntracellularConductivityTensorSecondCell(unsigned elementIndex);\n\n\n /** @return the entire ionic current cache for the second cell*/\n ReplicatableVector& rGetIionicCacheReplicatedSecondCell();\n\n /** @return the entire stimulus current cache for the second cell*/\n ReplicatableVector& rGetIntracellularStimulusCacheReplicatedSecondCell();\n\n /** @return the extracellular stimulus*/\n ReplicatableVector& rGetExtracellularStimulusCacheReplicated();\n\n /** @return the values of ggap*/\n ReplicatableVector& rGetGgapCacheReplicated();\n\n /**\n * @return Am for the first cell\n */\n double GetAmFirstCell();\n\n /**\n * @return Am for the second cell\n */\n double GetAmSecondCell();\n\n /**\n * @return Am for the gap junction\n */\n double GetAmGap();\n\n /**\n * @return Cm for the first cell\n */\n double GetCmFirstCell();\n\n /**\n * @return Cm for the second cell\n */\n double GetCmSecondCell();\n\n /**\n * @return the conducatnce of the gap junction (mGGap)\n */\n double GetGGap();\n\n /**\n * @param value Am for the first cell\n */\n void SetAmFirstCell(double value);\n\n /**\n * @param value Am for the second cell\n */\n void SetAmSecondCell(double value);\n\n /**\n 
* @param value Am for the gap junction\n */\n void SetAmGap(double value);\n\n /**\n * @param value Cm for the first cell\n */\n void SetCmFirstCell(double value);\n\n /**\n * @param value Cm for the first cell\n */\n void SetCmSecondCell(double value);\n\n /**\n * @param value conductance, in mS of the gap junction\n */\n void SetGGap(double value);\n\n /**\n * This method gives access to the member variable mUserSuppliedExtracellularStimulus,\n * which is false by default but turned true if the user supplies an extracellular stimulus\n * in any form.\n *\n * @return true if the user supplied an extracellular stimulus.\n */\n bool HasTheUserSuppliedExtracellularStimulus();\n\n /**\n * This method allows modifications of the mUserSuppliedExtracellularStimulus flag (false by default).\n * Other classes (e.g., Problem classes) can use this method to tell the Tissue that the user\n * specified an extracellular stimulus.\n *\n * @param flag ; true if you want to tell the Tissue object that the user supplied an extracellular stimulus explicitly\n */\n void SetUserSuppliedExtracellularStimulus(bool flag);\n\n\n /**\n * This method is the equivalent of SaveCardiacCells in the abstract class but save both cells of the extended bidomain tissue\n *\n * @param archive the master archive; cells will actually be written to the process-specific archive.\n * @param version\n */\n template\n void SaveExtendedBidomainCells(Archive & archive, const unsigned int version) const\n {\n Archive& r_archive = *ProcessSpecificArchive::Get();\n const std::vector & r_cells_distributed = this->rGetCellsDistributed();\n const std::vector & r_cells_distributed_second_cell = rGetSecondCellsDistributed();\n const std::vector & r_ggaps_distributed = rGetGapsDistributed();\n\n r_archive & this->mpDistributedVectorFactory; // Needed when loading\n const unsigned num_cells = r_cells_distributed.size();\n r_archive & num_cells;\n for (unsigned i=0; i(r_cells_distributed[i]);\n bool is_dynamic = 
(p_entity != NULL);\n r_archive & is_dynamic;\n if (is_dynamic)\n {\n #ifdef CHASTE_CAN_CHECKPOINT_DLLS\n ///\\todo Dynamically loaded cell models aren't saved to archive in extended Bidomain\n NEVER_REACHED;\n //r_archive & p_entity->GetLoader()->GetLoadableModulePath();\n #else\n // We should have thrown an exception before this point\n NEVER_REACHED;\n #endif // CHASTE_CAN_CHECKPOINT_DLLS\n }\n r_archive & r_cells_distributed[i];\n r_archive & r_cells_distributed_second_cell[i];\n r_archive & r_ggaps_distributed[i];\n }\n }\n\n /**\n * Load our tissue from an archive. This is the equivalent of LoadCardiacCells in the abstract class.\n * it loads the two cells instead of only one.\n *\n * Handles the checkpoint migration case, deleting loaded cells immediately if they are\n * not local to this process.\n *\n * @param archive the process-specific archive to load from\n * @param version archive version\n * @param rCells vector to fill in with pointers to local cells\n * @param rSecondCells vector to fill in with pointers to the second cells\n * @param rGgaps vector of values of gap junctions\n * @param pMesh the mesh, so we can get at the node permutation, if any\n */\n template\n static void LoadExtendedBidomainCells(Archive & archive, const unsigned int version,\n std::vector& rCells,\n std::vector& rSecondCells,\n std::vector& rGgaps,\n AbstractTetrahedralMesh* pMesh)\n {\n assert(pMesh!=NULL);\n DistributedVectorFactory* p_factory;\n archive & p_factory;\n unsigned num_cells;\n archive & num_cells;\n rCells.resize(p_factory->GetLocalOwnership());\n rSecondCells.resize(p_factory->GetLocalOwnership());\n rGgaps.resize(p_factory->GetLocalOwnership());\n #ifndef NDEBUG\n // Paranoia\n assert(rCells.size() == rSecondCells.size());\n for (unsigned i=0; iGetOriginalFactory() ? 
p_factory->GetOriginalFactory()->GetLow() : p_factory->GetLow();\n\n for (unsigned local_index=0; local_indexGetLow();\n bool local = p_factory->IsGlobalIndexLocal(global_index);\n\n bool is_dynamic;\n archive & is_dynamic;\n\n if (is_dynamic)\n {\n #ifdef CHASTE_CAN_CHECKPOINT_DLLS\n ///\\todo Dynamically loaded cell models aren't loaded from archive in extended Bidomain\n NEVER_REACHED;\n // Ensure the shared object file for this cell model is loaded.\n // We need to do this here, rather than in the class' serialization code,\n // because that code won't be available until this is done...\n// std::string shared_object_path;\n// archive & shared_object_path;\n// DynamicModelLoaderRegistry::Instance()->GetLoader(shared_object_path);\n #else\n // Could only happen on Mac OS X, and will probably be trapped earlier.\n NEVER_REACHED;\n #endif // CHASTE_CAN_CHECKPOINT_DLLS\n }\n\n AbstractCardiacCellInterface* p_cell;\n AbstractCardiacCellInterface* p_second_cell;\n double g_gap;\n archive & p_cell;\n archive & p_second_cell;\n archive & g_gap;\n if (local)\n {\n rCells[new_local_index] = p_cell; // Add to local cells\n rSecondCells[new_local_index] = p_second_cell;\n rGgaps[new_local_index] = g_gap;\n }\n else\n {\n //not sure how to cover this, we are already looping over local cells...\n NEVER_REACHED;\n // Non-local real cell, so free the memory.\n // delete p_cell;\n // delete p_second_cell;\n }\n }\n }\n\n /**\n * This method is the equivalent of SaveCardiacCells but Saves the extracellular stimulus instead\n *\n * @param archive the master archive; cells will actually be written to the process-specific archive.\n * @param version\n */\n template\n void SaveExtracellularStimulus(Archive & archive, const unsigned int version) const\n {\n Archive& r_archive = *ProcessSpecificArchive::Get();\n const std::vector > & r_stimulus_distributed = rGetExtracellularStimulusDistributed();\n r_archive & this->mpDistributedVectorFactory; // Needed when loading\n const unsigned 
num_cells = r_stimulus_distributed.size();\n r_archive & num_cells;\n for (unsigned i=0; i\n void LoadExtracellularStimulus(Archive & archive, const unsigned int version,\n std::vector >& rStimuli,\n AbstractTetrahedralMesh* pMesh)\n {\n\n DistributedVectorFactory* p_factory;\n archive & p_factory;\n unsigned num_cells;\n archive & num_cells;\n rStimuli.resize(p_factory->GetLocalOwnership());\n#ifndef NDEBUG\n // Paranoia\n for (unsigned i=0; iGetOriginalFactory() ? p_factory->GetOriginalFactory()->GetLow() : p_factory->GetLow();\n\n assert(pMesh!=NULL);\n //unsigned num_cells = pMesh->GetNumNodes();\n for (unsigned local_index=0; local_indexGetLow();\n bool local = p_factory->IsGlobalIndexLocal(global_index);\n\n boost::shared_ptr p_stim;\n archive & p_stim;//get from archive\n\n if (local)\n {\n rStimuli[new_local_index] = p_stim; // Add stimulus to local cells\n }\n //otherwise we should delete, but I think shared pointers delete themselves?\n }\n }\n};\n\n // Declare identifier for the serializer\n #include \"SerializationExportWrapper.hpp\"\n EXPORT_TEMPLATE_CLASS_SAME_DIMS(ExtendedBidomainTissue)\n\n namespace boost\n {\n namespace serialization\n {\n\n template\n inline void save_construct_data(\n Archive & ar, const ExtendedBidomainTissue * t, const unsigned int file_version)\n {\n //archive the conductivity tensor of the second cell (which may not be dealt with by heartconfig)\n c_vector intracellular_conductivities_second_cell = t->GetIntracellularConductivitiesSecondCell();\n //note that simple: ar & intracellular_conductivities_second_cell may not be liked by some boost versions\n for (unsigned i = 0; i < SPACE_DIM; i++)\n {\n ar & intracellular_conductivities_second_cell(i);\n }\n\n const AbstractTetrahedralMesh* p_mesh = t->pGetMesh();\n ar & p_mesh;\n\n // Don't use the std::vector serialization for cardiac cells, so that we can load them\n // more cleverly when migrating checkpoints.\n t->SaveExtendedBidomainCells(ar, file_version);\n 
t->SaveExtracellularStimulus(ar, file_version);\n\n // Creation of conductivity tensors are called by constructor and uses HeartConfig. So make sure that it is\n // archived too (needs doing before construction so appears here instead of usual archive location).\n HeartConfig* p_config = HeartConfig::Instance();\n ar & *p_config;\n ar & p_config;\n }\n\n /**\n * Allow us to not need a default constructor, by specifying how Boost should\n * instantiate an instance (using existing constructor)\n */\n template\n inline void load_construct_data(\n Archive & ar, ExtendedBidomainTissue * t, const unsigned int file_version)\n {\n //Load conductivities of the conductivity of the second cell.\n c_vector intra_cond_second_cell;\n //note that simple: ar & intra_cond_second_cell may not be liked by some boost versions\n for (unsigned i = 0; i < SPACE_DIM; i++)\n {\n double cond;\n ar & cond;\n intra_cond_second_cell(i) = cond;\n }\n\n\n std::vector cells_distributed;\n std::vector cells_distributed_second_cell;\n std::vector > extra_stim;\n std::vector g_gaps;\n AbstractTetrahedralMesh* p_mesh;\n ar & p_mesh;\n\n // Load only the cells we actually own\n t->LoadExtendedBidomainCells(\n *ProcessSpecificArchive::Get(), file_version, cells_distributed, cells_distributed_second_cell, g_gaps, p_mesh);\n\n t->LoadExtracellularStimulus(\n *ProcessSpecificArchive::Get(), file_version, extra_stim, p_mesh);\n\n // CreateIntracellularConductivityTensor() is called by AbstractCardiacTissue constructor and uses HeartConfig.\n // (as does CreateExtracellularConductivityTensor). 
So make sure that it is\n // archived too (needs doing before construction so appears here instead of usual archive location).\n HeartConfig* p_config = HeartConfig::Instance();\n ar & *p_config;\n ar & p_config;\n\n ::new(t)ExtendedBidomainTissue(cells_distributed, cells_distributed_second_cell, extra_stim, g_gaps, p_mesh, intra_cond_second_cell);\n }\n }\n } // namespace ...\n\n#endif /*EXTENDEDBIDOMAINTISSUE_HPP_*/\n", "meta": {"hexsha": "c8e0609243796ce52c296fdf002c83bb07e32696", "size": 29619, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "heart/src/tissue/ExtendedBidomainTissue.hpp", "max_stars_repo_name": "mdp19pn/Chaste", "max_stars_repo_head_hexsha": "f7b6bafa64287d567125b587b29af6d8bd7aeb90", "max_stars_repo_licenses": ["Apache-2.0", "BSD-3-Clause"], "max_stars_count": 100.0, "max_stars_repo_stars_event_min_datetime": "2015-02-23T08:32:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T11:39:26.000Z", "max_issues_repo_path": "heart/src/tissue/ExtendedBidomainTissue.hpp", "max_issues_repo_name": "mdp19pn/Chaste", "max_issues_repo_head_hexsha": "f7b6bafa64287d567125b587b29af6d8bd7aeb90", "max_issues_repo_licenses": ["Apache-2.0", "BSD-3-Clause"], "max_issues_count": 11.0, "max_issues_repo_issues_event_min_datetime": "2017-06-14T13:48:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-10T10:42:07.000Z", "max_forks_repo_path": "heart/src/tissue/ExtendedBidomainTissue.hpp", "max_forks_repo_name": "mdp19pn/Chaste", "max_forks_repo_head_hexsha": "f7b6bafa64287d567125b587b29af6d8bd7aeb90", "max_forks_repo_licenses": ["Apache-2.0", "BSD-3-Clause"], "max_forks_count": 53.0, "max_forks_repo_forks_event_min_datetime": "2015-02-23T13:52:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T18:57:35.000Z", "avg_line_length": 41.8939179632, "max_line_length": 238, "alphanum_fraction": 0.6877004625, "num_tokens": 6697, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.523420348936324, "lm_q2_score": 0.3812195803163618, "lm_q1q2_score": 0.1995380857505491}} {"text": "//==================================================================================================\n/*!\n @file\n\n @copyright 2016 NumScale SAS\n\n Distributed under the Boost Software License, Version 1.0.\n (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)\n*/\n//==================================================================================================\n#ifndef BOOST_SIMD_FUNCTION_FAST_HPP_INCLUDED\n#define BOOST_SIMD_FUNCTION_FAST_HPP_INCLUDED\n\n#include \n#include \n#include \n#if defined(DOXYGEN_ONLY)\nnamespace boost { namespace simd\n{\n /*!\n @ingroup group-decorator\n\n calls a version of the functor\n that can do some agressive optimization at the cost of certain\n properties or corner cases of the original functor.\n\n These losses are of the `fast_math` kind.\n\n @par Semantic\n\n @code\n T r = fast_(func)(< func parameters >);\n @endcode\n\n **/\n template auto fast_(T const& x) {}\n\n} }\n#endif\n\nnamespace boost { namespace simd\n{\n struct fast_tag : decorator_\n {\n using parent = decorator_;\n };\n\n const detail::decorator fast_ = {};\n} }\n\n#endif\n", "meta": {"hexsha": "55ee4d1d693d2911452349f5e9130c0c1fd9c575", "size": 1252, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "third_party/boost/simd/function/fast.hpp", "max_stars_repo_name": "xmar/pythran", "max_stars_repo_head_hexsha": "dbf2e8b70ed1e4d4ac6b5f26ead4add940a72592", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2018-02-20T11:21:12.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-12T13:45:09.000Z", "max_issues_repo_path": "third_party/boost/simd/function/fast.hpp", "max_issues_repo_name": "xmar/pythran", "max_issues_repo_head_hexsha": "dbf2e8b70ed1e4d4ac6b5f26ead4add940a72592", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 
null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "third_party/boost/simd/function/fast.hpp", "max_forks_repo_name": "xmar/pythran", "max_forks_repo_head_hexsha": "dbf2e8b70ed1e4d4ac6b5f26ead4add940a72592", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2017-12-12T12:29:52.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-08T15:55:25.000Z", "avg_line_length": 24.0769230769, "max_line_length": 100, "alphanum_fraction": 0.5974440895, "num_tokens": 267, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.523420348936324, "lm_q2_score": 0.38121956625614994, "lm_q1q2_score": 0.19953807839114812}} {"text": "#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\n#include \n#include \n\n#include \n\n#include \n#include \n\nnamespace hdl_people_detection {\n\n/**\n * @brief A nodelet to detect people using a 3D LIDAR\n */\nclass HdlPeopleDetectionNodelet : public nodelet::Nodelet {\npublic:\n using PointT = pcl::PointXYZI;\n\n HdlPeopleDetectionNodelet() {}\n virtual ~HdlPeopleDetectionNodelet() {}\n\n void onInit() override {\n nh = getNodeHandle();\n mt_nh = getMTNodeHandle();\n private_nh = getPrivateNodeHandle();\n\n initialize_params();\n\n // publishers\n backsub_points_pub = private_nh.advertise(\"backsub_points\", 5);\n cluster_points_pub = private_nh.advertise(\"cluster_points\", 5);\n human_points_pub = private_nh.advertise(\"human_points\", 5);\n detection_markers_pub = private_nh.advertise(\"detection_markers\", 5);\n\n backsub_voxel_points_pub = private_nh.advertise(\"backsub_voxel_points\", 1, true);\n backsub_voxel_markers_pub = private_nh.advertise(\"backsub_voxel_marker\", 1, true);\n\n clusters_pub = private_nh.advertise(\"clusters\", 10);\n\n 
// subscribers\n globalmap_sub = nh.subscribe(\"/globalmap\", 1, &HdlPeopleDetectionNodelet::globalmap_callback, this);\n if(private_nh.param(\"static_sensor\", false)) {\n static_points_sub = mt_nh.subscribe(\"/velodyne_points\", 32, &HdlPeopleDetectionNodelet::callback_static, this);\n } else {\n odom_sub.reset(new message_filters::Subscriber(mt_nh, \"/odom\", 20));\n points_sub.reset(new message_filters::Subscriber(mt_nh, \"/velodyne_points\", 20));\n sync.reset(new message_filters::TimeSynchronizer(*odom_sub, *points_sub, 20));\n sync->registerCallback(boost::bind(&HdlPeopleDetectionNodelet::callback, this, _1, _2));\n }\n }\n\nprivate:\n /**\n * @brief initialize_params\n */\n void initialize_params() {\n double downsample_resolution = private_nh.param(\"downsample_resolution\", 0.1);\n boost::shared_ptr> voxelgrid(new pcl::VoxelGrid());\n voxelgrid->setLeafSize(downsample_resolution, downsample_resolution, downsample_resolution);\n downsample_filter = voxelgrid;\n\n NODELET_INFO(\"create people detector\");\n detector.reset(new PeopleDetector(private_nh));\n }\n\n /**\n * @brief in case the sensor is fixed\n * @param points_msg\n */\n void callback_static(const sensor_msgs::PointCloud2ConstPtr& points_msg) {\n if(!globalmap) {\n NODELET_INFO(\"constructing globalmap from a points msg\");\n globalmap_callback(points_msg);\n NODELET_INFO(\"done\");\n return;\n }\n\n pcl::PointCloud::Ptr cloud(new pcl::PointCloud());\n pcl::fromROSMsg(*points_msg, *cloud);\n if(cloud->empty()) {\n NODELET_ERROR(\"cloud is empty!!\");\n return;\n }\n\n // downsampling\n pcl::PointCloud::Ptr downsampled(new pcl::PointCloud());\n downsample_filter->setInputCloud(cloud);\n downsample_filter->filter(*downsampled);\n downsampled->header = cloud->header;\n cloud = downsampled;\n\n // background subtraction and people detection\n auto filtered = backsub->filter(cloud);\n auto clusters = detector->detect(filtered);\n\n publish_msgs(points_msg->header.stamp, filtered, clusters);\n }\n\n 
/**\n * @brief callback\n * @param odom_msg sensor pose\n * @param points_msg point cloud\n */\n void callback(const nav_msgs::OdometryConstPtr& odom_msg, const sensor_msgs::PointCloud2ConstPtr& points_msg) {\n if(!globalmap) {\n NODELET_ERROR(\"globalmap has not been received!!\");\n return;\n }\n\n pcl::PointCloud::Ptr cloud(new pcl::PointCloud());\n pcl::fromROSMsg(*points_msg, *cloud);\n if(cloud->empty()) {\n NODELET_ERROR(\"cloud is empty!!\");\n return;\n }\n\n // downsampling\n pcl::PointCloud::Ptr downsampled(new pcl::PointCloud());\n downsample_filter->setInputCloud(cloud);\n downsample_filter->filter(*downsampled);\n downsampled->header = cloud->header;\n cloud = downsampled;\n\n // transform #cloud into the globalmap space\n const auto& position = odom_msg->pose.pose.position;\n const auto& orientation = odom_msg->pose.pose.orientation;\n Eigen::Matrix4f transform = Eigen::Matrix4f::Identity();\n transform.block<3, 1>(0, 3) = Eigen::Vector3f(position.x, position.y, position.z);\n transform.block<3, 3>(0, 0) = Eigen::Quaternionf(orientation.w, orientation.x, orientation.y, orientation.z).toRotationMatrix();\n pcl::transformPointCloud(*cloud, *cloud, transform);\n cloud->header.frame_id = globalmap->header.frame_id;\n\n // background subtraction and people detection\n auto filtered = backsub->filter(cloud);\n auto clusters = detector->detect(filtered);\n\n publish_msgs(points_msg->header.stamp, filtered, clusters);\n }\n\n void globalmap_callback(const sensor_msgs::PointCloud2ConstPtr& points_msg) {\n NODELET_INFO(\"globalmap received!\");\n pcl::PointCloud::Ptr cloud(new pcl::PointCloud());\n pcl::fromROSMsg(*points_msg, *cloud);\n globalmap = cloud;\n\n NODELET_INFO(\"background subtractor constructed\");\n double backsub_resolution = private_nh.param(\"backsub_resolution\", 0.2);\n int backsub_occupancy_thresh = private_nh.param(\"backsub_occupancy_thresh\", 2);\n\n backsub.reset(new BackgroundSubtractor());\n backsub->setVoxelSize(backsub_resolution, 
backsub_resolution, backsub_resolution);\n backsub->setOccupancyThresh(backsub_occupancy_thresh);\n backsub->setBackgroundCloud(globalmap);\n\n backsub_voxel_markers_pub.publish(backsub->create_voxel_marker());\n backsub_voxel_points_pub.publish(backsub->voxels());\n }\n\nprivate:\n /**\n * @brief publish messages\n * @param stamp\n * @param filtered\n * @param clusters\n */\n void publish_msgs(const ros::Time& stamp, const pcl::PointCloud::Ptr& filtered, const std::vector& clusters) const {\n if(clusters_pub.getNumSubscribers()) {\n hdl_people_tracking::ClusterArrayPtr clusters_msg(new hdl_people_tracking::ClusterArray());\n clusters_msg->header.frame_id = globalmap->header.frame_id;\n clusters_msg->header.stamp = stamp;\n\n clusters_msg->clusters.resize(clusters.size());\n for(int i=0; iclusters[i];\n cluster_msg.is_human = clusters[i]->is_human;\n cluster_msg.min_pt.x = clusters[i]->min_pt.x();\n cluster_msg.min_pt.y = clusters[i]->min_pt.y();\n cluster_msg.min_pt.z = clusters[i]->min_pt.z();\n\n cluster_msg.max_pt.x = clusters[i]->max_pt.x();\n cluster_msg.max_pt.y = clusters[i]->max_pt.y();\n cluster_msg.max_pt.z = clusters[i]->max_pt.z();\n\n cluster_msg.size.x = clusters[i]->size.x();\n cluster_msg.size.y = clusters[i]->size.y();\n cluster_msg.size.z = clusters[i]->size.z();\n\n cluster_msg.centroid.x = clusters[i]->centroid.x();\n cluster_msg.centroid.y = clusters[i]->centroid.y();\n cluster_msg.centroid.z = clusters[i]->centroid.z();\n }\n\n clusters_pub.publish(clusters_msg);\n }\n\n if(backsub_points_pub.getNumSubscribers()) {\n backsub_points_pub.publish(filtered);\n }\n\n if(cluster_points_pub.getNumSubscribers()) {\n pcl::PointCloud::Ptr accum(new pcl::PointCloud());\n for(const auto& cluster : clusters) {\n std::copy(cluster->cloud->begin(), cluster->cloud->end(), std::back_inserter(accum->points));\n }\n accum->width = accum->size();\n accum->height = 1;\n accum->is_dense = false;\n\n accum->header.stamp = filtered->header.stamp;\n 
accum->header.frame_id = globalmap->header.frame_id;\n\n cluster_points_pub.publish(accum);\n }\n\n if(human_points_pub.getNumSubscribers()) {\n pcl::PointCloud::Ptr accum(new pcl::PointCloud());\n for(const auto& cluster : clusters) {\n if(cluster->is_human){\n std::copy(cluster->cloud->begin(), cluster->cloud->end(), std::back_inserter(accum->points));\n }\n }\n accum->width = accum->size();\n accum->height = 1;\n accum->is_dense = false;\n\n accum->header.stamp = filtered->header.stamp;\n accum->header.frame_id = globalmap->header.frame_id;\n\n human_points_pub.publish(accum);\n }\n\n if(detection_markers_pub.getNumSubscribers()) {\n detection_markers_pub.publish(create_markers(stamp, clusters));\n }\n }\n\n visualization_msgs::MarkerArrayConstPtr create_markers(const ros::Time& stamp, const std::vector& clusters) const {\n visualization_msgs::MarkerArrayPtr markers(new visualization_msgs::MarkerArray());\n markers->markers.reserve(clusters.size());\n\n for(int i=0; iis_human) {\n continue;\n }\n\n visualization_msgs::Marker cluster_marker;\n cluster_marker.header.stamp = stamp;\n cluster_marker.header.frame_id = globalmap->header.frame_id;\n cluster_marker.action = visualization_msgs::Marker::ADD;\n cluster_marker.lifetime = ros::Duration(0.5);\n cluster_marker.ns = (boost::format(\"cluster%d\") % i).str();\n cluster_marker.type = visualization_msgs::Marker::CUBE;\n\n cluster_marker.pose.position.x = clusters[i]->centroid.x();\n cluster_marker.pose.position.y = clusters[i]->centroid.y();\n cluster_marker.pose.position.z = clusters[i]->centroid.z();\n cluster_marker.pose.orientation.w = 1.0;\n\n cluster_marker.color.r = 0.0;\n cluster_marker.color.g = 0.0;\n cluster_marker.color.b = 1.0;\n cluster_marker.color.a = 0.4;\n\n cluster_marker.scale.x = clusters[i]->size.x();\n cluster_marker.scale.y = clusters[i]->size.y();\n cluster_marker.scale.z = clusters[i]->size.z();\n\n markers->markers.push_back(cluster_marker);\n }\n\n return markers;\n }\n\nprivate:\n // 
ROS\n ros::NodeHandle nh;\n ros::NodeHandle mt_nh;\n ros::NodeHandle private_nh;\n\n // subscribers\n std::unique_ptr> odom_sub;\n std::unique_ptr> points_sub;\n std::unique_ptr> sync;\n\n ros::Subscriber globalmap_sub;\n ros::Subscriber static_points_sub;\n\n // publishers\n ros::Publisher backsub_points_pub;\n ros::Publisher backsub_voxel_points_pub;\n\n ros::Publisher cluster_points_pub;\n ros::Publisher human_points_pub;\n\n ros::Publisher detection_markers_pub;\n ros::Publisher backsub_voxel_markers_pub;\n\n ros::Publisher clusters_pub;\n\n // global map\n pcl::PointCloud::Ptr globalmap;\n\n pcl::Filter::Ptr downsample_filter;\n std::unique_ptr backsub;\n std::unique_ptr detector;\n\n};\n\n}\n\nPLUGINLIB_EXPORT_CLASS(hdl_people_detection::HdlPeopleDetectionNodelet, nodelet::Nodelet)\n", "meta": {"hexsha": "889577d5f23768f950109f1c52fa8c78d363dae0", "size": 11834, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "apps/hdl_people_detection_nodelet.cpp", "max_stars_repo_name": "shangzhouye/hdl_people_tracking", "max_stars_repo_head_hexsha": "ba1dd664439bedd8b5f99113326ffca4703d4aeb", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 207.0, "max_stars_repo_stars_event_min_datetime": "2018-03-10T14:56:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T07:32:53.000Z", "max_issues_repo_path": "apps/hdl_people_detection_nodelet.cpp", "max_issues_repo_name": "shangzhouye/hdl_people_tracking", "max_issues_repo_head_hexsha": "ba1dd664439bedd8b5f99113326ffca4703d4aeb", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 24.0, "max_issues_repo_issues_event_min_datetime": "2018-02-19T10:50:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T19:44:55.000Z", "max_forks_repo_path": "apps/hdl_people_detection_nodelet.cpp", "max_forks_repo_name": "shangzhouye/hdl_people_tracking", "max_forks_repo_head_hexsha": "ba1dd664439bedd8b5f99113326ffca4703d4aeb", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 
91.0, "max_forks_repo_forks_event_min_datetime": "2018-02-23T09:44:25.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-03T01:38:14.000Z", "avg_line_length": 35.6445783133, "max_line_length": 148, "alphanum_fraction": 0.7071150921, "num_tokens": 2819, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.523420348936324, "lm_q2_score": 0.38121956625614994, "lm_q1q2_score": 0.19953807839114812}} {"text": "/****\n Copyright 2005-2007, Moshe Looks and Novamente LLC\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n****/\n\n#include \"moses/moses.h\"\n#include \"moses/optimization.h\"\n#include \"moses/scoring_functions.h\"\n#include \n#include \"reduct/reduct.h\"\n#include \n\nusing namespace moses;\nusing namespace reduct;\nusing namespace boost;\nusing namespace std;\n\nint main(int argc,char** argv) { \n vtree tr;\n while (cin.good()) {\n cin >> tr;\n if (!cin.good())\n break;\n \n logical_reduce(tr);\n\n representation rep(logical_reduction(),tr,infer_tree_type(tr));\n \n vtree tmp(rep.exemplar());\n\n for (int i=0;i<10;++i) { \n cout << rep.exemplar() << endl;\n\n eda::instance inst(rep.fields().packed_width());\n for (eda::field_set::disc_iterator it=rep.fields().begin_raw(inst);\n\t it!=rep.fields().end_raw(inst);++it)\n\tit.randomize();\t\n \n cout << rep.fields().stream(inst) << endl;\n rep.transform(inst);\n cout << rep.exemplar() << endl;\n\n rep.clear_exemplar();\n assert(tmp==rep.exemplar());\n }\n }\n}\n", "meta": {"hexsha": 
"a2c46e1223b10d34b13960d4248a88e9fce75a9b", "size": 1594, "ext": "cc", "lang": "C++", "max_stars_repo_path": "moses2/main/build-representation.cc", "max_stars_repo_name": "moshelooks/moses", "max_stars_repo_head_hexsha": "81568276877f24c2cb1ca5649d44e22fba6f44b2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "moses2/main/build-representation.cc", "max_issues_repo_name": "moshelooks/moses", "max_issues_repo_head_hexsha": "81568276877f24c2cb1ca5649d44e22fba6f44b2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "moses2/main/build-representation.cc", "max_forks_repo_name": "moshelooks/moses", "max_forks_repo_head_hexsha": "81568276877f24c2cb1ca5649d44e22fba6f44b2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0169491525, "max_line_length": 75, "alphanum_fraction": 0.6706398996, "num_tokens": 392, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO\n\n", "lm_q1_score": 0.523420348936324, "lm_q2_score": 0.38121956625614994, "lm_q1q2_score": 0.19953807839114812}} {"text": "#include \n#include \n#include \n\nusing namespace std;\n\nuint64_t l3_access_cycle_count = 38;\nuint64_t total_dyn_ins_count = 163607974;\ndouble dyn_ins_count_inc_ratio=0.013;\n\n#define BUFFER_SIZE 2048\n#define FANIN 100\n#define FANIN_THRESHOLD 10\n\n\nuint64_t find_prefetch_index(vector> ¤t_lb_record)\n{\n uint64_t running_cycle_count = 0;\n for(uint64_t i = 0; i < current_lb_record.size(); i++)\n {\n running_cycle_count+=current_lb_record[i].second;\n if(running_cycle_count>=l3_access_cycle_count)return i;\n }\n return current_lb_record.size()-1;\n}\n\nbool find_prefetch_index_for_multiple_samples_with_fanin(string missed_pc, vector>> ¤t_lb_record_list, vector &result)\n{\n set candidates_array[FANIN];\n uint64_t candidates_array_count[FANIN];\n for(uint64_t i=0;i &candidates = candidates_array[j];\n \n set tmp;\n for(int k = start_index; k=FANIN)return false;\n for(int k = start_index; k< current_lb_record_list[i].size();k++)\n {\n string pc = current_lb_record_list[i][k].first;\n if(pc!=missed_pc)\n {\n candidates_array[current_index].insert(pc);\n }\n }\n if(candidates_array[current_index].size()<1)return false;\n }\n else\n {\n //j contains the first found set\n }\n }\n \n vector> sorted_result;\n for(int j=0;j< current_index; j++)\n {\n set &candidates = candidates_array[j];\n if(candidates.size()<1)continue;\n vector> sorted_candidates;\n for(auto it: candidates)\n {\n int total_distance = 0;\n for(int i = 0; i< current_lb_record_list.size();i++)\n {\n for(int k=0;k=FANIN_THRESHOLD-1)break;\n }\n return true;\n}\n\nbool find_prefetch_index_for_multiple_samples(string missed_pc, vector>> ¤t_lb_record_list, string &result)\n{\n result=\"\";\n set candidates;\n uint64_t start_index = find_prefetch_index(current_lb_record_list[0]);\n start_index+=1;\n if(start_index==current_lb_record_list[0].size())\n {\n //cout<<\"Not 
found\\n\";\n return false;\n }\n for(int i = start_index; i tmp;\n start_index = find_prefetch_index(current_lb_record_list[i]);\n start_index+=1;\n if(start_index==current_lb_record_list[i].size())\n {\n found = false;\n break;\n }\n for(int j = start_index; j> sorted_candidates;\n for(auto it: candidates)\n {\n int total_distance = 0;\n for(int i = 0; i< current_lb_record_list.size();i++)\n {\n for(int j=0;j>value;\n return value;\n}\n\nvoid check_pc_miss_same_cache_line(string cache_line_address, string pc)\n{\n assert((string_to_u64(pc)>>6)==string_to_u64(cache_line_address));\n}\n\nint main(int argc, char *argv[])\n{\n if(argc<2)\n {\n cerr << \"Usage ./exec data_file_path\\n\";\n return -1;\n }\n gzFile data_file = gzopen(argv[1], \"rb\");\n if(!data_file)\n {\n cerr << \"Invalid data_file_path\\n\";\n return -1;\n }\n char buffer[BUFFER_SIZE];\n memset(buffer, 0, BUFFER_SIZE);\n if(gzbuffer(data_file, BUFFER_SIZE*128) == -1)\n {\n cerr << \"GZ Input file buffer set unsuccessfull\\n\";\n return -1;\n }\n string line;\n vector parsed;\n string cache_line_address,pc;\n unordered_map>>> profile;\n unordered_map miss_counts;\n while( gzgets(data_file,buffer,BUFFER_SIZE) != Z_NULL )\n {\n line=buffer;\n boost::trim_if(line,boost::is_any_of(\",\\n\"));\n boost::split(parsed,line,boost::is_any_of(\",\\n\"),boost::token_compress_on);\n //for(int i =0;i current_record;\n vector> current_lb_record;\n for(int i=2;i>>();\n miss_counts[pc]=0;\n }\n profile[pc].push_back(current_lb_record);\n miss_counts[pc]+=1;\n }\n vector> sorted_miss_pcs;\n for(auto it: miss_counts)\n {\n sorted_miss_pcs.push_back(make_pair(it.second, it.first));\n }\n sort(sorted_miss_pcs.begin(), sorted_miss_pcs.end());\n reverse(sorted_miss_pcs.begin(), sorted_miss_pcs.end());\n uint64_t permissible_prefetch_count = dyn_ins_count_inc_ratio * total_dyn_ins_count;\n uint64_t current_running_count = 0;\n \n unordered_map> bbl_address_prefetch_map;\n \n for(int i=0;i1)\n {\n /*string result;\n bool 
is_found = find_prefetch_index_for_multiple_samples(pc, profile[pc],result);\n if(is_found)cout< results;\n bool is_found = find_prefetch_index_for_multiple_samples_with_fanin(pc, profile[pc],results);\n if(is_found)\n {\n //for(int j=0;j();\n }\n bbl_address_prefetch_map[bbl_address].insert(pc);\n }\n }\n //}\n }\n else if(profile[pc].size()==1)\n {\n uint64_t index = find_prefetch_index(profile[pc][0]);\n index+=1;\n if(index==profile[pc][0].size())\n {\n //cerr<<\"No good prefetch position found\\n\";\n }\n else\n {\n //cout<\"<();\n }\n bbl_address_prefetch_map[bbl_address].insert(pc);\n }\n }\n else\n {\n cerr<<\"Miss without lbr profile\\n\";\n return -1;\n }\n //cout<=permissible_prefetch_count)break;\n }\n \n for(auto it:bbl_address_prefetch_map)\n {\n cout<\n#include \n#include \n#include \n\nusing namespace std;\nusing namespace boost::property_tree;\n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \"wallet_lib.hpp\"\n\nusing namespace std;\n\nstruct binary_key\n{\n binary_key(){}\n uint32_t check = 0;\n fc::ecc::public_key_data data;\n};\n\nFC_REFLECT( binary_key, (data)(check) )\n\nstd::string key_to_wif(const fc::sha256& secret )\n{\n const size_t size_of_data_to_hash = sizeof(secret) + 1;\n const size_t size_of_hash_bytes = 4;\n char data[size_of_data_to_hash + size_of_hash_bytes];\n data[0] = (char)0x80;\n memcpy(&data[1], (char*)&secret, sizeof(secret));\n fc::sha256 digest = fc::sha256::hash(data, size_of_data_to_hash);\n digest = fc::sha256::hash(digest);\n memcpy(data + size_of_data_to_hash, (char*)&digest, size_of_hash_bytes);\n return fc::to_base58(data, sizeof(data));\n}\n\n/*\n * global parameters to hold the keys\n */\n\n//cybex_priv_key_type active_priv_key, owner_priv_key, memo_priv_key;\n\nstatic map stored_keys;\nstatic string default_public_key = \"\";\n\n\nvoid set_default_public_key(string 
pub_key_base58_str)\n{\n default_public_key = pub_key_base58_str;\n}\n\nvoid clear_user_key()\n{\n stored_keys.clear();\n}\n\nvoid add_user_key(string public_key, fc::ecc::private_key key)\n{\n stored_keys.insert(map::value_type(public_key, key));\n}\n\nfc::ecc::private_key& get_private_key(string public_key)\n{\n map::iterator iter;\n\n if(public_key == \"\")\n iter = stored_keys.find(default_public_key);\n else\n iter = stored_keys.find(public_key);\n if(iter != stored_keys.end())\n return iter->second;\n\n FC_THROW_EXCEPTION(fc::exception, \"private key not found\");\n}\n\nfc::mutable_variant_object get_dev_key(string type, string secret)\n{\n fc::ecc::private_key priv_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string(secret)));\n string private_key = key_to_wif( priv_key );\n fc::ecc::public_key pub_key = priv_key.get_public_key();\n struct binary_key bkey;\n bkey.data = pub_key.serialize();\n bkey.check = fc::ripemd160::hash( bkey.data.data, bkey.data.size() )._hash[0];\n \n auto data = fc::raw::pack( bkey );\n string public_key = \"CYB\" + fc::to_base58( data.data(), data.size() );\n \n auto dat = pub_key.serialize();\n fc::ripemd160 addr = fc::ripemd160::hash( fc::sha512::hash( dat.data, sizeof( dat ) ) );\n fc::array bin_addr;\n \n memcpy( (char*)&bin_addr, (char*)&addr, sizeof( addr ) );\n auto checksum = fc::ripemd160::hash( (char*)&addr, sizeof( addr ) );\n memcpy( ((char*)&bin_addr)+20, (char*)&checksum._hash[0], 4 );\n string address = \"CYB\" + fc::to_base58( bin_addr.data, sizeof( bin_addr ) );\n \n graphene::chain::pts_address compress_pts_addr(pub_key, true, 56);\n graphene::chain::pts_address uncompress_pts_addr(pub_key, false, 56);\n \n fc::mutable_variant_object mvo;\n mvo( \"private_key\", private_key)\n ( \"public_key\", public_key)\n ( \"address\", address)\n ( \"compressed\", string(graphene::chain::address(compress_pts_addr)))\n ( \"uncompressed\", string(graphene::chain::address(uncompress_pts_addr)))\n ;\n\n 
add_user_key(public_key, priv_key);\n if(type == \"active\")\n default_public_key = public_key;\n\n return mvo;\n //return fc::json::to_string(mvo);\n}\n\nstring get_user_key(string user_name, string password)\n{\n clear_user_key();\n fc::mutable_variant_object mvo;\n mvo(\"active-key\", get_dev_key(\"active\", user_name + \"active\" + password))\n (\"owner-key\", get_dev_key(\"owner\", user_name + \"owner\" + password))\n (\"memo-key\", get_dev_key(\"memo\", user_name + \"memo\" + password))\n ;\n return fc::json::to_string(mvo);\n}\n", "meta": {"hexsha": "20282c1288c424f7e81349eef13e1ef1ef1f3508", "size": 4150, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Recika/bitshare-core/get_dev_key.cpp", "max_stars_repo_name": "qingcai518/Recika", "max_stars_repo_head_hexsha": "06b3870ef07f2135c5ed99f6d7f5dddf773aeec6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-10-14T16:09:02.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-14T16:09:02.000Z", "max_issues_repo_path": "Recika/bitshare-core/get_dev_key.cpp", "max_issues_repo_name": "qingcai518/Recika", "max_issues_repo_head_hexsha": "06b3870ef07f2135c5ed99f6d7f5dddf773aeec6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Recika/bitshare-core/get_dev_key.cpp", "max_forks_repo_name": "qingcai518/Recika", "max_forks_repo_head_hexsha": "06b3870ef07f2135c5ed99f6d7f5dddf773aeec6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4326241135, "max_line_length": 101, "alphanum_fraction": 0.7106024096, "num_tokens": 1128, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.523420348936324, "lm_q2_score": 0.38121956625614994, "lm_q1q2_score": 0.19953807839114812}} {"text": "/* PREM NIRMAL\n Fordham RCV Lab\n Fordham University\n Bronx NY 10458\n*/\n/*\n 07/2012\n An implemtation of Churchill and Vardy's VHiSS algorithm,\n using David Lowe's SIFT feature extractor.\n*/\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"Aria.h\"\n\n#include \"defs.h\"\n\nusing namespace cv;\nusing namespace std;\nusing namespace Eigen;\n\n#define HOMEIMAGE \"data/home.pgm\"\n#define HOMEKEY \"data/home.key\"\n#define IMG_WIDTH 1771\n#define IMG_HEIGHT 270\n#define DISTANCE 400\n#define EPS 2\n#define PERCENT 60\n\n/* -------------------- Local function prototypes ------------------------ */\nVector2d FindMatches(Image im1, Keypoint keys1, Image im2, Keypoint keys2, int imageCount);\nKeypoint CheckForMatch(Keypoint key, Keypoint klist);\nint DistSquared(Keypoint k1, Keypoint k2);\nImage CombineImagesHorizontally(Image im1, Image im2);\nImage CombineImagesVertically(Image im1, Image im2);\nIplImage *rotateImage(const IplImage *src, float angleDegrees);\n/*----------------------------- Routines ----------------------------------*/\nint main (int argc, char **argv)\n{\n Image im1 = NULL, im2 = NULL;\n Keypoint k1 = NULL, k2 = NULL;\n int imageCount=1;\n char imageName[100], keypointName[100], command[256];\n double alpha;\n Vector2d move;\n\n /* ROBOT DECLARATIONS */\n ArArgumentParser parser(&argc, argv); // set up our parser\n ArSimpleConnector simpleConnector(&parser); // set up our simple connector\n ArRobot robot; // declare robot object\n // ArSonarDevice sonar;\n \n /* INITIALIZATION OF CONNECTION TO ROBOT */\n Aria::init(); // mandatory init\n parser.loadDefaultArguments(); // load the default arguments \n if (!simpleConnector.parseArgs() // check command line args\n || !parser.checkHelpAndWarnUnparsed(1))\n { \n simpleConnector.logOptions();\n Aria::shutdown();\n return 1;\n }\n if 
(!simpleConnector.connectRobot(&robot)) // ask for connection to robot\n {\n printf(\"Could not connect to robot... exiting\\n\");\n Aria::shutdown();\n return 1;\n }\n\n /* INITIALIZATION OF ROBOT*/\n robot.runAsync(true); // commands processed in separate thread\n robot.enableMotors(); // turn the power to the motors on\n // robot.addRangeDevice(&sonar); // add sonar (THIS IS UNNECCESARY FOR VH)\n ArUtil::sleep(1000); // sleep time allows robot to initialise sonar, motors, etc\n\n robot.setRotVelMax(30);\n robot.setTransVelMax(80);\n\n /*** CAPTURE HOME IMAGE ***/\n cout<width*PERCENT/100),(int)(homeGray->height*PERCENT/100)), homeGray->depth, homeGray->nChannels );\n cvResize(homeGray,homeImage);\n cvReleaseImage(&homeGray);\n homeImage=rotateImage(homeImage,180); // rotate by 180 deg\n cvSaveImage(\"data/home.pgm\",homeImage);\n remove(\"data/temphome.jpg\");\n // remove(\"data/home.jpg\");\n \n sleep(1);\n \n /*** OBTAIN SIFT FEATURES ***/\n cout< data/home.key\");\n /*** -------------------- ***/\n sleep(2);\n /******************************/\n\n cout< angleList; angleList.clear();\n vector signList; signList.clear();\n\n do\n { \n robot.stop();\n ArUtil::sleep(1000);\n\n system(\"mplayer tv:// -tv width=1024:height=768:device=/dev/video1:outfmt=rgb24 -frames 1 -vo jpeg:outdir=data\");\n sleep(1);\n system(\"mv data/00000001.jpg data/image.jpg\");\n sleep(1);\n system(\"./omnicamtools_test calib_results.txt data/image.jpg\");\n sleep(1);\n sprintf(buf,\"mv data/image.jpg data/img%d.jpg\",imageCount);\n system(buf);\n system(\"mv unwarped_image.jpg data/image.jpg\");\n \n cout<width*PERCENT/100),(int)(gray->height*PERCENT/100)), gray->depth, gray->nChannels );\n cvResize(gray,image);\n sprintf(imageName,\"data/image%d.pgm\",imageCount);\n image=rotateImage(image,180); // rotate by 180 deg\n cvSaveImage(imageName,image);\n sprintf(keypointName,\"data/image%d.key\",imageCount);\n remove(\"data/image.jpg\");\n cvReleaseImage(&gray);\n cvReleaseImage(&image);\n\n 
/*** OBTAIN SIFT FEATURES ***/\n snprintf(command,256,\"./sift <%s >%s\",imageName,keypointName);\n system(command);\n /*** -------------------- ***/\n\n im1=ReadPGMFile(HOMEIMAGE);\n k1=ReadKeyFile(HOMEKEY);\n im2=ReadPGMFile(imageName);\n k2=ReadKeyFile(keypointName);\n \n /* determine movement vector using SIFT features */\n move=FindMatches(im1, k1, im2, k2,imageCount);\n alpha=move(0)*180/M_PI;\n\n angleList.push_back(alpha);\n signList.push_back((int)move(1));\n\n if(alpha!=alpha)\n\t{\n\t cout<<\"Alpha = \"<=EPS && alpha==alpha)\n\t{\n\t cout<<\"Robot orienting by \"<0)\n\t cout<<\"and moving forwards by \"<::iterator dit;\n vector::iterator iit;\n for(dit=angleList.begin(),iit=signList.begin();dit!=angleList.end();dit++,iit++)\n cout<<\"\\talpha=\"<<*dit<<\", sign=\"<<*iit< > unitVec;\n Image result;\n result=CombineImagesVertically(im1,im2);\n\n vector mPOS1, mPOS2, mNEG1, mNEG2;\n vector thetaPOS, thetaNEG;\n Vector2d move;\n\n /* Match the keys in list keys1 to their best matches in keys2 */\n for(k=keys1;k!=NULL;k=k->next) // home image\n {\n match=CheckForMatch(k,keys2); // k = home img, keys2 = current image\n if(match!=NULL) \n\t{\n\t delta = k->scale - match->scale;\n\t DrawLine(result, (int) k->row, (int) k->col,\n\t\t (int) (match->row + im1->rows), (int) match->col);\n\n\t // STORE mPOS and mNEG\n\t if(delta>=0)\n\t {\n\t mPOS1.push_back(k);\n\t mPOS2.push_back(match);\n\t // 360 degrees field of view\n\t thetaPOS.push_back((match->col*(2*M_PI/IMG_WIDTH)) - M_PI);\n\t }\n\t if(delta<0)\n\t {\n\t mNEG1.push_back(k);\n\t mNEG2.push_back(match);\n\t // 360 degrees field of view\n\t thetaNEG.push_back((match->col*(2*M_PI/IMG_WIDTH)) - M_PI);\n\t }\n\t count++;\n\t}//end if(match..\n }//end for(k=keys1..\n\n FILE *matched;\n char matchfileName[100];\n sprintf(matchfileName,\"data/matched%d.pgm\",imageCount);\n matched=fopen(matchfileName,\"w\");\n WritePGM(matched, result);\n free_img(result);\n fclose(matched);\n cout<<\"Wrote '\"<::iterator 
pit,nit;\n double sintheta=0, costheta=0;\n for(pit=thetaPOS.begin();pit!=thetaPOS.end();pit++)\n {\n sintheta+=sin(*pit);\n costheta+=cos(*pit);\n }\n if(sintheta!=0 && costheta!=0)\n POSthetaAverage=atan(sintheta/costheta);\n else\n POSthetaAverage=0;\n\n sintheta=0; costheta=0;\n\n for(nit=thetaNEG.begin();nit!=thetaNEG.end();nit++)\n {\n sintheta+=sin(*nit);\n costheta+=cos(*nit);\n }\n if(sintheta!=0 && costheta!=0)\n NEGthetaAverage=atan(sintheta/costheta);\n else\n NEGthetaAverage=0;\n\n sintheta=0; costheta=0;\n\n double s,c;\n cout<<\"sin(POSthetaAverage) = \"<=(M_PI/2))\n sign=-1;\n else sign=1;\n cout<<\"s = \"<next) {\n dsq = DistSquared(key, k);\n\n if (dsq < distsq1) {\n distsq2 = distsq1;\n distsq1 = dsq;\n minkey = k;\n } else if (dsq < distsq2) {\n distsq2 = dsq;\n }\n }\n\n /* Check whether closest distance is less than 0.6 of second. */\n if (10 * 10 * distsq1 < 6 * 6 * distsq2)\n return minkey;\n else return NULL;\n}\n\n\n/* Return squared distance between two keypoint descriptors.\n */\nint DistSquared(Keypoint k1, Keypoint k2)\n{\n int i, dif, distsq = 0;\n unsigned char *pk1, *pk2;\n\n pk1 = k1->descrip;\n pk2 = k2->descrip;\n\n for (i = 0; i < 128; i++) {\n dif = (int) *pk1++ - (int) *pk2++;\n distsq += dif * dif;\n }\n return distsq;\n}\n\nImage CombineImagesHorizontally(Image im1, Image im2)\n{\n int rows, cols, r, c;\n Image result;\n rows = MAX(im1->rows,im2->rows);\n cols = im1->cols+im2->cols;\n result = CreateImage(rows,cols);\n /* Set all pixels to 0,5, so that blank regions are grey. */\n for (r = 0; r < rows; r++)\n for (c = 0; c < cols; c++)\n result->pixels[r][c] = 0.5;\n /* Copy images into result. 
*/\n for (r = 0; r < im1->rows; r++)\n for (c = 0; c < im1->cols; c++)\n result->pixels[r][c] = im1->pixels[r][c];\n for (r = 0; r < im2->rows; r++)\n for (c = 0; c < im2->cols; c++)\n result->pixels[r][c+im1->cols] = im2->pixels[r][c];\n\n return result;\n}\n\nImage CombineImagesVertically(Image im1, Image im2)\n{\n int rows, cols, r, c;\n Image result;\n\n rows = im1->rows + im2->rows;\n cols = MAX(im1->cols, im2->cols);\n result = CreateImage(rows, cols);\n\n /* Set all pixels to 0,5, so that blank regions are grey. */\n for (r = 0; r < rows; r++)\n for (c = 0; c < cols; c++)\n result->pixels[r][c] = 0.5;\n\n /* Copy images into result. */\n for (r = 0; r < im1->rows; r++)\n for (c = 0; c < im1->cols; c++)\n result->pixels[r][c] = im1->pixels[r][c];\n for (r = 0; r < im2->rows; r++)\n for (c = 0; c < im2->cols; c++)\n result->pixels[r + im1->rows][c] = im2->pixels[r][c];\n\n return result;\n}\n\n// Rotate the image clockwise (or counter-clockwise if negative).\n// Remember to free the returned image.\nIplImage *rotateImage(const IplImage *src, float angleDegrees)\n{\n // Create a map_matrix, where the left 2x2 matrix\n // is the transform and the right 2x1 is the dimensions.\n float m[6];\n CvMat M = cvMat(2, 3, CV_32F, m);\n int w = src->width;\n int h = src->height;\n float angleRadians = angleDegrees * ((float)CV_PI / 180.0f);\n m[0] = (float)( cos(angleRadians) );\n m[1] = (float)( sin(angleRadians) );\n m[3] = -m[1];\n m[4] = m[0];\n m[2] = w*0.5f; \n m[5] = h*0.5f; \n\n // Make a spare image for the result\n CvSize sizeRotated;\n sizeRotated.width = cvRound(w);\n sizeRotated.height = cvRound(h);\n\n // Rotate\n IplImage *imageRotated = cvCreateImage( sizeRotated,\n\t\t\t\t\t src->depth, src->nChannels );\n\n // Transform the image\n cvGetQuadrangleSubPix( src, imageRotated, &M);\n\n return imageRotated;\n}\n", "meta": {"hexsha": "a5c001f0c60162186c7761431cc379a21a047dbf", "size": 13628, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "vh.cpp", 
"max_stars_repo_name": "premnirmal/Homing-in-Scale-Space", "max_stars_repo_head_hexsha": "b7740058faea0cffc34dbe05dcce557f19a17a01", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2015-09-03T00:31:21.000Z", "max_stars_repo_stars_event_max_datetime": "2015-09-03T00:31:21.000Z", "max_issues_repo_path": "vh.cpp", "max_issues_repo_name": "premnirmal/Homing-in-Scale-Space", "max_issues_repo_head_hexsha": "b7740058faea0cffc34dbe05dcce557f19a17a01", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vh.cpp", "max_forks_repo_name": "premnirmal/Homing-in-Scale-Space", "max_forks_repo_head_hexsha": "b7740058faea0cffc34dbe05dcce557f19a17a01", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2446351931, "max_line_length": 146, "alphanum_fraction": 0.6372174934, "num_tokens": 4127, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.523420348936324, "lm_q2_score": 0.38121956625614994, "lm_q1q2_score": 0.19953807839114812}} {"text": "#include \"visualization/feature-matches-visualization.h\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace visualization {\n\nvoid saveOpenCvMatchesAndFeaturesAsImage(\n const aslam::VisualFrame& frame_A, const cv::Mat& image_A,\n const aslam::VisualFrame& frame_B, const cv::Mat& image_B,\n const aslam::OpenCvMatches& matches_A_B, const std::string& filename,\n aslam::FeatureVisualizationType visualization_type, vi_map::VIMap* map) {\n CHECK_NOTNULL(map);\n\n if (matches_A_B.empty()) {\n VLOG(1) << \"No matches found.\";\n }\n\n if (frame_A.getNumKeypointMeasurements() == 0 ||\n frame_B.getNumKeypointMeasurements() == 0) {\n VLOG(1) << \"No features found.\";\n return;\n }\n\n // Scale the keypoint score to sth we can visualize.\n const Eigen::Matrix2Xd& key_point_matrix_A(frame_A.getKeypointMeasurements());\n const Eigen::Matrix2Xd& key_point_matrix_B(frame_B.getKeypointMeasurements());\n const Eigen::VectorXd& key_point_scores_A = frame_A.getKeypointScores();\n const Eigen::VectorXd& key_point_scores_B = frame_B.getKeypointScores();\n const double max_score =\n std::max(key_point_scores_A.maxCoeff(), key_point_scores_B.maxCoeff());\n const double min_score =\n std::min(key_point_scores_A.minCoeff(), key_point_scores_B.minCoeff());\n const double score_range = std::max(max_score - min_score, 1.0);\n const double max_key_point_size = 30.0;\n const double min_key_point_size = 5.0;\n\n const Eigen::VectorXd& key_point_orientation_A =\n frame_A.getKeypointOrientations();\n const Eigen::VectorXd& key_point_orientation_B =\n frame_B.getKeypointOrientations();\n\n // Convert the keypoints to cv::KeyPoint.\n std::vector key_points_A, key_points_B;\n for (uint i = 0; i < frame_A.getNumKeypointMeasurements(); ++i) {\n double key_point_size = std::max(\n (key_point_scores_A(i) - min_score) / 
score_range * max_key_point_size,\n min_key_point_size);\n key_points_A.emplace_back(\n cv::KeyPoint(\n key_point_matrix_A(0, i), key_point_matrix_A(1, i), key_point_size,\n key_point_orientation_A(i)));\n }\n for (uint i = 0; i < frame_B.getNumKeypointMeasurements(); ++i) {\n double key_point_size = std::max(\n (key_point_scores_B(i) - min_score) / score_range * max_key_point_size,\n min_key_point_size);\n key_points_B.emplace_back(\n cv::KeyPoint(\n key_point_matrix_B(0, i), key_point_matrix_B(1, i), key_point_size,\n key_point_orientation_B(i)));\n }\n\n // Convert color images to grayscale if necessary.\n cv::Mat grayscale_image_A, grayscale_image_B;\n bool is_color_image = image_A.channels() == 3;\n if (is_color_image) {\n cv::cvtColor(image_A, grayscale_image_A, CV_BGR2GRAY);\n cv::cvtColor(image_B, grayscale_image_B, CV_BGR2GRAY);\n } else {\n grayscale_image_A = image_A;\n grayscale_image_B = image_B;\n }\n\n // Write features and matches to image file.\n cv::Mat images_w_matches;\n aslam::drawKeyPointsAndMatches(\n grayscale_image_A, key_points_A, grayscale_image_B, key_points_B,\n matches_A_B, visualization_type, &images_w_matches);\n cv::imwrite(filename, images_w_matches);\n}\n\nvoid saveOpenCvMatchesAndFeaturesAsImage(\n const aslam::VisualFrame& frame_A, const aslam::VisualFrame& frame_B,\n const aslam::OpenCvMatches& matches_A_B, const std::string& filename,\n aslam::FeatureVisualizationType visualization_type, vi_map::VIMap* map) {\n CHECK_NOTNULL(map);\n\n if (!frame_A.hasRawImage() || !frame_B.hasRawImage()) {\n LOG(ERROR) << \"One of the frames has no image!\";\n return;\n }\n\n cv::Mat image_A = frame_A.getRawImage();\n cv::Mat image_B = frame_B.getRawImage();\n\n if (image_A.empty() || image_B.empty()) {\n LOG(ERROR) << \"One of the images is empty!\";\n return;\n }\n\n saveOpenCvMatchesAndFeaturesAsImage(\n frame_A, image_A, frame_B, image_B, matches_A_B, filename,\n visualization_type, map);\n}\n\nvoid saveOpenCvMatchesAndFeaturesAsImage(\n 
const aslam::VisualFrame& frame_A, const vi_map::Vertex& vertex_A,\n const aslam::VisualFrame& frame_B, const vi_map::Vertex& vertex_B,\n const aslam::OpenCvMatches& matches_A_B, const std::string& filename,\n aslam::FeatureVisualizationType visualization_type,\n const backend::ResourceType image_resource_type, vi_map::VIMap* map) {\n CHECK_NOTNULL(map);\n\n const unsigned int current_frame_idx =\n vertex_A.getVisualFrameIndex(frame_A.getId());\n const unsigned int next_frame_idx =\n vertex_B.getVisualFrameIndex(frame_B.getId());\n\n // Load image resources.\n cv::Mat image_A, image_B;\n if (!map->getFrameResource(\n vertex_A, current_frame_idx, image_resource_type, &image_A)) {\n LOG(ERROR) << \"Frame \" << current_frame_idx << \" of vertex \"\n << vertex_A.id() << \" has no resource of type \"\n << static_cast(image_resource_type);\n return;\n }\n if (!map->getFrameResource(\n vertex_B, next_frame_idx, image_resource_type, &image_B)) {\n LOG(ERROR) << \"Frame \" << next_frame_idx << \" of vertex \" << vertex_B.id()\n << \" has no resource of type \"\n << static_cast(image_resource_type);\n return;\n }\n\n saveOpenCvMatchesAndFeaturesAsImage(\n frame_A, image_A, frame_B, image_B, matches_A_B, filename,\n visualization_type, map);\n}\n\nvoid saveLandmarkMatchesAndFeaturesAsImage(\n const aslam::VisualFrame& frame_A, const vi_map::Vertex& vertex_A,\n const aslam::VisualFrame& frame_B, const vi_map::Vertex& vertex_B,\n const std::string& filename,\n aslam::FeatureVisualizationType visualization_type,\n const backend::ResourceType image_resource_type, vi_map::VIMap* map) {\n CHECK_NOTNULL(map);\n\n aslam::OpenCvMatches matches_A_B;\n int index_A = 0;\n int index_B = 0;\n float distance = 0.0;\n const unsigned int current_frame_idx =\n vertex_A.getVisualFrameIndex(frame_A.getId());\n const unsigned int next_frame_idx =\n vertex_B.getVisualFrameIndex(frame_B.getId());\n\n vi_map::LandmarkIdList landmark_ids_A;\n vi_map::LandmarkIdList landmark_ids_B;\n 
vertex_A.getFrameObservedLandmarkIds(current_frame_idx, &landmark_ids_A);\n vertex_B.getFrameObservedLandmarkIds(next_frame_idx, &landmark_ids_B);\n\n for (const vi_map::LandmarkId& landmark_id_A : landmark_ids_A) {\n for (const vi_map::LandmarkId& landmark_id_B : landmark_ids_B) {\n if (landmark_id_A.isValid() && landmark_id_B.isValid()) {\n if (landmark_id_A == landmark_id_B) {\n vi_map::Landmark landmark = map->getLandmark(landmark_id_A);\n const vi_map::KeypointIdentifierList& observations =\n landmark.getObservations();\n bool found_observation_for_current_vertex = false;\n bool found_observation_for_next_vertex = false;\n\n for (vi_map::KeypointIdentifier observation : observations) {\n if (observation.frame_id.vertex_id == vertex_A.id() &&\n observation.frame_id.frame_index == current_frame_idx) {\n found_observation_for_current_vertex = true;\n index_A = observation.keypoint_index;\n distance = static_cast(frame_A.getKeypointScore(index_A));\n } else if (\n observation.frame_id.vertex_id == vertex_B.id() &&\n observation.frame_id.frame_index == next_frame_idx) {\n found_observation_for_next_vertex = true;\n index_B = observation.keypoint_index;\n distance = static_cast(frame_B.getKeypointScore(index_B));\n }\n }\n if (found_observation_for_current_vertex &&\n found_observation_for_next_vertex) {\n matches_A_B.push_back(cv::DMatch(index_A, index_B, distance));\n }\n }\n }\n }\n }\n saveOpenCvMatchesAndFeaturesAsImage(\n frame_A, vertex_A, frame_B, vertex_B, matches_A_B, filename,\n visualization_type, image_resource_type, map);\n}\n} // namespace visualization\n", "meta": {"hexsha": "17f2584a6f65b64fec251bace6396e6be4d5deac", "size": 8173, "ext": "cc", "lang": "C++", "max_stars_repo_path": "visualization/src/feature-matches-visualization.cc", "max_stars_repo_name": "AdronTech/maplab", "max_stars_repo_head_hexsha": "1340e01466fc1c02994860723b8117daf9ad226d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1936.0, 
"max_stars_repo_stars_event_min_datetime": "2017-11-27T23:11:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T14:24:14.000Z", "max_issues_repo_path": "visualization/src/feature-matches-visualization.cc", "max_issues_repo_name": "AdronTech/maplab", "max_issues_repo_head_hexsha": "1340e01466fc1c02994860723b8117daf9ad226d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 353.0, "max_issues_repo_issues_event_min_datetime": "2017-11-29T18:40:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T15:53:46.000Z", "max_forks_repo_path": "visualization/src/feature-matches-visualization.cc", "max_forks_repo_name": "AdronTech/maplab", "max_forks_repo_head_hexsha": "1340e01466fc1c02994860723b8117daf9ad226d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 661.0, "max_forks_repo_forks_event_min_datetime": "2017-11-28T07:20:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T08:06:29.000Z", "avg_line_length": 39.2932692308, "max_line_length": 80, "alphanum_fraction": 0.7039030956, "num_tokens": 2054, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.5234203489363239, "lm_q2_score": 0.38121956625615, "lm_q1q2_score": 0.1995380783911481}} {"text": "// Copyright 2015 Patrick Putnam\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n#ifndef CLOTHO_BATCH_CROSSOVER_TASK_POPULATION_SPACE_HPP_\n#define CLOTHO_BATCH_CROSSOVER_TASK_POPULATION_SPACE_HPP_\n\n#include \"clotho/data_spaces/population_space/population_space.hpp\"\n#include \n#include \"clotho/data_spaces/crossover/position_classifier.hpp\"\n#include \"clotho/data_spaces/generators/position_distribution_helper.hpp\"\n#include \"clotho/data_spaces/generators/crossover_event_distribution_helper.hpp\"\n\n#include \"clotho/data_spaces/crossover/block_crossover.hpp\"\n\nnamespace clotho {\nnamespace genetics {\n\ntemplate < class RNG, class MatePairType, class BlockType, class WeightType, class AlleleSpaceType >\nclass batch_crossover_task< RNG, MatePairType, population_space< BlockType, WeightType > , AlleleSpaceType > : public task {\npublic:\n typedef batch_crossover_task< RNG, MatePairType, population_space< BlockType, WeightType >, AlleleSpaceType > self_type;\n\n typedef population_space< BlockType, WeightType > space_type;\n typedef AlleleSpaceType allele_type;\n typedef RNG random_engine_type;\n\n typedef typename space_type::genome_type genome_type;\n\n typedef typename space_type::individual_type individual_type;\n\n typedef MatePairType mate_pair_type;\n typedef typename mate_pair_type::iterator iterator;\n typedef 
typename mate_pair_type::const_iterator const_iterator;\n\n typedef PositionClassifier< typename allele_type::position_vector > classifier_type;\n typedef typename classifier_type::event_type event_type;\n\n typedef typename position_distribution_helper< typename allele_type::position_type >::type position_distribution_type;\n typedef typename crossover_event_distribution_helper< double >::type event_distribution_type;\n\n batch_crossover_task( random_engine_type * rng, space_type * parent, space_type * offspring, allele_type * alleles, unsigned int off_idx, const_iterator first, const_iterator last, double recomb_rate, double seq_bias ) :\n m_rng(rng)\n , m_parental( parent )\n , m_offspring( offspring )\n , m_alleles( alleles )\n , m_parents(first, last)\n , m_offspring_index(off_idx)\n , m_recomb_rate(recomb_rate)\n , m_seq_bias( seq_bias )\n { }\n\n batch_crossover_task( const self_type & other ) :\n m_rng( other.m_rng )\n , m_parental( other.m_parental )\n , m_offspring( other.m_offspring )\n , m_alleles( other.m_alleles )\n , m_parents( other.m_parents )\n , m_offspring_index( other.m_offspring_index )\n , m_recomb_rate( other.m_recomb_rate )\n , m_seq_bias( other.m_seq_bias )\n { }\n\n void operator()() {\n event_distribution_type event_dist( m_recomb_rate);\n boost::random::bernoulli_distribution< double > bias_dist( m_seq_bias);\n\n iterator mate_it = m_parents.begin(), mate_end = m_parents.end();\n\n#ifdef DEBUGGING\n BOOST_LOG_TRIVIAL(debug) << \"Starting batch crossover task\";\n#endif // DEBUGGING\n size_t i = m_offspring_index;\n while( mate_it != mate_end ) {\n \n assert( mate_it->first < m_parental->individual_count() );\n\n unsigned int offs = mate_it->first;\n\n individual_type ind = m_parental->getIndividual( offs );\n genome_type c0 = m_offspring->create_sequence();\n\n event_type evts;\n\n fill_events( evts, event_dist( *m_rng ) );\n classifier_type cfier0( &m_alleles->getPositions(), evts );\n bool _swap = bias_dist( *m_rng );\n\n#ifdef 
DEBUGGING\n// BOOST_LOG_TRIVIAL(debug) << mate_it->first << \"; Crossover s0: \" << ind.first << \" - \" << ind.first.size() << \"; s1: \" << ind.second << \" - \" << ind.second.size() << \"; child: \" << c0.first << \"; event size: \" << evts.size();\n#endif // DEBUGGING\n run_crossover_task( cfier0, ind.first, ind.second, c0, _swap );\n\n assert( mate_it->second < m_parental->individual_count() );\n\n genome_type c1 = m_offspring->create_sequence();\n ind = m_parental->getIndividual( mate_it->second );\n#ifdef DEBUGGING\n// BOOST_LOG_TRIVIAL(debug) << mate_it->second << \"; Crossover s0: \" << ind.first << \" - \" << ind.first.size() << \"; s1: \" << ind.second << \" - \" << ind.second.size() << \"; child: \" << c1.first << \"; event size: \" << evts.size();\n#endif // DEBUGGING\n\n event_type evts1;\n\n fill_events( evts1, event_dist( *m_rng ) );\n\n classifier_type cfier1( &m_alleles->getPositions(), evts1 );\n\n _swap = bias_dist( *m_rng );\n run_crossover_task( cfier1, ind.first, ind.second, c1, _swap);\n\n m_offspring->setIndividual( i++, c0, c1 );\n\n ++mate_it;\n }\n\n#ifdef DEBUGGING\n BOOST_LOG_TRIVIAL(debug) << \"End Batch Crossover Task\";\n#endif // DEBUGGING\n }\n\n virtual ~batch_crossover_task() {}\n\nprotected:\n\n inline void fill_events( event_type & evt, unsigned int N ) {\n while( N-- ) {\n evt.push_back( m_pos_dist( *m_rng ) );\n }\n }\n\n event_type make_events( unsigned int N ) {\n event_type res;\n\n while( N-- ) {\n res.push_back( m_pos_dist( *m_rng ) );\n }\n\n return res;\n }\n\n void run_crossover_task( classifier_type & cls, genome_type & top, genome_type & bottom, genome_type & res, bool should_swap_strands ) {\n\n if( cls.event_count() == 0 ) {\n // there are no crossover cls\n // therefore, offspring strand will be a copy of the top strand\n res = ((should_swap_strands) ? 
bottom : top);\n } else if( should_swap_strands ) {\n run_crossover_task( cls, bottom, top, res );\n } else {\n run_crossover_task( cls, top, bottom, res );\n }\n }\n\n void run_crossover_task( classifier_type & cls, genome_type & top, genome_type & bottom, genome_type & res ) {\n typedef typename space_type::base_genome_type::sequence_type::const_sequence_iterator const_iterator;\n typedef block_crossover< classifier_type, BlockType > crossover_type;\n typedef BlockType block_type;\n\n crossover_type xover( cls );\n \n const_iterator tb, te, bb, be;\n if( top ) {\n tb = top->begin_sequence();\n te = top->end_sequence();\n } else {\n te = tb;\n }\n\n if( bottom ) {\n bb = bottom->begin_sequence();\n be = bottom->end_sequence();\n } else {\n be = bb;\n }\n\n unsigned int i = 0;\n// bool top_equal = true;\n// bool bottom_equal = true;\n while( true ) {\n if( tb == te ) {\n while( bb != be ) {\n const block_type t = crossover_type::bit_helper_type::ALL_UNSET;\n const block_type b = *bb++;\n const block_type o = xover.crossover( t, b, i );\n// top_equal = top_equal && (o == t );\n// bottom_equal = bottom_equal && (o == b);\n res->append_sequence(o);\n i += crossover_type::bit_helper_type::BITS_PER_BLOCK;\n }\n break;\n } else if( bb == be ) {\n while( tb != te ) {\n const block_type t = *tb++;\n const block_type b = crossover_type::bit_helper_type::ALL_UNSET;\n const block_type o = xover.crossover( t, b, i );\n// bottom_equal = bottom_equal && (o == b );\n// top_equal = top_equal && (o == t);\n res->append_sequence(o);\n i += crossover_type::bit_helper_type::BITS_PER_BLOCK;\n }\n break;\n }\n\n const block_type t = *tb++;\n const block_type b = *bb++;\n\n const block_type o = xover.crossover(t, b, i );\n\n// bottom_equal = bottom_equal && (o == b );\n// top_equal = top_equal && (o == t);\n\n res->append_sequence( o );\n i += crossover_type::bit_helper_type::BITS_PER_BLOCK;\n }\n\n// if( top_equal ) {\n// res = top;\n// } else if( bottom_equal ) {\n// res = bottom;\n// 
}\n }\n\n random_engine_type * m_rng;\n space_type * m_parental, * m_offspring;\n allele_type * m_alleles;\n\n mate_pair_type m_parents;\n\n unsigned int m_offspring_index;\n\n double m_recomb_rate, m_seq_bias;\n\n position_distribution_type m_pos_dist;\n};\n\n} // namespace genetics\n} // namespace clotho\n\n#endif // CLOTHO_BATCH_CROSSOVER_TASK_POPULATION_SPACE_HPP_\n\n", "meta": {"hexsha": "d01e2a82e2e6865cf7697b49bff11a2cf6a85125", "size": 9480, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/clotho/data_spaces/crossover/batch_crossover_task_population_space.hpp", "max_stars_repo_name": "putnampp/clotho", "max_stars_repo_head_hexsha": "6dbfd82ef37b4265381cd78888cd6da8c61c68c2", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2015-06-16T21:27:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T23:26:54.000Z", "max_issues_repo_path": "include/clotho/data_spaces/crossover/batch_crossover_task_population_space.hpp", "max_issues_repo_name": "putnampp/clotho", "max_issues_repo_head_hexsha": "6dbfd82ef37b4265381cd78888cd6da8c61c68c2", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2015-06-16T21:12:42.000Z", "max_issues_repo_issues_event_max_datetime": "2015-06-23T12:41:00.000Z", "max_forks_repo_path": "include/clotho/data_spaces/crossover/batch_crossover_task_population_space.hpp", "max_forks_repo_name": "putnampp/clotho", "max_forks_repo_head_hexsha": "6dbfd82ef37b4265381cd78888cd6da8c61c68c2", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.92, "max_line_length": 240, "alphanum_fraction": 0.6014767932, "num_tokens": 2141, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO\n\n", "lm_q1_score": 0.523420348936324, "lm_q2_score": 0.3812195662561499, "lm_q1q2_score": 0.1995380783911481}} {"text": "// Boost.Geometry\n\n// Copyright (c) 2020-2021, Oracle and/or its affiliates.\n\n// Contributed and/or modified by Adam Wulkiewicz, on behalf of Oracle\n\n// Licensed under the Boost Software License version 1.0.\n// http://www.boost.org/users/license.html\n\n#ifndef BOOST_GEOMETRY_STRATEGIES_INDEX_SPHERICAL_HPP\n#define BOOST_GEOMETRY_STRATEGIES_INDEX_SPHERICAL_HPP\n\n\n#include \n#include \n\n\nnamespace boost { namespace geometry\n{\n\nnamespace strategies { namespace index\n{\n\n#ifndef DOXYGEN_NO_DETAIL\nnamespace detail\n{\n\ntemplate \nclass spherical\n : public strategies::distance::detail::spherical\n{\n using base_t = strategies::distance::detail::spherical;\n\npublic:\n spherical() = default;\n\n template \n explicit spherical(RadiusOrSphere const& radius_or_sphere)\n : base_t(radius_or_sphere)\n {}\n};\n\n\n} // namespace detail\n#endif // DOXYGEN_NO_DETAIL\n\n\ntemplate \nclass spherical\n : public strategies::index::detail::spherical\n{};\n\n\nnamespace services\n{\n\ntemplate \nstruct default_strategy\n{\n using type = strategies::index::spherical<>;\n};\n\ntemplate \nstruct default_strategy\n{\n using type = strategies::index::spherical<>;\n};\n\ntemplate \nstruct default_strategy\n{\n using type = strategies::index::spherical<>;\n};\n\n\n} // namespace services\n\n\n}}}} // namespace boost::geometry::strategy::index\n\n#endif // BOOST_GEOMETRY_STRATEGIES_INDEX_SPHERICAL_HPP\n", "meta": {"hexsha": "c3502207b796df3decc50e2f5bc54e4de1501fbf", "size": 1827, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "boost/geometry/strategies/index/spherical.hpp", "max_stars_repo_name": "pranavgo/RRT", "max_stars_repo_head_hexsha": "87148c3ddb91600f4e74f00ffa8af14b54689aa4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 326.0, "max_stars_repo_stars_event_min_datetime": "2015-02-08T13:47:49.000Z", 
"max_stars_repo_stars_event_max_datetime": "2022-03-16T02:13:59.000Z", "max_issues_repo_path": "boost/geometry/strategies/index/spherical.hpp", "max_issues_repo_name": "pranavgo/RRT", "max_issues_repo_head_hexsha": "87148c3ddb91600f4e74f00ffa8af14b54689aa4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 623.0, "max_issues_repo_issues_event_min_datetime": "2015-01-02T23:45:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T11:15:23.000Z", "max_forks_repo_path": "boost/geometry/strategies/index/spherical.hpp", "max_forks_repo_name": "pranavgo/RRT", "max_forks_repo_head_hexsha": "87148c3ddb91600f4e74f00ffa8af14b54689aa4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 215.0, "max_forks_repo_forks_event_min_datetime": "2015-01-14T15:50:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T03:58:36.000Z", "avg_line_length": 22.2804878049, "max_line_length": 96, "alphanum_fraction": 0.7717569787, "num_tokens": 403, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.523420348936324, "lm_q2_score": 0.3812195662561499, "lm_q1q2_score": 0.1995380783911481}} {"text": "// Copyright \n\n#include \n#include \n#include \n#include \n\n#define BOOST_TEST_MODULE test_cross\n#define BOOST_TEST_DYN_LINK\n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\n// #include \n\nusing namespace aoi;\nusing namespace aoi::cross;\n\nBOOST_AUTO_TEST_SUITE(test_cross)\n\nclass Player;\n\n\nclass CrossAoiTest: public CrossAoi {\n public:\n CrossAoiTest(): CrossAoi(0, 0, 0, 0, 0, 0, 0) {}\n\n CrossAoiTest(float map_bound_xmin, float map_bound_xmax, float map_bound_zmin,\n float map_bound_zmax, size_t beacon_x, size_t beacon_z, float beacon_radius)\n : CrossAoi(map_bound_xmin, map_bound_xmax, map_bound_zmin,\n map_bound_zmax, beacon_x, beacon_z, beacon_radius)\n {}\n\nfriend class Player;\n};\n\n\nclass Player {\n public:\n Player() : nuid_(GenNuid()), pos_(0, 0, 0) {}\n Player(Nuid nuid, Pos pos) : nuid_(nuid), pos_(pos) {}\n\n public:\n inline Nuid AddSensor(float radius, bool log = false) {\n auto sensor_id = GenNuid();\n aoi_->AddSensor(nuid_, sensor_id, radius);\n if (log) {\n printf(\"Player %lu Add Sensor %lu, radius %f\\n\", nuid_, sensor_id, radius);\n }\n return sensor_id;\n }\n\n inline void AddToAoi(CrossAoiTest* aoi, bool log = false) {\n aoi_ = aoi;\n aoi_->AddPlayer(nuid_, pos_.x, pos_.y, pos_.z);\n player_aoi_ = aoi_->player_map_.find(nuid_)->second.get();\n\n if (log) {\n printf(\"Add Player: %lu, Pos(%f, %f, %f)\\n\",\n nuid_, pos_.x, pos_.y, pos_.z);\n }\n }\n\n inline void RemoveFromAoi(bool log = false) {\n if (log) {\n printf(\"Remove Player: %lu, Pos(%f, %f, %f)\\n\",\n nuid_, pos_.x, pos_.y, pos_.z);\n }\n aoi_->RemovePlayer(nuid_);\n aoi_ = nullptr;\n player_aoi_ = nullptr;\n }\n\n inline void MoveDelta(float x, float y, float z) {\n MoveTo(pos_.x + x, pos_.y + y, pos_.z + z);\n }\n\n inline void MoveTo(float x, float y, float z, bool log = false) {\n pos_.Set(x, y, 
z);\n if (aoi_) {\n aoi_->UpdatePos(nuid_, x, y, z);\n }\n\n if (log) {\n printf(\"Player %lu MoveTo (%f, %f, %f)\\n\",\n nuid_, x, y, z);\n }\n }\n\n public:\n Nuid nuid_;\n Pos pos_;\n\n PlayerAoi *player_aoi_;\n CrossAoiTest *aoi_;\n};\n\n\nvoid PrintAoiUpdateInfos(const AoiUpdateInfos &info) {\n printf(\"=========================== update_infos\\n\");\n for (auto &elem : info) {\n auto& update_info = elem.second;\n printf(\"Player: %lu; update sensors size: %lu\\n\",\n update_info.nuid, update_info.sensor_update_list.size());\n for (auto &sensor : update_info.sensor_update_list) {\n printf(\" Sensor_id: %lu, \\n\", sensor.sensor_id);\n\n printf(\" enters: (%lu)[\", sensor.enters.size());\n for (auto &nuid : sensor.enters) {\n printf(\"%lu, \", nuid);\n }\n printf(\"]\\n\");\n\n printf(\" leaves: (%lu)[\", sensor.leaves.size());\n for (auto &nuid : sensor.leaves) {\n printf(\"%lu,\", nuid);\n }\n printf(\"]\\n\");\n }\n }\n printf(\"===========================\\n\\n\");\n}\n\n\nvoid CheckUpdateInfos(const AoiUpdateInfos &update_infos, const AoiUpdateInfos &require_infos) {\n // BOOST_TEST_REQUIRE((update_infos == require_infos));\n BOOST_TEST_REQUIRE((update_infos.size() == require_infos.size()));\n for (const auto &pair : update_infos) {\n auto &update_info = pair.second;\n BOOST_TEST_REQUIRE((require_infos.find(update_info.nuid) != require_infos.end()));\n auto &require_info = require_infos.find(update_info.nuid)->second;\n\n BOOST_TEST_REQUIRE(\n (update_info.sensor_update_list.size() == require_info.sensor_update_list.size()));\n\n for (size_t i =0; i < update_info.sensor_update_list.size(); ++i) {\n auto &sensor_info = update_info.sensor_update_list[i];\n auto &require_sensor_info = require_info.sensor_update_list[i];\n BOOST_TEST_REQUIRE((sensor_info.sensor_id == require_sensor_info.sensor_id));\n BOOST_TEST_REQUIRE((sensor_info.enters == require_sensor_info.enters));\n BOOST_TEST_REQUIRE((sensor_info.leaves == require_sensor_info.leaves));\n }\n 
}\n}\n\n\nvoid TestSimple(bool log = false) {\n CrossAoiTest cross_aoi(-1000, 1000, -1000, 1000, 3, 3, 5);\n\n Player player1{GenNuid(), {0, 0, 0}};\n player1.AddToAoi(&cross_aoi, log);\n auto sensor_id1 = player1.AddSensor(10, log);\n\n Player player2{GenNuid(), {0, 0, 0}};\n player2.AddToAoi(&cross_aoi, log);\n auto sensor_id2 = player2.AddSensor(5, log);\n\n auto update_infos = cross_aoi.Tick();\n if (log) PrintAoiUpdateInfos(update_infos);\n if (log) cross_aoi.PrintAllNodeList();\n\n AoiUpdateInfos require_infos = {\n {player1.nuid_, {player1.nuid_, {{sensor_id1, {player2.nuid_}, {}}}}},\n {player2.nuid_, {player2.nuid_, {{sensor_id2, {player1.nuid_}, {}}}}},\n };\n CheckUpdateInfos(update_infos, require_infos);\n\n // player2 move to (6, 0, 0)\n player2.MoveTo(6, 0, 0, log);\n\n update_infos = cross_aoi.Tick();\n if (log) PrintAoiUpdateInfos(update_infos);\n\n require_infos = {\n {player2.nuid_, {player2.nuid_, {{sensor_id2, {}, {player1.nuid_}}}}},\n };\n CheckUpdateInfos(update_infos, require_infos);\n\n // player2 move to (600, 0, 100)\n if (log) cross_aoi.PrintAllNodeList();\n player2.MoveTo(600, 0, 100, log);\n update_infos = cross_aoi.Tick();\n if (log) cross_aoi.PrintAllNodeList();\n if (log) PrintAoiUpdateInfos(update_infos);\n require_infos = {\n {player1.nuid_, {player1.nuid_, {{sensor_id1, {}, {player2.nuid_}}}}},\n };\n CheckUpdateInfos(update_infos, require_infos);\n\n player1.MoveTo(601, 100, 101, log);\n if (log) cross_aoi.PrintAllNodeList();\n update_infos = cross_aoi.Tick();\n if (log) PrintAoiUpdateInfos(update_infos);\n require_infos = {\n {player1.nuid_, {player1.nuid_, {{sensor_id1, {player2.nuid_}, {}}}}},\n {player2.nuid_, {player2.nuid_, {{sensor_id2, {player1.nuid_}, {}}}}},\n };\n CheckUpdateInfos(update_infos, require_infos);\n\n player2.RemoveFromAoi(log);\n update_infos = cross_aoi.Tick();\n if (log) PrintAoiUpdateInfos(update_infos);\n require_infos = {\n {player1.nuid_, {player1.nuid_, {{sensor_id1, {}, {player2.nuid_}}}}},\n 
};\n CheckUpdateInfos(update_infos, require_infos);\n if (log) cross_aoi.PrintAllNodeList();\n}\n\n\nBOOST_AUTO_TEST_CASE(test_simple) {\n bool log = false;\n TestSimple(log);\n}\n\n\nstd::vector GenPlayers(const size_t player_num, const float map_size) {\n std::vector players(player_num);\n\n boost::random::mt19937 random_generator(std::time(0));\n boost::random::uniform_real_distribution pos_generator(-map_size, map_size);\n\n for (int i : boost::irange(player_num)) {\n auto &player = players[i];\n player.pos_.Set(pos_generator(random_generator), 0, pos_generator(random_generator));\n }\n\n BOOST_TEST_REQUIRE((players.size() == player_num));\n return players;\n}\n\n\nstd::vector GenMovements(const size_t player_num, const float length) {\n std::vector movements;\n movements.reserve(player_num);\n\n boost::random::mt19937 random_generator(std::time(0));\n boost::random::uniform_real_distribution angle_gen(0, 360);\n for (int UNUSED(i) : boost::irange(player_num)) {\n float angle = angle_gen(random_generator);\n float radian = 2 * M_PI * angle / 360;\n movements.emplace_back(std::cos(radian) * length, 0, std::sin(radian) * length);\n }\n\n return movements;\n}\n\nvoid TestOneMilestone(std::vector *players, const size_t player_num,\n const float map_size) {\n printf(\"\\n===Begin Milestore: player_num = %lu, map_size = (%f, %f)\\n\",\n player_num, -map_size, map_size);\n\n boost::timer::cpu_timer run_timer;\n int times = 1;\n std::vector cross_aois;\n for (auto UNUSED(i) : boost::irange(times)) {\n cross_aois.emplace_back(-map_size, map_size, -map_size, map_size, 3, 3, 100);\n }\n // ProfilerStart(\"a.prof\");\n for (auto &cross_aoi : cross_aois) {\n for (auto &player : *players) {\n player.AddToAoi(&cross_aoi);\n player.AddSensor(100);\n }\n BOOST_TEST_REQUIRE((cross_aoi.GetPlayerMap().size() == player_num + 9));\n }\n // ProfilerStop();\n run_timer.stop();\n printf(\"Add Player (%i times)\", times);\n std::cout << run_timer.format();\n\n for (auto &cross_aoi : 
cross_aois) {\n cross_aoi.Tick();\n }\n\n run_timer.start();\n for (auto &cross_aoi : cross_aois) {\n cross_aoi.Tick();\n }\n run_timer.stop();\n\n printf(\"Tick (%i times)\", times);\n std::cout << run_timer.format();\n\n float speed = 6;\n float delta_time = 0.1;\n auto movements = GenMovements(player_num, delta_time * speed);\n times = 1 / delta_time;\n run_timer.start();\n for (int UNUSED(t) : boost::irange(times)) {\n for (int i : boost::irange(player_num)) {\n auto &player = players->at(i);\n auto &move = movements[i];\n player.MoveDelta(move.x, move.y, move.z);\n }\n }\n run_timer.stop();\n printf(\"Update Pos (%i times)\", times);\n std::cout << run_timer.format();\n\n printf(\"===End Milestore\\n\");\n}\n\n\nBOOST_AUTO_TEST_CASE(test_milestone) {\n for (size_t player_num : {100, 1000, 10000}) {\n for (float map_size : {50, 100, 1000, 10000}) {\n auto players = GenPlayers(player_num, map_size);\n TestOneMilestone(&players, player_num, map_size);\n }\n }\n}\n\nBOOST_AUTO_TEST_SUITE_END()\n", "meta": {"hexsha": "5b5665d5859f24b5d1138016bc44c3ed8d1051cd", "size": 9407, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/test_cross.cpp", "max_stars_repo_name": "disenone/AoiTesting", "max_stars_repo_head_hexsha": "3e72c211ae6e83f9acf2bc480d8e9689dd74d7b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_cross.cpp", "max_issues_repo_name": "disenone/AoiTesting", "max_issues_repo_head_hexsha": "3e72c211ae6e83f9acf2bc480d8e9689dd74d7b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_cross.cpp", "max_forks_repo_name": "disenone/AoiTesting", "max_forks_repo_head_hexsha": "3e72c211ae6e83f9acf2bc480d8e9689dd74d7b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 
null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3052959502, "max_line_length": 96, "alphanum_fraction": 0.6598277878, "num_tokens": 2748, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.5234203489363239, "lm_q2_score": 0.3812195662561499, "lm_q1q2_score": 0.19953807839114804}} {"text": "/**************************************************************************\\\n *\n * This file is part of the Coin 3D visualization library.\n * Copyright (C) by Kongsberg Oil & Gas Technologies.\n *\n * This library is free software; you can redistribute it and/or\n * modify it under the terms of the GNU General Public License\n * (\"GPL\") version 2 as published by the Free Software Foundation.\n * See the file LICENSE.GPL at the root directory of this source\n * distribution for additional information about the GNU GPL.\n *\n * For using Coin with software that can not be combined with the GNU\n * GPL, and for taking advantage of the additional benefits of our\n * support services, please contact Kongsberg Oil & Gas Technologies\n * about acquiring a Coin Professional Edition License.\n *\n * See http://www.coin3d.org/ for more information.\n *\n * Kongsberg Oil & Gas Technologies, Bygdoy Alle 5, 0257 Oslo, NORWAY.\n * http://www.sim.no/ sales@sim.no coin-support@coin3d.org\n *\n\\**************************************************************************/\n\n#include \n\n#include \n\n#include \n#include \n#include \"engines/SoSubEngineP.h\"\n\n/*!\n \\class SoHeightMapToNormalMap SoHeightMapToNormalMap.h Inventor/engines/SoHeightMapToNormalMap.h\n \\brief Engine for computing a normal map from a height map.\n\n This engine will create a normal map texture from a height map texture.\n You can use it in an Inventor file like this:\n\n \\code\n Texture2 {\n image = HeightMapToNormalMap {\n sourceImage = Texture2 { filename \"HeightMap.jpg\" } . image\n } . 
image\n }\n \\endcode\n\n Be aware that the field connections will remain active, so both\n Texture2 nodes and the HeightMapToNormalMap engine will be kept resident\n in memory (unless you intervene manually and detach the engine) even\n though only the \"outer\" Texture2 node is needed. This can give quite\n a big memory use overhead.\n\n \\ingroup engines\n \\COIN_CLASS_EXTENSION\n \\since Coin 3.0\n*/\n\n/*!\n \\enum SoHeightMapToNormalMap::NormalMapFormat\n Enumeration of available normal map formats.\n*/\n\n/*!\n \\var SoHeightMapToNormalMap::NormalMapFormat SoHeightMapToNormalMap::INT8\n Encode the normals as a 3 component byte texture.\n This is the only option for now, as long as float textures are not conveniently\n supported in Coin.\n*/\n\n/*!\n \\var SoMFEnum SoHeightMapToNormalMap::format\n This setting decides what kind of normal map is generated. For now, only the\n INT8 format is available, and it is the default value.\n*/\n\nSO_ENGINE_SOURCE(SoHeightMapToNormalMap);\n\n/*!\n Class initializer.\n*/\nvoid\nSoHeightMapToNormalMap::initClass(void)\n{\n SO_ENGINE_INTERNAL_INIT_CLASS(SoHeightMapToNormalMap);\n}\n\n/*!\n Constructor.\n*/\nSoHeightMapToNormalMap::SoHeightMapToNormalMap(void)\n{\n SO_ENGINE_INTERNAL_CONSTRUCTOR(SoHeightMapToNormalMap);\n\n SO_ENGINE_ADD_INPUT(format, (INT8));\n\n SO_ENGINE_DEFINE_ENUM_VALUE(NormalMapFormat, INT8);\n SO_ENGINE_SET_SF_ENUM_TYPE(format, NormalMapFormat);\n}\n\n/*!\n Static function for computing a normal map from a height map.\n This function can be used directly without any engine instantiation.\n*/\nvoid\nSoHeightMapToNormalMap::convert(const unsigned char * srcptr, SbVec2s size, int nc, SbImage & dst_out)\n{\n float dx, dy;\n int width = size[0];\n int height = size[1];\n boost::scoped_array dstarray(new unsigned char[width*height*3]);\n unsigned char * dstptr = dstarray.get();\n unsigned char red;\n SbVec3f n;\n\n#define GET_PIXEL_RED(x_, y_) \\\n srcptr[(y_)*width*nc + (x_)*nc]\n\n for (int y = 0; y < 
height; y++) {\n for (int x = 0; x < width; x++) {\n // do Y Sobel filter\n red = GET_PIXEL_RED((x-1+width) % width, (y+1) % height);\n dy = static_cast(red) / 255.0f * -1.0f;\n\n red = GET_PIXEL_RED(x % width, (y+1) % height);\n dy += static_cast(red) / 255.0f * -2.0f;\n\n red = GET_PIXEL_RED((x+1) % width, (y+1) % height);\n dy += static_cast(red) / 255.0f * -1.0f;\n\n red = GET_PIXEL_RED((x-1+width) % width, (y-1+height) % height);\n dy += static_cast(red) / 255.0f * 1.0f;\n\n red = GET_PIXEL_RED(x % width, (y-1+height) % height);\n dy += static_cast(red) / 255.0f * 2.0f;\n\n red = GET_PIXEL_RED((x+1) % width, (y-1+height) % height);\n dy += static_cast(red) / 255.0f * 1.0f;\n\n // Do X Sobel filter\n red = GET_PIXEL_RED((x-1+width) % width, (y-1+height) % height);\n dx = static_cast(red) / 255.0f * -1.0f;\n\n red = GET_PIXEL_RED((x-1+width) % width, y % height);\n dx += static_cast(red) / 255.0f * -2.0f;\n\n red = GET_PIXEL_RED((x-1+width) % width, (y+1) % height);\n dx += static_cast(red) / 255.0f * -1.0f;\n\n red = GET_PIXEL_RED((x+1) % width, (y-1+height) % height);\n dx += static_cast(red) / 255.0f * 1.0f;\n\n red = GET_PIXEL_RED((x+1) % width, y % height);\n dx += static_cast(red) / 255.0f * 2.0f;\n\n red = GET_PIXEL_RED((x+1) % width, (y+1) % height);\n dx += static_cast(red) / 255.0f * 1.0f;\n\n n[0] = -dx;\n n[1] = -dy;\n n[2] = 1.0f;\n (void) n.normalize();\n\n *dstptr++ = static_cast(SbMin((n[0]+1.0f) * 128.0f, 255.0f));\n *dstptr++ = static_cast(SbMin((n[1]+1.0f) * 128.0f, 255.0f));\n *dstptr++ = static_cast(SbMin((n[2]+1.0f) * 128.0f, 255.0f));\n }\n }\n#undef GET_PIXEL_RED\n dst_out.setValue(size, 3, dstarray.get());\n}\n\nvoid\nSoHeightMapToNormalMap::inputChanged(SoField * which)\n{\n // in case we need to override later\n inherited::inputChanged(which);\n}\n\nvoid\nSoHeightMapToNormalMap::evaluate(void)\n{\n SbVec2s size;\n int nc;\n const unsigned char * ptr =\n static_cast(sourceImage.getValue(size, nc));\n\n SbImage targetimg;\n 
SoHeightMapToNormalMap::convert(ptr, size, nc, targetimg);\n\n ptr = static_cast(targetimg.getValue(size, nc));\n SO_ENGINE_OUTPUT(image, SoSFImage, setValue(size, nc, ptr));\n}\n", "meta": {"hexsha": "886b255d172e83d8498c5ecec13f2b09d3b1d02f", "size": 6093, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "grasp_generation/graspitmodified_lm/Coin-3.1.3/src/engines/SoHeightMapToNormalMap.cpp", "max_stars_repo_name": "KraftOreo/EBM_Hand", "max_stars_repo_head_hexsha": "9ab1722c196b7eb99b4c3ecc85cef6e8b1887053", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "grasp_generation/graspitmodified_lm/Coin-3.1.3/src/engines/SoHeightMapToNormalMap.cpp", "max_issues_repo_name": "KraftOreo/EBM_Hand", "max_issues_repo_head_hexsha": "9ab1722c196b7eb99b4c3ecc85cef6e8b1887053", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "grasp_generation/graspitmodified_lm/Coin-3.1.3/src/engines/SoHeightMapToNormalMap.cpp", "max_forks_repo_name": "KraftOreo/EBM_Hand", "max_forks_repo_head_hexsha": "9ab1722c196b7eb99b4c3ecc85cef6e8b1887053", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5699481865, "max_line_length": 102, "alphanum_fraction": 0.6638765797, "num_tokens": 1809, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.5234203340678567, "lm_q2_score": 0.38121956625614994, "lm_q1q2_score": 0.19953807272299745}} {"text": "//////////////////////////////////////////////////////////////////////////////\n//\n// (C) Copyright Ion Gaztanaga 2015-2015. Distributed under the Boost\n// Software License, Version 1.0. 
(See accompanying file\n// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n//\n// See http://www.boost.org/libs/container for documentation.\n//\n//////////////////////////////////////////////////////////////////////////////\n\n#define BOOST_CONTAINER_SOURCE\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace\n{\n\n#ifdef BOOST_HAS_INTPTR_T\ntypedef boost::uintptr_t uintptr_type;\n#else\ntypedef std::size_t uintptr_type;\n#endif\n\nstatic const std::size_t minimum_buffer_size = 2 * sizeof(void*);\n\n} // namespace\n\nnamespace boost\n{\nnamespace container\n{\nnamespace pmr\n{\n\nvoid monotonic_buffer_resource::increase_next_buffer()\n{\n m_next_buffer_size = (std::size_t(-1) / 2 < m_next_buffer_size)\n ? std::size_t(-1)\n : m_next_buffer_size * 2;\n}\n\nvoid monotonic_buffer_resource::increase_next_buffer_at_least_to(\n std::size_t minimum_size)\n{\n if (m_next_buffer_size < minimum_size)\n {\n if (bi::detail::is_pow2(minimum_size))\n {\n m_next_buffer_size = minimum_size;\n }\n else if (std::size_t(-1) / 2 < minimum_size)\n {\n m_next_buffer_size = minimum_size;\n }\n else\n {\n m_next_buffer_size = bi::detail::ceil_pow2(minimum_size);\n }\n }\n}\n\nmonotonic_buffer_resource::monotonic_buffer_resource(memory_resource* upstream)\n BOOST_NOEXCEPT\n : m_memory_blocks(upstream ? *upstream : *get_default_resource()),\n m_current_buffer(0),\n m_current_buffer_size(0u),\n m_next_buffer_size(initial_next_buffer_size),\n m_initial_buffer(0),\n m_initial_buffer_size(0u)\n{\n}\n\nmonotonic_buffer_resource::monotonic_buffer_resource(\n std::size_t initial_size, memory_resource* upstream) BOOST_NOEXCEPT\n : m_memory_blocks(upstream ? 
*upstream : *get_default_resource()),\n m_current_buffer(0),\n m_current_buffer_size(0u),\n m_next_buffer_size(minimum_buffer_size),\n m_initial_buffer(0),\n m_initial_buffer_size(0u)\n{ // In case initial_size is zero\n this->increase_next_buffer_at_least_to(initial_size + !initial_size);\n}\n\nmonotonic_buffer_resource::monotonic_buffer_resource(\n void* buffer, std::size_t buffer_size,\n memory_resource* upstream) BOOST_NOEXCEPT\n : m_memory_blocks(upstream ? *upstream : *get_default_resource()),\n m_current_buffer(buffer),\n m_current_buffer_size(buffer_size),\n m_next_buffer_size(\n bi::detail::previous_or_equal_pow2(boost::container::dtl::max_value(\n buffer_size, std::size_t(initial_next_buffer_size)))),\n m_initial_buffer(buffer),\n m_initial_buffer_size(buffer_size)\n{\n this->increase_next_buffer();\n}\n\nmonotonic_buffer_resource::~monotonic_buffer_resource()\n{\n this->release();\n}\n\nvoid monotonic_buffer_resource::release() BOOST_NOEXCEPT\n{\n m_memory_blocks.release();\n m_current_buffer = m_initial_buffer;\n m_current_buffer_size = m_initial_buffer_size;\n m_next_buffer_size = initial_next_buffer_size;\n}\n\nmemory_resource*\n monotonic_buffer_resource::upstream_resource() const BOOST_NOEXCEPT\n{\n return &m_memory_blocks.upstream_resource();\n}\n\nstd::size_t monotonic_buffer_resource::remaining_storage(\n std::size_t alignment,\n std::size_t& wasted_due_to_alignment) const BOOST_NOEXCEPT\n{\n const uintptr_type up_alignment_minus1 = alignment - 1u;\n const uintptr_type up_alignment_mask = ~up_alignment_minus1;\n const uintptr_type up_addr = uintptr_type(m_current_buffer);\n const uintptr_type up_aligned_addr =\n (up_addr + up_alignment_minus1) & up_alignment_mask;\n wasted_due_to_alignment = std::size_t(up_aligned_addr - up_addr);\n return m_current_buffer_size <= wasted_due_to_alignment\n ? 
0u\n : m_current_buffer_size - wasted_due_to_alignment;\n}\n\nstd::size_t monotonic_buffer_resource::remaining_storage(\n std::size_t alignment) const BOOST_NOEXCEPT\n{\n std::size_t ignore_this;\n return this->remaining_storage(alignment, ignore_this);\n}\n\nconst void* monotonic_buffer_resource::current_buffer() const BOOST_NOEXCEPT\n{\n return m_current_buffer;\n}\n\nstd::size_t monotonic_buffer_resource::next_buffer_size() const BOOST_NOEXCEPT\n{\n return m_next_buffer_size;\n}\n\nvoid* monotonic_buffer_resource::allocate_from_current(std::size_t aligner,\n std::size_t bytes)\n{\n char* p = (char*)m_current_buffer + aligner;\n m_current_buffer = p + bytes;\n m_current_buffer_size -= aligner + bytes;\n return p;\n}\n\nvoid* monotonic_buffer_resource::do_allocate(std::size_t bytes,\n std::size_t alignment)\n{\n if (alignment > memory_resource::max_align)\n throw_bad_alloc();\n\n // See if there is room in current buffer\n std::size_t aligner = 0u;\n if (this->remaining_storage(alignment, aligner) < bytes)\n {\n // Update next_buffer_size to at least bytes\n this->increase_next_buffer_at_least_to(bytes);\n // Now allocate and update internal data\n m_current_buffer = (char*)m_memory_blocks.allocate(m_next_buffer_size);\n m_current_buffer_size = m_next_buffer_size;\n this->increase_next_buffer();\n }\n // Enough internal storage, extract from it\n return this->allocate_from_current(aligner, bytes);\n}\n\nvoid monotonic_buffer_resource::do_deallocate(\n void* p, std::size_t bytes, std::size_t alignment) BOOST_NOEXCEPT\n{\n (void)p;\n (void)bytes;\n (void)alignment;\n}\n\nbool monotonic_buffer_resource::do_is_equal(const memory_resource& other) const\n BOOST_NOEXCEPT\n{\n return this == dynamic_cast(&other);\n}\n\n} // namespace pmr\n} // namespace container\n} // namespace boost\n\n#include \n", "meta": {"hexsha": "c80d57a74d53d4931690e2d2fd40ecb86b8b1ffd", "size": 6314, "ext": "cpp", "lang": "C++", "max_stars_repo_path": 
"openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/src/debug/boost/1.69.0-r0/boost_1_69_0/libs/container/src/monotonic_buffer_resource.cpp", "max_stars_repo_name": "sotaoverride/backup", "max_stars_repo_head_hexsha": "ca53a10b72295387ef4948a9289cb78ab70bc449", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/src/debug/boost/1.69.0-r0/boost_1_69_0/libs/container/src/monotonic_buffer_resource.cpp", "max_issues_repo_name": "sotaoverride/backup", "max_issues_repo_head_hexsha": "ca53a10b72295387ef4948a9289cb78ab70bc449", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/src/debug/boost/1.69.0-r0/boost_1_69_0/libs/container/src/monotonic_buffer_resource.cpp", "max_forks_repo_name": "sotaoverride/backup", "max_forks_repo_head_hexsha": "ca53a10b72295387ef4948a9289cb78ab70bc449", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6504854369, "max_line_length": 79, "alphanum_fraction": 0.6992397846, "num_tokens": 1426, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.6150878555160665, "lm_q2_score": 0.32423539245106087, "lm_q1q2_score": 0.19943325222513325}} {"text": "/**\n * Copyright (c) 2017 Melown Technologies SE\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * * Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n *\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n */\n#include \n#include \n#include \n\n#include \"dbglog/dbglog.hpp\"\n\n#include \"utility/expect.hpp\"\n#include \"utility/binaryio.hpp\"\n\n#include \"math/math.hpp\"\n#include \"math/geometry.hpp\"\n#include \"math/transform.hpp\"\n\n#include \"imgproc/scanconversion.hpp\"\n\n#include \"half/half.hpp\"\n\n#include \"../storage/error.hpp\"\n\n#include \"mesh.hpp\"\n#include \"meshio.hpp\"\n#include \"multifile.hpp\"\n#include \"math.hpp\"\n#include \"tileindex.hpp\"\n\nnamespace fs = 
boost::filesystem;\nnamespace bio = boost::iostreams;\nnamespace bin = utility::binaryio;\n\nnamespace half = half_float::detail;\n\nnamespace vtslibs { namespace vts {\n\nnamespace {\n\n/** Geo coordinates to coverage mask mapping.\n * NB: result is in pixel system: pixel centers have integral indices\n */\nmath::Matrix4 geo2mask(const math::Extents2 &extents\n , const math::Size2 &gridSize)\n{\n math::Matrix4 trafo(boost::numeric::ublas::identity_matrix(4));\n\n auto es(size(extents));\n\n // scales\n math::Size2f scale(gridSize.width / es.width\n , gridSize.height / es.height);\n\n // scale to grid\n trafo(0, 0) = scale.width;\n trafo(1, 1) = -scale.height;\n\n // move to origin\n trafo(0, 3) = -extents.ll(0) * scale.width;\n trafo(1, 3) = extents.ur(1) * scale.height;\n\n return trafo;\n}\n\nvoid updateCoverage(Mesh::CoverageMask &cm, const SubMesh &sm\n , const math::Extents2 &sdsExtents\n , std::uint8_t smIndex)\n{\n const auto rasterSize(cm.size());\n auto trafo(geo2mask(sdsExtents, rasterSize));\n\n std::vector scanlines;\n cv::Point3f tri[3];\n for (const auto &face : sm.faces) {\n for (int i : { 0, 1, 2 }) {\n auto p(transform(trafo, sm.vertices[face[i]]));\n tri[i].x = p(0); tri[i].y = p(1); tri[i].z = p(2);\n }\n\n scanlines.clear();\n imgproc::scanConvertTriangle(tri, 0, rasterSize.height, scanlines);\n\n for (const auto &sl : scanlines) {\n imgproc::processScanline\n (sl, 0, rasterSize.width, [&](int x, int y, float)\n {\n cm.set(x, y, smIndex + 1);\n });\n }\n }\n}\n\n} // namespace\n\nvoid updateCoverage(Mesh &mesh, const SubMesh &sm\n , const math::Extents2 &sdsExtents\n , std::uint8_t smIndex)\n{\n updateCoverage(mesh.coverageMask, sm, sdsExtents, smIndex);\n}\n\nvoid generateCoverage(Mesh &mesh, const math::Extents2 &sdsExtents)\n{\n mesh.createCoverage(false);\n\n std::uint8_t smIndex(0);\n for (const auto &sm : mesh) {\n updateCoverage(mesh, sm, sdsExtents, smIndex++);\n }\n}\n\nvoid generateMeshMask(MeshMask &mask, const Mesh &mesh\n , const 
math::Extents2 &sdsExtents)\n{\n mask.createCoverage(false);\n mask.surfaceReferences.clear();\n\n std::uint8_t smIndex(0);\n for (const auto &sm : mesh) {\n updateCoverage(mask.coverageMask, sm, sdsExtents, smIndex++);\n mask.surfaceReferences.push_back(sm.surfaceReference);\n }\n}\n\n} } // namespace vtslibs::vts\n", "meta": {"hexsha": "c01764a3e21ece86c657d5bed0292f761fe6d334", "size": 4522, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "vts-libs/vts/meshcoverage.cpp", "max_stars_repo_name": "melowntech/vts-libs", "max_stars_repo_head_hexsha": "ffbf889b6603a8f95d3c12a2602232ff9c5d2236", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2020-04-20T01:44:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-15T06:54:51.000Z", "max_issues_repo_path": "externals/browser/externals/browser/externals/vts-libs/vts-libs/vts/meshcoverage.cpp", "max_issues_repo_name": "HanochZhu/vts-browser-unity-plugin", "max_issues_repo_head_hexsha": "32a22d41e21b95fb015326f95e401d87756d0374", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2020-01-29T16:30:49.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-03T15:21:29.000Z", "max_forks_repo_path": "externals/browser/externals/browser/externals/vts-libs/vts-libs/vts/meshcoverage.cpp", "max_forks_repo_name": "HanochZhu/vts-browser-unity-plugin", "max_forks_repo_head_hexsha": "32a22d41e21b95fb015326f95e401d87756d0374", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2019-09-25T05:10:07.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-25T05:10:07.000Z", "avg_line_length": 30.7619047619, "max_line_length": 78, "alphanum_fraction": 0.6656346749, "num_tokens": 1118, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.6150878555160665, "lm_q2_score": 0.3242353924510608, "lm_q1q2_score": 0.1994332522251332}} {"text": "//==============================================================================\n// Copyright 2003 - 2011 LASMEA UMR 6602 CNRS/Univ. Clermont II\n// Copyright 2009 - 2011 LRI UMR 8623 CNRS/Univ Paris Sud XI\n//\n// Distributed under the Boost Software License, Version 1.0.\n// See accompanying file LICENSE.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt\n//==============================================================================\n#ifndef BOOST_SIMD_TOOLBOX_REDUCTION_FUNCTIONS_SIMD_SSE_SSE2_COMPARE_EQUAL_HPP_INCLUDED\n#define BOOST_SIMD_TOOLBOX_REDUCTION_FUNCTIONS_SIMD_SSE_SSE2_COMPARE_EQUAL_HPP_INCLUDED\n\n#ifdef BOOST_SIMD_HAS_SSE2_SUPPORT\n#include \n#include \n#include \n#include \n\nnamespace boost { namespace simd { namespace ext\n{\n BOOST_SIMD_FUNCTOR_IMPLEMENTATION( boost::simd::tag::compare_equal_, boost::simd::tag::sse2_\n , (A0)\n , ((simd_,boost::simd::tag::sse_>))\n ((simd_,boost::simd::tag::sse_>))\n )\n {\n typedef typename meta::scalar_of::type sA0;\n typedef typename meta::as_logical::type result_type;\n BOOST_SIMD_FUNCTOR_CALL_REPEAT(2) { return result_type(_mm_movemask_pd(eq(a0,a1)) == 0X03); }\n };\n\n BOOST_SIMD_FUNCTOR_IMPLEMENTATION( boost::simd::tag::compare_equal_, boost::simd::tag::sse2_\n , (A0)\n , ((simd_,boost::simd::tag::sse_>))\n ((simd_,boost::simd::tag::sse_>))\n )\n {\n typedef typename meta::scalar_of::type sA0;\n typedef typename meta::as_logical::type result_type;\n BOOST_SIMD_FUNCTOR_CALL_REPEAT(2) { return result_type(_mm_movemask_ps(eq(a0,a1)) == 0X0F); }\n };\n\n BOOST_SIMD_FUNCTOR_IMPLEMENTATION( boost::simd::tag::compare_equal_, boost::simd::tag::sse2_\n , (A0)\n , ((simd_,boost::simd::tag::sse_>))\n ((simd_,boost::simd::tag::sse_>))\n )\n {\n typedef typename meta::scalar_of::type sA0;\n typedef typename meta::as_logical::type result_type;\n BOOST_SIMD_FUNCTOR_CALL_REPEAT(2)\n {\n 
return result_type(_mm_movemask_epi8(eq(a0,a1)) == 0X0FFFF);\n }\n };\n} } }\n\n#endif\n#endif\n", "meta": {"hexsha": "e9174a6bc75e27090537fed559cd61f4217d4f9b", "size": 2630, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "modules/boost/simd/reduction/include/boost/simd/toolbox/reduction/functions/simd/sse/sse2/compare_equal.hpp", "max_stars_repo_name": "timblechmann/nt2", "max_stars_repo_head_hexsha": "6c71f7063ca4e5975c9c019877e6b2fe07c9e4ce", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2016-09-14T00:23:53.000Z", "max_stars_repo_stars_event_max_datetime": "2018-01-14T12:51:18.000Z", "max_issues_repo_path": "modules/boost/simd/reduction/include/boost/simd/toolbox/reduction/functions/simd/sse/sse2/compare_equal.hpp", "max_issues_repo_name": "timblechmann/nt2", "max_issues_repo_head_hexsha": "6c71f7063ca4e5975c9c019877e6b2fe07c9e4ce", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules/boost/simd/reduction/include/boost/simd/toolbox/reduction/functions/simd/sse/sse2/compare_equal.hpp", "max_forks_repo_name": "timblechmann/nt2", "max_forks_repo_head_hexsha": "6c71f7063ca4e5975c9c019877e6b2fe07c9e4ce", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.5762711864, "max_line_length": 98, "alphanum_fraction": 0.5790874525, "num_tokens": 650, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.5660185205547239, "lm_q2_score": 0.3522017956470284, "lm_q1q2_score": 0.19935273930884823}} {"text": "#define BOOST_TEST_MODULE \"test_read_afm_fitting_interaction\"\n\n#ifdef BOOST_TEST_DYN_LINK\n#include \n#else\n#include \n#endif\n\n#include \n#include \n#include \n#include \n\nBOOST_AUTO_TEST_CASE(read_afm_fitting_interaction)\n{\n mjolnir::LoggerManager::set_default_logger(\"test_read_afm_fitting_interaction.log\");\n using traits_type = mjolnir::SimulatorTraits;\n using namespace toml::literals;\n const toml::value v = u8R\"(\n interaction = \"AFMFlexibleFitting\"\n k = 100.0\n gamma = 1.0\n pixel_x = 10.0\n pixel_y = 10.0\n sigma_x = 2.0\n sigma_y = 2.0\n length_x = 5\n length_y = 5\n z0 = 0.0\n cutoff = 5.0\n margin = 0.5\n image = [\n 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.5, 1.0, 0.5,\n 0.0, 0.0, 1.0, 2.0, 1.0,\n 0.0, 0.0, 0.5, 1.0, 0.5,\n ]\n parameters = [\n {index = 0, radius = 1.0},\n {index = 1, radius = 2.0},\n {index = 4, radius = 3.0},\n {index = 5, radius = 4.0},\n ]\n )\"_toml;\n\n const auto base = mjolnir::read_external_interaction(v);\n BOOST_TEST(static_cast(base));\n\n const auto derv = dynamic_cast<\n const mjolnir::AFMFitInteraction*>(base.get());\n BOOST_TEST(static_cast(derv));\n\n BOOST_TEST(derv->k() == 100.0);\n BOOST_TEST(derv->gamma () == 1.0);\n BOOST_TEST(derv->pixel_x () == 10.0);\n BOOST_TEST(derv->pixel_y () == 10.0);\n BOOST_TEST(derv->sigma_x () == 2.0);\n BOOST_TEST(derv->sigma_y () == 2.0);\n BOOST_TEST(derv->length_x() == 5u );\n BOOST_TEST(derv->length_y() == 5u );\n BOOST_TEST(derv->z0 () == 0.0);\n BOOST_TEST(derv->cutoff () == 5.0);\n BOOST_TEST(derv->margin () == 0.5);\n\n BOOST_TEST_REQUIRE(derv->participants().size() == 4u);\n BOOST_TEST_REQUIRE(derv->participants().at(0) == 0u);\n BOOST_TEST_REQUIRE(derv->participants().at(1) == 1u);\n BOOST_TEST_REQUIRE(derv->participants().at(2) == 4u);\n BOOST_TEST_REQUIRE(derv->participants().at(3) == 5u);\n\n 
BOOST_TEST(derv->parameters().at(0) == 1.0);\n BOOST_TEST(derv->parameters().at(1) == 2.0);\n BOOST_TEST(derv->parameters().at(4) == 3.0);\n BOOST_TEST(derv->parameters().at(5) == 4.0);\n\n BOOST_TEST(derv->image().at( 0) == 0.0);\n BOOST_TEST(derv->image().at( 1) == 0.0);\n BOOST_TEST(derv->image().at( 2) == 0.0);\n BOOST_TEST(derv->image().at( 3) == 0.0);\n BOOST_TEST(derv->image().at( 4) == 0.0);\n BOOST_TEST(derv->image().at( 5) == 0.0);\n BOOST_TEST(derv->image().at( 6) == 0.0);\n BOOST_TEST(derv->image().at( 7) == 0.0);\n BOOST_TEST(derv->image().at( 8) == 0.0);\n BOOST_TEST(derv->image().at( 9) == 0.0);\n BOOST_TEST(derv->image().at(10) == 0.0);\n BOOST_TEST(derv->image().at(11) == 0.0);\n BOOST_TEST(derv->image().at(12) == 0.5);\n BOOST_TEST(derv->image().at(13) == 1.0);\n BOOST_TEST(derv->image().at(14) == 0.5);\n BOOST_TEST(derv->image().at(15) == 0.0);\n BOOST_TEST(derv->image().at(16) == 0.0);\n BOOST_TEST(derv->image().at(17) == 1.0);\n BOOST_TEST(derv->image().at(18) == 2.0);\n BOOST_TEST(derv->image().at(19) == 1.0);\n BOOST_TEST(derv->image().at(20) == 0.0);\n BOOST_TEST(derv->image().at(21) == 0.0);\n BOOST_TEST(derv->image().at(22) == 0.5);\n BOOST_TEST(derv->image().at(23) == 1.0);\n BOOST_TEST(derv->image().at(24) == 0.5);\n}\n", "meta": {"hexsha": "148ebc6def938b8d433fd8c4dbda00705d337d1c", "size": 3774, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/core/test_read_afm_fitting_interaction.cpp", "max_stars_repo_name": "yutakasi634/Mjolnir", "max_stars_repo_head_hexsha": "ab7a29a47f994111e8b889311c44487463f02116", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12.0, "max_stars_repo_stars_event_min_datetime": "2017-02-01T08:28:38.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-25T15:47:51.000Z", "max_issues_repo_path": "test/core/test_read_afm_fitting_interaction.cpp", "max_issues_repo_name": "Mjolnir-MD/Mjolnir", "max_issues_repo_head_hexsha": "043df4080720837042c6b67a5495ecae198bc2b3", "max_issues_repo_licenses": 
["MIT"], "max_issues_count": 60.0, "max_issues_repo_issues_event_min_datetime": "2019-01-14T08:11:33.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-29T08:26:36.000Z", "max_forks_repo_path": "test/core/test_read_afm_fitting_interaction.cpp", "max_forks_repo_name": "yutakasi634/Mjolnir", "max_forks_repo_head_hexsha": "ab7a29a47f994111e8b889311c44487463f02116", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8.0, "max_forks_repo_forks_event_min_datetime": "2019-01-13T11:03:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-01T11:38:00.000Z", "avg_line_length": 36.640776699, "max_line_length": 88, "alphanum_fraction": 0.5720720721, "num_tokens": 1366, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.5544704649604273, "lm_q2_score": 0.35936415888237616, "lm_q1q2_score": 0.19925681226562397}} {"text": "/*\n * Copyright 2020 Robert Bosch GmbH\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n */\n/**\n * \\file noisy_object_sensor.cpp\n */\n\n#include // for Isometry3d, Vector3d\n#include // for shared_ptr<>\n#include // for random_device\n#include // for string\n\n#include // for Component, Json\n#include // for Frustum\n#include // for Object\n#include // for ObjectSensor\n#include // for actions::ConfigureFactory\n#include // for EXPORT_CLOE_PLUGIN\n#include // for Registrar\n#include // for Sync\n#include // for actions::SetVariableActionFactory\n#include 
\"noise_data.hpp\" // for NoiseData, NoiseConf\n\nnamespace cloe {\n\nenum class ObjectField { Translation, Velocity, Acceleration };\n\n// clang-format off\nENUM_SERIALIZATION(ObjectField, ({\n {ObjectField::Translation, \"translation\"},\n {ObjectField::Velocity, \"velocity\"},\n {ObjectField::Acceleration, \"acceleration\"},\n}))\n// clang-format on\n\nnamespace component {\n\nvoid apply_noise_xy(Eigen::Vector3d* vec, const NoiseConf& noise) {\n vec->x() = vec->x() + noise.get();\n vec->y() = vec->y() + noise.get();\n}\n\nvoid add_noise_translation(Object* obj, const NoiseConf* noise) {\n Eigen::Vector3d transl = obj->pose.translation();\n apply_noise_xy(&transl, *noise);\n obj->pose.translation() = transl;\n}\n\nvoid add_noise_velocity(Object* obj, const NoiseConf* noise) {\n Eigen::Vector3d vel = obj->velocity;\n apply_noise_xy(&vel, *noise);\n obj->velocity = vel;\n}\n\nvoid add_noise_acceleration(Object* obj, const NoiseConf* noise) {\n Eigen::Vector3d accel = obj->acceleration;\n apply_noise_xy(&accel, *noise);\n obj->acceleration = accel;\n}\n\nclass ObjectNoiseConf : public NoiseConf {\n public:\n ObjectNoiseConf() = default;\n\n virtual ~ObjectNoiseConf() noexcept = default;\n\n /**\n * Add noise to target parameter.\n */\n std::function apply;\n\n /**\n * Set the appropriate target function.\n */\n void set_target() {\n using namespace std::placeholders; // for _1\n switch (target_) {\n case ObjectField::Translation:\n apply = std::bind(add_noise_translation, _1, this);\n break;\n case ObjectField::Velocity:\n apply = std::bind(add_noise_velocity, _1, this);\n break;\n case ObjectField::Acceleration:\n apply = std::bind(add_noise_acceleration, _1, this);\n break;\n }\n }\n\n CONFABLE_SCHEMA(ObjectNoiseConf) {\n return Schema{\n NoiseConf::schema_impl(),\n fable::schema::PropertyList{\n // clang-format off\n {\"target\", Schema(&target_, \"data field of the object the noise should be applied to\")},\n // clang-format on\n },\n };\n }\n\n void 
to_json(Json& j) const override {\n NoiseConf::to_json(j);\n j = Json{\n {\"target\", target_},\n };\n }\n\n private:\n ObjectField target_{ObjectField::Translation};\n};\n\nstruct NoisyObjectSensorConf : public NoisySensorConf {\n /// List of noisy object parameters.\n std::vector noisy_params;\n\n CONFABLE_SCHEMA(NoisyObjectSensorConf) {\n return Schema{\n NoisySensorConf::schema_impl(),\n fable::schema::PropertyList{\n // clang-format off\n {\"noise\", Schema(&noisy_params, \"configure noisy parameters\")},\n // clang-format on\n },\n };\n }\n\n void to_json(Json& j) const override {\n NoisySensorConf::to_json(j);\n j = Json{\n {\"noise\", noisy_params},\n };\n }\n};\n\nclass NoisyObjectSensor : public ObjectSensor {\n public:\n NoisyObjectSensor(const std::string& name, const NoisyObjectSensorConf& conf,\n std::shared_ptr obs)\n : ObjectSensor(name), config_(conf), sensor_(obs) {\n reset_random();\n }\n\n virtual ~NoisyObjectSensor() noexcept = default;\n\n const Objects& sensed_objects() const override {\n if (cached_) {\n return objects_;\n }\n for (const auto& o : sensor_->sensed_objects()) {\n auto obj = apply_noise(o);\n if (obj) {\n objects_.push_back(obj);\n }\n }\n cached_ = true;\n return objects_;\n }\n\n const Frustum& frustum() const override { return sensor_->frustum(); }\n\n const Eigen::Isometry3d& mount_pose() const override { return sensor_->mount_pose(); }\n\n /**\n * Process the underlying sensor and clear the cache.\n *\n * We could process and create the filtered list of objects now, but we can\n * also delay it (lazy computation) and only do it when absolutely necessary.\n * This comes at the minor cost of checking whether cached_ is true every\n * time sensed_objects() is called.\n */\n Duration process(const Sync& sync) override {\n // This currently shouldn't do anything, but this class acts as a prototype\n // for How It Should Be Done.\n Duration t = ObjectSensor::process(sync);\n if (t < sync.time()) {\n return t;\n }\n\n // Process 
the underlying sensor and clear the cache.\n t = sensor_->process(sync);\n clear_cache();\n return t;\n }\n\n void reset() override {\n ObjectSensor::reset();\n sensor_->reset();\n clear_cache();\n reset_random();\n }\n\n void abort() override {\n ObjectSensor::abort();\n sensor_->abort();\n }\n\n void enroll(Registrar& r) override {\n r.register_action(std::make_unique(\n &config_, \"config\", \"configure noisy object component\"));\n r.register_action>(\n \"noise_activation\", \"switch sensor noise on/off\", \"enable\", &config_.enabled);\n }\n\n protected:\n std::shared_ptr apply_noise(const std::shared_ptr& o) const {\n if (!config_.enabled) {\n return o;\n }\n auto obj = std::make_shared(*o);\n\n for (auto& np : config_.noisy_params) {\n np.apply(obj.get());\n }\n return obj;\n }\n\n void reset_random() {\n // Reset the sensor's \"master\" seed, if applicable.\n unsigned long seed = config_.seed;\n if (seed == 0) {\n std::random_device r;\n do {\n seed = r();\n } while (seed == 0);\n\n if (config_.reuse_seed) {\n config_.seed = seed;\n }\n }\n for (auto& np : config_.noisy_params) {\n np.set_target();\n np.reset(seed);\n ++seed;\n }\n }\n\n void clear_cache() {\n objects_.clear();\n cached_ = false;\n }\n\n private:\n NoisyObjectSensorConf config_;\n std::shared_ptr sensor_;\n mutable bool cached_;\n mutable Objects objects_;\n};\n\nDEFINE_COMPONENT_FACTORY(NoisyObjectSensorFactory, NoisyObjectSensorConf, \"noisy_object_sensor\",\n \"add gaussian noise to object sensor output\")\n\nDEFINE_COMPONENT_FACTORY_MAKE(NoisyObjectSensorFactory, NoisyObjectSensor, ObjectSensor)\n\n} // namespace component\n} // namespace cloe\n\nEXPORT_CLOE_PLUGIN(cloe::component::NoisyObjectSensorFactory)\n", "meta": {"hexsha": "e814da5f0de3bddb2bcdec9159ac57f74b123254", "size": 7714, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "plugins/noisy_sensor/src/noisy_object_sensor.cpp", "max_stars_repo_name": "Sidharth-S-S/cloe", "max_stars_repo_head_hexsha": 
"974ef649e7dc6ec4e6869e4cf690c5b021e5091e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 20.0, "max_stars_repo_stars_event_min_datetime": "2020-07-07T18:28:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T04:35:28.000Z", "max_issues_repo_path": "plugins/noisy_sensor/src/noisy_object_sensor.cpp", "max_issues_repo_name": "Sidharth-S-S/cloe", "max_issues_repo_head_hexsha": "974ef649e7dc6ec4e6869e4cf690c5b021e5091e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 46.0, "max_issues_repo_issues_event_min_datetime": "2021-01-20T10:13:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T12:27:19.000Z", "max_forks_repo_path": "plugins/noisy_sensor/src/noisy_object_sensor.cpp", "max_forks_repo_name": "Sidharth-S-S/cloe", "max_forks_repo_head_hexsha": "974ef649e7dc6ec4e6869e4cf690c5b021e5091e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 12.0, "max_forks_repo_forks_event_min_datetime": "2021-01-25T08:01:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-27T10:09:53.000Z", "avg_line_length": 28.3602941176, "max_line_length": 100, "alphanum_fraction": 0.6516722842, "num_tokens": 1840, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.5544704649604273, "lm_q2_score": 0.35936415888237616, "lm_q1q2_score": 0.19925681226562397}} {"text": "#include \"mutation_annotated_tree.hpp\"\n#include \n#include \n#include \n#include \n\n// Uses one-hot encoding if base is unambiguous\n// A:1,C:2,G:4,T:8\nint8_t Mutation_Annotated_Tree::get_nuc_id (char nuc) {\n int8_t ret = 0b1111;\n switch(nuc) {\n case 'a':\n case 'A': ret = 0b1;\n break;\n case 'c':\n case 'C': ret = 0b10;\n break;\n case 'g':\n case 'G': ret = 0b100; \n break;\n case 't':\n case 'T': ret = 0b1000; \n break;\n case 'R': ret = 0b101;\n break;\n case 'Y': ret = 0b1010;\n break;\n case 'S': ret = 0b110;\n break;\n case 'W': ret = 0b1001;\n break;\n case 'K': ret = 0b1100;\n break;\n case 'M': ret = 0b11;\n break;\n case 'B': ret = 0b1110;\n break;\n case 'D': ret = 0b1101;\n break;\n case 'H': ret = 0b1011;\n break;\n case 'V': ret = 0b111;\n case 'n':\n case 'N': \n default: ret = 0b1111;\n break;\n }\n return ret;\n}\n\n// Sets bits at positions specified by nuc_vec to 1 in int8\nint8_t Mutation_Annotated_Tree::get_nuc_id (std::vector nuc_vec) {\n int8_t ret = 0;\n int8_t one = 1;\n for (auto nuc: nuc_vec) {\n assert((nuc >= 0) && (nuc <=3));\n ret += (one << nuc);\n }\n return ret;\n}\n\n// Convert nuc_id back to IUPAC base \nchar Mutation_Annotated_Tree::get_nuc (int8_t nuc_id) {\n char ret = 'N';\n //assert ((nuc_id >= 1) && (nuc_id <= 15));\n switch(nuc_id) {\n case 1: ret = 'A';\n break;\n case 2: ret = 'C';\n break;\n case 3: ret = 'M';\n break;\n case 4: ret = 'G';\n break;\n case 5: ret = 'R';\n break;\n case 6: ret = 'S';\n break;\n case 7: ret = 'V';\n break;\n case 8: ret = 'T';\n break;\n case 9: ret = 'W';\n break;\n case 10: ret = 'Y';\n break;\n case 11: ret = 'H';\n break;\n case 12: ret = 'K';\n break;\n case 13: ret = 'D';\n break;\n case 14: ret = 'B';\n break;\n default: ret = 'N';\n break;\n }\n return ret;\n}\n\n// A:0, C:1, G:2, T:3 \nint8_t Mutation_Annotated_Tree::get_nt (int8_t nuc_id) {\n 
int8_t ret = 0;\n switch(nuc_id) {\n case 1: ret = 0;\n break;\n case 2: ret = 1;\n break;\n case 4: ret = 2;\n break;\n case 8: ret = 3;\n break;\n default: ret = -1;\n break;\n }\n return ret;\n}\n\nstd::vector Mutation_Annotated_Tree::get_nuc_vec (char c) {\n switch (c) {\n case 'a':\n case 'A': return std::vector{0};\n case 'c':\n case 'C': return std::vector{1};\n case 'g':\n case 'G': return std::vector{2};\n case 't':\n case 'T': return std::vector{3};\n case 'R': return std::vector{0,2};\n case 'Y': return std::vector{1,3};\n case 'S': return std::vector{1,2};\n case 'W': return std::vector{0,3};\n case 'K': return std::vector{2,3};\n case 'M': return std::vector{0,1};\n case 'B': return std::vector{1,2,3};\n case 'D': return std::vector{0,2,3};\n case 'H': return std::vector{0,1,3};\n case 'V': return std::vector{0,1,2};\n case 'n':\n case 'N': return std::vector{0,1,2,3};\n default: return std::vector{0,1,2,3};\n }\n}\n\nstd::vector Mutation_Annotated_Tree::get_nuc_vec_from_id (int8_t nuc_id) {\n return get_nuc_vec(get_nuc(nuc_id));\n}\n\n// Get newick stringstream for the input subtree rooted at some node (node) in \n// the input tree T. Boolean arguments decide whether\n// internal node ids and branch lengths are printed. 
If last boolean argument is\n// set, branch lengths from input tree are retained, otherwise, branch length\n// for a branch is equal to the number of mutations annotated on that branch \nvoid Mutation_Annotated_Tree::write_newick_string (std::stringstream& ss, const Mutation_Annotated_Tree::Tree& T, Mutation_Annotated_Tree::Node* node, bool print_internal, bool print_branch_len, bool retain_original_branch_len, bool uncondense_leaves) {\n TIMEIT();\n\n std::vector traversal = T.depth_first_expansion(node);\n size_t level_offset = node->level-1;\n size_t curr_level = 0;\n bool prev_open = true;\n\n std::stack node_stack;\n std::stack branch_length_stack;\n\n for (auto n: traversal) {\n size_t level = n->level-level_offset;\n float branch_length = n->branch_length;\n if (!retain_original_branch_len) {\n branch_length = static_cast(n->mutations.size());\n }\n if (curr_level < level) {\n if (!prev_open) {\n ss << ',';\n }\n size_t l = level - 1;\n if (curr_level > 1) {\n l = level - curr_level;\n }\n for (size_t i=0; i < l; i++) {\n ss << '(';\n prev_open = true;\n }\n if (n->is_leaf()) {\n if (uncondense_leaves && (T.condensed_nodes.find(n->identifier) != T.condensed_nodes.end())) {\n auto cn = T.condensed_nodes.at(n->identifier);\n auto cn_size = cn.size();\n for (size_t idx = 0; idx < cn_size; idx++) {\n ss << cn[idx];\n if (idx+1 < cn_size) {\n ss << ',';\n }\n }\n }\n else {\n ss << n->identifier;\n }\n if ((print_branch_len) && (branch_length >= 0)) {\n ss << ':';\n ss << branch_length;\n }\n prev_open = false;\n }\n else {\n node_stack.push(n->identifier);\n branch_length_stack.push(branch_length);\n }\n }\n else if (curr_level > level) {\n prev_open = false;\n for (size_t i = level; i < curr_level; i++) {\n ss << ')';\n if (print_internal){\n ss << node_stack.top();\n }\n if ((print_branch_len) && (branch_length_stack.top() >= 0)) {\n ss << ':';\n ss << branch_length_stack.top();\n }\n node_stack.pop();\n branch_length_stack.pop();\n }\n if (n->is_leaf()) {\n 
if (uncondense_leaves && (T.condensed_nodes.find(n->identifier) != T.condensed_nodes.end())) {\n auto cn = T.condensed_nodes.at(n->identifier);\n ss << ',';\n auto cn_size = cn.size();\n for (size_t idx = 0; idx < cn_size; idx++) {\n ss << cn[idx];\n if (idx+1 < cn_size) {\n ss << ',';\n }\n }\n }\n else {\n ss << ',';\n ss << n->identifier;\n }\n if ((print_branch_len) && (branch_length >= 0)) {\n ss << ':';\n ss << branch_length;\n }\n }\n else {\n node_stack.push(n->identifier);\n branch_length_stack.push(branch_length);\n }\n }\n else {\n prev_open = false;\n if (n->is_leaf()) {\n if (uncondense_leaves && (T.condensed_nodes.find(n->identifier) != T.condensed_nodes.end())) {\n auto cn = T.condensed_nodes.at(n->identifier);\n ss << ',';\n auto cn_size = cn.size();\n for (size_t idx = 0; idx < cn_size; idx++) {\n ss << cn[idx];\n if (idx+1 < cn_size) {\n ss << ',';\n }\n }\n }\n else {\n ss << ',';\n ss << n->identifier;\n }\n if ((print_branch_len) && (branch_length >= 0)) {\n ss << ':';\n ss << branch_length;\n }\n }\n else {\n node_stack.push(n->identifier);\n branch_length_stack.push(branch_length);\n }\n }\n curr_level = level;\n }\n size_t remaining = node_stack.size();\n for (size_t i = 0; i < remaining; i++) {\n ss << ')';\n if (print_internal) {\n ss << node_stack.top();\n }\n if ((print_branch_len) && (branch_length_stack.top() >= 0)) {\n ss << ':';\n ss << branch_length_stack.top();\n }\n node_stack.pop();\n branch_length_stack.pop();\n }\n\n ss << ';';\n}\n\nstd::string Mutation_Annotated_Tree::get_newick_string (const Mutation_Annotated_Tree::Tree& T, Mutation_Annotated_Tree::Node* node, bool print_internal, bool print_branch_len, bool retain_original_branch_len, bool uncondense_leaves) {\n std::stringstream newick_ss;\n write_newick_string(newick_ss, T, node, print_internal, print_branch_len, retain_original_branch_len, uncondense_leaves);\n return newick_ss.str();\n}\n\nstd::string Mutation_Annotated_Tree::get_newick_string (const Tree& T, bool 
print_internal, bool print_branch_len, bool retain_original_branch_len, bool uncondense_leaves) {\n return get_newick_string(T, T.root, print_internal, print_branch_len, retain_original_branch_len, uncondense_leaves);\n}\n\n// Split string into words for a specific delimiter delim\nvoid Mutation_Annotated_Tree::string_split (std::string const& s, char delim, std::vector& words) {\n TIMEIT();\n size_t start_pos = 0, end_pos = 0;\n while ((end_pos = s.find(delim, start_pos)) != std::string::npos) {\n if ((end_pos == start_pos) || end_pos >= s.length()) {\n break;\n }\n words.emplace_back(s.substr(start_pos, end_pos-start_pos));\n start_pos = end_pos+1;\n }\n auto last = s.substr(start_pos, s.size()-start_pos);\n if (last != \"\") {\n words.push_back(std::move(last));\n }\n \n}\n\n// Split string into words (delimited by space, tabs etc.)\nvoid Mutation_Annotated_Tree::string_split (std::string s, std::vector& words) {\n std::string curr = \"\";\n std::vector ret;\n \n // Used to split string around spaces.\n std::istringstream ss(s);\n\n std::string word;\n // Traverse through all words\n while (ss >> word) {\n words.push_back(std::move(word));\n };\n}\n\nMutation_Annotated_Tree::Tree Mutation_Annotated_Tree::create_tree_from_newick_string (std::string newick_string) {\n TIMEIT();\n Tree T;\n\n std::vector leaves;\n std::vector num_open;\n std::vector num_close;\n std::vector> branch_len (128); // will be resized later if needed\n size_t level = 0;\n\n std::vector s1;\n string_split(newick_string, ',', s1);\n\n num_open.reserve(s1.size());\n num_close.reserve(s1.size());\n\n for (auto s: s1) {\n size_t no = 0;\n size_t nc = 0;\n bool stop = false;\n bool branch_start = false;\n std::string leaf = \"\";\n std::string branch = \"\";\n for (auto c: s) {\n if (c == ':') {\n stop = true;\n branch = \"\";\n branch_start = true;\n }\n else if (c == '(') {\n no++;\n level++;\n if (branch_len.size() <= level) {\n branch_len.resize(level*2);\n }\n }\n else if (c == ')') {\n 
stop = true;\n nc++;\n float len = (branch.size() > 0) ? std::stof(branch) : -1.0;\n branch_len[level].push(len);\n level--;\n branch_start = false;\n }\n else if (!stop) {\n leaf += c;\n branch_start = false;\n }\n else if (branch_start) {\n if (isdigit(c) || c == '.' || c == 'e' || c == 'E' || c == '-' || c == '+') {\n branch += c;\n }\n }\n }\n leaves.push_back(std::move(leaf));\n num_open.push_back(no);\n num_close.push_back(nc);\n float len = (branch.size() > 0) ? std::stof(branch) : -1.0;\n branch_len[level].push(len);\n }\n\n if (level != 0) {\n fprintf(stderr, \"ERROR: incorrect Newick format!\\n\");\n exit(1);\n }\n\n T.curr_internal_node = 0;\n std::stack parent_stack;\n\n for (size_t i=0; i0);\n if (!hasmeta) {\n fprintf(stderr, \"WARNING: This pb does not include any metadata. Filling in default values\\n\");\n }\n tree = create_tree_from_newick_string(data.newick());\n auto dfs = tree.depth_first_expansion();\n static tbb::affinity_partitioner ap;\n tbb::parallel_for( tbb::blocked_range(0, dfs.size()),\n [&](tbb::blocked_range r) {\n for (size_t idx = r.begin(); idx < r.end(); idx++) {\n auto node = dfs[idx];\n auto mutation_list = data.node_mutations(idx);\n if (hasmeta) {\n for (int k = 0; k < data.metadata(idx).clade_annotations_size(); k++) {\n node->clade_annotations.emplace_back(data.metadata(idx).clade_annotations(k)); \n }\n } \n for (int k = 0; k < mutation_list.mutation_size(); k++) {\n auto mut = mutation_list.mutation(k);\n Mutation m;\n m.chrom = mut.chromosome();\n m.position = mut.position();\n if (!m.is_masked()) {\n m.ref_nuc = (1 << mut.ref_nuc());\n m.par_nuc = (1 << mut.par_nuc());\n m.is_missing = false;\n std::vector nuc_vec(mut.mut_nuc_size());\n for (int n = 0; n < mut.mut_nuc_size(); n++) {\n nuc_vec[n] = mut.mut_nuc(n);\n }\n m.mut_nuc = get_nuc_id(nuc_vec);\n if (m.mut_nuc != m.par_nuc) {\n node->add_mutation(m);\n }\n }\n else {\n // Mutation masked\n m.ref_nuc = 0;\n m.par_nuc = 0;\n m.mut_nuc = 0;\n 
node->add_mutation(m);\n }\n }\n if (!std::is_sorted(node->mutations.begin(), node->mutations.end())) {\n fprintf(stderr, \"WARNING: Mutations not sorted!\\n\");\n std::sort(node->mutations.begin(), node->mutations.end());\n }\n }\n }, ap);\n\n size_t num_condensed_nodes = static_cast(data.condensed_nodes_size());\n tbb::parallel_for( tbb::blocked_range(0, num_condensed_nodes),\n [&](tbb::blocked_range r) {\n for (size_t idx = r.begin(); idx < r.end(); idx++) {\n auto cn = data.condensed_nodes(idx);\n tree.condensed_nodes.emplace(std::pair>(cn.node_name(), std::vector(cn.condensed_leaves_size())));\n for (int k = 0; k < cn.condensed_leaves_size(); k++) {\n tree.condensed_nodes[cn.node_name()][k] = cn.condensed_leaves(k);\n tree.condensed_leaves.emplace(cn.condensed_leaves(k));\n }\n }\n }, ap);\n\n return tree;\n}\n\nvoid Mutation_Annotated_Tree::save_mutation_annotated_tree (Mutation_Annotated_Tree::Tree tree, std::string filename) {\n TIMEIT();\n Parsimony::data data;\n data.set_newick(get_newick_string(tree, false, true, true));\n\n auto dfs = tree.depth_first_expansion();\n\n for (size_t idx = 0; idx < dfs.size(); idx++) {\n auto meta = data.add_metadata();\n for (size_t k = 0; k < dfs[idx]->clade_annotations.size(); k++) {\n meta->add_clade_annotations(dfs[idx]->clade_annotations[k]);\n }\n auto mutation_list = data.add_node_mutations();\n for (auto m: dfs[idx]->mutations) {\n auto mut = mutation_list->add_mutation();\n mut->set_chromosome(m.chrom);\n mut->set_position(m.position);\n \n if (m.is_masked()) {\n mut->set_ref_nuc(-1);\n mut->set_par_nuc(-1);\n }\n else {\n int8_t j = get_nt(m.ref_nuc);\n assert (j >= 0);\n mut->set_ref_nuc(j);\n\n j = get_nt(m.par_nuc);\n assert(j >= 0);\n mut->set_par_nuc(j);\n\n mut->clear_mut_nuc();\n for (auto nuc: get_nuc_vec_from_id(m.mut_nuc)) {\n mut->add_mut_nuc(nuc);\n }\n }\n }\n }\n\n // Add condensed nodes\n for (auto cn: tree.condensed_nodes) {\n auto cn_ptr = data.add_condensed_nodes();\n 
cn_ptr->set_node_name(cn.first);\n for (auto lid: cn.second) {\n cn_ptr->add_condensed_leaves(lid);\n }\n }\n\n // Boost library used to stream the contents to the output protobuf file in\n // uncompressed or compressed .gz format\n std::ofstream outfile(filename, std::ios::out | std::ios::binary);\n boost::iostreams::filtering_streambuf< boost::iostreams::output> outbuf;\n \n if (filename.find(\".gz\\0\") != std::string::npos) {\n try {\n outbuf.push(boost::iostreams::gzip_compressor());\n outbuf.push(outfile);\n std::ostream outstream(&outbuf);\n data.SerializeToOstream(&outstream);\n boost::iostreams::close(outbuf);\n outfile.close();\n }\n catch(const boost::iostreams::gzip_error& e) {\n std::cout << e.what() << '\\n';\n }\n }\n else {\n data.SerializeToOstream(&outfile);\n outfile.close();\n }\n}\n\n/* === Node === */\nbool Mutation_Annotated_Tree::Node::is_leaf () {\n return (children.size() == 0);\n}\n\nbool Mutation_Annotated_Tree::Node::is_root() {\n return (parent == NULL);\n}\n\nMutation_Annotated_Tree::Node::Node() {\n level = 0;\n identifier = \"\";\n parent = NULL;\n branch_length = -1.0;\n clade_annotations.clear();\n mutations.clear();\n}\n\nMutation_Annotated_Tree::Node::Node (std::string id, float len) {\n identifier = id;\n parent = NULL;\n level = 1;\n branch_length = len;\n mutations.clear();\n}\n\nMutation_Annotated_Tree::Node::Node (std::string id, Node* p, float len) {\n identifier = id;\n parent = p;\n level = p->level + 1;\n branch_length = len;\n mutations.clear();\n}\n\n// Assumes mutations are added in chronological order. 
If a new mutation occurs\n// at the same position, it should either be updated to the new allele or\n// removed entirely (in case of reversal mutation)\nvoid Mutation_Annotated_Tree::Node::add_mutation (Mutation mut) {\n auto iter = std::lower_bound(mutations.begin(), mutations.end(), mut);\n // check if mutation at the same position has occured before\n if ((iter != mutations.end()) && (iter->position == mut.position)) {\n // update to new allele\n if (iter->par_nuc != mut.mut_nuc) {\n iter->mut_nuc = mut.mut_nuc;\n }\n //reversal mutation\n else {\n std::vector tmp;\n for (auto m: mutations) {\n if (m.position != iter->position) {\n tmp.emplace_back(m.copy());\n }\n }\n mutations.clear();\n for (auto m: tmp) {\n mutations.emplace_back(m.copy());\n }\n }\n }\n // new mutation\n else {\n mutations.insert(iter, mut);\n }\n}\n\nvoid Mutation_Annotated_Tree::Node::clear_mutations() {\n mutations.clear();\n}\n\nvoid Mutation_Annotated_Tree::Node::clear_annotations() {\n clade_annotations.clear();\n}\n\n/* === Tree === */\nsize_t Mutation_Annotated_Tree::Tree::get_max_level () const {\n size_t max_level = 0;\n for (auto x: all_nodes) {\n if (x.second->level > max_level) {\n max_level = x.second->level;\n }\n }\n return max_level;\n}\n \nsize_t Mutation_Annotated_Tree::Tree::get_num_annotations () const {\n size_t ret = 0;\n if (root != NULL) {\n ret = root->clade_annotations.size();\n }\n return ret;\n}\n \nvoid Mutation_Annotated_Tree::Tree::rename_node(std::string old_nid, std::string new_nid) {\n auto n = get_node(old_nid);\n if (n != NULL) {\n n->identifier = new_nid;\n all_nodes.erase(old_nid);\n all_nodes[new_nid] = n;\n }\n else {\n fprintf(stderr, \"ERROR: %s not found in the Tree!\\n\", old_nid.c_str());\n exit(1);\n }\n}\n\nstd::vector Mutation_Annotated_Tree::Tree::get_leaves(std::string nid) {\n std::vector leaves;\n if (nid == \"\") {\n if (root == NULL) {\n return leaves;\n }\n nid = root->identifier;\n }\n Node* node = all_nodes[nid];\n\n std::queue 
remaining_nodes;\n remaining_nodes.push(node);\n while (remaining_nodes.size() > 0) {\n Node* curr_node = remaining_nodes.front();\n if (curr_node->children.size() == 0)\n leaves.push_back(curr_node);\n remaining_nodes.pop();\n for (auto c: curr_node->children) {\n remaining_nodes.push(c);\n }\n }\n return leaves;\n}\n\nstd::vector Mutation_Annotated_Tree::Tree::get_leaves_ids(std::string nid) {\n std::vector leaves_ids;\n if (nid == \"\") {\n if (root == NULL) {\n return leaves_ids;\n }\n nid = root->identifier;\n }\n Node* node = all_nodes[nid];\n\n std::queue remaining_nodes;\n remaining_nodes.push(node);\n while (remaining_nodes.size() > 0) {\n Node* curr_node = remaining_nodes.front();\n if (curr_node->children.size() == 0)\n leaves_ids.push_back(curr_node->identifier);\n remaining_nodes.pop();\n for (auto c: curr_node->children) {\n remaining_nodes.push(c);\n }\n }\n return leaves_ids;\n}\n\nsize_t Mutation_Annotated_Tree::Tree::get_num_leaves(Node* node) {\n if (node == NULL) {\n node = root;\n }\n\n if (node->is_leaf()) {\n return 1;\n }\n size_t num_leaves = 0;\n for (auto c: node->children) {\n num_leaves += get_num_leaves(c);\n }\n return num_leaves;\n}\n\nMutation_Annotated_Tree::Node* Mutation_Annotated_Tree::Tree::create_node (std::string const& identifier, float branch_len, size_t num_annotations) {\n all_nodes.clear();\n Node* n = new Node(identifier, branch_len);\n for (size_t k=0; k < num_annotations; k++) {\n n->clade_annotations.emplace_back(\"\");\n }\n root = n;\n all_nodes[identifier] = root;\n return n;\n}\n\nMutation_Annotated_Tree::Node* Mutation_Annotated_Tree::Tree::create_node (std::string const& identifier, Node* par, float branch_len) {\n if (all_nodes.find(identifier) != all_nodes.end()) {\n fprintf(stderr, \"Error: %s already in the tree!\\n\", identifier.c_str());\n exit(1);\n }\n Node* n = new Node(identifier, par, branch_len);\n size_t num_annotations = get_num_annotations();\n for (size_t k=0; k < num_annotations; k++) {\n 
n->clade_annotations.emplace_back(\"\");\n }\n all_nodes[identifier] = n;\n par->children.push_back(n);\n return n;\n}\n\nMutation_Annotated_Tree::Node* Mutation_Annotated_Tree::Tree::create_node (std::string const& identifier, std::string const& parent_id, float branch_len) {\n Node* par = all_nodes[parent_id];\n return create_node(identifier, par, branch_len);\n}\n\nMutation_Annotated_Tree::Node* Mutation_Annotated_Tree::Tree::get_node (std::string nid) const {\n if (all_nodes.find(nid) != all_nodes.end()) {\n return all_nodes.at(nid);\n }\n return NULL;\n\n}\n\nbool Mutation_Annotated_Tree::Tree::is_ancestor (std::string anc_id, std::string nid) const {\n Node* node = get_node(nid);\n while (node->parent != NULL) {\n node = node->parent;\n if (node->identifier == anc_id) {\n return true;\n }\n }\n return false; \n}\n\nstd::vector Mutation_Annotated_Tree::Tree::rsearch (const std::string& nid, bool include_self) const {\n std::vector ancestors;\n Node* node = get_node(nid);\n if (node==NULL) {\n return ancestors;\n } \n if (include_self) {\n ancestors.push_back(node);\n }\n while (node->parent != NULL) {\n ancestors.push_back(node->parent);\n node = node->parent;\n }\n return ancestors;\n}\n\nvoid Mutation_Annotated_Tree::Tree::remove_node_helper (std::string nid, bool move_level) { \n auto it = all_nodes.find(nid);\n if (it == all_nodes.end()) {\n fprintf(stderr, \"ERROR: Tried to remove node identifier %s but it was not found!\\n\", nid.c_str());\n exit(1);\n }\n Node* source = it->second;\n Node* curr_parent = source->parent;\n \n if (curr_parent != NULL) {\n // Remove source from curr_parent\n auto iter = std::find(curr_parent->children.begin(), curr_parent->children.end(), source);\n assert (iter != curr_parent->children.end());\n curr_parent->children.erase(iter);\n\n // Remove parent if it no longer has any children\n if (curr_parent->children.size() == 0) {\n if (curr_parent == root) {\n fprintf(stderr, \"ERROR: Tree empty!\\n\");\n exit(1);\n }\n 
remove_node_helper (curr_parent->identifier, move_level);\n }\n // Move the remaining child one level up if it is the only child of its parent \n else if (move_level && (curr_parent->children.size() == 1)) {\n auto child = curr_parent->children[0];\n if (curr_parent->parent != NULL) {\n for (size_t k=0; k < curr_parent->clade_annotations.size(); k++) {\n if (child->clade_annotations[k] == \"\") {\n child->clade_annotations[k] = curr_parent->clade_annotations[k];\n }\n }\n child->parent = curr_parent->parent;\n child->level = curr_parent->parent->level + 1;\n child->branch_length += curr_parent->branch_length;\n\n std::vector tmp;\n for (auto m: child->mutations) {\n tmp.emplace_back(m.copy());\n }\n\n //Clear and add back mutations in chrono order\n child->clear_mutations();\n for (auto m: curr_parent->mutations) {\n child->add_mutation(m.copy());\n }\n for (auto m: tmp) {\n child->add_mutation(m.copy());\n }\n\n curr_parent->parent->children.push_back(child);\n \n iter = std::find(curr_parent->parent->children.begin(), curr_parent->parent->children.end(), curr_parent);\n assert(iter != curr_parent->parent->children.end());\n curr_parent->parent->children.erase(iter);\n \n // Update levels of source descendants\n std::queue remaining_nodes;\n remaining_nodes.push(child);\n while (remaining_nodes.size() > 0) {\n Node* curr_node = remaining_nodes.front();\n remaining_nodes.pop();\n curr_node->level = curr_node->parent->level + 1;\n for (auto c: curr_node->children) {\n remaining_nodes.push(c);\n }\n }\n }\n\n auto par_it = all_nodes.find(curr_parent->identifier);\n assert (par_it != all_nodes.end());\n all_nodes.erase(par_it);\n delete curr_parent;\n }\n }\n\n //Remove source and descendants from all_nodes\n std::queue desc;\n desc.push(source);\n while (desc.size() > 0) {\n Node* curr_node = desc.front();\n desc.pop();\n for (auto c: curr_node->children) {\n desc.push(c);\n }\n it = all_nodes.find(curr_node->identifier);\n all_nodes.erase(it);\n delete curr_node;\n 
}\n}\n\nvoid Mutation_Annotated_Tree::Tree::remove_node (std::string nid, bool move_level) { \n TIMEIT();\n remove_node_helper (nid, move_level);\n}\n\nvoid Mutation_Annotated_Tree::Tree::move_node (std::string source_id, std::string dest_id, bool move_level) {\n Node* source = all_nodes[source_id];\n Node* destination = all_nodes[dest_id];\n Node* curr_parent = source->parent;\n\n source->parent = destination;\n source->branch_length = -1.0; // Invalidate source branch length\n\n destination->children.push_back(source);\n\n // Remove source from curr_parent\n auto iter = std::find(curr_parent->children.begin(), curr_parent->children.end(), source);\n curr_parent->children.erase(iter);\n if (curr_parent->children.size() == 0) {\n remove_node(curr_parent->identifier, move_level);\n }\n \n // Update levels of source descendants\n std::queue remaining_nodes;\n remaining_nodes.push(source);\n while (remaining_nodes.size() > 0) {\n Node* curr_node = remaining_nodes.front();\n remaining_nodes.pop();\n curr_node->level = curr_node->parent->level + 1;\n for (auto c: curr_node->children) {\n remaining_nodes.push(c);\n }\n }\n}\n\nstd::vector Mutation_Annotated_Tree::Tree::breadth_first_expansion(std::string nid) {\n std::vector traversal;\n \n if (nid == \"\") {\n if (root == NULL) {\n return traversal;\n }\n nid = root->identifier;\n }\n\n Node* node = all_nodes[nid];\n\n std::queue remaining_nodes;\n remaining_nodes.push(node);\n while (remaining_nodes.size() > 0) {\n Node* curr_node = remaining_nodes.front();\n traversal.push_back(curr_node);\n remaining_nodes.pop();\n for (auto c: curr_node->children) {\n remaining_nodes.push(c);\n }\n }\n\n return traversal;\n}\n\nvoid Mutation_Annotated_Tree::Tree::depth_first_expansion_helper(Mutation_Annotated_Tree::Node* node, std::vector& vec) const {\n vec.push_back(node);\n for (auto c: node->children) {\n depth_first_expansion_helper(c, vec);\n }\n}\n\nstd::vector 
Mutation_Annotated_Tree::Tree::depth_first_expansion(Mutation_Annotated_Tree::Node* node) const {
    // Pre-order DFS rooted at `node`; defaults to the tree root when `node`
    // is NULL. Returns an empty vector for an empty tree.
    TIMEIT();
    std::vector traversal;
    if (node == NULL) {
        node = root;
    }
    if (node == NULL) {
        return traversal;
    }
    depth_first_expansion_helper(node, traversal);
    return traversal;
}

// Total parsimony score of the tree: the sum of mutation counts over all
// nodes (each mutation on a branch contributes 1).
size_t Mutation_Annotated_Tree::Tree::get_parsimony_score() {
    size_t score = 0;
    auto dfs = depth_first_expansion();
    for (auto n: dfs) {
        score += n->mutations.size();
    }
    return score;
}

// Collapse polytomies of identical (mutation-free) leaves into single
// condensed nodes named "node_<k>_condensed_<n>_leaves". Samples listed in
// `missing_samples` are never condensed. The identifiers of the condensed
// leaves are recorded in `condensed_nodes` so they can be restored later by
// uncondense_leaves().
void Mutation_Annotated_Tree::Tree::condense_leaves(std::vector missing_samples) {
    if (condensed_nodes.size() > 0) {
        fprintf(stderr, "WARNING: tree contains condensed nodes. It may be condensed already!\n");
    }

    auto tree_leaves = get_leaves_ids();
    for (auto l1_id: tree_leaves) {
        std::vector polytomy_nodes;

        // Leaf may already have been removed by a previous condensation pass.
        auto l1 = get_node(l1_id);
        if (l1 == NULL) {
            continue;
        }
        if (std::find(missing_samples.begin(), missing_samples.end(), l1->identifier) != missing_samples.end()) {
            continue;
        }
        // Only mutation-free leaves are candidates for condensation.
        if (l1->mutations.size() > 0) {
            continue;
        }

        // Collect all sibling leaves that are also mutation-free and still
        // present in the tree.
        for (auto l2: l1->parent->children) {
            if (std::find(missing_samples.begin(), missing_samples.end(), l2->identifier) != missing_samples.end()) {
                continue;
            }
            if (l2->is_leaf() && (get_node(l2->identifier) != NULL) && (l2->mutations.size() == 0)) {
                polytomy_nodes.push_back(l2);
            }
        }
        if (polytomy_nodes.size() > 1) {
            std::string new_node_name = "node_" + std::to_string(1+condensed_nodes.size()) + "_condensed_" + std::to_string(polytomy_nodes.size()) + "_leaves";
            
            auto curr_node = get_node(l1->identifier);
            auto new_node = create_node(new_node_name, curr_node->parent, l1->branch_length);

            new_node->clear_mutations();
            
            condensed_nodes[new_node_name] = std::vector(polytomy_nodes.size());

            // Record and delete each condensed leaf; move_level=false keeps
            // the surrounding topology intact.
            for (size_t it = 0; it < polytomy_nodes.size(); it++) {
                condensed_nodes[new_node_name][it] = polytomy_nodes[it]->identifier;
                remove_node(polytomy_nodes[it]->identifier, false);
            }
        }
    }
}

void 
Mutation_Annotated_Tree::Tree::uncondense_leaves() {
    // Reverse of condense_leaves(): expand every condensed node back into its
    // original set of sample leaves. Runs over the condensed-node map in
    // parallel; tree mutations are serialized through `tbb_lock`.
    // NOTE(review): extraction appears to have stripped template arguments
    // (e.g. `tbb::blocked_range` is presumably `tbb::blocked_range<size_t>`)
    // — confirm against upstream.
    tbb::mutex tbb_lock;
    static tbb::affinity_partitioner ap;
    tbb::parallel_for(tbb::blocked_range(0, condensed_nodes.size()),
    [&](tbb::blocked_range r) {
        for (size_t it = r.begin(); it < r.end(); it++) {
            auto cn = condensed_nodes.begin();
            std::advance(cn, it);

            tbb_lock.lock();
            auto n = get_node(cn->first);
            tbb_lock.unlock();
            // If the condensed node is the root it has no parent; the new
            // leaves are attached to the node itself in that case.
            auto par = (n->parent != NULL) ? n->parent : n;

            size_t num_samples = cn->second.size();

            if (num_samples > 0) {
                // Reuse the condensed node itself as the first sample leaf.
                tbb_lock.lock();
                rename_node(n->identifier, cn->second[0]);
                tbb_lock.unlock();
            }

            // Remaining samples become fresh siblings carrying a copy of the
            // condensed node's mutations.
            for (size_t s = 1; s < num_samples; s++) {
                tbb_lock.lock();
                auto new_n = create_node(cn->second[s], par, n->branch_length);
                tbb_lock.unlock();
                for (auto m: n->mutations) {
                    new_n->add_mutation(m.copy());
                }
            }
        }
    }, ap);
    condensed_nodes.clear();
    condensed_leaves.clear();
}


// Collapse internal structure that carries no information: mutation-free
// internal nodes have their children promoted to the grandparent, and
// single-child nodes are merged into their child (the parent's mutations are
// appended to the child before the move).
void Mutation_Annotated_Tree::Tree::collapse_tree() {
    auto bfs = breadth_first_expansion();

    // idx starts at 1 so the root itself is never collapsed.
    for (size_t idx = 1; idx < bfs.size(); idx++) {
        auto node = bfs[idx];
        auto mutations = node->mutations; 
        if (mutations.size() == 0) {
            auto parent = node->parent;
            // Copy of the child list: move_node mutates node->children while
            // we iterate.
            auto children = node->children;
            for (auto child: children) {
                move_node(child->identifier, parent->identifier, false);
            }
        }
        //If internal node has one child, the child can be moved up one level
        else if (node->children.size() == 1) {
            auto child = node->children.front();
            auto parent = node->parent;
            for (auto m: mutations) {
                child->add_mutation(m.copy());
            }
            move_node(child->identifier, parent->identifier, false);
        }
    }
}

// Deep-copy `tree` (or the subtree rooted at `identifier` when non-empty).
// Topology is rebuilt by round-tripping through a Newick string; annotations,
// mutations and condensed-node bookkeeping are then copied over in parallel.
Mutation_Annotated_Tree::Tree Mutation_Annotated_Tree::get_tree_copy(const Mutation_Annotated_Tree::Tree& tree, const std::string& identifier) {
    TIMEIT();
    auto root = tree.root;
    if (identifier != "") {
        root = tree.get_node(identifier);
    }
    
    Tree copy = create_tree_from_newick_string (get_newick_string(tree, root, true, true));

    std::vector dfs1;
    
std::vector dfs2;

    // NOTE(review): the extraction that produced this text ate everything
    // between `<` and the next `>`, so the two parallel loops below are
    // visibly truncated (e.g. `for (size_t k=r.begin(); k(0, dfs1.size()),`
    // has lost its loop body and the start of the next parallel_for).
    // Reconstruct from the upstream usher sources before compiling; comments
    // here document only what is visible.
    static tbb::affinity_partitioner ap;
    tbb::parallel_for(tbb::blocked_range(0, 2),
    [&](tbb::blocked_range r) {
        for (size_t k=r.begin(); k(0, dfs1.size()),
    [&](tbb::blocked_range r) {
        for (size_t k=r.begin(); kclade_annotations.resize(n1->clade_annotations.size());
            for (size_t i=0; iclade_annotations.size(); i++) {
                n2->clade_annotations[i] = n1->clade_annotations[i];
            }
            // Copy every mutation from the source node to its counterpart.
            for (auto m: n1->mutations) {
                Mutation m2 = m.copy();
                n2->add_mutation(m2);
            }
        }
    }, ap);

    // Copy the condensed-node map and the condensed-leaf set in parallel.
    size_t num_condensed_nodes = static_cast(tree.condensed_nodes.size());
    tbb::parallel_for( tbb::blocked_range(0, num_condensed_nodes),
    [&](tbb::blocked_range r) {
        for (size_t idx = r.begin(); idx < r.end(); idx++) {
            auto cn = tree.condensed_nodes.begin(); 
            std::advance(cn, idx);
            copy.condensed_nodes.insert(std::pair>(cn->first, std::vector(cn->second.size())));
            for (size_t k = 0; k < cn->second.size(); k++) {
                copy.condensed_nodes[cn->first][k] = cn->second[k];
                copy.condensed_leaves.insert(cn->second[k]);
            }
        }
    }, ap);

    return copy;
}

// Get the last common ancestor of two node identifiers. Return NULL if does not
// exist
// Implementation: intersect the two root-ward ancestor chains (rsearch with
// include_self=true, presumably — TODO confirm the bool's meaning). The first
// common node found is the LCA because anc1 is walked from the node upward.
// Quadratic in tree depth; acceptable for typical phylogeny depths.
Mutation_Annotated_Tree::Node* Mutation_Annotated_Tree::LCA (const Mutation_Annotated_Tree::Tree& tree, const std::string& nid1, const std::string& nid2) {
    TIMEIT();
    
    if ((tree.get_node(nid1) == NULL) || (tree.get_node(nid2) == NULL)) {
        return NULL;
    }

    auto n2_ancestors = tree.rsearch(nid2, true);

    for (auto anc1: tree.rsearch(nid1, true)) {
        for (auto anc2: n2_ancestors) {
            if (anc1 == anc2) {
                return anc1;
            }
        }
    }

    return NULL;
}

// Extract the subtree consisting of the specified set of samples. This routine
// maintains the internal node names of the input tree. 
Mutations are copied
// from the tree such that the path of mutations from root to the sample is
// same as the original tree.
// NOTE(review): as elsewhere in this extraction, template arguments and some
// code between `<`/`>` pairs are missing (both parallel loops below are
// visibly truncated); restore from upstream before compiling.
Mutation_Annotated_Tree::Tree Mutation_Annotated_Tree::get_subtree (const Mutation_Annotated_Tree::Tree& tree, const std::vector& samples) {
    TIMEIT();
    Tree subtree;

    // Set of leaf and internal nodes corresponding to the subtree
    tbb::concurrent_unordered_set subtree_nodes;
    // Maintain a set of all ancestors of a sample for each sample
    std::vector> all_ancestors(samples.size());

    static tbb::affinity_partitioner ap;
    tbb::parallel_for(tbb::blocked_range(0, samples.size()),
    [&](tbb::blocked_range r) {
        for (size_t k=r.begin(); k(0, samples.size()),
    [&](tbb::blocked_range r) {
        for (size_t i=r.begin(); i last_subtree_node;
    for (auto n: dfs) {
        // If the node is in subtree_nodes, it should be added to the subtree
        if (subtree_nodes.find(n) != subtree_nodes.end()) {
            Node* subtree_parent = NULL;
            if (last_subtree_node.size() > 0) {
                // Pop stack entries until the top is an ancestor of `n`; that
                // ancestor becomes n's parent in the extracted subtree.
                while (!tree.is_ancestor(last_subtree_node.top()->identifier, n->identifier)) {
                    last_subtree_node.pop();
                }
                subtree_parent = last_subtree_node.top();
            }
            // Add as root of the subtree
            if (subtree_parent == NULL) {
                // for root node, need to size the annotations vector
                Node* new_node = subtree.create_node(n->identifier, -1.0, num_annotations);
                // need to assign any clade annotations which would belong to that root as well
                for (size_t k = 0; k < num_annotations; k++) {
                    if (n->clade_annotations[k] != "") {
                        new_node->clade_annotations[k] = n->clade_annotations[k];
                    }
                }
                // Collapse every mutation on the original root-to-n path onto
                // the new subtree root so parsimony paths are preserved.
                std::vector root_to_node = tree.rsearch(n->identifier, true); 
                std::reverse(root_to_node.begin(), root_to_node.end());
                root_to_node.emplace_back(n);

                for (auto curr: root_to_node) {
                    for (auto m: curr->mutations) {
                        new_node->add_mutation(m);
                    }
                }
            }
            // Add to the parent identified
            else {
                Node* new_node = subtree.create_node(n->identifier, subtree_parent->identifier);

                // Accumulate annotations and mutations from every original
                // node strictly between subtree_parent and n (plus n itself).
                auto 
par_to_node = tree.rsearch(n->identifier, true);
                std::reverse(par_to_node.begin(), par_to_node.end());
                par_to_node.erase(par_to_node.begin(), std::find(par_to_node.begin(), par_to_node.end(), subtree_parent)+1);


                for (auto curr: par_to_node) {
                    for (size_t k = 0; k < num_annotations; k++) {
                        if (curr->clade_annotations[k] != "") {
                            new_node->clade_annotations[k] = curr->clade_annotations[k];
                        }
                    }
                    for (auto m: curr->mutations) {
                        new_node->add_mutation(m);
                    }
                }
            }
            last_subtree_node.push(n);
        }
    }

    // Keep the internal-node name counter consistent with the source tree so
    // later create_node calls do not collide with preserved names.
    subtree.curr_internal_node = tree.curr_internal_node;

    return subtree;
}

// Free every node of tree T. T's node map and root are left dangling —
// presumably T is discarded immediately after this call; verify at call sites.
void Mutation_Annotated_Tree::clear_tree(Mutation_Annotated_Tree::Tree& T) {
    for (auto n: T.depth_first_expansion()) {
        delete(n);
    }
}
// NOTE(review): the header names inside the #include directives below (and
// the template arguments throughout this file, e.g. `std::vector` which is
// presumably `std::vector<std::uint8_t>`) were stripped by the text
// extraction; restore them from the original source before compiling.
#include 
#include 
#include 

#include 
#include 

#include 
#include 

#include 
#include 
#include 

#include "./comparerLogic.h"

using namespace std;

using namespace nil::crypto3::zk::components;
using namespace nil::crypto3::zk::snark;


// Process exit code used to signal a proof that failed to generate or verify.
ushort INVALID_PROOF_RETURN_CODE = 200;

// Hex-encode a byte blob (uppercase, via boost::algorithm::hex) for display.
std::string convert_byteblob_to_hex_string(std::vector blob) {
    // convert byte_blob to hex string and print it to output
    std::string hex;
    hex.reserve(blob.size() * 2);
    boost::algorithm::hex(blob.begin(), blob.end(), back_inserter(hex));
    return hex;
}

// Write a byte blob to `fname`, one byte at a time.
void save_byteblob(std::vector byteblob, boost::filesystem::path fname) {
    boost::filesystem::ofstream out(fname);
    for (const auto &v : byteblob) {
        out << v;
    }
    out.close();
}

// Read the whole of `fname` as a binary byte blob.
// Throws std::ios_base::failure when the file is empty (or unreadable, since
// a failed open also yields zero bytes).
std::vector load_byteblob(boost::filesystem::path fname) {
    boost::filesystem::ifstream stream(fname, std::ios::in | std::ios::binary);
    std::vector contents((std::istreambuf_iterator(stream)), std::istreambuf_iterator());
    if (contents.size() == 0) {
        throw std::ios_base::failure("Empty file");
    }
    return contents;
}


// proving key

// Serialize the proving key with the TVM marshalling layer and write it out.
void save_proving_key(scheme_type::proving_key_type pk, boost::filesystem::path fname) {
    std::vector byteblob = nil::marshalling::verifier_input_serializer_tvm::process(pk);
    save_byteblob(byteblob, fname);
}

// Load and deserialize a proving key previously written by save_proving_key.
// NOTE(review): `processingStatus` is not checked after deserialization —
// a corrupt file is silently accepted; consider surfacing the status.
scheme_type::proving_key_type load_proving_key(boost::filesystem::path fname) {
    std::vector byteblob = load_byteblob(fname);
    nil::marshalling::status_type processingStatus = nil::marshalling::status_type::success;
    return nil::marshalling::verifier_input_deserializer_tvm::proving_key_process(
        byteblob.cbegin(),
        byteblob.cend(),
        processingStatus);
}

// verification key

// Serialize the verification key and write it to `fname`.
void save_verification_key(scheme_type::verification_key_type vk, boost::filesystem::path fname) {
    
std::vector byteblob = nil::marshalling::verifier_input_serializer_tvm::process(vk);
    save_byteblob(byteblob, fname);
}

// Load and deserialize a verification key written by save_verification_key.
// NOTE(review): as with load_proving_key, `processingStatus` is never
// inspected after the call — deserialization errors pass silently.
scheme_type::verification_key_type load_verification_key(boost::filesystem::path fname) {
    std::vector byteblob = load_byteblob(fname);
    nil::marshalling::status_type processingStatus = nil::marshalling::status_type::success;
    return nil::marshalling::verifier_input_deserializer_tvm::verification_key_process(
        byteblob.cbegin(),
        byteblob.cend(),
        processingStatus);
}

// proof

// Serialize a proof and write it to `fname`.
void save_proof(scheme_type::proof_type proof, boost::filesystem::path fname) {
    std::vector byteblob = nil::marshalling::verifier_input_serializer_tvm::process(proof);
    save_byteblob(byteblob, fname);
}

// Load and deserialize a proof written by save_proof (status not checked).
scheme_type::proof_type load_proof(boost::filesystem::path fname) {
    std::vector byteblob = load_byteblob(fname);
    nil::marshalling::status_type processingStatus = nil::marshalling::status_type::success;
    return nil::marshalling::verifier_input_deserializer_tvm::proof_process(
        byteblob.cbegin(),
        byteblob.cend(),
        processingStatus);
}


// primary input

// Serialize the R1CS primary (public) input and write it to `fname`.
void save_primary_input(zk::snark::r1cs_primary_input primary_input, boost::filesystem::path fname) {
    std::vector byteblob = nil::marshalling::verifier_input_serializer_tvm::process(primary_input);
    save_byteblob(byteblob, fname);
}

// Load and deserialize a primary input written by save_primary_input
// (status not checked).
zk::snark::r1cs_primary_input load_primary_input(boost::filesystem::path fname) {
    std::vector byteblob = load_byteblob(fname);
    nil::marshalling::status_type processingStatus = nil::marshalling::status_type::success;
    return nil::marshalling::verifier_input_deserializer_tvm::primary_input_process(
        byteblob.cbegin(),
        byteblob.cend(),
        processingStatus);
}


// Build the comparer circuit, run the trusted setup, and persist the
// resulting proving/verification key pair to `pk_path` / `vk_path`.
// Returns 0 on success.
int setup_keys(boost::filesystem::path pk_path, boost::filesystem::path vk_path) {

    blueprint bp;
    ComparerLogic comparerLogic(bp);
    comparerLogic.generate_r1cs_constraints(bp);

    cout << "Blueprint size: " << bp.num_variables() << endl;
    cout << "Generating constraint 
system...\" << endl;\n const r1cs_constraint_system constraint_system = bp.get_constraint_system();\n cout << \"Number of R1CS constraints: \" << constraint_system.num_constraints() << endl;\n\n cout << \"Generating keypair...\" << endl;\n scheme_type::keypair_type keypair = generate(constraint_system);\n\n cout << \"Saving proving key to a file \" << pk_path<< endl;\n save_proving_key(keypair.first, pk_path);\n\n cout << \"Saving verification key to a file \" << vk_path << endl;\n save_verification_key(keypair.second, vk_path);\n\n return 0;\n}\n\nint create_proof(boost::filesystem::path pk_path, boost::filesystem::path proof_path, boost::filesystem::path pi_path, int minYear, int maxYear, int year) {\n\n cout << \"Loading proving key from a file \" << pk_path << endl;\n typename scheme_type::proving_key_type pk = load_proving_key(pk_path);\n\n blueprint bp;\n ComparerLogic comparerLogic(bp);\n\n cout << \"Generating constraint system...\" << endl;\n comparerLogic.generate_r1cs_constraints(bp);\n\n cout << \"Generating witness...\" << endl;\n comparerLogic.generate_r1cs_witness(bp, minYear, maxYear, year);\n\n cout << \"Blueprint is satisfied: \" << bp.is_satisfied() << endl;\n\n if (!bp.is_satisfied()) {\n return INVALID_PROOF_RETURN_CODE;\n }\n\n cout << \"Generating proof...\" << endl;\n const scheme_type::proof_type proof = prove(pk, bp.primary_input(), bp.auxiliary_input());\n\n cout << \"Saving proof to file \" << proof_path << endl;\n save_proof(proof, proof_path);\n\n cout << \"Saving primary input to file \" << pi_path << endl;\n save_primary_input(bp.primary_input(), pi_path);\n return 0;\n}\n\nint verify_proof(boost::filesystem::path proof_path, boost::filesystem::path vk_path, boost::filesystem::path pi_path) {\n\n cout << \"Loading proof from a file \" << proof_path << endl;\n typename scheme_type::proof_type proof = load_proof(proof_path);\n\n cout << \"Loading primary input from a file \" << pi_path << endl;\n r1cs_primary_input input = 
load_primary_input(pi_path);\n\n cout << \"Loading verification key from a file \" << vk_path << endl;\n typename scheme_type::verification_key_type vk = load_verification_key(vk_path);\n\n // verify\n using basic_proof_system = r1cs_gg_ppzksnark;\n const bool verified = verify(vk, input, proof);\n cout << \"Verification status: \" << verified << endl;\n\n return verified ? 0 : INVALID_PROOF_RETURN_CODE;\n}\n\nint main(int argc, char *argv[]) {\n int maxYear, minYear, year;\n boost::filesystem::path pk_path, vk_path, proof_path, pi_path;\n // bool hexFlag;\n\n boost::program_options::options_description options(\"CLI Proof Generator\");\n options.add_options()\n // (\"hex,h\", boost::program_options::bool_switch(&hexFlag), \"print only hex proof to output\")\n (\"minYear,minYear\", boost::program_options::value(&minYear)->default_value(0))\n (\"maxYear,maxYear\", boost::program_options::value(&maxYear)->default_value(100))\n (\"year,year\", boost::program_options::value(&year)->default_value(18))\n (\"proving-key-path,pk\", boost::program_options::value(&pk_path)->default_value(\"proving.key\"))\n (\"verification-key-path,vk\", boost::program_options::value(&vk_path)->default_value(\"verification.key\"))\n (\"proof-path,p\", boost::program_options::value(&proof_path)->default_value(\"proof\"))\n (\"primary-input-path,pi\", boost::program_options::value(&pi_path)->default_value(\"primary.input\"));\n\n boost::program_options::variables_map vm;\n boost::program_options::store(boost::program_options::command_line_parser(argc, argv).options(options).run(), vm);\n boost::program_options::notify(vm);\n\n cout << setprecision(16);\n\n if (!argv[1]) {\n cout << \"Please select a command: [setup/prove/verify]\" << endl;\n return 0;\n }\n else if (string(argv[1]) == \"setup\") {\n // Generate proving.key & verification.key\n return setup_keys(pk_path, vk_path);\n } else if (string(argv[1]) == \"prove\") {\n return create_proof(pk_path, proof_path, pi_path, minYear, maxYear, 
year);\n } else if (string(argv[1]) == \"verify\") {\n return verify_proof(proof_path, pi_path, vk_path);\n }\n\n return 0;\n}\n\n", "meta": {"hexsha": "1d889df6e208be6bd21c6658dea0e5cd212821f8", "size": 9043, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "snark-logic/bin/main.cpp", "max_stars_repo_name": "podlodkin/podlodkin-freeton-year-control", "max_stars_repo_head_hexsha": "e394c11f2414804d2fbde93a092ae589d4359739", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "snark-logic/bin/main.cpp", "max_issues_repo_name": "podlodkin/podlodkin-freeton-year-control", "max_issues_repo_head_hexsha": "e394c11f2414804d2fbde93a092ae589d4359739", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "snark-logic/bin/main.cpp", "max_forks_repo_name": "podlodkin/podlodkin-freeton-year-control", "max_forks_repo_head_hexsha": "e394c11f2414804d2fbde93a092ae589d4359739", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-08-31T06:27:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-31T06:27:19.000Z", "avg_line_length": 39.6622807018, "max_line_length": 156, "alphanum_fraction": 0.7160234435, "num_tokens": 2225, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
#define _LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS
#include "matching.h"

// NOTE(review): the header names inside the #include directives and the
// template arguments throughout this file (e.g. `gsl::span`, `vector`,
// `optional`) were stripped by the text extraction; restore from the original
// source before compiling.
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 

using namespace cv;
using namespace std;
using namespace gsl;

namespace {

// Thread-safe accumulator for descriptor-matching statistics: collects the
// per-match minimal distances and the total number of descriptors seen.
struct descriptor_stat_data {
    descriptor_stat_data() = default;

    // Append the distance of every match and bump the descriptor total.
    // Safe to call concurrently from multiple worker threads.
    void insert_matches(gsl::span matches,
                        int descriptor_count) noexcept {
        lock_guard l{_mutex};
        transform(begin(matches), end(matches),
                  back_inserter(_global_minimal_distances),
                  [](const DMatch& m) { return m.distance; });
        _total_descriptors += descriptor_count;
    }

    // Move the accumulated data out and reset the accumulator to empty,
    // all under the lock so concurrent inserts are not lost.
    pair, int64_t> extract() noexcept {
        lock_guard l{_mutex};
        pair, int64_t> p{move(_global_minimal_distances),
                         _total_descriptors};
        _global_minimal_distances = vector();
        _total_descriptors = 0UL;
        return p;
    }

  private:
    mutex _mutex;
    vector _global_minimal_distances GUARDED_BY(_mutex);
    int64_t _total_descriptors GUARDED_BY(_mutex) = 0L;
};

// Per-frame worker: brute-force matches each frame's descriptors against the
// previous frame's and records the distances; optionally renders the matches
// onto the original image pair.
class matching {
  public:
    matching(descriptor_stat_data& accumulated_data,
             NormTypes norm_to_use,
             bool crosscheck,
             string_view input_pattern,
             optional output_pattern,
             optional original_files) noexcept
        : accumulated_data{accumulated_data}
        , matcher{BFMatcher::create(norm_to_use, crosscheck)}
        , input_pattern{input_pattern}
        , output_pattern{output_pattern}
        , original_images{original_files} {
        // XOR is true exactly when the operands differ, so !(a ^ b) asserts
        // that the two optionals are either both set or both unset.
        Expects(!(output_pattern.has_value() ^ original_images.has_value()) &&
                "Either both or none are set");
    }

    // Match frame `idx` against frame `idx - 1`. Frames with no descriptors
    // are skipped; any I/O failure is reported and the frame is dropped
    // (noexcept — errors must not escape the parallel visitation).
    void operator()(int idx,
                    optional> /*keypoints*/, // NOLINT
                    optional descriptors) noexcept {
        Expects(descriptors.has_value());
        if (descriptors->rows == 0)
            return;

        try {
            const int previous_idx = idx 
- 1;
            // Load the previous frame's stored features and match the current
            // descriptors against them ("backwards" matching).
            const FileStorage previous_img = sens_loc::io::open_feature_file(
                fmt::format(input_pattern, previous_idx));
            Mat previous_descriptors =
                sens_loc::io::load_descriptors(previous_img);

            vector matches;
            matcher->match(*descriptors, previous_descriptors, matches);
            accumulated_data.insert_matches(matches, descriptors->rows);

            // Plot the matching between the descriptors of the previous and the
            // current frame.
            if (output_pattern) {
                const vector previous_keypoints =
                    sens_loc::io::load_keypoints(previous_img);

                const FileStorage this_feature =
                    sens_loc::io::open_feature_file(
                        fmt::format(input_pattern, idx));
                const vector this_keypoints =
                    sens_loc::io::load_keypoints(this_feature);

                // Load the two original grayscale frames for visualization.
                const string img_p1 = fmt::format(*original_images, idx - 1);
                const string img_p2 = fmt::format(*original_images, idx);
                auto img1 = sens_loc::io::load_as_8bit_gray(img_p1);
                auto img2 = sens_loc::io::load_as_8bit_gray(img_p2);

                // Silently skip visualization if either image fails to load;
                // the match statistics above have already been recorded.
                if (!img1 || !img2)
                    return;

                Mat out_img;
                // Current frame on the left, previous on the right; matches in
                // red (BGR 0,0,255), unmatched keypoints in blue.
                drawMatches(img2->data(), this_keypoints, img1->data(),
                            previous_keypoints, matches, out_img,
                            Scalar(0, 0, 255), Scalar(255, 0, 0));

                const string output = fmt::format(*output_pattern, idx);
                imwrite(output, out_img);
            }
        } catch (...) 
{
            // Catch-all: feature files may be missing/corrupt for any frame;
            // report and continue with the remaining frames.
            std::cerr << sens_loc::util::err{}
                      << "Could not initialize data for idx: " << idx << "\n";
            return;
        }
    }

    // Summarize all accumulated match distances: sort them, compute summary
    // statistics and a 25-bin histogram, then either write YAML/gnuplot files
    // (when paths are given) or print to stdout. Returns the number of
    // matched distances processed (0 when nothing was matched).
    size_t postprocess(const optional& stat_file,
                       const optional& matched_distance_histo) {
        auto [distances, total_descriptors] = accumulated_data.extract();
        if (distances.empty())
            return 0UL;

        sort(begin(distances), end(distances));
        const auto dist_bins = 25;
        sens_loc::analysis::distance distance_stat{distances, dist_bins};

        if (stat_file) {
            cv::FileStorage stat_out{*stat_file,
                                     cv::FileStorage::WRITE |
                                         cv::FileStorage::FORMAT_YAML};
            stat_out.writeComment(
                "The following values contain the results of the statistical "
                "analysis for descriptor distance to the closest descriptor "
                "after matching");
            write(stat_out, "match_distance", distance_stat.get_statistic());
            stat_out.release();
        } else {
            cout << "==== Match Distances\n"
                 << "total count: " << total_descriptors << "\n"
                 << "matched count: " << distances.size() << "\n"
                 << "matched/total: "
                 << narrow_cast(distances.size()) /
                        narrow_cast(total_descriptors)
                 << "\n"
                 << "min: " << distance_stat.min() << "\n"
                 << "max: " << distance_stat.max() << "\n"
                 << "median: " << distance_stat.median() << "\n"
                 << "mean: " << distance_stat.mean() << "\n"
                 << "Variance: " << distance_stat.variance() << "\n"
                 << "StdDev: " << distance_stat.stddev() << "\n"
                 << "Skewness: " << distance_stat.skewness() << "\n";
        }
        if (matched_distance_histo) {
            std::ofstream gnuplot_data{*matched_distance_histo};
            gnuplot_data << sens_loc::io::to_gnuplot(distance_stat.histogram())
                         << std::endl;
        } else {
            cout << distance_stat.histogram() << "\n";
        }
        return distances.size();
    }

  private:
    // Shared accumulator, owned by the caller (analyze_matching).
    descriptor_stat_data& accumulated_data;

    Ptr matcher;
    string_view input_pattern;
    optional output_pattern;
    optional original_images;
};
} // namespace

namespace sens_loc::apps {
int 
analyze_matching(util::processing_input in,
                 NormTypes norm_to_use,
                 bool crosscheck,
                 const optional& stat_file,
                 const optional& matched_distance_histo,
                 const optional& output_pattern,
                 const optional& original_files) {
    // Driver: run the `matching` worker over frames [start+1, end) in
    // parallel, then post-process the accumulated distance statistics.
    // Returns 0 when at least one match was produced, 1 otherwise.
    Expects(in.start < in.end && "Matching requires at least 2 images");
    using visitor = statistic_visitor;
    descriptor_stat_data data;
    auto analysis_v = visitor{/*input_pattern=*/in.input_pattern,
                              /*accumulated_data=*/data,
                              /*norm_to_use=*/norm_to_use,
                              /*crosscheck=*/crosscheck,
                              /*input_pattern=*/in.input_pattern,
                              /*output_pattern=*/output_pattern,
                              /*original_files=*/original_files};

    auto f = parallel_visitation(
        in.start + 1, // Because two consecutive images are matched, the first
                      // index is skipped. This requires "backwards" matching.
        in.end, analysis_v);
    size_t n_elements = f.postprocess(stat_file, matched_distance_histo);

    return n_elements > 0UL ? 0 : 1;
}
} // namespace sens_loc::apps
/**
 *****************************************************************************
 * @author This file is part of tinyram_snark, developed by SCIPR Lab
 * and contributors (see AUTHORS).
 * @copyright MIT license (see LICENSE file)
 *****************************************************************************/
// NOTE(review): header names in the #include directives below were stripped
// by the text extraction; restore from the original source before compiling.
#include 
#include 
#ifndef MINDEPS
#include 
#endif

#include 
#include 
#include 

#ifndef MINDEPS
namespace po = boost::program_options;

// Parse the prover's command line into the five output file-name parameters
// (all required options). Returns true on success; returns false when --help
// was requested or when parsing/validation failed (the error is printed to
// stderr in that case).
bool process_prover_command_line(const int argc, const char** argv,
                                 std::string &processed_assembly_fn,
                                 std::string &proving_key_fn,
                                 std::string &primary_input_fn,
                                 std::string &auxiliary_input_fn,
                                 std::string &proof_fn)
{
    try
    {
        po::options_description desc("Usage");
        desc.add_options()
            ("help", "print this help message")
            ("processed_assembly", po::value(&processed_assembly_fn)->required())
            ("proving_key", po::value(&proving_key_fn)->required())
            ("primary_input", po::value(&primary_input_fn)->required())
            ("auxiliary_input", po::value(&auxiliary_input_fn)->required())
            ("proof", po::value(&proof_fn)->required());

        po::variables_map vm;
        po::store(po::parse_command_line(argc, argv, desc), vm);

        // --help is handled before notify() so required-option checks do not
        // fire when the user only asked for usage information.
        if (vm.count("help"))
        {
            std::cout << desc << "\n";
            return false;
        }

        // notify() enforces the required() constraints and throws on a
        // missing option, which the catch below converts into `false`.
        po::notify(vm);
    }
    catch(std::exception& e)
    {
        std::cerr << "Error: " << e.what() << "\n";
        return false;
    }

    return true;
}
#endif

using 
namespace tinyram_snark;\n\nint main(int argc, const char * argv[])\n{\n default_tinyram_ppzksnark_pp::init_public_params();\n\n#ifdef MINDEPS\n std::string processed_assembly_fn = \"processed_assembly.txt\";\n std::string proving_key_fn = \"proving_key.txt\";\n std::string primary_input_fn = \"primary_input.txt\";\n std::string auxiliary_input_fn = \"auxiliary_input.txt\";\n std::string proof_fn = \"proof.txt\";\n#else\n std::string processed_assembly_fn;\n std::string proving_key_fn;\n std::string primary_input_fn;\n std::string auxiliary_input_fn;\n std::string proof_fn;\n\n if (!process_prover_command_line(argc, argv, processed_assembly_fn,\n proving_key_fn, primary_input_fn, auxiliary_input_fn, proof_fn))\n {\n return 1;\n }\n#endif\n libff::start_profiling();\n\n /* load everything */\n libff::enter_block(\"Deserialize proving key\");\n ram_ppzksnark_proving_key pk;\n std::ifstream pk_file(proving_key_fn);\n pk_file >> pk;\n pk_file.close();\n libff::leave_block(\"Deserialize proving key\");\n\n std::ifstream processed(processed_assembly_fn);\n tinyram_program program = load_preprocessed_program(pk.ap, processed);\n\n std::ifstream f_primary_input(primary_input_fn);\n std::ifstream f_auxiliary_input(auxiliary_input_fn);\n tinyram_input_tape primary_input = load_tape(f_primary_input);\n tinyram_input_tape auxiliary_input = load_tape(f_auxiliary_input);\n\n const ram_boot_trace boot_trace = tinyram_boot_trace_from_program_and_input(pk.ap, pk.primary_input_size_bound, program, primary_input);\n const ram_ppzksnark_proof proof = ram_ppzksnark_prover(pk, boot_trace, auxiliary_input);\n\n libff::enter_block(\"Serialize proof\");\n std::ofstream proof_file(proof_fn);\n proof_file << proof;\n proof_file.close();\n libff::leave_block(\"Serialize proof\");\n}\n", "meta": {"hexsha": "9c6250531311e4189fe6674ebd19e5a2710962b7", "size": 4047, "ext": "cpp", "lang": "C++", "max_stars_repo_path": 
"tinyram_snark/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_prover.cpp", "max_stars_repo_name": "alittlehorse/osprey", "max_stars_repo_head_hexsha": "22f290a7de3413a847e3dc33c96328752cc37f47", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2021-03-04T08:28:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-20T11:50:40.000Z", "max_issues_repo_path": "tinyram_snark/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_prover.cpp", "max_issues_repo_name": "alittlehorse/osprey", "max_issues_repo_head_hexsha": "22f290a7de3413a847e3dc33c96328752cc37f47", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tinyram_snark/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_prover.cpp", "max_forks_repo_name": "alittlehorse/osprey", "max_forks_repo_head_hexsha": "22f290a7de3413a847e3dc33c96328752cc37f47", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-05-22T15:50:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-22T15:50:15.000Z", "avg_line_length": 36.4594594595, "max_line_length": 170, "alphanum_fraction": 0.6419570052, "num_tokens": 929, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO\n\n", "lm_q1_score": 0.5544704649604273, "lm_q2_score": 0.35936413143782797, "lm_q1q2_score": 0.19925679704843258}} {"text": "#ifndef DQN_HPP_\r\n#define DQN_HPP_\r\n\r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n\r\nnamespace dqn\r\n{\r\nconstexpr auto kRawFrameHeight = 210;\r\nconstexpr auto kRawFrameWidth = 160;\r\nconstexpr auto kCroppedFrameSize = 84;\r\nconstexpr auto kCroppedFrameDataSize = kCroppedFrameSize * kCroppedFrameSize;\r\nconstexpr auto kOutputCount = 18;\r\n\r\nconstexpr auto last_action_layer_name = \"last_action_input_layer\";\r\n\r\nconstexpr auto frames_layer_name = \"frames_input_layer\";\r\nconstexpr auto cont_layer_name = \"cont_input_layer\";\r\nconstexpr auto target_layer_name = \"target_input_layer\";\r\nconstexpr auto filter_layer_name = \"filter_input_layer\";\r\n\r\nconstexpr auto train_last_action_blob_name = \"actions\";\r\nconstexpr auto test_last_action_blob_name = \"last_actions\";\r\n\r\nconstexpr auto train_frames_blob_name = \"frames\";\r\nconstexpr auto test_frames_blob_name = \"all_frames\";\r\n\r\nconstexpr auto target_blob_name = \"target\";\r\nconstexpr auto filter_blob_name = \"filter\";\r\nconstexpr auto cont_blob_name = \"cont\";\r\nconstexpr auto q_values_blob_name = \"q_values\";\r\n\r\nconstexpr auto ip1Size = 512;\r\nconstexpr auto lstmSize = 512;\r\n\r\nusing LastAction = std::vector;\r\nusing LastActionBatch = std::vector;\r\n\r\nusing FrameData = std::array;\r\nusing FrameDataSp = std::shared_ptr;\r\nusing InputFrames = std::vector;\r\nusing InputFramesBatch = std::vector;\r\nusing Transition = std::tuple >;\r\nusing Episode = std::vector;\r\nusing ReplayMemory = std::deque;\r\nusing MemoryLayer = caffe::MemoryDataLayer;\r\nusing FrameVec = std::vector;\r\n\r\nusing ActionValue = std::pair;\r\nusing SolverSp = std::shared_ptr>;\r\nusing NetSp = boost::shared_ptr>;\r\n\r\n/**\r\n * Deep Q-Network\r\n */\r\nclass DQN\r\n{\r\npublic:\r\n 
DQN(const ActionVect& legal_actions,\r\n const int replay_memory_capacity,\r\n const double gamma,\r\n const int clone_frequency,\r\n const int unroll,\r\n const int minibatch_size,\r\n const int frames_per_timestep);\r\n\r\n // Initialize DQN. Must be called before calling any other method.\r\n void Initialize(caffe::SolverParameter& solver_param);\r\n\r\n // Create the caffe net .prototxt\r\n caffe::NetParameter CreateNet(bool unroll1_is_lstm);\r\n\r\n // Load a trained model from a file.\r\n void LoadTrainedModel(const std::string& model_file);\r\n\r\n // Restore solving from a solver file.\r\n void RestoreSolver(const std::string& solver_file);\r\n\r\n // Snapshot the model/solver/replay memory. Produces files:\r\n // snapshot_prefix_iter_N.[caffemodel|solverstate|replaymem]. Optionally\r\n // removes snapshots that share the same prefix but have a lower\r\n // iteration number.\r\n void Snapshot(const std::string& snapshot_prefix, bool remove_old=false,\r\n bool snapshot_memory=true);\r\n\r\n // A specialized method for producing a high-score\r\n // snapshot. Optionally remove older HiScore snapshots\r\n void SnapshotHiScore(const std::string& snapshot_prefix,\r\n double avg_score, double std_dev,\r\n bool remove_old=true);\r\n\r\n // Select an action by epsilon-greedy. If cont is false, LSTM state\r\n // will be reset. cont should be true only at start of new episodes.\r\n Action SelectAction(const InputFrames& frames, const LastAction& last_action, double epsilon, bool cont);\r\n\r\n // Select a batch of actions by epsilon-greedy.\r\n ActionVect SelectActions(const InputFramesBatch& frames_batch,\r\n\t\t\t\t\t\t\t const LastActionBatch& last_action_batch,\r\n double epsilon, bool cont);\r\n\r\n // Add an episode to the replay memory\r\n void RememberEpisode(const Episode& episode);\r\n\r\n // Update DQN. 
Returns the number of solver steps executed.\r\n/// int UpdateSequential();\r\n // Updates from a random minibatch of experiences\r\n int UpdateRandom();\r\n\r\n // Clear the replay memory\r\n void ClearReplayMemory();\r\n\r\n // Save the replay memory to a gzipped compressed file\r\n void SnapshotReplayMemory(const std::string& filename);\r\n\r\n // Load the replay memory from a gzipped compressed file\r\n void LoadReplayMemory(const std::string& filename);\r\n\r\n // Get the number of episodes stored in the replay memory\r\n int memory_episodes() const\r\n {\r\n return replay_memory_.size();\r\n }\r\n\r\n // Get the number of transitions store in the replay memory\r\n int memory_size() const\r\n {\r\n return replay_memory_size_;\r\n }\r\n\r\n // Return the current iteration of the solver\r\n int current_iteration() const\r\n {\r\n return solver_->iter();\r\n }\r\n\r\n void CloneTestNet()\r\n {\r\n CloneNet(*test_net_);\r\n }\r\n\r\n // Benchmark the speed of the learning by doing some number of\r\n // iterations of updates and selects. random_updates toggles\r\n // random/sequential updating.\r\n //void Benchmark(int iterations, bool random_updates);\r\n\r\n // Obscures the screen by zeroing everything with a given probability.\r\n void ObscureScreen(FrameDataSp& screen, double obscure_prob);\r\n // Re-Display the last seen screen with probability prob\r\n void RedisplayScreen(FrameDataSp& screen, double prob);\r\n\r\n // Returns the number of transitions in the last episode added to\r\n // the memory or 0 if the memory is empty.\r\n int GetLastEpisodeSize();\r\n\r\nprotected:\r\n // Clone the given net and store the result in clone_net_\r\n void CloneNet(caffe::Net& net);\r\n\r\n // Given a set of input frames and a network, select an\r\n // action. 
Returns the action and the estimated Q-Value.\r\n\tActionValue SelectActionGreedily(caffe::Net& net,\r\n const InputFrames& last_frames,\r\n const Action& last_action,\r\n bool cont);\r\n\r\n // Given a vector of frames, return a batch of selected actions + values.\r\n\tstd::vector SelectActionGreedily(caffe::Net& net,\r\n\t\t\t\t\t\t\t\t\t\t\t\t const InputFramesBatch& frames_batch,\r\n const LastActionBatch& last_action_batch,\r\n bool cont);\r\n\r\n // Input data into the Frames/Target/Filter layers of the given\r\n // net. This must be done before forward is called.\r\n void InputDataIntoLayers(caffe::Net& net,\r\n float* frames_input,\r\n\t\t\t\t\t\t\t float* last_action_input,\r\n float* cont_input,\r\n float* target_input,\r\n float* filter_input);\r\n\r\nprotected:\r\n int unroll_; // Number of steps to unroll recurrent layers\r\n int minibatch_size_; // Size of each minibatch\r\n int frames_per_timestep_; // History of frames given at each timestep\r\n int frames_per_forward_; // Number of frames needed by each forward\r\n\r\n // Size of the input blobs to the memory layers\r\n int frame_input_size_TRAIN_, target_input_size_TRAIN_,\r\n filter_input_size_TRAIN_, cont_input_size_TRAIN_,\r\n\t\tlast_action_input_size_TRAIN_;\r\n int frame_input_size_TEST_, cont_input_size_TEST_,\r\n\t\tlast_action_input_size_TEST_;\r\n\r\n const ActionVect legal_actions_;\r\n const int replay_memory_capacity_;\r\n const double gamma_;\r\n const int clone_frequency_; // How often (steps) the clone_net is updated\r\n int replay_memory_size_; // Number of transitions in replay memory\r\n ReplayMemory replay_memory_;\r\n SolverSp solver_;\r\n NetSp net_; // The primary network used for action selection.\r\n NetSp test_net_; // Net used for testing\r\n NetSp clone_net_; // Clone used to generate targets.\r\n int last_clone_iter_; // Iteration in which the net was last cloned\r\n std::mt19937 random_engine;\r\n float smoothed_loss_;\r\n std::vector last_displayed_screen_; // Used 
in RedisplayScreen\r\n};\r\n\r\n/**\r\n * Returns a vector of filenames matching a given regular expression.\r\n */\r\nstd::vector FilesMatchingRegexp(const std::string& regexp);\r\n\r\n/**\r\n * Removes snapshots starting with snapshot_prefix that have an\r\n * iteration less than min_iter. Does not remove high-score snapshots.\r\n */\r\nvoid RemoveSnapshots(const std::string& snapshot_prefix, int min_iter);\r\n\r\n/**\r\n * Look for the latest snapshot to resume from. Returns a string\r\n * containing the path to the .solverstate. Returns empty string if\r\n * none is found. Will only return if the snapshot contains all of:\r\n * .solverstate,.caffemodel,.replaymemory\r\n */\r\nstd::string FindLatestSnapshot(const std::string& snapshot_prefix);\r\n/**\r\n * Returns a list of high score snapshots\r\n */\r\nstd::vector GetHiScoreSnapshots(const std::string& snapshot_prefix);\r\n\r\n/**\r\n * Look for the best HiScore matching the given snapshot prefix\r\n */\r\nfloat FindHiScore(const std::string& snapshot_prefix);\r\n\r\n/**\r\n * Remove all high-score snapshots matching the given snapshot prefix\r\n */\r\nvoid RemoveHiScoreSnapshots(const std::string& snapshot_prefix);\r\n\r\n/**\r\n * Preprocess an ALE screen (downsampling & grayscaling). 
Optionally\r\n * obscure the screen to make ALE into a POMDP.\r\n */\r\nFrameDataSp PreprocessScreen(const ALEScreen& raw_screen);\r\n\r\n}\r\n\r\n#endif /* DQN_HPP_ */\r\n", "meta": {"hexsha": "8a14cdfe101ffb293e899216b2b8c254b79d7555", "size": 9684, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "dqn.hpp", "max_stars_repo_name": "bit1029public/ADRQN", "max_stars_repo_head_hexsha": "c2390359599dadd5a5077e9ec4df11ed1a2a891c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-12-06T02:50:13.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-06T02:50:13.000Z", "max_issues_repo_path": "dqn.hpp", "max_issues_repo_name": "bit1029public/ADRQN", "max_issues_repo_head_hexsha": "c2390359599dadd5a5077e9ec4df11ed1a2a891c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dqn.hpp", "max_forks_repo_name": "bit1029public/ADRQN", "max_forks_repo_head_hexsha": "c2390359599dadd5a5077e9ec4df11ed1a2a891c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2461538462, "max_line_length": 110, "alphanum_fraction": 0.6829822387, "num_tokens": 2034, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.5078118642792044, "lm_q2_score": 0.3923368301671084, "lm_q1q2_score": 0.19923329715255295}} {"text": "// Boost.Geometry\n\n// Copyright (c) 2021, Oracle and/or its affiliates.\n\n// Contributed and/or modified by Adam Wulkiewicz, on behalf of Oracle\n\n// Licensed under the Boost Software License version 1.0.\n// http://www.boost.org/users/license.html\n\n#ifndef BOOST_GEOMETRY_STRATEGIES_AZIMUTH_GEOGRAPHIC_HPP\n#define BOOST_GEOMETRY_STRATEGIES_AZIMUTH_GEOGRAPHIC_HPP\n\n\n// TODO: move this file to boost/geometry/strategy\n#include \n\n#include \n#include \n\n\nnamespace boost { namespace geometry\n{\n\nnamespace strategies { namespace azimuth\n{\n\ntemplate\n<\n typename FormulaPolicy = strategy::andoyer,\n typename Spheroid = srs::spheroid,\n typename CalculationType = void\n>\nclass geographic : strategies::detail::geographic_base\n{\n using base_t = strategies::detail::geographic_base;\n\npublic:\n geographic()\n : base_t()\n {}\n\n explicit geographic(Spheroid const& spheroid)\n : base_t(spheroid)\n {}\n\n auto azimuth() const\n {\n return strategy::azimuth::geographic\n <\n FormulaPolicy, Spheroid, CalculationType\n >(base_t::m_spheroid);\n }\n};\n\n\nnamespace services\n{\n\ntemplate \nstruct default_strategy\n{\n using type = strategies::azimuth::geographic<>;\n};\n\n\ntemplate \nstruct strategy_converter >\n{\n static auto get(strategy::azimuth::geographic const& strategy)\n {\n return strategies::azimuth::geographic(strategy.model());\n }\n};\n\n} // namespace services\n\n}} // namespace strategies::azimuth\n\n}} // namespace boost::geometry\n\n#endif // BOOST_GEOMETRY_STRATEGIES_AZIMUTH_GEOGRAPHIC_HPP\n", "meta": {"hexsha": "a4fac5e60c7f5d8e15f12b7e08d6bc92598b9bb4", "size": 1916, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "ReactNativeFrontend/ios/Pods/boost/boost/geometry/strategies/azimuth/geographic.hpp", "max_stars_repo_name": "Harshitha91/Tmdb-react-native-node", "max_stars_repo_head_hexsha": 
"e06e3f25a7ee6946ef07a1f524fdf62e48424293", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 326.0, "max_stars_repo_stars_event_min_datetime": "2015-02-08T13:47:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T02:13:59.000Z", "max_issues_repo_path": "ReactNativeFrontend/ios/Pods/boost/boost/geometry/strategies/azimuth/geographic.hpp", "max_issues_repo_name": "Harshitha91/Tmdb-react-native-node", "max_issues_repo_head_hexsha": "e06e3f25a7ee6946ef07a1f524fdf62e48424293", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 623.0, "max_issues_repo_issues_event_min_datetime": "2015-01-02T23:45:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T11:15:23.000Z", "max_forks_repo_path": "ReactNativeFrontend/ios/Pods/boost/boost/geometry/strategies/azimuth/geographic.hpp", "max_forks_repo_name": "Harshitha91/Tmdb-react-native-node", "max_forks_repo_head_hexsha": "e06e3f25a7ee6946ef07a1f524fdf62e48424293", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 215.0, "max_forks_repo_forks_event_min_datetime": "2015-01-14T15:50:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T03:58:36.000Z", "avg_line_length": 23.3658536585, "max_line_length": 77, "alphanum_fraction": 0.7212943633, "num_tokens": 464, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.5078118642792044, "lm_q2_score": 0.3923368301671084, "lm_q1q2_score": 0.19923329715255295}} {"text": "/*\n\nCopyright (c) 2005-2017, University of Oxford.\nAll rights reserved.\n\nUniversity of Oxford means the Chancellor, Masters and Scholars of the\nUniversity of Oxford, having an administrative office at Wellington\nSquare, Oxford OX1 2JD, UK.\n\nThis file is part of Chaste.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n * Neither the name of the University of Oxford nor the names of its\n contributors may be used to endorse or promote products derived from this\n software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE\nGOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\nHOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\nLIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\nOF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n*/\n#ifndef DYNAMICALLY_LOADABLE_LR91_HPP_\n#define DYNAMICALLY_LOADABLE_LR91_HPP_\n\n#include \"ChasteSerialization.hpp\"\n#include \n\n#include \"AbstractCardiacCell.hpp\"\n#include \"AbstractStimulusFunction.hpp\"\n#include \"AbstractDynamicallyLoadableEntity.hpp\"\n#include \n\n/**\n * This class represents the Luo-Rudy 1991 system of equations,\n * with support for being compiled into a .so and loaded at run-time.\n */\nclass DynamicallyLoadableLr91 : public AbstractCardiacCell, public AbstractDynamicallyLoadableEntity\n{\nprivate:\n /** Needed for serialization. 
*/\n friend class boost::serialization::access;\n /**\n * Archive the member variables.\n *\n * @param archive\n * @param version\n */\n template\n void serialize(Archive & archive, const unsigned int version)\n {\n archive & boost::serialization::base_object(*this);\n archive & boost::serialization::base_object(*this);\n }\n\n /* Constants for the model */\n\n /** membrane capcaitance*/\n static const double membrane_C;\n /** Faraday constant*/\n static const double membrane_F;\n /** Universal gas constant*/\n static const double membrane_R;\n /** Temeperature*/\n static const double membrane_T;\n /** Reversal potentila for background current*/\n static const double background_current_E_b;\n /** Maximal conductance for background current*/\n static const double background_current_g_b;\n /** Maximal conductance for sodium current*/\n static const double fast_sodium_current_g_Na;\n /** Intracellular potassium concentration*/\n static const double ionic_concentrations_Ki;\n /** Extracellular potassium concentration*/\n static const double ionic_concentrations_Ko;\n /** Intracellular sodium concentration*/\n static const double ionic_concentrations_Nai;\n /** Extracellular sodium concentration*/\n static const double ionic_concentrations_Nao;\n /** Maximal conductance for plateau current*/\n static const double plateau_potassium_current_g_Kp;\n /** Permeability ratio Na/K for potassium currents*/\n static const double time_dependent_potassium_current_PR_NaK;\n\n /** Another parameter, which is a function of the above */\n double fast_sodium_current_E_Na;\n\n /**\n * Range-checking on the current values of the state variables. 
Make sure\n * all gating variables have are within zero and one, and all concentrations\n * are positive\n */\n void VerifyStateVariables();\n\npublic:\n /**\n * Constructor\n *\n * @param pSolver is a pointer to the ODE solver\n * @param pIntracellularStimulus is a pointer to the intracellular stimulus\n */\n DynamicallyLoadableLr91(boost::shared_ptr pSolver,\n boost::shared_ptr pIntracellularStimulus);\n\n /**\n * Destructor\n */\n ~DynamicallyLoadableLr91();\n\n /**\n * Fill in a vector representing the RHS of the Luo-Rudy 1991 system\n * of Odes at each time step, y' = [y1' ... yn'].\n * Some ODE solver will call this function repeatedly to solve for y = [y1 ... yn].\n *\n * @param time the current time, in milliseconds\n * @param rY current values of the state variables\n * @param rDY to be filled in with derivatives\n */\n void EvaluateYDerivatives(double time, const std::vector &rY, std::vector &rDY);\n\n /**\n * Returns the ionic current\n *\n * @param pStateVariables optional state at which to evaluate the current\n * @return the total ionic current\n */\n double GetIIonic(const std::vector* pStateVariables=NULL);\n\n /**\n * Get the intracellular calcium concentration\n *\n * @return the intracellular calcium concentration\n */\n double GetIntracellularCalciumConcentration();\n};\n\n#include \"SerializationExportWrapper.hpp\"\nCHASTE_CLASS_EXPORT(DynamicallyLoadableLr91)\n\nnamespace boost\n{\nnamespace serialization\n{\n/**\n * Allow us to not need a default constructor, by specifying how Boost should\n * instantiate a DynamicallyLoadableLr91 instance.\n */\ntemplate\ninline void save_construct_data(\n Archive & ar, const DynamicallyLoadableLr91 * t, const unsigned int file_version)\n{\n const boost::shared_ptr p_solver = t->GetSolver();\n const boost::shared_ptr p_stimulus = t->GetStimulusFunction();\n ar << p_solver;\n ar << p_stimulus;\n}\n\n/**\n * Allow us to not need a default constructor, by specifying how Boost should\n * instantiate a 
DynamicallyLoadableLr91 instance (using existing constructor).\n *\n * NB this constructor allocates memory for the other member variables too.\n */\ntemplate\ninline void load_construct_data(\n Archive & ar, DynamicallyLoadableLr91 * t, const unsigned int file_version)\n{\n\n boost::shared_ptr p_solver;\n boost::shared_ptr p_stimulus;\n ar >> p_solver;\n ar >> p_stimulus;\n ::new(t)DynamicallyLoadableLr91(p_solver, p_stimulus);\n}\n}\n} // namespace ...\n\n#endif // DYNAMICALLY_LOADABLE_LR91_HPP_\n", "meta": {"hexsha": "db29541aa4ab790498a2f7dea676522c73a5284e", "size": 6939, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "heart/dynamic/DynamicallyLoadableLr91.hpp", "max_stars_repo_name": "gonayl/Chaste", "max_stars_repo_head_hexsha": "498c48489a38a8f4c5fa7c01e691cc82df3d2e6b", "max_stars_repo_licenses": ["Apache-2.0", "BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "heart/dynamic/DynamicallyLoadableLr91.hpp", "max_issues_repo_name": "gonayl/Chaste", "max_issues_repo_head_hexsha": "498c48489a38a8f4c5fa7c01e691cc82df3d2e6b", "max_issues_repo_licenses": ["Apache-2.0", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "heart/dynamic/DynamicallyLoadableLr91.hpp", "max_forks_repo_name": "gonayl/Chaste", "max_forks_repo_head_hexsha": "498c48489a38a8f4c5fa7c01e691cc82df3d2e6b", "max_forks_repo_licenses": ["Apache-2.0", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3298429319, "max_line_length": 100, "alphanum_fraction": 0.7410289667, "num_tokens": 1553, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.5078118642792044, "lm_q2_score": 0.3923368301671084, "lm_q1q2_score": 0.19923329715255295}} {"text": "#include \"pch_bullet.h\"\n#include \n#include \n#include \"Foregrounds.h\"\n#include \"CausalityApplication.h\"\n#include \"Common\\PrimitiveVisualizer.h\"\n#include \"Common\\Extern\\cpplinq.hpp\"\n#include \n\nusing namespace Causality;\nusing namespace DirectX;\nusing namespace DirectX::Scene;\nusing namespace std;\nusing namespace Platform;\nusing namespace Eigen;\nusing namespace concurrency;\nusing namespace DirectX::Visualizers;\nextern wstring ResourcesDirectory;\n\n//std::unique_ptr HandPhysicalModel::s_pCylinder;\n//std::unique_ptr HandPhysicalModel::s_pSphere;\n\nconst static wstring SkyBoxTextures[6] = {\n\tResourcesDirectory + wstring(L\"Textures\\\\SkyBox\\\\GrimmNight\\\\Right.dds\"),\n\tResourcesDirectory + wstring(L\"Textures\\\\SkyBox\\\\GrimmNight\\\\Left.dds\"),\n\tResourcesDirectory + wstring(L\"Textures\\\\SkyBox\\\\GrimmNight\\\\Top.dds\"),\n\tResourcesDirectory + wstring(L\"Textures\\\\SkyBox\\\\GrimmNight\\\\Bottom.dds\"),\n\tResourcesDirectory + wstring(L\"Textures\\\\SkyBox\\\\GrimmNight\\\\Front.dds\"),\n\tResourcesDirectory + wstring(L\"Textures\\\\SkyBox\\\\GrimmNight\\\\Back.dds\"),\n};\n\n//std::unique_ptr pBroadphase = nullptr;\n//// Set up the collision configuration and dispatcher\n//std::unique_ptr pCollisionConfiguration = nullptr;\n//std::unique_ptr pDispatcher = nullptr;\n//// The actual physics solver\n//std::unique_ptr pSolver = nullptr;\n\nstd::queue> WorldBranch::BranchPool;\n\n\nfloat ShapeSimiliarity(const Eigen::VectorXf& v1, const Eigen::VectorXf& v2)\n{\n\tauto v = v1 - v2;\n\t//dis = sqrt(v.dot(v);\n\t//1.414 - dis;\n\tauto theta = XMScalarACosEst(v1.dot(v2) * XMScalarReciprocalSqrtEst(v1.dot(v1) * v2.dot(v2))); // Difference in angular [0,pi/4]\n\tauto rhlo = abs(sqrt(v1.dot(v1)) - sqrtf(v2.dot(v2)));\t// Difference in length [0,sqrt(2)]\n\treturn 1.0f - 0.5f * (0.3f * rhlo / sqrtf(2.0f) + 
0.7f * theta / (XM_PIDIV4));\n}\n\nCausality::WorldScene::WorldScene(const std::shared_ptr& pResouce, const DirectX::ILocatable* pCamera)\n\t: States(pResouce->GetD3DDevice())\n\t, m_pCameraLocation(pCamera)\n{\n\tm_HaveHands = false;\n\tm_showTrace = true;\n\tLoadAsync(pResouce->GetD3DDevice());\n}\n\nWorldScene::~WorldScene()\n{\n}\n\nconst float fingerRadius = 0.006f;\nconst float fingerLength = 0.02f;\n\nclass XmlModelLoader\n{\n};\n\nWorldBranch::WorldBranch()\n{\n\tpBroadphase.reset(new btDbvtBroadphase());\n\n\t// Set up the collision configuration and dispatcher\n\tpCollisionConfiguration.reset(new btDefaultCollisionConfiguration());\n\tpDispatcher.reset(new btCollisionDispatcher(pCollisionConfiguration.get()));\n\n\t// The actual physics solver\n\tpSolver.reset(new btSequentialImpulseConstraintSolver());\n\t// The world.\n\tpDynamicsWorld.reset(new btDiscreteDynamicsWorld(pDispatcher.get(), pBroadphase.get(), pSolver.get(), pCollisionConfiguration.get()));\n\tpDynamicsWorld->setGravity(btVector3(0, -1.0f, 0));\n\n\tIsEnabled = false;\n}\n\nvoid Causality::WorldScene::LoadAsync(ID3D11Device* pDevice)\n{\n\n\tm_loadingComplete = false;\n\tpBackground = nullptr;\n\n\t//CD3D11_DEFAULT d;\n\t//CD3D11_RASTERIZER_DESC Desc(d);\n\t//Desc.MultisampleEnable = TRUE;\n\t//ThrowIfFailed(pDevice->CreateRasterizerState(&Desc, &pRSState));\n\n\tconcurrency::task load_models([this, pDevice]() {\n\t\t{\n\t\t\tlock_guard guard(m_RenderLock);\n\t\t\tWorldBranch::InitializeBranchPool(1);\n\t\t\tWorldTree = WorldBranch::DemandCreate(\"Root\");\n\n\t\t\t//std::vector subjectTrans(30);\n\t\t\t//subjectTrans.resize(20);\n\t\t\t//for (size_t i = 0; i < 20; i++)\n\t\t\t//{\n\t\t\t//\tsubjectTrans[i].Scale = XMVectorReplicate(1.1f + 0.15f * i);// XMMatrixTranslation(0, 0, i*(-150.f));\n\t\t\t//}\n\t\t\t//WorldTree->Fork(subjectTrans);\n\n\t\t\tWorldTree->Enable(DirectX::AffineTransform::Identity());\n\t\t}\n\n\t\t//m_pFramesPool.reset(new 
WorldBranchPool);\n\t\t//m_pFramesPool->Initialize(30);\n\t\t//{\n\t\t//\tlock_guard guard(m_RenderLock);\n\t\t//\tfor (size_t i = 0; i < 30; i++)\n\t\t//\t{\n\t\t//\t\tm_StateFrames.push_back(m_pFramesPool->DemandCreate());\n\t\t//\t\tauto pFrame = m_StateFrames.back();\n\t\t//\t\t//pFrame->Initialize();\n\t\t//\t\tpFrame->SubjectTransform.Scale = XMVectorReplicate(1.0f + 0.1f * i);// XMMatrixTranslation(0, 0, i*(-150.f));\n\t\t//\t}\n\t\t//\tm_StateFrames.front()->Enable(DirectX::AffineTransform::Identity());\n\t\t//}\n\n\t\tauto Directory = App::Current()->GetResourcesDirectory();\n\t\tauto ModelDirectory = Directory / \"Models\";\n\t\tauto TextureDirectory = Directory / \"Textures\";\n\t\tauto texDir = TextureDirectory.wstring();\n\t\tpEffect = std::make_shared(pDevice);\n\t\tpEffect->SetVertexColorEnabled(false);\n\t\tpEffect->SetTextureEnabled(true);\n\t\t//pEffect->SetLightingEnabled(true);\n\t\tpEffect->EnableDefaultLighting();\n\t\t{\n\t\t\tvoid const* shaderByteCode;\n\t\t\tsize_t byteCodeLength;\n\t\t\tpEffect->GetVertexShaderBytecode(&shaderByteCode, &byteCodeLength);\n\t\t\tpInputLayout = CreateInputLayout(pDevice, shaderByteCode, byteCodeLength);\n\t\t}\n\n\t\t//pBackground = std::make_unique(pDevice, SkyBoxTextures);\n\n\t\tauto sceneFile = Directory / \"Foregrounds.xml\";\n\t\ttinyxml2::XMLDocument sceneDoc;\n\t\tsceneDoc.LoadFile(sceneFile.string().c_str());\n\t\tauto scene = sceneDoc.FirstChildElement(\"scene\");\n\t\tauto node = scene->FirstChildElement();\n\t\twhile (node)\n\t\t{\n\t\t\tif (!strcmp(node->Name(), \"obj\"))\n\t\t\t{\n\t\t\t\tauto path = node->Attribute(\"src\");\n\t\t\t\tif (path != nullptr && strlen(path) != 0)\n\t\t\t\t{\n\t\t\t\t\tauto pModel = std::make_shared();\n\t\t\t\t\tGeometryModel::CreateFromObjFile(pModel.get(), pDevice, (ModelDirectory / path).wstring(), texDir);\n\n\t\t\t\t\tXMFLOAT3 v = pModel->BoundOrientedBox.Extents;\n\t\t\t\t\tv.y /= v.x;\n\t\t\t\t\tv.z /= v.x;\n\t\t\t\t\tm_ModelFeatures[pModel->Name] = 
Eigen::Vector2f(v.y, v.z);\n\t\t\t\t\tstd::cout << \"[Model] f(\" << pModel->Name << \") = \" << m_ModelFeatures[pModel->Name] << std::endl;\n\n\t\t\t\t\tfloat scale = 1.0f;\n\t\t\t\t\tfloat mass = 1.0f;\n\t\t\t\t\tVector3 pos;\n\n\t\t\t\t\tauto attr = node->Attribute(\"scale\");\n\t\t\t\t\tif (attr != nullptr)\n\t\t\t\t\t{\n\t\t\t\t\t\tstringstream ss(attr);\n\n\t\t\t\t\t\tss >> scale;\n\t\t\t\t\t\t//model->SetScale(XMVectorReplicate(scale));\n\t\t\t\t\t}\n\t\t\t\t\tattr = node->Attribute(\"position\");\n\t\t\t\t\tif (attr != nullptr)\n\t\t\t\t\t{\n\t\t\t\t\t\tstringstream ss(attr);\n\n\t\t\t\t\t\tchar ch;\n\t\t\t\t\t\tss >> pos.x >> ch >> pos.y >> ch >> pos.z;\n\t\t\t\t\t\t//model->SetPosition(pos);\n\t\t\t\t\t}\n\n\t\t\t\t\tattr = node->Attribute(\"mass\");\n\t\t\t\t\tif (attr)\n\t\t\t\t\t\tmass = (float) atof(attr);\n\n\t\t\t\t\tAddObject(pModel, mass, pos, DirectX::Quaternion::Identity, DirectX::Vector3(scale));\n\n\t\t\t\t\t//auto pShape = model->CreateCollisionShape();\n\t\t\t\t\t//pShape->setLocalScaling(btVector3(scale, scale, scale));\n\t\t\t\t\t//btVector3 minb, maxb;\n\t\t\t\t\t//model->InitializePhysics(pDynamicsWorld, pShape, mass, pos, XMQuaternionIdentity());\n\t\t\t\t\t//model->GetBulletRigid()->setFriction(1.0f);\n\t\t\t\t\t//{\n\t\t\t\t\t//\tstd::lock_guard guard(m_RenderLock);\n\t\t\t\t\t//\tModels.push_back(model);\n\t\t\t\t\t//}\n\t\t\t\t}\n\t\t\t}\n\t\t\telse if (!strcmp(node->Name(), \"cube\"))\n\t\t\t{\n\t\t\t\tVector3 extent(1.0f);\n\t\t\t\tVector3 pos;\n\t\t\t\tColor color(255, 255, 255, 255);\n\t\t\t\tstring name(\"cube\");\n\t\t\t\tfloat mass = 1.0f;\n\t\t\t\tauto attr = node->Attribute(\"extent\");\n\t\t\t\tif (attr != nullptr)\n\t\t\t\t{\n\t\t\t\t\tstringstream ss(attr);\n\t\t\t\t\tchar ch;\n\t\t\t\t\tss >> extent.x >> ch >> extent.y >> ch >> extent.z;\n\t\t\t\t}\n\t\t\t\tattr = node->Attribute(\"position\");\n\t\t\t\tif (attr != nullptr)\n\t\t\t\t{\n\t\t\t\t\tstringstream ss(attr);\n\n\t\t\t\t\tchar ch;\n\t\t\t\t\tss >> pos.x >> ch 
>> pos.y >> ch >> pos.z;
				}
				attr = node->Attribute("color");
				if (attr != nullptr)
				{
					stringstream ss(attr);

					char ch;
					// Color is given as "r,g,b" or "r,g,b,a" in 0-255, then normalized to [0,1].
					ss >> color.x >> ch >> color.y >> ch >> color.z;
					if (!ss.eof())
						ss >> ch >> color.w;
					color = color.ToVector4() / 255;
					color.Saturate();
				}
				attr = node->Attribute("name");
				if (attr)
					name = attr;

				attr = node->Attribute("mass");
				if (attr)
					mass = (float) atof(attr);

				// NOTE(review): template/type arguments appear to have been stripped in this copy
				// (e.g. make_shared here) - restore against the original source before compiling.
				auto pModel = make_shared(name, extent, (XMVECTOR) color);

				// Shape feature = aspect ratios (y/x, z/x) of the oriented bounding box extents.
				XMFLOAT3 v = pModel->BoundOrientedBox.Extents;
				v.y /= v.x;
				v.z /= v.x;
				m_ModelFeatures[pModel->Name] = Eigen::Vector2f(v.y, v.z);

				AddObject(pModel, mass, pos, DirectX::Quaternion::Identity, DirectX::Vector3::One);
				//auto pShape = pModel->CreateCollisionShape();
				//pModel->InitializePhysics(nullptr, pShape, mass, pos);
				//pModel->Enable(pDynamicsWorld);
				//pModel->GetBulletRigid()->setFriction(1.0f);
				//{
				//	std::lock_guard guard(m_RenderLock);
				//	Models.push_back(pModel);
				//}
			}
			node = node->NextSiblingElement();
		}

		m_loadingComplete = true;
	});
}

// Stores a non-owning pointer to the camera used as the view-independent reference
// location; the caller retains ownership of *pCamera.
void Causality::WorldScene::SetViewIdenpendntCameraPosition(const DirectX::ILocatable * pCamera)
{
	m_pCameraLocation = pCamera;
}

// Renders one frame of the world scene:
//   1) the background (if present),
//   2) every model once per superposition state - opacity = state probability,
//      skipped when its transformed OBB falls outside the view frustum,
//   3) the hand subjects of every world-tree leaf (opacity = branch likelihood),
//   4) debug primitives: frustum wireframe, world axes, model bounding boxes,
//      per-corner similarity spheres, and Leap palm markers.
void Causality::WorldScene::Render(ID3D11DeviceContext * pContext)
{
	if (pBackground)
		pBackground->Render(pContext);

	{
		pContext->IASetInputLayout(pInputLayout.Get());
		auto pAWrap = States.AnisotropicWrap();
		pContext->PSSetSamplers(0, 1, &pAWrap);
		pContext->RSSetState(pRSState.Get());
		// Guards Models/ModelStates against the async loader and Leap callbacks.
		std::lock_guard guard(m_RenderLock);
		
		BoundingOrientedBox modelBox;
		using namespace cpplinq;

		// Render models
		for (const auto& model : Models)
		{
			auto 
superposition = ModelStates[model->Name];
			for (const auto& state : superposition)
			{
				auto mat = state.TransformMatrix();
				model->LocalMatrix = state.TransformMatrix(); //.first->GetRigidTransformMatrix();
				model->Opticity = state.Probability; //state.second;

				model->BoundOrientedBox.Transform(modelBox, mat);
				// Render if in the view frustum

				if (ViewFrutum.Contains(modelBox) != ContainmentType::DISJOINT)
					model->Render(pContext, pEffect.get());
			}
		}

		for (const auto& branch : WorldTree->leaves())
		{
				//Subjects
				for (const auto& item : branch.Subjects)
				{
					if (item.second)
					{
						item.second->Opticity = branch.Liklyhood();
						item.second->Render(pContext, nullptr);
					}
				}
		}
	}



	g_PrimitiveDrawer.Begin();

	Vector3 conners[8];

	ViewFrutum.GetCorners(conners);
	DrawBox(conners, Colors::Pink);
	BoundingOrientedBox obox;
	BoundingBox box;
	{
		//Draw axias
		DrawAxis();
		//g_PrimitiveDrawer.DrawQuad({ 1.0f,0,1.0f }, { -1.0f,0,1.0f }, { -1.0f,0,-1.0f }, { 1.0f,0,-1.0f }, Colors::Pink);


		auto& fh = m_HandDescriptionFeature;

		{
			std::lock_guard guard(m_RenderLock);
			auto s = Models.size();
			if (m_HaveHands)
				std::cout << "Detail Similarity = {";
			for (size_t i = 0; i < s; i++)
			{
				const auto& model = Models[i];
				obox = model->GetOrientedBoundingBox();
				if (ViewFrutum.Contains(obox) != ContainmentType::DISJOINT)
				{
					obox.GetCorners(conners);
					DrawBox(conners, DirectX::Colors::DarkGreen);
				}
				if (m_HaveHands)
				{
					// Compare this model's aspect-ratio feature against the current
					// hand-trace feature; cache and visualize the similarity.
					auto fm = m_ModelFeatures[model->Name];

					auto similarity = ShapeSimiliarity(fm, fh);
					m_ModelDetailSimilarity[model->Name] = similarity;
					std::cout << model->Name << ':' << 
similarity << " , ";
					Color c = Color::Lerp({ 1,0,0 }, { 0,1,0 }, similarity);
					for (size_t i = 0; i < 8; i++)
					{
						g_PrimitiveDrawer.DrawSphere(conners[i], 0.005f * similarity, c);
					}
				}
				auto pModel = dynamic_cast(model.get());
				XMMATRIX transform = model->GetWorldMatrix();
				if (pModel)
				{
					// Also outline each sub-part's OBB when it intersects the frustum.
					for (const auto& part : pModel->Parts)
					{
						part->BoundOrientedBox.Transform(obox, transform);
						if (ViewFrutum.Intersects(obox))
						{
							obox.GetCorners(conners);
							DrawBox(conners, DirectX::Colors::Orange);
						}
					}
				}
			}
		}
		if (m_HaveHands)
			std::cout << '}' << std::endl;
	}

	if (m_HaveHands)
	{

		//for (auto& pRigid : m_HandRigids)
		//{
		//	g_PrimitiveDrawer.DrawSphere(pRigid->GetPosition(), 0.01f, Colors::Pink);
		//}

		auto pCamera = App::Current()->GetPrimaryCamera();

		XMMATRIX leap2world = m_FrameTransform;// XMMatrixScalingFromVector(XMVectorReplicate(0.001f)) * XMMatrixTranslation(0,0,-1.0) * XMMatrixRotationQuaternion(pCamera->GetOrientation()) * XMMatrixTranslationFromVector((XMVECTOR)pCamera->GetPosition());
		std::lock_guard guard(m_HandFrameMutex);
		for (const auto& hand : m_Frame.hands())
		{
			// Mark each tracked palm with a small sphere in world space.
			auto palmPosition = XMVector3Transform(hand.palmPosition().toVector3(), leap2world);
			g_PrimitiveDrawer.DrawSphere(palmPosition, 0.02f, Colors::YellowGreen);
			//for (const auto& finger : hand.fingers())
			//{
			//	for (size_t i = 0; i < 4; i++)
			//	{
			//		const auto & bone = finger.bone((Leap::Bone::Type)i);
			//		XMVECTOR bJ = XMVector3Transform(bone.prevJoint().toVector3(), leap2world);
			//		XMVECTOR eJ = XMVector3Transform(bone.nextJoint().toVector3(), leap2world);
			//		//if (i == 0)
			//		//	g_PrimitiveDrawer.DrawSphere(bJ, 0.01f, 
DirectX::Colors::Lime);

			//		//// The unit of leap is millimeter
			//		//g_PrimitiveDrawer.DrawSphere(eJ, 0.01f, DirectX::Colors::Lime);
			//		g_PrimitiveDrawer.DrawLine(bJ, eJ, DirectX::Colors::White);
			//	}
			//}
		}

		// NOT VALIAD!~!!!!!
		//if (m_HandTrace.size() > 0 && m_showTrace)
		//{
		//	std::lock_guard guard(m_RenderLock);
		//	//auto pJoints = m_HandTrace.linearize();
		//	m_CurrentHandBoundingBox.GetCorners(conners);
		//	DrawBox(conners, Colors::LimeGreen);
		//	m_HandTraceBoundingBox.GetCorners(conners);
		//	DrawBox(conners, Colors::YellowGreen);
		//	m_HandTraceModel.Primitives.clear();
		//	for (int i = m_HandTrace.size() - 1; i >= std::max(0, (int) m_HandTrace.size() - TraceLength); i--)
		//	{
		//		const auto& h = m_HandTrace[i];
		//		float radius = (i + 1 - std::max(0, m_HandTrace.size() - TraceLength)) / (std::min(m_HandTrace.size(), TraceLength));
		//		for (size_t j = 0; j < h.size(); j++)
		//		{
		//			//m_HandTraceModel.Primitives.emplace_back(h[j], 0.02f);
		//			g_PrimitiveDrawer.DrawSphere(h[j], 0.005f * radius, Colors::LimeGreen);
		//		}
		//	}
		//}
	}
	g_PrimitiveDrawer.End();

	//if (m_HandTrace.size() > 0)
	//{
	//	if (!pBatch)
	//	{
	//		pBatch = std::make_unique>(pContext, 204800,40960);
	//	}
	//	m_HandTraceModel.SetISO(0.33333f);
	//	m_HandTraceModel.Update();
	//	m_HandTraceModel.Tessellate(m_HandTraceVertices, m_HandTraceIndices, 0.005f);
	//	pBatch->Begin();
	//	pEffect->SetDiffuseColor(Colors::LimeGreen);
	//	//pEffect->SetEmissiveColor(Colors::LimeGreen);
	//	pEffect->SetTextureEnabled(false);
	//	pEffect->SetWorld(XMMatrixIdentity());
	//	pEffect->Apply(pContext);
	//	pBatch->DrawIndexed(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST, m_HandTraceIndices.data(), m_HandTraceIndices.size(), m_HandTraceVertices.data(), 
m_HandTraceVertices.size());
	//	pBatch->End();
	//}

}

// Draws the world axes: colored X(red)/Y(green)/Z(blue) lines of length 10,
// an origin marker, and double-sided triangles forming arrow heads at +5 on each axis.
void Causality::WorldScene::DrawAxis()
{
	g_PrimitiveDrawer.DrawSphere({ 0,0,0,0.02 }, Colors::Red);
	g_PrimitiveDrawer.DrawLine({ -5,0,0 }, { 5,0,0 }, Colors::Red);
	g_PrimitiveDrawer.DrawLine({ 0,-5,0 }, { 0,5,0 }, Colors::Green);
	g_PrimitiveDrawer.DrawLine({ 0,0,-5 }, { 0,0,5 }, Colors::Blue);
	// Each arrow head is drawn twice with opposite winding so it is visible from both sides.
	g_PrimitiveDrawer.DrawTriangle({ 5.05f,0,0 }, { 4.95,0.05,0 }, { 4.95,-0.05,0 }, Colors::Red);
	g_PrimitiveDrawer.DrawTriangle({ 5.05f,0,0 }, { 4.95,-0.05,0 }, { 4.95,0.05,0 }, Colors::Red);
	g_PrimitiveDrawer.DrawTriangle({ 5.05f,0,0 }, { 4.95,0,0.05 }, { 4.95,0,-0.05 }, Colors::Red);
	g_PrimitiveDrawer.DrawTriangle({ 5.05f,0,0 }, { 4.95,0,-0.05 }, { 4.95,0,0.05 }, Colors::Red);
	g_PrimitiveDrawer.DrawTriangle({ 0,5.05f,0 }, { -0.05,4.95,0 }, { 0.05,4.95,0 }, Colors::Green);
	g_PrimitiveDrawer.DrawTriangle({ 0,5.05f,0 }, { 0.05,4.95,0 }, { -0.05,4.95,0 }, Colors::Green);
	g_PrimitiveDrawer.DrawTriangle({ 0,5.05f,0 }, { 0.0,4.95,-0.05 }, { 0,4.95,0.05 }, Colors::Green);
	g_PrimitiveDrawer.DrawTriangle({ 0,5.05f,0 }, { 0.0,4.95,0.05 }, { 0,4.95,-0.05 }, Colors::Green);
	g_PrimitiveDrawer.DrawTriangle({ 0,0,5.05f }, { 0.05,0,4.95 }, { -0.05,0,4.95 }, Colors::Blue);
	g_PrimitiveDrawer.DrawTriangle({ 0,0,5.05f }, { -0.05,0,4.95 }, { 0.05,0,4.95 }, Colors::Blue);
	g_PrimitiveDrawer.DrawTriangle({ 0,0,5.05f }, { 0,0.05,4.95 }, { 0,-0.05,4.95 }, Colors::Blue);
	g_PrimitiveDrawer.DrawTriangle({ 0,0,5.05f }, { 0,-0.05,4.95 }, { 0,0.05,4.95 }, Colors::Blue);

}

// Draws the 12 edges of a box given its 8 corners (DirectXMath GetCorners ordering:
// near face 0-3, far face 4-7).
void Causality::WorldScene::DrawBox(DirectX::SimpleMath::Vector3 conners [], DirectX::CXMVECTOR color)
{
	g_PrimitiveDrawer.DrawLine(conners[0], conners[1], color);
	g_PrimitiveDrawer.DrawLine(conners[1], conners[2], color);
	g_PrimitiveDrawer.DrawLine(conners[2], conners[3], color);
	g_PrimitiveDrawer.DrawLine(conners[3], conners[0], color);

	g_PrimitiveDrawer.DrawLine(conners[3], conners[7], 
color);
	g_PrimitiveDrawer.DrawLine(conners[2], conners[6], color);
	g_PrimitiveDrawer.DrawLine(conners[1], conners[5], color);
	g_PrimitiveDrawer.DrawLine(conners[0], conners[4], color);

	g_PrimitiveDrawer.DrawLine(conners[4], conners[5], color);
	g_PrimitiveDrawer.DrawLine(conners[5], conners[6], color);
	g_PrimitiveDrawer.DrawLine(conners[6], conners[7], color);
	g_PrimitiveDrawer.DrawLine(conners[7], conners[4], color);

}

// Propagates new view/projection matrices to the effect, the background and the
// primitive drawer, then rebuilds the culling frustum: it is created in projection
// space via the RH helper and transformed into world space with the inverse view matrix.
void XM_CALLCONV Causality::WorldScene::UpdateViewMatrix(DirectX::FXMMATRIX view, DirectX::CXMMATRIX projection)
{
	if (pEffect)
	{
		pEffect->SetView(view);
		pEffect->SetProjection(projection);
	}
	if (pBackground)
	{
		pBackground->UpdateViewMatrix(view,projection);
	}
	g_PrimitiveDrawer.SetView(view);
	g_PrimitiveDrawer.SetProjection( projection);
	
	// BoundingFrustum is assumpt Left-Handed
	// (stock BoundingFrustum::CreateFromMatrix assumes an LH projection, hence the RH helper)
	BoundingFrustumExtension::CreateFromMatrixRH(ViewFrutum, projection);
	//BoundingFrustum::CreateFromMatrix(ViewFrutum, projection);
	// Fix the RH-projection matrix
	//XMStoreFloat4((XMFLOAT4*) &ViewFrutum.RightSlope, -XMLoadFloat4((XMFLOAT4*) &ViewFrutum.RightSlope));
	//XMStoreFloat2((XMFLOAT2*) &ViewFrutum.Near, -XMLoadFloat2((XMFLOAT2*) &ViewFrutum.Near));
	//ViewFrutum.LeftSlope = -ViewFrutum.LeftSlope;
	//ViewFrutum.RightSlope = -ViewFrutum.RightSlope;
	//ViewFrutum.TopSlope = -ViewFrutum.TopSlope;
	//ViewFrutum.BottomSlope = -ViewFrutum.BottomSlope;
	//ViewFrutum.Near = -ViewFrutum.Near;
	//ViewFrutum.Far = -ViewFrutum.Far;
	XMVECTOR det;
	auto invView = view;
	//invView.r[2] = -invView.r[2];
	invView = XMMatrixInverse(&det, invView);
	//invView.r[2] = -invView.r[2];
	//XMVECTOR temp = invView.r[2];
	//invView.r[2] = invView.r[1];
	//invView.r[1] = temp;
	// Move the projection-space frustum into world space.
	ViewFrutum.Transform(ViewFrutum,invView);
	// Fix the RH-inv-view-matrix to LH-equalulent by swap row-y with row-z
	//XMStoreFloat3(&ViewFrutum.Origin, 
invView.r[3]);

	//XMStoreFloat4(&ViewFrutum.Orientation, XMQuaternionRotationMatrix(invView));;

	//for (const auto& item : m_HandModels)
	//{
	//	if (item.second)
	//		item.second->UpdateViewMatrix(view);
	//}
}

//void XM_CALLCONV Causality::WorldScene::UpdateProjectionMatrix(DirectX::FXMMATRIX projection)
//{
//	if (pEffect)
//		pEffect->SetProjection(projection);
//	if (pBackground)
//		pBackground->UpdateProjectionMatrix(projection);
//	
//	g_PrimitiveDrawer.SetProjection(projection);
//}

// Advances the world simulation by the elapsed frame time and recomputes the model-state
// superposition; when hand-trace samples exist, also rebuilds the current/accumulated
// hand-trace bounding boxes and the 2-D hand description feature (extent ratios y/x, z/x).
void Causality::WorldScene::UpdateAnimation(StepTimer const & timer)
{
	{
		lock_guard guard(m_RenderLock);
		using namespace cpplinq;
		using namespace std::placeholders;
		float stepTime = (float) timer.GetElapsedSeconds();
		WorldTree->Evolution(stepTime, m_Frame, m_FrameTransform);
		ModelStates = WorldTree->CaculateSuperposition();
	}

	if (m_HandTrace.size() > 0)
	{
		{ // Critia section
			std::lock_guard guard(m_HandFrameMutex);
			const int plotSize = 45;
			// OBB of the most recent joint set only.
			BoundingOrientedBox::CreateFromPoints(m_CurrentHandBoundingBox, m_HandTrace.back().size(), m_HandTrace.back().data(), sizeof(Vector3));
			// Flatten the last 'plotSize' frames of joints into m_TracePoints.
			m_TracePoints.clear();
			Color color = Colors::LimeGreen;
			for (int i = m_HandTrace.size() - 1; i >= std::max(0, (int) m_HandTrace.size() - plotSize); i--)
			{
				const auto& h = m_HandTrace[i];
				//float radius = (i + 1 - std::max(0U, m_HandTrace.size() - plotSize)) / (std::min(m_HandTrace.size(), plotSize));
				for (size_t j = 0; j < h.size(); j++)
				{
					//g_PrimitiveDrawer.DrawSphere(h[j], 0.005 * radius, color);
					m_TracePoints.push_back(h[j]);
				}
			}
			//for (const auto& pModel : Children)
			//{
			//	//btCollisionWorld::RayResultCallback
			//	auto pRigid = dynamic_cast(pModel.get());
			//	pRigid->GetBulletRigid()->checkCollideWith()
			//}
			//auto pBat = 
dynamic_cast(Children[0].get());
			//pBat->SetPosition(vector_cast(m_Frame.hands().frontmost().palmPosition()));
		}
		CreateBoundingOrientedBoxFromPoints(m_HandTraceBoundingBox, m_TracePoints.size(), m_TracePoints.data(), sizeof(Vector3));
		XMFLOAT3 v = m_HandTraceBoundingBox.Extents;
		m_HandDescriptionFeature = Eigen::Vector2f(v.y / v.x, v.z / v.x);

		// ASSUMPTION: Extends is sorted from!
		// NOTE(review): the feature assumes Extents.x is the dominant axis - confirm
		// CreateBoundingOrientedBoxFromPoints sorts the extents as asserted above.


		//XMMATRIX invTrans = XMMatrixAffineTransformation(g_XMOne / XMVectorReplicate(m_HandTraceBoundingBox.Extents.x), XMVectorZero(), XMQuaternionInverse(XMLoadFloat4(&m_HandTraceBoundingBox.Orientation)), -XMLoadFloat3(&m_HandTraceBoundingBox.Center));
		//int sampleCount = std::min(m_TraceSamples.size(), TraceLength)*m_TraceSamples[0].size();
		//for (const auto& model : Children)
		//{
		//	auto pModel = dynamic_cast(model.get());
		//	auto inCount = 0;
		//	if (pModel)
		//	{
		//		auto obox = model->GetOrientedBoundingBox();
		//		XMMATRIX fowTrans = XMMatrixAffineTransformation(XMVectorReplicate(obox.Extents.x), XMVectorZero(), XMQuaternionIdentity(), XMVectorZero());
		//		fowTrans = invTrans * fowTrans;
		//		auto pSample = m_TraceSamples.back().data() + m_TraceSamples.back().size()-1;
		//		for (size_t i = 0; i < sampleCount; i++)
		//		{
		//			const auto& point = pSample[-i];
		//			XMVECTOR p = XMVector3Transform(point, fowTrans);
		//			int j;
		//			for ( j = 0; j < pModel->Parts.size(); j++)
		//			{
		//				if (pModel->Parts[j].BoundOrientedBox.Contains(p))
		//					break;
		//			}
		//			if (j >= pModel->Parts.size())
		//				inCount++;
		//		}
		//	}
		//	m_ModelDetailSimilarity[model->Name] = (float) inCount / (float)sampleCount;
		//}



	}

	//if (pGroundRigid)
	//	pGroundRigid->setLinearVelocity({ 0,-1.0f,0 });



	//pDynamicsWorld->stepSimulation(timer.GetElapsedSeconds(), 
10);

	//for (auto& obj : Children)
	//{
	//	auto s = (rand() % 1000) / 1000.0;
	//	obj->Rotate(XMQuaternionRotationRollPitchYaw(0, 0.5f * timer.GetElapsedSeconds(), 0));
	//}
}

// Leap callback: hands entered tracking. Flags hand presence, captures the frame,
// and registers every tracked hand as a subjective object in each world-tree leaf.
void Causality::WorldScene::OnHandsTracked(const UserHandsEventArgs & e)
{
	m_HaveHands = true;
	//const auto& hand = e.sender.frame().hands().frontmost();
	//size_t i = 0;
	//
	//XMMATRIX leap2world = e.toWorldTransform;
	//for (const auto& finger : hand.fingers())
	//{
	//	XMVECTOR bJ = XMVector3Transform(finger.bone((Leap::Bone::Type)0).prevJoint().toVector3(), leap2world);
	//	auto pState = m_HandRigids[i]->GetBulletRigid()->getMotionState();
	//	auto transform = btTransform::getIdentity();
	//	transform.setOrigin(vector_cast(bJ));
	//	if (!pState)
	//	{
	//		pState = new btDefaultMotionState(transform);
	//		m_HandRigids[i]->GetBulletRigid()->setMotionState(pState);
	//	}
	//	else
	//		pState->setWorldTransform(transform);
	//	

	//	i++;
	//	for (size_t boneIdx = 0; boneIdx < 4; boneIdx++) // bone idx
	//	{
	//		const auto & bone = finger.bone((Leap::Bone::Type)boneIdx);
	//		XMVECTOR eJ = XMVector3Transform(bone.nextJoint().toVector3(), leap2world);

	//		auto pState = m_HandRigids[i]->GetBulletRigid()->getMotionState();
	//		auto transform = btTransform::getIdentity();
	//		transform.setOrigin(vector_cast(eJ));
	//		if (!pState)
	//		{
	//			pState = new btDefaultMotionState(transform);
	//			m_HandRigids[i]->GetBulletRigid()->setMotionState(pState);
	//		}
	//		else
	//			pState->setWorldTransform(transform);

	//		i++;
	//	}
	//}

	m_Frame = e.sender.frame();
	for (auto& branch : WorldTree->leaves())
	{
		for (const auto& hand : m_Frame.hands())
		{
			auto & subjects = branch.Subjects;
			branch.AddSubjectiveObject(hand,e.toWorldTransform);
			//if 
(!subjects[hand.id()])
			//{
			//	subjects[hand.id()].reset(
			//		new HandPhysicalModel(
			//		pFrame->pDynamicsWorld,
			//		hand, e.toWorldTransform,
			//		pFrame->SubjectTransform)
			//		);
			//	//for (const auto& itm : pFrame->Objects)
			//	//{
			//	//	const auto& pObj = itm.second;
			//	//	if (!pObj->GetBulletRigid()->isStaticOrKinematicObject())
			//	//	{
			//	//		for (const auto& bone : subjects[hand.id()]->Rigids())
			//	//			pObj->GetBulletRigid()->setIgnoreCollisionCheck(bone.get(), false);
			//	//	}
			//	//}
			//}
		}
	}

	//for (size_t j = 0; j < i; j++)
	//{
	//	pDynamicsWorld->addRigidBody(m_HandRigids[j]->GetBulletRigid());
	//	m_HandRigids[j]->GetBulletRigid()->setGravity({ 0,0,0 });
	//}
}

// Leap callback: tracking lost. When no hands remain, clears all hand traces and
// samples (under the hand-frame mutex) and collapses the world tree.
void Causality::WorldScene::OnHandsTrackLost(const UserHandsEventArgs & e)
{
	m_Frame = e.sender.frame();
	m_FrameTransform = e.toWorldTransform;
	if (m_Frame.hands().count() == 0)
	{
		m_HaveHands = false;
		std::lock_guard guard(m_HandFrameMutex);
		m_HandTrace.clear();
		m_TraceSamples.clear();
		WorldTree->Collapse();
		//for (const auto &pRigid : m_HandRigids)
		//{
		//	pDynamicsWorld->removeRigidBody(pRigid->GetBulletRigid());
		//}
	}
}

// Leap callback: per-frame hand motion. Records world-space joint positions of every
// finger bone (5 joints per finger) into m_HandTrace, capping the trace at 60 frames.
void Causality::WorldScene::OnHandsMove(const UserHandsEventArgs & e)
{
	std::lock_guard guard(m_HandFrameMutex);
	m_Frame = e.sender.frame();
	m_FrameTransform = e.toWorldTransform;
	XMMATRIX leap2world = m_FrameTransform;
	//std::array joints;
	// NOTE(review): template arguments appear stripped in this copy (std::vector,
	// std::normal_distribution, std::uniform_real) - restore against original source.
	std::vector handBoxes;
	float fingerStdev = 0.02f;
	std::random_device rd;
	std::mt19937 gen(rd());
	std::normal_distribution normalDist(0, fingerStdev);
	std::uniform_real uniformDist;

	// Caculate moving trace
	int handIdx = 0;
	for (const auto& hand : m_Frame.hands())
	{
		int fingerIdx = 0; // hand 
idx
		m_HandTrace.emplace_back();
		m_TraceSamples.emplace_back();
		auto& samples = m_TraceSamples.back();
		auto& joints = m_HandTrace.back();
		// NOTE(review): indexed writes below assume the freshly emplaced element is
		// pre-sized for 5 joints per finger - confirm m_HandTrace's element type.
		for (const auto& finger : hand.fingers())
		{
			// Joint 0 of a finger is the proximal joint of its first bone ...
			XMVECTOR bJ = XMVector3Transform(finger.bone((Leap::Bone::Type)0).prevJoint().toVector3(), leap2world);
			joints[fingerIdx * 5] = bJ;
			for (size_t boneIdx = 0; boneIdx < 4; boneIdx++) // bone idx
			{
				// ... joints 1..4 are the distal joints of bones 0..3.
				const auto & bone = finger.bone((Leap::Bone::Type)boneIdx);
				XMVECTOR eJ = XMVector3Transform(bone.nextJoint().toVector3(), leap2world);
				joints[fingerIdx * 5 + boneIdx + 1] = eJ;


				//auto dir = eJ - bJ;
				//float dis = XMVectorGetX(XMVector3Length(dir));
				//if (abs(dis) < 0.001)
				//	continue;
				//XMVECTOR rot = XMQuaternionRotationVectorToVector(g_XMIdentityR1, dir);
				//bJ = eJ;
				//for (size_t k = 0; k < 100; k++) // bone idx
				//{
				//	float x = normalDist(gen);
				//	float h = uniformDist(gen);
				//	float z = normalDist(gen);
				//	XMVECTOR disp = XMVectorSet(x, h*dis, z, 1);
				//	disp = XMVector3Rotate(disp, rot);
				//	disp += bJ;
				//	samples[(fingerIdx * 4 + boneIdx) * 100 + k] = disp;
				//}
			}
			fingerIdx++;
		}
		handIdx++;
		// Keep only the most recent 60 frames of trace data.
		while (m_HandTrace.size() > 60)
		{
			m_HandTrace.pop_front();
			//m_TraceSamples.pop_front();
		}

		// Cone intersection test section
		//Vector3 rayEnd = XMVector3Transform(hand.palmPosition().toVector3(), leap2world);
		//Vector3 rayBegin = m_pCameraLocation->GetPosition();
		//auto pConeShape = new btConeShape(100, XM_PI / 16 * 100);
		//auto pCollisionCone = new btCollisionObject();
		//pCollisionCone->setCollisionShape(pConeShape);
		//btTransform trans(
		//	vector_cast(XMQuaternionRotationVectorToVector(g_XMIdentityR1, rayEnd - 
rayBegin)),
		//	vector_cast(rayBegin));
		//pCollisionCone->setWorldTransform(trans);
		//class Callback : public btDynamicsWorld::ContactResultCallback
		//{
		//public:
		//	const IModelNode* pModel;
		//	Callback() {}

		//	void SetModel(const IModelNode* pModel)
		//	{
		//		this->pModel = pModel;
		//	}
		//	Callback(const IModelNode* pModel)
		//		: pModel(pModel)
		//	{
		//	}
		//	virtual	btScalar	addSingleResult(btManifoldPoint& cp, const btCollisionObjectWrapper* colObj0Wrap, int partId0, int index0, const btCollisionObjectWrapper* colObj1Wrap, int partId1, int index1)
		//	{
		//		cout << "point frustrum contact with "<< pModel->Name << endl;
		//		return 0;
		//	}
		//};
		//static map callbackTable;


		//for (const auto& model : Children)
		//{
		//	auto pRigid = dynamic_cast(model.get());
		//	callbackTable[model->Name].SetModel(model.get());
		//	pDynamicsWorld->contactPairTest(pRigid->GetBulletRigid(), pCollisionCone, callbackTable[model->Name]);
		//}
	}

	//int i = 0;
	//const auto& hand = m_Frame.hands().frontmost();
	//for (const auto& finger : hand.fingers())
	//{
	//	XMVECTOR bJ = XMVector3Transform(finger.bone((Leap::Bone::Type)0).prevJoint().toVector3(), leap2world);
	//	auto pState = m_HandRigids[i]->GetBulletRigid()->getMotionState();
	//	auto transform = btTransform::getIdentity();
	//	transform.setOrigin(vector_cast(bJ));
	//	m_HandRigids[i]->GetBulletRigid()->proceedToTransform(transform);


	//	i++;
	//	for (size_t boneIdx = 0; boneIdx < 4; boneIdx++) // bone idx
	//	{
	//		const auto & bone = finger.bone((Leap::Bone::Type)boneIdx);
	//		XMVECTOR eJ = XMVector3Transform(bone.nextJoint().toVector3(), leap2world);

	//		auto pState = m_HandRigids[i]->GetBulletRigid()->getMotionState();
	//		auto transform = 
btTransform::getIdentity();
	//		transform.setOrigin(vector_cast(eJ));
	//		m_HandRigids[i]->GetBulletRigid()->proceedToTransform(transform);

	//		i++;
	//	}
	//}


	std::cout << "[Leap] Hands Move." << std::endl;
}

// Key presses are currently ignored; only key-up events are handled.
void Causality::WorldScene::OnKeyDown(const KeyboardEventArgs & e)
{
}

// 'T' toggles rendering of the hand motion trace.
void Causality::WorldScene::OnKeyUp(const KeyboardEventArgs & e)
{
	if (e.Key == 'T')
		m_showTrace = !m_showTrace;
}

// Registers a model with the scene: adds it to the render list and creates its scaled
// collision shape as a dynamic object in every world-tree branch.
// NOTE(review): the dynamic_cast target type appears stripped in this copy, and the
// cast result is dereferenced without a null check - a model that is not shaped
// would crash here; confirm against the original source.
void Causality::WorldScene::AddObject(const std::shared_ptr& pModel, float mass, const DirectX::Vector3 & Position, const DirectX::Quaternion & Orientation, const Vector3 & Scale)
{
	lock_guard guard(m_RenderLock);
	Models.push_back(pModel);
	auto pShaped = dynamic_cast(pModel.get());
	auto pShape = pShaped->CreateCollisionShape();
	pShape->setLocalScaling(vector_cast(Scale));

	WorldTree->AddDynamicObject(pModel->Name, pShape, mass, Position, Orientation);
	//for (const auto& pFrame : m_StateFrames)
	//{
	//	auto pObject = std::shared_ptr(new PhysicalRigid());
	//	pObject->InitializePhysics(pFrame->pDynamicsWorld, pShape, mass, Position, Orientation);
	//	pObject->GetBulletRigid()->setFriction(1.0f);
	//	pObject->GetBulletRigid()->setDamping(0.8, 0.9);
	//	pObject->GetBulletRigid()->setRestitution(0.0);
	//	pFrame->Objects[pModel->Name] = pObject;
	//}
}

// Returns (center, rotation) placing a unit Y-aligned cylinder along segment P1-P2:
// center is the midpoint, rotation takes the +Y axis onto the segment direction
// (identity when P1 == P2).
std::pair XM_CALLCONV CaculateCylinderTransform(FXMVECTOR P1, FXMVECTOR P2)
{
	std::pair trans;
	auto center = XMVectorAdd(P1, P2);
	center = XMVectorMultiply(center, g_XMOneHalf);
	auto dir = XMVectorSubtract(P1, P2);
	auto scale = XMVector3Length(dir);
	XMVECTOR rot;
	if (XMVector4Equal(dir, g_XMZero))
		rot = XMQuaternionIdentity();
	else
		rot = XMQuaternionRotationVectorToVector(g_XMIdentityR1.v, dir);
	trans.first = center;
	trans.second = rot;
	return trans;
}

// Composes the hand's local matrix: inherited scaling about the palm center,
// then the inherited rigid transform, then the Leap-to-world transform.
XMMATRIX Causality::HandPhysicalModel::CaculateLocalMatrix(const Leap::Hand & hand, const DirectX::Matrix4x4 & 
leapTransform)
{
	XMVECTOR palmCenter = hand.palmPosition().toVector3();
	return XMMatrixScalingFromCenter(m_InheritTransform.Scale, palmCenter) * ((RigidTransform&) m_InheritTransform).TransformMatrix() * (XMMATRIX) leapTransform;

}


// Builds the physical hand: for each of the 4 bones of each finger, records the
// world-space joint pair in m_Bones and creates a kinematic capsule rigid body
// (zero mass, deactivation disabled) enabled in the given dynamics world.
Causality::HandPhysicalModel::HandPhysicalModel
(const std::shared_ptr &pWorld,
const Leap::Hand & hand, const DirectX::Matrix4x4 & leapTransform,
const DirectX::AffineTransform &inheritTransform)
: m_Hand(hand)
{
	Color.G(0.5f);
	Color.B(0.5f);

	Id = hand.id();
	m_InheritTransform = inheritTransform;

	LocalMatrix = CaculateLocalMatrix(hand, leapTransform);
	XMMATRIX leap2world = LocalMatrix;
	int j = 0;
	// NOTE(review): unlike Update(), 'j' is never incremented in this loop, so every
	// finger writes m_Bones[0..3] - this looks like a missing "j++" after the inner
	// loop; confirm against Update()'s indexing before fixing.
	for (const auto& finger : m_Hand.fingers())
	{
		for (size_t i = 0; i < 4; i++)
		{
			const auto & bone = finger.bone((Leap::Bone::Type)i);
			XMVECTOR bJ = XMVector3Transform(bone.prevJoint().toVector3(), leap2world);
			XMVECTOR eJ = XMVector3Transform(bone.nextJoint().toVector3(), leap2world);
			m_Bones[i + j * 4].first = bJ;
			m_Bones[i + j * 4].second = eJ;

			// Initalize rigid hand model
			auto center = 0.5f * XMVectorAdd(bJ, eJ);
			auto dir = XMVectorSubtract(eJ, bJ);
			// Clamp the capsule height to at least the nominal finger length.
			auto height = std::max(XMVectorGetX(XMVector3Length(dir)), fingerLength);
			XMVECTOR rot;
			if (XMVector4Equal(dir, g_XMZero))
				rot = XMQuaternionIdentity();
			else
				rot = XMQuaternionRotationVectorToVector(g_XMIdentityR1, dir);
			shared_ptr pShape(new btCapsuleShape(fingerRadius, height));

			// Scaling in Y axis is encapsled in bJ and eJ
			btVector3 scl = vector_cast(m_InheritTransform.Scale);
			scl.setY(1.0f);
			pShape->setLocalScaling(scl);

			m_HandRigids.emplace_back(new PhysicalRigid());
			const auto & pRigid = m_HandRigids.back();
			//pRigid->GetBulletRigid()->setGravity({ 0,0,0 });
			// Mass 0 + kinematic flag: the body is driven by tracking, not simulated.
			pRigid->InitializePhysics(nullptr, pShape, 0, center, rot);
			const auto& body = 
pRigid->GetBulletRigid();
			body->setFriction(1.0f);
			body->setRestitution(0.0f);
			body->setCollisionFlags(body->getCollisionFlags() | btCollisionObject::CF_KINEMATIC_OBJECT);
			body->setActivationState(DISABLE_DEACTIVATION);
			//body->setAngularFactor(0.0f); // Rotation Along Y not affact

			pRigid->Enable(pWorld);
		}
	}

	//for (size_t i = 0; i < m_HandRigids.size(); i++)
	//{
	//	for (size_t j = 0; j < m_HandRigids.size(); j++)
	//	{
	//		if (i != j)
	//			m_HandRigids[i]->GetBulletRigid()->setIgnoreCollisionCheck(m_HandRigids[j]->GetBulletRigid(), true);
	//	}
	//}

}

// Re-syncs the model with the latest Leap frame. Returns true when the hand is still
// tracked (bones and kinematic rigid transforms updated, LostFrames reset); otherwise
// disables all rigid bodies, increments LostFrames and returns false.
bool Causality::HandPhysicalModel::Update(const Leap::Frame & frame, const DirectX::Matrix4x4 & leapTransform)
{
	m_Hand = frame.hand(Id);

	if (m_Hand.isValid())
	{
		XMMATRIX transform = CaculateLocalMatrix(m_Hand, leapTransform);
		LocalMatrix = transform;
		// Red channel visualizes grab strength.
		Color.R(m_Hand.grabStrength());
		LostFrames = 0;
		int j = 0;
		for (const auto& finger : m_Hand.fingers())
		{
			for (size_t i = 0; i < 4; i++)
			{
				const auto & bone = finger.bone((Leap::Bone::Type)i);
				XMVECTOR bJ = XMVector3Transform(bone.prevJoint().toVector3(), transform);
				XMVECTOR eJ = XMVector3Transform(bone.nextJoint().toVector3(), transform);
				m_Bones[i + j * 4].first = bJ;
				m_Bones[i + j * 4].second = eJ;

				// Rigid hand model

				auto & pRigid = m_HandRigids[i + j * 4];
				if (!pRigid->IsEnabled())
					pRigid->Enable();
				auto center = 0.5f * XMVectorAdd(bJ, eJ);
				auto dir = XMVectorSubtract(eJ, bJ);
				XMVECTOR rot;
				if (XMVector4Equal(dir, g_XMZero))
					rot = XMQuaternionIdentity();
				else
					rot = XMQuaternionRotationVectorToVector(g_XMIdentityR1, dir);
				// Kinematic bodies are moved via their motion state.
				auto trans = btTransform(vector_cast(rot), 
vector_cast(center));
				pRigid->GetBulletRigid()->getMotionState()->setWorldTransform(trans);
			}
			j++;
		}
		return true;
	}
	else
	{
		for (auto& pRigid : m_HandRigids)
		{
			pRigid->Disable();
		}
		LostFrames++;
		return false;
	}
}

// Inherited via IModelNode

// Draws the hand as one cylinder per finger-bone rigid body, tinted by Color with
// alpha = Opticity; cylinder axis is the body's +Y direction, height read from the
// capsule shape.
void Causality::HandPhysicalModel::Render(ID3D11DeviceContext * pContext, DirectX::IEffect * pEffect)
{
	XMMATRIX leap2world = LocalMatrix;
	//auto palmPosition = XMVector3Transform(m_Hand.palmPosition().toVector3(), leap2world);
	//g_PrimitiveDrawer.DrawSphere(palmPosition, 0.02f, Colors::YellowGreen);
	Color.A(Opticity);
	XMVECTOR color = Color;
	//color = XMVectorSetW(color, Opticity);

	//g_PrimitiveDrawer.Begin();
	//for (const auto& bone : m_Bones)
	//{
	//	//g_PrimitiveDrawer.DrawSphere(bone.second, fingerRadius, jC);
	//	g_PrimitiveDrawer.DrawCylinder(bone.first, bone.second, fingerRadius * m_InheritTransform.Scale.x, color);
	//}
	//g_PrimitiveDrawer.End();

	for (const auto& pRigid : m_HandRigids)
	{
		// NOTE(review): dynamic_cast target type appears stripped in this copy
		// (presumably the capsule shape type) - restore against the original source.
		g_PrimitiveDrawer.DrawCylinder(
			pRigid->GetPosition(),
			XMVector3Rotate(g_XMIdentityR1, pRigid->GetOrientation()),
			dynamic_cast(pRigid->GetBulletShape())->getHalfHeight() * 2,
			fingerRadius * m_InheritTransform.Scale.x,
			color);
	}

	//for (const auto& finger : m_Hand.fingers())
	//{
	//	for (size_t i = 0; i < 4; i++)
	//	{
	//		const auto & bone = finger.bone((Leap::Bone::Type)i);
	//		XMVECTOR bJ = XMVector3Transform(bone.prevJoint().toVector3(), leap2world);
	//		XMVECTOR eJ = XMVector3Transform(bone.nextJoint().toVector3(), leap2world);
	//		//g_PrimitiveDrawer.DrawLine(bJ, eJ, Colors::LimeGreen);
	//		//g_PrimitiveDrawer.DrawCube(bJ, g_XMOne * 0.03, g_XMIdentityR3, Colors::Red);
	//		g_PrimitiveDrawer.DrawCylinder(bJ, eJ,0.015f,Colors::LimeGreen);

	//		//auto center = 0.5f * XMVectorAdd(bJ, eJ);
	//		//auto dir = 
XMVectorSubtract(eJ, bJ);
	//		//auto scale = XMVector3Length(dir);
	//		//XMVECTOR rot;
	//		//if (XMVector4LessOrEqual(XMVector3LengthSq(dir), XMVectorReplicate(0.01f)))
	//		//	rot = XMQuaternionIdentity();
	//		//else
	//		//	rot = XMQuaternionRotationVectorToVector(g_XMIdentityR1, dir);
	//		//XMMATRIX world = XMMatrixAffineTransformation(scale, g_XMZero, rot, center);
	//		//s_pCylinder->Draw(world, ViewMatrix, ProjectionMatrix,Colors::LimeGreen);
	//	}
	//}
}

// Debug-only assert: breaks into the debugger when the condition fails;
// compiles to nothing when DEBUG is not defined.
inline void debug_assert(bool condition)
{
#ifdef DEBUG
	if (!condition)
	{
		_CrtDbgBreak();
		//std::cout << "assert failed." << std::endl;
	}
#endif
}

// normalized feild intensity equalent charge
// Closed-form field of a uniformly "charged" line segment [L0, L1] evaluated at P.
// Degenerate (near-coincident endpoints) segments fall back to a point charge:
// direction (midpoint - P) scaled by 1/r^2.
XMVECTOR XM_CALLCONV FieldSegmentToPoint(FXMVECTOR P, FXMVECTOR L0, FXMVECTOR L1)
{
	if (XMVector4NearEqual(L0, L1, XMVectorReplicate(0.001f)))
	{
		XMVECTOR v = XMVectorAdd(L0, L1);
		v = XMVectorMultiply(v, g_XMOneHalf);
		v = XMVectorSubtract(v, P);
		XMVECTOR d = XMVector3LengthSq(v);
		v = XMVector3Normalize(v);
		v /= d;
		return v;
	}

	XMVECTOR s = XMVectorSubtract(L1, L0);
	XMVECTOR v0 = XMVectorSubtract(L0, P);
	XMVECTOR v1 = XMVectorSubtract(L1, P);

	// Build an orthonormal frame with Y along the segment and Z normal to the
	// plane spanned by (P, L0, L1).
	XMMATRIX Rot;
	Rot.r[1] = XMVector3Normalize(s);
	Rot.r[2] = XMVector3Cross(v0, v1);
	Rot.r[2] = XMVector3Normalize(Rot.r[2]);
	Rot.r[0] = XMVector3Cross(Rot.r[1], Rot.r[2]);
	Rot.r[3] = g_XMIdentityR3;

	// Rotated to standard question:
	// Y
	// ^ *y1
	// | |
	//--o-----|x0----->X
	// | |
	//	| |
	//	 *y0
	// Close form solution of the intergral : f(y0,y1) = <-y/(x0*sqrt(x0^2+y^2)),1/sqrt(x0^2+y^2),0> | (y0,y1)
	XMVECTOR Ds = XMVector3ReciprocalLength(s);
	XMVECTOR Ps = XMVector3Dot(v0, s);
	XMVECTOR Y0 = XMVectorMultiply(Ps, Ds);

	Ps = XMVector3Dot(v1, s);
	XMVECTOR Y1 = XMVectorMultiply(Ps, Ds);

	// x0^2 = |v1|^2 - y1^2 (squared perpendicular distance of P from the line).
	XMVECTOR X0 = XMVector3LengthSq(v1);
	Ps = XMVectorMultiply(Y1, Y1);
	X0 = XMVectorSubtract(X0, 
Ps);
	//debug_assert(XMVector4GreaterOrEqual(X0, XMVectorZero()));
	XMVECTOR R0 = XMVectorMultiplyAdd(Y0, Y0, X0);
	XMVECTOR R1 = XMVectorMultiplyAdd(Y1, Y1, X0);
	R0 = XMVectorReciprocalSqrt(R0);
	R1 = XMVectorReciprocalSqrt(R1);

	XMVECTOR Ry = XMVectorSubtract(R1, R0);

	R0 = XMVectorMultiply(R0, Y0);
	R1 = XMVectorMultiply(R1, Y1);
	XMVECTOR Rx = XMVectorSubtract(R0, R1);
	X0 = XMVectorReciprocalSqrt(X0);
	//debug_assert(!XMVectorGetIntX(XMVectorIsNaN(X0)));
	Rx = XMVectorMultiply(Rx, X0);
	// Merge the X and Y components of the solution into one vector.
	Rx = XMVectorSelect(Rx, Ry, g_XMSelect0101);
	// Field intensity in P centered coordinate
	Rx = XMVectorAndInt(Rx, g_XMSelect1100);

	Rx = XMVectorMultiply(Rx, Ds);
	// Rotate the local-frame field back into world space.
	Rx = XMVector3Transform(Rx, Rot);

	//debug_assert(!XMVectorGetIntX(XMVectorIsNaN(Rx)));
	return Rx;
}

// Total field at P: sum of the per-segment fields of every recorded hand bone.
DirectX::XMVECTOR XM_CALLCONV Causality::HandPhysicalModel::FieldAtPoint(DirectX::FXMVECTOR P)
{
	// Palm push force 
	//XMVECTOR palmP = m_Hand.palmPosition().toVector3();
	//XMVECTOR palmN = m_Hand.palmNormal().toVector3();
	//auto dis = XMVectorSubtract(P,palmP);
	//auto mag = XMVectorReciprocal(XMVector3LengthSq(dis));
	//dis = XMVector3Normalize(dis);
	//auto fac = XMVector3Dot(dis, palmN);
	//mag = XMVectorMultiply(fac, mag);
	//return XMVectorMultiply(dis, mag);

	XMVECTOR field = XMVectorZero();
	for (const auto& bone : m_Bones)
	{
		XMVECTOR v0 = bone.first;
		XMVECTOR v1 = bone.second;
		XMVECTOR f = FieldSegmentToPoint(P, v0, v1);
		field += f;
		//XMVECTOR l = XMVector3LengthSq(XMVectorSubtract(v1,v0));
	}
	return field;
}

// Axis-aligned colored cube model: stores the half-extents into both the AABB
// and the OBB so either bound can be used for culling/collision.
inline Causality::CubeModel::CubeModel(const std::string & name, DirectX::FXMVECTOR extend, DirectX::FXMVECTOR color)
{
	Name = name;
	m_Color = color;

	XMStoreFloat3(&BoundBox.Extents, extend);
	XMStoreFloat3(&BoundOrientedBox.Extents, extend);
}

// Creates a Bullet box shape matching the cube's AABB half-extents.
// NOTE(review): shared_ptr template arguments appear stripped in this copy.
std::shared_ptr Causality::CubeModel::CreateCollisionShape()
{
	std::shared_ptr pShape;
	pShape.reset(new 
btBoxShape(vector_cast(BoundBox.Extents)));
	return pShape;
}

// Draws the cube via the shared primitive drawer, applying Opticity as alpha.
void Causality::CubeModel::Render(ID3D11DeviceContext * pContext, DirectX::IEffect * pEffect)
{
	XMVECTOR extent = XMLoadFloat3(&BoundBox.Extents);
	XMMATRIX world = GetWorldMatrix();
	//XMVECTOR scale, pos, rot;
	XMVECTOR color = m_Color;
	color = XMVectorSetW(color, Opticity);
	g_PrimitiveDrawer.DrawCube(extent, world, color);
}

// !!!Current don't support dynamic scaling for each state now!!!
// Lazily builds (and caches in m_pShape) a compound shape made of one box per part,
// positioned/oriented from each part's oriented bounding box.
inline std::shared_ptr Causality::ShapedGeomrtricModel::CreateCollisionShape()
{
	if (!m_pShape)
	{
		btTransform trans;
		std::shared_ptr pShape(new btCompoundShape());
		//trans.setOrigin(vector_cast(model->BoundOrientedBox.Center));
		//trans.setRotation(vector_cast(model->BoundOrientedBox.Orientation));
		//pShape->addChildShape(trans, new btBoxShape(vector_cast(model->BoundOrientedBox.Extents)));
		for (const auto& part : Parts)
		{
			trans.setOrigin(vector_cast(part->BoundOrientedBox.Center));
			trans.setRotation(vector_cast(part->BoundOrientedBox.Orientation));
			pShape->addChildShape(trans, new btBoxShape(vector_cast(part->BoundOrientedBox.Extents)));
		}
		m_pShape = pShape;
		return m_pShape;
	}
	else
	{
		return m_pShape;
	}
}

// Pre-populates the shared branch pool.
// NOTE(review): both 'size' and 'autoExpandation' are ignored - the loop is
// hard-coded to 30 entries; the bound should presumably be 'size'. Confirm callers
// before changing.
void Causality::WorldBranch::InitializeBranchPool(int size, bool autoExpandation)
{
	for (size_t i = 0; i < 30; i++)
	{
		BranchPool.emplace(new WorldBranch());
	}
}

// Disables every tracked item and empties the branch's item map.
void Causality::WorldBranch::Reset()
{
	for (const auto& pair : Items)
	{
		pair.second->Disable();
	}
	Items.clear();
}

void Causality::WorldBranch::Collapse()
{
	//using namespace cpplinq;
	//using cref = decltype(m_StateFrames)::const_reference;
	//auto mlh = from(m_StateFrames)
	//	>> where([](cref pFrame) {return pFrame->IsEnabled; })
	//	>> max([](cref pFrame)->float {return pFrame->Liklyhood(); });
	//WordBranch master_frame;
	////for (auto & pFrame : 
m_StateFrames)\n\t////{\n\t////\tif (pFrame->Liklyhood() < mlh)\n\t////\t{\n\t////\t\tpFrame->Disable();\n\t////\t\tm_pFramesPool->Recycle(std::move(pFrame));\n\t////\t}\n\t////\telse\n\t////\t{\n\t////\t\tmaster_frame = std::move(pFrame);\n\t////\t}\n\t////}\n\t//m_StateFrames.clear();\n\t//m_StateFrames.push_back(std::move(master_frame));\n}\n\nSuperpositionMap Causality::WorldBranch::CaculateSuperposition()\n{\n\tusing namespace cpplinq;\n\tSuperpositionMap SuperStates;\n\n\tauto itr = this->begin();\n\tauto eitr = this->end();\n\n\tNormalizeLiklyhood(CaculateLiklyhood());\n\n\tauto pItem = Items.begin();\n\tfor (size_t i = 0; i < Items.size(); i++,++pItem)\n\t{\n\t\tconst auto& pModel = pItem->second;\n\t\tauto& distribution = SuperStates[pItem->first];\n\t\t//auto& = state.StatesDistribution;\n\t\tint j = 0;\n\n\t\tfor (const auto& branch : leaves())\n\t\t{\n\t\t\tif (!branch.IsEnabled)\n\t\t\t\tcontinue;\n\n\t\t\tauto itrObj = branch.Items.find(pItem->first);\n\n\t\t\tif (itrObj == branch.Items.end())\n\t\t\t\tcontinue;\n\t\t\tauto pNew = itrObj->second;\n\n\t\t\tProblistiscAffineTransform tNew;\n\t\t\ttNew.Translation = pNew->GetPosition();\n\t\t\ttNew.Rotation = pNew->GetOrientation();\n\t\t\ttNew.Scale = pNew->GetScale();\n\t\t\ttNew.Probability = branch.Liklyhood();\n\n\t\t\tauto itr = std::find_if(distribution.begin(), distribution.end(),\n\t\t\t\t[&tNew](std::remove_reference_t::const_reference trans) -> bool\n\t\t\t{\n\t\t\t\treturn trans.NearEqual(tNew);\n\t\t\t});\n\n\t\t\tif (itr == distribution.end())\n\t\t\t{\n\t\t\t\tdistribution.push_back(tNew);\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\titr->Probability += tNew.Probability;\n\t\t\t}\n\t\t\tj++;\n\t\t}\n\t}\n\n\treturn SuperStates;\n}\n\nvoid Causality::WorldBranch::InternalEvolution(float timeStep, const Leap::Frame & frame, const DirectX::Matrix4x4 & leapTransform)\n{\n\tauto& subjects = Subjects;\n\n\tBoundingSphere sphere;\n\n\t//if (!is_leaf()) return;\n\tfor (auto itr = subjects.begin(); itr 
!= subjects.end(); )\n\t{\n\t\tbool result = itr->second->Update(frame, leapTransform);\n\t\t// Remove hands lost track for 60+ frames\n\t\tif (!result)\n\t\t{\n\t\t\tif (itr->second->LostFramesCount() > 60)\n\t\t\t\titr = subjects.erase(itr);\n\t\t}\n\t\telse\n\t\t{\n\t\t\t//std::vector collideObjects;\n\n\t\t\t//for (auto& item : Items)\n\t\t\t//{\n\t\t\t//\tbtVector3 c;\n\t\t\t//\titem.second->GetBulletShape()->getBoundingSphere(c, sphere.Radius);\n\t\t\t//\tsphere.Center = vector_cast(sphere.Center);\n\t\t\t//\tif (itr->second->OperatingFrustum().Contains(sphere) != ContainmentType::DISJOINT)\n\t\t\t//\t{\n\t\t\t//\t\tcollideObjects.push_back(item.second.get());\n\t\t\t//\t}\n\t\t\t//}\n\t\t\t//if (collideObjects.size() > 0)\n\t\t\t//{\n\t\t\t//\tFork(collideObjects);\n\t\t\t//}\n\n\n\t\t\t//const auto &pHand = itr->second;\n\t\t\t//for (auto& item : pFrame->Objects)\n\t\t\t//{\n\t\t\t//\tconst auto& pObj = item.second;\n\t\t\t//\tif (pObj->GetBulletRigid()->isStaticObject())\n\t\t\t//\t\tcontinue;\n\t\t\t//\t//pObj->GetBulletShape()->\n\t\t\t//\tauto force = pHand->FieldAtPoint(pObj->GetPosition()) * 0.00001f;\n\t\t\t//\tpObj->GetBulletRigid()->clearForces();\n\t\t\t//\t//vector_cast(force) * 0.01f\n\t\t\t//\tstd::cout << item.first << \" : \" << Vector3(force) << std::endl;\n\t\t\t//\tpObj->GetBulletRigid()->applyCentralForce(vector_cast(force));\n\t\t\t//\tpObj->GetBulletRigid()->activate();\n\t\t\t//}\n\t\t\t++itr;\n\t\t}\n\t}\n\tpDynamicsWorld->stepSimulation(timeStep, 10);\n}\n\nfloat Causality::WorldBranch::CaculateLiklyhood()\n{\n\tif (is_leaf())\n\t{\n\t\tif (IsEnabled)\n\t\t\t_Liklyhood = 1;\n\t\telse\n\t\t\t_Liklyhood = 0;\n\t\treturn _Liklyhood;\n\t}\n\telse\n\t{\n\t\t_Liklyhood = 0;\n\t\tfor (auto& branch : children())\n\t\t{\n\t\t\t_Liklyhood += branch.CaculateLiklyhood();\n\t\t}\n\t\treturn _Liklyhood;\n\t}\n}\n\nvoid Causality::WorldBranch::NormalizeLiklyhood(float total)\n{\n\tfor (auto& branch : nodes_in_tree())\n\t{\n\t\tbranch._Liklyhood /= 
total;\n\t}\n}\n\nvoid Causality::WorldBranch::AddSubjectiveObject(const Leap::Hand & hand, const DirectX::Matrix4x4& leapTransform)\n{\n\tif (!Subjects[hand.id()])\n\t{\n\t\tSubjects[hand.id()].reset(\n\t\t\tnew HandPhysicalModel(\n\t\t\tpDynamicsWorld,\n\t\t\thand, leapTransform,\n\t\t\tSubjectTransform)\n\t\t\t);\n\t\t//for (const auto& itm : pFrame->Objects)\n\t\t//{\n\t\t//\tconst auto& pObj = itm.second;\n\t\t//\tif (!pObj->GetBulletRigid()->isStaticOrKinematicObject())\n\t\t//\t{\n\t\t//\t\tfor (const auto& bone : subjects[hand.id()]->Rigids())\n\t\t//\t\t\tpObj->GetBulletRigid()->setIgnoreCollisionCheck(bone.get(), false);\n\t\t//\t}\n\t\t//}\n\t}\n}\n\nvoid Causality::WorldBranch::AddDynamicObject(const std::string &name, const std::shared_ptr &pShape, float mass, const DirectX::Vector3 & Position, const DirectX::Quaternion & Orientation)\n{\n\tfor (auto& branch : nodes_in_tree())\n\t{\n\t\tauto pObject = std::shared_ptr(new PhysicalRigid());\n\t\tpObject->InitializePhysics(branch.pDynamicsWorld, pShape, mass, Position, Orientation);\n\t\tpObject->GetBulletRigid()->setFriction(1.0f);\n\t\tpObject->GetBulletRigid()->setDamping(0.8f, 0.9f);\n\t\tpObject->GetBulletRigid()->setRestitution(0.0);\n\t\tbranch.Items[name] = pObject;\n\t}\n}\n\nvoid Causality::WorldBranch::Evolution(float timeStep, const Leap::Frame & frame, const DirectX::Matrix4x4 & leapTransform)\n{\n\tusing namespace cpplinq;\n\tvector> leaves;\n\n\n\n\t//auto levr = this->leaves();\n\tcopy(this->leaves_begin(), leaves_end(), back_inserter(leaves));\n\n\tauto branchEvolution = [timeStep, &frame, &leapTransform](WorldBranch& branch) {\n\t\tbranch.InternalEvolution(timeStep,frame, leapTransform);\n\t};\n\t//auto branchEvolution = std::bind(&WorldBranch::InternalEvolution, placeholders::_1, frame, leapTransform);\n\n\tif (leaves.size() >= 10)\n\t\tconcurrency::parallel_for_each(leaves.begin(), leaves.end(), branchEvolution);\n\telse\n\t\tfor_each(leaves.begin(), leaves.end(), 
branchEvolution);\n}\n\nvoid Causality::WorldBranch::Fork(const std::vector& focusObjects)\n{\n\t//int i = 0;\n\t//for (const auto& obj : focusObjects)\n\t//{\n\t//\tauto branch = DemandCreate((boost::format(\"%s/%d\") % this->Name % i++).str());\n\t//}\n}\n\nvoid Causality::WorldBranch::Fork(const std::vector& subjectTransforms)\n{\n\tfor (int i = subjectTransforms.size() - 1; i >= 0; --i)\n\t{\n\t\tconst auto& trans = subjectTransforms[i];\n\t\tauto branch = DemandCreate((boost::format(\"%s/%d\") % this->Name % i).str());\n\t\tbranch->Enable(trans);\n\t\tappend_children_front(branch.release());\n\t\t//branch->SubjectTransform = trans;\n\t}\n}\nstd::unique_ptr Causality::WorldBranch::DemandCreate(const string& branchName)\n{\n\tif (!BranchPool.empty())\n\t{\n\t\tauto frame = std::move(BranchPool.front());\n\t\tBranchPool.pop();\n\t\tframe->Name = branchName;\n\t\treturn frame;\n\t}\n\telse\n\t\treturn nullptr;\n}\n\nvoid Causality::WorldBranch::Recycle(std::unique_ptr&& pFrame)\n{\n\tpFrame->Reset();\n\tBranchPool.push(std::move(pFrame));\n}\n\n//inline void Causality::SkeletonModel::Render(ID3D11DeviceContext * pContext, DirectX::IEffect * pEffect)\n//{\n//\tg_PrimitiveDrawer.DrawCylinder(Joints[0],Joints[1].Position)\n//}\n", "meta": {"hexsha": "9d09ab5479200b17fcdfacb91a37167835f67963", "size": 49418, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Causality/Foregrounds.cpp", "max_stars_repo_name": "ArcEarth/SrInspection", "max_stars_repo_head_hexsha": "63c540d1736e323a0f409914e413cb237f03c5c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2016-07-13T18:30:33.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-31T22:20:34.000Z", "max_issues_repo_path": "Causality/Foregrounds.cpp", "max_issues_repo_name": "ArcEarth/SrInspection", "max_issues_repo_head_hexsha": "63c540d1736e323a0f409914e413cb237f03c5c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, 
"max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Causality/Foregrounds.cpp", "max_forks_repo_name": "ArcEarth/SrInspection", "max_forks_repo_head_hexsha": "63c540d1736e323a0f409914e413cb237f03c5c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2016-01-16T14:25:28.000Z", "max_forks_repo_forks_event_max_datetime": "2017-06-12T16:15:18.000Z", "avg_line_length": 31.9031633312, "max_line_length": 251, "alphanum_fraction": 0.6812295115, "num_tokens": 15560, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.5078118642792044, "lm_q2_score": 0.3923368301671084, "lm_q1q2_score": 0.19923329715255295}} {"text": "/*\n * Copyright (c) 2017 Peter Conrad, and other contributors.\n *\n * The MIT License\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n * THE SOFTWARE.\n */\n\n#include \n\n#include \n\n#include \n\n#include \"../common/database_fixture.hpp\"\n\nusing namespace graphene::chain;\nusing namespace graphene::chain::test;\n\nBOOST_FIXTURE_TEST_SUITE(market_tests, database_fixture)\n\nBOOST_AUTO_TEST_CASE(issue_338_etc)\n{ try {\n generate_blocks(HARDFORK_615_TIME); // get around Graphene issue #615 feed expiration bug\n generate_block();\n\n set_expiration( db, trx );\n\n ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(feedproducer));\n\n const auto& bitusd = create_bitasset(\"USDBIT\", feedproducer_id);\n const auto& core = asset_id_type()(db);\n asset_id_type usd_id = bitusd.id;\n asset_id_type core_id = core.id;\n\n int64_t init_balance(1000000);\n\n transfer(committee_account, buyer_id, asset(init_balance));\n transfer(committee_account, borrower_id, asset(init_balance));\n transfer(committee_account, borrower2_id, asset(init_balance));\n transfer(committee_account, borrower3_id, asset(init_balance));\n update_feed_producers( bitusd, {feedproducer.id} );\n\n price_feed current_feed;\n current_feed.maintenance_collateral_ratio = 1750;\n current_feed.maximum_short_squeeze_ratio = 1100;\n current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5);\n publish_feed( bitusd, feedproducer, current_feed );\n // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7\n const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000));\n call_order_id_type call_id = call.id;\n // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7\n const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500));\n call_order_id_type call2_id = call2.id;\n // create 
yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7\n const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(16000));\n call_order_id_type call3_id = call3.id;\n transfer(borrower, seller, bitusd.amount(1000));\n\n BOOST_CHECK_EQUAL( 1000, call.debt.value );\n BOOST_CHECK_EQUAL( 15000, call.collateral.value );\n BOOST_CHECK_EQUAL( 1000, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 0, get_balance(seller, core) );\n\n // adjust price feed to get call_order into margin call territory\n current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10);\n publish_feed( bitusd, feedproducer, current_feed );\n // settlement price = 1/10, mssp = 1/11\n\n // This order slightly below the call price will not be matched #606\n limit_order_id_type sell_low = create_sell_order(seller, bitusd.amount(7), core.amount(59))->id;\n // This order above the MSSP will not be matched\n limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->id;\n // This would match but is blocked by sell_low?! 
#606\n limit_order_id_type sell_med = create_sell_order(seller, bitusd.amount(7), core.amount(60))->id;\n\n cancel_limit_order( sell_med(db) );\n cancel_limit_order( sell_high(db) );\n cancel_limit_order( sell_low(db) );\n\n // current implementation: an incoming limit order will be filled at the\n // requested price #338\n BOOST_CHECK( !create_sell_order(seller, bitusd.amount(7), core.amount(60)) );\n BOOST_CHECK_EQUAL( 993, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 60, get_balance(seller, core) );\n BOOST_CHECK_EQUAL( 993, call.debt.value );\n BOOST_CHECK_EQUAL( 14940, call.collateral.value );\n\n limit_order_id_type buy_low = create_sell_order(buyer, asset(90), bitusd.amount(10))->id;\n // margin call takes precedence\n BOOST_CHECK( !create_sell_order(seller, bitusd.amount(7), core.amount(60)) );\n BOOST_CHECK_EQUAL( 986, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 120, get_balance(seller, core) );\n BOOST_CHECK_EQUAL( 986, call.debt.value );\n BOOST_CHECK_EQUAL( 14880, call.collateral.value );\n\n limit_order_id_type buy_med = create_sell_order(buyer, asset(105), bitusd.amount(10))->id;\n // margin call takes precedence\n BOOST_CHECK( !create_sell_order(seller, bitusd.amount(7), core.amount(70)) );\n BOOST_CHECK_EQUAL( 979, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 190, get_balance(seller, core) );\n BOOST_CHECK_EQUAL( 979, call.debt.value );\n BOOST_CHECK_EQUAL( 14810, call.collateral.value );\n\n limit_order_id_type buy_high = create_sell_order(buyer, asset(115), bitusd.amount(10))->id;\n // margin call still has precedence (!) 
#625\n BOOST_CHECK( !create_sell_order(seller, bitusd.amount(7), core.amount(77)) );\n BOOST_CHECK_EQUAL( 972, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 267, get_balance(seller, core) );\n BOOST_CHECK_EQUAL( 972, call.debt.value );\n BOOST_CHECK_EQUAL( 14733, call.collateral.value );\n\n cancel_limit_order( buy_high(db) );\n cancel_limit_order( buy_med(db) );\n cancel_limit_order( buy_low(db) );\n\n // call with more usd\n BOOST_CHECK( !create_sell_order(seller, bitusd.amount(700), core.amount(7700)) );\n BOOST_CHECK_EQUAL( 272, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 7967, get_balance(seller, core) );\n BOOST_CHECK_EQUAL( 272, call.debt.value );\n BOOST_CHECK_EQUAL( 7033, call.collateral.value );\n\n // at this moment, collateralization of call is 7033 / 272 = 25.8\n // collateralization of call2 is 15500 / 1000 = 15.5\n // collateralization of call3 is 16000 / 1000 = 16\n\n // call more, still matches with the first call order #343\n BOOST_CHECK( !create_sell_order(seller, bitusd.amount(10), core.amount(110)) );\n BOOST_CHECK_EQUAL( 262, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 8077, get_balance(seller, core) );\n BOOST_CHECK_EQUAL( 262, call.debt.value );\n BOOST_CHECK_EQUAL( 6923, call.collateral.value );\n\n // at this moment, collateralization of call is 6923 / 262 = 26.4\n // collateralization of call2 is 15500 / 1000 = 15.5\n // collateralization of call3 is 16000 / 1000 = 16\n\n // force settle\n force_settle( seller, bitusd.amount(10) );\n BOOST_CHECK_EQUAL( 252, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 8077, get_balance(seller, core) );\n BOOST_CHECK_EQUAL( 262, call.debt.value );\n BOOST_CHECK_EQUAL( 6923, call.collateral.value );\n\n // generate blocks to let the settle order execute (price feed will expire after it)\n generate_blocks( HARDFORK_615_TIME + fc::hours(25) );\n // call2 get settled #343\n BOOST_CHECK_EQUAL( 252, get_balance(seller_id, usd_id) );\n BOOST_CHECK_EQUAL( 8177, get_balance(seller_id, 
core_id) );\n BOOST_CHECK_EQUAL( 262, call_id(db).debt.value );\n BOOST_CHECK_EQUAL( 6923, call_id(db).collateral.value );\n BOOST_CHECK_EQUAL( 990, call2_id(db).debt.value );\n BOOST_CHECK_EQUAL( 15400, call2_id(db).collateral.value );\n\n set_expiration( db, trx );\n update_feed_producers( usd_id(db), {feedproducer_id} );\n\n // at this moment, collateralization of call is 8177 / 252 = 32.4\n // collateralization of call2 is 15400 / 990 = 15.5\n // collateralization of call3 is 16000 / 1000 = 16\n\n // adjust price feed to get call2 into black swan territory, but not the first call order\n current_feed.settlement_price = asset(1, usd_id) / asset(20, core_id);\n publish_feed( usd_id(db), feedproducer_id(db), current_feed );\n // settlement price = 1/20, mssp = 1/22\n\n // black swan event doesn't occur #649\n BOOST_CHECK( !usd_id(db).bitasset_data(db).has_settlement() );\n\n // generate a block\n generate_block();\n\n set_expiration( db, trx );\n update_feed_producers( usd_id(db), {feedproducer_id} );\n\n // adjust price feed back\n current_feed.settlement_price = asset(1, usd_id) / asset(10, core_id);\n publish_feed( usd_id(db), feedproducer_id(db), current_feed );\n // settlement price = 1/10, mssp = 1/11\n\n transfer(borrower2_id, seller_id, asset(1000, usd_id));\n transfer(borrower3_id, seller_id, asset(1000, usd_id));\n\n // Re-create sell_low, slightly below the call price, will not be matched, will expire soon\n sell_low = create_sell_order(seller_id(db), asset(7, usd_id), asset(59), db.head_block_time()+fc::seconds(300) )->id;\n // This would match but is blocked by sell_low, it has an amount same as call's debt which will be full filled later\n sell_med = create_sell_order(seller_id(db), asset(262, usd_id), asset(2620))->id; // 1/10\n // Another big order above sell_med, blocked\n limit_order_id_type sell_med2 = create_sell_order(seller_id(db), asset(1200, usd_id), asset(12120))->id; // 1/10.1\n // Another small order above sell_med2, blocked\n 
limit_order_id_type sell_med3 = create_sell_order(seller_id(db), asset(120, usd_id), asset(1224))->id; // 1/10.2\n\n // generate a block, sell_low will expire\n BOOST_TEST_MESSAGE( \"Expire sell_low\" );\n generate_blocks( HARDFORK_615_TIME + fc::hours(26) );\n BOOST_CHECK( db.find( sell_low ) == nullptr );\n\n // #453 multiple order matching issue occurs\n BOOST_CHECK( db.find( sell_med ) == nullptr ); // sell_med get filled\n BOOST_CHECK( db.find( sell_med2 ) != nullptr ); // sell_med2 is still there\n BOOST_CHECK( db.find( sell_med3 ) == nullptr ); // sell_med3 get filled\n BOOST_CHECK( db.find( call_id ) == nullptr ); // the first call order get filled\n BOOST_CHECK( db.find( call2_id ) == nullptr ); // the second call order get filled\n BOOST_CHECK( db.find( call3_id ) != nullptr ); // the third call order is still there\n\n\n} FC_LOG_AND_RETHROW() }\n\nBOOST_AUTO_TEST_CASE(hardfork_core_338_test)\n{ try {\n auto mi = db.get_global_properties().parameters.maintenance_interval;\n generate_blocks(HARDFORK_CORE_343_TIME - mi); // assume all hard forks occur at same time\n generate_blocks(db.get_dynamic_global_properties().next_maintenance_time);\n\n set_expiration( db, trx );\n\n ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(feedproducer));\n\n const auto& bitusd = create_bitasset(\"USDBIT\", feedproducer_id);\n const auto& core = asset_id_type()(db);\n asset_id_type usd_id = bitusd.id;\n asset_id_type core_id = core.id;\n\n int64_t init_balance(1000000);\n\n transfer(committee_account, buyer_id, asset(init_balance));\n transfer(committee_account, borrower_id, asset(init_balance));\n transfer(committee_account, borrower2_id, asset(init_balance));\n transfer(committee_account, borrower3_id, asset(init_balance));\n update_feed_producers( bitusd, {feedproducer.id} );\n\n price_feed current_feed;\n current_feed.maintenance_collateral_ratio = 1750;\n current_feed.maximum_short_squeeze_ratio = 1100;\n current_feed.settlement_price = bitusd.amount( 1 ) / 
core.amount(5);\n publish_feed( bitusd, feedproducer, current_feed );\n // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7\n const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000));\n call_order_id_type call_id = call.id;\n // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7\n const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500));\n call_order_id_type call2_id = call2.id;\n // create yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7\n const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(16000));\n call_order_id_type call3_id = call3.id;\n transfer(borrower, seller, bitusd.amount(1000));\n transfer(borrower2, seller, bitusd.amount(1000));\n transfer(borrower3, seller, bitusd.amount(1000));\n\n BOOST_CHECK_EQUAL( 1000, call.debt.value );\n BOOST_CHECK_EQUAL( 15000, call.collateral.value );\n BOOST_CHECK_EQUAL( 1000, call2.debt.value );\n BOOST_CHECK_EQUAL( 15500, call2.collateral.value );\n BOOST_CHECK_EQUAL( 1000, call3.debt.value );\n BOOST_CHECK_EQUAL( 16000, call3.collateral.value );\n BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 0, get_balance(seller, core) );\n\n // adjust price feed to get call_order into margin call territory\n current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10);\n publish_feed( bitusd, feedproducer, current_feed );\n // settlement price = 1/10, mssp = 1/11\n\n // This sell order above MSSP will not be matched with a call\n create_sell_order(seller, bitusd.amount(7), core.amount(78))->id;\n\n BOOST_CHECK_EQUAL( 2993, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 0, get_balance(seller, core) );\n\n // This buy order is too low will not be matched with a sell order\n limit_order_id_type buy_low = create_sell_order(buyer, asset(90), bitusd.amount(10))->id;\n // This buy order at MSSP will be matched only if no 
margin call (margin call takes precedence)\n limit_order_id_type buy_med = create_sell_order(buyer, asset(110), bitusd.amount(10))->id;\n // This buy order above MSSP will be matched with a sell order (limit order with better price takes precedence)\n limit_order_id_type buy_high = create_sell_order(buyer, asset(111), bitusd.amount(10))->id;\n\n BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) );\n BOOST_CHECK_EQUAL( init_balance - 90 - 110 - 111, get_balance(buyer, core) );\n\n // This order slightly below the call price will be matched: #606 fixed\n BOOST_CHECK( !create_sell_order(seller, bitusd.amount(700), core.amount(5900) ) );\n\n // firstly it will match with buy_high, at buy_high's price: #625 fixed\n BOOST_CHECK( !db.find( buy_high ) );\n BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 110 );\n BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 90 );\n\n // buy_high pays 111 CORE, receives 10 USD goes to buyer's balance\n BOOST_CHECK_EQUAL( 10, get_balance(buyer, bitusd) );\n BOOST_CHECK_EQUAL( init_balance - 90 - 110 - 111, get_balance(buyer, core) );\n // sell order pays 10 USD, receives 111 CORE, remaining 690 USD for sale, still at price 7/59\n\n // then it will match with call, at mssp: 1/11 = 690/7590 : #338 fixed\n BOOST_CHECK_EQUAL( 2293, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 7701, get_balance(seller, core) );\n BOOST_CHECK_EQUAL( 310, call.debt.value );\n BOOST_CHECK_EQUAL( 7410, call.collateral.value );\n BOOST_CHECK_EQUAL( 1000, call2.debt.value );\n BOOST_CHECK_EQUAL( 15500, call2.collateral.value );\n BOOST_CHECK_EQUAL( 1000, call3.debt.value );\n BOOST_CHECK_EQUAL( 16000, call3.collateral.value );\n\n // call's call_price will be updated after the match, to 741/31/1.75 CORE/USD = 2964/217\n // it's above settlement price (10/1) so won't be margin called again\n BOOST_CHECK( price(asset(2964),asset(217,usd_id)) == call.call_price );\n\n // This would match with call before, but would match with call2 after #343 fixed\n 
BOOST_CHECK( !create_sell_order(seller, bitusd.amount(700), core.amount(6000) ) );\n BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 110 );\n BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 90 );\n\n // fill price would be mssp: 1/11 = 700/7700 : #338 fixed\n BOOST_CHECK_EQUAL( 1593, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 15401, get_balance(seller, core) );\n BOOST_CHECK_EQUAL( 310, call.debt.value );\n BOOST_CHECK_EQUAL( 7410, call.collateral.value );\n BOOST_CHECK_EQUAL( 300, call2.debt.value );\n BOOST_CHECK_EQUAL( 7800, call2.collateral.value );\n BOOST_CHECK_EQUAL( 1000, call3.debt.value );\n BOOST_CHECK_EQUAL( 16000, call3.collateral.value );\n // call2's call_price will be updated after the match, to 78/3/1.75 CORE/USD = 312/21\n BOOST_CHECK( price(asset(312),asset(21,usd_id)) == call2.call_price );\n // it's above settlement price (10/1) so won't be margin called\n\n // at this moment, collateralization of call is 7410 / 310 = 23.9\n // collateralization of call2 is 7800 / 300 = 26\n // collateralization of call3 is 16000 / 1000 = 16\n\n // force settle\n force_settle( seller, bitusd.amount(10) );\n\n BOOST_CHECK_EQUAL( 1583, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 15401, get_balance(seller, core) );\n BOOST_CHECK_EQUAL( 310, call.debt.value );\n BOOST_CHECK_EQUAL( 7410, call.collateral.value );\n BOOST_CHECK_EQUAL( 300, call2.debt.value );\n BOOST_CHECK_EQUAL( 7800, call2.collateral.value );\n BOOST_CHECK_EQUAL( 1000, call3.debt.value );\n BOOST_CHECK_EQUAL( 16000, call3.collateral.value );\n\n // generate blocks to let the settle order execute (price feed will expire after it)\n generate_block();\n generate_blocks( db.head_block_time() + fc::hours(24) );\n\n // call3 get settled, at settlement price 1/10: #343 fixed\n BOOST_CHECK_EQUAL( 1583, get_balance(seller_id, usd_id) );\n BOOST_CHECK_EQUAL( 15501, get_balance(seller_id, core_id) );\n BOOST_CHECK_EQUAL( 310, call_id(db).debt.value );\n BOOST_CHECK_EQUAL( 7410, 
call_id(db).collateral.value );
   BOOST_CHECK_EQUAL( 300, call2_id(db).debt.value );
   BOOST_CHECK_EQUAL( 7800, call2_id(db).collateral.value );
   BOOST_CHECK_EQUAL( 990, call3_id(db).debt.value );
   BOOST_CHECK_EQUAL( 15900, call3_id(db).collateral.value );

   set_expiration( db, trx );
   update_feed_producers( usd_id(db), {feedproducer_id} );

   // at this moment, collateralization of call is 7410 / 310 = 23.9
   // collateralization of call2 is 7800 / 300 = 26
   // collateralization of call3 is 15900 / 990 = 16.06

   // adjust price feed to get call3 into black swan territory, but not the other call orders
   // Note: after hard fork, black swan should occur when collateralization < mssp, but not at < feed
   current_feed.settlement_price = asset(1, usd_id) / asset(16, core_id);
   publish_feed( usd_id(db), feedproducer_id(db), current_feed );
   // settlement price = 1/16, mssp = 10/176

   // black swan event will occur: #649 fixed
   BOOST_CHECK( usd_id(db).bitasset_data(db).has_settlement() );
   // short positions will be closed
   BOOST_CHECK( !db.find( call_id ) );
   BOOST_CHECK( !db.find( call2_id ) );
   BOOST_CHECK( !db.find( call3_id ) );

   // generate a block
   generate_block();


} FC_LOG_AND_RETHROW() }

/***
 * Tests limit order matching against multiple margin calls after #453 got fixed:
 * when the feed moves several call orders into margin call territory at once,
 * all matchable limit orders and call orders should be filled in one pass.
 */
BOOST_AUTO_TEST_CASE(hardfork_core_453_test)
{ try {
   auto mi = db.get_global_properties().parameters.maintenance_interval;
   generate_blocks(HARDFORK_CORE_453_TIME - mi); // assume all hard forks occur at same time
   generate_blocks(db.get_dynamic_global_properties().next_maintenance_time);

   set_expiration( db, trx );

   ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(feedproducer));

   const auto& bitusd = create_bitasset("USDBIT", feedproducer_id);
   const auto& core = asset_id_type()(db);
   asset_id_type usd_id = bitusd.id;

   int64_t init_balance(1000000);

   transfer(committee_account, buyer_id, asset(init_balance));
   transfer(committee_account, borrower_id, asset(init_balance));
   transfer(committee_account, borrower2_id, asset(init_balance));
   transfer(committee_account, borrower3_id, asset(init_balance));
   update_feed_producers( bitusd, {feedproducer.id} );

   price_feed current_feed;
   current_feed.maintenance_collateral_ratio = 1750;
   current_feed.maximum_short_squeeze_ratio = 1100;
   current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5);
   publish_feed( bitusd, feedproducer, current_feed );
   // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7
   const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000));
   call_order_id_type call_id = call.id;
   // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7
   const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500));
   call_order_id_type call2_id = call2.id;
   // create yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7
   const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(16000));
   call_order_id_type call3_id = call3.id;
   transfer(borrower, seller, bitusd.amount(1000));
   transfer(borrower2, seller, bitusd.amount(1000));
   transfer(borrower3, seller, bitusd.amount(1000));

   BOOST_CHECK_EQUAL( 1000, call.debt.value );
   BOOST_CHECK_EQUAL( 15000, call.collateral.value );
   BOOST_CHECK_EQUAL( 1000, call2.debt.value );
   BOOST_CHECK_EQUAL( 15500, call2.collateral.value );
   BOOST_CHECK_EQUAL( 1000, call3.debt.value );
   BOOST_CHECK_EQUAL( 16000, call3.collateral.value );
   BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) );
   BOOST_CHECK_EQUAL( 0, get_balance(seller, core) );

   // no margin call so far

   // This order would match call when it's margin called, it has an amount same as call's debt which will be full filled later
   limit_order_id_type sell_med = create_sell_order(seller_id(db), asset(1000, usd_id), asset(10000))->id; // 1/10
   // Another big order above sell_med, amount bigger than call2's debt
   limit_order_id_type sell_med2 = 
asset(init_balance));
   transfer(committee_account, borrower3_id, asset(init_balance));
   update_feed_producers( bitusd, {feedproducer.id} );

   price_feed current_feed;
   current_feed.maintenance_collateral_ratio = 1750;
   current_feed.maximum_short_squeeze_ratio = 1100;
   current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5);
   publish_feed( bitusd, feedproducer, current_feed );
   // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7
   const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000));
   call_order_id_type call_id = call.id;
   // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7
   const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500));
   call_order_id_type call2_id = call2.id;
   // create yet another position with 500% collateral, call price is 25/1.75 CORE/USD = 100/7
   // (call3 stays far above maintenance ratio, so it must never be margin called in this test)
   const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(25000));
   transfer(borrower, seller, bitusd.amount(1000));
   transfer(borrower2, seller, bitusd.amount(1000));
   transfer(borrower3, seller, bitusd.amount(1000));

   BOOST_CHECK_EQUAL( 1000, call.debt.value );
   BOOST_CHECK_EQUAL( 15000, call.collateral.value );
   BOOST_CHECK_EQUAL( 1000, call2.debt.value );
   BOOST_CHECK_EQUAL( 15500, call2.collateral.value );
   BOOST_CHECK_EQUAL( 1000, call3.debt.value );
   BOOST_CHECK_EQUAL( 25000, call3.collateral.value );
   BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) );
   BOOST_CHECK_EQUAL( 0, get_balance(seller, core) );
   // note: duplicate of the seller USD balance check two lines above, kept as-is
   BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) );
   BOOST_CHECK_EQUAL( init_balance - 15000, get_balance(borrower, core) );
   BOOST_CHECK_EQUAL( init_balance - 15500, get_balance(borrower2, core) );
   BOOST_CHECK_EQUAL( init_balance - 25000, get_balance(borrower3, core) );
   BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) );
   BOOST_CHECK_EQUAL( 0, get_balance(borrower2, bitusd) );
   BOOST_CHECK_EQUAL( 0, get_balance(borrower3, bitusd) );

   // adjust price feed to get call and call2 (but not call3) into margin call territory
   current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10);
   publish_feed( bitusd, feedproducer, current_feed );
   // settlement price = 1/10, mssp = 1/11

   // This sell order above MSSP will not be matched with a call
   limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->id;
   BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 );

   BOOST_CHECK_EQUAL( 2993, get_balance(seller, bitusd) );
   BOOST_CHECK_EQUAL( 0, get_balance(seller, core) );

   // This buy order is too low and will not be matched with a sell order
   limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(10))->id;
   // This buy order at MSSP will be matched only if no margin call (margin call takes precedence)
   limit_order_id_type buy_med = create_sell_order(buyer2, asset(11000), bitusd.amount(1000))->id;
   // This buy order above MSSP will be matched with a sell order (limit order with better price takes precedence)
   limit_order_id_type buy_high = create_sell_order(buyer3, asset(111), bitusd.amount(10))->id;

   BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) );
   BOOST_CHECK_EQUAL( 0, get_balance(buyer2, bitusd) );
   BOOST_CHECK_EQUAL( 0, get_balance(buyer3, bitusd) );
   BOOST_CHECK_EQUAL( init_balance - 80, get_balance(buyer, core) );
   BOOST_CHECK_EQUAL( init_balance - 11000, get_balance(buyer2, core) );
   BOOST_CHECK_EQUAL( init_balance - 111, get_balance(buyer3, core) );

   // Create a big sell order slightly below the call price, will be matched with several orders
   BOOST_CHECK( !create_sell_order(seller, bitusd.amount(700*4), core.amount(5900*4) ) );

   // firstly it will match with buy_high, at buy_high's price
   BOOST_CHECK( !db.find( buy_high ) );
   // buy_high pays 111 CORE, receives 10 USD goes to buyer3's balance
   BOOST_CHECK_EQUAL( 10, get_balance(buyer3, bitusd) );
   BOOST_CHECK_EQUAL( init_balance - 111, get_balance(buyer3, core) );

   // then it will match with call, at mssp: 1/11 = 1000/11000
   BOOST_CHECK( !db.find( call_id ) );
   // call pays 11000 CORE, receives 1000 USD to cover borrower's position, remaining CORE goes to borrower's balance
   BOOST_CHECK_EQUAL( init_balance - 11000, get_balance(borrower, core) );
   BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) );

   // then it will match with call2, at mssp: 1/11 = 1000/11000
   BOOST_CHECK( !db.find( call2_id ) );
   // call2 pays 11000 CORE, receives 1000 USD to cover borrower2's position, remaining CORE goes to borrower2's balance
   BOOST_CHECK_EQUAL( init_balance - 11000, get_balance(borrower2, core) );
   BOOST_CHECK_EQUAL( 0, get_balance(borrower2, bitusd) );

   // then it will match with buy_med, at buy_med's price. Since buy_med is too big, it's partially filled.
   // buy_med receives the remaining USD of sell order, minus market fees, goes to buyer2's balance
   BOOST_CHECK_EQUAL( 783, get_balance(buyer2, bitusd) ); // 700*4-10-1000-1000=790, minus 1% market fee 790*100/10000=7
   BOOST_CHECK_EQUAL( init_balance - 11000, get_balance(buyer2, core) );
   // buy_med pays at 1/11 = 790/8690
   BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 11000-8690 );

   // call3 is not in margin call territory so won't be matched
   BOOST_CHECK_EQUAL( 1000, call3.debt.value );
   BOOST_CHECK_EQUAL( 25000, call3.collateral.value );

   // buy_low's price is too low that won't be matched
   BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 );

   // check seller balance
   BOOST_CHECK_EQUAL( 193, get_balance(seller, bitusd) ); // 3000 - 7 - 700*4
   BOOST_CHECK_EQUAL( 30801, get_balance(seller, core) ); // 111 + 11000 + 11000 + 8690

   // Cancel buy_med; the unfilled 11000-8690 CORE is refunded to buyer2
   cancel_limit_order( buy_med(db) );
   BOOST_CHECK( !db.find( buy_med ) );
   BOOST_CHECK_EQUAL( 783, get_balance(buyer2, bitusd) );
   BOOST_CHECK_EQUAL( init_balance - 8690, get_balance(buyer2, core) );

   // Create 
another sell order slightly below the call price, won't fill
   limit_order_id_type sell_med = create_sell_order( seller, bitusd.amount(7), core.amount(59) )->id;
   BOOST_CHECK_EQUAL( db.find( sell_med )->for_sale.value, 7 );
   // check seller balance
   BOOST_CHECK_EQUAL( 193-7, get_balance(seller, bitusd) );
   BOOST_CHECK_EQUAL( 30801, get_balance(seller, core) );

   // call3 is not in margin call territory so won't be matched
   BOOST_CHECK_EQUAL( 1000, call3.debt.value );
   BOOST_CHECK_EQUAL( 25000, call3.collateral.value );

   // buy_low's price is too low that won't be matched
   BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 );

   // generate a block
   generate_block();

} FC_LOG_AND_RETHROW() }

/***
 * Tests that the #453 fix applies uniformly across several independent markets
 * (USD, EUR, CNY against CORE): orders created before the hard fork are matched
 * identically in each market when the fork activates.
 */
BOOST_AUTO_TEST_CASE(hard_fork_453_cross_test)
{ try { // create orders before hard fork, which will be matched on hard fork
   auto mi = db.get_global_properties().parameters.maintenance_interval;
   generate_blocks(HARDFORK_CORE_453_TIME - mi); // assume all hard forks occur at same time
   generate_block();

   set_expiration( db, trx );

   ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(feedproducer));

   const auto& bitusd = create_bitasset("USDBIT", feedproducer_id);
   const auto& biteur = create_bitasset("EURBIT", feedproducer_id);
   const auto& bitcny = create_bitasset("CNYBIT", feedproducer_id);
   const auto& core = asset_id_type()(db);
   asset_id_type usd_id = bitusd.id;
   asset_id_type eur_id = biteur.id;
   asset_id_type cny_id = bitcny.id;
   asset_id_type core_id = core.id;

   int64_t init_balance(1000000);

   transfer(committee_account, buyer_id, asset(init_balance));
   transfer(committee_account, borrower_id, asset(init_balance));
   transfer(committee_account, borrower2_id, asset(init_balance));
   transfer(committee_account, borrower3_id, asset(init_balance));
   update_feed_producers( bitusd, {feedproducer.id} );
   update_feed_producers( biteur, {feedproducer.id} );
   update_feed_producers( bitcny, {feedproducer.id} );

   // same initial feed (1 bitasset = 5 CORE) for all three markets
   price_feed current_feed;
   current_feed.maintenance_collateral_ratio = 1750;
   current_feed.maximum_short_squeeze_ratio = 1100;
   current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5);
   publish_feed( bitusd, feedproducer, current_feed );
   current_feed.settlement_price = biteur.amount( 1 ) / core.amount(5);
   publish_feed( biteur, feedproducer, current_feed );
   current_feed.settlement_price = bitcny.amount( 1 ) / core.amount(5);
   publish_feed( bitcny, feedproducer, current_feed );
   // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7
   const call_order_object& call_usd = *borrow( borrower, bitusd.amount(1000), asset(15000));
   call_order_id_type call_usd_id = call_usd.id;
   const call_order_object& call_eur = *borrow( borrower, biteur.amount(1000), asset(15000));
   call_order_id_type call_eur_id = call_eur.id;
   const call_order_object& call_cny = *borrow( borrower, bitcny.amount(1000), asset(15000));
   call_order_id_type call_cny_id = call_cny.id;
   // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7
   const call_order_object& call_usd2 = *borrow( borrower2, bitusd.amount(1000), asset(15500));
   call_order_id_type call_usd2_id = call_usd2.id;
   const call_order_object& call_eur2 = *borrow( borrower2, biteur.amount(1000), asset(15500));
   call_order_id_type call_eur2_id = call_eur2.id;
   const call_order_object& call_cny2 = *borrow( borrower2, bitcny.amount(1000), asset(15500));
   call_order_id_type call_cny2_id = call_cny2.id;
   // create yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7
   const call_order_object& call_usd3 = *borrow( borrower3, bitusd.amount(1000), asset(16000));
   call_order_id_type call_usd3_id = call_usd3.id;
   const call_order_object& call_eur3 = *borrow( borrower3, biteur.amount(1000), asset(16000));
   call_order_id_type call_eur3_id = call_eur3.id;
   const call_order_object& call_cny3 = *borrow( borrower3, bitcny.amount(1000), asset(16000));
   call_order_id_type call_cny3_id = call_cny3.id;
   transfer(borrower, seller, bitusd.amount(1000));
   transfer(borrower2, seller, bitusd.amount(1000));
   transfer(borrower3, seller, bitusd.amount(1000));
   transfer(borrower, seller, biteur.amount(1000));
   transfer(borrower2, seller, biteur.amount(1000));
   transfer(borrower3, seller, biteur.amount(1000));
   transfer(borrower, seller, bitcny.amount(1000));
   transfer(borrower2, seller, bitcny.amount(1000));
   transfer(borrower3, seller, bitcny.amount(1000));

   BOOST_CHECK_EQUAL( 1000, call_usd.debt.value );
   BOOST_CHECK_EQUAL( 15000, call_usd.collateral.value );
   BOOST_CHECK_EQUAL( 1000, call_usd2.debt.value );
   BOOST_CHECK_EQUAL( 15500, call_usd2.collateral.value );
   BOOST_CHECK_EQUAL( 1000, call_usd3.debt.value );
   BOOST_CHECK_EQUAL( 16000, call_usd3.collateral.value );
   BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) );
   BOOST_CHECK_EQUAL( 1000, call_eur.debt.value );
   BOOST_CHECK_EQUAL( 15000, call_eur.collateral.value );
   BOOST_CHECK_EQUAL( 1000, call_eur2.debt.value );
   BOOST_CHECK_EQUAL( 15500, call_eur2.collateral.value );
   BOOST_CHECK_EQUAL( 1000, call_eur3.debt.value );
   BOOST_CHECK_EQUAL( 16000, call_eur3.collateral.value );
   BOOST_CHECK_EQUAL( 3000, get_balance(seller, biteur) );
   BOOST_CHECK_EQUAL( 1000, call_cny.debt.value );
   BOOST_CHECK_EQUAL( 15000, call_cny.collateral.value );
   BOOST_CHECK_EQUAL( 1000, call_cny2.debt.value );
   BOOST_CHECK_EQUAL( 15500, call_cny2.collateral.value );
   BOOST_CHECK_EQUAL( 1000, call_cny3.debt.value );
   BOOST_CHECK_EQUAL( 16000, call_cny3.collateral.value );
   BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitcny) );
   BOOST_CHECK_EQUAL( 0, get_balance(seller, core) );

   // adjust price feed to get call_order into margin call territory
   current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10);
   publish_feed( bitusd, feedproducer, current_feed );
   current_feed.settlement_price = biteur.amount( 1 ) / 
core.amount(10);
   publish_feed( biteur, feedproducer, current_feed );
   current_feed.settlement_price = bitcny.amount( 1 ) / core.amount(10);
   publish_feed( bitcny, feedproducer, current_feed );
   // settlement price = 1/10, mssp = 1/11

   // Identical order book in each of the three markets:
   // This order below the call price will not be matched before hard fork: 1/8 #606
   limit_order_id_type sell_usd_low = create_sell_order(seller, bitusd.amount(1000), core.amount(7000))->id;
   // This is a big order, price below the call price will not be matched before hard fork: 1007/8056 = 1/8 #606
   limit_order_id_type sell_usd_low2 = create_sell_order(seller, bitusd.amount(1007), core.amount(8056))->id;
   // This order above the MSSP will not be matched before hard fork
   limit_order_id_type sell_usd_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->id;
   // This would match but is blocked by sell_low?! #606
   limit_order_id_type sell_usd_med = create_sell_order(seller, bitusd.amount(700), core.amount(6400))->id;
   // This would match but is blocked by sell_low?! #606
   limit_order_id_type sell_usd_med2 = create_sell_order(seller, bitusd.amount(7), core.amount(65))->id;

   // This order below the call price will not be matched before hard fork: 1/8 #606
   limit_order_id_type sell_eur_low = create_sell_order(seller, biteur.amount(1000), core.amount(7000))->id;
   // This is a big order, price below the call price will not be matched before hard fork: 1007/8056 = 1/8 #606
   limit_order_id_type sell_eur_low2 = create_sell_order(seller, biteur.amount(1007), core.amount(8056))->id;
   // This order above the MSSP will not be matched before hard fork
   limit_order_id_type sell_eur_high = create_sell_order(seller, biteur.amount(7), core.amount(78))->id;
   // This would match but is blocked by sell_low?! #606
   limit_order_id_type sell_eur_med = create_sell_order(seller, biteur.amount(700), core.amount(6400))->id;
   // This would match but is blocked by sell_low?! #606
   limit_order_id_type sell_eur_med2 = create_sell_order(seller, biteur.amount(7), core.amount(65))->id;

   // This order below the call price will not be matched before hard fork: 1/8 #606
   limit_order_id_type sell_cny_low = create_sell_order(seller, bitcny.amount(1000), core.amount(7000))->id;
   // This is a big order, price below the call price will not be matched before hard fork: 1007/8056 = 1/8 #606
   limit_order_id_type sell_cny_low2 = create_sell_order(seller, bitcny.amount(1007), core.amount(8056))->id;
   // This order above the MSSP will not be matched before hard fork
   limit_order_id_type sell_cny_high = create_sell_order(seller, bitcny.amount(7), core.amount(78))->id;
   // This would match but is blocked by sell_low?! #606
   limit_order_id_type sell_cny_med = create_sell_order(seller, bitcny.amount(700), core.amount(6400))->id;
   // This would match but is blocked by sell_low?! #606
   limit_order_id_type sell_cny_med2 = create_sell_order(seller, bitcny.amount(7), core.amount(65))->id;

   BOOST_CHECK_EQUAL( 3000-1000-1007-7-700-7, get_balance(seller_id, usd_id) );
   BOOST_CHECK_EQUAL( 3000-1000-1007-7-700-7, get_balance(seller_id, eur_id) );
   BOOST_CHECK_EQUAL( 3000-1000-1007-7-700-7, get_balance(seller_id, cny_id) );
   BOOST_CHECK_EQUAL( 0, get_balance(seller, core) );

   // generate a block to include operations above
   generate_block();
   // go over the hard fork, make sure feed doesn't expire
   generate_blocks(db.get_dynamic_global_properties().next_maintenance_time);

   // Expected post-fork matching, identical in each market:
   // sell_low and call should get matched first
   BOOST_CHECK( !db.find( sell_usd_low ) );
   BOOST_CHECK( !db.find( call_usd_id ) );
   // sell_low2 and call2 should get matched
   BOOST_CHECK( !db.find( call_usd2_id ) );
   // sell_low2 and call3 should get matched: fixed #453
   BOOST_CHECK( !db.find( sell_usd_low2 ) );
   // sell_med and call3 should get matched
   BOOST_CHECK( !db.find( sell_usd_med ) );
   // call3 now is not at margin call state, so sell_med2 won't get matched
   BOOST_CHECK_EQUAL( db.find( sell_usd_med2 )->for_sale.value, 7 );
   // sell_high should still be there, didn't match anything
   BOOST_CHECK_EQUAL( db.find( sell_usd_high )->for_sale.value, 7 );

   // sell_low and call should get matched first
   BOOST_CHECK( !db.find( sell_eur_low ) );
   BOOST_CHECK( !db.find( call_eur_id ) );
   // sell_low2 and call2 should get matched
   BOOST_CHECK( !db.find( call_eur2_id ) );
   // sell_low2 and call3 should get matched: fixed #453
   BOOST_CHECK( !db.find( sell_eur_low2 ) );
   // sell_med and call3 should get matched
   BOOST_CHECK( !db.find( sell_eur_med ) );
   // call3 now is not at margin call state, so sell_med2 won't get matched
   BOOST_CHECK_EQUAL( db.find( sell_eur_med2 )->for_sale.value, 7 );
   // sell_high should still be there, didn't match anything
   BOOST_CHECK_EQUAL( db.find( sell_eur_high )->for_sale.value, 7 );

   // sell_low and call should get matched first
   BOOST_CHECK( !db.find( sell_cny_low ) );
   BOOST_CHECK( !db.find( call_cny_id ) );
   // sell_low2 and call2 should get matched
   BOOST_CHECK( !db.find( call_cny2_id ) );
   // sell_low2 and call3 should get matched: fixed #453
   BOOST_CHECK( !db.find( sell_cny_low2 ) );
   // sell_med and call3 should get matched
   BOOST_CHECK( !db.find( sell_cny_med ) );
   // call3 now is not at margin call state, so sell_med2 won't get matched
   BOOST_CHECK_EQUAL( db.find( sell_cny_med2 )->for_sale.value, 7 );
   // sell_high should still be there, didn't match anything
   BOOST_CHECK_EQUAL( db.find( sell_cny_high )->for_sale.value, 7 );

   // all match price would be limit order price
   BOOST_CHECK_EQUAL( 3000-1000-1007-7-700-7, get_balance(seller_id, usd_id) );
   BOOST_CHECK_EQUAL( 3000-1000-1007-7-700-7, get_balance(seller_id, eur_id) );
   BOOST_CHECK_EQUAL( 3000-1000-1007-7-700-7, get_balance(seller_id, cny_id) );
   BOOST_CHECK_EQUAL( (7000+8056+6400)*3, get_balance(seller_id, core_id) );
   BOOST_CHECK_EQUAL( 1000-7-700, call_usd3_id(db).debt.value );
   
BOOST_CHECK_EQUAL( 16000-56-6400, call_usd3_id(db).collateral.value );
   BOOST_CHECK_EQUAL( 1000-7-700, call_eur3_id(db).debt.value );
   BOOST_CHECK_EQUAL( 16000-56-6400, call_eur3_id(db).collateral.value );
   BOOST_CHECK_EQUAL( 1000-7-700, call_cny3_id(db).debt.value );
   BOOST_CHECK_EQUAL( 16000-56-6400, call_cny3_id(db).collateral.value );
   // call3's call_price should be updated: 9544/293/1.75 = 9544*4 / 293*7 = 38176/2051 CORE/USD
   BOOST_CHECK( price(asset(38176),asset(2051,usd_id)) == call_usd3_id(db).call_price );
   BOOST_CHECK( price(asset(38176),asset(2051,eur_id)) == call_eur3_id(db).call_price );
   BOOST_CHECK( price(asset(38176),asset(2051,cny_id)) == call_cny3_id(db).call_price );

   generate_block();

} FC_LOG_AND_RETHROW() }

/***
 * Tests #338 behavior across the hard fork: pre-fork orders match on fork
 * activation, and the remaining undercollateralized position triggers a black
 * swan (global settlement) immediately rather than waiting for a new limit order.
 */
BOOST_AUTO_TEST_CASE(hard_fork_338_cross_test)
{ try { // create orders before hard fork, which will be matched on hard fork
   auto mi = db.get_global_properties().parameters.maintenance_interval;
   generate_blocks(HARDFORK_CORE_338_TIME - mi); // assume all hard forks occur at same time
   generate_block();

   set_expiration( db, trx );

   ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer));

   const auto& bitusd = create_bitasset("USDBIT", feedproducer_id);
   const auto& core = asset_id_type()(db);
   asset_id_type usd_id = bitusd.id;
   asset_id_type core_id = core.id;

   int64_t init_balance(1000000);

   transfer(committee_account, buyer_id, asset(init_balance));
   transfer(committee_account, borrower_id, asset(init_balance));
   transfer(committee_account, borrower2_id, asset(init_balance));
   transfer(committee_account, borrower3_id, asset(init_balance));
   transfer(committee_account, borrower4_id, asset(init_balance));
   update_feed_producers( bitusd, {feedproducer.id} );

   price_feed current_feed;
   current_feed.maintenance_collateral_ratio = 1750;
   current_feed.maximum_short_squeeze_ratio = 1100;
   current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5);
   publish_feed( bitusd, feedproducer, current_feed );
   // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7
   const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000));
   call_order_id_type call_id = call.id;
   // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7
   const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500));
   call_order_id_type call2_id = call2.id;
   // create yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7
   const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(16000));
   call_order_id_type call3_id = call3.id;
   // create yet another position with 400% collateral, call price is 20/1.75 CORE/USD = 80/7
   const call_order_object& call4 = *borrow( borrower4, bitusd.amount(1000), asset(20000));
   call_order_id_type call4_id = call4.id;
   transfer(borrower, seller, bitusd.amount(1000));
   transfer(borrower2, seller, bitusd.amount(1000));
   transfer(borrower3, seller, bitusd.amount(1000));

   BOOST_CHECK_EQUAL( 1000, call.debt.value );
   BOOST_CHECK_EQUAL( 15000, call.collateral.value );
   BOOST_CHECK_EQUAL( 1000, call2.debt.value );
   BOOST_CHECK_EQUAL( 15500, call2.collateral.value );
   BOOST_CHECK_EQUAL( 1000, call3.debt.value );
   BOOST_CHECK_EQUAL( 16000, call3.collateral.value );
   BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) );
   BOOST_CHECK_EQUAL( 0, get_balance(seller, core) );

   // adjust price feed to get call_order into margin call territory
   current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10);
   publish_feed( bitusd, feedproducer, current_feed );
   // settlement price = 1/10, mssp = 1/11

   // This order below the call price will not be matched before hard fork: 1/8 #606
   limit_order_id_type sell_low = create_sell_order(seller, bitusd.amount(1000), core.amount(7000))->id;
   // This is a big order, price below the call price will not be matched before hard fork: 1007/8056 = 1/8 #606
   limit_order_id_type sell_low2 = create_sell_order(seller, bitusd.amount(1007), core.amount(8056))->id;
   // This would match but is blocked by sell_low?! #606
   limit_order_id_type sell_med = create_sell_order(seller, bitusd.amount(7), core.amount(64))->id;

   // adjust price feed to get call_order into black swan territory
   current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(16);
   publish_feed( bitusd, feedproducer, current_feed );
   // settlement price = 1/16, mssp = 10/176

   // due to sell_low, black swan won't occur
   BOOST_CHECK( !usd_id(db).bitasset_data(db).has_settlement() );

   BOOST_CHECK_EQUAL( 3000-1000-1007-7, get_balance(seller_id, usd_id) );
   BOOST_CHECK_EQUAL( 0, get_balance(seller, core) );

   // generate a block to include operations above
   generate_block();
   // go over the hard fork, make sure feed doesn't expire
   generate_blocks(db.get_dynamic_global_properties().next_maintenance_time);

   // sell_low and call should get matched first
   BOOST_CHECK( !db.find( sell_low ) );
   BOOST_CHECK( !db.find( call_id ) );
   // sell_low2 and call2 should get matched
   BOOST_CHECK( !db.find( call2_id ) );
   // sell_low2 and call3 should get matched: fixed #453
   BOOST_CHECK( !db.find( sell_low2 ) );
   // sell_med and call3 should get matched
   BOOST_CHECK( !db.find( sell_med ) );

   // at this moment,
   // collateralization of call3 is (16000-56-64) / (1000-7-7) = 15880/986 = 16.1, it's > 16 but < 17.6
   // although there is no sell order, it should trigger a black swan event right away,
   // because after hard fork new limit order won't trigger black swan event
   BOOST_CHECK( usd_id(db).bitasset_data(db).has_settlement() );
   BOOST_CHECK( !db.find( call3_id ) );
   BOOST_CHECK( !db.find( call4_id ) );

   // since 16.1 > 16, global settlement should be at feed price 16/1
   // so settlement fund should be 986*16 + 1000*16
   BOOST_CHECK_EQUAL( 1986*16, usd_id(db).bitasset_data(db).settlement_fund.value );
   // global settlement price should be 16/1, since no rounding here
   BOOST_CHECK( price(asset(1,usd_id),asset(16) ) == usd_id(db).bitasset_data(db).settlement_price );

   BOOST_CHECK_EQUAL( 3000-1000-1007-7, get_balance(seller_id, usd_id) );
   BOOST_CHECK_EQUAL( 7000+8056+64, get_balance(seller_id, core_id) );
   BOOST_CHECK_EQUAL( 0, get_balance(borrower3_id, usd_id) );
   BOOST_CHECK_EQUAL( init_balance-16000+15880-986*16, get_balance(borrower3_id, core_id) );
   BOOST_CHECK_EQUAL( 1000, get_balance(borrower4_id, usd_id) );
   BOOST_CHECK_EQUAL( init_balance-1000*16, get_balance(borrower4_id, core_id) );

   generate_block();

} FC_LOG_AND_RETHROW() }

/***
 * Tests #649 behavior across the hard fork: a feed drop that would be a black
 * swan does not settle before the fork, but triggers global settlement when the
 * fork activates. (Uses HARDFORK_CORE_343_TIME since all forks occur together.)
 */
BOOST_AUTO_TEST_CASE(hard_fork_649_cross_test)
{ try { // create orders before hard fork, which will be matched on hard fork
   auto mi = db.get_global_properties().parameters.maintenance_interval;
   generate_blocks(HARDFORK_CORE_343_TIME - mi); // assume all hard forks occur at same time
   generate_block();

   set_expiration( db, trx );

   ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer));

   const auto& bitusd = create_bitasset("USDBIT", feedproducer_id);
   const auto& core = asset_id_type()(db);
   asset_id_type usd_id = bitusd.id;
   asset_id_type core_id = core.id;

   int64_t init_balance(1000000);

   transfer(committee_account, buyer_id, asset(init_balance));
   transfer(committee_account, borrower_id, asset(init_balance));
   transfer(committee_account, borrower2_id, asset(init_balance));
   transfer(committee_account, borrower3_id, asset(init_balance));
   transfer(committee_account, borrower4_id, asset(init_balance));
   update_feed_producers( bitusd, {feedproducer.id} );

   price_feed current_feed;
   current_feed.maintenance_collateral_ratio = 1750;
   current_feed.maximum_short_squeeze_ratio = 1100;
   current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5);
   publish_feed( bitusd, feedproducer, current_feed 
);
   // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7
   const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000));
   call_order_id_type call_id = call.id;
   // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7
   const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500));
   call_order_id_type call2_id = call2.id;
   // create yet another position with 320% collateral, call price is 16/1.75 CORE/USD = 64/7
   const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(16000));
   call_order_id_type call3_id = call3.id;
   transfer(borrower, seller, bitusd.amount(1000));
   transfer(borrower2, seller, bitusd.amount(1000));
   transfer(borrower3, seller, bitusd.amount(1000));

   BOOST_CHECK_EQUAL( 1000, call.debt.value );
   BOOST_CHECK_EQUAL( 15000, call.collateral.value );
   BOOST_CHECK_EQUAL( 1000, call2.debt.value );
   BOOST_CHECK_EQUAL( 15500, call2.collateral.value );
   BOOST_CHECK_EQUAL( 1000, call3.debt.value );
   BOOST_CHECK_EQUAL( 16000, call3.collateral.value );
   BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) );
   BOOST_CHECK_EQUAL( 0, get_balance(seller, core) );

   // adjust price feed to get call_order into margin call territory
   current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10);
   publish_feed( bitusd, feedproducer, current_feed );
   // settlement price = 1/10, mssp = 1/11

   // This would match with call at price 707/6464
   BOOST_CHECK( !create_sell_order(seller, bitusd.amount(707), core.amount(6464)) );
   BOOST_CHECK_EQUAL( 3000-707, get_balance(seller_id, usd_id) );
   BOOST_CHECK_EQUAL( 6464, get_balance(seller_id, core_id) );
   BOOST_CHECK_EQUAL( 293, call.debt.value );
   BOOST_CHECK_EQUAL( 8536, call.collateral.value );

   // at this moment,
   // collateralization of call is 8536 / 293 = 29.1
   // collateralization of call2 is 15500 / 1000 = 15.5
   // collateralization of call3 is 16000 / 1000 = 16

   generate_block();
   set_expiration( db, trx );
   update_feed_producers( usd_id(db), {feedproducer_id} );

   // adjust price feed to get call_order into black swan territory
   current_feed.settlement_price = price(asset(1,usd_id) / asset(20));
   publish_feed( usd_id(db), feedproducer_id(db), current_feed );
   // settlement price = 1/20, mssp = 1/22

   // due to #649, black swan won't occur
   BOOST_CHECK( !usd_id(db).bitasset_data(db).has_settlement() );

   // generate a block to include operations above
   generate_block();
   BOOST_CHECK( !usd_id(db).bitasset_data(db).has_settlement() );
   // go over the hard fork, make sure feed doesn't expire
   generate_blocks(db.get_dynamic_global_properties().next_maintenance_time);

   // a black swan event should occur
   BOOST_CHECK( usd_id(db).bitasset_data(db).has_settlement() );
   BOOST_CHECK( !db.find( call_id ) );
   BOOST_CHECK( !db.find( call2_id ) );
   BOOST_CHECK( !db.find( call3_id ) );

   // since least collateral ratio 15.5 < 20, global settlement should execute at price = least collateral ratio 15.5/1
   // so settlement fund should be 15500 + 15500 + round_up(15.5 * 293)
   BOOST_CHECK_EQUAL( 15500*2 + (293 * 155 + 9) / 10, usd_id(db).bitasset_data(db).settlement_fund.value );
   // global settlement price should be settlement_fund/(2000+293), but not 15.5/1 due to rounding
   BOOST_CHECK( price(asset(2293,usd_id),asset(15500*2+(293*155+9)/10) ) == usd_id(db).bitasset_data(db).settlement_price );

   BOOST_CHECK_EQUAL( 3000-707, get_balance(seller_id, usd_id) );
   BOOST_CHECK_EQUAL( 6464, get_balance(seller_id, core_id) );
   BOOST_CHECK_EQUAL( 0, get_balance(borrower_id, usd_id) );
   BOOST_CHECK_EQUAL( init_balance-6464-(293*155+9)/10, get_balance(borrower_id, core_id) );
   BOOST_CHECK_EQUAL( 0, get_balance(borrower2_id, usd_id) );
   BOOST_CHECK_EQUAL( init_balance-15500, get_balance(borrower2_id, core_id) );
   BOOST_CHECK_EQUAL( 0, get_balance(borrower3_id, usd_id) );
   BOOST_CHECK_EQUAL( init_balance-15500, get_balance(borrower3_id, core_id) );

   generate_block();

} FC_LOG_AND_RETHROW() }

/***
 * Tests #343 behavior across the hard fork: margin calls created before the
 * fork match against limit orders at the correct (call) price after the fork.
 */
BOOST_AUTO_TEST_CASE(hard_fork_343_cross_test)
{ try { // create orders before hard fork, which will be matched on hard fork
   auto mi = db.get_global_properties().parameters.maintenance_interval;
   generate_blocks(HARDFORK_CORE_343_TIME - mi); // assume all hard forks occur at same time
   generate_block();

   set_expiration( db, trx );

   ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(borrower4)(feedproducer));

   const auto& bitusd = create_bitasset("USDBIT", feedproducer_id);
   const auto& core = asset_id_type()(db);
   asset_id_type usd_id = bitusd.id;
   asset_id_type core_id = core.id;

   int64_t init_balance(1000000);

   transfer(committee_account, buyer_id, asset(init_balance));
   transfer(committee_account, borrower_id, asset(init_balance));
   transfer(committee_account, borrower2_id, asset(init_balance));
   transfer(committee_account, borrower3_id, asset(init_balance));
   transfer(committee_account, borrower4_id, asset(init_balance));
   update_feed_producers( bitusd, {feedproducer.id} );

   price_feed current_feed;
   current_feed.maintenance_collateral_ratio = 1750;
   current_feed.maximum_short_squeeze_ratio = 1100;
   current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5);
   publish_feed( bitusd, feedproducer, current_feed );
   // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7
   const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000));
   call_order_id_type call_id = call.id;
   // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7
   const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500));
   call_order_id_type call2_id = call2.id;
   // create yet another position with 350% collateral, call price is 17.5/1.75 CORE/USD = 77/7
   const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(17500));
   call_order_id_type call3_id = call3.id;
   transfer(borrower, seller, bitusd.amount(1000));
   transfer(borrower2, seller, bitusd.amount(1000));
   transfer(borrower3, seller, bitusd.amount(1000));

   BOOST_CHECK_EQUAL( 1000, call.debt.value );
   BOOST_CHECK_EQUAL( 15000, call.collateral.value );
   BOOST_CHECK_EQUAL( 1000, call2.debt.value );
   BOOST_CHECK_EQUAL( 15500, call2.collateral.value );
   BOOST_CHECK_EQUAL( 1000, call3.debt.value );
   BOOST_CHECK_EQUAL( 17500, call3.collateral.value );
   BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) );
   BOOST_CHECK_EQUAL( 0, get_balance(seller, core) );

   // adjust price feed to get call_order into margin call territory
   current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10);
   publish_feed( bitusd, feedproducer, current_feed );
   // settlement price = 1/10, mssp = 1/11

   // This would match with call at price 700/6400
   BOOST_CHECK( !create_sell_order(seller, bitusd.amount(700), core.amount(6400)) );
   BOOST_CHECK_EQUAL( 3000-700, get_balance(seller_id, usd_id) );
   BOOST_CHECK_EQUAL( 6400, get_balance(seller_id, core_id) );
   BOOST_CHECK_EQUAL( 300, call.debt.value );
   BOOST_CHECK_EQUAL( 8600, call.collateral.value );

   // at this moment,
   // collateralization of call is 8600 / 300 = 28.67
   // collateralization of call2 is 15500 / 1000 = 15.5
   // collateralization of call3 is 17500 / 1000 = 17.5

   // generate a block to include operations above
   generate_block();
   // go over the hard fork, make sure feed doesn't expire
   generate_blocks(db.get_dynamic_global_properties().next_maintenance_time);

   set_expiration( db, trx );

   // This will match with call2 at price 7/77 (#343 fixed)
   BOOST_CHECK( !create_sell_order(seller_id(db), asset(7*50,usd_id), asset(65*50)) );
   BOOST_CHECK_EQUAL( 3000-700-7*50, get_balance(seller_id, usd_id) );
   BOOST_CHECK_EQUAL( 6400+77*50, get_balance(seller_id, core_id) );
   BOOST_CHECK_EQUAL( 300, call_id(db).debt.value );
   
BOOST_CHECK_EQUAL( 8600, call_id(db).collateral.value );\n BOOST_CHECK_EQUAL( 1000-7*50, call2_id(db).debt.value );\n BOOST_CHECK_EQUAL( 15500-77*50, call2_id(db).collateral.value );\n BOOST_CHECK_EQUAL( 1000, call3_id(db).debt.value );\n BOOST_CHECK_EQUAL( 17500, call3_id(db).collateral.value );\n\n // at this moment,\n // collateralization of call is 8600 / 300 = 28.67\n // collateralization of call2 is 11650 / 650 = 17.9\n // collateralization of call3 is 17500 / 1000 = 17.5\n\n // This will match with call3 at price 7/77 (#343 fixed)\n BOOST_CHECK( !create_sell_order(seller_id(db), asset(7,usd_id), asset(65)) );\n BOOST_CHECK_EQUAL( 3000-700-7*50-7, get_balance(seller_id, usd_id) );\n BOOST_CHECK_EQUAL( 6400+77*50+77, get_balance(seller_id, core_id) );\n BOOST_CHECK_EQUAL( 300, call_id(db).debt.value );\n BOOST_CHECK_EQUAL( 8600, call_id(db).collateral.value );\n BOOST_CHECK_EQUAL( 1000-7*50, call2_id(db).debt.value );\n BOOST_CHECK_EQUAL( 15500-77*50, call2_id(db).collateral.value );\n BOOST_CHECK_EQUAL( 1000-7, call3_id(db).debt.value );\n BOOST_CHECK_EQUAL( 17500-77, call3_id(db).collateral.value );\n\n // at this moment,\n // collateralization of call is 8600 / 300 = 28.67\n // collateralization of call2 is 11650 / 650 = 17.9\n // collateralization of call3 is 17423 / 993 = 17.55\n\n // no more margin call now\n BOOST_CHECK( create_sell_order(seller_id(db), asset(7,usd_id), asset(65)) );\n\n generate_block();\n\n} FC_LOG_AND_RETHROW() }\n\n/***\n * BSIP38 \"target_collateral_ratio\" test: matching a taker limit order with multiple maker call orders\n */\nBOOST_AUTO_TEST_CASE(target_cr_test_limit_call)\n{ try {\n auto mi = db.get_global_properties().parameters.maintenance_interval;\n generate_blocks(HARDFORK_CORE_834_TIME - mi);\n generate_blocks(db.get_dynamic_global_properties().next_maintenance_time);\n\n set_expiration( db, trx );\n\n ACTORS((buyer)(buyer2)(buyer3)(seller)(borrower)(borrower2)(borrower3)(feedproducer));\n\n const auto& bitusd = 
create_bitasset(\"USDBIT\", feedproducer_id);\n const auto& core = asset_id_type()(db);\n\n int64_t init_balance(1000000);\n\n transfer(committee_account, buyer_id, asset(init_balance));\n transfer(committee_account, buyer2_id, asset(init_balance));\n transfer(committee_account, buyer3_id, asset(init_balance));\n transfer(committee_account, borrower_id, asset(init_balance));\n transfer(committee_account, borrower2_id, asset(init_balance));\n transfer(committee_account, borrower3_id, asset(init_balance));\n update_feed_producers( bitusd, {feedproducer.id} );\n\n price_feed current_feed;\n current_feed.maintenance_collateral_ratio = 1750;\n current_feed.maximum_short_squeeze_ratio = 1100;\n current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5);\n publish_feed( bitusd, feedproducer, current_feed );\n // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7, tcr 170% is lower than 175%\n const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000), 1700);\n call_order_id_type call_id = call.id;\n // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7, tcr 200% is higher than 175%\n const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500), 2000);\n call_order_id_type call2_id = call2.id;\n // create yet another position with 500% collateral, call price is 25/1.75 CORE/USD = 100/7, no tcr\n const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(25000));\n transfer(borrower, seller, bitusd.amount(1000));\n transfer(borrower2, seller, bitusd.amount(1000));\n transfer(borrower3, seller, bitusd.amount(1000));\n\n BOOST_CHECK_EQUAL( 1000, call.debt.value );\n BOOST_CHECK_EQUAL( 15000, call.collateral.value );\n BOOST_CHECK_EQUAL( 1000, call2.debt.value );\n BOOST_CHECK_EQUAL( 15500, call2.collateral.value );\n BOOST_CHECK_EQUAL( 1000, call3.debt.value );\n BOOST_CHECK_EQUAL( 25000, call3.collateral.value );\n BOOST_CHECK_EQUAL( 3000, 
get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 0, get_balance(seller, core) );\n BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( init_balance - 15000, get_balance(borrower, core) );\n BOOST_CHECK_EQUAL( init_balance - 15500, get_balance(borrower2, core) );\n BOOST_CHECK_EQUAL( init_balance - 25000, get_balance(borrower3, core) );\n BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) );\n BOOST_CHECK_EQUAL( 0, get_balance(borrower2, bitusd) );\n BOOST_CHECK_EQUAL( 0, get_balance(borrower3, bitusd) );\n\n // adjust price feed to get call and call2 (but not call3) into margin call territory\n current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10);\n publish_feed( bitusd, feedproducer, current_feed );\n // settlement price = 1/10, mssp = 1/11\n\n // This sell order above MSSP will not be matched with a call\n limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), core.amount(78))->id;\n BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 );\n\n BOOST_CHECK_EQUAL( 2993, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 0, get_balance(seller, core) );\n\n // This buy order is too low will not be matched with a sell order\n limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(10))->id;\n // This buy order at MSSP will be matched only if no margin call (margin call takes precedence)\n limit_order_id_type buy_med = create_sell_order(buyer2, asset(33000), bitusd.amount(3000))->id;\n // This buy order above MSSP will be matched with a sell order (limit order with better price takes precedence)\n limit_order_id_type buy_high = create_sell_order(buyer3, asset(111), bitusd.amount(10))->id;\n\n BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) );\n BOOST_CHECK_EQUAL( 0, get_balance(buyer2, bitusd) );\n BOOST_CHECK_EQUAL( 0, get_balance(buyer3, bitusd) );\n BOOST_CHECK_EQUAL( init_balance - 80, get_balance(buyer, core) );\n BOOST_CHECK_EQUAL( init_balance - 33000, 
get_balance(buyer2, core) );\n BOOST_CHECK_EQUAL( init_balance - 111, get_balance(buyer3, core) );\n\n // call and call2's CR is quite high, and debt amount is quite a lot, assume neither of them will be completely filled\n price match_price( bitusd.amount(1) / core.amount(11) );\n share_type call_to_cover = call_id(db).get_max_debt_to_cover(match_price,current_feed.settlement_price,1750);\n share_type call2_to_cover = call2_id(db).get_max_debt_to_cover(match_price,current_feed.settlement_price,1750);\n BOOST_CHECK_LT( call_to_cover.value, call_id(db).debt.value );\n BOOST_CHECK_LT( call2_to_cover.value, call2_id(db).debt.value );\n // even though call2 has a higher CR, since call's TCR is less than call2's TCR, so we expect call will cover less when called\n BOOST_CHECK_LT( call_to_cover.value, call2_to_cover.value );\n\n // Create a big sell order slightly below the call price, will be matched with several orders\n BOOST_CHECK( !create_sell_order(seller, bitusd.amount(700*4), core.amount(5900*4) ) );\n\n // firstly it will match with buy_high, at buy_high's price\n BOOST_CHECK( !db.find( buy_high ) );\n // buy_high pays 111 CORE, receives 10 USD goes to buyer3's balance\n BOOST_CHECK_EQUAL( 10, get_balance(buyer3, bitusd) );\n BOOST_CHECK_EQUAL( init_balance - 111, get_balance(buyer3, core) );\n\n // then it will match with call, at mssp: 1/11 = 1000/11000\n const call_order_object* tmp_call = db.find( call_id );\n BOOST_CHECK( tmp_call != nullptr );\n\n // call will receive call_to_cover, pay 11*call_to_cover\n share_type call_to_pay = call_to_cover * 11;\n BOOST_CHECK_EQUAL( 1000 - call_to_cover.value, call.debt.value );\n BOOST_CHECK_EQUAL( 15000 - call_to_pay.value, call.collateral.value );\n // new collateral ratio should be higher than mcr as well as tcr\n BOOST_CHECK( call.debt.value * 10 * 1750 < call.collateral.value * 1000 );\n idump( (call) );\n // borrower's balance doesn't change\n BOOST_CHECK_EQUAL( init_balance - 15000, get_balance(borrower, core) 
);\n BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) );\n\n // the limit order then will match with call2, at mssp: 1/11 = 1000/11000\n const call_order_object* tmp_call2 = db.find( call2_id );\n BOOST_CHECK( tmp_call2 != nullptr );\n\n // call2 will receive call2_to_cover, pay 11*call2_to_cover\n share_type call2_to_pay = call2_to_cover * 11;\n BOOST_CHECK_EQUAL( 1000 - call2_to_cover.value, call2.debt.value );\n BOOST_CHECK_EQUAL( 15500 - call2_to_pay.value, call2.collateral.value );\n // new collateral ratio should be higher than mcr as well as tcr\n BOOST_CHECK( call2.debt.value * 10 * 2000 < call2.collateral.value * 1000 );\n idump( (call2) );\n // borrower2's balance doesn't change\n BOOST_CHECK_EQUAL( init_balance - 15500, get_balance(borrower2, core) );\n BOOST_CHECK_EQUAL( 0, get_balance(borrower2, bitusd) );\n\n // then it will match with buy_med, at buy_med's price. Since buy_med is too big, it's partially filled.\n // buy_med receives the remaining USD of sell order, minus market fees, goes to buyer2's balance\n share_type buy_med_get = 700*4 - 10 - call_to_cover - call2_to_cover;\n share_type buy_med_pay = buy_med_get * 11; // buy_med pays at 1/11\n buy_med_get -= (buy_med_get/100); // minus 1% market fee\n BOOST_CHECK_EQUAL( buy_med_get.value, get_balance(buyer2, bitusd) );\n BOOST_CHECK_EQUAL( init_balance - 33000, get_balance(buyer2, core) );\n BOOST_CHECK_EQUAL( db.find( buy_med )->for_sale.value, 33000-buy_med_pay.value );\n\n // call3 is not in margin call territory so won't be matched\n BOOST_CHECK_EQUAL( 1000, call3.debt.value );\n BOOST_CHECK_EQUAL( 25000, call3.collateral.value );\n\n // buy_low's price is too low that won't be matched\n BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 );\n\n // check seller balance\n BOOST_CHECK_EQUAL( 193, get_balance(seller, bitusd) ); // 3000 - 7 - 700*4\n BOOST_CHECK_EQUAL( 30801, get_balance(seller, core) ); // 111 + (700*4-10)*11\n\n // Cancel buy_med\n cancel_limit_order( buy_med(db) );\n 
BOOST_CHECK( !db.find( buy_med ) );\n BOOST_CHECK_EQUAL( buy_med_get.value, get_balance(buyer2, bitusd) );\n BOOST_CHECK_EQUAL( init_balance - buy_med_pay.value, get_balance(buyer2, core) );\n\n // Create another sell order slightly below the call price, won't fill\n limit_order_id_type sell_med = create_sell_order( seller, bitusd.amount(7), core.amount(59) )->id;\n BOOST_CHECK_EQUAL( db.find( sell_med )->for_sale.value, 7 );\n // check seller balance\n BOOST_CHECK_EQUAL( 193-7, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 30801, get_balance(seller, core) );\n\n // call3 is not in margin call territory so won't be matched\n BOOST_CHECK_EQUAL( 1000, call3.debt.value );\n BOOST_CHECK_EQUAL( 25000, call3.collateral.value );\n\n // buy_low's price is too low that won't be matched\n BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 );\n\n // generate a block\n generate_block();\n\n} FC_LOG_AND_RETHROW() }\n\n/***\n * BSIP38 \"target_collateral_ratio\" test: matching a maker limit order with multiple taker call orders\n */\nBOOST_AUTO_TEST_CASE(target_cr_test_call_limit)\n{ try {\n auto mi = db.get_global_properties().parameters.maintenance_interval;\n generate_blocks(HARDFORK_CORE_834_TIME - mi);\n generate_blocks(db.get_dynamic_global_properties().next_maintenance_time);\n\n set_expiration( db, trx );\n\n ACTORS((buyer)(seller)(borrower)(borrower2)(borrower3)(feedproducer));\n\n const auto& bitusd = create_bitasset(\"USDBIT\", feedproducer_id);\n const auto& core = asset_id_type()(db);\n\n int64_t init_balance(1000000);\n\n transfer(committee_account, buyer_id, asset(init_balance));\n transfer(committee_account, borrower_id, asset(init_balance));\n transfer(committee_account, borrower2_id, asset(init_balance));\n transfer(committee_account, borrower3_id, asset(init_balance));\n update_feed_producers( bitusd, {feedproducer.id} );\n\n price_feed current_feed;\n current_feed.maintenance_collateral_ratio = 1750;\n current_feed.maximum_short_squeeze_ratio = 
1100;\n current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(5);\n publish_feed( bitusd, feedproducer, current_feed );\n // start out with 300% collateral, call price is 15/1.75 CORE/USD = 60/7, tcr 170% is lower than 175%\n const call_order_object& call = *borrow( borrower, bitusd.amount(1000), asset(15000), 1700);\n call_order_id_type call_id = call.id;\n // create another position with 310% collateral, call price is 15.5/1.75 CORE/USD = 62/7, tcr 200% is higher than 175%\n const call_order_object& call2 = *borrow( borrower2, bitusd.amount(1000), asset(15500), 2000);\n call_order_id_type call2_id = call2.id;\n // create yet another position with 500% collateral, call price is 25/1.75 CORE/USD = 100/7, no tcr\n const call_order_object& call3 = *borrow( borrower3, bitusd.amount(1000), asset(25000));\n transfer(borrower, seller, bitusd.amount(1000));\n transfer(borrower2, seller, bitusd.amount(1000));\n transfer(borrower3, seller, bitusd.amount(1000));\n\n BOOST_CHECK_EQUAL( 1000, call.debt.value );\n BOOST_CHECK_EQUAL( 15000, call.collateral.value );\n BOOST_CHECK_EQUAL( 1000, call2.debt.value );\n BOOST_CHECK_EQUAL( 15500, call2.collateral.value );\n BOOST_CHECK_EQUAL( 1000, call3.debt.value );\n BOOST_CHECK_EQUAL( 25000, call3.collateral.value );\n BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 0, get_balance(seller, core) );\n BOOST_CHECK_EQUAL( 3000, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( init_balance - 15000, get_balance(borrower, core) );\n BOOST_CHECK_EQUAL( init_balance - 15500, get_balance(borrower2, core) );\n BOOST_CHECK_EQUAL( init_balance - 25000, get_balance(borrower3, core) );\n BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) );\n BOOST_CHECK_EQUAL( 0, get_balance(borrower2, bitusd) );\n BOOST_CHECK_EQUAL( 0, get_balance(borrower3, bitusd) );\n\n // This sell order above MSSP will not be matched with a call\n limit_order_id_type sell_high = create_sell_order(seller, bitusd.amount(7), 
core.amount(78))->id;\n BOOST_CHECK_EQUAL( db.find( sell_high )->for_sale.value, 7 );\n\n BOOST_CHECK_EQUAL( 2993, get_balance(seller, bitusd) );\n BOOST_CHECK_EQUAL( 0, get_balance(seller, core) );\n\n // This buy order is too low will not be matched with a sell order\n limit_order_id_type buy_low = create_sell_order(buyer, asset(80), bitusd.amount(10))->id;\n\n BOOST_CHECK_EQUAL( 0, get_balance(buyer, bitusd) );\n BOOST_CHECK_EQUAL( init_balance - 80, get_balance(buyer, core) );\n\n // Create a sell order which will be matched with several call orders later, price 1/9\n limit_order_id_type sell_id = create_sell_order(seller, bitusd.amount(500), core.amount(4500) )->id;\n BOOST_CHECK_EQUAL( db.find( sell_id )->for_sale.value, 500 );\n\n // prepare price feed to get call and call2 (but not call3) into margin call territory\n current_feed.settlement_price = bitusd.amount( 1 ) / core.amount(10);\n\n // call and call2's CR is quite high, and debt amount is quite a lot, assume neither of them will be completely filled\n price match_price = sell_id(db).sell_price;\n share_type call_to_cover = call_id(db).get_max_debt_to_cover(match_price,current_feed.settlement_price,1750);\n share_type call2_to_cover = call2_id(db).get_max_debt_to_cover(match_price,current_feed.settlement_price,1750);\n BOOST_CHECK_LT( call_to_cover.value, call_id(db).debt.value );\n BOOST_CHECK_LT( call2_to_cover.value, call2_id(db).debt.value );\n // even though call2 has a higher CR, since call's TCR is less than call2's TCR, so we expect call will cover less when called\n BOOST_CHECK_LT( call_to_cover.value, call2_to_cover.value );\n\n // adjust price feed to get call and call2 (but not call3) into margin call territory\n publish_feed( bitusd, feedproducer, current_feed );\n // settlement price = 1/10, mssp = 1/11\n\n // firstly the limit order will match with call, at limit order's price: 1/9\n const call_order_object* tmp_call = db.find( call_id );\n BOOST_CHECK( tmp_call != nullptr );\n\n // 
call will receive call_to_cover, pay 9*call_to_cover\n share_type call_to_pay = call_to_cover * 9;\n BOOST_CHECK_EQUAL( 1000 - call_to_cover.value, call.debt.value );\n BOOST_CHECK_EQUAL( 15000 - call_to_pay.value, call.collateral.value );\n // new collateral ratio should be higher than mcr as well as tcr\n BOOST_CHECK( call.debt.value * 10 * 1750 < call.collateral.value * 1000 );\n idump( (call) );\n // borrower's balance doesn't change\n BOOST_CHECK_EQUAL( init_balance - 15000, get_balance(borrower, core) );\n BOOST_CHECK_EQUAL( 0, get_balance(borrower, bitusd) );\n\n // the limit order then will match with call2, at limit order's price: 1/9\n const call_order_object* tmp_call2 = db.find( call2_id );\n BOOST_CHECK( tmp_call2 != nullptr );\n\n // if the limit is big enough, call2 will receive call2_to_cover, pay 11*call2_to_cover\n // however it's not the case, so call2 will receive less\n call2_to_cover = 500 - call_to_cover;\n share_type call2_to_pay = call2_to_cover * 9;\n BOOST_CHECK_EQUAL( 1000 - call2_to_cover.value, call2.debt.value );\n BOOST_CHECK_EQUAL( 15500 - call2_to_pay.value, call2.collateral.value );\n idump( (call2) );\n // borrower2's balance doesn't change\n BOOST_CHECK_EQUAL( init_balance - 15500, get_balance(borrower2, core) );\n BOOST_CHECK_EQUAL( 0, get_balance(borrower2, bitusd) );\n\n // call3 is not in margin call territory so won't be matched\n BOOST_CHECK_EQUAL( 1000, call3.debt.value );\n BOOST_CHECK_EQUAL( 25000, call3.collateral.value );\n\n // sell_id is completely filled\n BOOST_CHECK( !db.find( sell_id ) );\n\n // check seller balance\n BOOST_CHECK_EQUAL( 2493, get_balance(seller, bitusd) ); // 3000 - 7 - 500\n BOOST_CHECK_EQUAL( 4500, get_balance(seller, core) ); // 500*9\n\n // buy_low's price is too low that won't be matched\n BOOST_CHECK_EQUAL( db.find( buy_low )->for_sale.value, 80 );\n\n // generate a block\n generate_block();\n\n} FC_LOG_AND_RETHROW() }\n\nBOOST_AUTO_TEST_SUITE_END()\n", "meta": {"hexsha": 
"d5021880c4edb8e6d2222ebb2c0f51aa59f90952", "size": 77688, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/tests/market_tests.cpp", "max_stars_repo_name": "enumivo/eidos-core", "max_stars_repo_head_hexsha": "f614d87a8424253e9c677ef3818b495f5a5e755a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/tests/market_tests.cpp", "max_issues_repo_name": "enumivo/eidos-core", "max_issues_repo_head_hexsha": "f614d87a8424253e9c677ef3818b495f5a5e755a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/tests/market_tests.cpp", "max_forks_repo_name": "enumivo/eidos-core", "max_forks_repo_head_hexsha": "f614d87a8424253e9c677ef3818b495f5a5e755a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.1395973154, "max_line_length": 129, "alphanum_fraction": 0.7387369993, "num_tokens": 22155, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.5078118642792044, "lm_q2_score": 0.39233683016710835, "lm_q1q2_score": 0.19923329715255292}} {"text": "// Distributed under the MIT License.\n// See LICENSE.txt for details.\n\n#include \"Framework/TestingFramework.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \"DataStructures/Tensor/Tensor.hpp\"\n#include \"Domain/BoundaryConditions/BoundaryCondition.hpp\"\n#include \"Domain/CoordinateMaps/Affine.hpp\"\n#include \"Domain/CoordinateMaps/CoordinateMap.hpp\"\n#include \"Domain/CoordinateMaps/CoordinateMap.tpp\"\n#include \"Domain/CoordinateMaps/ProductMaps.hpp\"\n#include \"Domain/CoordinateMaps/ProductMaps.tpp\"\n#include \"Domain/Creators/AlignedLattice.hpp\"\n#include \"Domain/Creators/DomainCreator.hpp\"\n#include \"Domain/Domain.hpp\"\n#include \"Domain/OptionTags.hpp\"\n#include \"Framework/TestCreation.hpp\"\n#include \"Framework/TestHelpers.hpp\"\n#include \"Helpers/Domain/BoundaryConditions/BoundaryCondition.hpp\"\n#include \"Helpers/Domain/DomainTestHelpers.hpp\"\n#include \"Parallel/RegisterDerivedClassesWithCharm.hpp\"\n\nnamespace Frame {\nstruct Inertial;\n} // namespace Frame\n\nnamespace domain {\nnamespace {\ntemplate \nstd::unique_ptr\ncreate_boundary_condition() {\n return std::make_unique>(\n Direction::upper_xi(), 100);\n}\n\ntemplate \nvoid test_aligned_blocks(\n const creators::AlignedLattice& aligned_lattice,\n const std::unique_ptr&\n expected_boundary_condition) {\n Parallel::register_classes_with_charm(\n typename creators::AlignedLattice::maps_list{});\n\n const auto test_impl = [&expected_boundary_condition,\n &aligned_lattice](const auto& domain) {\n test_initial_domain(domain, aligned_lattice.initial_refinement_levels());\n\n if (expected_boundary_condition != nullptr) {\n const auto& blocks = domain.blocks();\n for (size_t block_id = 0; block_id < blocks.size(); ++block_id) {\n CAPTURE(block_id);\n const auto& block = domain.blocks()[block_id];\n 
REQUIRE(block.external_boundaries().size() ==\n block.external_boundary_conditions().size());\n for (const auto& direction : block.external_boundaries()) {\n CAPTURE(direction);\n REQUIRE(block.external_boundary_conditions().count(direction) == 1);\n REQUIRE(block.external_boundary_conditions().at(direction) !=\n nullptr);\n const auto& bc =\n dynamic_cast&>(\n *block.external_boundary_conditions().at(direction));\n const auto& expected_bc =\n dynamic_cast&>(\n *expected_boundary_condition);\n CHECK(bc.direction() == expected_bc.direction());\n CHECK(bc.block_id() == expected_bc.block_id());\n }\n }\n }\n };\n\n test_impl(aligned_lattice.create_domain());\n test_impl(serialize_and_deserialize(aligned_lattice.create_domain()));\n}\n\ntemplate \nauto make_domain_creator(const std::string& opt_string,\n const bool use_boundary_condition) {\n if (use_boundary_condition) {\n return TestHelpers::test_option_tag<\n domain::OptionTags::DomainCreator,\n TestHelpers::domain::BoundaryConditions::\n MetavariablesWithBoundaryConditions<\n VolumeDim, domain::creators::AlignedLattice>>(\n opt_string + std::string{\" BoundaryCondition:\\n\"\n \" TestBoundaryCondition:\\n\"\n \" Direction: upper-xi\\n\"\n \" BlockId: 100\\n\"});\n } else {\n return TestHelpers::test_option_tag<\n domain::OptionTags::DomainCreator,\n TestHelpers::domain::BoundaryConditions::\n MetavariablesWithoutBoundaryConditions<\n VolumeDim, domain::creators::AlignedLattice>>(\n opt_string);\n }\n}\n} // namespace\n\nSPECTRE_TEST_CASE(\"Unit.Domain.Creators.AlignedLattice\", \"[Domain][Unit]\") {\n TestHelpers::domain::BoundaryConditions::register_derived_with_charm();\n\n for (const bool use_boundary_condition : {true, false}) {\n const std::unique_ptr\n expected_boundary_condition_1d =\n use_boundary_condition ? create_boundary_condition<1>() : nullptr;\n const std::unique_ptr\n expected_boundary_condition_2d =\n use_boundary_condition ? 
create_boundary_condition<2>() : nullptr;\n const std::unique_ptr\n expected_boundary_condition_3d =\n use_boundary_condition ? create_boundary_condition<3>() : nullptr;\n\n const auto domain_creator_1d = make_domain_creator<1>(\n \"AlignedLattice:\\n\"\n \" BlockBounds: [[0.1, 2.6, 5.1, 5.2, 7.2]]\\n\" +\n std::string{use_boundary_condition ? \"\"\n : \" IsPeriodicIn: [false]\\n\"} +\n \" InitialGridPoints: [3]\\n\"\n \" InitialLevels: [2]\\n\"\n \" RefinedLevels: []\\n\"\n \" RefinedGridPoints: []\\n\"\n \" BlocksToExclude: []\\n\",\n use_boundary_condition);\n const auto* aligned_blocks_creator_1d =\n dynamic_cast*>(\n domain_creator_1d.get());\n test_aligned_blocks(*aligned_blocks_creator_1d,\n expected_boundary_condition_1d);\n\n const auto domain_creator_2d = make_domain_creator<2>(\n \"AlignedLattice:\\n\"\n \" BlockBounds: [[0.1, 2.6, 5.1], [-0.4, 3.2, 6.2, 8.9]]\\n\" +\n std::string{use_boundary_condition\n ? \"\"\n : \" IsPeriodicIn: [false, false]\\n\"} +\n \" InitialGridPoints: [3, 4]\\n\"\n \" InitialLevels: [2, 1]\\n\"\n \" RefinedLevels: []\\n\"\n \" RefinedGridPoints: []\\n\"\n \" BlocksToExclude: []\\n\",\n use_boundary_condition);\n const auto* aligned_blocks_creator_2d =\n dynamic_cast*>(\n domain_creator_2d.get());\n test_aligned_blocks(*aligned_blocks_creator_2d,\n expected_boundary_condition_2d);\n\n const auto domain_creator_3d = make_domain_creator<3>(\n \"AlignedLattice:\\n\"\n \" BlockBounds: [[0.1, 2.6, 5.1], [-0.4, 3.2, 6.2], [-0.2, 3.2]]\\n\" +\n std::string{use_boundary_condition\n ? 
\"\"\n : \" IsPeriodicIn: [false, false, false]\\n\"} +\n \" InitialGridPoints: [3, 4, 5]\\n\"\n \" InitialLevels: [2, 1, 0]\\n\"\n \" RefinedLevels: []\\n\"\n \" RefinedGridPoints: []\\n\"\n \" BlocksToExclude: []\\n\",\n use_boundary_condition);\n const auto* aligned_blocks_creator_3d =\n dynamic_cast*>(\n domain_creator_3d.get());\n test_aligned_blocks(*aligned_blocks_creator_3d,\n expected_boundary_condition_3d);\n\n const auto cubical_shell_domain = make_domain_creator<3>(\n \"AlignedLattice:\\n\"\n \" BlockBounds: [[0.1, 2.6, 5.1, 6.0], [-0.4, 3.2, 6.2, 7.0], \"\n \"[-0.2, 3.2, 4.0, 5.2]]\\n\" +\n std::string{use_boundary_condition\n ? \"\"\n : \" IsPeriodicIn: [false, false, false]\\n\"} +\n \" InitialGridPoints: [3, 4, 5]\\n\"\n \" InitialLevels: [2, 1, 0]\\n\"\n \" RefinedLevels: []\\n\"\n \" RefinedGridPoints: []\\n\"\n \" BlocksToExclude: [[1, 1, 1]]\\n\",\n use_boundary_condition);\n const auto* cubical_shell_creator_3d =\n dynamic_cast*>(\n cubical_shell_domain.get());\n test_aligned_blocks(*cubical_shell_creator_3d,\n expected_boundary_condition_3d);\n\n const auto unit_cubical_shell_domain = make_domain_creator<3>(\n \"AlignedLattice:\\n\"\n \" BlockBounds: [[-1.5, -0.5, 0.5, 1.5], [-1.5, -0.5, 0.5, 1.5], \"\n \"[-1.5, -0.5, 0.5, 1.5]]\\n\" +\n std::string{use_boundary_condition\n ? 
\"\"\n : \" IsPeriodicIn: [false, false, false]\\n\"} +\n \" InitialGridPoints: [5, 5, 5]\\n\"\n \" InitialLevels: [1, 1, 1]\\n\"\n \" RefinedLevels: []\\n\"\n \" RefinedGridPoints: []\\n\"\n \" BlocksToExclude: [[1, 1, 1]]\\n\",\n use_boundary_condition);\n const auto* unit_cubical_shell_creator_3d =\n dynamic_cast*>(\n unit_cubical_shell_domain.get());\n test_aligned_blocks(*unit_cubical_shell_creator_3d,\n expected_boundary_condition_3d);\n }\n\n const auto domain_creator_2d_periodic = TestHelpers::test_option_tag<\n domain::OptionTags::DomainCreator<2>,\n TestHelpers::domain::BoundaryConditions::\n MetavariablesWithoutBoundaryConditions<\n 2, domain::creators::AlignedLattice<2>>>(\n \"AlignedLattice:\\n\"\n \" BlockBounds: [[0.1, 2.6, 5.1], [-0.4, 3.2, 6.2, 8.9]]\\n\"\n \" IsPeriodicIn: [false, true]\\n\"\n \" InitialGridPoints: [3, 4]\\n\"\n \" InitialLevels: [2, 1]\\n\"\n \" RefinedLevels: []\\n\"\n \" RefinedGridPoints: []\\n\"\n \" BlocksToExclude: []\\n\");\n const auto* aligned_blocks_creator_2d_periodic =\n dynamic_cast*>(\n domain_creator_2d_periodic.get());\n test_aligned_blocks(*aligned_blocks_creator_2d_periodic, nullptr);\n\n const auto domain_creator_3d_periodic = TestHelpers::test_option_tag<\n domain::OptionTags::DomainCreator<3>,\n TestHelpers::domain::BoundaryConditions::\n MetavariablesWithoutBoundaryConditions<\n 3, domain::creators::AlignedLattice<3>>>(\n \"AlignedLattice:\\n\"\n \" BlockBounds: [[0.1, 2.6, 5.1], [-0.4, 3.2, 6.2], [-0.2, 3.2]]\\n\"\n \" IsPeriodicIn: [false, true, false]\\n\"\n \" InitialGridPoints: [3, 4, 5]\\n\"\n \" InitialLevels: [2, 1, 0]\\n\"\n \" RefinedLevels: []\\n\"\n \" RefinedGridPoints: []\\n\"\n \" BlocksToExclude: []\\n\");\n const auto* aligned_blocks_creator_3d_periodic =\n dynamic_cast*>(\n domain_creator_3d_periodic.get());\n test_aligned_blocks(*aligned_blocks_creator_3d_periodic, nullptr);\n\n {\n // Expected domain refinement:\n // 23 23 67\n // 23 45 67\n // 23 XX 45\n const auto refined_domain = 
TestHelpers::test_option_tag<\n domain::OptionTags::DomainCreator<2>,\n TestHelpers::domain::BoundaryConditions::\n MetavariablesWithoutBoundaryConditions<\n 2, domain::creators::AlignedLattice<2>>>(\n \"AlignedLattice:\\n\"\n \" BlockBounds: [[70, 71, 72, 73], [90, 91, 92, 93]]\\n\"\n \" IsPeriodicIn: [false, false]\\n\"\n \" InitialGridPoints: [2, 3]\\n\"\n \" InitialLevels: [0, 0]\\n\"\n \" BlocksToExclude: [[1, 0]]\\n\"\n \" RefinedLevels: []\\n\"\n \" RefinedGridPoints:\\n\"\n \" - LowerCornerIndex: [1, 0]\\n\"\n \" UpperCornerIndex: [3, 2]\\n\"\n \" Refinement: [4, 5]\\n\"\n \" - LowerCornerIndex: [2, 1]\\n\"\n \" UpperCornerIndex: [3, 3]\\n\"\n \" Refinement: [6, 7]\");\n std::unordered_set<\n std::pair, std::array>,\n boost::hash, std::array>>>\n expected_blocks{{{70.0, 90.0}, {{2, 3}}}, {{72.0, 90.0}, {{4, 5}}},\n {{70.0, 91.0}, {{2, 3}}}, {{71.0, 91.0}, {{4, 5}}},\n {{72.0, 91.0}, {{6, 7}}}, {{70.0, 92.0}, {{2, 3}}},\n {{71.0, 92.0}, {{2, 3}}}, {{72.0, 92.0}, {{6, 7}}}};\n const auto domain = refined_domain->create_domain();\n test_initial_domain(domain, refined_domain->initial_refinement_levels());\n\n const auto& blocks = domain.blocks();\n const auto extents = refined_domain->initial_extents();\n REQUIRE(blocks.size() == extents.size());\n for (size_t i = 0; i < blocks.size(); ++i) {\n const auto location =\n blocks[i]\n .stationary_map()(\n tnsr::I{{{-1.0, -1.0}}})\n .get_vector_of_data()\n .second;\n INFO(\"Unexpected block\");\n CAPTURE(location);\n CAPTURE(extents[i]);\n CHECK(expected_blocks.erase({location, extents[i]}) == 1);\n }\n CAPTURE(expected_blocks);\n CHECK(expected_blocks.empty());\n }\n\n {\n // Expected domain refinement:\n // 25 25 46\n // 25 35 46\n // 25 XX 35\n const auto refined_domain = TestHelpers::test_option_tag<\n domain::OptionTags::DomainCreator<2>,\n TestHelpers::domain::BoundaryConditions::\n MetavariablesWithoutBoundaryConditions<\n 2, domain::creators::AlignedLattice<2>>>(\n \"AlignedLattice:\\n\"\n \" BlockBounds: 
[[70, 71, 72, 73], [90, 91, 92, 93]]\\n\"\n \" IsPeriodicIn: [false, false]\\n\"\n \" InitialGridPoints: [10, 10]\\n\"\n \" InitialLevels: [2, 5]\\n\"\n \" BlocksToExclude: [[1, 0]]\\n\"\n \" RefinedGridPoints: []\\n\"\n \" RefinedLevels:\\n\"\n \" - LowerCornerIndex: [1, 0]\\n\"\n \" UpperCornerIndex: [3, 2]\\n\"\n \" Refinement: [3, 5]\\n\"\n \" - LowerCornerIndex: [2, 1]\\n\"\n \" UpperCornerIndex: [3, 3]\\n\"\n \" Refinement: [4, 6]\");\n std::unordered_set<\n std::pair, std::array>,\n boost::hash, std::array>>>\n expected_blocks{{{70.0, 90.0}, {{2, 5}}}, {{72.0, 90.0}, {{3, 5}}},\n {{70.0, 91.0}, {{2, 5}}}, {{71.0, 91.0}, {{3, 5}}},\n {{72.0, 91.0}, {{4, 6}}}, {{70.0, 92.0}, {{2, 5}}},\n {{71.0, 92.0}, {{2, 5}}}, {{72.0, 92.0}, {{4, 6}}}};\n const auto domain = refined_domain->create_domain();\n const auto refinement_levels = refined_domain->initial_refinement_levels();\n test_initial_domain(domain, refinement_levels);\n\n const auto& blocks = domain.blocks();\n REQUIRE(blocks.size() == refinement_levels.size());\n for (size_t i = 0; i < blocks.size(); ++i) {\n const auto location =\n blocks[i]\n .stationary_map()(\n tnsr::I{{{-1.0, -1.0}}})\n .get_vector_of_data()\n .second;\n INFO(\"Unexpected block\");\n CAPTURE(location);\n CAPTURE(refinement_levels[i]);\n CHECK(expected_blocks.erase({location, refinement_levels[i]}) == 1);\n }\n CAPTURE(expected_blocks);\n CHECK(expected_blocks.empty());\n }\n\n CHECK_THROWS_WITH(\n creators::AlignedLattice<3>({{{{-1.5, -0.5, 0.5, 1.5}},\n {{1.5, -0.5, 0.5, 1.5}},\n {{-1.5, -0.5, 0.5, 1.5}}}},\n {{1, 1, 1}}, {{5, 5, 5}}, {}, {},\n {{{{1, 1, 1}}}}, {{true, false, false}},\n Options::Context{false, {}, 1, 1}),\n Catch::Matchers::Contains(\n \"Cannot exclude blocks as well as have periodic boundary\"));\n CHECK_THROWS_WITH(\n creators::AlignedLattice<3>({{{{-1.5, -0.5, 0.5, 1.5}},\n {{1.5, -0.5, 0.5, 1.5}},\n {{-1.5, -0.5, 0.5, 1.5}}}},\n {{1, 1, 1}}, {{5, 5, 5}}, {}, {},\n {{{{1, 1, 1}}}}, {{false, true, false}},\n 
Options::Context{false, {}, 1, 1}),\n Catch::Matchers::Contains(\n \"Cannot exclude blocks as well as have periodic boundary\"));\n CHECK_THROWS_WITH(\n creators::AlignedLattice<3>({{{{-1.5, -0.5, 0.5, 1.5}},\n {{1.5, -0.5, 0.5, 1.5}},\n {{-1.5, -0.5, 0.5, 1.5}}}},\n {{1, 1, 1}}, {{5, 5, 5}}, {}, {},\n {{{{1, 1, 1}}}}, {{true, false, true}},\n Options::Context{false, {}, 1, 1}),\n Catch::Matchers::Contains(\n \"Cannot exclude blocks as well as have periodic boundary\"));\n CHECK_THROWS_WITH(\n creators::AlignedLattice<3>(\n {{{{-1.5, -0.5, 0.5, 1.5}},\n {{1.5, -0.5, 0.5, 1.5}},\n {{-1.5, -0.5, 0.5, 1.5}}}},\n {{1, 1, 1}}, {{5, 5, 5}}, {}, {}, {{{{1, 1, 1}}}},\n std::make_unique>(),\n Options::Context{false, {}, 1, 1}),\n Catch::Matchers::Contains(\n \"Cannot exclude blocks as well as have periodic boundary\"));\n CHECK_THROWS_WITH(\n creators::AlignedLattice<3>(\n {{{{-1.5, -0.5, 0.5, 1.5}},\n {{1.5, -0.5, 0.5, 1.5}},\n {{-1.5, -0.5, 0.5, 1.5}}}},\n {{1, 1, 1}}, {{5, 5, 5}}, {}, {}, {{{{1, 1, 1}}}},\n std::make_unique>(),\n Options::Context{false, {}, 1, 1}),\n Catch::Matchers::Contains(\n \"None boundary condition is not supported. 
If you would like an \"\n \"outflow boundary condition, you must use that.\"));\n}\n} // namespace domain\n", "meta": {"hexsha": "4b4c9cc040e4553a67e63068de5b275dce2efee2", "size": 17842, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/Unit/Domain/Creators/Test_AlignedLattice.cpp", "max_stars_repo_name": "nilsvu/spectre", "max_stars_repo_head_hexsha": "1455b9a8d7e92db8ad600c66f54795c29c3052ee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 117.0, "max_stars_repo_stars_event_min_datetime": "2017-04-08T22:52:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T07:23:36.000Z", "max_issues_repo_path": "tests/Unit/Domain/Creators/Test_AlignedLattice.cpp", "max_issues_repo_name": "GitHimanshuc/spectre", "max_issues_repo_head_hexsha": "4de4033ba36547113293fe4dbdd77591485a4aee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3177.0, "max_issues_repo_issues_event_min_datetime": "2017-04-07T21:10:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:55:59.000Z", "max_forks_repo_path": "tests/Unit/Domain/Creators/Test_AlignedLattice.cpp", "max_forks_repo_name": "geoffrey4444/spectre", "max_forks_repo_head_hexsha": "9350d61830b360e2d5b273fdd176dcc841dbefb0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 85.0, "max_forks_repo_forks_event_min_datetime": "2017-04-07T19:36:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T10:21:00.000Z", "avg_line_length": 43.3058252427, "max_line_length": 79, "alphanum_fraction": 0.5636699922, "num_tokens": 4867, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.5078118642792044, "lm_q2_score": 0.39233683016710835, "lm_q1q2_score": 0.19923329715255292}} {"text": "/*\n Copyright (C) 2017 Quaternion Risk Management Ltd\n All rights reserved.\n\n This file is part of ORE, a free-software/open-source library\n for transparent pricing and risk analysis - http://opensourcerisk.org\n\n ORE is free software: you can redistribute it and/or modify it\n under the terms of the Modified BSD License. You should have received a\n copy of the license along with this program.\n The license is also available online at \n\n This program is distributed on the basis that it will form a useful\n contribution to risk analytics and model standardisation, but WITHOUT\n ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n FITNESS FOR A PARTICULAR PURPOSE. See the license for more details.\n*/\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace QuantLib;\nusing namespace QuantExt;\n\nnamespace ore {\nnamespace data {\n\nvoid CreditDefaultSwapData::fromXML(XMLNode* node) {\n XMLUtils::checkNode(node, \"CreditDefaultSwapData\");\n issuerId_ = XMLUtils::getChildValue(node, \"IssuerId\");\n creditCurveId_ = XMLUtils::getChildValue(node, \"CreditCurveId\", true);\n settlesAccrual_ = XMLUtils::getChildValueAsBool(node, \"SettlesAccrual\", false); // default = Y\n paysAtDefaultTime_ = XMLUtils::getChildValueAsBool(node, \"PaysAtDefaultTime\", false); // default = Y\n XMLNode* tmp = XMLUtils::getChildNode(node, \"ProtectionStart\");\n if (tmp)\n protectionStart_ = parseDate(XMLUtils::getNodeValue(tmp)); // null date if empty or missing\n else\n protectionStart_ = Date();\n tmp = XMLUtils::getChildNode(node, \"UpfrontDate\");\n if (tmp)\n upfrontDate_ = parseDate(XMLUtils::getNodeValue(tmp)); // null date if empty or mssing\n else\n upfrontDate_ = Date();\n upfrontFee_ = parseReal(XMLUtils::getChildValue(node, \"UpfrontFee\", false)); // zero if empty or missing\n if (upfrontDate_ == Date()) 
{\n QL_REQUIRE(close_enough(upfrontFee_, 0.0), \"CreditDefaultSwapData::fromXML(): UpfronFee not zero (\"\n << upfrontFee_ << \"), but no upfront data given\");\n upfrontFee_ = Null();\n }\n leg_.fromXML(XMLUtils::getChildNode(node, \"LegData\"));\n}\n\nXMLNode* CreditDefaultSwapData::toXML(XMLDocument& doc) {\n XMLNode* node = doc.allocNode(\"CreditDefaultSwapData\");\n XMLUtils::addChild(doc, node, \"IssuerId\", issuerId_);\n XMLUtils::addChild(doc, node, \"CreditCurveId\", creditCurveId_);\n XMLUtils::addChild(doc, node, \"SettlesAccrual\", settlesAccrual_);\n XMLUtils::addChild(doc, node, \"PaysAtDefaultTime\", paysAtDefaultTime_);\n if (protectionStart_ != Date()) {\n std::ostringstream tmp;\n tmp << QuantLib::io::iso_date(protectionStart_);\n XMLUtils::addChild(doc, node, \"ProtectionStart\", tmp.str());\n }\n if (upfrontDate_ != Date()) {\n std::ostringstream tmp;\n tmp << QuantLib::io::iso_date(upfrontDate_);\n XMLUtils::addChild(doc, node, \"UpfrontDate\", tmp.str());\n }\n if (upfrontFee_ != Null())\n XMLUtils::addChild(doc, node, \"UpfrontFee\", upfrontFee_);\n XMLUtils::appendNode(node, leg_.toXML(doc));\n return node;\n}\n} // namespace data\n} // namespace ore\n", "meta": {"hexsha": "66f3ff6ee7f735e12803cdf817bcb7e3eb433f4e", "size": 3368, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "OREData/ored/portfolio/creditdefaultswapdata.cpp", "max_stars_repo_name": "paul-giltinan/Engine", "max_stars_repo_head_hexsha": "49b6e142905ca2cce93c2ae46e9ac69380d9f7a1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "OREData/ored/portfolio/creditdefaultswapdata.cpp", "max_issues_repo_name": "paul-giltinan/Engine", "max_issues_repo_head_hexsha": "49b6e142905ca2cce93c2ae46e9ac69380d9f7a1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, 
"max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "OREData/ored/portfolio/creditdefaultswapdata.cpp", "max_forks_repo_name": "paul-giltinan/Engine", "max_forks_repo_head_hexsha": "49b6e142905ca2cce93c2ae46e9ac69380d9f7a1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.6329113924, "max_line_length": 108, "alphanum_fraction": 0.700415677, "num_tokens": 827, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO\n\n", "lm_q1_score": 0.5078118642792044, "lm_q2_score": 0.39233683016710835, "lm_q1q2_score": 0.19923329715255292}} {"text": "#include \"qpainterwrapper.h\"\n#include \n#include \n#include \n\nQPainterWrapper::QPainterWrapper(Gui::CanvasRenderer *p)\n : p(p),\n mTimeStart(0.0),\n mTimeEnd(5.0),\n mFrequencyScale(FrequencyScale::Mel),\n mMinFrequency(60),\n mMaxFrequency(8000),\n mMaxGain(0)\n{\n}\n\nQRect QPainterWrapper::viewport() const\n{\n return p->viewport();\n}\n\nvoid QPainterWrapper::setZoom(double scale)\n{\n p->setZoomScale(scale);\n}\n\nvoid QPainterWrapper::setTimeRange(double start, double end)\n{\n mTimeStart = start;\n mTimeEnd = end;\n}\n\nvoid QPainterWrapper::setFrequencyScale(FrequencyScale scale)\n{\n mFrequencyScale = scale;\n}\n\nvoid QPainterWrapper::setMinFrequency(double minFrequency)\n{\n mMinFrequency = minFrequency;\n}\n\nvoid QPainterWrapper::setMaxFrequency(double maxFrequency)\n{\n mMaxFrequency = maxFrequency;\n}\n\nvoid QPainterWrapper::setMaxGain(double maxGain)\n{\n mMaxGain = maxGain;\n}\n\ndouble QPainterWrapper::transformFrequency(double frequency)\n{\n return transformFrequency(frequency, mFrequencyScale);\n}\n\ndouble QPainterWrapper::inverseFrequency(double value)\n{\n return inverseFrequency(value, mFrequencyScale);\n}\n\ndouble QPainterWrapper::mapTimeToX(double time)\n{\n return mapTimeToX(time, p->viewport().width(), mTimeStart, 
mTimeEnd);\n}\n\ndouble QPainterWrapper::mapFrequencyToY(double frequency)\n{\n return mapFrequencyToY(frequency, p->viewport().height(), mFrequencyScale, mMinFrequency, mMaxFrequency);\n}\n\nstatic std::string numberToString(double val)\n{\n std::stringstream ss;\n ss << val;\n return ss.str();\n}\n\nvoid QPainterWrapper::drawTimeAxis()\n{\n rpm::vector majorTicks;\n rpm::vector minorTicks;\n rpm::vector minorMinorTicks;\n\n int timeStart = std::floor(mTimeStart);\n int timeEnd = std::ceil(mTimeEnd);\n\n for (int timeInt = timeStart; timeInt <= timeEnd; ++timeInt) {\n majorTicks.push_back(timeInt);\n \n // No need to be so detailed for negative time stamps.\n if (timeInt < 0)\n continue;\n\n for (int division = 1; division <= 9; ++division) {\n const double time = timeInt + division / 10.0;\n if (division == 5)\n minorTicks.push_back(time);\n else\n minorMinorTicks.push_back(time);\n }\n }\n\n int y1 = viewport().height();\n std::vector bits(viewport().width(), false);\n \n for (const double val : majorTicks) {\n const double x = mapTimeToX(val);\n const auto valstr = numberToString(val);\n QRect rect = p->textBoundsSmall(valstr);\n rect.translate(x - rect.width() / 2, y1 - 10);\n bool covered = false;\n for (int tx = rect.x(); tx <= rect.x() + rect.width(); ++tx) {\n if (tx >= 0 && tx < bits.size()\n && bits[tx]) {\n covered = true;\n break;\n }\n }\n p->drawLine(x, y1, x, y1 - 8, Qt::white, 3);\n if (!covered && val >= 0) {\n p->drawTextSmallOutlined(x - rect.width() / 2, y1 - 10, Qt::white, valstr, Qt::black);\n for (int tx = rect.x(); tx <= rect.x() + rect.width(); ++tx) {\n if (tx >= 0 && tx < bits.size())\n bits[tx] = true;\n }\n }\n }\n\n for (const double val : minorTicks) {\n const double x = mapTimeToX(val);\n const auto valstr = numberToString(val);\n QRect rect = p->textBoundsSmaller(valstr);\n rect.translate(x - rect.width() / 2, y1 - 10);\n bool covered = false;\n for (int tx = rect.x(); tx <= rect.x() + rect.width(); ++tx) {\n if (tx >= 0 && tx < 
bits.size()\n && bits[tx]) {\n covered = true;\n break;\n }\n }\n p->drawLine(x, y1, x, y1 - 4, Qt::white, 2);\n if (!covered) {\n p->drawTextSmallerOutlined(x - rect.width() / 2, y1 - 10, Qt::white, valstr, Qt::black);\n for (int tx = rect.x(); tx <= rect.x() + rect.width(); ++tx) {\n if (tx >= 0 && tx < bits.size())\n bits[tx] = true;\n }\n }\n }\n \n for (const double val : minorMinorTicks) {\n const double x = mapTimeToX(val);\n p->drawLine(x, y1, x, y1 - 2, Qt::white, 1.5);\n }\n}\n\nvoid QPainterWrapper::drawFrequencyScale()\n{\n rpm::vector majorTicks;\n rpm::vector minorTicks;\n rpm::vector minorMinorTicks;\n\n if (mFrequencyScale == FrequencyScale::Linear) {\n int loFreq = std::floor(mMinFrequency / 1000) * 1000;\n int hiFreq = std::ceil(mMaxFrequency / 1000) * 1000;\n\n for (int freqInt = loFreq; freqInt <= hiFreq; freqInt += 1000) {\n majorTicks.push_back(freqInt);\n\n for (int division = 1; division <= 9; ++division) {\n const double freq = freqInt + division * 100.0;\n if (division == 5)\n minorTicks.push_back(freq);\n else\n minorMinorTicks.push_back(freq);\n }\n }\n }\n else {\n double loLog = log10(mMinFrequency);\n double hiLog = log10(mMaxFrequency);\n int loDecade = (int) floor(loLog);\n\n double val;\n double startDecade = pow(10.0, (double) loDecade);\n\n // Major ticks are the decades.\n double decade = startDecade;\n double delta = hiLog - loLog, steps = fabs(delta);\n double step = delta >= 0 ? 10 : 0.1;\n double rMin = std::min(mMinFrequency, mMaxFrequency);\n double rMax = std::max(mMinFrequency, mMaxFrequency);\n for (int i = 0; i <= steps; ++i) { \n val = decade;\n if (val >= rMin && val < rMax) {\n majorTicks.push_back(val);\n }\n decade *= step;\n }\n\n // Minor ticks are multiple of decades.\n decade = startDecade;\n float start, end, mstep;\n if (delta > 0) {\n start = 2; end = 9; mstep = 1;\n }\n else {\n start = 9; end = 2; mstep = -1;\n }\n ++steps;\n for (int i = 0; i <= steps; ++i) {\n for (int j = start; mstep > 0 ? 
j <= end : j >= end; j += mstep) {\n val = decade * j;\n if (val >= rMin && val < rMax) {\n minorTicks.push_back(val);\n }\n }\n decade *= step;\n }\n\n // MinorMinor ticks are multiple of decades.\n decade = startDecade;\n if (delta > 0) {\n start = 10; end = 100; mstep = 1;\n }\n else {\n start = 100; end = 10; mstep = -1;\n }\n ++steps;\n for (int i = 0; i <= steps; ++i) {\n if (decade >= 10.0) {\n for (int f = start; mstep > 0 ? f <= end : f >= end; f += mstep) {\n if ((int) (f / 10) != f / 10.0) {\n val = decade * f / 10;\n if (val >= rMin && val < rMax) {\n minorMinorTicks.push_back(val);\n }\n }\n }\n }\n decade *= step;\n }\n }\n\n int x1 = viewport().width();\n std::vector bits(viewport().height(), false);\n \n for (const double val : majorTicks) {\n const double y = mapFrequencyToY(val);\n const auto valstr = numberToString(val);\n QRect rect = p->textBoundsNormal(valstr);\n rect.translate(x1 - 12 - rect.width(), y + rect.height() / 2);\n bool covered = false;\n for (int ty = rect.y(); ty <= rect.y() + rect.height(); ++ty) {\n if (ty >= 0 && ty < bits.size()\n && bits[ty]) {\n covered = true;\n break;\n }\n }\n if (covered) {\n continue;\n }\n p->drawLine(x1 - 8, y, x1, y, Qt::white, 3);\n p->drawTextNormalOutlined(rect.x(), rect.y(), Qt::white, valstr, Qt::black);\n for (int ty = rect.y(); ty <= rect.y() + rect.height(); ++ty) {\n if (ty >= 0 && ty < bits.size())\n bits[ty] = true;\n }\n }\n\n for (const double val : minorTicks) {\n const double y = mapFrequencyToY(val);\n const auto valstr = numberToString(val);\n QRect rect = p->textBoundsSmall(valstr);\n rect.translate(x1 - 12 - rect.width(), y + rect.height() / 2);\n bool covered = false;\n for (int ty = rect.y(); ty <= rect.y() + rect.height(); ++ty) {\n if (ty >= 0 && ty < bits.size()\n && bits[ty]) {\n covered = true;\n break;\n }\n }\n if (covered) {\n continue;\n }\n p->drawLine(x1 - 6, y, x1, y, Qt::white, 2);\n p->drawTextSmallOutlined(rect.x(), rect.y(), Qt::white, valstr, Qt::black);\n for 
(int ty = rect.y(); ty <= rect.y() + rect.height(); ++ty) {\n if (ty >= 0 && ty < bits.size())\n bits[ty] = true;\n }\n }\n\n for (const double val : minorMinorTicks) {\n const double y = mapFrequencyToY(val);\n const auto valstr = numberToString(val);\n QRect rect = p->textBoundsSmaller(valstr);\n rect.translate(x1 - 12 - rect.width(), y + rect.height() / 2);\n bool covered = false;\n for (int ty = rect.y(); ty <= rect.y() + rect.height(); ++ty) {\n if (ty >= 0 && ty < bits.size()\n && bits[ty]) {\n covered = true;\n break;\n }\n }\n if (covered) {\n continue;\n }\n p->drawLine(x1 - 4, y, x1, y, Qt::white, 2);\n p->drawTextSmallerOutlined(rect.x(), rect.y(), Qt::white, valstr, Qt::black);\n for (int ty = rect.y(); ty <= rect.y() + rect.height(); ++ty) {\n if (ty >= 0 && ty < bits.size())\n bits[ty] = true;\n }\n }\n}\n\nvoid QPainterWrapper::drawFrequencyTrack(\n const TimeTrack::const_iterator& begin,\n const TimeTrack::const_iterator& end,\n float radius,\n const QColor &color)\n{\n rpm::vector points;\n\n for (auto it = begin; it != end; ++it) {\n double time = it->first;\n double pitch = it->second;\n\n double x = mapTimeToX(time);\n double y = mapFrequencyToY(pitch);\n\n points.emplace_back(x, y);\n }\n\n p->drawScatterWithOutline(points, radius, color);\n}\n\nvoid QPainterWrapper::drawFrequencyTrack(\n const OptionalTimeTrack::const_iterator& begin,\n const OptionalTimeTrack::const_iterator& end,\n float radius,\n const QColor &color)\n{\n rpm::vector points;\n\n for (auto it = begin; it != end; ++it) {\n if (it->second.has_value()) {\n double time = it->first;\n double pitch = *(it->second);\n\n double x = mapTimeToX(time);\n double y = mapFrequencyToY(pitch);\n\n points.emplace_back(x, y);\n }\n }\n\n p->drawScatterWithOutline(points, radius, color);\n}\n\n", "meta": {"hexsha": "a3a22c3bb7ef5e09b21227625a9b7844be837390", "size": 11140, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/gui/qpainterwrapper.cpp", "max_stars_repo_name": 
"alargepileofash/in-formant", "max_stars_repo_head_hexsha": "3fc77925b68e349b96d7cf20c00223a4b343d04d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 55.0, "max_stars_repo_stars_event_min_datetime": "2020-10-07T20:22:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-28T10:58:36.000Z", "max_issues_repo_path": "src/gui/qpainterwrapper.cpp", "max_issues_repo_name": "alargepileofash/in-formant", "max_issues_repo_head_hexsha": "3fc77925b68e349b96d7cf20c00223a4b343d04d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 16.0, "max_issues_repo_issues_event_min_datetime": "2020-12-06T22:02:38.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-19T09:37:56.000Z", "max_forks_repo_path": "src/gui/qpainterwrapper.cpp", "max_forks_repo_name": "alargepileofash/in-formant", "max_forks_repo_head_hexsha": "3fc77925b68e349b96d7cf20c00223a4b343d04d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 11.0, "max_forks_repo_forks_event_min_datetime": "2019-12-16T16:06:19.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-15T15:28:31.000Z", "avg_line_length": 30.2717391304, "max_line_length": 109, "alphanum_fraction": 0.5129263914, "num_tokens": 2862, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.5078118642792044, "lm_q2_score": 0.3923368301671083, "lm_q1q2_score": 0.1992332971525529}} {"text": "/*\n * Copyright (C) 2018 Naomasa Matsubayashi\n * Licensed under MIT license, see file LICENSE in this source tree.\n */\n#ifndef UWG_CONFIG_HPP\n#define UWG_CONFIG_HPP\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace uwg {\n struct invalid_config {};\n struct invalid_key {};\n struct invalid_endpoint {};\n struct invalid_listen_port {};\n struct config_t {\n config_t( boost::asio::io_service &io_service, const std::string &filename ) : self_static_public( wg_key_len, 0 ), self_port( 0 ) {\n boost::property_tree::ptree root;\n boost::property_tree::read_ini( filename, root );\n const auto self_static_private_serialized = root.get_optional< std::string >( \"Interface.PrivateKey\" );\n if( !self_static_private_serialized )\n throw invalid_key();\n const auto listen_port = root.get_optional< uint16_t >( \"Interface.ListenPort\" );\n if( !listen_port )\n throw invalid_listen_port();\n const auto remote_static_public_serialized = root.get_optional< std::string >( \"Peer.PublicKey\" );\n if( !remote_static_public_serialized )\n throw invalid_key();\n const auto remote_address = root.get_optional< std::string >( \"Peer.Endpoint\" );\n if( !remote_address )\n throw invalid_endpoint();\n parse_key( self_static_private_serialized->begin(), self_static_private_serialized->end(), std::back_inserter( self_static_private ) );\n parse_key( remote_static_public_serialized->begin(), remote_static_public_serialized->end(), std::back_inserter( remote_static_public ) );\n if( self_static_private.size() != wg_key_len )\n throw invalid_key();\n if( remote_static_public.size() != wg_key_len )\n throw invalid_key();\n if( crypto_scalarmult_base( self_static_public.data(), self_static_private.data() ) != 0 )\n throw scalar_mult_failed();\n const auto remote_address_sep = remote_address->find( ':' );\n 
if( remote_address_sep == std::string::npos )\n throw invalid_endpoint();\n remote_host = remote_address->substr( 0, remote_address_sep );\n remote_port = remote_address->substr( remote_address_sep + 1 );\n boost::asio::ip::udp::resolver resolver( io_service );\n boost::asio::ip::udp::resolver::query query( remote_host, remote_port );\n remote_endpoint = *resolver.resolve( query );\n self_port = *listen_port;\n }\n wg_key_type self_static_private;\n wg_key_type self_static_public;\n wg_key_type remote_static_public;\n std::string remote_host;\n boost::asio::ip::udp::endpoint remote_endpoint;\n uint16_t self_port;\n std::string remote_port;\n };\n}\n\n#endif\n\n", "meta": {"hexsha": "40a7e0f928f390ca119b94b8f112f35a5de063c3", "size": 2877, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/uwg/config.hpp", "max_stars_repo_name": "Fadis/userspace_wireguard", "max_stars_repo_head_hexsha": "13daa4759c4d96d0e9a112d8c35f680681a6687d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11.0, "max_stars_repo_stars_event_min_datetime": "2018-07-21T04:46:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-02T06:04:32.000Z", "max_issues_repo_path": "include/uwg/config.hpp", "max_issues_repo_name": "Fadis/userspace_wireguard", "max_issues_repo_head_hexsha": "13daa4759c4d96d0e9a112d8c35f680681a6687d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/uwg/config.hpp", "max_forks_repo_name": "Fadis/userspace_wireguard", "max_forks_repo_head_hexsha": "13daa4759c4d96d0e9a112d8c35f680681a6687d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.6956521739, "max_line_length": 144, "alphanum_fraction": 0.7097671185, "num_tokens": 666, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.588889130767832, "lm_q2_score": 0.33807711748081287, "lm_q1q2_score": 0.19908993984577011}} {"text": "// Author(s): Wieger Wesselink\n// Copyright: see the accompanying file COPYING or copy at\n// https://github.com/mCRL2org/mCRL2/blob/master/COPYING\n//\n// Distributed under the Boost Software License, Version 1.0.\n// (See accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n//\n/// \\file txt2pbes_test.cpp\n/// \\brief Add your file description here.\n\n#define BOOST_TEST_MODULE txt2pbes_test\n#include \n#include \"mcrl2/pbes/txt2pbes.h\"\n\nusing namespace mcrl2;\nusing namespace mcrl2::pbes_system;\n\nconst std::string PBESSPEC1 =\n \"pbes nu X(b: Bool) = exists n: Nat. Y(n) && val(b); \\n\"\n \" mu Y(n: Nat) = X(n >= 10); \\n\"\n \" \\n\"\n \"init X(true); \\n\"\n ;\n\nconst std::string PBESSPEC2 =\n \"sort DATA = struct d1 | d2; \\n\"\n \" Enum3 = struct e2_3 | e1_3 | e0_3; \\n\"\n \" Frame = struct frame(getd: DATA, getb: DATA); \\n\"\n \" \\n\"\n \"glob dc: Frame; \\n\"\n \" \\n\"\n \"pbes nu X(s30_K: Pos, f_K: Frame) = \\n\"\n \" X(1, f_K); \\n\"\n \" \\n\"\n \"init X(1, dc); \\n\"\n ;\n\nBOOST_AUTO_TEST_CASE(test_txt2pbes)\n{\n pbes p;\n p = txt2pbes(PBESSPEC1);\n BOOST_CHECK(p.is_well_typed());\n p = txt2pbes(PBESSPEC2);\n BOOST_CHECK(p.is_well_typed());\n}\n", "meta": {"hexsha": "5f2c8c63fb7e5cd871806a177fe47bdbe16358bf", "size": 1638, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libraries/pbes/test/txt2pbes_test.cpp", "max_stars_repo_name": "Noxsense/mCRL2", "max_stars_repo_head_hexsha": "dd2fcdd6eb8b15af2729633041c2dbbd2216ad24", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 61.0, "max_stars_repo_stars_event_min_datetime": "2018-05-24T13:14:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T11:35:03.000Z", "max_issues_repo_path": "libraries/pbes/test/txt2pbes_test.cpp", "max_issues_repo_name": "Noxsense/mCRL2", "max_issues_repo_head_hexsha": 
"dd2fcdd6eb8b15af2729633041c2dbbd2216ad24", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 229.0, "max_issues_repo_issues_event_min_datetime": "2018-05-28T08:31:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T11:02:41.000Z", "max_forks_repo_path": "libraries/pbes/test/txt2pbes_test.cpp", "max_forks_repo_name": "Noxsense/mCRL2", "max_forks_repo_head_hexsha": "dd2fcdd6eb8b15af2729633041c2dbbd2216ad24", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 28.0, "max_forks_repo_forks_event_min_datetime": "2018-04-11T14:09:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T15:57:39.000Z", "avg_line_length": 34.8510638298, "max_line_length": 61, "alphanum_fraction": 0.4816849817, "num_tokens": 445, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO\n\n", "lm_q1_score": 0.5888891163376236, "lm_q2_score": 0.33807711081161995, "lm_q1q2_score": 0.19908993103983175}} {"text": "/*\n Copyright (C) 2016 Quaternion Risk Management Ltd\n Copyright (C) 2017 Aareal Bank AG\n\n All rights reserved.\n\n This file is part of ORE, a free-software/open-source library\n for transparent pricing and risk analysis - http://opensourcerisk.org\n\n ORE is free software: you can redistribute it and/or modify it\n under the terms of the Modified BSD License. You should have received a\n copy of the license along with this program.\n The license is also available online at \n\n This program is distributed on the basis that it will form a useful\n contribution to risk analytics and model standardisation, but WITHOUT\n ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n FITNESS FOR A PARTICULAR PURPOSE. 
See the license for more details.\n*/\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace QuantLib;\nusing namespace QuantExt;\n\nnamespace ore {\nnamespace data {\n\nnamespace {\nLeg joinLegs(const std::vector& legs) {\n Leg masterLeg;\n for (Size i = 0; i < legs.size(); ++i) {\n // check if the periods of adjacent legs are consistent\n if (i > 0) {\n auto lcpn = boost::dynamic_pointer_cast(legs[i - 1].back());\n auto fcpn = boost::dynamic_pointer_cast(legs[i].front());\n QL_REQUIRE(lcpn, \"joinLegs: expected coupon as last cashflow in leg #\" << (i - 1));\n QL_REQUIRE(fcpn, \"joinLegs: expected coupon as first cashflow in leg #\" << i);\n QL_REQUIRE(lcpn->accrualEndDate() == fcpn->accrualStartDate(),\n \"joinLegs: accrual end date of last coupon in leg #\"\n << (i - 1) << \" (\" << lcpn->accrualEndDate()\n << \") is not equal to accrual start date of first coupon in leg #\" << i << \" (\"\n << fcpn->accrualStartDate() << \")\");\n }\n // copy legs together\n masterLeg.insert(masterLeg.end(), legs[i].begin(), legs[i].end());\n }\n return masterLeg;\n}\n} // namespace\n\nvoid Bond::build(const boost::shared_ptr& engineFactory) {\n DLOG(\"Bond::build() called for trade \" << id());\n\n // Clear the separateLegs_ member here. Should be done in reset() but it is not virtual\n separateLegs_.clear();\n\n const boost::shared_ptr market = engineFactory->market();\n\n boost::shared_ptr builder = engineFactory->builder(\"Bond\");\n\n Date issueDate = parseDate(issueDate_);\n Calendar calendar = parseCalendar(calendar_);\n Natural settlementDays = boost::lexical_cast(settlementDays_);\n boost::shared_ptr bond;\n\n // FIXME: zero bonds are always long (firstLegIsPayer = false, mult = 1.0)\n bool firstLegIsPayer = (coupons_.size() == 0) ? false : coupons_[0].isPayer();\n Real mult = firstLegIsPayer ? 
-1.0 : 1.0;\n if (zeroBond_) { // Zero coupon bond\n bond.reset(new QuantLib::ZeroCouponBond(settlementDays, calendar, faceAmount_, parseDate(maturityDate_)));\n } else { // Coupon bond\n for (Size i = 0; i < coupons_.size(); ++i) {\n bool legIsPayer = coupons_[i].isPayer();\n QL_REQUIRE(legIsPayer == firstLegIsPayer, \"Bond legs must all have same pay/receive flag\");\n if (i == 0)\n currency_ = coupons_[i].currency();\n else {\n QL_REQUIRE(currency_ == coupons_[i].currency(), \"leg #\" << i << \" currency (\" << coupons_[i].currency()\n << \") not equal to leg #0 currency (\"\n << coupons_[0].currency());\n }\n Leg leg;\n auto configuration = builder->configuration(MarketContext::pricing);\n auto legBuilder = engineFactory->legBuilder(coupons_[i].legType());\n leg = legBuilder->buildLeg(coupons_[i], engineFactory, configuration);\n separateLegs_.push_back(leg);\n \n // Initialise the set of [index name, leg] index pairs\n for (const auto& index : coupons_[i].indices()) {\n nameIndexPairs_.insert(make_pair(index, separateLegs_.size() - 1));\n }\n\n } // for coupons_\n Leg leg = joinLegs(separateLegs_);\n bond.reset(new QuantLib::Bond(settlementDays, calendar, issueDate, leg));\n // workaround, QL doesn't register a bond with its leg's cashflows\n for (auto const& c : leg)\n bond->registerWith(c);\n }\n\n Currency currency = parseCurrency(currency_);\n boost::shared_ptr bondBuilder = boost::dynamic_pointer_cast(builder);\n QL_REQUIRE(bondBuilder, \"No Builder found for Bond: \" << id());\n bond->setPricingEngine(bondBuilder->engine(currency, creditCurveId_, securityId_, referenceCurveId_));\n instrument_.reset(new VanillaInstrument(bond, mult));\n\n npvCurrency_ = currency_;\n maturity_ = bond->cashflows().back()->date();\n notional_ = currentNotional(bond->cashflows());\n\n // Add legs (only 1)\n legs_ = {bond->cashflows()};\n legCurrencies_ = {npvCurrency_};\n legPayers_ = {firstLegIsPayer};\n}\n\nmap> Bond::fixings(const Date& settlementDate) const {\n\n map> 
result;\n\n for (const auto& nameIndexPair : nameIndexPairs_) {\n // For clarity\n string indexName = nameIndexPair.first;\n Size legNumber = nameIndexPair.second;\n\n // Get the set of fixing dates for the [index name, leg index] pair\n set dates = fixingDates(separateLegs_[legNumber], settlementDate);\n\n // Update the results with the fixing dates.\n if (!dates.empty()) result[indexName].insert(dates.begin(), dates.end());\n }\n\n return result;\n}\n\nvoid Bond::fromXML(XMLNode* node) {\n Trade::fromXML(node);\n XMLNode* bondNode = XMLUtils::getChildNode(node, \"BondData\");\n QL_REQUIRE(bondNode, \"No BondData Node\");\n issuerId_ = XMLUtils::getChildValue(bondNode, \"IssuerId\", true);\n creditCurveId_ =\n XMLUtils::getChildValue(bondNode, \"CreditCurveId\", false); // issuer credit term structure not mandatory\n securityId_ = XMLUtils::getChildValue(bondNode, \"SecurityId\", true);\n referenceCurveId_ = XMLUtils::getChildValue(bondNode, \"ReferenceCurveId\", true);\n settlementDays_ = XMLUtils::getChildValue(bondNode, \"SettlementDays\", true);\n calendar_ = XMLUtils::getChildValue(bondNode, \"Calendar\", true);\n issueDate_ = XMLUtils::getChildValue(bondNode, \"IssueDate\", true);\n XMLNode* legNode = XMLUtils::getChildNode(bondNode, \"LegData\");\n while (legNode != nullptr) {\n auto ld = createLegData();\n ld->fromXML(legNode);\n coupons_.push_back(*boost::static_pointer_cast(ld));\n legNode = XMLUtils::getNextSibling(legNode, \"LegData\");\n }\n}\n\nboost::shared_ptr Bond::createLegData() const { return boost::make_shared(); }\n\nXMLNode* Bond::toXML(XMLDocument& doc) {\n XMLNode* node = Trade::toXML(doc);\n XMLNode* bondNode = doc.allocNode(\"BondData\");\n XMLUtils::appendNode(node, bondNode);\n XMLUtils::addChild(doc, bondNode, \"IssuerId\", issuerId_);\n XMLUtils::addChild(doc, bondNode, \"CreditCurveId\", creditCurveId_);\n XMLUtils::addChild(doc, bondNode, \"SecurityId\", securityId_);\n XMLUtils::addChild(doc, bondNode, \"ReferenceCurveId\", 
referenceCurveId_);\n XMLUtils::addChild(doc, bondNode, \"SettlementDays\", settlementDays_);\n XMLUtils::addChild(doc, bondNode, \"Calendar\", calendar_);\n XMLUtils::addChild(doc, bondNode, \"IssueDate\", issueDate_);\n for (auto& c : coupons_)\n XMLUtils::appendNode(bondNode, c.toXML(doc));\n return node;\n}\n} // namespace data\n} // namespace ore\n", "meta": {"hexsha": "824dc62b0f7ded4e0a0f3570a306bb5066b72ea8", "size": 8183, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "OREData/ored/portfolio/bond.cpp", "max_stars_repo_name": "PiotrSiejda/Engine", "max_stars_repo_head_hexsha": "8360b5de32408f2a37da5ac3ca7b4e913bf67e9f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "OREData/ored/portfolio/bond.cpp", "max_issues_repo_name": "PiotrSiejda/Engine", "max_issues_repo_head_hexsha": "8360b5de32408f2a37da5ac3ca7b4e913bf67e9f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "OREData/ored/portfolio/bond.cpp", "max_forks_repo_name": "PiotrSiejda/Engine", "max_forks_repo_head_hexsha": "8360b5de32408f2a37da5ac3ca7b4e913bf67e9f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2022-02-07T02:04:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-07T02:04:10.000Z", "avg_line_length": 43.5265957447, "max_line_length": 119, "alphanum_fraction": 0.6564829525, "num_tokens": 2009, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.5273165382362518, "lm_q2_score": 0.37754066879814546, "lm_q1q2_score": 0.19908343851403734}} {"text": "/*=============================================================================\n\n NifTK: A software platform for medical image computing.\n\n Copyright (c) University College London (UCL). All rights reserved.\n\n This software is distributed WITHOUT ANY WARRANTY; without even\n the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR\n PURPOSE.\n\n See LICENSE.txt in the top level directory for details.\n\n=============================================================================*/\n\n#include \"mitkUltrasoundTransformAndImageMerger.h\"\n\n#include \n#include \n\n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\nnamespace mitk\n{\n\n//-----------------------------------------------------------------------------\nUltrasoundTransformAndImageMerger::~UltrasoundTransformAndImageMerger()\n{\n}\n\n\n//-----------------------------------------------------------------------------\nUltrasoundTransformAndImageMerger::UltrasoundTransformAndImageMerger()\n{\n}\n\n\n//-----------------------------------------------------------------------------\nvoid UltrasoundTransformAndImageMerger::Merge(const std::string& inputMatrixDirectory,\n const std::string& inputImageDirectory,\n const std::string& outputImageFileName,\n const std::string& imageOrientation)\n{\n cv::Matx44d identityMatrix;\n mitk::MakeIdentity(identityMatrix);\n\n mitk::TrackingAndTimeStampsContainer trackingTimeStamps;\n bool haltOnMatrixReadFailure = true;\n int badMatrixFiles = trackingTimeStamps.LoadFromDirectory(inputMatrixDirectory, haltOnMatrixReadFailure);\n\n std::vector imageFiles = niftk::GetFilesInDirectory(inputImageDirectory);\n std::sort(imageFiles.begin(), imageFiles.end());\n\n // Load all images. 
OK, so this will eventually run out of memory, but its ok for now.\n std::vector images;\n for (int i = 0; i < imageFiles.size(); i++)\n {\n images.push_back(mitk::IOUtil::LoadImage(imageFiles[i]));\n }\n\n std::cout << \"Number of matrices=\" << trackingTimeStamps.GetSize() << std::endl;\n std::cout << \"Number of images=\" << imageFiles.size() << std::endl;\n\n if (trackingTimeStamps.GetSize() < imageFiles.size())\n {\n std::ostringstream errorMessage;\n errorMessage << \"Loaded \" << trackingTimeStamps.GetSize() << \" matrices, and loaded a difference number of images \" << images.size() << \", and number of images must be less than number of matrices.\" << std::endl;\n mitkThrow() << errorMessage.str();\n }\n\n if (imageFiles.size() != images.size())\n {\n std::ostringstream errorMessage;\n errorMessage << \"Retrieved \" << imageFiles.size() << \" file names for images, but could only load \" << images.size() << \" images!\" << std::endl;\n mitkThrow() << errorMessage.str();\n }\n\n // Now generate output.\n\n typedef itk::Image ImageType;\n ImageType::Pointer outputImage = ImageType::New();\n\n int sizeX = images[0]->GetDimension(0);\n int sizeY = images[0]->GetDimension(1);\n\n ImageType::SizeType size;\n size[0] = sizeX;\n size[1] = sizeY;\n size[2] = images.size();\n\n ImageType::IndexType offset;\n offset.Fill(0);\n\n ImageType::RegionType region;\n region.SetSize(size);\n region.SetIndex(offset);\n\n ImageType::SpacingType spacing;\n spacing.Fill(1);\n\n ImageType::PointType origin;\n origin.Fill(0);\n\n ImageType::DirectionType direction;\n direction.SetIdentity();\n\n outputImage->SetSpacing(spacing);\n outputImage->SetOrigin(origin);\n outputImage->SetRegions(region);\n outputImage->SetDirection(direction);\n outputImage->Allocate();\n outputImage->FillBuffer(0);\n\n // Fill 3D image, slice by slice. 
This is slow, but we wont do it often.\n itk::Index<3> inputImageIndex;\n itk::Index<3> outputImageIndex;\n\n for (unsigned int i = 0; i < images.size(); i++)\n {\n mitk::ImagePixelReadAccessor readAccess(images[i], images[i]->GetVolumeData(0));\n\n for (unsigned int y = 0; y < sizeY; y++)\n {\n for (unsigned int x = 0; x < sizeX; x++)\n {\n inputImageIndex[0] = x;\n inputImageIndex[1] = y;\n inputImageIndex[2] = 0;\n\n outputImageIndex[0] = x;\n outputImageIndex[1] = y;\n outputImageIndex[2] = i;\n\n outputImage->SetPixel(outputImageIndex, readAccess.GetPixelByIndex(inputImageIndex));\n }\n }\n }\n\n // Now we write a volume. We just output the extra header info required.\n // The user can manually append it to the file if necessary.\n std::string outputImgFile = outputImageFileName + \".mhd\";\n itk::ImageFileWriter::Pointer writer = itk::ImageFileWriter::New();\n writer->SetFileName(outputImgFile);\n writer->SetInput(outputImage);\n writer->Update();\n\n std::cout << \"Written image data to \" << outputImgFile << std::endl;\n\n // Read .mhd header file.\n std::vector linesFromMhdFile;\n std::ifstream fin(outputImgFile.c_str());\n if ( !fin )\n {\n std::ostringstream errorMessage;\n errorMessage << \"Could not open \" << outputImgFile << \" for reading!\" << std::endl;\n mitkThrow() << errorMessage.str();\n }\n char lineOfText[256];\n do {\n fin.getline(lineOfText,256);\n if (fin.good())\n {\n linesFromMhdFile.push_back(std::string(lineOfText));\n std::cout << \"Read:\" << lineOfText << std::endl;\n }\n } while (fin.good());\n fin.close();\n\n // Now, re-open file .mhd file to add meta-data.\n std::ofstream fout(outputImgFile.c_str(), std::ios::out | std::ios::app);\n if ( !fout )\n {\n std::ostringstream errorMessage;\n errorMessage << \"Could not open \" << outputImgFile << \" for text output!\" << std::endl;\n mitkThrow() << errorMessage.str();\n }\n\n // Write everything except the last string of the existing header.\n for (unsigned int i = 0; i < 
linesFromMhdFile.size() - 1; i++)\n {\n fout << linesFromMhdFile[i] << std::endl;\n }\n\n fout << \"UltrasoundImageOrientation = \" << imageOrientation << std::endl;\n fout << \"UltrasoundImageType = BRIGHTNESS\" << std::endl;\n\n std::string oneZero = \"0\";\n std::string twoZero = \"00\";\n std::string threeZero = \"000\";\n\n fout.precision(10);\n\n boost::regex timeStampFilter ( \"([0-9]{19})(.)*\");\n boost::cmatch what;\n std::string timeStampAsString;\n unsigned long long timeStamp;\n long long timingError;\n bool inBounds;\n unsigned long long timeStampFirstFrame = 0;\n double timeStampInSeconds = 0;\n cv::Matx44d interpolatedMatrix;\n\n for (unsigned int i = 0; i < images.size(); i++)\n {\n std::ostringstream suffix;\n if (i < 10)\n {\n suffix << threeZero << i;\n }\n else if (i < 100)\n {\n suffix << twoZero << i;\n }\n else if (i < 1000)\n {\n suffix << oneZero << i;\n }\n else\n {\n suffix << i;\n }\n\n std::string nameToMatch = niftk::Basename(imageFiles[i]);\n if ( boost::regex_match( nameToMatch.c_str(), what, timeStampFilter) )\n {\n timeStampAsString = nameToMatch.substr(0, 19);\n timeStamp = boost::lexical_cast(timeStampAsString);\n if (timeStampFirstFrame == 0)\n {\n timeStampFirstFrame = timeStamp;\n }\n interpolatedMatrix = trackingTimeStamps.InterpolateMatrix(timeStamp, timingError, inBounds);\n timeStampInSeconds = (timeStamp - timeStampFirstFrame)/static_cast(1000000000);\n\n fout << \"Seq_Frame\" << suffix.str() << \"_FrameNumber = \" << i << std::endl;\n fout << \"Seq_Frame\" << suffix.str() << \"_UnfilteredTimestamp = \" << timeStampInSeconds << std::endl;\n fout << \"Seq_Frame\" << suffix.str() << \"_Timestamp = \" << timeStampInSeconds << std::endl;\n fout << \"Seq_Frame\" << suffix.str() << \"_ProbeToTrackerTransform =\";\n\n for (int r = 0; r < 4; r++)\n {\n for (int c = 0; c < 4; c++)\n {\n fout << \" \" << interpolatedMatrix(r, c);\n }\n }\n fout << std::endl;\n\n fout << \"Seq_Frame\" << suffix.str() << 
\"_ProbeToTrackerTransformStatus = OK\" << std::endl;\n fout << \"Seq_Frame\" << suffix.str() << \"_ReferenceToTrackerTransform =\";\n for (int r = 0; r < 4; r++)\n {\n for (int c = 0; c < 4; c++)\n {\n // We are not actually tracking a reference object.\n // This is just so that I can get data into fCal.\n fout << \" \" << identityMatrix(r, c);\n }\n }\n fout << std::endl;\n fout << \"Seq_Frame\" << suffix.str() << \"_ReferenceToTrackerTransformStatus = OK\" << std::endl;\n fout << \"Seq_Frame\" << suffix.str() << \"_StylusToTrackerTransform =\";\n for (int r = 0; r < 4; r++)\n {\n for (int c = 0; c < 4; c++)\n {\n // We are not actually tracking a stylus object.\n // This is just so that I can get data into fCal.\n fout << \" \" << identityMatrix(r, c);\n }\n }\n fout << std::endl;\n fout << \"Seq_Frame\" << suffix.str() << \"_StylusToTrackerTransformStatus = OK\" << std::endl;\n }\n else\n {\n std::ostringstream errorMessage;\n errorMessage << \"Image \" << imageFiles[i] << \" does not look like it contains a time-stamp.\" << std::endl;\n mitkThrow() << errorMessage.str();\n }\n }\n\n fout << linesFromMhdFile[linesFromMhdFile.size() - 1];\n fout.close();\n\n std::cout << \"Written meta-data to \" << outputImgFile << std::endl;\n}\n\n\n//-----------------------------------------------------------------------------\n} // end namespace\n", "meta": {"hexsha": "b0793fcd46d5f82a34688ea262081f2d0a8ff7df", "size": 9635, "ext": "cxx", "lang": "C++", "max_stars_repo_path": "MITK/Modules/OpenCV/UltrasoundCalibration/mitkUltrasoundTransformAndImageMerger.cxx", "max_stars_repo_name": "NifTK/NifTK", "max_stars_repo_head_hexsha": "2358b333c89ff1bba1c232eecbbcdc8003305dfe", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 13.0, "max_stars_repo_stars_event_min_datetime": "2018-07-28T13:36:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-01T19:17:39.000Z", "max_issues_repo_path": 
"MITK/Modules/OpenCV/UltrasoundCalibration/mitkUltrasoundTransformAndImageMerger.cxx", "max_issues_repo_name": "NifTK/NifTK", "max_issues_repo_head_hexsha": "2358b333c89ff1bba1c232eecbbcdc8003305dfe", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MITK/Modules/OpenCV/UltrasoundCalibration/mitkUltrasoundTransformAndImageMerger.cxx", "max_forks_repo_name": "NifTK/NifTK", "max_forks_repo_head_hexsha": "2358b333c89ff1bba1c232eecbbcdc8003305dfe", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 10.0, "max_forks_repo_forks_event_min_datetime": "2018-08-20T07:06:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-07T07:55:27.000Z", "avg_line_length": 31.6940789474, "max_line_length": 216, "alphanum_fraction": 0.6167099118, "num_tokens": 2453, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.5273165233795671, "lm_q2_score": 0.37754066879814546, "lm_q1q2_score": 0.1990834329050347}} {"text": "\n#include \n#include \n#include \n#include \n#include \n#include \n\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"std_msgs/String.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \"colormod.h\" // namespace Color\n#include \"keyboard.h\"\n\n#ifndef Included_MATH_HELPER_H\n#define Included_MATH_HELPER_H\n#include \"math_helper.h\"\n#endif\n#ifndef Included_STRING_CONVERTOR_H\n#define Included_STRING_CONVERTOR_H\n#include \"string_convertor.h\"\n#endif\n\n// MoveIt!\n#include \n#include \n#include \n\n\n// Robot state publishing\n#include \n#include \n\n// Kinematics\n#include \n#include \n#include \n#include \n#include \n\n//wam\n#include \"wam_msgs/MatrixMN.h\"\n#include \"wam_srvs/JointMove.h\"\n#include \"sensor_msgs/JointState.h\"\n\nusing namespace std;\n\nColor::Modifier 
c_red(Color::FG_RED);\nColor::Modifier c_yellow(Color::FG_YELLOW);\nColor::Modifier c_green(Color::FG_GREEN);\nColor::Modifier c_default(Color::FG_DEFAULT);\nbool lock = false;\nbool ready_signal1 = false;\nbool ready_signal2 = false;\nbool ready_signal3 = false;\ndouble step_size = 0.02;//m\ndouble angle_step_size = 0.01745329251;//1 degree\n\ndouble current_pos[]={9999,9999,9999,9999,9999,9999, 9999};//initialize as an invalid value set, stores the current pose\n\nstd::vector fullJointStates; //current joint states, 8 values, including a virtual joint\nstd::vector joint_values; //IK solutions\nmoveit::core::RobotModelPtr kinematic_model;\nstd::vector joint_names;\nros::Publisher robot_state_publisher ;\nint dofNum = 7;\nwam_srvs::JointMove mv_srv;\nros::ServiceClient Joint_move_client ;\n\nbool grasping_mode = false;\nsensor_msgs::JointState grasp_msg;\n\ngeometry_msgs::Pose thisPose;\n\n// void wamPoseCallback(const geometry_msgs::PoseStamped::ConstPtr& msg)\n// {\n// //cout<<\"wam msgs\"<pose;\n// cv::Mat t(3,1,cv::DataType::type);\n// cv::Mat R(3,3,cv::DataType::type);\n//\n// t.at(0,0) = thisPose.position.x; t.at(1,0) = thisPose.position.y; t.at(2,0) = thisPose.position.z + 1.365;\n//\n// tf::Quaternion q(thisPose.orientation.x, thisPose.orientation.y , thisPose.orientation.z, thisPose.orientation.w);\n// tf::Matrix3x3 rMatrix(q);\n// for(int i=0;i<3;i++)\n// for(int j=0;j<3;j++)\n// R.at(i,j)= rMatrix[i][j];\n// // current_trans = t;\n// // current_rot = R;\n// ready_signal2=true;\n// //cout<<\"pose callback\"<setVariablePositions(current_pos);\n cout<<\"compute fk current joints \"<update();\n const Eigen::Affine3d end_effector_state = kinematic_state->getGlobalLinkTransform(\"wam/wrist_palm_link\");//table_base\n ROS_INFO_STREAM(\"FK Translation: \" << end_effector_state.translation());\n Eigen::Quaterniond rot_q(end_effector_state.rotation());\n ROS_INFO_STREAM(\"FK Rotation q x: \" << rot_q.x());\n ROS_INFO_STREAM(\"FK Rotation q y: \" << rot_q.y());\n 
ROS_INFO_STREAM(\"FK Rotation q z: \" << rot_q.z());\n ROS_INFO_STREAM(\"FK Rotation q w: \" << rot_q.w());\n /* Print end-effector pose. Remember that this is in the model frame */\n //print out current joints\n // cout<<\"print out current joints\"<copyJointGroupPositions(joint_model_group, joint_values);\n // for(std::size_t i=0; i < joint_names.size(); ++i)\n // {\n // ROS_INFO(\"current Joint %s: %f\", joint_names[i].c_str(), joint_values[i]);\n // }\n return end_effector_state;\n}\n\nbool comput_IK(geometry_msgs::Pose p)\n{\n robot_state::RobotStatePtr kinematic_state(new robot_state::RobotState(kinematic_model));\n const robot_state::JointModelGroup* joint_model_group = kinematic_model->getJointModelGroup(\"arm\");//All, it's in the planning group name\n const std::vector &joint_names2 = joint_model_group->getJointModelNames();\n\n joint_names= joint_names2;\n joint_names=string_convertor::safeRemoveStringVec(joint_names,0);//first item of joint name is caused by mistake\n cout<<\"joint names count \"<setVariablePositions(current_pos);\n cout<<\"compute IK current joints \"<update();\n\n bool found_ik = kinematic_state->setFromIK(joint_model_group, p, 10, 0.1);\n // Now, we can print out the IK solution (if found):\n if (found_ik)\n {\n kinematic_state->copyJointGroupPositions(joint_model_group, joint_values);\n cout< constructFullJointNames()\n{\n std::vector rts;\n rts.push_back(\"jaco_arm_0_joint\");\n rts.push_back(\"jaco_arm_1_joint\");\n rts.push_back(\"jaco_arm_2_joint\");\n rts.push_back(\"jaco_arm_3_joint\");\n rts.push_back(\"jaco_arm_4_joint\");\n rts.push_back(\"jaco_arm_5_joint\");\n rts.push_back(\"jaco_finger_joint_0\");\n rts.push_back(\"jaco_finger_joint_2\");\n rts.push_back(\"jaco_finger_joint_4\");\n return rts;\n}\nstd::vector constructFullJoints(double *strs)\n{\n std::vector rts;\n int arraySize =(int)(sizeof(strs)/sizeof(*strs));\n for( size_t i=0;i joint_values_t)\n{\n lock=true;\n std::vector jnts;\n for(size_t i=0;i 
joint_values_temp; //\n std::vector effort_temp; //\n bool needEffortControl = false;\n std::vector joint_names_temp;\n int ik_needed = 0;//0 no motion neeeded . 1 motion needed and ik solution needed; 2 motion needed, but no need for ik solution.\n\n switch (k.code) {\n case 273://up arrow\n std::cout << \"move forward\" << '\\n';\n ik_needed=1;\n p.position.x = end_effector_state.translation()(0)-step_size;\n break;\n case 274://down arrow\n std::cout << \"move backward\" << '\\n';\n ik_needed=1;\n p.position.x = end_effector_state.translation()(0)+step_size;\n break;\n case 276://left arrow\n std::cout << \"move left\" << '\\n';\n ik_needed=1;\n p.position.y = end_effector_state.translation()(1)-step_size;\n break;\n case 275://right arrow\n std::cout << \"move right\" << '\\n';\n ik_needed=1;\n p.position.y = end_effector_state.translation()(1)+step_size;\n break;\n case 97://a\n std::cout << \"move up\" << '\\n';\n ik_needed=1;\n p.position.z = end_effector_state.translation()(2)+step_size;\n break;\n case 115://s\n std::cout << \"move down\" << '\\n';\n ik_needed=1;\n p.position.z = end_effector_state.translation()(2)-step_size;\n break;\n case 100://d\n std::cout << \"rotate clockwise\" << '\\n';\n ik_needed=2;\n std::vector().swap(joint_values_temp); //clear the vector\n std::vector().swap(joint_names_temp);\n joint_names_temp.push_back(\"jaco_arm_5_joint\");\n joint_values_temp.push_back(fullJointStates[6]+angle_step_size);\n break;\n case 102://f\n std::cout << \"rotate anti-clockwise\" << '\\n';\n ik_needed=2;\n std::vector().swap(joint_values_temp); //clear the vector\n std::vector().swap(joint_names_temp);\n joint_names_temp.push_back(\"jaco_arm_5_joint\");\n joint_values_temp.push_back(fullJointStates[6]-angle_step_size);\n break;\n case 103://g\n std::cout << \"grasp\" << '\\n';\n grasping_mode=true;\n ik_needed=2;\n std::vector().swap(joint_values_temp); //clear the vector\n std::vector().swap(joint_names_temp);\n 
joint_names_temp.push_back(\"jaco_finger_joint_0\");\n joint_names_temp.push_back(\"jaco_finger_joint_2\");\n joint_names_temp.push_back(\"jaco_finger_joint_4\");\n joint_values_temp.push_back(1.0);\n joint_values_temp.push_back(1.0);\n joint_values_temp.push_back(1.0);\n needEffortControl=true;\n effort_temp.push_back(50);\n effort_temp.push_back(50);\n effort_temp.push_back(50);\n grasp_msg.name = joint_names_temp;\n grasp_msg.position = joint_values_temp;\n grasp_msg.effort = effort_temp;\n break;\n case 104://h\n ik_needed=2;\n std::cout << \"release\" << '\\n';\n grasping_mode=false;\n std::vector().swap(joint_values_temp); //clear the vector\n std::vector().swap(joint_names_temp);\n joint_names_temp.push_back(\"jaco_finger_joint_0\");\n joint_names_temp.push_back(\"jaco_finger_joint_2\");\n joint_names_temp.push_back(\"jaco_finger_joint_4\");\n joint_values_temp.push_back(0);\n joint_values_temp.push_back(0);\n joint_values_temp.push_back(0);\n break;\n case 106: //j\n ik_needed=0;\n std::cout << \"step size increase to maximum: 1 cm\" << '\\n';\n step_size=0.01;\n angle_step_size = 0.01745329251;\n break;\n case 107: //k\n ik_needed=0;\n std::cout << \"step size decrease to minimum: 1 mm\" << '\\n';\n step_size=0.001;\n angle_step_size = 0.005;\n break;\n case 257://num 1, pose 1\n ik_needed=2;\n std::cout << \"go to pre grasp pose 1\" << '\\n';\n grasping_mode=false;\n std::vector().swap(joint_values_temp); //clear the vector\n std::vector().swap(joint_names_temp);\n joint_names_temp=constructFullJointNames();\n //joint_pos=double[]{-1.3855690196259935, -0.6055034496049199, -0.13711278461549714, -0.26863385237514037, -2.164869929572392, 0.022610752306681192, 0.005429499871134169, 0.002955580646637479, 0.0054895619658141825};//initialize as an invalid value set\n joint_values_temp=string_convertor::split2double(\"-1.5067168867631673, -0.7126268917944647, 0.06637582003291165, -0.2569294266773463, -2.261174910063673, -0.07121034017964067, 0.005231278056194277, 
0.004921044751721837, 0.005474238319798097\",',');\n break;\n case 258://num 2, pose 2\n ik_needed=2;\n std::cout << \"go to pre grasp pose 2\" << '\\n';\n grasping_mode=false;\n std::vector().swap(joint_values_temp); //clear the vector\n std::vector().swap(joint_names_temp);\n joint_names_temp=constructFullJointNames();\n joint_values_temp=string_convertor::split2double(\"-1.6785439713205421, -0.8295996787645588, 0.5727369662738369, -0.15870884433891863, -2.693067655844972, -0.4522102718374619, 0.0050140626796837395, 0.0047726880740635025, 0.005041075119017968\",',');//initialize as an invalid value set\n //joint_values_temp=constructFullJoints(joint_pos2);\n break;\n }\n sensor_msgs::JointState msg;\n if(ik_needed==1)\n {\n bool ik_found = comput_IK(p);\n if(ik_found)\n {\n // msg.name = joint_names;\n // msg.position = joint_values;\n // cout<position;\n //cout<<\"joint pose obtained:\"<position[0];\n current_pos[1]=msg->position[1];\n current_pos[2]=msg->position[2];\n current_pos[3]=msg->position[3];\n current_pos[4]=msg->position[4];\n current_pos[5]=msg->position[5];\n current_pos[6]=msg->position[6];\n ready_signal1 = true;\n }\n\n\n\n//===========================MAIN FUNCTION START===========================\n\nint main(int argc, char* argv[]){\n // Initialize the ROS system and become a node.\n // if(argc==2)\n // numThres = atoi(argv[1]);\n ros::init(argc, argv, \"tele_op\");\n ros::NodeHandle n(\"~\");\n //ros::Subscriber subP = n.subscribe(\"/zeus/wam/pose\", 1, wamPoseCallback);\n //robot_state_publisher = n.advertise( \"/jaco/joint_control\", 1000 );\n //ros::Duration(1).sleep();\n ros::Subscriber wam_joints_sub=n.subscribe(\"/zeus/wam/joint_states\",1, wamJointsCallback);\n Joint_move_client = n.serviceClient(\"/zeus/wam/joint_move\");\n\n\n robot_model_loader::RobotModelLoader robot_model_loader(\"robot_description\");\n kinematic_model = robot_model_loader.getModel();\n ROS_INFO(\"Model frame: %s\", kinematic_model->getModelFrame().c_str());\n\n 
robot_state::RobotStatePtr kinematic_state(new robot_state::RobotState(kinematic_model));\n const robot_state::JointModelGroup* joint_model_group = kinematic_model->getJointModelGroup(\"arm\");//All, it's in the planning group name\n const std::vector &joint_names2 = joint_model_group->getJointModelNames();\n cout<<\"joint names: \"<( \"allow_repeat\", allow_repeat, false ); // disable by default\n n.param( \"repeat_delay\", repeat_delay, SDL_DEFAULT_REPEAT_DELAY );\n n.param( \"repeat_interval\", repeat_interval, SDL_DEFAULT_REPEAT_INTERVAL );\n\n if ( !allow_repeat ) repeat_delay=0; // disable\n keyboard::Keyboard kbd( repeat_delay, repeat_interval );\n\n ros::Rate r(50);\n cout<<\"========================================================================\"<\n\n#include \n\n#include \"keypoint.hpp\"\n#include \"camera_intrinsics.hpp\"\n#include \"frame.hpp\"\n#include \"depth_source.hpp\"\n#include \"motion_estimation.hpp\"\n#include \"options.hpp\"\n\nnamespace fovis\n{\n\n/**\n * Utility class so that the VisualOdometry class not need\n * EIGEN_MAKE_ALIGNED_OPERATOR_NEW.\n */\nclass VisualOdometryPriv\n{\n private:\n friend class VisualOdometry;\n EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n // best estimate for current position and orientation\n Eigen::Isometry3d pose;\n\n // transformation relating reference frame and most recent frame\n Eigen::Isometry3d ref_to_prev_frame;\n\n // best estimate of motion from current to previous frame\n Eigen::Isometry3d motion_estimate;\n // the 6x6 estimate of the covriance [x-y-z, roll-pitch-yaw];\n Eigen::MatrixXd motion_estimate_covariance;\n\n Eigen::Matrix3d initial_homography_est;\n Eigen::Isometry3d initial_motion_estimate;\n Eigen::MatrixXd initial_motion_cov;\n};\n\n/**\n * \\ingroup FovisCore\n * \\brief Main visual odometry class.\n * \\code\n * #include \n * \\endcode\n *\n * This is the primary fovis class for estimating visual odometry.\n * To use it, you'll need three things:\n * \\li a source of grayscale input 
images.\n * \\li a \\ref DepthSource that can estimate the distance to as many pixels in the input images as possible.\n * \\li a \\ref Rectification object for converting the source image coordinates\n * to a rectified pinhole projection coordinate system. This is typically used\n * to correct radial lens distortion.\n *\n * A typical use case for the VisualOdometry class is to repeatedly call\n * processFrame() as new image data is available, which estimates the camera\n * motion and makes the resulting estimation data available via accessor\n * methods.\n *\n * Options to control the behavior of the visual odometry algorithm can be\n * passed in to the constructor using a \\ref VisualOdometryOptions object.\n */\nclass VisualOdometry\n{\n public:\n /**\n * Constructs a new visual odometry estimator.\n *\n * \\param rectification specifies the input image dimensions, as well as the\n * mapping from input image coordinates to rectified image coordinates.\n * \\param options controls the behavior of the estimation algorithms. This\n * is specified as a key/value dictionary.\n */\n VisualOdometry(const Rectification* rectification,\n const VisualOdometryOptions& options);\n\n ~VisualOdometry();\n\n /**\n * process an input image and estimate the 3D camera motion between \\p gray\n * and the frame previously passed to this method. The estimated motion\n * for the very first frame will always be the identity transform.\n *\n * \\param gray a new input image. The image dimensions must match those\n * passed in to the constructor, and the image data must be stored in\n * row-major order, with no pad bytes between rows. 
An internal copy of\n * the image is made, and the input data is no longer needed once this\n * method returns.\n * \\param depth_source a source of depth information that can either\n * provide a depth estimate at each pixel of the input image, or report\n * that no depth estimate is available.\n */\n void processFrame(const uint8_t* gray, DepthSource* depth_source);\n\n /**\n * Retrieves the integrated pose estimate. On initialization, the camera\n * is positioned at the origin, with +Z pointing along the camera look\n * vector, +X to the right, and +Y down.\n */\n const Eigen::Isometry3d& getPose() {\n return _p->pose;\n }\n\n /**\n * Retrieve the current reference frame used for motion estimation. The\n * reference frame will not change as long as new input frames are easily\n * matched to it.\n */\n const OdometryFrame* getReferenceFrame() const {\n return _ref_frame;\n }\n\n /**\n * Retrieve the current target frame used for motion estimation.\n */\n const OdometryFrame* getTargetFrame() const {\n return _cur_frame;\n }\n\n /**\n * If this returns true, then the current target frame will become the\n * reference frame on the next call to processFrame().\n */\n bool getChangeReferenceFrames() const {\n return _change_reference_frames;\n }\n\n /**\n * \\return whether motion estimation succeeded on the most recent call to\n * processFrame(), or provides a rough failure reason.\n */\n MotionEstimateStatusCode getMotionEstimateStatus() const {\n return _estimator->getMotionEstimateStatus();\n }\n\n /**\n * \\return the estimated camera motion from the previous frame to the\n * current frame.\n */\n const Eigen::Isometry3d& getMotionEstimate() const {\n return _p->motion_estimate;\n }\n\n /**\n * \\return the covariance matrix resulting from the final nonlinear\n * least-squares motion estimation step.\n */\n const Eigen::MatrixXd& getMotionEstimateCov() const {\n return _p->motion_estimate_covariance;\n }\n\n /**\n * \\return the \\ref MotionEstimator object used 
internally.\n */\n const MotionEstimator* getMotionEstimator() const {\n return _estimator;\n }\n\n /**\n * \\return the threshold used by the FAST feature detector.\n */\n int getFastThreshold() const {\n return _fast_threshold;\n }\n\n /**\n * \\return the 2D homography computed during initial rotation estimation.\n */\n const Eigen::Matrix3d & getInitialHomography() const {\n return _p->initial_homography_est;\n }\n\n /**\n * \\return the options passed in to the constructor.\n */\n const VisualOdometryOptions& getOptions() const {\n return _options;\n }\n\n /**\n * \\return a reasonable set of default options that can be passed in to the\n * constructor if you don't know or care about the options.\n */\n static VisualOdometryOptions getDefaultOptions();\n\n /**\n * Performs some internal sanity checks and aborts the program on failure.\n * This is for debugging only.\n */\n void sanityCheck() const;\n\n private:\n void prepareFrame(OdometryFrame* frame);\n\n Eigen::Quaterniond estimateInitialRotation(const OdometryFrame* prev,\n const OdometryFrame* cur,\n const Eigen::Isometry3d\n &init_motion_estimate =\n Eigen::Isometry3d::Identity());\n\n const Rectification* _rectification;\n\n OdometryFrame* _ref_frame;\n OdometryFrame* _prev_frame;\n OdometryFrame* _cur_frame;\n\n MotionEstimator* _estimator;\n\n VisualOdometryPriv* _p;\n\n bool _change_reference_frames;\n\n long _frame_count;\n\n // === tuning parameters ===\n\n int _feature_window_size;\n\n int _num_pyramid_levels;\n\n // initial feature detector threshold\n int _fast_threshold;\n\n // params for adaptive feature detector threshold\n int _fast_threshold_min;\n int _fast_threshold_max;\n int _target_pixels_per_feature;\n float _fast_threshold_adaptive_gain;\n\n bool _use_adaptive_threshold;\n bool _use_homography_initialization;\n\n // if there are least this many inliers in the previous motion estimate,\n // don't change reference frames.\n int _ref_frame_change_threshold;\n\n // Which level of the 
image pyramid to use for initial rotation estimation\n int _initial_rotation_pyramid_level;\n\n VisualOdometryOptions _options;\n};\n\n}\n#endif\n", "meta": {"hexsha": "ef3b1101514282435857fc8a6c40a3199d2b4615", "size": 7597, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "navigation_layer/fovis/libfovis/libfovis/libfovis/visual_odometry.hpp", "max_stars_repo_name": "kartavya2000/Anahita", "max_stars_repo_head_hexsha": "9afbf6c238658188df7d0d97b2fec3bd48028c03", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 8.0, "max_stars_repo_stars_event_min_datetime": "2019-03-21T15:18:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-28T07:52:10.000Z", "max_issues_repo_path": "perception/rwth_people_tracker/rwth_visual_odometry/3rd_party/fovis/libfovis/visual_odometry.hpp", "max_issues_repo_name": "VisualComputingInstitute/CROWDBOT_perception", "max_issues_repo_head_hexsha": "df98f3f658c39fb3fa4ac0456f1214f7918009f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19.0, "max_issues_repo_issues_event_min_datetime": "2018-10-03T12:14:35.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-07T09:33:14.000Z", "max_forks_repo_path": "perception/rwth_people_tracker/rwth_visual_odometry/3rd_party/fovis/libfovis/visual_odometry.hpp", "max_forks_repo_name": "VisualComputingInstitute/CROWDBOT_perception", "max_forks_repo_head_hexsha": "df98f3f658c39fb3fa4ac0456f1214f7918009f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15.0, "max_forks_repo_forks_event_min_datetime": "2018-09-09T12:35:15.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-03T09:28:19.000Z", "avg_line_length": 30.5100401606, "max_line_length": 107, "alphanum_fraction": 0.6880347506, "num_tokens": 1669, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.5273165233795671, "lm_q2_score": 0.37754066879814546, "lm_q1q2_score": 0.1990834329050347}} {"text": "// Copyright (c) 2021 Graphcore Ltd. 
All rights reserved.\n#include \"CTCInferenceCodeletTestConnection.hpp\"\n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n\n#include \n\nusing namespace poplar;\nusing namespace poplar::program;\nusing namespace poplibs_test::ctc;\nusing namespace poplibs_test;\nusing namespace poplibs_test::util;\nusing namespace poplibs_support;\nusing namespace poputil;\n\nnamespace poplibs_test {\nnamespace ctc {\ntemplate \nstatic std::vector>\nrunCodeletCommon(poplar::Graph &graph, poplibs_support::TestDevice &device,\n poplibs_support::DeviceType deviceType,\n poplar::Type partialsType,\n const std::vector> &unsortedCandidates,\n unsigned beamwidth, unsigned timestep, bool testReduceVertex,\n bool profile) {\n const auto target = graph.getTarget();\n\n auto complete = graph.addConstant(UNSIGNED_INT, {}, 0u);\n\n graph.setTileMapping(complete, 0);\n\n auto cs = graph.addComputeSet(\"cs\");\n auto vertex = graph.addVertex(\n cs, templateVertex(testReduceVertex ? 
\"popnn::CTCReduceCandidates\"\n : \"popnn::CTCRankCandidates\",\n partialsType, UNSIGNED_INT));\n graph.setTileMapping(vertex, 0);\n\n const auto totalCandidates = unsortedCandidates.size();\n graph.setInitialValue(vertex[\"totalCandidates\"], totalCandidates);\n\n if (!testReduceVertex) {\n graph.setInitialValue(vertex[\"beamwidth\"], beamwidth);\n graph.setInitialValue(vertex[\"firstCandidateToRank\"], 0);\n graph.setInitialValue(vertex[\"lastCandidateToRank\"], totalCandidates);\n\n graph.connect(vertex[\"complete\"], complete);\n }\n\n Sequence uploadProg, downloadProg;\n std::vector> tmap;\n\n auto rawCandidates = createAndConnectCandidates(\n graph, vertex, \"candidate\", partialsType, {totalCandidates}, uploadProg,\n downloadProg, tmap);\n\n std::vector candidateParentIn{};\n std::vector candidateAddendIn{};\n std::vector candidateBeamProbNonBlankIn{};\n std::vector candidateBeamProbBlankIn{};\n std::vector candidateBeamProbTotalIn{};\n\n for (unsigned c = 0; c < totalCandidates; c++) {\n candidateParentIn.push_back(unsortedCandidates[c].beam);\n candidateAddendIn.push_back(unsortedCandidates[c].addend);\n candidateBeamProbNonBlankIn.push_back(unsortedCandidates[c].pnb);\n candidateBeamProbBlankIn.push_back(unsortedCandidates[c].pb);\n candidateBeamProbTotalIn.push_back(unsortedCandidates[c].pTotal);\n }\n\n copy(target, candidateParentIn, UNSIGNED_INT, rawCandidates.parent.get());\n copy(target, candidateAddendIn, UNSIGNED_INT, rawCandidates.addend.get());\n copy(target, candidateBeamProbNonBlankIn, partialsType,\n rawCandidates.probNonBlank.get());\n copy(target, candidateBeamProbBlankIn, partialsType,\n rawCandidates.probBlank.get());\n copy(target, candidateBeamProbTotalIn, partialsType,\n rawCandidates.probTotal.get().get());\n\n // Outputs\n CandidateHandles rawSortedCandidates;\n if (testReduceVertex) {\n rawSortedCandidates = createAndConnectCandidates(\n graph, vertex, \"reducedCandidate\", partialsType, {}, uploadProg,\n downloadProg, tmap);\n } 
else {\n rawSortedCandidates = createAndConnectCandidates(\n graph, vertex, \"rankedCandidate\", partialsType, {beamwidth}, uploadProg,\n downloadProg, tmap);\n }\n OptionFlags engineOptions;\n if (profile) {\n engineOptions.set(\"debug.instrumentCompute\", \"true\");\n }\n Sequence prog;\n prog.add(Execute(cs));\n Engine engine(graph, Sequence{uploadProg, prog, downloadProg}, engineOptions);\n attachStreams(engine, tmap);\n device.bind([&](const Device &d) {\n engine.load(d);\n engine.run();\n });\n\n const unsigned outSize = testReduceVertex ? 1 : beamwidth;\n std::vector candidateParentOut(outSize);\n std::vector candidateAddendOut(outSize);\n\n // TODO partialsType == float\n std::vector candidateBeamProbBlankOut(outSize);\n std::vector candidateBeamProbNonBlankOut(outSize);\n std::vector candidateBeamProbTotalOut(outSize);\n\n copy(target, UNSIGNED_INT, rawSortedCandidates.parent.get(),\n candidateParentOut);\n copy(target, UNSIGNED_INT, rawSortedCandidates.addend.get(),\n candidateAddendOut);\n copy(target, partialsType, rawSortedCandidates.probNonBlank.get(),\n candidateBeamProbNonBlankOut);\n copy(target, partialsType, rawSortedCandidates.probBlank.get(),\n candidateBeamProbBlankOut);\n copy(target, partialsType, rawSortedCandidates.probTotal.get().get(),\n candidateBeamProbTotalOut);\n if (profile && deviceType != DeviceType::Cpu) {\n engine.printProfileSummary(std::cout,\n OptionFlags{{\"showExecutionSteps\", \"true\"}});\n }\n std::vector> selectedCandidates;\n for (unsigned i = 0; i < outSize; i++) {\n selectedCandidates.push_back({candidateParentOut[i], candidateAddendOut[i],\n candidateBeamProbNonBlankOut[i],\n candidateBeamProbBlankOut[i],\n candidateBeamProbTotalOut[i]});\n }\n return selectedCandidates;\n}\n\ntemplate \nstd::vector> runRankCandidatesCodelet(\n poplar::Graph &graph, poplibs_support::TestDevice &device,\n poplibs_support::DeviceType deviceType, poplar::Type partialsType,\n const std::vector> &candidates, unsigned beamwidth,\n 
unsigned timestep, bool profile) {\n\n return runCodeletCommon(graph, device, deviceType, partialsType, candidates,\n beamwidth, timestep, false, profile);\n}\n\ntemplate \nstd::vector> runReduceCandidatesCodelet(\n poplar::Graph &graph, poplibs_support::TestDevice &device,\n poplibs_support::DeviceType deviceType, poplar::Type partialsType,\n const std::vector> &candidates, unsigned beamwidth,\n unsigned timestep, bool profile) {\n\n return runCodeletCommon(graph, device, deviceType, partialsType, candidates,\n beamwidth, timestep, true, profile);\n}\n\ntemplate std::vector> runRankCandidatesCodelet(\n poplar::Graph &graph, poplibs_support::TestDevice &device,\n poplibs_support::DeviceType deviceType, poplar::Type partialsType,\n const std::vector> &candidates, unsigned beamwidth,\n unsigned timestep, bool profile);\n\ntemplate std::vector> runReduceCandidatesCodelet(\n poplar::Graph &graph, poplibs_support::TestDevice &device,\n poplibs_support::DeviceType deviceType, poplar::Type partialsType,\n const std::vector> &candidates, unsigned beamwidth,\n unsigned timestep, bool profile);\n\n} // namespace ctc\n} // namespace poplibs_test\n", "meta": {"hexsha": "ca671a3b83a6d1efb753e23de4373bca96f23f50", "size": 7203, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/popnn/codelets/CTCInferenceRankAndReduceCandidates.cpp", "max_stars_repo_name": "graphcore/poplibs", "max_stars_repo_head_hexsha": "3fe5a3ecafe995eddb72675d1b4a7af8a622009e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 95.0, "max_stars_repo_stars_event_min_datetime": "2020-07-06T17:11:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T14:42:28.000Z", "max_issues_repo_path": "tests/popnn/codelets/CTCInferenceRankAndReduceCandidates.cpp", "max_issues_repo_name": "graphcore/poplibs", "max_issues_repo_head_hexsha": "3fe5a3ecafe995eddb72675d1b4a7af8a622009e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, 
"max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/popnn/codelets/CTCInferenceRankAndReduceCandidates.cpp", "max_forks_repo_name": "graphcore/poplibs", "max_forks_repo_head_hexsha": "3fe5a3ecafe995eddb72675d1b4a7af8a622009e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14.0, "max_forks_repo_forks_event_min_datetime": "2020-07-15T12:32:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-26T14:58:45.000Z", "avg_line_length": 39.5769230769, "max_line_length": 80, "alphanum_fraction": 0.7270581702, "num_tokens": 1655, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.5273165233795671, "lm_q2_score": 0.37754066879814546, "lm_q1q2_score": 0.1990834329050347}} {"text": "// Copyright (c) 2012-2013 The PPCoin developers\n// Distributed under the MIT/X11 software license, see the accompanying\n// file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\n#include \n\n#include \"kernel.h\"\n#include \"db.h\"\n\nusing namespace std;\n\nextern int nStakeMaxAge;\nextern int nStakeTargetSpacing;\n\n// Modifier interval: time to elapse before new modifier is computed\n// Set to 6-hour for production network and 20-minute for test network\nunsigned int nModifierInterval = MODIFIER_INTERVAL;\n\n// Hard checkpoints of stake modifiers to ensure they are deterministic\nstatic std::map mapStakeModifierCheckpoints =\n boost::assign::map_list_of\n ( 0, 0x0e00670bu )\n ;\n\n// Get the last stake modifier and its generation time from a given block\nstatic bool GetLastStakeModifier(const CBlockIndex* pindex, uint64& nStakeModifier, int64& nModifierTime)\n{\n if (!pindex)\n return error(\"GetLastStakeModifier: null pindex\");\n while (pindex && pindex->pprev && !pindex->GeneratedStakeModifier())\n pindex = pindex->pprev;\n if (!pindex->GeneratedStakeModifier())\n return error(\"GetLastStakeModifier: no generation at genesis block\");\n nStakeModifier = pindex->nStakeModifier;\n nModifierTime = 
pindex->GetBlockTime();\n return true;\n}\n\n// Get selection interval section (in seconds)\nstatic int64 GetStakeModifierSelectionIntervalSection(int nSection)\n{\n assert (nSection >= 0 && nSection < 64);\n return (nModifierInterval * 63 / (63 + ((63 - nSection) * (MODIFIER_INTERVAL_RATIO - 1))));\n}\n\n// Get stake modifier selection interval (in seconds)\nstatic int64 GetStakeModifierSelectionInterval()\n{\n int64 nSelectionInterval = 0;\n for (int nSection=0; nSection<64; nSection++)\n nSelectionInterval += GetStakeModifierSelectionIntervalSection(nSection);\n return nSelectionInterval;\n}\n\n// select a block from the candidate blocks in vSortedByTimestamp, excluding\n// already selected blocks in vSelectedBlocks, and with timestamp up to\n// nSelectionIntervalStop.\nstatic bool SelectBlockFromCandidates(\n vector >& vSortedByTimestamp,\n map& mapSelectedBlocks,\n int64 nSelectionIntervalStop, uint64 nStakeModifierPrev,\n const CBlockIndex** pindexSelected)\n{\n bool fSelected = false;\n uint256 hashBest = 0;\n *pindexSelected = (const CBlockIndex*) 0;\n BOOST_FOREACH(const PAIRTYPE(int64, uint256)& item, vSortedByTimestamp)\n {\n if (!mapBlockIndex.count(item.second))\n return error(\"SelectBlockFromCandidates: failed to find block index for candidate block %s\", item.second.ToString().c_str());\n const CBlockIndex* pindex = mapBlockIndex[item.second];\n if (fSelected && pindex->GetBlockTime() > nSelectionIntervalStop)\n break;\n if (mapSelectedBlocks.count(pindex->GetBlockHash()) > 0)\n continue;\n // compute the selection hash by hashing its proof-hash and the\n // previous proof-of-stake modifier\n uint256 hashProof = pindex->IsProofOfStake()? pindex->hashProofOfStake : pindex->GetBlockHash();\n CDataStream ss(SER_GETHASH, 0);\n ss << hashProof << nStakeModifierPrev;\n uint256 hashSelection = Hash(ss.begin(), ss.end());\n // the selection hash is divided by 2**32 so that proof-of-stake block\n // is always favored over proof-of-work block. 
this is to preserve\n // the energy efficiency property\n if (pindex->IsProofOfStake())\n hashSelection >>= 32;\n if (fSelected && hashSelection < hashBest)\n {\n hashBest = hashSelection;\n *pindexSelected = (const CBlockIndex*) pindex;\n }\n else if (!fSelected)\n {\n fSelected = true;\n hashBest = hashSelection;\n *pindexSelected = (const CBlockIndex*) pindex;\n }\n }\n if (fDebug && GetBoolArg(\"-printstakemodifier\"))\n printf(\"SelectBlockFromCandidates: selection hash=%s\\n\", hashBest.ToString().c_str());\n return fSelected;\n}\n\n// Stake Modifier (hash modifier of proof-of-stake):\n// The purpose of stake modifier is to prevent a txout (coin) owner from\n// computing future proof-of-stake generated by this txout at the time\n// of transaction confirmation. To meet kernel protocol, the txout\n// must hash with a future stake modifier to generate the proof.\n// Stake modifier consists of bits each of which is contributed from a\n// selected block of a given block group in the past.\n// The selection of a block is based on a hash of the block's proof-hash and\n// the previous stake modifier.\n// Stake modifier is recomputed at a fixed time interval instead of every \n// block. 
This is to make it difficult for an attacker to gain control of\n// additional bits in the stake modifier, even after generating a chain of\n// blocks.\nbool ComputeNextStakeModifier(const CBlockIndex* pindexPrev, uint64& nStakeModifier, bool& fGeneratedStakeModifier)\n{\n nStakeModifier = 0;\n fGeneratedStakeModifier = false;\n if (!pindexPrev)\n {\n fGeneratedStakeModifier = true;\n return true; // genesis block's modifier is 0\n }\n // First find current stake modifier and its generation block time\n // if it's not old enough, return the same stake modifier\n int64 nModifierTime = 0;\n if (!GetLastStakeModifier(pindexPrev, nStakeModifier, nModifierTime))\n return error(\"ComputeNextStakeModifier: unable to get last modifier\");\n if (fDebug)\n {\n printf(\"ComputeNextStakeModifier: prev modifier=0x%016\"PRI64x\" time=%s\\n\", nStakeModifier, DateTimeStrFormat(nModifierTime).c_str());\n }\n if (nModifierTime / nModifierInterval >= pindexPrev->GetBlockTime() / nModifierInterval)\n return true;\n\n // Sort candidate blocks by timestamp\n vector > vSortedByTimestamp;\n vSortedByTimestamp.reserve(64 * nModifierInterval / nStakeTargetSpacing);\n int64 nSelectionInterval = GetStakeModifierSelectionInterval();\n int64 nSelectionIntervalStart = (pindexPrev->GetBlockTime() / nModifierInterval) * nModifierInterval - nSelectionInterval;\n const CBlockIndex* pindex = pindexPrev;\n while (pindex && pindex->GetBlockTime() >= nSelectionIntervalStart)\n {\n vSortedByTimestamp.push_back(make_pair(pindex->GetBlockTime(), pindex->GetBlockHash()));\n pindex = pindex->pprev;\n }\n int nHeightFirstCandidate = pindex ? 
(pindex->nHeight + 1) : 0;\n reverse(vSortedByTimestamp.begin(), vSortedByTimestamp.end());\n sort(vSortedByTimestamp.begin(), vSortedByTimestamp.end());\n\n // Select 64 blocks from candidate blocks to generate stake modifier\n uint64 nStakeModifierNew = 0;\n int64 nSelectionIntervalStop = nSelectionIntervalStart;\n map mapSelectedBlocks;\n for (int nRound=0; nRoundGetStakeEntropyBit()) << nRound);\n // add the selected block from candidates to selected list\n mapSelectedBlocks.insert(make_pair(pindex->GetBlockHash(), pindex));\n if (fDebug && GetBoolArg(\"-printstakemodifier\"))\n printf(\"ComputeNextStakeModifier: selected round %d stop=%s height=%d bit=%d\\n\",\n nRound, DateTimeStrFormat(nSelectionIntervalStop).c_str(), pindex->nHeight, pindex->GetStakeEntropyBit());\n }\n\n // Print selection map for visualization of the selected blocks\n if (fDebug && GetBoolArg(\"-printstakemodifier\"))\n {\n string strSelectionMap = \"\";\n // '-' indicates proof-of-work blocks not selected\n strSelectionMap.insert(0, pindexPrev->nHeight - nHeightFirstCandidate + 1, '-');\n pindex = pindexPrev;\n while (pindex && pindex->nHeight >= nHeightFirstCandidate)\n {\n // '=' indicates proof-of-stake blocks not selected\n if (pindex->IsProofOfStake())\n strSelectionMap.replace(pindex->nHeight - nHeightFirstCandidate, 1, \"=\");\n pindex = pindex->pprev;\n }\n BOOST_FOREACH(const PAIRTYPE(uint256, const CBlockIndex*)& item, mapSelectedBlocks)\n {\n // 'S' indicates selected proof-of-stake blocks\n // 'W' indicates selected proof-of-work blocks\n strSelectionMap.replace(item.second->nHeight - nHeightFirstCandidate, 1, item.second->IsProofOfStake()? 
\"S\" : \"W\");\n }\n printf(\"ComputeNextStakeModifier: selection height [%d, %d] map %s\\n\", nHeightFirstCandidate, pindexPrev->nHeight, strSelectionMap.c_str());\n }\n if (fDebug)\n {\n printf(\"ComputeNextStakeModifier: new modifier=0x%016\"PRI64x\" time=%s\\n\", nStakeModifierNew, DateTimeStrFormat(pindexPrev->GetBlockTime()).c_str());\n }\n\n nStakeModifier = nStakeModifierNew;\n fGeneratedStakeModifier = true;\n return true;\n}\n\n// The stake modifier used to hash for a stake kernel is chosen as the stake\n// modifier about a selection interval later than the coin generating the kernel\nstatic bool GetKernelStakeModifier(uint256 hashBlockFrom, uint64& nStakeModifier, int& nStakeModifierHeight, int64& nStakeModifierTime, bool fPrintProofOfStake)\n{\n nStakeModifier = 0;\n if (!mapBlockIndex.count(hashBlockFrom))\n return error(\"GetKernelStakeModifier() : block not indexed\");\n const CBlockIndex* pindexFrom = mapBlockIndex[hashBlockFrom];\n nStakeModifierHeight = pindexFrom->nHeight;\n nStakeModifierTime = pindexFrom->GetBlockTime();\n int64 nStakeModifierSelectionInterval = GetStakeModifierSelectionInterval();\n const CBlockIndex* pindex = pindexFrom;\n // loop to find the stake modifier later by a selection interval\n while (nStakeModifierTime < pindexFrom->GetBlockTime() + nStakeModifierSelectionInterval)\n {\n if (!pindex->pnext)\n { // reached best block; may happen if node is behind on block chain\n if (fPrintProofOfStake || (pindex->GetBlockTime() + nStakeMinAge - nStakeModifierSelectionInterval > GetAdjustedTime()))\n return error(\"GetKernelStakeModifier() : reached best block %s at height %d from block %s\",\n pindex->GetBlockHash().ToString().c_str(), pindex->nHeight, hashBlockFrom.ToString().c_str());\n else\n return false;\n }\n pindex = pindex->pnext;\n if (pindex->GeneratedStakeModifier())\n {\n nStakeModifierHeight = pindex->nHeight;\n nStakeModifierTime = pindex->GetBlockTime();\n }\n }\n nStakeModifier = pindex->nStakeModifier;\n return 
true;\n}\n\n// nexus kernel protocol\n// coinstake must meet hash target according to the protocol:\n// kernel (input 0) must meet the formula\n// hash(nStakeModifier + txPrev.block.nTime + txPrev.offset + txPrev.nTime + txPrev.vout.n + nTime) < bnTarget * nCoinDayWeight\n// this ensures that the chance of getting a coinstake is proportional to the\n// amount of coin age one owns.\n// The reason this hash is chosen is the following:\n// nStakeModifier: \n// (v0.3) scrambles computation to make it very difficult to precompute\n// future proof-of-stake at the time of the coin's confirmation\n// (v0.2) nBits (deprecated): encodes all past block timestamps\n// txPrev.block.nTime: prevent nodes from guessing a good timestamp to\n// generate transaction for future advantage\n// txPrev.offset: offset of txPrev inside block, to reduce the chance of \n// nodes generating coinstake at the same time\n// txPrev.nTime: reduce the chance of nodes generating coinstake at the same\n// time\n// txPrev.vout.n: output number of txPrev, to reduce the chance of nodes\n// generating coinstake at the same time\n// block/tx hash should not be used here as they can be generated in vast\n// quantities so as to generate blocks faster, degrading the system back into\n// a proof-of-work situation.\n//\nbool CheckStakeKernelHash(unsigned int nBits, const CBlock& blockFrom, unsigned int nTxPrevOffset, const CTransaction& txPrev, const COutPoint& prevout, unsigned int nTimeTx, uint256& hashProofOfStake, bool fPrintProofOfStake)\n{\n if (nTimeTx < txPrev.nTime) // Transaction timestamp violation\n return error(\"CheckStakeKernelHash() : nTime violation\");\n\n unsigned int nTimeBlockFrom = blockFrom.GetBlockTime();\n if (nTimeBlockFrom + nStakeMinAge > nTimeTx) // Min age requirement\n return error(\"CheckStakeKernelHash() : min age violation\");\n\n CBigNum bnTargetPerCoinDay;\n bnTargetPerCoinDay.SetCompact(nBits);\n int64 nValueIn = txPrev.vout[prevout.n].nValue;\n\n // v0.3 protocol kernel 
hash weight starts from 0 at the 30-day min age\n // this change increases active coins participating the hash and helps\n // to secure the network when proof-of-stake difficulty is low\n int64 nTimeWeight = min((int64)nTimeTx - txPrev.nTime, (int64)nStakeMaxAge) - nStakeMinAge;\n CBigNum bnCoinDayWeight = CBigNum(nValueIn) * nTimeWeight / COIN / (24 * 60 * 60);\n\n // Calculate hash\n CDataStream ss(SER_GETHASH, 0);\n uint64 nStakeModifier = 0;\n int nStakeModifierHeight = 0;\n int64 nStakeModifierTime = 0;\n\n if (!GetKernelStakeModifier(blockFrom.GetHash(), nStakeModifier, nStakeModifierHeight, nStakeModifierTime, fPrintProofOfStake))\n return false;\n ss << nStakeModifier;\n\n ss << nTimeBlockFrom << nTxPrevOffset << txPrev.nTime << prevout.n << nTimeTx;\n hashProofOfStake = Hash(ss.begin(), ss.end());\n if (fPrintProofOfStake)\n {\n printf(\"CheckStakeKernelHash() : using modifier 0x%016\"PRI64x\" at height=%d timestamp=%s for block from height=%d timestamp=%s\\n\",\n nStakeModifier, nStakeModifierHeight,\n DateTimeStrFormat(nStakeModifierTime).c_str(),\n mapBlockIndex[blockFrom.GetHash()]->nHeight,\n DateTimeStrFormat(blockFrom.GetBlockTime()).c_str());\n printf(\"CheckStakeKernelHash() : check protocol=%s modifier=0x%016\"PRI64x\" nTimeBlockFrom=%u nTxPrevOffset=%u nTimeTxPrev=%u nPrevout=%u nTimeTx=%u hashProof=%s\\n\",\n \"0.3\",\n nStakeModifier,\n nTimeBlockFrom, nTxPrevOffset, txPrev.nTime, prevout.n, nTimeTx,\n hashProofOfStake.ToString().c_str());\n }\n\n // Now check if proof-of-stake hash meets target protocol\n if (CBigNum(hashProofOfStake) > bnCoinDayWeight * bnTargetPerCoinDay)\n return false;\n if (fDebug && !fPrintProofOfStake)\n {\n printf(\"CheckStakeKernelHash() : using modifier 0x%016\"PRI64x\" at height=%d timestamp=%s for block from height=%d timestamp=%s\\n\",\n nStakeModifier, nStakeModifierHeight, \n DateTimeStrFormat(nStakeModifierTime).c_str(),\n mapBlockIndex[blockFrom.GetHash()]->nHeight,\n 
DateTimeStrFormat(blockFrom.GetBlockTime()).c_str());\n printf(\"CheckStakeKernelHash() : pass protocol=%s modifier=0x%016\"PRI64x\" nTimeBlockFrom=%u nTxPrevOffset=%u nTimeTxPrev=%u nPrevout=%u nTimeTx=%u hashProof=%s\\n\",\n \"0.3\",\n nStakeModifier,\n nTimeBlockFrom, nTxPrevOffset, txPrev.nTime, prevout.n, nTimeTx,\n hashProofOfStake.ToString().c_str());\n }\n return true;\n}\n\n// Check kernel hash target and coinstake signature\nbool CheckProofOfStake(const CTransaction& tx, unsigned int nBits, uint256& hashProofOfStake)\n{\n if (!tx.IsCoinStake())\n return error(\"CheckProofOfStake() : called on non-coinstake %s\", tx.GetHash().ToString().c_str());\n\n // Kernel (input 0) must match the stake hash target per coin age (nBits)\n const CTxIn& txin = tx.vin[0];\n\n // First try finding the previous transaction in database\n CTxDB txdb(\"r\");\n CTransaction txPrev;\n CTxIndex txindex;\n if (!txPrev.ReadFromDisk(txdb, txin.prevout, txindex))\n return tx.DoS(1, error(\"CheckProofOfStake() : INFO: read txPrev failed\")); // previous transaction not in main chain, may occur during initial download\n txdb.Close();\n\n // Verify signature\n if (!VerifySignature(txPrev, tx, 0, true, 0))\n return tx.DoS(100, error(\"CheckProofOfStake() : VerifySignature failed on coinstake %s\", tx.GetHash().ToString().c_str()));\n\n // Read block header\n CBlock block;\n if (!block.ReadFromDisk(txindex.pos.nFile, txindex.pos.nBlockPos, false))\n return fDebug? 
error(\"CheckProofOfStake() : read block failed\") : false; // unable to read block of previous transaction\n\n if (!CheckStakeKernelHash(nBits, block, txindex.pos.nTxPos - txindex.pos.nBlockPos, txPrev, txin.prevout, tx.nTime, hashProofOfStake, fDebug))\n return tx.DoS(1, error(\"CheckProofOfStake() : INFO: check kernel failed on coinstake %s, hashProof=%s\", tx.GetHash().ToString().c_str(), hashProofOfStake.ToString().c_str())); // may occur during initial download or if behind on block chain sync\n\n return true;\n}\n\n// Check whether the coinstake timestamp meets protocol\nbool CheckCoinStakeTimestamp(int64 nTimeBlock, int64 nTimeTx)\n{\n // v0.3 protocol\n return (nTimeBlock == nTimeTx);\n}\n\n// Get stake modifier checksum\nunsigned int GetStakeModifierChecksum(const CBlockIndex* pindex)\n{\n assert (pindex->pprev || pindex->GetBlockHash() == (!fTestNet ? hashGenesisBlock : hashGenesisBlockTestNet));\n // Hash previous checksum with flags, hashProofOfStake and nStakeModifier\n CDataStream ss(SER_GETHASH, 0);\n if (pindex->pprev)\n ss << pindex->pprev->nStakeModifierChecksum;\n ss << pindex->nFlags << pindex->hashProofOfStake << pindex->nStakeModifier;\n uint256 hashChecksum = Hash(ss.begin(), ss.end());\n hashChecksum >>= (256 - 32);\n return hashChecksum.Get64();\n}\n\n// Check stake modifier hard checkpoints\nbool CheckStakeModifierCheckpoints(int nHeight, unsigned int nStakeModifierChecksum)\n{\n if (fTestNet) return true; // Testnet has no checkpoints\n if (mapStakeModifierCheckpoints.count(nHeight))\n return nStakeModifierChecksum == mapStakeModifierCheckpoints[nHeight];\n return true;\n}\n", "meta": {"hexsha": "4f364df39d8b2c9665d93d0ccef44593663f4a8c", "size": 18533, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/kernel.cpp", "max_stars_repo_name": "Skryptex/Nexus-Proof-of-Stake-Coin", "max_stars_repo_head_hexsha": "4aa4d86adcda4c10fa2fdc5139632f2a264ea08c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, 
"max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/kernel.cpp", "max_issues_repo_name": "Skryptex/Nexus-Proof-of-Stake-Coin", "max_issues_repo_head_hexsha": "4aa4d86adcda4c10fa2fdc5139632f2a264ea08c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/kernel.cpp", "max_forks_repo_name": "Skryptex/Nexus-Proof-of-Stake-Coin", "max_forks_repo_head_hexsha": "4aa4d86adcda4c10fa2fdc5139632f2a264ea08c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.8888888889, "max_line_length": 253, "alphanum_fraction": 0.7015593806, "num_tokens": 4716, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.5273165233795671, "lm_q2_score": 0.3775406617944891, "lm_q1q2_score": 0.19908342921189096}} {"text": "// __BEGIN_LICENSE__\n// Copyright (c) 2006-2013, United States Government as represented by the\n// Administrator of the National Aeronautics and Space Administration. All\n// rights reserved.\n//\n// The NASA Vision Workbench is licensed under the Apache License,\n// Version 2.0 (the \"License\"); you may not use this file except in\n// compliance with the License. 
You may obtain a copy of the License at\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n// __END_LICENSE__\n\n\n#include \n#include \n\nusing namespace vw;\n\nVector3 cartography::GeodeticToCartesian::operator()( Vector3 const& v ) const {\n if ( boost::math::isnan(v[2]) )\n return Vector3();\n return m_datum.geodetic_to_cartesian(v);\n}\n\nVector3 cartography::CartesianToGeodetic::operator()( Vector3 const& v ) const {\n if ( v == Vector3() )\n return Vector3(0,0,std::numeric_limits::quiet_NaN());\n return m_datum.cartesian_to_geodetic(v);\n}\n\nVector3 cartography::GeodeticToProjection::operator()( Vector3 const& v ) const {\n if ( boost::math::isnan(v[2]) )\n return v;\n Vector2 pix = m_reference.lonlat_to_pixel( subvector(v, 0, 2) );\n return Vector3( pix[0], pix[1], v[2] );\n}\n\nVector3 cartography::ProjectionToGeodetic::operator()( Vector3 const& v ) const {\n Vector2 ll = m_reference.pixel_to_lonlat( subvector( v, 0, 2 ) );\n return Vector3( ll[0], ll[1], v[2] );\n}\n\nVector3 cartography::GeodeticToPoint::operator()( Vector3 const& v ) const {\n if ( boost::math::isnan(v[2]) )\n return v;\n Vector2 pix = m_reference.lonlat_to_point( subvector(v, 0, 2) );\n return Vector3( pix[0], pix[1], v[2] );\n}\n\nVector3 cartography::PointToGeodetic::operator()( Vector3 const& v ) const {\n Vector2 ll = m_reference.point_to_lonlat( subvector( v, 0, 2 ) );\n return Vector3( ll[0], ll[1], v[2] );\n}\n", "meta": {"hexsha": "e925f49edbcfa4c32a5e99f2700ba464212b59eb", "size": 2191, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/vw/Cartography/PointImageManipulation.cc", "max_stars_repo_name": "maxerbubba/visionworkbench", 
"max_stars_repo_head_hexsha": "b06ba0597cd3864bb44ca52671966ca580c02af1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 318.0, "max_stars_repo_stars_event_min_datetime": "2015-01-02T16:37:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T07:12:20.000Z", "max_issues_repo_path": "src/vw/Cartography/PointImageManipulation.cc", "max_issues_repo_name": "maxerbubba/visionworkbench", "max_issues_repo_head_hexsha": "b06ba0597cd3864bb44ca52671966ca580c02af1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 39.0, "max_issues_repo_issues_event_min_datetime": "2015-07-30T22:22:42.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-23T16:11:55.000Z", "max_forks_repo_path": "src/vw/Cartography/PointImageManipulation.cc", "max_forks_repo_name": "maxerbubba/visionworkbench", "max_forks_repo_head_hexsha": "b06ba0597cd3864bb44ca52671966ca580c02af1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 135.0, "max_forks_repo_forks_event_min_datetime": "2015-01-19T00:57:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T13:51:40.000Z", "avg_line_length": 37.1355932203, "max_line_length": 81, "alphanum_fraction": 0.7161113647, "num_tokens": 620, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
NO", "lm_q1_score": 0.5273165233795671, "lm_q2_score": 0.37754065479083276, "lm_q1q2_score": 0.19908342551874725}} {"text": "#include \"blob.h\"\n#include \"adafFunctions.h\"\n#include \"globalVariables.h\"\n#include \"State.h\"\n#include \n#include \n#include \n\nvoid blob(State& st)\n{\n\tfactorDensity = GlobalConfig.get(\"factorDensity\");\n\t//tAccBlob = GlobalConfig.get(\"tAccBlob\"); // [lightcurve variability time in sec]\n\t//double tFlare = 5000.0;\n\tdouble sumt = 0.0;\n\tdouble pasoRaux = pow(100.0,1.0/1000);\n\trBlob = schwRadius*1.1;\n\twhile ( sumt < timeAfterFlare) {\n\t\tdouble dr = rBlob*(pasoRaux-1.0);\n\t\tsumt += dr / (-radialVel(rBlob));\n\t\trBlob *= pasoRaux;\n\t}\n\tstd::cout << \"rBlob = \" << rBlob/schwRadius << endl;\n}", "meta": {"hexsha": "0ebc9a767eb419549e21273ffbb971436bb48367", "size": 666, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/adaf/blob.cpp", "max_stars_repo_name": "eduardomgutierrez/RIAF_radproc", "max_stars_repo_head_hexsha": "0e4166f04cce27fed2cbd2c7078023c10e0e8d12", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-08-30T06:56:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-30T06:56:03.000Z", "max_issues_repo_path": "src/adaf/blob.cpp", "max_issues_repo_name": "eduardomgutierrez/RIAF_radproc", "max_issues_repo_head_hexsha": "0e4166f04cce27fed2cbd2c7078023c10e0e8d12", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/adaf/blob.cpp", "max_forks_repo_name": "eduardomgutierrez/RIAF_radproc", "max_forks_repo_head_hexsha": "0e4166f04cce27fed2cbd2c7078023c10e0e8d12", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9565217391, "max_line_length": 94, "alphanum_fraction": 
0.6876876877, "num_tokens": 209, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.6619228758499942, "lm_q2_score": 0.30074557267388247, "lm_q1q2_score": 0.1990703743634497}} {"text": "/* Copyright (c) 2010-2019, Delft University of Technology\n * All rigths reserved\n *\n * This file is part of the Tudat. Redistribution and use in source and\n * binary forms, with or without modification, are permitted exclusively\n * under the terms of the Modified BSD license. You should have received\n * a copy of the license with this file. If not, please or visit:\n * http://tudat.tudelft.nl/LICENSE.\n */\n\n#define BOOST_TEST_DYN_LINK\n#define BOOST_TEST_MAIN\n\n\n#include \n\n#include \n\n#include \"tudat/simulation/estimation_setup/orbitDeterminationTestCases.h\"\n\n\nnamespace tudat\n{\nnamespace unit_tests\n{\nBOOST_AUTO_TEST_SUITE( test_estimation_from_positions )\n\n\n\n//! This test checks, for double states/observables and Time time, if the orbit determination correctly converges\n//! 
when simulating data, perturbing the dynamical parameters, and then retrieving the original parameters\nBOOST_AUTO_TEST_CASE( test_EstimationFromPosition )\n{\n for( int simulationType = 0; simulationType < 4; simulationType++ )\n {\n std::cout << \"=============================================== Running Case: \" << simulationType << std::endl;\n\n // Simulate estimated parameter error.\n Eigen::VectorXd totalError;\n\n totalError = executePlanetaryParameterEstimation< Time, double >( simulationType ).second;\n\n // Adjust tolerance based on simulation settings\n double toleranceMultiplier = 20.0;\n\n\n // Check error.\n for( unsigned int j = 0; j < 3; j++ )\n {\n BOOST_CHECK_SMALL( totalError( j ), toleranceMultiplier * 5.0E-3 );\n }\n\n for( unsigned int j = 0; j < 3; j++ )\n {\n BOOST_CHECK_SMALL( totalError( j + 3 ), toleranceMultiplier * 1.0E-7 );\n }\n\n BOOST_CHECK_SMALL( totalError( 6 ), toleranceMultiplier * 1.0E3 );\n std::cout << totalError.transpose( ) << std::endl;\n }\n}\n\nBOOST_AUTO_TEST_SUITE_END( )\n\n}\n\n}\n\n\n", "meta": {"hexsha": "2960c644144c52018ccbd47dd424e97be84c1d84", "size": 1978, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/src/astro/orbit_determination/unitTestEstimationFromIdealDataTimeDouble.cpp", "max_stars_repo_name": "kimonito98/tudat", "max_stars_repo_head_hexsha": "c28f2a3e78b8492e2e054ad5e0d1f9ad785cd092", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/src/astro/orbit_determination/unitTestEstimationFromIdealDataTimeDouble.cpp", "max_issues_repo_name": "kimonito98/tudat", "max_issues_repo_head_hexsha": "c28f2a3e78b8492e2e054ad5e0d1f9ad785cd092", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": 
"tests/src/astro/orbit_determination/unitTestEstimationFromIdealDataTimeDouble.cpp", "max_forks_repo_name": "kimonito98/tudat", "max_forks_repo_head_hexsha": "c28f2a3e78b8492e2e054ad5e0d1f9ad785cd092", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2571428571, "max_line_length": 117, "alphanum_fraction": 0.6648129424, "num_tokens": 469, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.5428632831725053, "lm_q2_score": 0.3665897501624599, "lm_q1q2_score": 0.19900811535058147}} {"text": "#pragma once\n\n#include \n\n#include \n\n#include \"abstract_cardinality_estimator.hpp\"\n#include \"operators/operator_scan_predicate.hpp\"\n#include \"statistics/statistics_objects/abstract_histogram.hpp\"\n#include \"statistics/statistics_objects/generic_histogram_builder.hpp\"\n\nnamespace opossum {\n\ntemplate \nclass AbstractHistogram;\ntemplate \nclass GenericHistogram;\ntemplate \nclass AttributeStatistics;\nclass AliasNode;\nclass ProjectionNode;\nclass AggregateNode;\nclass ValidateNode;\nclass PredicateNode;\nclass JoinNode;\nclass UnionNode;\nclass LimitNode;\n\n/**\n * Hyrise's default, statistics-based cardinality estimator\n */\nclass CardinalityEstimator : public AbstractCardinalityEstimator {\n public:\n std::shared_ptr new_instance() const override;\n\n Cardinality estimate_cardinality(const std::shared_ptr& lqp) const override;\n std::shared_ptr estimate_statistics(const std::shared_ptr& lqp) const;\n\n /**\n * Per-node-type estimation functions\n * @{\n */\n static std::shared_ptr estimate_alias_node(\n const AliasNode& alias_node, const std::shared_ptr& input_table_statistics);\n\n static std::shared_ptr estimate_projection_node(\n const ProjectionNode& projection_node, const std::shared_ptr& input_table_statistics);\n\n static std::shared_ptr estimate_aggregate_node(\n const AggregateNode& 
aggregate_node, const std::shared_ptr& input_table_statistics);\n\n static std::shared_ptr estimate_validate_node(\n const ValidateNode& validate_node, const std::shared_ptr& input_table_statistics);\n\n static std::shared_ptr estimate_predicate_node(\n const PredicateNode& predicate_node, const std::shared_ptr& input_table_statistics);\n\n static std::shared_ptr estimate_join_node(\n const JoinNode& join_node, const std::shared_ptr& left_input_table_statistics,\n const std::shared_ptr& right_input_table_statistics);\n\n static std::shared_ptr estimate_union_node(\n const UnionNode& union_node, const std::shared_ptr& left_input_table_statistics,\n const std::shared_ptr& right_input_table_statistics);\n\n static std::shared_ptr estimate_limit_node(\n const LimitNode& limit_node, const std::shared_ptr& input_table_statistics);\n /** @} */\n\n /**\n * Filter estimations\n * @{\n */\n\n /**\n * Estimate a simple scanning predicate. This function analyses the given predicate and dispatches the actual\n * estimation algorithm.\n */\n static std::shared_ptr estimate_operator_scan_predicate(\n const std::shared_ptr& input_table_statistics, const OperatorScanPredicate& predicate);\n\n /**\n * Estimation of an equi scan between two histograms. 
Estimating equi scans without correlation information is\n * impossible, so this function is restricted to computing an upper bound of the resulting histogram.\n */\n template \n static std::shared_ptr> estimate_column_vs_column_equi_scan_with_histograms(\n const AbstractHistogram& left_histogram, const AbstractHistogram& right_histogram) {\n /**\n * Column-to-column scan estimation is notoriously hard, selectivities from 0 to 1 are possible for the same histogram\n * pairs.\n * Thus, we do the most conservative estimation and compute the upper bound of value- and distinct counts for each\n * bin pair.\n */\n\n auto left_idx = BinID{0};\n auto right_idx = BinID{0};\n auto left_bin_count = left_histogram.bin_count();\n auto right_bin_count = right_histogram.bin_count();\n\n GenericHistogramBuilder builder;\n\n for (; left_idx < left_bin_count && right_idx < right_bin_count;) {\n const auto& left_min = left_histogram.bin_minimum(left_idx);\n const auto& right_min = right_histogram.bin_minimum(right_idx);\n\n if (left_min < right_min) {\n ++left_idx;\n continue;\n }\n\n if (right_min < left_min) {\n ++right_idx;\n continue;\n }\n\n DebugAssert(left_histogram.bin_maximum(left_idx) == right_histogram.bin_maximum(right_idx),\n \"Histogram bin boundaries do not match\");\n\n const auto height = std::min(left_histogram.bin_height(left_idx), right_histogram.bin_height(right_idx));\n const auto distinct_count =\n std::min(left_histogram.bin_distinct_count(left_idx), right_histogram.bin_distinct_count(right_idx));\n\n if (height > 0 && distinct_count > 0) {\n builder.add_bin(left_min, left_histogram.bin_maximum(left_idx), height, distinct_count);\n }\n\n ++left_idx;\n ++right_idx;\n }\n\n if (builder.empty()) {\n return nullptr;\n }\n\n return builder.build();\n }\n /** @} */\n\n /**\n * Join estimations\n * @{\n */\n static std::shared_ptr estimate_inner_equi_join(const ColumnID left_column_id,\n const ColumnID right_column_id,\n const TableStatistics& 
left_input_table_statistics,\n const TableStatistics& right_input_table_statistics);\n\n static std::shared_ptr estimate_semi_join(const ColumnID left_column_id,\n const ColumnID right_column_id,\n const TableStatistics& left_input_table_statistics,\n const TableStatistics& right_input_table_statistics);\n\n static std::shared_ptr estimate_cross_join(const TableStatistics& left_input_table_statistics,\n const TableStatistics& right_input_table_statistics);\n\n template \n static std::shared_ptr> estimate_inner_equi_join_with_histograms(\n const AbstractHistogram& left_histogram, const AbstractHistogram& right_histogram) {\n /**\n * left_histogram and right_histogram are turned into \"unified\" histograms by `split_at_bin_bounds`, meaning that\n * their bins are split so that their bin boundaries match.\n * E.g., if left_histogram has a single bin [1, 10] and right histogram has a single bin [5, 20] then\n * unified_left_histogram == {[1, 4], [5, 10]}\n * unified_right_histogram == {[5, 10], [11, 20]}\n * The estimation is performed on overlapping bins only, e.g., only the two bins [5, 10] will produce matches.\n */\n\n auto unified_left_histogram = left_histogram.split_at_bin_bounds(right_histogram.bin_bounds());\n auto unified_right_histogram = right_histogram.split_at_bin_bounds(left_histogram.bin_bounds());\n\n auto left_idx = BinID{0};\n auto right_idx = BinID{0};\n auto left_bin_count = unified_left_histogram->bin_count();\n auto right_bin_count = unified_right_histogram->bin_count();\n\n GenericHistogramBuilder builder;\n\n // Iterate over both unified histograms and find overlapping bins\n for (; left_idx < left_bin_count && right_idx < right_bin_count;) {\n const auto& left_min = unified_left_histogram->bin_minimum(left_idx);\n const auto& right_min = unified_right_histogram->bin_minimum(right_idx);\n\n if (left_min < right_min) {\n ++left_idx;\n continue;\n }\n\n if (right_min < left_min) {\n ++right_idx;\n continue;\n }\n\n 
DebugAssert(unified_left_histogram->bin_maximum(left_idx) == unified_right_histogram->bin_maximum(right_idx),\n \"Histogram bin boundaries do not match\");\n\n // Overlapping bins found, estimate the join for these bins' range\n const auto [height, distinct_count] = estimate_inner_equi_join_of_bins( // NOLINT\n unified_left_histogram->bin_height(left_idx), unified_left_histogram->bin_distinct_count(left_idx),\n unified_right_histogram->bin_height(right_idx), unified_right_histogram->bin_distinct_count(right_idx));\n\n if (height > 0) {\n builder.add_bin(left_min, unified_left_histogram->bin_maximum(left_idx), height, distinct_count);\n }\n\n ++left_idx;\n ++right_idx;\n }\n\n return builder.build();\n }\n\n /**\n * Given two HistogramBins with equal bounds and the specified height and distinct counts, estimate the number of\n * matches and distinct values for an equi-inner join of these two bins using a principle-of-inclusion estimation.\n * @return {estimated_height, estimated_distinct_count}\n */\n static std::pair estimate_inner_equi_join_of_bins(\n const float left_height, const float left_distinct_count, const float right_height,\n const float right_distinct_count);\n\n /** @} */\n\n /**\n * Helper\n * @{\n */\n static std::shared_ptr prune_column_statistics(\n const std::shared_ptr& table_statistics, const std::vector& pruned_column_ids);\n\n /** @} */\n};\n} // namespace opossum\n", "meta": {"hexsha": "2f36aea24f6159d6b587ae44723cf3413630b680", "size": 9398, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/lib/statistics/cardinality_estimator.hpp", "max_stars_repo_name": "mrcl-tst/hyrise", "max_stars_repo_head_hexsha": "eec50b39de9f530b0a1732ceb5822b7222f3fe17", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 583.0, "max_stars_repo_stars_event_min_datetime": "2015-01-10T00:55:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T12:24:30.000Z", "max_issues_repo_path": "src/lib/statistics/cardinality_estimator.hpp", 
"max_issues_repo_name": "mrcl-tst/hyrise", "max_issues_repo_head_hexsha": "eec50b39de9f530b0a1732ceb5822b7222f3fe17", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1573.0, "max_issues_repo_issues_event_min_datetime": "2015-01-07T15:47:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T11:48:03.000Z", "max_forks_repo_path": "src/lib/statistics/cardinality_estimator.hpp", "max_forks_repo_name": "mrcl-tst/hyrise", "max_forks_repo_head_hexsha": "eec50b39de9f530b0a1732ceb5822b7222f3fe17", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 145.0, "max_forks_repo_forks_event_min_datetime": "2015-03-09T16:26:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T12:53:23.000Z", "avg_line_length": 40.1623931624, "max_line_length": 122, "alphanum_fraction": 0.7097254735, "num_tokens": 1927, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.542863297964157, "lm_q2_score": 0.36658973632215985, "lm_q1q2_score": 0.19900811325965842}} {"text": "// --------------------------------------------------------------------------\n// OpenMS -- Open-Source Mass Spectrometry\n// --------------------------------------------------------------------------\n// Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,\n// ETH Zurich, and Freie Universitaet Berlin 2002-2015.\n//\n// This software is released under a three-clause BSD license:\n// * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n// * Redistributions in binary form must reproduce the above copyright\n// notice, this list of conditions and the following disclaimer in the\n// documentation and/or other materials provided with the distribution.\n// * Neither the name of any author or any participating institution\n// may be used to endorse or promote products derived from this software\n// without specific prior written permission.\n// For a full list of authors, refer to the file 
AUTHORS.\n// --------------------------------------------------------------------------\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n// ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING\n// INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\n// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// --------------------------------------------------------------------------\n// $Maintainer: Clemens Groepl $\n// $Authors: $\n// --------------------------------------------------------------------------\n\n#include \n#include \n#include \n#include \n#include \n\n#include \n\nnamespace OpenMS\n{\n int EmgFitter1D::EgmFitterFunctor::operator()(const Eigen::VectorXd& x, Eigen::VectorXd& fvec)\n {\n Size n = m_data->n;\n EmgFitter1D::RawDataArrayType set = m_data->set;\n\n EmgFitter1D::CoordinateType h = x(0);\n EmgFitter1D::CoordinateType w = x(1);\n EmgFitter1D::CoordinateType s = x(2);\n EmgFitter1D::CoordinateType z = x(3);\n\n EmgFitter1D::CoordinateType Yi = 0.0;\n\n // iterate over all points of the signal\n for (Size i = 0; i < n; i++)\n {\n double t = set[i].getPos();\n\n // Simplified EMG\n Yi = (h * w / s) * sqrt(2.0 * Constants::PI) * exp((pow(w, 2) / (2 * pow(s, 2))) - ((t - z) / s)) / (1 + exp((-2.4055 / sqrt(2.0)) * (((t - z) / w) - w / s)));\n\n fvec(i) = Yi - set[i].getIntensity();\n }\n return 0;\n }\n\n // compute Jacobian matrix for the different 
parameters\n int EmgFitter1D::EgmFitterFunctor::df(const Eigen::VectorXd& x, Eigen::MatrixXd& J)\n {\n Size n = m_data->n;\n EmgFitter1D::RawDataArrayType set = m_data->set;\n\n EmgFitter1D::CoordinateType h = x(0);\n EmgFitter1D::CoordinateType w = x(1);\n EmgFitter1D::CoordinateType s = x(2);\n EmgFitter1D::CoordinateType z = x(3);\n\n const EmgFitter1D::CoordinateType emg_const = 2.4055;\n const EmgFitter1D::CoordinateType sqrt_2pi = sqrt(2 * Constants::PI);\n const EmgFitter1D::CoordinateType sqrt_2 = sqrt(2.0);\n\n EmgFitter1D::CoordinateType exp1, exp2, exp3 = 0.0;\n EmgFitter1D::CoordinateType derivative_height, derivative_width, derivative_symmetry, derivative_retention = 0.0;\n\n // iterate over all points of the signal\n for (Size i = 0; i < n; i++)\n {\n EmgFitter1D::CoordinateType t = set[i].getPos();\n\n exp1 = exp(((w * w) / (2 * s * s)) - ((t - z) / s));\n exp2 = (1 + exp((-emg_const / sqrt_2) * (((t - z) / w) - w / s)));\n exp3 = exp((-emg_const / sqrt_2) * (((t - z) / w) - w / s));\n\n // f'(h)\n derivative_height = w / s * sqrt_2pi * exp1 / exp2;\n\n // f'(h)\n derivative_width = h / s * sqrt_2pi * exp1 / exp2 + (h * w * w) / (s * s * s) * sqrt_2pi * exp1 / exp2 + (emg_const * h * w) / s * sqrt_2pi * exp1 * (-(t - z) / (w * w) - 1 / s) * exp3 / ((exp2 * exp2) * sqrt_2);\n\n // f'(s)\n derivative_symmetry = -h * w / (s * s) * sqrt_2pi * exp1 / exp2 + h * w / s * sqrt_2pi * (-(w * w) / (s * s * s) + (t - z) / (s * s)) * exp1 / exp2 + (emg_const * h * w * w) / (s * s * s) * sqrt_2pi * exp1 * exp3 / ((exp2 * exp2) * sqrt_2);\n\n // f'(z)\n derivative_retention = h * w / (s * s) * sqrt_2pi * exp1 / exp2 - (emg_const * h) / s * sqrt_2pi * exp1 * exp3 / ((exp2 * exp2) * sqrt_2);\n\n // set the jacobian matrix\n J(i, 0) = derivative_height;\n J(i, 1) = derivative_width;\n J(i, 2) = derivative_symmetry;\n J(i, 3) = derivative_retention;\n }\n return 0;\n }\n\n EmgFitter1D::EmgFitter1D() :\n LevMarqFitter1D()\n {\n setName(getProductName());\n 
defaults_.setValue(\"statistics:variance\", 1.0, \"Variance of the model.\", ListUtils::create(\"advanced\"));\n defaultsToParam_();\n }\n\n EmgFitter1D::EmgFitter1D(const EmgFitter1D& source) :\n LevMarqFitter1D(source)\n {\n setParameters(source.getParameters());\n updateMembers_();\n }\n\n EmgFitter1D::~EmgFitter1D()\n {\n }\n\n EmgFitter1D& EmgFitter1D::operator=(const EmgFitter1D& source)\n {\n if (&source == this)\n return *this;\n\n LevMarqFitter1D::operator=(source);\n setParameters(source.getParameters());\n updateMembers_();\n\n return *this;\n }\n\n EmgFitter1D::QualityType EmgFitter1D::fit1d(const RawDataArrayType& set, InterpolationModel*& model)\n {\n // Calculate bounding box\n CoordinateType min_bb = set[0].getPos(), max_bb = set[0].getPos();\n for (Size pos = 1; pos < set.size(); ++pos)\n {\n CoordinateType tmp = set[pos].getPos();\n if (min_bb > tmp)\n min_bb = tmp;\n if (max_bb < tmp)\n max_bb = tmp;\n }\n\n // Enlarge the bounding box by a few multiples of the standard deviation\n const CoordinateType stdev = sqrt(statistics_.variance()) * tolerance_stdev_box_;\n min_bb -= stdev;\n max_bb += stdev;\n\n\n // Set advanced parameters for residual_ und jacobian_ method\n EmgFitter1D::Data d;\n d.n = set.size();\n d.set = set;\n\n // Compute start parameters\n setInitialParameters_(set);\n\n // Optimize parameter with Levenberg-Marquardt algorithm\n// CoordinateType x_init[4] = { height_, width_, symmetry_, retention_ };\n Eigen::VectorXd x_init(4);\n x_init(0) = height_;\n x_init(1) = width_;\n x_init(2) = symmetry_;\n x_init(3) = retention_;\n if (symmetric_ == false)\n {\n EgmFitterFunctor functor(4, &d);\n optimize_(x_init, functor);\n }\n\n // Set optimized parameters\n height_ = x_init[0];\n width_ = x_init[1];\n symmetry_ = x_init[2];\n retention_ = x_init[3];\n\n#ifdef DEBUG_FEATUREFINDER\n if (getGslStatus_() != \"success\")\n {\n std::cout << \"status: \" << getGslStatus_() << std::endl;\n }\n#endif\n\n // build model\n model = 
static_cast(Factory >::create(\"EmgModel\"));\n model->setInterpolationStep(interpolation_step_);\n\n Param tmp;\n tmp.setValue(\"bounding_box:min\", min_bb);\n tmp.setValue(\"bounding_box:max\", max_bb);\n tmp.setValue(\"statistics:variance\", statistics_.variance());\n tmp.setValue(\"statistics:mean\", statistics_.mean());\n tmp.setValue(\"emg:height\", height_);\n tmp.setValue(\"emg:width\", width_);\n tmp.setValue(\"emg:symmetry\", symmetry_);\n tmp.setValue(\"emg:retention\", retention_);\n model->setParameters(tmp);\n\n\n // calculate pearson correlation\n std::vector real_data;\n real_data.reserve(set.size());\n std::vector model_data;\n model_data.reserve(set.size());\n\n for (Size i = 0; i < set.size(); ++i)\n {\n real_data.push_back(set[i].getIntensity());\n model_data.push_back(model->getIntensity(DPosition<1>(set[i].getPosition())));\n }\n\n QualityType correlation = Math::pearsonCorrelationCoefficient(real_data.begin(), real_data.end(), model_data.begin(), model_data.end());\n if (boost::math::isnan(correlation))\n correlation = -1.0;\n\n return correlation;\n }\n\n void EmgFitter1D::setInitialParameters_(const RawDataArrayType& set)\n {\n // sum over all intensities\n CoordinateType sum = 0.0;\n for (Size i = 0; i < set.size(); ++i)\n sum += set[i].getIntensity();\n\n // calculate the median\n Size median = 0;\n float count = 0.0;\n for (Size i = 0; i < set.size(); ++i)\n {\n count += set[i].getIntensity();\n if (count <= sum / 2)\n median = i;\n }\n\n // calculate the height of the peak\n height_ = set[median].getIntensity();\n\n // calculate retention time\n retention_ = set[median].getPos();\n\n // default is an asymmetric peak\n symmetric_ = false;\n\n // calculate the symmetry (fronted peak: s<1 , tailed peak: s>1)\n symmetry_ = fabs(set[set.size() - 1].getPos() - set[median].getPos()) / fabs(set[median].getPos() - set[0].getPos());\n\n // check the symmetry\n if (boost::math::isinf(symmetry_) || boost::math::isnan(symmetry_))\n {\n symmetric_ = 
true;\n symmetry_ = 10;\n }\n\n // optimize the symmetry\n // The computations can lead to an overflow error at very low values of symmetry (s~0).\n // For s~5 the parameter can be approximated by the Levenberg-Marquardt algorithms.\n // (the other parameters are much greater than one)\n if (symmetry_ < 1)\n symmetry_ += 5;\n\n // calculate the width of the peak\n // rt-values with intensity zero are not allowed for calculation of the width\n // normally: width_ = fabs( set[set.size() - 1].getPos() - set[0].getPos() );\n // but its better for the emg function to proceed from narrow peaks\n width_ = symmetry_;\n }\n\n void EmgFitter1D::updateMembers_()\n {\n LevMarqFitter1D::updateMembers_();\n statistics_.setVariance(param_.getValue(\"statistics:variance\"));\n }\n\n}\n", "meta": {"hexsha": "fd7fd650cec18f6fc1381153c274371e6d2fd576", "size": 10382, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/openms/source/TRANSFORMATIONS/FEATUREFINDER/EmgFitter1D.cpp", "max_stars_repo_name": "tomas-pluskal/openms", "max_stars_repo_head_hexsha": "136ec9057435f6d45d65a8e1465b2a6cff9621a8", "max_stars_repo_licenses": ["Zlib", "Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/openms/source/TRANSFORMATIONS/FEATUREFINDER/EmgFitter1D.cpp", "max_issues_repo_name": "tomas-pluskal/openms", "max_issues_repo_head_hexsha": "136ec9057435f6d45d65a8e1465b2a6cff9621a8", "max_issues_repo_licenses": ["Zlib", "Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/openms/source/TRANSFORMATIONS/FEATUREFINDER/EmgFitter1D.cpp", "max_forks_repo_name": "tomas-pluskal/openms", "max_forks_repo_head_hexsha": "136ec9057435f6d45d65a8e1465b2a6cff9621a8", "max_forks_repo_licenses": ["Zlib", "Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": 
null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3129251701, "max_line_length": 246, "alphanum_fraction": 0.6230013485, "num_tokens": 2939, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. NO", "lm_q1_score": 0.5428632979641571, "lm_q2_score": 0.36658972248186, "lm_q1q2_score": 0.19900810574626762}} {"text": "#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace gamebase;\nusing namespace std;\n\nclass Figure\n{\npublic:\n Figure() {}\n\n Figure(int startYArg, const Color& colorArg, const vector& geomArg)\n {\n startY = startYArg;\n color = colorArg;\n geom = geomArg;\n }\n\n void rotateLeft()\n {\n for (auto it = geom.begin(); it != geom.end(); ++it)\n *it = rotate90CCW(*it);\n }\n\n void rotateRight()\n {\n for (auto it = geom.begin(); it != geom.end(); ++it)\n *it = rotate90CW(*it);\n }\n\n int startY;\n Color color;\n vector geom;\n};\n\nclass MyApp : public SimpleApplication\n{\npublic:\n void load()\n {\n srand(GetTickCount());\n\n design = deserialize(\"tetris\\\\Design.json\");\n m_view->addObject(design);\n\n fieldLayout = design->getChild(\"#field\");\n\n design->getChild