diff --git a/.gitignore b/.gitignore index 3084703..7633e71 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,10 @@ *.pyc *.[oa] *~ +*.zip +*.png +.Python +bin/ media/t*png media/profpic*png todo.txt @@ -8,13 +12,21 @@ settings.py temp.txt probmap -*.zip +arc-models/*.pdf +arc-models/*_flymake.py +arc-models/diffusionwriteup.synctex.gz -*.png +*.7 -arc-models/lognorm_summary.pdf +junk.py -arc-models/motion_model_flymake.py +*.rel +*.aux +*.bbl +*.bib +*.blg +*.log +*.toc .idea/* .idea* diff --git a/Views.py b/Views.py index 6890cc2..0b2c6e0 100644 --- a/Views.py +++ b/Views.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# -*- coding: UTF-8 -*- +# -*- coding: utf-8 -*- # -*- mode: python -*- # -*- py-indent-offset: 4 -*- @@ -57,9 +57,10 @@ def get_sorted_models(all_models, condition=lambda model: True): ''' Return list of rated models, highest-rated first. Uses avgrating attribute and operator.attrgetter method. ''' - rated_models = list(model for model in all_models - if model.avgrating != 'unrated' and condition(model)) - return sorted(rated_models, key=attrgetter('avgrating'), reverse=True) + rated_models = [model for model in all_models + if model.model_avgrating != 'unrated' + and condition(model)] + return sorted(rated_models, key=attrgetter('model_avgrating'), reverse=True) def confidence_interval(scores): @@ -79,10 +80,10 @@ def confidence_interval(scores): lowerbound = round(avg-halfwidth, 4) upperbound = round(avg+halfwidth, 4) assert lowerbound < avg < upperbound - return (lowerbound, upperbound) + return (lowerbound, upperbound, avg) except: print >> sys.stderr, 'No 95%% CI. 
N=%d, avg=%6.2f, std=%6.2f' % (N, avg, stdev) - return (0, 0) + return (-1, 1, scores[0]) def check_account_fields(fields, new_user=True): @@ -754,7 +755,7 @@ def model_access(request): request_to_input(request.session, input_dic, 'info', 'error') return render_to_response('ModelScreen.html',input_dic) - # If comming frm account + # If coming frm account else: selection = request.GET['model_in'] if selection == '0': @@ -963,13 +964,19 @@ def completed_test(request): @login_required def leaderboard(request): + '''Generate the data for the main leaderboard: Average, 95% CI, and N. + + Handles filter choice of 'case', 'category', and usual 'model'. + + ''' input_dict = dict(csrf(request)) models = list(Model.objects.filter(completed_cases__gt=0)) filter_choice = request.POST.get('filter') - instname = lambda model: model.account_set.all()[0].institution_name + if filter_choice == 'case': - model_data, case_name = list(), request.POST.get('case_name') + model_data, case_name = [], request.POST.get('case_name') + input_dict['msg'] = 'of case "%s"' % case_name for model in models: try: test = model.tests.get(test_name=case_name, active=False) @@ -977,28 +984,22 @@ def leaderboard(request): round(float(test.test_rating), 3), '', 1)) except Test.DoesNotExist: pass - input_dict['msg'] = 'of case "%s"' % case_name - elif filter_choice == 'category': - model_data, category = list(), request.POST.get('case_category') - for model in models: - all_tests = model.tests.filter(active=False) - valid_tests = list(test for test in all_tests if test.test_case.subject_category == category) - if len(valid_tests) > 0: - scores = list(float(test.test_rating) for test in valid_tests) - lowerbound, upperbound = (-1, 1) if len(scores) == 1 else confidence_interval(scores) - model_data.append((instname(model), model.name_id, - round(sum(scores) / len(scores), 3), '[%5.3f, %5.3f]' % (lowerbound, upperbound), len(scores))) - input_dict['msg'] = 'of category "%s"' % category else: - model_data = 
list() + if filter_choice == 'category': + category = request.POST.get('case_category') + input_dict['msg'] = 'of "%s"' % category + model_data = [] for model in models: - all_tests = model.tests.filter(active=False) - scores = list(float(test.test_rating) for test in all_tests) - lowerbound, upperbound = (-1, 1) - if len(scores) > 1: - lowerbound, upperbound = confidence_interval(scores) - model_data.append((instname(model), model.name_id, - round(float(model.avgrating), 3), '[%5.3f, %5.3f]' % (lowerbound, upperbound), len(model.tests.all()))) + tests = model.model_tests.filter(Active=False) + if filter_choice == 'category': + tests = [t for t in tests if t.test_case.subject_category == category] + if len(tests) > 0: + scores = [float(test.test_rating) for test in tests] + lowerbound, upperbound, avg = confidence_interval(scores) + model_data.append((instname(model), model.model_nameID, + round(avg, 2), '[%5.2f, %5.2f]' % (lowerbound, upperbound), len(scores))) + model.model_avgrating = '%5.3f' % avg + model.save() input_dict['case_names'] = Case.objects.values_list('case_name', flat=True) input_dict['case_categories'] = set(case.subject_category for case in Case.objects.all()) @@ -1009,7 +1010,11 @@ def leaderboard(request): @login_required def Leader_model(request): - '''Create the leaderboard.''' + '''Duplicate(?) function to create leaderboard. + + TODO: figure out what calls this and why it can't use previous. 
+ + ''' sorted_models = get_sorted_models(Model.objects.all()) # Build Leaderboard inputlist = [] @@ -1018,24 +1023,20 @@ def Leader_model(request): account = model.account_set.all()[0] institution = account.institution_name username = account.username - name = model.name_id - rating = float(model.avgrating) - tests = model.tests.all() - finished_tests = [test for test in tests if not test.active] - N = len(finished_tests) + name = model.model_nameID + + tests = model.model_tests.all() + finished_tests = [test for test in tests if not test.Active] scores = [float(x.test_rating) for x in finished_tests] + N = len(scores) + lowerbound, upperbound, avg = confidence_interval(scores) # Build case, depending on sample size - case = [institution, name, '%5.3f'%rating, N, username] - if N <= 1: - case.extend([-1, 1, True]) # std=False, sm.sample=True - else: - lowerbound, upperbound = confidence_interval(scores) - if N < 10: # small sample=True - case.extend([lowerbound, upperbound, True]) - else: # small sample = False - case.extend([lowerbound, upperbound, False]) - #print >> sys.stderr, case + case = [institution, name, '%5.3f'%avg, N, username] + if N < 10: # small sample=True + case.extend([lowerbound, upperbound, True]) + else: # small sample = False + case.extend([lowerbound, upperbound, False]) inputlist.append(case) # Prepare variables to send to HTML template @@ -1815,7 +1816,7 @@ def switchboard_toscenario(request): if N < 2: entry.extend([-1, 1, True]) # std=False, sm.sample=True else: - lowerbound, upperbound = confidence_interval(scores) + lowerbound, upperbound, avg = confidence_interval(scores) if N < 10: # small sample=True entry.extend([lowerbound, upperbound, True]) else: # small sample = False diff --git a/arc-models/diffusionwriteup.tex b/arc-models/diffusionwriteup.tex new file mode 100644 index 0000000..4454fdc --- /dev/null +++ b/arc-models/diffusionwriteup.tex @@ -0,0 +1,56 @@ +\documentclass[]{article} + +%opening +\title{Modeling Lost Person 
Movement as Diffusion Using Elevation and Land Cover}
+\author{Eric Cawi}
+
+\begin{document}
+
+\maketitle
+
+\begin{abstract}
+This work creates a model for lost person movement based on gas diffusion. (Abstract to be filled in.)
+\end{abstract}
+\tableofcontents
+
+We use Burgess and Darken's \cite{burgess_realistic_2004} gas diffusion model of path planning to create probability maps for lost hikers. Where Burgess and Darken sample the diffusion gradient to find discrete likely paths from source to target, we use the diffusion gradient itself as a probability map, and find it is competitive with the Euclidean ring model in \cite{koester_lost_2008}.
+
+\section{Create Working Gas Model}
+
+\textbf{Insert diagram here}.
+
+First, the area is divided into cells. Each cell has a concentration. Then the elevation data is transformed onto the interval $[0,.25]$, which represents the maximum transfer of gas from that cell to another cell. For example, a cell with a value of .25 and a concentration of 1 (units) can transfer up to .25 of its gas to each of the four neighboring cells. This ensures the conservation of concentration in the simulation. This simulation also allows for sources and sinks to be placed at arbitrary grid cells. Burgess and Darken run this simulation until a relative equilibrium is reached (mathematically, taking the limit as time goes to infinity), and then use the gradient of the final concentration to generate possible paths of approach for military applications. Note: using the gradient to generate the avenues of approach amounts to solving the transport equation, which is hard to solve numerically.
+
+For search and rescue, I am more interested in the final gas concentration. Modeling the last known point as a source, the gas model simulates the overall movement of many hikers from the last known point (assertion, want to prove this). 
I use the source because it models a spike of probability at the LKP, and as Burgess and Darken used it originally, it represents a continuous stream of people moving around the material.
+
+In this model, I use elevation/land cover data as the speed at which the gas is allowed to propagate. Because the arrays involved are very large, I downsample by a factor of 4 on the NED dataset, then resample up to 5001x5001 pixels using Matlab's default interpolation (I think it is bicubic). Currently I'm running out to a huge number of iterations because I haven't calibrated the transformations to match different categories/mean distances yet.
+\section{Transformations from elevation/land cover to impedance}
+Elevation: using Tobler's hiking function.
+Land cover: using 1 minus Don's values.
+\section{Diffusion Equation with source}
+The diffusion equation models the overall random walks of particles diffusing through materials. At each point there is some diffusion constant determining the maximum velocity of the concentration. Mathematically, freely diffusing gas like the Burgess--Darken model can be modeled with this equation. In 1-D with a constant diffusion coefficient, the solution is a normal distribution, and generalizations give similarly smooth distributions. In our case the diffusion is given by:
+\[\frac{\partial c(x,y,t)}{\partial t}=\nabla \cdot\bigl(D(x,y)\nabla c(x,y,t)\bigr)=\]
+\[\left(\frac{\partial D(x,y)}{\partial x},\frac{\partial D(x,y)}{\partial y}\right)\cdot\nabla c(x,y,t) + D(x,y)\Delta c(x,y,t)\]
+Current work: finite difference scheme for this equation.
+\section{Boundary Considerations}
+I'm considering using the boundary of the map as a sink and seeing what happens, or having some other way of modeling the interactions with the outside layer, because in the current state we might be getting inaccurate reflection. Boundary conditions will be important for the PDE formulation as well.
+
+\section{Static Model Preliminary Testing}
+Done on 10 Arizona cases I had elevation data for. 
Observations: On flat terrain the concentration behaves like regular diffusion, i.e. a smooth version of the distance rings. Otherwise, it avoids big changes in elevation, as expected. E.g. if a subject doesn't start on a mountain then the mountain will have lower probability. However, 2 or 3 of the hikers tested were going for the mountain, which makes sense because they were probably trying to hike on the mountain, but this model isn't going to catch that. Fixing this would require more information about the subject, for example "They were heading towards Mt. Awesome when they got lost." Since the odds are they moved in the direction of Mt. Awesome, a sink dropped at the intended location would skew the concentration in that direction. But that isn't available all the time.
+Not done for land cover yet.
+\section{Scaling with Bob's Book}
+Not done.
+\section{Testing/fitting on Yosemite Cases}
+Prediction: lots of mountains in Yosemite, so we should see higher probabilities around ridges/trails that stay at the same elevation. I would like to somehow set this up as a batch job.
+
+\section{Comparison with Diffusion Models}
+Compare results and speed.
+\section{Discussion}
+Goal: An easy GIS-ready application / a diffusion model that responds to terrain and is about as good as the distance model, which means that the search team now has more information. 
+ +\section{References} +\bibliographystyle{plain} +\bibliography{diffusionwriteup} + +%Need Citations +\end{document} diff --git a/arc-models/gas_move.m b/arc-models/gas_move.m new file mode 100644 index 0000000..fb413f2 --- /dev/null +++ b/arc-models/gas_move.m @@ -0,0 +1,38 @@ +function gas = gas_move(con,speed_mat,src_vec, sink_vec,num_it) +%con- initial concentrations of the gas +%speed_mat- matrix of the max percentage transference,takes values between +%0 and .25-each cell can get rid of at most 25% of it's gas +%src_vec = locations of sources in the thing +%sink_vec= locations of sinks in the domain +%num_it- number of iterations to transfer +[m,n] = size(con);%also the size of speed matrix +if ~isempty(src_vec) + src_inds = sub2ind([m,n],src_vec(:,1),src_vec(:,2)); +end +if ~isempty(sink_vec) + sink_inds = sub2ind([m,n],sink_vec(:,1),sink_vec(:,2)); +end +for i = 1:num_it + %create left, right, up, and down transition matrices, keeping in mind + %the borders only have things from 2-3 directions + left = speed_mat(1:m,1:n-1).*(con(1:m,1:n-1)-con(1:m,2:n));%represents the contribution of the cells coming from the left + right = speed_mat(1:m,2:n).*(con(1:m,2:n)-con(1:m,1:n-1));%contribution of cells coming from the right + up = speed_mat(1:m-1,1:n).*(con(1:m-1,1:n)-con(2:m,1:n));%contribution from the cells above + down = speed_mat(2:m,1:n).*(con(2:m,1:n)-con(1:m-1,1:n));%contribution from the below cells + %update concentration + con(1:m,2:n) = con(1:m,2:n)+left; + con(1:m,1:n-1) = con(1:m,1:n-1)+right; + con(2:m,1:n) = con(2:m,1:n)+up; + con(1:m-1,1:n)=con(1:m-1,1:n)+down; + if ~isempty(src_vec) + con(src_inds)=1;%fills up the source(s) + end + if ~isempty(sink_vec) + con(sink_inds)=0;%drains sink + end +end +%at the end number of desired iterations, gas is the final concentration +gas = con; + + + diff --git a/arc-models/land2speed.m b/arc-models/land2speed.m new file mode 100644 index 0000000..849512c --- /dev/null +++ b/arc-models/land2speed.m @@ -0,0 
+1,13 @@ +function speed_mat = land2speed(data,land_cover_class) +%converts the land cover data to impedance values +%land cover class has two columns, 1 is the values taken by the raster. The second is impedance values. In my case i'm scaling down to [0,.25] for the gas model +temp = data; +for i = 1:length(land_cover_class(:,1)) + inds = find(data == land_cover_class(i,1));% find all of the points that have a particular classification + temp(inds) = land_cover_class(i,2); %reset all of the data +end +%get rid of any border values by setting unknown things to 0 go +inds = find(temp>.25); +temp(inds) = 0; +speed_mat = temp; +end diff --git a/arc-models/walking_speed.m b/arc-models/walking_speed.m new file mode 100644 index 0000000..d27bbad --- /dev/null +++ b/arc-models/walking_speed.m @@ -0,0 +1,122 @@ +function map = walking_speed(data1,data2,R1,num_it,elev_flag,elev_weight,land_flag,land_weight,fname) +%creates a map simulating hiker movement as gas +%data1 - elevation data +%data2 - land cover data +%R - reference matrix in terms of [cells/degree,top latitude, west +%longitude] or georeference object +%num_it - number of iterations +%elev_flag = doing based on elevation +%land_flag - doing calculation based on land cover +%elev/land weight - weight for combination, if applicabfunction map = walking_speed(data1,data2,R1,num_it,elev_flag,elev_weight,land_flag,land_weight,landclass,fname) +%creates a map simulating hiker movement as gas +%data1 - elevation data +%data2 - land cover data +%R - reference matrix in terms of [cells/degree,top latitude, west +%longitude] or georeference object +%num_it - number of iterations +%elev_flag = doing based on elevation +%land_flag - doing calculation based on land cover +%elev/land weight - weight for combination, if applicable +%fname- desired file location name +%first, calculate the slope function +if elev_flag==1 && land_flag==0; + figure(); imagesc(data1); title('elevation data') + [ASPECT, SLOPE, gradN, gradE] = 
gradientm(data1, R1); +%calculate walking speed using tobler's formula + speed_mat1 = 6*exp(-3.5*abs(tand(SLOPE)+.05)); +%scale speed matrix down to [0,.25] + speed_mat1 = .25*speed_mat1/max(max(speed_mat1)); +elseif land_flag ==1&&elev_flag==0; + %convert land cover to impedance + speed_mat1 = land2speed(data2,landclass);%using speed mat 1 in case of only using one dataset + figure(); imagesc(speed_mat1);title('impedance data') + +elseif land_flag ==1 && elev_flag==1; + %use two speed matrices + figure(); imagesc(data1);title('elevation data') + [ASPECT, SLOPE, gradN, gradE] = gradientm(data1, R1); +%calculate walking speed using tobler's formula + speed_mat1 = 6*exp(-3.5*abs(tand(SLOPE)+.05)); + %scale speed matrix down to [0,.25] + speed_mat1 = .25*speed_mat1/max(max(speed_mat1)); + %calculate second speed matrix, assumed to be land cover + figure(); imagesc(speed_mat1);title('elevation data') + + speed_mat2 = land2speed(data2,landclass);%using speed mat 1 in case of only using one dataset + figure(); imagesc(speed_mat2);title('impedance data') + %resize one matrix to the size of the other + speed_mat2 = imresize(speed_mat2,size(speed_mat1)); + %combine speed matrix + speed_mat1 = elev_weight*speed_mat1 + land_weight*speed_mat2; + +end +%downsample +speed_mat1 = downsample(downsample(speed_mat1,4)',4)'; +%set up last known point +[m,n] = size(speed_mat1); +con = zeros(m,n); +con(ceil(m/2),ceil(n/2)) = 1; +%run the iterations +con_2 = gas_move(con,speed_mat1,[ceil(m/2) ceil(n/2)],[],num_it); +%upsample +con_2 = con_2/sum(sum(con_2));%normalizing +map = con_2; +figure(); imagesc(map); +%create and resize png to 5000 by 5000 +im_map = imresize(map,[5001,5001]); +%write image to file +im_map = mat2gray(im_map); +imwrite(im_map,fname); + +endle +%first, calculate the slope function +if elev_flag==1 && land_flag==0; + figure(); imagesc(data1); title('elevation data') + [ASPECT, SLOPE, gradN, gradE] = gradientm(data1, R1); +%calculate walking speed using tobler's 
formula + speed_mat1 = 6*exp(-3.5*abs(tand(SLOPE)+.05)); +%scale speed matrix down to [0,.25] + speed_mat1 = .25*speed_mat1/max(max(speed_mat1)); + %downsample + speed_mat1 = downsample(downsample(speed_mat1,4)',4)'; +elseif land_flag ==1&&elev_flag==0; + %convert land cover to impedance + speed_mat1 = land2speed(data2);%using speed mat 1 in case of only using one dataset + figure(); imagesc(speed_mat1);title('impedance data') + +elseif land_flag ==1 && elev_flag==1; + %use two speed matrices + figure(); imagesc(data1);title('elevation data') + [ASPECT, SLOPE, gradN, gradE] = gradientm(data1, R1); +%calculate walking speed using tobler's formula + speed_mat1 = 6*exp(-3.5*abs(tand(SLOPE)+.05)); + %scale speed matrix down to [0,.25] + speed_mat1 = .25*speed_mat1/max(max(speed_mat1)); + %calculate second speed matrix, assumed to be land cover + speed_ma%downsample + speed_mat1 = downsample(downsample(speed_mat1,4)',4)';t2 = land2speed(data2);%using speed mat 1 in case of only using one dataset + figure(); imagesc(speed_mat2);title('impedance data') + %resize one matrix to the size of the other + speed_mat2 = imresize(speed_mat2,size(speed_mat1)); + %combine speed matrix + speed_mat1 = elev_weight*speed_mat1 + land_weight*speed_mat2; + %downsample + speed_mat1 = downsample(downsample(speed_mat1,4)',4)'; +end +%set up last known point +[m,n] = size(speed_mat1); +con = zeros(m,n); +con(ceil(m/2),ceil(n/2)) = 1; +%run the iterations +con_2 = gas_move(con,speed_mat1,[ceil(m/2) ceil(n/2)],[],num_it); +%upsample +con_2 = con_2/sum(sum(con_2));%normalizing +map = con_2; +figure(); imagesc(map); +%create and resize png to 5000 by 5000 +im_map = imresize(map,[5001,5001]); +%write image to file +im_map = mat2gray(im_map); +imwrite(im_map,fname); + +end diff --git a/senior-design-code/S.m b/senior-design-code/S.m new file mode 100644 index 0000000..21ef94e --- /dev/null +++ b/senior-design-code/S.m @@ -0,0 +1,12 @@ +function [fx] = S(x) + %f(x) for kadane's probability function + 
%computes suml(pl*exp(sumj(xlj*log(alphalj)) + %x is the matrix of continuous variables given by [n1,n2,...nL] is the + %effort put into each search area, since with simplifications j isn't + %needed + %pl, alphal are taken as global varables + global alphal pl + eIl = exp(x.*log(alphal));% e^(sumj(n*log(alphal + fx = sum(pl.* eIl); + + diff --git a/senior-design-code/add_node.m b/senior-design-code/add_node.m new file mode 100644 index 0000000..9839bd8 --- /dev/null +++ b/senior-design-code/add_node.m @@ -0,0 +1,56 @@ +function [path_sum_new, new_path, probs_update] = add_node(current_path,current_probs, path_sum, m,n,prob_detect) + %this function will insert a point in between the current x,y position + %and the next x,y position in the path or between the previous node and + %current node. Parameters: current_path: the current path of the + %unit, probs: the array of probability distributions + %get neighbors of the different points, path_sum: current path sum, + %m,n: dimensions of array, k: current step on the path + %get the current point on the path and it's immediate neighbors + new_point_choice = zeros(size(current_path)); + best_probs = zeros(length(current_path(1,:)),1); + for k = 2:(length(current_path(:,1))-1) + current_xy = current_path(k,:); + next_xy = current_path(k+1,:); %next point on path + previous_xy = current_path(k-1,:); %previous point on path + [ncx,ncy] = get_neighbors(current_xy(1), current_xy(2), m,n); + neighbors = [ncx ncy]; + [nnx,nny] = get_neighbors(next_xy(1), next_xy(2),m,n); + next_neighbors = [nnx nny]; + [npx,npy] = get_neighbors(previous_xy(1), previous_xy(2),m,n); + prev_neighbors = [npx npy]; + v1 = intersect(neighbors,next_neighbors,'rows'); + v2 = intersect(neighbors,prev_neighbors,'rows'); + valid_choices = union(v1,v2,'rows'); %the choices for the new point based on the old points + %get intersection of all three arrays as valid points + %look at the probabilities of the + n_probs = zeros(length(valid_choices(:,1)),1); 
+ for p = 1: length(valid_choices(:,1)) + n_probs(p) = current_probs(valid_choices(p,2), valid_choices(p,1)); + end + + a = find(n_probs == max(n_probs)); + new_point_choice(k,:) = valid_choices(a(1),:); + best_probs(k) = current_probs(new_point_choice(k,2), new_point_choice(k,1)); + end + best_point = find(best_probs == max(best_probs)); + if length(best_point)>1 + best_point = best_point(1);%pick the first best choice if there are more than one + end + %update path sum new + path_sum_new = path_sum + prob_detect*current_probs(new_point_choice(best_point,2), new_point_choice(best_point,1)); + probs_update = current_probs; + probs_update(new_point_choice(best_point,2), new_point_choice(best_point,1)) = (1-prob_detect)*probs_update(new_point_choice(best_point,2), new_point_choice(best_point,1));%reflect that the new point has been visited in the probabilities matrix + %update new path + current_xy = current_path(best_point,:); %the current point that yielded the new best point + [ncx,ncy] = get_neighbors(current_xy(1), current_xy(2), m,n); + neighbors = [ncx ncy]; + previous_xy = current_path(best_point-1,:); %previous point on path + [npx,npy] = get_neighbors(previous_xy(1), previous_xy(2),m,n); + prev_neighbors = [npx npy]; + v = intersect(neighbors,prev_neighbors,'rows'); + if ~isempty(intersect(v,new_point_choice(best_point,:), 'rows')) %check to see if it's between the kth point and the previous neighbor + new_path = [current_path(1:best_point-1,:) ; new_point_choice(best_point,:); current_path(best_point:end,:)]; + else %if not,it's between the current point and the next point in the path + new_path = [current_path(1:best_point,:); new_point_choice(best_point,:); current_path(best_point+1:end,:)]; + end +end diff --git a/senior-design-code/array_to_meters_lon.py b/senior-design-code/array_to_meters_lon.py new file mode 100644 index 0000000..7190ec7 --- /dev/null +++ b/senior-design-code/array_to_meters_lon.py @@ -0,0 +1,49 @@ +import math +import numpy as np 
+def array_to_meters_lon(arr, center_lat, center_lon,HFOV, VFOV, L,theta, alt,met_or_lon) +#arr - numpy array- probabilities or a portion of the picture with possible object in it +#center_lat, center_lon - central coordinates of the array +#hfov,vfov- horizontal and vertical fields of view +#L - focal length of camera +#theta - yaw of quad, in radians +#alt - altitude of quad +#met_or_lon - does the user want global coordinates in meters or longitude, 0 for meters, 1 for lat/lon +#this function takes arrays, along with a central point-for example tagged from a picture, and returns x,y,z or lat lon coordinates +#equations based off of "Robot Navigation", 2011 by Gerald Cook + shp = arr.shape + xs = np.zeros(shp) + ys = np.zeros(shp) + N = shp[0] + M = shp[1] + #convert central point to meters + c_index = [shp[0]/2 - 1,shp[1]/2 -1] + #convert to cartesian + for i in range(shp[0]) #rows of array + ys[i][:] = float(L* (N+1.0-2.0*i)/(N-1.0) * np.tan(VFOV/2)) + for j in range(shp[1]) + xs[:][j] = float(L* (2*j - M -1.0)/(M-1.0) * np.tan(HFOV/2)) + #azimuth and zenith + az = np.atan(-1*xs/L) + zen = np.atan(np.divide(ys, np.sqrt(L*L + np.square(xs)))) + #camera coordinates + xc = Z*np.tan(az) + yc = Z*np.tan(zen) + #world coordinates, 2x2 rotation matrix * [xc,yc], rotates by yaw + xw = xc*np.cos(theta) - yc*np.sin(theta) + yw = xc*np.sin(theta) + yc*np.cos(theta) + #global coordinates in meters + R = 6371000 #average radius of earth, equatorial radius is 6378137 meters + lat_rad = center_lat*math.pi/180.0 + lon_rad = center_lon*math.pi/180.0 + xg = np.add(R*np.cos(lat_rad), xw) + yg = np.add(R*np.cos(lat_rad)*np.sin(lon_rad),yw) + zg = R*np.sin(lat_rad)+Z + if met_or_lon == 0 #users + return xg,yg,zg + elif met_or_lon == 1 + lats = np.atan(np.divide(Zg,np.sqrt(np.add(np.square(xg),np.square(yg))))) + lons = np.atan(np.divide(yg,xg))#in design document it's yw/xw but I think it should be yg/xg + return lats, lons #returns array of x,y coordinates for each element of the 
array + else + return 'bad input' + diff --git a/senior-design-code/big_path b/senior-design-code/big_path new file mode 100644 index 0000000..becc2c7 --- /dev/null +++ b/senior-design-code/big_path @@ -0,0 +1,133 @@ +%stitching paths together +%human planning level +%assumptions: rectangular search region, split up into other rectangular +%regions, search regions numbered columnwise (standard matlab convention)as [1 4 7; 2 5 8; 3 6 9], each region is +%the same size, want to spend time in every search region +%inputs from user: probability files, number of levels to go down - N +% the inputs should be interfaced with the gui somehow +clear all; close all; clc; +%declare some global variables +global pl alphal m_current n_current m_new n_new Cp +N = input('How many layers of Resolution?: '); %integer +prob_detect = input('What is the UAVs probability of detection?: '); % number between 0 and 1 +Cp = input('What percentage of the total area can the UAV cover?: '); % needs a number between 0 and 1 +output_filename = 'C:\Users\Eric Cawi\Documents\senior design\test.txt'; +fid = fopen(output_filename,'w'); +%write some basic information to the test file +strN = sprintf('%d layers of resolution',N); +fprintf(fid,'%s \r\n',strN); +strp = sprintf('probability of detection: %f',prob_detect); +fprintf(fid,'%s \r\n',strp); +strc = sprintf('Total Area that the system can cover: %f',Cp); +fprintf(fid,'%s \r\n',strc); +for i = 1:N %for every level of resolution + str = sprintf('Resolution level %d ',i); + disp(str); + current_res_struct = load(input('resolution probability array: ','s')); + fname = fieldnames(current_res_struct); + current_res_probs = current_res_struct.(fname{1}); + if i ==1 %at the top level, run a greedy algorithm with a starting point + [m_old, n_old] = size(current_res_probs); + spt = input('What is the starting point of the search?'); + C_area = m_old*n_old; %going through every cell in this thing since I haven't figured out how to find a path based on 
kadane cells yet + current_path = greedy_algorithm(current_res_probs, prob_detect, spt, C_area); + %update the file + strn = sprintf('x y - level %d path',i); + fprintf(fid,'\r\n%s \r\n',strn); + for k = 1:length(current_path(:,1)) + fprintf(fid,'%d %d \r\n',current_path(k,:)-1); + end + lower_res_probs = current_res_probs; + else + [m_new, n_new] = size(current_res_probs); + path_part = zeros(m_new*n_new, 2,length(current_path(:,1))); + %prepare to optimize effort + pl = zeros(length(current_path(:,1)),1); + for k =1:length(current_path(:,1)) + pl(k) = lower_res_probs(current_path(k,2),current_path(k,1)); + end + alphal = (1-prob_detect)*ones(size(pl)); + m_current = m_new/m_old; + n_current = n_new/n_old; + %determine the effort for the points in the current path with fmincon + n0 = rand(length(current_path(:,1)),1); + n = fmincon(@S,n0,[],[],[],[],zeros(size(n0)),ones(size(n0)),@cnstr); + disp(n) + for j = 1: length(current_path(:,1)) + %figure out which segment of the path to use for path planning, + %assumes that m_new is a multiple of m_old, i.e. 
each cell in the + %old resolution is expanding into an integer number of cells in the + %new resolution + current_probs = current_res_probs((current_path(j,2)-1)*m_current+1:current_path(j,2)*m_current, (current_path(j,1)-1)*n_current+1:current_path(j,1)*n_current); + C_area = floor(m_current*n_current*n(j)); %current cost * probability subject is in an area = proportional time spent in that area * number of cells = number of cells in new resolution we can cover + disp(C_area) + %calculate starting and ending points for this level from previous + %path resolution + if j=2 + dx_old = current_path(j,1) - current_path(j-1,1);%previous direction, determines starting point + dy_old = current_path(j,2) - current_path(j-1,2); + else %j = 1 + dx_old = 0; + dy_old = 0; + end + [spt,ept] = endpts(dx_old, dy_old, dx_new, dy_new,m_current, n_current); + if ((ept(1) == 0)&&(ept(2) == 0)) %at the end of the path, do a greedy algorithm + disp('end of current path') + path_part(1:C_area,:,j) = greedy_algorithm(current_probs, prob_detect, spt, C_area); + elseif ((dx_old == 0)&&(dy_old == 0)) %this happens at the beginning, do greedy from the endpoint and then reverse the path + disp('start of current path'); + path_temp = greedy_algorithm(current_probs,prob_detect,ept, C_area); % ending point is starting point of the path + path_part(1:C_area,:,j) = flipud(path_temp);%flips up/down so that the ending point is hte last part of the array + else % the normal case, hill climbing with fixed start and end points + disp('hill climbing'); + path_part(1:C_area,:,j) = hill_climbing(m_current,n_current,current_probs,prob_detect,C_area,spt,ept); + end + strn = sprintf('x y - level %d path through level %d point (%d,%d)',i,i-1, current_path(j,1)-1,current_path(j,2)-1); + fprintf(fid,'\r\n%s \r\n',strn); + for k = 1:length(path_part(1:C_area,1,j)) + fprintf(fid,'%d %d \r\n',path_part(k,:,j)-1); + end + end + %link the paths together to update current path + current_path = 
%% --- tail of big_path.m (function starts before this chunk; preserved with review notes) ---
% NOTE(review): the patch text begins mid-statement here; the left-hand side of
% this call (likely "current_path = ") was cut off at the chunk boundary.
% NOTE(review): link_paths is defined below with FOUR inputs -- the two extra
% arguments here will raise "Too many input arguments"; confirm the intended
% signature.
link_paths(path_part,current_path, m_current,n_current,m_new, n_new);
lower_res_probs = current_res_probs; % update the lower resolution for the next iteration
m_old = m_new;
n_old = n_new;
end
end
%compute the final path sum and plot it over the probability map
path_sum = 0;
disp('plotting')
% NOTE(review): absolute user-specific path -- parameterize before reuse
background_probs = imread('C:\Users\Eric Cawi\Documents\senior design\ring_test.png');
min_x = 1;
max_x = 40;
min_y = 1;
max_y = 40;
figure();
imagesc([min_x max_x], [min_y max_y], background_probs);
hold on
plot(current_path(:,1),current_path(:,2),'-*','linewidth',2.25)
plot(current_path(1,1),current_path(1,2),'r*','markersize',20)
quiver(current_path(:,1),current_path(:,2),[current_path(2:end,1)-current_path(1:end-1,1);0],[current_path(2:end,2)-current_path(1:end-1,2);0],'linewidth',2.5)
xlabel('x','fontsize',14)
ylabel('y','fontsize',14)
title('Search Path overlaid onto the probability matrix','fontsize',20)
for j = 1:length(current_path(:,1))
    path_sum = path_sum + prob_detect*current_res_probs(current_path(j,2), current_path(j,1));
    current_res_probs(current_path(j,2), current_path(j,1)) = current_res_probs(current_path(j,2), current_path(j,1))*(1-prob_detect);
end
strsum = sprintf('total probability covered: %f', path_sum);
fprintf(fid,'\r\n%s \r\n',strsum);
fclose(fid);

disp('done');
%note: for the output file the indices are presented as python-compatible
%(0-based); for further MATLAB use add 1 to every index

%% ===== cnstr.m =====
%cost constraint function for fmincon
function [c,ceq] = cnstr(x)
    global m_new n_new m_current n_current Cp
    %x is the effort of searching each search region (length m_new*n_new);
    %m_new/n_new, m_current/n_current are globals set by the big_path.m driver
    C = floor(Cp*m_new*n_new);              %total allowable cost
    c = sum(x*(m_current*n_current)) - C;   %c <= 0 enforces the budget; assumes
                                            %search areas have equal probability
    ceq = [];                               %no equality constraint
end

%% ===== comparison.m =====
%Monte-Carlo comparison of hill climbing vs the greedy baseline on random maps
clear all; close all;

m = 5;
n = 5;
prob_detect = .9;
spt = [1 1];
ept = [4 5];

C = floor(m*n*.75); %total number of cells drone can travel to
best_hill_sum = zeros(1,30);
best_greedy_sum = zeros(1,30);
for i = 1:30
    search_value = rand(m,n);
    probs = search_value/sum(sum(search_value)); %turn the example set into probabilities
    %BUG FIX: both calls previously used argument lists that do not match the
    %function signatures in this patch and stored a PATH into a scalar slot.
    %They now use the documented signatures and the functions' sum outputs.
    [~, best_hill_sum(i)] = hill_climbing(m,n,probs,prob_detect,C,spt,ept);
    [~, best_greedy_sum(i)] = greedy_algorithm(probs,prob_detect,spt,C);
    disp(i) %progress
end
hill_avg = mean(best_hill_sum);
greedy_avg = mean(best_greedy_sum);

%% ===== endpts.m =====
function [spt,ept] = endpts(dx_old, dy_old, dx_new, dy_new, m, n)
    %Map travel directions onto entry/exit points of an m-by-n region.
    %  dx_old, dy_old - direction the searcher arrived with (0,0 = path start)
    %  dx_new, dy_new - direction it leaves with (0,0 = path end)
    %  m, n           - region dimensions (rows, columns)
    %Points are [x y]: x is a column index (1..n), y a row index (1..m),
    %with y increasing downward (so dy < 0 means moving up).

    %entry point -- depends on the old direction
    if (dx_old == 0) && (dy_old == 0)      %path start: arbitrarily top middle
        spt = [floor(n/2),1];
    elseif (dx_old > 0) && (dy_old == 0)   %coming from the left
        spt = [1, floor(m/2)];
    elseif (dx_old < 0) && (dy_old == 0)   %coming from the right
        spt = [n,floor(m/2)];
    elseif (dx_old > 0) && (dy_old < 0)    %coming from the bottom left
        spt = [1,m];
    elseif (dx_old > 0) && (dy_old > 0)    %coming from the top left
        spt = [1,1];
    elseif (dx_old < 0) && (dy_old < 0)    %coming from the bottom right
        spt = [n,m];
    elseif (dx_old < 0) && (dy_old > 0)    %coming from the top right
        spt = [n,1];
    elseif (dx_old == 0) && (dy_old < 0)   %coming from the bottom
        spt = [floor(n/2),m];
    elseif (dx_old == 0) && (dy_old > 0)   %coming from the top
        spt = [floor(n/2),1];
    end
    %exit point -- depends on the new direction
    if (dx_new > 0) && (dy_new == 0)       %going right
        ept = [n,floor(m/2)];
    elseif (dx_new < 0) && (dy_new == 0)   %going left
        ept = [1,floor(m/2)];
    elseif (dx_new > 0) && (dy_new > 0)    %going bottom right
        ept = [n,m];
    elseif (dx_new > 0) && (dy_new < 0)    %going top right
        %BUG FIX: moving right-and-up must exit at the top-RIGHT corner;
        %the original returned [1,1] (top left) here
        ept = [n,1];
    elseif (dx_new < 0) && (dy_new > 0)    %going bottom left
        ept = [1,m];
    elseif (dx_new < 0) && (dy_new < 0)    %going top left
        %BUG FIX: moving left-and-up must exit at the top-LEFT corner;
        %the original returned [n,1] (top right) here
        ept = [1,1];
    elseif (dx_new == 0) && (dy_new < 0)   %going up
        ept = [floor(n/2),1];
    elseif (dx_new == 0) && (dy_new > 0)   %going down
        ept = [floor(n/2),m];
    elseif (dx_new == 0) && (dy_new == 0)  %hit end of path, caller will do greedy
        ept = [0,0];
    end
end

%% ===== gen_rand_path.m =====
function rand_path = gen_rand_path(spt, ept, m, n)
    %Random walk from spt until it first reaches ept; returns the visited
    %[x y] points. Revisits are allowed and the walk length is unbounded.
    rand_path = spt;
    x = spt(1);
    y = spt(2);
    current_end = spt;
    while isempty(intersect(ept, current_end, 'rows'))
        [x_neighbors,y_neighbors] = get_neighbors(x, y, m, n);
        k = randi([1 length(x_neighbors)],1); %uniformly random neighbor
        x = x_neighbors(k); %update x and y
        y = y_neighbors(k);
        current_end = [x y];
        rand_path = [rand_path; current_end]; %#ok<AGROW> length unknown in advance
    end
end

%% ===== get_neighbors.m =====
%NOTE(review): committed without the .m extension ("senior-design-code/get_neighbors");
%rename the file so MATLAB can find it.
function [neighbors_x, neighbors_y] = get_neighbors(x, y, m, n)
    %x and y are column and row indices, respectively, of an m-by-n matrix,
    %so x ranges over 1..n and y over 1..m (this is how greedy_algorithm and
    %gen_rand_path index probs(y,x)). Returns all 8-connected neighbors,
    %clipped at the borders.
    %BUG FIX: the original tested x against m and y against n (swapped),
    %which mis-classifies corners and edges whenever m ~= n.

    %corner cases
    if (x == 1) && (y == 1)        %top left corner
        neighbors_x = [1;2;2];
        neighbors_y = [2;2;1];
    elseif (x == n) && (y == 1)    %top right corner
        neighbors_x = [n; n-1; n-1];
        neighbors_y = [2; 2; 1];
    elseif (x == 1) && (y == m)    %bottom left corner
        neighbors_x = [1;2;2];
        neighbors_y = [m-1; m-1; m];
    elseif (x == n) && (y == m)    %bottom right corner
        neighbors_x = [n, n-1, n-1]';
        neighbors_y = [m-1, m-1, m]';
    %edge cases
    elseif (y == 1)                %top edge
        neighbors_x = [x-1, x-1, x, x+1, x+1]';
        neighbors_y = [1, 2, 2, 2, 1]';
    elseif (y == m)                %bottom edge
        neighbors_x = [x-1, x-1, x, x+1, x+1]';
        neighbors_y = [m, m-1, m-1, m-1, m]';
    elseif (x == 1)                %left edge
        neighbors_x = [1, 2, 2, 2, 1]';
        neighbors_y = [y-1, y-1, y, y+1, y+1]';
    elseif (x == n)                %right edge
        neighbors_x = [n, n-1, n-1, n-1, n]';
        neighbors_y = [y-1, y-1, y, y+1, y+1]';
    %interior case
    else
        neighbors_x = [x-1, x-1, x-1, x, x, x+1, x+1, x+1]';
        neighbors_y = [y-1, y, y+1, y-1, y+1, y-1, y, y+1]';
    end
end

%% ===== greedy_algorithm.m =====
function [greedy_path, path_sum] = greedy_algorithm(probs, prob_detect, spt, C)
    %Greedy search path: from spt, always step to the neighbor with the
    %highest remaining probability. No fixed ending point.
    %  probs       - m-by-n containment probabilities
    %  prob_detect - probability of detection per visit
    %  spt         - [x y] starting point
    %  C           - number of cells the searcher may visit (a count, not a
    %                percentage -- the original header comment was wrong)
    %Second output (new, backward compatible): total probability covered,
    %so callers no longer have to recompute it.
    [m,n] = size(probs);
    num_cells = C;

    search_value_temp = probs; %working copy, discounted as cells are searched
    current_x = zeros(num_cells,1);
    current_y = zeros(num_cells,1);
    current_x(1) = spt(1);
    current_y(1) = spt(2);
    path_sum = prob_detect*search_value_temp(current_y(1), current_x(1));
    %discount the first cell to reflect having searched it
    search_value_temp(current_y(1), current_x(1)) = search_value_temp(current_y(1), current_x(1))*(1 - prob_detect);

    for k = 2:num_cells
        [neighbors_x, neighbors_y] = get_neighbors(current_x(k-1), current_y(k-1), m, n);
        current_neighbors = [neighbors_x neighbors_y];
        neighbor_prob = zeros(size(neighbors_x));
        for i = 1:length(neighbors_x)
            neighbor_prob(i) = search_value_temp(current_neighbors(i,2), current_neighbors(i,1));
        end
        best_neighbor = find(neighbor_prob == max(neighbor_prob));
        next_x = current_neighbors(best_neighbor,1);
        next_y = current_neighbors(best_neighbor,2);
        if length(next_x) > 1 %break probability ties by picking one at random
            p = randperm(length(next_x), 1);
            current_x(k) = next_x(p);
            current_y(k) = next_y(p);
        else
            current_x(k) = next_x;
            current_y(k) = next_y;
        end
        %accumulate the captured probability, then discount the searched cell
        path_sum = path_sum + prob_detect*search_value_temp(current_y(k), current_x(k));
        search_value_temp(current_y(k), current_x(k)) = search_value_temp(current_y(k), current_x(k))*(1 - prob_detect);
    end

    greedy_path = [current_x current_y];
end

%% ===== hand_test_cases.m =====
%test cases for small matrices on greedy and hill climbing, which can be
%verified by hand to ensure the algorithms are working
clear all; close all;

m = 5;
n = 5;
prob_detect = .9;
spt = [1 1];
ept = [5 5];

C = floor(m*n*.75); %total number of cells drone can travel to
best_hill_path = zeros(C,2,3);
best_hill_sum = zeros(1,3);
best_greedy_path = zeros(C,2,3);
%BUG FIX: was misspelled "best_greed_sum", leaving best_greedy_sum unpreallocated
best_greedy_sum = zeros(1,3);
%BUG FIX (all three cases below): the calls now match the signatures
%hill_climbing(m,n,probs,prob_detect,C,spt,ept) -> [path, sum] and
%greedy_algorithm(probs,prob_detect,spt,C) -> [path, sum]; pad-assignments
%guard against paths shorter than C rows.

%case 1: magic square matrix
search_value = magic(m);
probs = search_value/sum(sum(search_value)); %turn the example set into probabilities
[hp, best_hill_sum(1)] = hill_climbing(m,n,probs,prob_detect,C,spt,ept);
best_hill_path(1:size(hp,1),:,1) = hp;
[gp, best_greedy_sum(1)] = greedy_algorithm(probs,prob_detect,spt,C);
best_greedy_path(1:size(gp,1),:,1) = gp;

%case 2: Cauchy matrix
search_value = gallery('cauchy',1:m,1:n);
probs = search_value/sum(sum(search_value)); %turn the example set into probabilities
[hp, best_hill_sum(2)] = hill_climbing(m,n,probs,prob_detect,C,spt,ept);
best_hill_path(1:size(hp,1),:,2) = hp;
[gp, best_greedy_sum(2)] = greedy_algorithm(probs,prob_detect,spt,C);
best_greedy_path(1:size(gp,1),:,2) = gp;

%case 3: circular bivariate normal distribution, rho = 0, mu = 0, sigma = 1
%for x and y. NOTE: mvnpdf requires the Statistics Toolbox.
y = linspace(-1,1,m);
x = linspace(-1,1,n);
[xx,yy] = meshgrid(x,y);
mu = [0 0];
sig = [1 0; 0 1];
search_value = mvnpdf([xx(:),yy(:)],mu, sig);
search_value = reshape(search_value, length(y), length(x));
probs = search_value/sum(sum(search_value)); %turn the example set into probabilities
[hp, best_hill_sum(3)] = hill_climbing(m,n,probs,prob_detect,C,spt,ept);
best_hill_path(1:size(hp,1),:,3) = hp;
[gp, best_greedy_sum(3)] = greedy_algorithm(probs,prob_detect,spt,C);
best_greedy_path(1:size(gp,1),:,3) = gp;

hill_avg = mean(best_hill_sum);
greedy_avg = mean(best_greedy_sum);

%% ===== hill_climbing.m =====
function [hill_path, best_path_sum] = hill_climbing(m, n, probs, prob_detect, C, spt, ept)
    %Randomized hill climbing over search paths from spt to ept.
    %  m,n         - dimensions of the probability matrix
    %  probs       - array of probability that the subject is in each location
    %  prob_detect - probability that the searcher sees the subject
    %  C           - total cost, i.e. number of cells that can be visited
    %  spt, ept    - [x y] starting and ending points
    %Second output (new, backward compatible): the covered probability of the
    %returned path, so callers no longer have to recompute it.
    restarts = 10;
    i_best_path = zeros(C,2,restarts);  %zero-padded storage for each restart's best path
    i_path_sum = zeros(1,restarts);
    path_sum_start = zeros(1,restarts);
    path_start = zeros(C,2,restarts);
    for i = 1:restarts
        probs_current = probs;
        %first find a random starting path with 2 < length <= C
        l = C+1;
        current_path = spt;
        while ((l > C) || (length(current_path(:,1)) == 2))
            current_path = gen_rand_path(spt,ept,m,n);
            l = length(current_path(:,1));
        end
        %compute the first path sum, compounding the discount on revisits
        path_sum = 0;
        for j = 1:length(current_path(:,1))
            path_sum = path_sum + prob_detect*probs_current(current_path(j,2), current_path(j,1));
            probs_current(current_path(j,2), current_path(j,1)) = probs_current(current_path(j,2), current_path(j,1))*(1-prob_detect);
        end
        path_sum_start(i) = path_sum;
        path_start(1:length(current_path(:,1)),:,i) = current_path;

        %hill climbing loop, two moves: add a node / replace a node.
        %NOTE(review): the committed patch text is CORRUPTED in this region --
        %the condition and the move call were lost between
        %"length(current_path(:,1))" and "0". Reconstructed from the surviving
        %comments; add_node is not part of this patch. TODO: confirm against
        %the original sources.
        for a = 1:200
            if length(current_path(:,1)) < C
                [path_sum_new, new_path, probs_update] = add_node(current_path, probs, probs_current, m, n, prob_detect); %TODO confirm signature
            else
                [path_sum_new, new_path, probs_update] = replace_node(current_path, probs, probs_current, m, n, prob_detect);
            end
            if path_sum_new - path_sum > 0 %new path is a better sum
                path_sum = path_sum_new;   %take new path/new path sum
                current_path = new_path;
                probs_current = probs_update;
            end %if the old path was better, don't update anything
        end
        i_path_sum(i) = path_sum; %store the current path sum
        %BUG FIX: pad-assign; paths shorter than C rows made the original
        %full-slice assignment fail with a size mismatch
        i_best_path(1:length(current_path(:,1)),:,i) = current_path;
    end

    best_path_sum = max(i_path_sum);
    best_i = find(i_path_sum == best_path_sum);
    hill_path = i_best_path(:,:,best_i(1));
end

%% ===== link_paths.m =====
function [linked_path] = link_paths(path_part, current_path, m_current, n_current)
    %Stitch the per-region sub-paths into one high-resolution path.
    %  path_part            - 3-D array; path_part(:,:,i) is the zero-padded
    %                         sub-path searched inside the i-th coarse cell
    %  current_path         - path at the lower (coarse) resolution
    %  m_current, n_current - dimensions of the finer-resolution tiles
    high_res_path = [];
    for i = 1:length(current_path(:,1))
        current_point = current_path(i,:);
        %keep only the non-padding rows of this part (find returns linear
        %indices over BOTH columns, hence the /2)
        p = path_part(:,:,i);
        [good_rows,good_col] = find(p ~= 0);
        good_path = p(1:length(good_rows)/2,:); %BUG FIX: semicolon added (was printing to the console every iteration)
        %offset the sub-path into the coordinates of the full fine grid.
        %NOTE(review): x offsets scale by m_current and y by n_current --
        %verify this isn't swapped, since x is a column index elsewhere in
        %this codebase.
        index_update = ones(size(good_path));
        index_update(:,1) = (current_point(1)-1)*m_current;
        index_update(:,2) = (current_point(2)-1)*n_current;
        %(removed a bare "good_path + index_update" debug statement that
        %printed ans on every iteration)
        high_res_path = [high_res_path; (good_path + index_update)]; %#ok<AGROW>
    end
    linked_path = high_res_path; %update current path at the end
end

%% ===== replace_node.m =====
function [path_sum_new, new_path, probs_update] = replace_node(current_path, probs, current_probs, m, n, prob_detect)
    %Hill-climbing move: replace one interior node of the path with the best
    %cell that neighbors BOTH of its path-neighbors.
    %  current_path  - L-by-2 [x y] path
    %  probs         - original (undiscounted) probability matrix, used to re-score
    %  current_probs - probabilities already discounted along the current path
    %  m, n          - dimensions of the array
    %  prob_detect   - probability of detection
    new_point_choice = zeros(size(current_path));
    %BUG FIX: preallocate one score per path ROW; the original sized this by
    %the number of columns (2) and relied on implicit array growth
    best_probs = zeros(length(current_path(:,1)),1);
    for k = 2:(length(current_path(:,1))-1)
        next_xy = current_path(k+1,:);     %next point on path (comment was swapped in the original)
        previous_xy = current_path(k-1,:); %previous point on path
        %the current point's own neighbors are irrelevant: it is being removed
        [nnx,nny] = get_neighbors(next_xy(1), next_xy(2), m, n);
        next_neighbors = [nnx nny];
        [npx,npy] = get_neighbors(previous_xy(1), previous_xy(2), m, n);
        prev_neighbors = [npx npy];
        %the replacement must be a neighbor of both adjacent path points
        valid_choices = intersect(next_neighbors, prev_neighbors, 'rows');
        n_probs = zeros(length(valid_choices(:,1)),1);
        for i = 1:length(valid_choices(:,1))
            n_probs(i) = current_probs(valid_choices(i,2), valid_choices(i,1)); %score on the discounted probs
        end
        a = find(n_probs == max(n_probs));
        new_point_choice(k,:) = valid_choices(a(1),:);
        best_probs(k) = probs(new_point_choice(k,2), new_point_choice(k,1));
    end
    best_point = find(best_probs == max(best_probs));
    if length(best_point) > 1
        best_point = best_point(1); %tie-break: pick the first best choice
    end
    new_path = [current_path(1:best_point-1,:); new_point_choice(best_point,:); current_path(best_point+1:end,:)];
    %re-score the whole path from scratch: a replaced point invalidates the
    %running sum. BUG FIX: discount cumulatively via probs_update so repeat
    %visits compound, matching the scoring loop in hill_climbing; the original
    %read the undiscounted matrix on every visit.
    path_sum_new = 0;
    probs_update = probs;
    for j = 1:length(new_path(:,1))
        path_sum_new = path_sum_new + prob_detect*probs_update(new_path(j,2), new_path(j,1));
        probs_update(new_path(j,2), new_path(j,1)) = probs_update(new_path(j,2), new_path(j,1))*(1-prob_detect);
    end
end
caf1262..68c07d6 100644 --- a/urls.py +++ b/urls.py @@ -1,4 +1,7 @@ -from django.conf.urls.defaults import patterns, include, url +try: + from django.conf.urls.defaults import patterns, include, url +except: + from django.conf.urls import patterns, include, url from django.conf.urls.static import static from django.http import HttpResponse from django.conf import settings